code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#%% train ddpg
import os
import gym
import numpy as np
from stable_baselines import DDPG
from stable_baselines.ddpg.policies import MlpPolicy
from stable_baselines.ddpg.policies import LnMlpPolicy
from stable_baselines.common.noise import NormalActionNoise
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
import Arm2DEnv as ae
from utils import SaveOnBestTrainingRewardCallback, snap_code
#%% sac
log_dir_root = './sandbox/ddpg/'
os.makedirs(log_dir_root, exist_ok=True)
# create a snapshot of code
log_dir = snap_code(log_dir_root)
env = ae.ArmModel()
env.mode = 'train'
env = Monitor(env, log_dir)
n_actions = env.action_space.shape[-1]
action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma=float(0.1) * np.ones(n_actions))
model = DDPG(LnMlpPolicy, env, verbose=1, gamma=0.98, tau=0.01,
actor_lr=0.0001, critic_lr=0.001, action_noise=action_noise,
buffer_size=int(5E6), batch_size=128, random_exploration=0.001)
callback = SaveOnBestTrainingRewardCallback(check_freq=int(5E4), log_dir=log_dir)
model.learn(total_timesteps=int(6E6), callback=callback)#(total_timesteps=int(4E5))
# %%
model.save("twolink_arm_ddpg") | [
"utils.snap_code",
"os.makedirs",
"stable_baselines.bench.Monitor",
"numpy.zeros",
"numpy.ones",
"Arm2DEnv.ArmModel"
] | [((486, 526), 'os.makedirs', 'os.makedirs', (['log_dir_root'], {'exist_ok': '(True)'}), '(log_dir_root, exist_ok=True)\n', (497, 526), False, 'import os\n'), ((566, 589), 'utils.snap_code', 'snap_code', (['log_dir_root'], {}), '(log_dir_root)\n', (575, 589), False, 'from utils import SaveOnBestTrainingRewardCallback, snap_code\n'), ((597, 610), 'Arm2DEnv.ArmModel', 'ae.ArmModel', ([], {}), '()\n', (608, 610), True, 'import Arm2DEnv as ae\n'), ((636, 657), 'stable_baselines.bench.Monitor', 'Monitor', (['env', 'log_dir'], {}), '(env, log_dir)\n', (643, 657), False, 'from stable_baselines.bench import Monitor\n'), ((736, 755), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (744, 755), True, 'import numpy as np\n'), ((776, 794), 'numpy.ones', 'np.ones', (['n_actions'], {}), '(n_actions)\n', (783, 794), True, 'import numpy as np\n')] |
import lammpstools
import dumpreader
import numpy as np
import sys
if len(sys.argv) < 2:
print >> sys.stderr, "Pass a dump file!"
sys.exit(-1)
fname = sys.argv[1]
d = dumpreader.dumpreader(fname)
b = d.getblock()
# Assume particles on sphere.
R2 = 0.0;
c = 1.0 / float(b.meta.N)
for i in range(0,b.meta.N):
R2 += np.dot( b.x[i], b.x[i] ) * c
R = math.sqrt(R2)
print >> sys.stderr, "<R> = %f" % R
| [
"numpy.dot",
"dumpreader.dumpreader",
"sys.exit"
] | [((178, 206), 'dumpreader.dumpreader', 'dumpreader.dumpreader', (['fname'], {}), '(fname)\n', (199, 206), False, 'import dumpreader\n'), ((139, 151), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (147, 151), False, 'import sys\n'), ((330, 352), 'numpy.dot', 'np.dot', (['b.x[i]', 'b.x[i]'], {}), '(b.x[i], b.x[i])\n', (336, 352), True, 'import numpy as np\n')] |
import glob
import pandas as pd
import numpy as np
import cv2
visual = True
col_names = ['youtube_id', 'timestamp_ms', 'class_id', 'class_name',
'object_id', 'object_presence', 'xmin', 'xmax', 'ymin', 'ymax']
df = pd.DataFrame.from_csv('yt_bb_detection_validation.csv', header=None, index_col=False)
df.columns = col_names
frame_num = len(df['youtube_id'])
img_path = glob.glob('/mnt/qwang/youtubebb/frames/val*/*/*.jpg')
d = {key.split('/')[-1]: value for (value, key) in enumerate(img_path)}
for n in range(frame_num):
if df['object_presence'][n]:
frame_name = df['youtube_id'][n] + '_' + str(df['timestamp_ms'][n]) + '_' + \
str(df['class_id'][n]) + '_' + str(df['object_id'][n]) + '.jpg'
bbox = np.array([df['xmin'][n],df['ymin'][n],df['xmax'][n],df['ymax'][n]])
if frame_name in d.keys():
frame_path = img_path[d[frame_name]]
if visual:
im = cv2.imread(frame_path)
h, w, _ = im.shape
pt1 = (int(bbox[0]*w), int(bbox[1]*h))
pt2 = (int(bbox[2]*w), int(bbox[3]*h))
cv2.rectangle(im, pt1, pt2, (0, 255, 0), 2)
cv2.imshow('img', im)
cv2.waitKey(100)
else:
print('no image: {}'.format(frame_name))
pass
else:
pass
print('done')
| [
"cv2.waitKey",
"pandas.DataFrame.from_csv",
"cv2.imread",
"numpy.array",
"cv2.rectangle",
"glob.glob",
"cv2.imshow"
] | [((230, 319), 'pandas.DataFrame.from_csv', 'pd.DataFrame.from_csv', (['"""yt_bb_detection_validation.csv"""'], {'header': 'None', 'index_col': '(False)'}), "('yt_bb_detection_validation.csv', header=None,\n index_col=False)\n", (251, 319), True, 'import pandas as pd\n'), ((385, 438), 'glob.glob', 'glob.glob', (['"""/mnt/qwang/youtubebb/frames/val*/*/*.jpg"""'], {}), "('/mnt/qwang/youtubebb/frames/val*/*/*.jpg')\n", (394, 438), False, 'import glob\n'), ((758, 828), 'numpy.array', 'np.array', (["[df['xmin'][n], df['ymin'][n], df['xmax'][n], df['ymax'][n]]"], {}), "([df['xmin'][n], df['ymin'][n], df['xmax'][n], df['ymax'][n]])\n", (766, 828), True, 'import numpy as np\n'), ((954, 976), 'cv2.imread', 'cv2.imread', (['frame_path'], {}), '(frame_path)\n', (964, 976), False, 'import cv2\n'), ((1138, 1181), 'cv2.rectangle', 'cv2.rectangle', (['im', 'pt1', 'pt2', '(0, 255, 0)', '(2)'], {}), '(im, pt1, pt2, (0, 255, 0), 2)\n', (1151, 1181), False, 'import cv2\n'), ((1198, 1219), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'im'], {}), "('img', im)\n", (1208, 1219), False, 'import cv2\n'), ((1236, 1252), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (1247, 1252), False, 'import cv2\n')] |
import numpy as np
import SimpleITK as sitk
import os
def find_percentiles(x):
x_flat = np.reshape(x, (-1, 1))
x_float = x_flat.astype(np.float64)
y = np.ma.masked_where(x_float == 0, x_float)
y = np.ma.filled(y, np.nan)
return np.nanpercentile(y, 25), np.nanpercentile(y, 50), np.nanpercentile(y, 75)
heatmaps_dir = './heatmaps/'
edema_heatmap_dir = os.path.join(heatmaps_dir, 'edema_heatmap.nii.gz')
necrosis_heatmap_dir = os.path.join(heatmaps_dir, 'necrosis_heatmap.nii.gz')
tumor_heatmap_dir = os.path.join(heatmaps_dir, 'tumor_heatmap.nii.gz')
edema_heatmap_img = sitk.ReadImage(edema_heatmap_dir)
edema_nda = sitk.GetArrayFromImage(edema_heatmap_img)
necrosis_heatmap_img = sitk.ReadImage(necrosis_heatmap_dir)
necrosis_nda = sitk.GetArrayFromImage(necrosis_heatmap_img)
tumor_heatmap_img = sitk.ReadImage(tumor_heatmap_dir)
tumor_nda = sitk.GetArrayFromImage(tumor_heatmap_img)
VOI = np.zeros(tumor_nda.shape, tumor_nda.dtype)
ed25, ed50, ed75 = find_percentiles(edema_nda)
ne25, ne50, ne75 = find_percentiles(necrosis_nda)
tu25, tu50, tu75 = find_percentiles(tumor_nda)
print('Edema:', ed25, ed50, ed75)
print('Necrosis:', ne25, ne50, ne75)
print('Tumor:', tu25, tu50, tu75)
VOI[edema_nda>=ed25] = 1
VOI[necrosis_nda>=ne25] = 2
VOI[tumor_nda>=tu25] = 3
VOI[edema_nda>=ed50] = 4
VOI[necrosis_nda>=ne50] = 5
VOI[tumor_nda>=tu50] = 6
VOI[edema_nda>=ed75] = 7
VOI[necrosis_nda>=ne75] = 8
VOI[tumor_nda>=tu75] = 9
VOI_img = sitk.GetImageFromArray(VOI)
VOI_img.CopyInformation(edema_heatmap_img)
VOI_path = './VOI'
if not os.path.exists(VOI_path):
os.makedirs(VOI_path)
sitk.WriteImage(VOI_img, os.path.join(VOI_path, 'VOI-1mm.nii.gz'))
for i in range(1,10):
VOI_lab = np.zeros(VOI.shape, VOI.dtype)
VOI_lab_name = 'VOI-1mm-lab'+str(i)+'.nii.gz'
VOI_lab[VOI == i] = 1
VOI_lab_img = sitk.GetImageFromArray(VOI_lab)
VOI_lab_img.CopyInformation(edema_heatmap_img)
sitk.WriteImage(VOI_lab_img, os.path.join(VOI_path, VOI_lab_name))
| [
"numpy.nanpercentile",
"os.makedirs",
"numpy.ma.masked_where",
"SimpleITK.ReadImage",
"numpy.zeros",
"os.path.exists",
"SimpleITK.GetArrayFromImage",
"numpy.reshape",
"SimpleITK.GetImageFromArray",
"numpy.ma.filled",
"os.path.join"
] | [((359, 409), 'os.path.join', 'os.path.join', (['heatmaps_dir', '"""edema_heatmap.nii.gz"""'], {}), "(heatmaps_dir, 'edema_heatmap.nii.gz')\n", (371, 409), False, 'import os\n'), ((433, 486), 'os.path.join', 'os.path.join', (['heatmaps_dir', '"""necrosis_heatmap.nii.gz"""'], {}), "(heatmaps_dir, 'necrosis_heatmap.nii.gz')\n", (445, 486), False, 'import os\n'), ((507, 557), 'os.path.join', 'os.path.join', (['heatmaps_dir', '"""tumor_heatmap.nii.gz"""'], {}), "(heatmaps_dir, 'tumor_heatmap.nii.gz')\n", (519, 557), False, 'import os\n'), ((579, 612), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['edema_heatmap_dir'], {}), '(edema_heatmap_dir)\n', (593, 612), True, 'import SimpleITK as sitk\n'), ((625, 666), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['edema_heatmap_img'], {}), '(edema_heatmap_img)\n', (647, 666), True, 'import SimpleITK as sitk\n'), ((691, 727), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['necrosis_heatmap_dir'], {}), '(necrosis_heatmap_dir)\n', (705, 727), True, 'import SimpleITK as sitk\n'), ((743, 787), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['necrosis_heatmap_img'], {}), '(necrosis_heatmap_img)\n', (765, 787), True, 'import SimpleITK as sitk\n'), ((809, 842), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['tumor_heatmap_dir'], {}), '(tumor_heatmap_dir)\n', (823, 842), True, 'import SimpleITK as sitk\n'), ((855, 896), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['tumor_heatmap_img'], {}), '(tumor_heatmap_img)\n', (877, 896), True, 'import SimpleITK as sitk\n'), ((904, 946), 'numpy.zeros', 'np.zeros', (['tumor_nda.shape', 'tumor_nda.dtype'], {}), '(tumor_nda.shape, tumor_nda.dtype)\n', (912, 946), True, 'import numpy as np\n'), ((1445, 1472), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['VOI'], {}), '(VOI)\n', (1467, 1472), True, 'import SimpleITK as sitk\n'), ((90, 112), 'numpy.reshape', 'np.reshape', (['x', '(-1, 1)'], {}), '(x, (-1, 1))\n', (100, 112), True, 'import numpy as np\n'), ((155, 
196), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(x_float == 0)', 'x_float'], {}), '(x_float == 0, x_float)\n', (173, 196), True, 'import numpy as np\n'), ((202, 225), 'numpy.ma.filled', 'np.ma.filled', (['y', 'np.nan'], {}), '(y, np.nan)\n', (214, 225), True, 'import numpy as np\n'), ((1544, 1568), 'os.path.exists', 'os.path.exists', (['VOI_path'], {}), '(VOI_path)\n', (1558, 1568), False, 'import os\n'), ((1571, 1592), 'os.makedirs', 'os.makedirs', (['VOI_path'], {}), '(VOI_path)\n', (1582, 1592), False, 'import os\n'), ((1619, 1659), 'os.path.join', 'os.path.join', (['VOI_path', '"""VOI-1mm.nii.gz"""'], {}), "(VOI_path, 'VOI-1mm.nii.gz')\n", (1631, 1659), False, 'import os\n'), ((1695, 1725), 'numpy.zeros', 'np.zeros', (['VOI.shape', 'VOI.dtype'], {}), '(VOI.shape, VOI.dtype)\n', (1703, 1725), True, 'import numpy as np\n'), ((1811, 1842), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['VOI_lab'], {}), '(VOI_lab)\n', (1833, 1842), True, 'import SimpleITK as sitk\n'), ((234, 257), 'numpy.nanpercentile', 'np.nanpercentile', (['y', '(25)'], {}), '(y, 25)\n', (250, 257), True, 'import numpy as np\n'), ((259, 282), 'numpy.nanpercentile', 'np.nanpercentile', (['y', '(50)'], {}), '(y, 50)\n', (275, 282), True, 'import numpy as np\n'), ((284, 307), 'numpy.nanpercentile', 'np.nanpercentile', (['y', '(75)'], {}), '(y, 75)\n', (300, 307), True, 'import numpy as np\n'), ((1921, 1957), 'os.path.join', 'os.path.join', (['VOI_path', 'VOI_lab_name'], {}), '(VOI_path, VOI_lab_name)\n', (1933, 1957), False, 'import os\n')] |
"""Shuffles input examples in time and writes them to new file."""
import random
import os.path
import argparse
import numpy
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import input_examples
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
INPUT_DIR_ARG_NAME = 'input_example_dir_name'
FIRST_DATE_ARG_NAME = 'first_spc_date_string'
LAST_DATE_ARG_NAME = 'last_spc_date_string'
OUTPUT_DIR_ARG_NAME = 'output_example_dir_name'
RADAR_FIELDS_ARG_NAME = 'radar_field_names'
FIRST_BATCH_NUM_ARG_NAME = 'first_output_batch_number'
NUM_EXAMPLES_PER_CHUNK_ARG_NAME = 'num_examples_per_out_chunk'
NUM_EXAMPLES_PER_OUT_FILE_ARG_NAME = 'num_examples_per_out_file'
INPUT_DIR_HELP_STRING = (
'Name of top-level directory with input files (containing unshuffled '
'examples). Files therein will be found by '
'`input_examples.find_example_file` and read by '
'`input_examples.read_example_file`.')
SPC_DATE_HELP_STRING = (
'SPC date (format "yyyymmdd"). This script will shuffle examples from the '
'time period `{0:s}`...`{1:s}`.'
).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME)
OUTPUT_DIR_HELP_STRING = (
'Name of top-level directory for output files (containing shuffled '
'examples). Files will be written by `input_examples.write_example_file`, '
'to locations in this directory determined by '
'`input_examples.find_example_file`.')
RADAR_FIELDS_HELP_STRING = (
'List of radar fields to output. Each field must be accepted by '
'`radar_utils.check_field_name`. If you leave this argument, all radar '
'fields will be output.')
FIRST_BATCH_NUM_HELP_STRING = (
'First batch number (integer). Used to determine locations of output '
'files.')
NUM_EXAMPLES_PER_CHUNK_HELP_STRING = (
'Number of examples per output chunk (all written to the same file).')
NUM_EXAMPLES_PER_OUT_FILE_HELP_STRING = (
'Number of examples written to each output file.')
DEFAULT_NUM_EXAMPLES_PER_CHUNK = 8
DEFAULT_NUM_EXAMPLES_PER_OUT_FILE = 256
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + INPUT_DIR_ARG_NAME, type=str, required=True,
help=INPUT_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + FIRST_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAST_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + RADAR_FIELDS_ARG_NAME, type=str, nargs='+', required=False,
default=[''], help=RADAR_FIELDS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + FIRST_BATCH_NUM_ARG_NAME, type=int, required=True,
help=FIRST_BATCH_NUM_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_EXAMPLES_PER_CHUNK_ARG_NAME, type=int, required=False,
default=DEFAULT_NUM_EXAMPLES_PER_CHUNK,
help=NUM_EXAMPLES_PER_CHUNK_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_EXAMPLES_PER_OUT_FILE_ARG_NAME, type=int, required=False,
default=DEFAULT_NUM_EXAMPLES_PER_OUT_FILE,
help=NUM_EXAMPLES_PER_OUT_FILE_HELP_STRING)
def _find_input_files(
top_input_dir_name, first_spc_date_string, last_spc_date_string):
"""Finds input files (containing unshuffled examples).
:param top_input_dir_name: See documentation at top of file.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:return: input_example_file_names: 1-D list of paths to input files.
:return: num_input_examples: Total number of examples in these files.
"""
input_example_file_names = input_examples.find_many_example_files(
top_directory_name=top_input_dir_name, shuffled=False,
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string,
raise_error_if_any_missing=False)
num_input_examples = 0
for this_file_name in input_example_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_example_dict = input_examples.read_example_file(
netcdf_file_name=this_file_name, read_all_target_vars=True,
metadata_only=True)
num_input_examples += len(
this_example_dict[input_examples.FULL_IDS_KEY]
)
return input_example_file_names, num_input_examples
def _set_output_locations(
top_output_dir_name, num_input_examples, num_examples_per_out_file,
first_output_batch_number):
"""Sets locations of output files.
:param top_output_dir_name: See documentation at top of file.
:param num_input_examples: Total number of examples in input files.
:param num_examples_per_out_file: See documentation at top of file.
:param first_output_batch_number: Same.
:return: output_example_file_names: 1-D list of paths to output files.
"""
num_output_files = int(
numpy.ceil(float(num_input_examples) / num_examples_per_out_file)
)
print((
'Num input examples = {0:d} ... num examples per output file = {1:d} '
'... num output files = {2:d}'
).format(num_input_examples, num_examples_per_out_file, num_output_files))
output_example_file_names = [
input_examples.find_example_file(
top_directory_name=top_output_dir_name, shuffled=True,
batch_number=first_output_batch_number + i,
raise_error_if_missing=False
) for i in range(num_output_files)
]
for this_file_name in output_example_file_names:
if not os.path.isfile(this_file_name):
continue
print('Deleting output file: "{0:s}"...'.format(this_file_name))
os.remove(this_file_name)
return output_example_file_names
def _shuffle_one_input_file(
input_example_file_name, radar_field_names, num_examples_per_out_chunk,
output_example_file_names):
"""Shuffles examples from one input file to many output files.
:param input_example_file_name: Path to input file.
:param radar_field_names: See documentation at top of file.
:param num_examples_per_out_chunk: Same.
:param output_example_file_names: 1-D list of paths to output files.
"""
print('Reading data from: "{0:s}"...'.format(input_example_file_name))
example_dict = input_examples.read_example_file(
netcdf_file_name=input_example_file_name, read_all_target_vars=True,
radar_field_names_to_keep=radar_field_names)
num_examples = len(example_dict[input_examples.FULL_IDS_KEY])
shuffled_indices = numpy.linspace(
0, num_examples - 1, num=num_examples, dtype=int)
numpy.random.shuffle(shuffled_indices)
example_dict = input_examples.subset_examples(
example_dict=example_dict, indices_to_keep=shuffled_indices)
for j in range(0, num_examples, num_examples_per_out_chunk):
this_first_index = j
this_last_index = min(
[j + num_examples_per_out_chunk - 1, num_examples - 1]
)
these_indices = numpy.linspace(
this_first_index, this_last_index,
num=this_last_index - this_first_index + 1, dtype=int)
this_example_dict = input_examples.subset_examples(
example_dict=example_dict, indices_to_keep=these_indices,
create_new_dict=True)
this_output_file_name = random.choice(output_example_file_names)
print('Writing shuffled examples to: "{0:s}"...'.format(
this_output_file_name))
input_examples.write_example_file(
netcdf_file_name=this_output_file_name,
example_dict=this_example_dict,
append_to_file=os.path.isfile(this_output_file_name)
)
def _run(top_input_dir_name, first_spc_date_string, last_spc_date_string,
top_output_dir_name, radar_field_names, first_output_batch_number,
num_examples_per_out_chunk, num_examples_per_out_file):
"""Shuffles input examples in time and writes them to new file.
This is effectively the main method.
:param top_input_dir_name: See documentation at top of file.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:param top_output_dir_name: Same.
:param radar_field_names: Same.
:param first_output_batch_number: Same.
:param num_examples_per_out_chunk: Same.
:param num_examples_per_out_file: Same.
"""
if radar_field_names[0] in ['', 'None']:
radar_field_names = None
error_checking.assert_is_geq(num_examples_per_out_chunk, 2)
error_checking.assert_is_geq(num_examples_per_out_file, 100)
input_example_file_names, num_input_examples = _find_input_files(
top_input_dir_name=top_input_dir_name,
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
print(SEPARATOR_STRING)
output_example_file_names = _set_output_locations(
top_output_dir_name=top_output_dir_name,
num_input_examples=num_input_examples,
num_examples_per_out_file=num_examples_per_out_file,
first_output_batch_number=first_output_batch_number)
print(SEPARATOR_STRING)
for this_file_name in input_example_file_names:
_shuffle_one_input_file(
input_example_file_name=this_file_name,
radar_field_names=radar_field_names,
num_examples_per_out_chunk=num_examples_per_out_chunk,
output_example_file_names=output_example_file_names)
print('\n')
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
top_input_dir_name=getattr(INPUT_ARG_OBJECT, INPUT_DIR_ARG_NAME),
first_spc_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME),
last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME),
top_output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME),
radar_field_names=getattr(INPUT_ARG_OBJECT, RADAR_FIELDS_ARG_NAME),
first_output_batch_number=getattr(
INPUT_ARG_OBJECT, FIRST_BATCH_NUM_ARG_NAME),
num_examples_per_out_chunk=getattr(
INPUT_ARG_OBJECT, NUM_EXAMPLES_PER_CHUNK_ARG_NAME),
num_examples_per_out_file=getattr(
INPUT_ARG_OBJECT, NUM_EXAMPLES_PER_OUT_FILE_ARG_NAME)
)
| [
"gewittergefahr.deep_learning.input_examples.find_many_example_files",
"argparse.ArgumentParser",
"gewittergefahr.gg_utils.error_checking.assert_is_geq",
"gewittergefahr.deep_learning.input_examples.find_example_file",
"random.choice",
"gewittergefahr.deep_learning.input_examples.read_example_file",
"nu... | [((2053, 2078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2076, 2078), False, 'import argparse\n'), ((3724, 3953), 'gewittergefahr.deep_learning.input_examples.find_many_example_files', 'input_examples.find_many_example_files', ([], {'top_directory_name': 'top_input_dir_name', 'shuffled': '(False)', 'first_spc_date_string': 'first_spc_date_string', 'last_spc_date_string': 'last_spc_date_string', 'raise_error_if_any_missing': '(False)'}), '(top_directory_name=\n top_input_dir_name, shuffled=False, first_spc_date_string=\n first_spc_date_string, last_spc_date_string=last_spc_date_string,\n raise_error_if_any_missing=False)\n', (3762, 3953), False, 'from gewittergefahr.deep_learning import input_examples\n'), ((6403, 6553), 'gewittergefahr.deep_learning.input_examples.read_example_file', 'input_examples.read_example_file', ([], {'netcdf_file_name': 'input_example_file_name', 'read_all_target_vars': '(True)', 'radar_field_names_to_keep': 'radar_field_names'}), '(netcdf_file_name=input_example_file_name,\n read_all_target_vars=True, radar_field_names_to_keep=radar_field_names)\n', (6435, 6553), False, 'from gewittergefahr.deep_learning import input_examples\n'), ((6657, 6721), 'numpy.linspace', 'numpy.linspace', (['(0)', '(num_examples - 1)'], {'num': 'num_examples', 'dtype': 'int'}), '(0, num_examples - 1, num=num_examples, dtype=int)\n', (6671, 6721), False, 'import numpy\n'), ((6735, 6773), 'numpy.random.shuffle', 'numpy.random.shuffle', (['shuffled_indices'], {}), '(shuffled_indices)\n', (6755, 6773), False, 'import numpy\n'), ((6794, 6890), 'gewittergefahr.deep_learning.input_examples.subset_examples', 'input_examples.subset_examples', ([], {'example_dict': 'example_dict', 'indices_to_keep': 'shuffled_indices'}), '(example_dict=example_dict, indices_to_keep=\n shuffled_indices)\n', (6824, 6890), False, 'from gewittergefahr.deep_learning import input_examples\n'), ((8579, 8638), 
'gewittergefahr.gg_utils.error_checking.assert_is_geq', 'error_checking.assert_is_geq', (['num_examples_per_out_chunk', '(2)'], {}), '(num_examples_per_out_chunk, 2)\n', (8607, 8638), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((8643, 8703), 'gewittergefahr.gg_utils.error_checking.assert_is_geq', 'error_checking.assert_is_geq', (['num_examples_per_out_file', '(100)'], {}), '(num_examples_per_out_file, 100)\n', (8671, 8703), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((4152, 4268), 'gewittergefahr.deep_learning.input_examples.read_example_file', 'input_examples.read_example_file', ([], {'netcdf_file_name': 'this_file_name', 'read_all_target_vars': '(True)', 'metadata_only': '(True)'}), '(netcdf_file_name=this_file_name,\n read_all_target_vars=True, metadata_only=True)\n', (4184, 4268), False, 'from gewittergefahr.deep_learning import input_examples\n'), ((5332, 5501), 'gewittergefahr.deep_learning.input_examples.find_example_file', 'input_examples.find_example_file', ([], {'top_directory_name': 'top_output_dir_name', 'shuffled': '(True)', 'batch_number': '(first_output_batch_number + i)', 'raise_error_if_missing': '(False)'}), '(top_directory_name=top_output_dir_name,\n shuffled=True, batch_number=first_output_batch_number + i,\n raise_error_if_missing=False)\n', (5364, 5501), False, 'from gewittergefahr.deep_learning import input_examples\n'), ((7123, 7231), 'numpy.linspace', 'numpy.linspace', (['this_first_index', 'this_last_index'], {'num': '(this_last_index - this_first_index + 1)', 'dtype': 'int'}), '(this_first_index, this_last_index, num=this_last_index -\n this_first_index + 1, dtype=int)\n', (7137, 7231), False, 'import numpy\n'), ((7282, 7397), 'gewittergefahr.deep_learning.input_examples.subset_examples', 'input_examples.subset_examples', ([], {'example_dict': 'example_dict', 'indices_to_keep': 'these_indices', 'create_new_dict': '(True)'}), '(example_dict=example_dict, indices_to_keep=\n these_indices, 
create_new_dict=True)\n', (7312, 7397), False, 'from gewittergefahr.deep_learning import input_examples\n'), ((7451, 7491), 'random.choice', 'random.choice', (['output_example_file_names'], {}), '(output_example_file_names)\n', (7464, 7491), False, 'import random\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
values = np.arange(0.001, 1, 0.001, dtype=float)
logit = np.log(values / (1 - values))
inverse_logit = np.exp(logit) / (1 + np.exp(logit))
plt.rcParams['font.size'] = 18 # 横軸や縦軸の名前の文字などのフォントのサイズ
plt.scatter(values, logit, c='blue') # プロット
plt.xlabel('y') # x 軸の名前
plt.ylabel('z (after logit)') # y 軸の名前
plt.xlim(0, 1) # x 軸の範囲の設定
plt.show() # 以上の設定で描画
plt.figure(figsize=figure.figaspect(1)) # 図を正方形に
plt.scatter(values, inverse_logit, c='blue') # プロット
plt.xlabel('y') # x 軸の名前
plt.ylabel('inverse logit of z') # y 軸の名前
plt.xlim(0, 1) # x 軸の範囲の設定
plt.ylim(0, 1) # y 軸の範囲の設定
plt.show() # 以上の設定で描画
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.figure.figaspect"
] | [((154, 193), 'numpy.arange', 'np.arange', (['(0.001)', '(1)', '(0.001)'], {'dtype': 'float'}), '(0.001, 1, 0.001, dtype=float)\n', (163, 193), True, 'import numpy as np\n'), ((203, 232), 'numpy.log', 'np.log', (['(values / (1 - values))'], {}), '(values / (1 - values))\n', (209, 232), True, 'import numpy as np\n'), ((347, 383), 'matplotlib.pyplot.scatter', 'plt.scatter', (['values', 'logit'], {'c': '"""blue"""'}), "(values, logit, c='blue')\n", (358, 383), True, 'import matplotlib.pyplot as plt\n'), ((393, 408), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""y"""'], {}), "('y')\n", (403, 408), True, 'import matplotlib.pyplot as plt\n'), ((420, 449), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""z (after logit)"""'], {}), "('z (after logit)')\n", (430, 449), True, 'import matplotlib.pyplot as plt\n'), ((461, 475), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (469, 475), True, 'import matplotlib.pyplot as plt\n'), ((490, 500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (498, 500), True, 'import matplotlib.pyplot as plt\n'), ((567, 611), 'matplotlib.pyplot.scatter', 'plt.scatter', (['values', 'inverse_logit'], {'c': '"""blue"""'}), "(values, inverse_logit, c='blue')\n", (578, 611), True, 'import matplotlib.pyplot as plt\n'), ((621, 636), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""y"""'], {}), "('y')\n", (631, 636), True, 'import matplotlib.pyplot as plt\n'), ((648, 680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""inverse logit of z"""'], {}), "('inverse logit of z')\n", (658, 680), True, 'import matplotlib.pyplot as plt\n'), ((692, 706), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (700, 706), True, 'import matplotlib.pyplot as plt\n'), ((721, 735), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (729, 735), True, 'import matplotlib.pyplot as plt\n'), ((750, 760), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (758, 760), True, 'import 
matplotlib.pyplot as plt\n'), ((250, 263), 'numpy.exp', 'np.exp', (['logit'], {}), '(logit)\n', (256, 263), True, 'import numpy as np\n'), ((271, 284), 'numpy.exp', 'np.exp', (['logit'], {}), '(logit)\n', (277, 284), True, 'import numpy as np\n'), ((535, 554), 'matplotlib.figure.figaspect', 'figure.figaspect', (['(1)'], {}), '(1)\n', (551, 554), True, 'import matplotlib.figure as figure\n')] |
# LM model imports
import os
import time
import torch
from torch import optim
from models.context2vec.src.mscc_eval import mscc_evaluation
from models.context2vec.src.model import Context2vec
from models.context2vec.src.args import parse_args
from models.context2vec.src.dataset import WikiDataset
from models.context2vec.src.config import Config
from models.context2vec.src.utils import write_embedding, write_config, read_config, load_vocab
# OCR model imports
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
import numpy as np
import cv2
import itertools
from models.ocr.src.config import letters
# OCRBeamSearch model
from models.OCRBeamSearch.src.Model import Model, DecoderType
from models.OCRBeamSearch.src.SamplePreprocessor import preprocess
import sys
import cv2
import editdistance
from collections import defaultdict
from operator import itemgetter
from nltk.stem import PorterStemmer, WordNetLemmatizer
import Levenshtein
import re
class FilePaths:
"filenames and paths to data"
fnCharList = 'models/OCRBeamSearch/model/charList.txt'
fnAccuracy = 'models/OCRBeamSearch/model/accuracy.txt'
fnTrain = 'models/OCRBeamSearch/data/'
fnInfer = 'models/OCRBeamSearch/data/test.png'
fnCorpus = 'models/OCRBeamSearch/data/corpus.txt'
class Batch:
"batch containing images and ground truth texts"
def __init__(self, gtTexts, imgs):
self.imgs = np.stack(imgs, axis=0)
self.gtTexts = gtTexts
class Inference():
"""
Model Inference
Attributes:
device (str): where to run the code - cpu or gpu
img_width (int): desired width of image (only used for orig OCR)
img_height (int): desired height of image (only used for orig OCR)
stemmer (object): NLTK PorterStemmer class
lemma (object): NLTK WordNetLemmatizer class
"""
def __init__(self, img_width=128, img_height=64, device='cpu', decoding=None):
self.device = device
self.img_width = img_width
self.img_height = img_height
self.decoding = decoding
self.build_language_model()
# self.build_ocr_model()
self.build_beam_ocr_model(decoding)
self.stemmer = PorterStemmer()
self.lemma = WordNetLemmatizer()
def build_language_model(self, model_dir='models/context2vec/models_103'):
"""
Builds Language model
Args:
model_dir (str): path to model directory
Returns:
None
"""
# LANGUAGE MODEL
modelfile = os.path.join(model_dir, 'model.param')
wordsfile = os.path.join(model_dir, 'embedding.vec')
config_file = modelfile+'.config.json'
config_dict = read_config(config_file)
self.lm_model = Context2vec(vocab_size=config_dict['vocab_size'],
counter=[1]*config_dict['vocab_size'],
word_embed_size=config_dict['word_embed_size'],
hidden_size=config_dict['hidden_size'],
n_layers=config_dict['n_layers'],
bidirectional=config_dict['bidirectional'],
dropout=config_dict['dropout'],
pad_idx=config_dict['pad_index'],
device=self.device,
inference=True).to(self.device)
self.lm_model.load_state_dict(torch.load(modelfile, map_location=self.device))
self.itos, self.stoi = load_vocab(wordsfile)
self.unk_token = config_dict['unk_token']
self.bos_token = config_dict['bos_token']
self.eos_token = config_dict['eos_token']
def build_beam_ocr_model(self, decoding):
"""
Builds Beam Search OCR model
Args:
decoderType (str): Decoding Type for Beam Search
Returns:
None
"""
# Beam Search OCR model
if decoding == 'beamsearch':
decoderType = DecoderType.BeamSearch
if decoding == 'wordbeamsearch':
decoderType = DecoderType.WordBeamSearch
else:
decoderType = DecoderType.BestPath
self.beam_ocr_model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore=True)
def build_ocr_model(self):
"""
Builds Original OCR model
"""
self.sess = tf.Session()
K.set_session(self.sess)
ocr_model_path = 'models/ocr/models/weights-improvement2-10-01-3.00.hdf5'
self.ocr_model = load_model(ocr_model_path, custom_objects={'<lambda>': lambda y_true, y_pred: y_pred})
def preprocess_image(self, img_path, img_width, img_height):
"""
Preprocess image for Original OCR model
Args:
img_path (str): Path to image
img_width (int): desired width of image (only used for orig OCR)
img_height (int): desired height of image (only used for orig OCR)
Returns:
img (numpy array): Scaled, formated, and reshaped image as numpy array
"""
img = cv2.imread(img_path)
# grayscale image
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# resize image
img = cv2.resize(img, (img_width, img_height))
# change image type
img = img.astype(np.float32)
# scale image
img /= 255
img = img.reshape((1, img_width, img_height, 1))
return img
def _decode_batch(self, out):
"""
Best Path decoding for original OCR model
Args:
out (array): Predictions array
Returns:
ret (str): String of maximum likihood word
"""
ret = []
for j in range(out.shape[0]):
out_best = list(np.argmax(out[j, 2:], 1))
out_best = [k for k, g in itertools.groupby(out_best)]
outstr = ''
for c in out_best:
if c < len(letters):
outstr += letters[c]
ret.append(outstr)
return ret
def _return_split_sentence(self, sentence):
"""
Formats input sentence for language model
Args:
sentence (str): Input string
Returns:
tokens (list): List of tokens
target_pos (int): Index of target word
"""
if ' ' not in sentence:
print('sentence should contain white space to split it into tokens')
raise SyntaxError
elif '[]' not in sentence:
print('sentence should contain `[]` that notes the target')
raise SyntaxError
else:
tokens = sentence.lower().strip().split()
target_pos = tokens.index('[]')
return tokens, target_pos
def run_lm_inference_by_user_input(self, sentence, topK=100):
"""
Processes user input sentence
Runs user input sentence through model
Returns topK predictions and probabilities
Args:
sentence (str): User input sentence
topK (int): Number of top predictions to return
Returns:
output (list): list of tuples [(probability, token), ...]
"""
# Dont return these to user
bad_list = ['<PAD>', '<BOS>', '<EOS>', '<UNK>', '<unk>']
# evaluation mode
self.lm_model.eval()
# norm_weight
self.lm_model.norm_embedding_weight(self.lm_model.criterion.W)
tokens, target_pos = self._return_split_sentence(sentence)
tokens[target_pos] = self.unk_token
tokens = [self.bos_token] + tokens + [self.eos_token]
indexed_sentence = [self.stoi[token] if token in self.stoi else self.stoi[self.unk_token] for token in tokens]
# to torch tensor
input_tokens = torch.tensor(indexed_sentence, dtype=torch.long, device=self.device).unsqueeze(0)
# run through model
topv, topi = self.lm_model.run_inference(input_tokens, target=None, target_pos=target_pos, k=topK)
output = []
for value, key in zip(topv, topi):
word = self.itos[key.item()]
if word not in bad_list:
output.append((value.item(), word))
return output
def run_beam_ocr_inference_by_user_image(self, img_path):
"""
Processes user input image BS
Runs user input image through model
Returns top prediction and probability
Args:
img_path (str): Path to user uploaded image
Returns:
recognized (list): Predicted token
probability (list): Probability of predictions
"""
img = preprocess(cv2.imread(img_path, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, probability) = self.beam_ocr_model.inferBatch(batch, True)
return (recognized, probability)
def run_ocr_inference_by_user_image(self, img):
"""
Returns top prediction from original OCR model
Args:
img (str): Path to user uploaded image
Returns:
pred_texts (list): Predicted token
"""
net_inp = self.ocr_model.get_layer(name='the_input').input
net_out = self.ocr_model.get_layer(name='softmax').output
net_out_value = self.sess.run(net_out, feed_dict={net_inp: img})
pred_texts = self._decode_batch(net_out_value)
return pred_texts
def create_features_improved(self, lm_preds, ocr_pred, ocr_prob):
"""
Create features for weighing algorithm using
language and OCR models predictions and probabilities
Args:
lm_preds (list): list of tuples [(probability, token), ...]
ocr_pred (list): Predicted token
ocr_prob (list): Probability of predictions
Returns:
features (dict): features for each model prediction token
"""
# create bins for length
bins = {
'small': list(range(0, 3)),
'small-mid': list(range(2, 6)),
'mid': list(range(4, 8)),
'mid-large': list(range(6, 10)),
'large': list(range(8, 12)),
'large-big': list(range(10, 14)),
'big': list(range(12, 100)),
}
bins = defaultdict(lambda: 'na', bins)
ocr_len = len([x for x in ocr_pred[0]])
ocr_pred_bins = [k for k, v in bins.items() if ocr_len in v]
features = {}
bad_list = ['<PAD>', '<BOS>', '<EOS>', '<UNK>', '<unk>'] # ADD foul words
matches = {}
matches_non_ordered = {}
ocr_pred_lower = ocr_pred[0].lower()
for lm_pred in lm_preds:
lm_prob, word = lm_pred[0], lm_pred[1].rstrip()
word = word.lower()
# remove pad, bos, etc...
if word not in bad_list:
# try:
features[word] = {}
features[word]['ocr_prob'] = ocr_prob[0]
features[word]['lm_prob'] = lm_prob
# length
word_len = len(word)
features[word]['exact_length_match'] = word_len == ocr_len
word_bins = [k for k, v in bins.items() if word_len in v]
features[word]['bin_length_match'] = False
for bin_ in ocr_pred_bins:
if bin_ in word_bins:
features[word]['bin_length_match'] = True
# levenshtein distance
features[word]['levenshtein'] = Levenshtein.ratio(word, ocr_pred_lower)
# editdistance (1 / dist) so less is better
features[word]['editdistance'] = 1 / (editdistance.eval(word, ocr_pred_lower) + 0.001) # for divide by zero error
# exact match
exact = word == ocr_pred_lower
exact_stem = self.stemmer.stem(word) == self.stemmer.stem(ocr_pred_lower)
exact_lemma = self.lemma.lemmatize(word) == self.lemma.lemmatize(ocr_pred_lower)
exact_length = word == ocr_pred_lower
features[word]['exact'] = exact
features[word]['exact_stem'] = exact_stem
features[word]['exact_lemma'] = exact_lemma
# match first and last character
first_char_match = word[0] == ocr_pred_lower[0]
last_char_match = word[-1] == ocr_pred_lower[-1]
features[word]['first_char_match'] = first_char_match
features[word]['last_char_match'] = last_char_match
# number of character matches
num_chars = 0
for char in ocr_pred_lower:
if char in word:
num_chars += 1
matches[word] = num_chars
features[word]['num_matches'] = matches[word] / (len(word) + 0.001) # for divide by zero error
# except Exception as e:
# print(str(e))
return features
def get_weights(self):
"""Custom weights to assign each feature based on validation set results"""
weights = {
'first_char_match': 0.63,
'last_char_match': 0.62,
'num_matches': 0.65,
'exact_length_match': 0.43,
'bin_length_match': 0.23,
'levenshtein': 0.95,
'editdistance': 0.49,
}
return weights
def final_scores(self, features, ocr_pred, ocr_prob_threshold, return_topK=None):
"""
Computes final score based on features and weights with some heuristics
Args:
features (dict): features for each model prediction token
ocr_pred (list): OCR Predicted token
ocr_prob_threshold (float): Probability threshold to return OCR model prediction
return_topK (int): Return topK results
Returns:
top_results (str): final predicted token
"""
final_scores = {}
for word, feature_dict in features.items():
# if exact match in both LM and OCR model simply return word
if feature_dict['exact'] or feature_dict['exact_stem'] or feature_dict['exact_lemma']:
return word
first_char_match = feature_dict['first_char_match']
last_char_match = feature_dict['last_char_match']
lm_prob = feature_dict['lm_prob']
ocr_prob = feature_dict['ocr_prob']
# if OCR model is really confident return OCR model prediction
if ocr_prob >= ocr_prob_threshold:
return ocr_pred
# NEXT: if ocr prob is decently high, look for words with small edit distance in LM model
weights = self.get_weights()
# compute final score
for word, dic in features.items():
for feature in weights.keys():
features[word].update({feature: (features[word][feature] * weights[feature])})
final_scores[word] = sum(features[word].values())
# sort top scores
top_results = sorted(final_scores.items(), key=itemgetter(1), reverse=True)
if return_topK:
return top_results[:return_topK]
return top_results[0][0]
def predict(self, sentence, img_path=None, ind_preds=None, ocr_prob_threshold=0.01, return_topK=None):
"""
Computes final score based on features and weights with some heuristics
Args:
sentence (str): User Input string
img_path (str): Path to user uploaded image
ind_preds (boolean): Return individual LM and OCR predictions
ocr_prob_threshold (float): Probability threshold to return OCR model prediction
return_topK (int): Return topK results
Returns:
out (str): final predicted token
"""
# if valid image filepath and contains text
valid_image = os.path.isfile(str(img_path))
valid_text = False
if re.search('[a-zA-Z&.,:;!?\d]', sentence) is not None:
valid_text = True
if valid_text:
lm_preds = self.run_lm_inference_by_user_input(sentence)
if valid_image:
ocr_pred, ocr_pred_prob = self.run_beam_ocr_inference_by_user_image(img_path)
if valid_text and valid_image:
features = self.create_features_improved(lm_preds, ocr_pred, ocr_pred_prob)
final_pred = self.final_scores(features, ocr_pred[0], ocr_prob_threshold)
out = final_pred
if ind_preds:
out = final_pred, lm_preds[0], ocr_pred, ocr_pred_prob
if return_topK:
out = final_pred, lm_preds[:return_topK], ocr_pred, ocr_pred_prob
if not valid_image and not valid_text:
return 'NO INPUT. TRY AGAIN'
if not valid_image:
out = lm_preds[0][1]
if not valid_text:
out = ocr_pred[0]
return out | [
"nltk.stem.PorterStemmer",
"numpy.argmax",
"collections.defaultdict",
"models.context2vec.src.utils.load_vocab",
"os.path.join",
"nltk.stem.WordNetLemmatizer",
"cv2.cvtColor",
"torch.load",
"models.context2vec.src.model.Context2vec",
"editdistance.eval",
"re.search",
"cv2.resize",
"numpy.sta... | [((1465, 1487), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (1473, 1487), True, 'import numpy as np\n'), ((2270, 2285), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (2283, 2285), False, 'from nltk.stem import PorterStemmer, WordNetLemmatizer\n'), ((2307, 2326), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (2324, 2326), False, 'from nltk.stem import PorterStemmer, WordNetLemmatizer\n'), ((2637, 2675), 'os.path.join', 'os.path.join', (['model_dir', '"""model.param"""'], {}), "(model_dir, 'model.param')\n", (2649, 2675), False, 'import os\n'), ((2696, 2736), 'os.path.join', 'os.path.join', (['model_dir', '"""embedding.vec"""'], {}), "(model_dir, 'embedding.vec')\n", (2708, 2736), False, 'import os\n'), ((2806, 2830), 'models.context2vec.src.utils.read_config', 'read_config', (['config_file'], {}), '(config_file)\n', (2817, 2830), False, 'from models.context2vec.src.utils import write_embedding, write_config, read_config, load_vocab\n'), ((3598, 3619), 'models.context2vec.src.utils.load_vocab', 'load_vocab', (['wordsfile'], {}), '(wordsfile)\n', (3608, 3619), False, 'from models.context2vec.src.utils import write_embedding, write_config, read_config, load_vocab\n'), ((4511, 4523), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4521, 4523), True, 'import tensorflow as tf\n'), ((4532, 4556), 'tensorflow.keras.backend.set_session', 'K.set_session', (['self.sess'], {}), '(self.sess)\n', (4545, 4556), True, 'from tensorflow.keras import backend as K\n'), ((4665, 4755), 'tensorflow.keras.models.load_model', 'load_model', (['ocr_model_path'], {'custom_objects': "{'<lambda>': lambda y_true, y_pred: y_pred}"}), "(ocr_model_path, custom_objects={'<lambda>': lambda y_true,\n y_pred: y_pred})\n", (4675, 4755), False, 'from tensorflow.keras.models import load_model\n'), ((5244, 5264), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (5254, 5264), False, 'import cv2\n'), 
((5305, 5342), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (5317, 5342), False, 'import cv2\n'), ((5380, 5420), 'cv2.resize', 'cv2.resize', (['img', '(img_width, img_height)'], {}), '(img, (img_width, img_height))\n', (5390, 5420), False, 'import cv2\n'), ((10598, 10630), 'collections.defaultdict', 'defaultdict', (["(lambda : 'na')", 'bins'], {}), "(lambda : 'na', bins)\n", (10609, 10630), False, 'from collections import defaultdict\n'), ((3518, 3565), 'torch.load', 'torch.load', (['modelfile'], {'map_location': 'self.device'}), '(modelfile, map_location=self.device)\n', (3528, 3565), False, 'import torch\n'), ((8937, 8979), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(img_path, cv2.IMREAD_GRAYSCALE)\n', (8947, 8979), False, 'import cv2\n'), ((16387, 16428), 're.search', 're.search', (['"""[a-zA-Z&.,:;!?\\\\d]"""', 'sentence'], {}), "('[a-zA-Z&.,:;!?\\\\d]', sentence)\n", (16396, 16428), False, 'import re\n'), ((2855, 3236), 'models.context2vec.src.model.Context2vec', 'Context2vec', ([], {'vocab_size': "config_dict['vocab_size']", 'counter': "([1] * config_dict['vocab_size'])", 'word_embed_size': "config_dict['word_embed_size']", 'hidden_size': "config_dict['hidden_size']", 'n_layers': "config_dict['n_layers']", 'bidirectional': "config_dict['bidirectional']", 'dropout': "config_dict['dropout']", 'pad_idx': "config_dict['pad_index']", 'device': 'self.device', 'inference': '(True)'}), "(vocab_size=config_dict['vocab_size'], counter=[1] * config_dict\n ['vocab_size'], word_embed_size=config_dict['word_embed_size'],\n hidden_size=config_dict['hidden_size'], n_layers=config_dict['n_layers'\n ], bidirectional=config_dict['bidirectional'], dropout=config_dict[\n 'dropout'], pad_idx=config_dict['pad_index'], device=self.device,\n inference=True)\n", (2866, 3236), False, 'from models.context2vec.src.model import Context2vec\n'), ((5965, 5989), 'numpy.argmax', 'np.argmax', (['out[j, 2:]', 
'(1)'], {}), '(out[j, 2:], 1)\n', (5974, 5989), True, 'import numpy as np\n'), ((8047, 8115), 'torch.tensor', 'torch.tensor', (['indexed_sentence'], {'dtype': 'torch.long', 'device': 'self.device'}), '(indexed_sentence, dtype=torch.long, device=self.device)\n', (8059, 8115), False, 'import torch\n'), ((11833, 11872), 'Levenshtein.ratio', 'Levenshtein.ratio', (['word', 'ocr_pred_lower'], {}), '(word, ocr_pred_lower)\n', (11850, 11872), False, 'import Levenshtein\n'), ((15483, 15496), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (15493, 15496), False, 'from operator import itemgetter\n'), ((6029, 6056), 'itertools.groupby', 'itertools.groupby', (['out_best'], {}), '(out_best)\n', (6046, 6056), False, 'import itertools\n'), ((11988, 12027), 'editdistance.eval', 'editdistance.eval', (['word', 'ocr_pred_lower'], {}), '(word, ocr_pred_lower)\n', (12005, 12027), False, 'import editdistance\n')] |
import sys
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
import numpy as np
import csv
from sklearn import metrics
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import mean_squared_error
import copy
kf = KFold(n_splits=10, shuffle=True)
# 90% CI, t distribution
Z = 1.833
class Bin_NN():
def __init__(self, X, Y, lr=0.008):
num_feature = len(X[0])
output_num = len(Y[0])
self.X = np.array(X)
self.Y = np.array(Y)
self.model = Sequential()
self.model.add(Dense(20, input_shape=(num_feature,), activation='relu'))
self.model.add(Dense(30, activation='relu'))
self.model.add(Dense(30, activation='tanh'))
self.model.add(Dense(2, activation='softmax'))
self.model.compile(loss='mse', optimizer=Adam(lr = lr), metrics=['accuracy'])
self.train_steps = 1000
self.accuracy = []
self.false_positive_rate = []
self.false_negative_rate = []
def train(self):
for train_index, test_index in kf.split(self.X):
# print(self.X, train_index)
X_train, X_test = self.X[train_index], self.X[test_index]
y_train, y_test = self.Y[train_index], self.Y[test_index]
self.X_test = X_test
self.y_test = y_test
self.model.fit(X_train, y_train)
acc, fp, fn = self.selfeval(X_test, y_test)
self.accuracy.append(acc)
self.false_positive_rate.append(fp)
self.false_negative_rate.append(fn)
print('avg accuracy : {}'.format(np.mean(np.array(self.accuracy))))
print('avg false positive rate : {}'.format(np.mean(np.array(self.false_positive_rate))))
print('avg false negative rate : {}'.format(np.mean(np.array(self.false_negative_rate))))
def gen_rand_test_data(self, num_test):
X_ = copy.deepcopy(self.X)
Y_ = copy.deepcopy(self.Y)
X_test = np.random.choice(X_.flatten(), num_test)
y_test = np.random.choice(Y_.flatten(), num_test)
self.predict(X_test, y_test)
def selfeval(self, X_test, y_test):
prediction = self.model.predict(X_test)
print(prediction.shape)
my_pred = []
target = []
for pred in prediction:
if pred[0] > pred[1]: # OD
my_pred.append(1)
else:
my_pred.append(0)
for y in y_test:
if y[0] == 1: # OD
target.append(1)
else:
target.append(0)
false_positives_num = 0
false_negatives_num = 0
for i in range(len(prediction)):
if my_pred[i] == 1 and target[i] == 0:
false_positives_num += 1
if my_pred[i] == 0 and target[i] == 1:
false_negatives_num += 1
my_pred = np.array(my_pred)
target = np.array(target)
num_cor = float(len(np.where(my_pred == target)[0]))
num_total = float(len(y_test))
acc = num_cor / num_total
false_positive_rate = float(false_positives_num) / num_total
false_negative_rate = float(false_negatives_num) / num_total
print('accuracy: {}'.format(acc))
print('false positive rate : {}'.format(false_positive_rate))
print('false negative rate: {}'.format(false_negative_rate))
return acc, false_positive_rate, false_negative_rate
# model -- not tuned, outdated
class NN():
def __init__(self, X, Y, lr=0.0001):
num_feature = len(X[0])
output_num = len(Y[0])
print(output_num)
self.X = np.array(X)
self.Y = np.array(Y)
self.model = Sequential()
self.model.add(Dense(32, input_shape=(num_feature,), activation='relu'))
self.model.add(Dense(output_num, activation='softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr = lr), metrics=['accuracy'])
self.train_steps = 1000
self.scores = []
def train(self):
for train_index, test_index in kf.split(self.X):
# print(self.X, train_index)
X_train, X_test = self.X[train_index], self.X[test_index]
y_train, y_test = self.Y[train_index], self.Y[test_index]
self.X_test = X_test
self.y_test = y_test
# for t in range(self.train_steps):
self.model.fit(X_train, y_train)
self.predict(X_test, y_test)
def predict(self, X_test, y_test):
prediction = self.model.predict(X_test)
bin_prediction = []
for pred in prediction:
if pred[-1] > 0.5: # yes, OD
bin_prediction.append(1)
else:
bin_prediction.append(0)
test_pred = []
for y_t in y_test:
if y_t[-1] == 0: # yes, OD
test_pred.append(1)
else:
test_pred.append(0)
accur = []
for i in range(len(bin_prediction)):
if bin_prediction[i] == test_pred[i]:
accur.append(1)
else:
accur.append(0)
acc = float(np.sum(np.array(accur))) / float(len(accur))
| [
"copy.deepcopy",
"keras.optimizers.Adam",
"sklearn.model_selection.KFold",
"keras.layers.Dense",
"numpy.array",
"numpy.where",
"keras.models.Sequential"
] | [((294, 326), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (299, 326), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((499, 510), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (507, 510), True, 'import numpy as np\n'), ((528, 539), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (536, 539), True, 'import numpy as np\n'), ((561, 573), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (571, 573), False, 'from keras.models import Sequential\n'), ((1929, 1950), 'copy.deepcopy', 'copy.deepcopy', (['self.X'], {}), '(self.X)\n', (1942, 1950), False, 'import copy\n'), ((1964, 1985), 'copy.deepcopy', 'copy.deepcopy', (['self.Y'], {}), '(self.Y)\n', (1977, 1985), False, 'import copy\n'), ((2905, 2922), 'numpy.array', 'np.array', (['my_pred'], {}), '(my_pred)\n', (2913, 2922), True, 'import numpy as np\n'), ((2940, 2956), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (2948, 2956), True, 'import numpy as np\n'), ((3662, 3673), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (3670, 3673), True, 'import numpy as np\n'), ((3691, 3702), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3699, 3702), True, 'import numpy as np\n'), ((3724, 3736), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3734, 3736), False, 'from keras.models import Sequential\n'), ((597, 653), 'keras.layers.Dense', 'Dense', (['(20)'], {'input_shape': '(num_feature,)', 'activation': '"""relu"""'}), "(20, input_shape=(num_feature,), activation='relu')\n", (602, 653), False, 'from keras.layers import Dense\n'), ((678, 706), 'keras.layers.Dense', 'Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (683, 706), False, 'from keras.layers import Dense\n'), ((731, 759), 'keras.layers.Dense', 'Dense', (['(30)'], {'activation': '"""tanh"""'}), "(30, activation='tanh')\n", (736, 759), False, 'from keras.layers import Dense\n'), ((784, 814), 
'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (789, 814), False, 'from keras.layers import Dense\n'), ((3760, 3816), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(num_feature,)', 'activation': '"""relu"""'}), "(32, input_shape=(num_feature,), activation='relu')\n", (3765, 3816), False, 'from keras.layers import Dense\n'), ((3841, 3880), 'keras.layers.Dense', 'Dense', (['output_num'], {'activation': '"""softmax"""'}), "(output_num, activation='softmax')\n", (3846, 3880), False, 'from keras.layers import Dense\n'), ((865, 876), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (869, 876), False, 'from keras.optimizers import Adam\n'), ((3952, 3963), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (3956, 3963), False, 'from keras.optimizers import Adam\n'), ((1648, 1671), 'numpy.array', 'np.array', (['self.accuracy'], {}), '(self.accuracy)\n', (1656, 1671), True, 'import numpy as np\n'), ((1735, 1769), 'numpy.array', 'np.array', (['self.false_positive_rate'], {}), '(self.false_positive_rate)\n', (1743, 1769), True, 'import numpy as np\n'), ((1833, 1867), 'numpy.array', 'np.array', (['self.false_negative_rate'], {}), '(self.false_negative_rate)\n', (1841, 1867), True, 'import numpy as np\n'), ((2985, 3012), 'numpy.where', 'np.where', (['(my_pred == target)'], {}), '(my_pred == target)\n', (2993, 3012), True, 'import numpy as np\n'), ((5204, 5219), 'numpy.array', 'np.array', (['accur'], {}), '(accur)\n', (5212, 5219), True, 'import numpy as np\n')] |
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
class Augmentation(object):
def __init__(self, train_data, label_data, aug_size=32, aug_configs=None):
self.train_data = train_data
self.label_data = label_data
self.aug_size = aug_size
self.data_gen_args = aug_configs if aug_configs else dict()
def augment(self, batch_size=1):
train_gen = ImageDataGenerator(**self.data_gen_args)
label_gen = ImageDataGenerator(**self.data_gen_args)
for i in range(len(self.train_data)):
train_temp = self.train_data[i].reshape(1, *self.train_data[i].shape)
label_temp = self.label_data[i].reshape(1, *self.label_data[i].shape)
train_gen.fit(train_temp, augment=True, seed=i)
label_gen.fit(label_temp, augment=True, seed=i)
train_flow = train_gen.flow(train_temp, batch_size=batch_size, seed=i)
label_flow = label_gen.flow(label_temp, batch_size=batch_size, seed=i)
self._merge_to_origin(train_flow, label_flow)
print('Augmentation completed.')
def _merge_to_origin(self, train_flow, label_flow):
print('Appending additional train volume.')
for i, aug_train in enumerate(train_flow):
self.train_data = np.append(self.train_data, aug_train, axis=0)
if i + 1 >= self.aug_size:
break
print('Appending additional train labels.')
for i, aug_label in enumerate(label_flow):
self.label_data = np.append(self.label_data, aug_label, axis=0)
if i + 1 >= self.aug_size:
break
| [
"keras.preprocessing.image.ImageDataGenerator",
"numpy.append"
] | [((419, 459), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**self.data_gen_args)\n', (437, 459), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((480, 520), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**self.data_gen_args)\n', (498, 520), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1311, 1356), 'numpy.append', 'np.append', (['self.train_data', 'aug_train'], {'axis': '(0)'}), '(self.train_data, aug_train, axis=0)\n', (1320, 1356), True, 'import numpy as np\n'), ((1553, 1598), 'numpy.append', 'np.append', (['self.label_data', 'aug_label'], {'axis': '(0)'}), '(self.label_data, aug_label, axis=0)\n', (1562, 1598), True, 'import numpy as np\n')] |
import numpy as np
import torch
import cv2
from torch.nn.functional import interpolate
from openmixup.models.utils import batch_shuffle_ddp
@torch.no_grad()
def cutmix(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
r""" CutMix augmentation.
"CutMix: Regularization Strategy to Train Strong Classifiers with
Localizable Features (https://arxiv.org/abs/1905.04899)". In ICCV, 2019.
https://github.com/clovaai/CutMix-PyTorch
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
gt_label (Tensor): Ground-truth labels (one-hot).
alpha (float): To sample Beta distribution.
lam (float): The given mixing ratio. If lam is None, sample a lam
from Beta distribution.
dist_mode (bool): Whether to do cross gpus index shuffling and
return the mixup shuffle index, which support supervised
and self-supervised methods.
"""
def rand_bbox(size, lam):
""" generate random box by lam """
W = size[2]
H = size[3]
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
if lam is None:
lam = np.random.beta(alpha, alpha)
# normal mixup process
if not dist_mode:
rand_index = torch.randperm(img.size(0)).cuda()
if len(img.size()) == 4: # [N, C, H, W]
img_ = img[rand_index]
else:
assert img.dim() == 5 # semi-supervised img [N, 2, C, H, W]
# * notice that the rank of two groups of img is fixed
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
_, _, h, w = img.size()
y_a = gt_label
y_b = gt_label[rand_index]
bbx1, bby1, bbx2, bby2 = rand_bbox(img.size(), lam)
img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
return img, (y_a, y_b, lam)
# dist mixup with cross gpus shuffle
else:
if len(img.size()) == 5: # self-supervised img [N, 2, C, H, W]
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
else:
assert len(img.size()) == 4 # normal img [N, C, H, w]
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
_, _, h, w = img.size()
bbx1, bby1, bbx2, bby2 = rand_bbox(img.size(), lam)
img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
if gt_label is not None:
y_a = gt_label
y_b, _, _ = batch_shuffle_ddp(
gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
return img, (y_a, y_b, lam)
else:
return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def mixup(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
r""" MixUp augmentation.
"Mixup: Beyond Empirical Risk Minimization (https://arxiv.org/abs/1710.09412)".
In ICLR, 2018.
https://github.com/facebookresearch/mixup-cifar10
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
gt_label (Tensor): Ground-truth labels (one-hot).
alpha (float): To sample Beta distribution.
lam (float): The given mixing ratio. If lam is None, sample a lam
from Beta distribution.
dist_mode (bool): Whether to do cross gpus index shuffling and
return the mixup shuffle index, which support supervised
and self-supervised methods.
"""
if lam is None:
lam = np.random.beta(alpha, alpha)
# normal mixup process
if not dist_mode:
rand_index = torch.randperm(img.size(0)).cuda()
if len(img.size()) == 4: # [N, C, H, W]
img_ = img[rand_index]
else:
assert img.dim() == 5 # semi-supervised img [N, 2, C, H, W]
# * notice that the rank of two groups of img is fixed
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
y_a = gt_label
y_b = gt_label[rand_index]
img = lam * img + (1 - lam) * img_
return img, (y_a, y_b, lam)
# dist mixup with cross gpus shuffle
else:
if len(img.size()) == 5: # self-supervised img [N, 2, C, H, W]
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
else:
assert len(img.size()) == 4 # normal img [N, C, H, w]
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
img = lam * img + (1 - lam) * img_
if gt_label is not None:
y_a = gt_label
y_b, _, _ = batch_shuffle_ddp(
gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
return img, (y_a, y_b, lam)
else:
return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def saliencymix(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
r""" SaliencyMix augmentation.
"SaliencyMix: A Saliency Guided Data Augmentation Strategy for Better
Regularization (https://arxiv.org/pdf/2006.01791.pdf)". In ICLR, 2021.
https://github.com/SaliencyMix/SaliencyMix/blob/main/SaliencyMix_CIFAR/saliencymix.py
Args:
img (Tensor): Input images of shape (C, H, W).
Typically these should be mean centered and std scaled.
gt_label (Tensor): Ground-truth labels (one-hot).
alpha (float): To sample Beta distribution.
lam (float): The given mixing ratio. If lam is None, sample a lam
from Beta distribution.
dist_mode (bool): Whether to do cross gpus index shuffling and
return the mixup shuffle index, which support supervised
and self-supervised methods.
"""
def saliency_bbox(img, lam):
""" generate saliency box by lam """
size = img.size()
W = size[1]
H = size[2]
cut_rat = np.sqrt(1. - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# force fp32 when convert to numpy
img = img.type(torch.float32)
# initialize OpenCV's static fine grained saliency detector and
# compute the saliency map
temp_img = img.cpu().numpy().transpose(1, 2, 0)
saliency = cv2.saliency.StaticSaliencyFineGrained_create()
(success, saliencyMap) = saliency.computeSaliency(temp_img)
saliencyMap = (saliencyMap * 255).astype("uint8")
maximum_indices = np.unravel_index(
np.argmax(saliencyMap, axis=None), saliencyMap.shape)
x = maximum_indices[0]
y = maximum_indices[1]
bbx1 = np.clip(x - cut_w // 2, 0, W)
bby1 = np.clip(y - cut_h // 2, 0, H)
bbx2 = np.clip(x + cut_w // 2, 0, W)
bby2 = np.clip(y + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
if lam is None:
lam = np.random.beta(alpha, alpha)
# normal mixup process
if not dist_mode:
rand_index = torch.randperm(img.size(0)).cuda()
if len(img.size()) == 4: # [N, C, H, W]
img_ = img[rand_index]
else:
assert img.dim() == 5 # semi-supervised img [N, 2, C, H, W]
# * notice that the rank of two groups of img is fixed
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
_, _, h, w = img.size()
y_a = gt_label
y_b = gt_label[rand_index]
# detect saliency box
bbx1, bby1, bbx2, bby2 = saliency_bbox(img[rand_index[0]], lam)
img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
return img, (y_a, y_b, lam)
# dist mixup with cross gpus shuffle
else:
if len(img.size()) == 5: # self-supervised img [N, 2, C, H, W]
img_ = img[:, 1, ...].contiguous()
img = img[:, 0, ...].contiguous()
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
else:
assert len(img.size()) == 4 # normal img [N, C, H, w]
img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp( # N
img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
_, _, h, w = img.size()
# detect saliency box
bbx1, bby1, bbx2, bby2 = saliency_bbox(img_[0], lam)
img[:, :, bbx1:bbx2, bby1:bby2] = img_[:, :, bbx1:bbx2, bby1:bby2]
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
if gt_label is not None:
y_a = gt_label
y_b, _, _ = batch_shuffle_ddp(
gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
return img, (y_a, y_b, lam)
else:
return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def smoothmix(img, gt_label, alpha=1.0, lam=None, dist_mode=False, **kwargs):
    r""" SmoothMix augmentation.

    "SmoothMix: a Simple Yet Effective Data Augmentation to Train Robust
    Classifiers". In CVPRW, 2020.

    Blends two images with a soft Gaussian mask instead of a hard box; the
    returned mixing ratio ``lam`` is the mean value of the mask.

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.
    """
    def gaussian_kernel(kernel_size, rand_w, rand_h, sigma):
        # Build a (kernel_size, kernel_size) Gaussian mask. The coordinate
        # grid is created at double resolution, shifted by (rand_w, rand_h)
        # with torch.roll so the blob center lands at a random position,
        # then center-cropped back to kernel_size.
        s = kernel_size * 2
        x_cord = torch.arange(s)
        x_grid = x_cord.repeat(s).view(s, s)
        y_grid = x_grid.t()
        xy_grid = torch.stack([x_grid, y_grid], dim=-1).cuda()
        xy_grid = torch.roll(xy_grid, rand_w, 0)
        xy_grid = torch.roll(xy_grid, rand_h, 1)
        crop_size = s // 4
        xy_grid = xy_grid[crop_size: s - crop_size, crop_size: s - crop_size]
        mean = (s - 1) / 2
        var = sigma ** 2
        # unnormalized Gaussian: exp(-||p - mean||^2 / (2 * sigma^2))
        g_filter = torch.exp(-torch.sum((xy_grid - mean) ** 2, dim=-1) / (2 * var))
        g_filter = g_filter.view(kernel_size, kernel_size)
        return g_filter
    if lam is None:
        lam = np.random.beta(alpha, alpha)
    # normal mixup process
    if not dist_mode:
        rand_index = torch.randperm(img.size(0)).cuda()
        if len(img.size()) == 4:  # [N, C, H, W]
            img_ = img[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        _, _, h, w = img.size()
        y_a = gt_label
        y_b = gt_label[rand_index]
        # random mask-center offset, each in [-dim/2, dim/2)
        rand_w = int(torch.randint(0, w, (1,)) - w / 2)
        rand_h = int(torch.randint(0, h, (1,)) - h / 2)
        # random bandwidth in [0.25, 0.5) * h
        sigma = ((torch.rand(1) / 4 + 0.25) * h).cuda()
        # NOTE(review): rand_h/rand_w are passed swapped relative to the
        # helper's parameter names (row/col vs x/y?) and the kernel is
        # (h, h), so broadcasting over (N, C, h, w) assumes h == w -- confirm.
        kernel = gaussian_kernel(h, rand_h, rand_w, sigma).cuda()
        img = img * (1 - kernel) + img_ * kernel
        # the Beta-sampled lam above is discarded; the effective ratio is
        # the mean mask value
        lam = torch.sum(kernel) / (h * w)
        return img, (y_a, y_b, lam)
    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, w]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        _, _, h, w = img.size()
        rand_w = int(torch.randint(0, w, (1,)) - w / 2)
        rand_h = int(torch.randint(0, h, (1,)) - h / 2)
        sigma = (torch.rand(1) / 4 + 0.25) * h
        kernel = gaussian_kernel(h, rand_h, rand_w, sigma).cuda()
        img = img * (1 - kernel) + img_ * kernel
        lam = torch.sum(kernel) / (h * w)
        if gt_label is not None:
            y_a = gt_label
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            # self-supervised: return shuffle indices instead of labels
            return img, (idx_shuffle, idx_unshuffle, lam)
@torch.no_grad()
def resizemix(img, gt_label, scope=(0.1, 0.8), dist_mode=False,
              alpha=1.0, lam=None, use_alpha=False, **kwargs):
    r""" ResizeMix augmentation.

    "ResizeMix: Mixing Data with Preserved Object Information and True Labels
    (https://arxiv.org/abs/2012.11101)".

    Shrinks a shuffled copy of the batch and pastes it into a random box of
    each image; ``lam`` is adjusted to the exact pixel ratio of the box.

    Args:
        img (Tensor): Input images of shape (N, C, H, W).
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): Ground-truth labels (one-hot).
        alpha (float): To sample Beta distribution.
        lam (float): The given mixing ratio. If lam is None, sample a lam
            from Beta distribution.
        use_alpha (bool): Whether to use alpha instead of scope. Notice
            that ResizeMix is designed for supervised learning, it uses
            Uniform discribution rather than Beta. But in SSL contrastive
            learning, it's better to use large alpha.
        scope (float): Sample Uniform distribution to get tao.
        dist_mode (bool): Whether to do cross gpus index shuffling and
            return the mixup shuffle index, which support supervised
            and self-supervised methods.
    """
    def rand_bbox_tao(size, tao):
        """Generate a random box whose side lengths are a `tao` fraction of
        the image size (clipping may shrink it at the borders).

        Fixed: uses the builtin ``int`` -- ``np.int`` was deprecated in
        NumPy 1.20 and removed in NumPy 1.24.
        """
        W = size[2]
        H = size[3]
        cut_w = int(W * tao)
        cut_h = int(H * tao)
        # uniform random box center
        cx = np.random.randint(W)
        cy = np.random.randint(H)
        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2
    assert len(scope) == 2
    # normal mixup process
    if not dist_mode:
        rand_index = torch.randperm(img.size(0))
        if len(img.size()) == 4:  # [N, C, H, W]
            img_resize = img.clone()
            img_resize = img_resize[rand_index]
        else:
            assert img.dim() == 5  # semi-supervised img [N, 2, C, H, W]
            # * notice that the rank of two groups of img is fixed
            img_resize = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
        _, _, h, w = img.size()
        shuffled_gt = gt_label[rand_index]
        # generate tao (the resize scale)
        if lam is None:
            if use_alpha:
                tao = np.random.beta(alpha, alpha)
                if tao < scope[0] or tao > scope[1]:
                    # resample uniformly when the Beta draw leaves the scope
                    tao = np.random.uniform(scope[0], scope[1])
            else:
                # original settings in ResizeMix
                tao = np.random.uniform(scope[0], scope[1])
        else:
            # clamp the given lam into the valid scale range
            tao = min(max(lam, scope[0]), scope[1])
        bbx1, bby1, bbx2, bby2 = rand_bbox_tao(img.size(), tao)
        # shrink the shuffled images to the box size and paste them in
        img_resize = interpolate(
            img_resize, (bby2 - bby1, bbx2 - bbx1), mode="nearest"
        )
        img[:, :, bby1:bby2, bbx1:bbx2] = img_resize
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        return img, (gt_label, shuffled_gt, lam)
    # dist mixup with cross gpus shuffle
    else:
        if len(img.size()) == 5:  # self-supervised img [N, 2, C, H, W]
            img_ = img[:, 1, ...].contiguous()
            img = img[:, 0, ...].contiguous()
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img_, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        else:
            assert len(img.size()) == 4  # normal img [N, C, H, w]
            img_, idx_shuffle, idx_unshuffle = batch_shuffle_ddp(  # N
                img, idx_shuffle=kwargs.get("idx_shuffle_mix", None), no_repeat=True)
        _, _, h, w = img.size()
        # generate tao
        if lam is None:
            if use_alpha:
                tao = np.random.beta(alpha, alpha)
                if tao < scope[0] or tao > scope[1]:
                    tao = np.random.uniform(scope[0], scope[1])
            else:
                # original settings in ResizeMix
                tao = np.random.uniform(scope[0], scope[1])
        else:
            tao = lam
        # random box
        bbx1, bby1, bbx2, bby2 = rand_bbox_tao(img.size(), tao)
        img_ = interpolate(img_, (bby2 - bby1, bbx2 - bbx1), mode="nearest")
        img[:, :, bby1:bby2, bbx1:bbx2] = img_
        # adjust lambda to exactly match pixel ratio
        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (w * h))
        if gt_label is not None:
            y_a = gt_label
            y_b, _, _ = batch_shuffle_ddp(
                gt_label, idx_shuffle=idx_shuffle, no_repeat=True)
            return img, (y_a, y_b, lam)
        else:
            # self-supervised: return shuffle indices instead of labels
            return img, (idx_shuffle, idx_unshuffle, lam)
| [
"cv2.saliency.StaticSaliencyFineGrained_create",
"numpy.random.uniform",
"torch.randint",
"torch.stack",
"openmixup.models.utils.batch_shuffle_ddp",
"numpy.random.beta",
"numpy.argmax",
"torch.roll",
"numpy.clip",
"numpy.random.randint",
"numpy.int",
"torch.arange",
"torch.rand",
"torch.nn... | [((143, 158), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (156, 158), False, 'import torch\n'), ((3514, 3529), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3527, 3529), False, 'import torch\n'), ((5937, 5952), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5950, 5952), False, 'import torch\n'), ((9989, 10004), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10002, 10004), False, 'import torch\n'), ((13686, 13701), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13699, 13701), False, 'import torch\n'), ((1144, 1162), 'numpy.sqrt', 'np.sqrt', (['(1.0 - lam)'], {}), '(1.0 - lam)\n', (1151, 1162), True, 'import numpy as np\n'), ((1178, 1197), 'numpy.int', 'np.int', (['(W * cut_rat)'], {}), '(W * cut_rat)\n', (1184, 1197), True, 'import numpy as np\n'), ((1214, 1233), 'numpy.int', 'np.int', (['(H * cut_rat)'], {}), '(H * cut_rat)\n', (1220, 1233), True, 'import numpy as np\n'), ((1266, 1286), 'numpy.random.randint', 'np.random.randint', (['W'], {}), '(W)\n', (1283, 1286), True, 'import numpy as np\n'), ((1300, 1320), 'numpy.random.randint', 'np.random.randint', (['H'], {}), '(H)\n', (1317, 1320), True, 'import numpy as np\n'), ((1337, 1367), 'numpy.clip', 'np.clip', (['(cx - cut_w // 2)', '(0)', 'W'], {}), '(cx - cut_w // 2, 0, W)\n', (1344, 1367), True, 'import numpy as np\n'), ((1383, 1413), 'numpy.clip', 'np.clip', (['(cy - cut_h // 2)', '(0)', 'H'], {}), '(cy - cut_h // 2, 0, H)\n', (1390, 1413), True, 'import numpy as np\n'), ((1429, 1459), 'numpy.clip', 'np.clip', (['(cx + cut_w // 2)', '(0)', 'W'], {}), '(cx + cut_w // 2, 0, W)\n', (1436, 1459), True, 'import numpy as np\n'), ((1475, 1505), 'numpy.clip', 'np.clip', (['(cy + cut_h // 2)', '(0)', 'H'], {}), '(cy + cut_h // 2, 0, H)\n', (1482, 1505), True, 'import numpy as np\n'), ((1580, 1608), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (1594, 1608), True, 'import numpy as np\n'), ((4379, 4407), 'numpy.random.beta', 
'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (4393, 4407), True, 'import numpy as np\n'), ((7022, 7040), 'numpy.sqrt', 'np.sqrt', (['(1.0 - lam)'], {}), '(1.0 - lam)\n', (7029, 7040), True, 'import numpy as np\n'), ((7056, 7075), 'numpy.int', 'np.int', (['(W * cut_rat)'], {}), '(W * cut_rat)\n', (7062, 7075), True, 'import numpy as np\n'), ((7092, 7111), 'numpy.int', 'np.int', (['(H * cut_rat)'], {}), '(H * cut_rat)\n', (7098, 7111), True, 'import numpy as np\n'), ((7376, 7423), 'cv2.saliency.StaticSaliencyFineGrained_create', 'cv2.saliency.StaticSaliencyFineGrained_create', ([], {}), '()\n', (7421, 7423), False, 'import cv2\n'), ((7739, 7768), 'numpy.clip', 'np.clip', (['(x - cut_w // 2)', '(0)', 'W'], {}), '(x - cut_w // 2, 0, W)\n', (7746, 7768), True, 'import numpy as np\n'), ((7784, 7813), 'numpy.clip', 'np.clip', (['(y - cut_h // 2)', '(0)', 'H'], {}), '(y - cut_h // 2, 0, H)\n', (7791, 7813), True, 'import numpy as np\n'), ((7829, 7858), 'numpy.clip', 'np.clip', (['(x + cut_w // 2)', '(0)', 'W'], {}), '(x + cut_w // 2, 0, W)\n', (7836, 7858), True, 'import numpy as np\n'), ((7874, 7903), 'numpy.clip', 'np.clip', (['(y + cut_h // 2)', '(0)', 'H'], {}), '(y + cut_h // 2, 0, H)\n', (7881, 7903), True, 'import numpy as np\n'), ((7982, 8010), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (7996, 8010), True, 'import numpy as np\n'), ((10881, 10896), 'torch.arange', 'torch.arange', (['s'], {}), '(s)\n', (10893, 10896), False, 'import torch\n'), ((11051, 11081), 'torch.roll', 'torch.roll', (['xy_grid', 'rand_w', '(0)'], {}), '(xy_grid, rand_w, 0)\n', (11061, 11081), False, 'import torch\n'), ((11100, 11130), 'torch.roll', 'torch.roll', (['xy_grid', 'rand_h', '(1)'], {}), '(xy_grid, rand_h, 1)\n', (11110, 11130), False, 'import torch\n'), ((11500, 11528), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (11514, 11528), True, 'import numpy as np\n'), ((15007, 15022), 
'numpy.int', 'np.int', (['(W * tao)'], {}), '(W * tao)\n', (15013, 15022), True, 'import numpy as np\n'), ((15039, 15054), 'numpy.int', 'np.int', (['(H * tao)'], {}), '(H * tao)\n', (15045, 15054), True, 'import numpy as np\n'), ((15087, 15107), 'numpy.random.randint', 'np.random.randint', (['W'], {}), '(W)\n', (15104, 15107), True, 'import numpy as np\n'), ((15121, 15141), 'numpy.random.randint', 'np.random.randint', (['H'], {}), '(H)\n', (15138, 15141), True, 'import numpy as np\n'), ((15158, 15188), 'numpy.clip', 'np.clip', (['(cx - cut_w // 2)', '(0)', 'W'], {}), '(cx - cut_w // 2, 0, W)\n', (15165, 15188), True, 'import numpy as np\n'), ((15204, 15234), 'numpy.clip', 'np.clip', (['(cy - cut_h // 2)', '(0)', 'H'], {}), '(cy - cut_h // 2, 0, H)\n', (15211, 15234), True, 'import numpy as np\n'), ((15250, 15280), 'numpy.clip', 'np.clip', (['(cx + cut_w // 2)', '(0)', 'W'], {}), '(cx + cut_w // 2, 0, W)\n', (15257, 15280), True, 'import numpy as np\n'), ((15296, 15326), 'numpy.clip', 'np.clip', (['(cy + cut_h // 2)', '(0)', 'H'], {}), '(cy + cut_h // 2, 0, H)\n', (15303, 15326), True, 'import numpy as np\n'), ((16501, 16568), 'torch.nn.functional.interpolate', 'interpolate', (['img_resize', '(bby2 - bby1, bbx2 - bbx1)'], {'mode': '"""nearest"""'}), "(img_resize, (bby2 - bby1, bbx2 - bbx1), mode='nearest')\n", (16512, 16568), False, 'from torch.nn.functional import interpolate\n'), ((17979, 18040), 'torch.nn.functional.interpolate', 'interpolate', (['img_', '(bby2 - bby1, bbx2 - bbx1)'], {'mode': '"""nearest"""'}), "(img_, (bby2 - bby1, bbx2 - bbx1), mode='nearest')\n", (17990, 18040), False, 'from torch.nn.functional import interpolate\n'), ((3313, 3381), 'openmixup.models.utils.batch_shuffle_ddp', 'batch_shuffle_ddp', (['gt_label'], {'idx_shuffle': 'idx_shuffle', 'no_repeat': '(True)'}), '(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)\n', (3330, 3381), False, 'from openmixup.models.utils import batch_shuffle_ddp\n'), ((5736, 5804), 
'openmixup.models.utils.batch_shuffle_ddp', 'batch_shuffle_ddp', (['gt_label'], {'idx_shuffle': 'idx_shuffle', 'no_repeat': '(True)'}), '(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)\n', (5753, 5804), False, 'from openmixup.models.utils import batch_shuffle_ddp\n'), ((7607, 7640), 'numpy.argmax', 'np.argmax', (['saliencyMap'], {'axis': 'None'}), '(saliencyMap, axis=None)\n', (7616, 7640), True, 'import numpy as np\n'), ((9788, 9856), 'openmixup.models.utils.batch_shuffle_ddp', 'batch_shuffle_ddp', (['gt_label'], {'idx_shuffle': 'idx_shuffle', 'no_repeat': '(True)'}), '(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)\n', (9805, 9856), False, 'from openmixup.models.utils import batch_shuffle_ddp\n'), ((12362, 12379), 'torch.sum', 'torch.sum', (['kernel'], {}), '(kernel)\n', (12371, 12379), False, 'import torch\n'), ((13364, 13381), 'torch.sum', 'torch.sum', (['kernel'], {}), '(kernel)\n', (13373, 13381), False, 'import torch\n'), ((13485, 13553), 'openmixup.models.utils.batch_shuffle_ddp', 'batch_shuffle_ddp', (['gt_label'], {'idx_shuffle': 'idx_shuffle', 'no_repeat': '(True)'}), '(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)\n', (13502, 13553), False, 'from openmixup.models.utils import batch_shuffle_ddp\n'), ((18295, 18363), 'openmixup.models.utils.batch_shuffle_ddp', 'batch_shuffle_ddp', (['gt_label'], {'idx_shuffle': 'idx_shuffle', 'no_repeat': '(True)'}), '(gt_label, idx_shuffle=idx_shuffle, no_repeat=True)\n', (18312, 18363), False, 'from openmixup.models.utils import batch_shuffle_ddp\n'), ((10988, 11025), 'torch.stack', 'torch.stack', (['[x_grid, y_grid]'], {'dim': '(-1)'}), '([x_grid, y_grid], dim=-1)\n', (10999, 11025), False, 'import torch\n'), ((12086, 12111), 'torch.randint', 'torch.randint', (['(0)', 'w', '(1,)'], {}), '(0, w, (1,))\n', (12099, 12111), False, 'import torch\n'), ((12142, 12167), 'torch.randint', 'torch.randint', (['(0)', 'h', '(1,)'], {}), '(0, h, (1,))\n', (12155, 12167), False, 'import torch\n'), ((13097, 13122), 
'torch.randint', 'torch.randint', (['(0)', 'w', '(1,)'], {}), '(0, w, (1,))\n', (13110, 13122), False, 'import torch\n'), ((13153, 13178), 'torch.randint', 'torch.randint', (['(0)', 'h', '(1,)'], {}), '(0, h, (1,))\n', (13166, 13178), False, 'import torch\n'), ((16067, 16095), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (16081, 16095), True, 'import numpy as np\n'), ((16302, 16339), 'numpy.random.uniform', 'np.random.uniform', (['scope[0]', 'scope[1]'], {}), '(scope[0], scope[1])\n', (16319, 16339), True, 'import numpy as np\n'), ((17560, 17588), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (17574, 17588), True, 'import numpy as np\n'), ((17795, 17832), 'numpy.random.uniform', 'np.random.uniform', (['scope[0]', 'scope[1]'], {}), '(scope[0], scope[1])\n', (17812, 17832), True, 'import numpy as np\n'), ((11319, 11359), 'torch.sum', 'torch.sum', (['((xy_grid - mean) ** 2)'], {'dim': '(-1)'}), '((xy_grid - mean) ** 2, dim=-1)\n', (11328, 11359), False, 'import torch\n'), ((13205, 13218), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (13215, 13218), False, 'import torch\n'), ((16175, 16212), 'numpy.random.uniform', 'np.random.uniform', (['scope[0]', 'scope[1]'], {}), '(scope[0], scope[1])\n', (16192, 16212), True, 'import numpy as np\n'), ((17668, 17705), 'numpy.random.uniform', 'np.random.uniform', (['scope[0]', 'scope[1]'], {}), '(scope[0], scope[1])\n', (17685, 17705), True, 'import numpy as np\n'), ((12195, 12208), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (12205, 12208), False, 'import torch\n')] |
import numpy as np
class Example5:
    """Toy 2-D box-constrained test problem with analytic derivatives.

    Objective:
        f(x) = x0 + 0.5*x0**2 + 4*x1**4 + x1**2 + 0.01*x1**3
    with bounds x0 in [-2, 1] and x1 unbounded.

    Fixes:
      * ``grad``: d/dx1 of 4*x1**4 is 16*x1**3 (was 12*x1**3, inconsistent
        with ``fun``), so ``hess``'s (1, 1) entry becomes 48*x1**2 + ...
      * ``hess``: the (1, 1) entry depends on x[1], not x[0].
      * ``np.NINF``/``np.Inf`` were removed in NumPy 2.0; use ``np.inf``.
    """
    def __init__(self):
        # box constraints: x0 in [-2, 1], x1 in (-inf, inf)
        self.l = np.array([-2., -np.inf])
        self.u = np.array([ 1., np.inf])
        self.x_init = np.array([-2., 5.])
        self.n0 = 0
        self.n12 = 1
        # stationary point: grad f(-1, 0) = 0 (also the minimizer)
        self.opt_x = np.array([-1., 0.])
        self.opt_f = self.fun(self.opt_x)
    def fun(self, x):
        """Objective value at ``x``."""
        return (x[0] + 0.5*x[0]**2) + 4*x[1]**4 + x[1]**2 + 0.01*x[1]**3
    def grad(self, x):
        """Analytic gradient of :meth:`fun`."""
        return np.array([x[0] + 1., 16.*x[1]**3 + 2*x[1] + 0.03*x[1]**2])
    def hess(self, x):
        """Analytic Hessian of :meth:`fun`."""
        return np.array([[1., 0.], [0., 48.*x[1]**2 + 2. + 0.06*x[1]]])
| [
"numpy.array"
] | [((78, 103), 'numpy.array', 'np.array', (['[-2.0, np.NINF]'], {}), '([-2.0, np.NINF])\n', (86, 103), True, 'import numpy as np\n'), ((120, 143), 'numpy.array', 'np.array', (['[1.0, np.Inf]'], {}), '([1.0, np.Inf])\n', (128, 143), True, 'import numpy as np\n'), ((166, 187), 'numpy.array', 'np.array', (['[-2.0, 5.0]'], {}), '([-2.0, 5.0])\n', (174, 187), True, 'import numpy as np\n'), ((248, 269), 'numpy.array', 'np.array', (['[-1.0, 0.0]'], {}), '([-1.0, 0.0])\n', (256, 269), True, 'import numpy as np\n'), ((445, 515), 'numpy.array', 'np.array', (['[x[0] + 1.0, 12.0 * x[1] ** 3 + 2 * x[1] + 0.03 * x[1] ** 2]'], {}), '([x[0] + 1.0, 12.0 * x[1] ** 3 + 2 * x[1] + 0.03 * x[1] ** 2])\n', (453, 515), True, 'import numpy as np\n'), ((543, 610), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 36.0 * x[0] ** 2 + 2.0 + 0.06 * x[1]]]'], {}), '([[1.0, 0.0], [0.0, 36.0 * x[0] ** 2 + 2.0 + 0.06 * x[1]]])\n', (551, 610), True, 'import numpy as np\n')] |
# coding = utf-8
"""将minist数据记录为record格式
"""
import os
import gzip
import sys
import numpy as np
import tensorflow as tf
def read_imgs(filename, num_images):
    """Read MNIST image data from a gzipped IDX file.

    Args:
        filename: path to the gzipped image file.
        num_images: number of 28x28 grayscale images to read.

    Returns:
        uint8 ndarray of shape (num_images, 28, 28, 1).
    """
    with gzip.open(filename) as stream:
        stream.read(16)  # skip the 16-byte IDX header
        raw = stream.read(28 * 28 * num_images * 1)
    pixels = np.frombuffer(raw, dtype=np.uint8)
    return pixels.reshape(num_images, 28, 28, 1)
def read_labels(filename, num_labels):
    """Read MNIST label data from a gzipped IDX file.

    Args:
        filename: path to the gzipped label file.
        num_labels: number of labels to read.

    Returns:
        int64 ndarray of shape (num_labels,).
    """
    with gzip.open(filename) as stream:
        stream.read(8)  # skip the 8-byte IDX header
        raw = stream.read(num_labels)
    return np.frombuffer(raw, dtype=np.uint8).astype(np.int64)
def int64_feature(values):
    """Returns a TF-Feature of int64s.

    Args:
        values: A scalar or list of values.

    Returns:
        A TF-Feature.
    """
    value_list = values if isinstance(values, (tuple, list)) else [values]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value_list))
def bytes_feature(values):
    """Returns a TF-Feature of bytes.

    Args:
        values: A string.

    Returns:
        A TF-Feature.
    """
    byte_list = tf.train.BytesList(value=[values])
    return tf.train.Feature(bytes_list=byte_list)
def float_feature(values):
    """Returns a TF-Feature of floats.

    Args:
        values: A scalar of list of values.

    Returns:
        A TF-Feature.
    """
    value_list = values if isinstance(values, (tuple, list)) else [values]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value_list))
def image_to_tfexample(image_data, image_format, height, width, class_id):
    """Build a tf.train.Example holding one encoded image and its label."""
    feature = {
        'image/encoded': bytes_feature(image_data),
        'image/format': bytes_feature(image_format),
        'image/class/label': int64_feature(class_id),
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))
def add_to_record(data_filename, label_filename, num_images, tf_writer):
    """Encode MNIST images as PNG and append them to a TFRecord file.

    Args:
        data_filename: path to the gzipped image file.
        label_filename: path to the gzipped label file.
        num_images: number of images/labels to convert.
        tf_writer: an open TFRecord writer.
    """
    images = read_imgs(data_filename, num_images)
    labels = read_labels(label_filename, num_images)
    img_shape = (28, 28, 1)
    with tf.Graph().as_default():
        # one placeholder + encode op reused for every image
        img_placeholder = tf.placeholder(dtype=tf.uint8, shape=img_shape)
        png_op = tf.image.encode_png(img_placeholder)
        with tf.Session() as sess:
            for idx in range(num_images):
                sys.stdout.write('\r>> Converting image %d/%d' % (idx + 1, num_images))
                sys.stdout.flush()
                png_bytes = sess.run(png_op, feed_dict={img_placeholder: images[idx]})
                example = image_to_tfexample(png_bytes, 'png'.encode(), 28, 28, labels[idx])
                tf_writer.write(example.SerializeToString())
# Paths to the raw gzipped MNIST IDX files.
# NOTE(review): hard-coded local data directory -- adjust per machine.
DATA_DIR = '/media/xiu/data/MNIST_data/'
TRAIN_DATA_FILENAME = 'train-images-idx3-ubyte.gz'
TRAIN_LABEL_FILENAME = 'train-labels-idx1-ubyte.gz'
TEST_DATA_FILENAME = 't10k-images-idx3-ubyte.gz'
TEST_LABEL_FILENAME = 't10k-labels-idx1-ubyte.gz'
# Output TFRecord files (written to the current directory).
training_filename = './mnist_%s.tfrecord' % 'train'
testing_filename = './mnist_%s.tfrecord' % 'test'
# Convert the training split (60000 images).
with tf.python_io.TFRecordWriter(training_filename) as tf_writer:
    data_filename = os.path.join(DATA_DIR, TRAIN_DATA_FILENAME)
    label_filename = os.path.join(DATA_DIR, TRAIN_LABEL_FILENAME)
    add_to_record(data_filename, label_filename, 60000, tf_writer)
# Convert the test split (10000 images).
with tf.python_io.TFRecordWriter(testing_filename) as tf_writer:
    data_filename = os.path.join(DATA_DIR, TEST_DATA_FILENAME)
    label_filename = os.path.join(DATA_DIR, TEST_LABEL_FILENAME)
    add_to_record(data_filename, label_filename, 10000, tf_writer)
| [
"tensorflow.train.BytesList",
"sys.stdout.write",
"tensorflow.python_io.TFRecordWriter",
"gzip.open",
"tensorflow.train.Int64List",
"tensorflow.image.encode_png",
"numpy.frombuffer",
"tensorflow.Session",
"tensorflow.placeholder",
"sys.stdout.flush",
"tensorflow.train.FloatList",
"tensorflow.G... | [((3467, 3513), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['training_filename'], {}), '(training_filename)\n', (3494, 3513), True, 'import tensorflow as tf\n'), ((3548, 3591), 'os.path.join', 'os.path.join', (['DATA_DIR', 'TRAIN_DATA_FILENAME'], {}), '(DATA_DIR, TRAIN_DATA_FILENAME)\n', (3560, 3591), False, 'import os\n'), ((3613, 3657), 'os.path.join', 'os.path.join', (['DATA_DIR', 'TRAIN_LABEL_FILENAME'], {}), '(DATA_DIR, TRAIN_LABEL_FILENAME)\n', (3625, 3657), False, 'import os\n'), ((3740, 3785), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['testing_filename'], {}), '(testing_filename)\n', (3767, 3785), True, 'import tensorflow as tf\n'), ((3820, 3862), 'os.path.join', 'os.path.join', (['DATA_DIR', 'TEST_DATA_FILENAME'], {}), '(DATA_DIR, TEST_DATA_FILENAME)\n', (3832, 3862), False, 'import os\n'), ((3884, 3927), 'os.path.join', 'os.path.join', (['DATA_DIR', 'TEST_LABEL_FILENAME'], {}), '(DATA_DIR, TEST_LABEL_FILENAME)\n', (3896, 3927), False, 'import os\n'), ((250, 269), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (259, 269), False, 'import gzip\n'), ((397, 431), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (410, 431), True, 'import numpy as np\n'), ((631, 650), 'gzip.open', 'gzip.open', (['filename'], {}), '(filename)\n', (640, 650), False, 'import gzip\n'), ((2418, 2461), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8', 'shape': 'shape'}), '(dtype=tf.uint8, shape=shape)\n', (2432, 2461), True, 'import tensorflow as tf\n'), ((2484, 2510), 'tensorflow.image.encode_png', 'tf.image.encode_png', (['image'], {}), '(image)\n', (2503, 2510), True, 'import tensorflow as tf\n'), ((1077, 1109), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'values'}), '(value=values)\n', (1095, 1109), True, 'import tensorflow as tf\n'), ((1280, 1314), 'tensorflow.train.BytesList', 
'tf.train.BytesList', ([], {'value': '[values]'}), '(value=[values])\n', (1298, 1314), True, 'import tensorflow as tf\n'), ((1570, 1602), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'values'}), '(value=values)\n', (1588, 1602), True, 'import tensorflow as tf\n'), ((2582, 2594), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2592, 2594), True, 'import tensorflow as tf\n'), ((752, 786), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (765, 786), True, 'import numpy as np\n'), ((2377, 2387), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2385, 2387), True, 'import tensorflow as tf\n'), ((2660, 2729), 'sys.stdout.write', 'sys.stdout.write', (["('\\r>> Converting image %d/%d' % (j + 1, num_images))"], {}), "('\\r>> Converting image %d/%d' % (j + 1, num_images))\n", (2676, 2729), False, 'import sys\n'), ((2746, 2764), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2762, 2764), False, 'import sys\n')] |
import numpy as np
import matplotlib.pyplot as plt
from .utils import *
from .MTM import MTM
class LiverDeform(MTM):
    """Mass-tensor deformation model specialized for a liver mesh.

    Loads the liver mesh, converts vertex coordinates from meters to
    centimeters, softens the Lame parameters by ``lamu_times`` and runs the
    explicit time stepping inherited from :class:`MTM`.

    Fix: the ``move_vindex=[]`` mutable default arguments are replaced by
    ``None`` guards (``move_vindex = []`` inside the method), which keeps
    the empty-index-list behavior while avoiding the shared-default pitfall.
    """
    def __init__(self, mesh_filename='liver_matdata_meter.json', E=1e4, v=0.4,
                 gamma=-5, timeinterval=0.1, dt=1e-4, lamu_times=1e-1):
        """
        Args:
            mesh_filename: JSON mesh file with vertex coordinates in meters.
            E: Young's modulus.
            v: Poisson's ratio.
            gamma: damping coefficient (forwarded to MTM).
            timeinterval: display/step interval in seconds.
            dt: integration time step in seconds.
            lamu_times: scale factor applied to the Lame parameters
                (1e-2 was too soft, hence the 1e-1 default).
        """
        MTM.__init__(self, E=E, v=v, gamma=gamma, timeinterval=timeinterval, dt=dt)
        self.read_mesh_json(mesh_filename)
        self.vertices *= 1e2  # m -> cm
        self.lamu_times = lamu_times
        # Lame parameters scaled; units kg/cm/s^2 (Pa = N/m^2 = kg/m/s^2 * ...)
        self.la *= self.lamu_times
        self.mu *= self.lamu_times
        self._handle_tri_elements(self.vertices)
        self.reset_x()
        # NOTE(review): fixed_vindex is used below but its computation here
        # was disabled -- presumably it is provided by MTM; confirm.
        #self.surf_vindex = np.unique(self.tri_elements)
        #self.fixed_vindex = np.arange(self.n_v)[np.isin(np.arange(self.n_v),self.surf_vindex,invert=True)]
        #self.Fos = np.array([0, 0, -1]) * self.dm * 9.8 # gravity in mm
    def step(self):
        """Advance the simulation by one display interval (T_interval)
        using sub-steps of length dt."""
        for _ in range(int(self.T_interval / self.dt)):
            self._step()
    def _step(self, move_vindex=None):  # replace old with Ficp
        """One explicit integration sub-step.

        Args:
            move_vindex: indices of externally moved vertices held in place
                during this sub-step; ``None`` means no vertices.
        """
        if move_vindex is None:
            move_vindex = []
        self._update_volume()
        if self.crash_flag:
            return
        self._update_aj_set()
        self._update_BCD_set()
        self._update_Fes()
        self._update_Fincompressible()
        # total internal force: elastic + incompressibility penalty
        self.Fis = -self.Fes + self.Ficp
        self._pre_fixed_handle(move_vindex)  # can replace
        self._explicit_step()
        self._post_fixed_handle(move_vindex)  # can replace
        self.t += self.dt
    def _pre_fixed_handle(self, move_vindex=None):  # replace old
        """Zero the forces acting on fixed/moved vertices."""
        if move_vindex is None:
            move_vindex = []
        self.Fis[move_vindex] = 0
        self.Fis[self.fixed_vindex] = 0
    def _post_fixed_handle(self, move_vindex=None):  # replace old
        """Restore the previous positions of fixed/moved vertices."""
        if move_vindex is None:
            move_vindex = []
        self.x[move_vindex] = self.xp[move_vindex]
        self.x[self.fixed_vindex] = self.xp[self.fixed_vindex]
if __name__ == "__main__":
    # Smoke test: build the model and visualize one tetrahedron that
    # references vertex 190.
    liver = LiverDeform()
    ax = liver.plt_x(text_opt='off')
    ax.text(liver.vertices[190,0],liver.vertices[190,1],liver.vertices[190,2],'190')
    # rows of tet_elements that contain vertex index 190
    find = np.argwhere(liver.tet_elements==190)[:,0]
    ax = plt_tet(liver.vertices[liver.tet_elements[find[0]]],ax=ax)
    plt_show_equal(ax,block=False)
    print("done")
"numpy.argwhere"
] | [((2143, 2181), 'numpy.argwhere', 'np.argwhere', (['(liver.tet_elements == 190)'], {}), '(liver.tet_elements == 190)\n', (2154, 2181), True, 'import numpy as np\n')] |
from worldengine.simulations.basic import find_threshold_f
import numpy
class HumiditySimulation(object):
    """Derive a humidity layer from the precipitation and irrigation layers.

    Fix: removed a dead ``numpy.zeros`` allocation that was immediately
    overwritten by the weighted-combination assignment.
    """

    @staticmethod
    def is_applicable(world):
        """Run only when both inputs exist and humidity is not yet computed."""
        return world.has_precipitations() and world.has_irrigation() and (
            not world.has_humidity())

    def execute(self, world, seed):
        """Compute the humidity data and quantiles and store them on world."""
        assert seed is not None
        data, quantiles = self._calculate(world)
        world.humidity = (data, quantiles)

    @staticmethod
    def _calculate(world):
        humids = world.humids
        precipitationWeight = 1.0
        irrigationWeight = 3
        # weighted combination: irrigation lowers humidity (negative sign)
        data = (world.layers['precipitation'].data * precipitationWeight -
                world.layers['irrigation'].data * irrigationWeight) / (
                    precipitationWeight + irrigationWeight)
        # These were originally evenly spaced at 12.5% each but changing them
        # to a bell curve produced better results
        ocean = world.layers['ocean'].data
        quantiles = {}
        # keys ascend '12'..'87' while the humids index descends 6..0
        for key, idx in zip(('12', '25', '37', '50', '62', '75', '87'),
                            range(6, -1, -1)):
            quantiles[key] = find_threshold_f(data, humids[idx], ocean)
        return data, quantiles
| [
"worldengine.simulations.basic.find_threshold_f",
"numpy.zeros"
] | [((584, 637), 'numpy.zeros', 'numpy.zeros', (['(world.height, world.width)'], {'dtype': 'float'}), '((world.height, world.width), dtype=float)\n', (595, 637), False, 'import numpy\n'), ((1028, 1068), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[6]', 'ocean'], {}), '(data, humids[6], ocean)\n', (1044, 1068), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((1095, 1135), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[5]', 'ocean'], {}), '(data, humids[5], ocean)\n', (1111, 1135), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((1162, 1202), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[4]', 'ocean'], {}), '(data, humids[4], ocean)\n', (1178, 1202), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((1229, 1269), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[3]', 'ocean'], {}), '(data, humids[3], ocean)\n', (1245, 1269), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((1296, 1336), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[2]', 'ocean'], {}), '(data, humids[2], ocean)\n', (1312, 1336), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((1363, 1403), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[1]', 'ocean'], {}), '(data, humids[1], ocean)\n', (1379, 1403), False, 'from worldengine.simulations.basic import find_threshold_f\n'), ((1430, 1470), 'worldengine.simulations.basic.find_threshold_f', 'find_threshold_f', (['data', 'humids[0]', 'ocean'], {}), '(data, humids[0], ocean)\n', (1446, 1470), False, 'from worldengine.simulations.basic import find_threshold_f\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for quantum neural networks Two Layer QNN."""
import numpy as np
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.opflow import PauliSumOp
from qiskit.utils import QuantumInstance
from qiskit_machine_learning.neural_networks import TwoLayerQNN
from qiskit_neko.tests import base
from qiskit_neko import decorators
class TestNeuralNetworks(base.BaseTestCase):
    """Test adapted from the qiskit_machine_learning tutorials."""

    def setUp(self):
        super().setUp()
        # Fix the simulator seed (when the backend supports it) so the
        # regression values below are reproducible.
        if hasattr(self.backend.options, "seed_simulator"):
            self.backend.set_options(seed_simulator=42)

    @decorators.component_attr("terra", "backend", "machine_learning")
    def test_neural_networks(self):
        """Test the execution of quantum neural networks using OpflowQNN"""
        num_qubits = 3
        quantum_instance = QuantumInstance(self.backend)
        feature_map = ZZFeatureMap(num_qubits, reps=2)
        var_form = RealAmplitudes(num_qubits, reps=1)
        observable = PauliSumOp.from_list([("Z" * num_qubits, 1)])
        qnn = TwoLayerQNN(
            num_qubits,
            feature_map=feature_map,
            ansatz=var_form,
            observable=observable,
            quantum_instance=quantum_instance,
        )
        rng = np.random.default_rng(seed=42)
        sample_input = rng.random(size=qnn.num_inputs)
        sample_weights = rng.random(size=qnn.num_weights)
        forward_out = qnn.forward(sample_input, sample_weights)
        backward_out = qnn.backward(sample_input, sample_weights)
        # reference values obtained with the seeded simulator
        expected_gradient = [
            0.0404151,
            0.20428904,
            -0.29863051,
            0.15322033,
            0.10620678,
            -0.2779404,
        ]
        self.assertAlmostEqual(forward_out[0][0], -0.66604201, delta=0.1)
        gradient = backward_out[1][0][0].tolist()
        for idx, expected in enumerate(expected_gradient):
            self.assertAlmostEqual(gradient[idx], expected, delta=0.1)
| [
"qiskit.opflow.PauliSumOp.from_list",
"qiskit.circuit.library.RealAmplitudes",
"qiskit_neko.decorators.component_attr",
"numpy.random.default_rng",
"qiskit_machine_learning.neural_networks.TwoLayerQNN",
"qiskit.circuit.library.ZZFeatureMap",
"qiskit.utils.QuantumInstance"
] | [((1114, 1179), 'qiskit_neko.decorators.component_attr', 'decorators.component_attr', (['"""terra"""', '"""backend"""', '"""machine_learning"""'], {}), "('terra', 'backend', 'machine_learning')\n", (1139, 1179), False, 'from qiskit_neko import decorators\n'), ((1332, 1361), 'qiskit.utils.QuantumInstance', 'QuantumInstance', (['self.backend'], {}), '(self.backend)\n', (1347, 1361), False, 'from qiskit.utils import QuantumInstance\n'), ((1376, 1408), 'qiskit.circuit.library.ZZFeatureMap', 'ZZFeatureMap', (['num_qubits'], {'reps': '(2)'}), '(num_qubits, reps=2)\n', (1388, 1408), False, 'from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap\n'), ((1426, 1460), 'qiskit.circuit.library.RealAmplitudes', 'RealAmplitudes', (['num_qubits'], {'reps': '(1)'}), '(num_qubits, reps=1)\n', (1440, 1460), False, 'from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap\n'), ((1483, 1528), 'qiskit.opflow.PauliSumOp.from_list', 'PauliSumOp.from_list', (["[('Z' * num_qubits, 1)]"], {}), "([('Z' * num_qubits, 1)])\n", (1503, 1528), False, 'from qiskit.opflow import PauliSumOp\n'), ((1545, 1651), 'qiskit_machine_learning.neural_networks.TwoLayerQNN', 'TwoLayerQNN', (['num_qubits'], {'feature_map': 'fm', 'ansatz': 'ansatz', 'observable': 'observable', 'quantum_instance': 'qi_sv'}), '(num_qubits, feature_map=fm, ansatz=ansatz, observable=\n observable, quantum_instance=qi_sv)\n', (1556, 1651), False, 'from qiskit_machine_learning.neural_networks import TwoLayerQNN\n'), ((1684, 1714), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(42)'}), '(seed=42)\n', (1705, 1714), True, 'import numpy as np\n')] |
import os
from collections import Counter
import pprint
import numpy as np
from tqdm import tqdm
'''
File to process the raw `wiki_{num}_string.txt_new.txt`
'''
def collect_files():
rest = []
for file in os.listdir('.'):
if 'wiki_' in file:
rest.append(file)
return rest
def process_one_file(path, min_len=10, max_len=512):
# longer than max_len, will be cut
with open(path) as f:
data = f.read()
data = data.split('\n')
data = [i for i in data if i.strip()]
nd = []
for i in data:
if len(i) <= max_len:
nd.append(i)
else:
start = 0
while start < len(i):
nd.append(i[start:start+max_len])
start += max_len
nd = [i for i in nd if len(i) > min_len]
print(f'[!] collect {len(nd)} string in {path}')
return nd
def write_file(data):
with open('train.txt', 'w') as f:
for i in data:
f.write(f'{i}\n')
if __name__ == "__main__":
files = collect_files()
data = []
for i in tqdm(files):
data.extend(process_one_file(i, min_len=10, max_len=300))
write_file(data)
# state
data_size = len(data)
lengths = [len(i) for i in data]
pprint.pprint(Counter(lengths))
print(f'[!] data size: {data_size}')
print(f'[!] mean lengths: {np.mean(lengths)}')
print(f'[!] min lengths: {np.min(lengths)}')
print(f'[!] max lengths: {np.max(lengths)}')
| [
"tqdm.tqdm",
"numpy.min",
"numpy.mean",
"numpy.max",
"collections.Counter",
"os.listdir"
] | [((214, 229), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (224, 229), False, 'import os\n'), ((1115, 1126), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (1119, 1126), False, 'from tqdm import tqdm\n'), ((1308, 1324), 'collections.Counter', 'Counter', (['lengths'], {}), '(lengths)\n', (1315, 1324), False, 'from collections import Counter\n'), ((1398, 1414), 'numpy.mean', 'np.mean', (['lengths'], {}), '(lengths)\n', (1405, 1414), True, 'import numpy as np\n'), ((1448, 1463), 'numpy.min', 'np.min', (['lengths'], {}), '(lengths)\n', (1454, 1463), True, 'import numpy as np\n'), ((1497, 1512), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (1503, 1512), True, 'import numpy as np\n')] |
import os
import numpy as np
import pytest
import tensorflow as tf
from keras.optimizers import SGD
from pymc3.variational.callbacks import CheckParametersConvergence
from csrank.choicefunction import *
from csrank.experiments.constants import *
from csrank.experiments.util import metrics_on_predictions
from csrank.metrics_np import f1_measure, subset_01_loss, instance_informedness, auc_score
from csrank.tests.test_ranking import check_learner
choice_metrics = {'F1Score': f1_measure, 'Informedness': instance_informedness, "AucScore": auc_score}
optimizer = SGD(lr=1e-3, momentum=0.9, nesterov=True)
def get_vals(values):
return dict(zip(choice_metrics.keys(), values))
choice_functions = {
FETA_CHOICE: (FETAChoiceFunction, {"add_zeroth_order_model": True, "optimizer": optimizer},
get_vals([0.946, 0.9684, 0.9998])),
FATE_CHOICE: (FATEChoiceFunction, {"n_hidden_joint_layers": 1, "n_hidden_set_layers": 1, "n_hidden_joint_units": 5,
"n_hidden_set_units": 5, "optimizer": optimizer},
get_vals([0.8185, 0.6845, 0.9924])),
FATELINEAR_CHOICE: (FATELinearChoiceFunction, {}, get_vals([0.8014, 0.4906, 0.9998])),
FETALINEAR_CHOICE: (FETALinearChoiceFunction, {}, get_vals([0.8782, 0.8894, 0.9998])),
RANKNET_CHOICE: (RankNetChoiceFunction, {"optimizer": optimizer}, get_vals([0.9522, 0.9866, 1.0])),
CMPNET_CHOICE: (CmpNetChoiceFunction, {"optimizer": optimizer}, get_vals([0.8554, 0.8649, 0.966])),
GLM_CHOICE: (GeneralizedLinearModel, {}, get_vals([0.9567, 0.9955, 1.0])),
RANKSVM_CHOICE: (PairwiseSVMChoiceFunction, {}, get_vals([0.9522, 0.9955, 1.0]))
}
@pytest.fixture(scope="module")
def trivial_choice_problem():
random_state = np.random.RandomState(42)
x = random_state.randn(200, 5, 1)
y_true = np.array(x.squeeze(axis=-1) > np.mean(x))
return x, y_true
@pytest.mark.parametrize("name", list(choice_functions.keys()))
def test_choice_function_fixed(trivial_choice_problem, name):
tf.set_random_seed(0)
os.environ["KERAS_BACKEND"] = "tensorflow"
np.random.seed(123)
x, y = trivial_choice_problem
choice_function = choice_functions[name][0]
params, accuracies = choice_functions[name][1], choice_functions[name][2]
params["n_objects"], params["n_object_features"] = tuple(x.shape[1:])
learner = choice_function(**params)
if name == GLM_CHOICE:
learner.fit(x, y, vi_params={"n": 100, "method": "advi", "callbacks": [CheckParametersConvergence()]})
elif "linear" in name:
learner.fit(x, y, epochs=10, validation_split=0, verbose=False)
else:
learner.fit(x, y, epochs=100, validation_split=0, verbose=False)
s_pred = learner.predict_scores(x)
y_pred = learner.predict_for_scores(s_pred)
y_pred_2 = learner.predict(x)
rtol = 1e-2
atol = 5e-2
assert np.isclose(0.0, subset_01_loss(y_pred, y_pred_2), rtol=rtol, atol=atol, equal_nan=False)
for key, value in accuracies.items():
metric = choice_metrics[key]
if metric in metrics_on_predictions:
pred_loss = metric(y, y_pred)
else:
pred_loss = metric(y, s_pred)
assert np.isclose(value, pred_loss, rtol=rtol, atol=atol, equal_nan=False)
params = {"n_hidden": 20, "n_units": 20, "n_hidden_set_units": 2, "n_hidden_set_layers": 10,
"n_hidden_joint_units": 2, "n_hidden_joint_layers": 10, "reg_strength": 1e-3, "learning_rate": 1e-1,
"batch_size": 32, "alpha": 0.5, "l1_ratio": 0.7, "tol": 1e-2, "C": 10, "n_mixtures": 10, "n_nests": 5,
"regularization": "l2"}
learner.set_tunable_parameters(**params)
check_learner(learner, params, rtol, atol)
| [
"numpy.random.seed",
"keras.optimizers.SGD",
"pymc3.variational.callbacks.CheckParametersConvergence",
"pytest.fixture",
"numpy.random.RandomState",
"tensorflow.set_random_seed",
"numpy.isclose",
"numpy.mean",
"csrank.metrics_np.subset_01_loss",
"csrank.tests.test_ranking.check_learner"
] | [((566, 608), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.001)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.001, momentum=0.9, nesterov=True)\n', (569, 608), False, 'from keras.optimizers import SGD\n'), ((1680, 1710), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1694, 1710), False, 'import pytest\n'), ((1760, 1785), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (1781, 1785), True, 'import numpy as np\n'), ((2032, 2053), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (2050, 2053), True, 'import tensorflow as tf\n'), ((2105, 2124), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (2119, 2124), True, 'import numpy as np\n'), ((3693, 3735), 'csrank.tests.test_ranking.check_learner', 'check_learner', (['learner', 'params', 'rtol', 'atol'], {}), '(learner, params, rtol, atol)\n', (3706, 3735), False, 'from csrank.tests.test_ranking import check_learner\n'), ((2899, 2931), 'csrank.metrics_np.subset_01_loss', 'subset_01_loss', (['y_pred', 'y_pred_2'], {}), '(y_pred, y_pred_2)\n', (2913, 2931), False, 'from csrank.metrics_np import f1_measure, subset_01_loss, instance_informedness, auc_score\n'), ((3209, 3276), 'numpy.isclose', 'np.isclose', (['value', 'pred_loss'], {'rtol': 'rtol', 'atol': 'atol', 'equal_nan': '(False)'}), '(value, pred_loss, rtol=rtol, atol=atol, equal_nan=False)\n', (3219, 3276), True, 'import numpy as np\n'), ((1867, 1877), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1874, 1877), True, 'import numpy as np\n'), ((2505, 2533), 'pymc3.variational.callbacks.CheckParametersConvergence', 'CheckParametersConvergence', ([], {}), '()\n', (2531, 2533), False, 'from pymc3.variational.callbacks import CheckParametersConvergence\n')] |
import argparse
from typing import Dict, List, Optional, Union
import matplotlib.pyplot as plt
import mlflow
import numpy as np
import yaml
import air_quality_uci_dataset
import air_quality_uci_rnn
def train_and_evaluate(this_type: Optional[Union[List[str], str]], train_size: float, time_steps: int,
n_units: Dict[str, int], activations: Dict[str, str], optimizer: str, loss: str, metrics: List[str], batch_size: int, epochs: int, shuffle: bool) -> object:
"""Train an rnn model on air quality uci dataset and evaluate it.
:param Optional[Union[List[str], str]] this_type: string or list of strings to define elements of this dataset, defaults to None
:param float train_size: ratio of training dataset size
:param int time_steps: the length of data to predict
:param Dict[str, int] n_units: the numbers of units on an RNN layer and a hidden layer
:param Dict[str, str] activations: activation function names on an RNN layer and a hidden layer
:param str optimizer: optimizer
:param str loss: loss function
:param List[str] metrics: metrics
:param int batch_size: batch size
:param int epochs: the number of epochs
:param bool shuffle: whether the dataset shuffles or not
:return object: trained rnn model on air quality uci dataset
"""
# Load the air quality uci dataset
air_quality_uci_data = air_quality_uci_dataset.AirQualityUciDataset(this_type=this_type)
# Create an RNN for air quality uci
model = air_quality_uci_rnn.AirQualityUciRNN()
# Prepare to train
model.prepare_to_train(air_quality_uci_data=air_quality_uci_data, train_size=train_size, time_steps=time_steps)
# Build a model
model.build_rnn(n_units=n_units, activations=activations)
# Train the model
model.train(optimizer=optimizer, loss=loss, metrics=metrics, batch_size=batch_size, epochs=epochs, shuffle=shuffle)
# Evaluate the model
model.evaluate()
return model
def save_evaluation_result(trained_model: object, output_path: str) -> None:
"""Save a given information.
:param object trained_model: a trained model that has already been evaluated
:param str output_path: a path to an output file
"""
evaluated_data = trained_model.evaluated_data
test_y = trained_model.test_y
size = len(test_y[0])
num_columns = 4
num_rows = int(size // num_columns + 1)
f, axs = plt.subplots(nrows=num_rows, ncols=num_columns, sharex=True, sharey=True)
len_data = len(test_y)
row_index = 0
col_index = 0
for _ in range(size):
pred_color = "red"
true_color = "blue"
axs[row_index, col_index].plot(range(len_data), evaluated_data[:, row_index + col_index], linewidth=0.5, label="prediction", color=pred_color)
axs[row_index, col_index].plot(range(len_data), test_y[:, row_index + col_index], linewidth=0.5, label="true value", color=true_color)
all_values = [evaluated_data[:, row_index + col_index], test_y[:, row_index + col_index]]
min_y = np.min(all_values)
max_y = np.max(all_values)
axs[row_index, col_index].set_ylim([min_y - 0.1, max_y + 0.1])
plt.xlabel(f"prediction: {pred_color}, true: {true_color}")
if col_index != num_columns - 1:
col_index += 1
else:
col_index = 0
row_index += 1
plt.savefig(output_path, dpi=300)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train and evaluate RNN for air quality uci dataset.")
parser.add_argument("-c", "--config_yaml_path", required=False, type=str, default="./config_air_quality_uci.yaml")
args = parser.parse_args()
# Load configs
with open(args.config_yaml_path, "r") as yaml_f:
config = yaml.safe_load(yaml_f)
config_mlflow = config["mlflow"]
# Start training and evaluating whilist logging the information
mlflow.set_experiment(config_mlflow["experiment_name"])
with mlflow.start_run(run_name=config_mlflow["run_name"]):
mlflow.keras.autolog()
config_dataset = config["dataset"]
config_rnn = config["rnn"]
config_train = config["train"]
trained_model = train_and_evaluate(**config_dataset, **config_rnn, **config_train)
config_save = config["save"]
save_evaluation_result(trained_model=trained_model, **config_save)
mlflow.log_artifact(args.config_yaml_path)
mlflow.log_artifact(config_save["output_path"])
| [
"mlflow.start_run",
"air_quality_uci_rnn.AirQualityUciRNN",
"argparse.ArgumentParser",
"mlflow.keras.autolog",
"mlflow.set_experiment",
"air_quality_uci_dataset.AirQualityUciDataset",
"mlflow.log_artifact",
"numpy.min",
"numpy.max",
"yaml.safe_load",
"matplotlib.pyplot.xlabel",
"matplotlib.pyp... | [((1389, 1454), 'air_quality_uci_dataset.AirQualityUciDataset', 'air_quality_uci_dataset.AirQualityUciDataset', ([], {'this_type': 'this_type'}), '(this_type=this_type)\n', (1433, 1454), False, 'import air_quality_uci_dataset\n'), ((1508, 1546), 'air_quality_uci_rnn.AirQualityUciRNN', 'air_quality_uci_rnn.AirQualityUciRNN', ([], {}), '()\n', (1544, 1546), False, 'import air_quality_uci_rnn\n'), ((2425, 2498), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_rows', 'ncols': 'num_columns', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=num_rows, ncols=num_columns, sharex=True, sharey=True)\n', (2437, 2498), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3418), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_path'], {'dpi': '(300)'}), '(output_path, dpi=300)\n', (3396, 3418), True, 'import matplotlib.pyplot as plt\n'), ((3461, 3556), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train and evaluate RNN for air quality uci dataset."""'}), "(description=\n 'Train and evaluate RNN for air quality uci dataset.')\n", (3484, 3556), False, 'import argparse\n'), ((3926, 3981), 'mlflow.set_experiment', 'mlflow.set_experiment', (["config_mlflow['experiment_name']"], {}), "(config_mlflow['experiment_name'])\n", (3947, 3981), False, 'import mlflow\n'), ((3052, 3070), 'numpy.min', 'np.min', (['all_values'], {}), '(all_values)\n', (3058, 3070), True, 'import numpy as np\n'), ((3087, 3105), 'numpy.max', 'np.max', (['all_values'], {}), '(all_values)\n', (3093, 3105), True, 'import numpy as np\n'), ((3185, 3244), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""prediction: {pred_color}, true: {true_color}"""'], {}), "(f'prediction: {pred_color}, true: {true_color}')\n", (3195, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3815), 'yaml.safe_load', 'yaml.safe_load', (['yaml_f'], {}), '(yaml_f)\n', (3807, 3815), False, 'import yaml\n'), ((3991, 4043), 'mlflow.start_run', 'mlflow.start_run', 
([], {'run_name': "config_mlflow['run_name']"}), "(run_name=config_mlflow['run_name'])\n", (4007, 4043), False, 'import mlflow\n'), ((4053, 4075), 'mlflow.keras.autolog', 'mlflow.keras.autolog', ([], {}), '()\n', (4073, 4075), False, 'import mlflow\n'), ((4407, 4449), 'mlflow.log_artifact', 'mlflow.log_artifact', (['args.config_yaml_path'], {}), '(args.config_yaml_path)\n', (4426, 4449), False, 'import mlflow\n'), ((4458, 4505), 'mlflow.log_artifact', 'mlflow.log_artifact', (["config_save['output_path']"], {}), "(config_save['output_path'])\n", (4477, 4505), False, 'import mlflow\n')] |
import numpy as np
import pandas as pd
from supervised import AutoML
COLS = 10
for ROWS in [1000, 5000, 10000]:
X = np.random.uniform(size=(ROWS, COLS))
y = np.random.randint(0, 2, size=(ROWS,))
automl = AutoML(results_path=f"AutoML_{ROWS//1000}k", mode="Explain", features_selection=True)
automl.fit(X, y)
| [
"numpy.random.uniform",
"numpy.random.randint",
"supervised.AutoML"
] | [((122, 158), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(ROWS, COLS)'}), '(size=(ROWS, COLS))\n', (139, 158), True, 'import numpy as np\n'), ((167, 204), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': '(ROWS,)'}), '(0, 2, size=(ROWS,))\n', (184, 204), True, 'import numpy as np\n'), ((219, 310), 'supervised.AutoML', 'AutoML', ([], {'results_path': 'f"""AutoML_{ROWS // 1000}k"""', 'mode': '"""Explain"""', 'features_selection': '(True)'}), "(results_path=f'AutoML_{ROWS // 1000}k', mode='Explain',\n features_selection=True)\n", (225, 310), False, 'from supervised import AutoML\n')] |
import errno
import inspect
import multiprocessing.pool
import os
import random
import re
import sys
import datetime as DT
import dataclasses
from enum import IntFlag
import dill as pickle
import matplotlib as mpl
from matplotlib import pyplot as plt
import numpy as np
from mlworkflow import get_callable, Dataset
from mlworkflow.datasets import batchify
def insert_suffix(filename, suffix):
root, ext = os.path.splitext(filename)
return root + suffix + ext
# TODO: could be implemented using dataclass
class ChunkProcessor():
def __repr__(self):
try:
config = getattr(self, "config", {name: getattr(self, name) for name in inspect.getfullargspec(self.__init__).args[1:]})
except KeyError as e:
print("You should implement the 'config' property that returns a dictionnary of config given to '__init__'")
raise e
attributes = ",".join("{}={}".format(k, v) for k,v in config.items())
return "{}({})".format(self.__class__.__name__, attributes)
class OutputInhibitor():
def __init__(self, name=None):
self.name = name
def __enter__(self):
if self.name:
print("Launching {}... ".format(self.name), end="")
self.ps1, self.ps2 = getattr(sys, "ps1", None), getattr(sys, "ps2", None)
if self.ps1:
del sys.ps1
if self.ps2:
del sys.ps2
self.stderr = sys.stderr
self.fp = open(os.devnull, "w")
sys.stderr = self.fp
def __exit__(self, exc_type, exc_val, exc_tb):
if self.ps1:
sys.ps1 = self.ps1
if self.ps2:
sys.ps2 = self.ps2
sys.stderr = self.stderr
self.fp.close()
if self.name:
print("Done.")
def mkdir(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class StagnationError(Exception):
@classmethod
def catch(cls, history, tail=5):
epoch = len(history)
history = [h for h in history if h!=np.nan]
if len(history) > tail and len(set(history[-tail:])) == 1:
raise cls("after {} epochs.".format(epoch))
def find(filename, dirs=None, verbose=True):
if os.path.isabs(filename):
if not os.path.isfile(filename):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
return filename
dirs = dirs or [os.getcwd(), *[os.getenv(name) for name in ['SCRATCH_FOLDER', 'DATASET_FOLDER', 'RESULTS_FOLDER']]]
for path in dirs:
if path is None:
continue
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
if verbose:
print("{} found in {}".format(filename, filepath))
return filepath
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
"{} (searched in {})".format(filename, dirs))
def datetime():
dt = DT.datetime.now()
d = dt.strftime("%Y%m%d")
t = dt.strftime("%H%M%S")
dt = "{}_{}".format(d, t)
return {"d": d, "t": t, "dt": dt}
PLOT_HEIGHT = 4
def build_metrics_axes(fetches, figsize=None):
figsize = figsize if figsize else (20, PLOT_HEIGHT*len(fetches))
_, axes = plt.subplots(len(fetches), 1, figsize=figsize, squeeze=False)
axes = [ax[0] for ax in axes]
# axes = [axes[0] for axes in fig.subplots(len(fetches), 1, squeeze=False)]
for ax, fetch in zip(axes, fetches):
ax.set_title(fetch)
ax.grid(color='lightgray', linestyle='-', linewidth=1)
if "accuracy" in fetch or fetch in ["recall", "accuracy", "precision"]:
ax.set_ylim([0, 1])
elif "learning_rate" in fetch:
ax.set_yscale('log')
return axes
def get_axes(rows=1, cols=1, expected_shape=(1, 1), size=8, squeeze=False):
expected_width, expected_height = expected_shape
figsize = (size*cols, size*rows*expected_width/expected_height)
fig = mpl.figure.Figure(figsize=figsize)
mpl.backends.backend_agg.FigureCanvasAgg(fig)
fig.subplots_adjust(wspace=0.05)
fig.subplots(rows, cols)
return fig.subplots(rows, cols, squeeze=squeeze)
def plot(ax, ydata, label, legend, replace=False, average=False, **kwargs):
if replace:
# Clean replaced elements
for elem in [e for e in ax.lines+ax.collections if e.get_label() == label]:
elem.remove()
dim = len(ydata.shape)
if dim == 1:
xdata = np.arange(ydata.shape[0])
mask = np.isfinite(ydata)
ax.plot(xdata[mask], ydata[mask], **kwargs, label=label)
elif dim == 2:
xdata = np.arange(ydata.shape[1])
mask = np.any(np.isfinite(ydata), axis=0)
if average:
mean = np.nanmean(ydata[:,mask], axis=0)
var = np.sqrt(np.nanvar(ydata[:,mask], axis=0))*1
ax.plot(xdata[mask], mean, **kwargs, label=label)
ax.fill_between(xdata[mask], mean+var, mean-var, alpha=0.3, label=label)
else:
for i in range(ydata.shape[0]):
ax.plot(xdata[mask], ydata[i, mask], **kwargs, label=label)
else:
raise ValueError("Invalid dimension for ydata")
if legend:
ax.legend()
# ax.relim()
# ax.autoscale_view(True, True, True)
class Callable():
def __init__(self, callee, *args, **kwargs):
self.callee = get_callable(callee)
self.kwargs = kwargs
self.args = args
def __call__(self, *args, **kwargs):
return self.callee(*args, *self.args, **{**kwargs, **self.kwargs})
def __repr__(self):
return "{}({},{})".format(self.callee, self.args, self.kwargs)
def transforms_to_name(transforms: list):
for t in transforms:
assert not re.match(".*object at 0x[0-9a-fA-F]*.*", str(t)), \
"The __str__ function of {} is not implemented".format(
t.__class__)
# sanitize name constructed by stringification of the transforms
return str(transforms).translate(str.maketrans({"$": r"\$", " ": ""}))+".pickle"
linestyles = {
"training": "--",
"validation": "-",
"testing": "-."
}
class ExperimentMode(IntFlag):
NONE = 0
TRAIN = 1
EVAL = 2
ALL = -1
# ----------------------------------------------------------------------
# From: https://stackoverflow.com/a/53180921/1782553
# ----------------------------------------------------------------------
class NoDaemonProcess(multiprocessing.Process):
@property
def daemon(self):
return False
@daemon.setter
def daemon(self, value):
pass
class NoDaemonContext(type(multiprocessing.get_context())):
Process = NoDaemonProcess
# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
# because the latter is only a wrapper function, not a proper class.
class NestablePool(multiprocessing.pool.Pool):
def __init__(self, *args, **kwargs):
kwargs['context'] = NoDaemonContext()
super(NestablePool, self).__init__(*args, **kwargs)
# ----------------------------------------------------------------------
class DataCollector(dict):
external = frozenset()
def __init__(self, filename=None, readonly=False, external=None):
super().__init__()
assert not external or filename, "'external' argument cannot be used if no filename is specified."
self.history = []
self.tmp_dict = dict()
assert isinstance(external, (list, tuple, type(None))), "argument 'external' must be a list of strings"
self.external = set(external) if external is not None else None
self.file = None
self.pickler = None
self.filename = self.format_filename(filename)
self.directory = self.filename[:-4] if self.filename else None
self.file_flag = "w"
if self.filename and os.path.isfile(self.filename):
self.file_flag = "r" if readonly else "a"
self.file = open(self.filename, self.file_flag+"+b")
self.file.seek(0)
try:
unpickler = pickle.Unpickler(self.file)
while True:
data = unpickler.load()
self.history.append(data)
except EOFError:
if self.history:
for k,v in self.history[-1].items():
self.__setitem__(k,v, skip_external=True)
self.file.close()
self.file = None
def __del__(self):
if self.file:
self.file.close()
@classmethod
def format_filename(cls, filename):
if filename is None:
return None
assert filename[-4:] == '.dcp', "DataCollector files must end with '.dcp'. Received {}".format(filename)
datetime_ = DT.datetime.now()
d = datetime_.strftime("%Y%m%d")
t = datetime_.strftime("%H%M%S")
dt = "{}_{}".format(d, t)
return filename.format(dt, dt=dt, d=d, t=t)
def save_external(self, key, value):
mkdir(self.directory) # trim '.dcp' extension
filename = "{}/{}_{}.data".format(self.directory, len(self.history), key)
with open(filename, "wb") as fd:
pickle.dump(value, fd)
return filename
def load_external(self, filename):
if not isinstance(filename, str): # handle retro-compatibilty
return filename
with open(filename, "rb") as fd:
value = pickle.load(fd)
return value
def __len__(self):
return len(self.history) + (1 if self.tmp_dict else 0)
def __setitem__(self, key, value, skip_external=False):
if self.external and key in self.external and skip_external is False:
value = self.save_external(key, value)
super().__setitem__(key, value)
self.tmp_dict[key] = value
def __getitem__(self, key, skip_external=False):
get = lambda k,v: v if not self.external or k not in self.external or skip_external else self.load_external(v)
if isinstance(key, tuple):
key, s = key
history = self.history + [self.tmp_dict] if key in self.tmp_dict else self.history
if isinstance(s, (int, np.int64)):
return get(key, history[s][key])
if isinstance(s, slice):
return [get(key, data.get(key, None)) for data in history[s]]
raise ValueError("Expected (key,slice). Received ({},{}).".format(key,s))
if isinstance(key, int):
it = key
return dict((key, self[key,it]) for key in self.history[it].keys())
if isinstance(key, slice):
s = key
return [dict((key, get(key, value)) for key,value in d.items()) for d in self.history[s]]
return get(key, super().__getitem__(key))
def setdefault(self, key, default=None):
if key in self.tmp_dict:
return self.tmp_dict[key]
self[key] = default
return default
def __delitem__(self, key):
super().__delitem__(key)
del self.tmp_dict[key]
def pop(self, key, skip_external=False):
value = self.__getitem__(key, skip_external=skip_external)
del self[key]
return value
def update(self, **kwargs):
for k,v in kwargs.items():
self[k] = v
def checkpoint(self, *keys):
keys = keys or list(self.keys())
checkpoint = {key: self.pop(key, skip_external=True) for key in keys}
self.history.append(checkpoint)
if self.filename:
# Open file if not done already
if not self.file:
self.file = open(self.filename, self.file_flag+"+b")
# Create pickler if not done already
if not self.pickler:
self.pickler = pickle.Pickler(self.file)
# Dump checkpoint
self.pickler.dump(checkpoint)
self.file.flush()
| [
"dill.Pickler",
"os.path.isfile",
"numpy.arange",
"os.path.join",
"numpy.nanmean",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"numpy.isfinite",
"dill.load",
"matplotlib.figure.Figure",
"numpy.nanvar",
"datetime.datetime.now",
"dill.Unpickler",
"os.strerror",
"dill.dump",
"os.gete... | [((413, 439), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (429, 439), False, 'import os\n'), ((2236, 2259), 'os.path.isabs', 'os.path.isabs', (['filename'], {}), '(filename)\n', (2249, 2259), False, 'import os\n'), ((2980, 2997), 'datetime.datetime.now', 'DT.datetime.now', ([], {}), '()\n', (2995, 2997), True, 'import datetime as DT\n'), ((3990, 4024), 'matplotlib.figure.Figure', 'mpl.figure.Figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4007, 4024), True, 'import matplotlib as mpl\n'), ((4029, 4074), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'mpl.backends.backend_agg.FigureCanvasAgg', (['fig'], {}), '(fig)\n', (4069, 4074), True, 'import matplotlib as mpl\n'), ((1796, 1813), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1807, 1813), False, 'import os\n'), ((2621, 2649), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (2633, 2649), False, 'import os\n'), ((2661, 2685), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (2675, 2685), False, 'import os\n'), ((2849, 2874), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (2860, 2874), False, 'import os\n'), ((4492, 4517), 'numpy.arange', 'np.arange', (['ydata.shape[0]'], {}), '(ydata.shape[0])\n', (4501, 4517), True, 'import numpy as np\n'), ((4533, 4551), 'numpy.isfinite', 'np.isfinite', (['ydata'], {}), '(ydata)\n', (4544, 4551), True, 'import numpy as np\n'), ((5395, 5415), 'mlworkflow.get_callable', 'get_callable', (['callee'], {}), '(callee)\n', (5407, 5415), False, 'from mlworkflow import get_callable, Dataset\n'), ((8785, 8802), 'datetime.datetime.now', 'DT.datetime.now', ([], {}), '()\n', (8800, 8802), True, 'import datetime as DT\n'), ((2276, 2300), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2290, 2300), False, 'import os\n'), ((2434, 2445), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2443, 2445), False, 'import 
os\n'), ((4652, 4677), 'numpy.arange', 'np.arange', (['ydata.shape[1]'], {}), '(ydata.shape[1])\n', (4661, 4677), True, 'import numpy as np\n'), ((7850, 7879), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (7864, 7879), False, 'import os\n'), ((9202, 9224), 'dill.dump', 'pickle.dump', (['value', 'fd'], {}), '(value, fd)\n', (9213, 9224), True, 'import dill as pickle\n'), ((9448, 9463), 'dill.load', 'pickle.load', (['fd'], {}), '(fd)\n', (9459, 9463), True, 'import dill as pickle\n'), ((1849, 1868), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1862, 1868), False, 'import os\n'), ((2352, 2377), 'os.strerror', 'os.strerror', (['errno.ENOENT'], {}), '(errno.ENOENT)\n', (2363, 2377), False, 'import os\n'), ((4700, 4718), 'numpy.isfinite', 'np.isfinite', (['ydata'], {}), '(ydata)\n', (4711, 4718), True, 'import numpy as np\n'), ((4767, 4801), 'numpy.nanmean', 'np.nanmean', (['ydata[:, mask]'], {'axis': '(0)'}), '(ydata[:, mask], axis=0)\n', (4777, 4801), True, 'import numpy as np\n'), ((8075, 8102), 'dill.Unpickler', 'pickle.Unpickler', (['self.file'], {}), '(self.file)\n', (8091, 8102), True, 'import dill as pickle\n'), ((11791, 11816), 'dill.Pickler', 'pickle.Pickler', (['self.file'], {}), '(self.file)\n', (11805, 11816), True, 'import dill as pickle\n'), ((2449, 2464), 'os.getenv', 'os.getenv', (['name'], {}), '(name)\n', (2458, 2464), False, 'import os\n'), ((4827, 4860), 'numpy.nanvar', 'np.nanvar', (['ydata[:, mask]'], {'axis': '(0)'}), '(ydata[:, mask], axis=0)\n', (4836, 4860), True, 'import numpy as np\n'), ((662, 699), 'inspect.getfullargspec', 'inspect.getfullargspec', (['self.__init__'], {}), '(self.__init__)\n', (684, 699), False, 'import inspect\n')] |
#
# Tests for the Finite Volume Method
#
import pybamm
from tests import (
get_mesh_for_testing,
get_p2d_mesh_for_testing,
get_1p1d_mesh_for_testing,
)
import numpy as np
from scipy.sparse import kron, eye
import unittest
class TestFiniteVolume(unittest.TestCase):
def test_node_to_edge_to_node(self):
# Create discretisation
mesh = get_mesh_for_testing()
fin_vol = pybamm.FiniteVolume()
fin_vol.build(mesh)
n = mesh["negative electrode"].npts
# node to edge
c = pybamm.StateVector(slice(0, n), domain=["negative electrode"])
y_test = np.ones(n)
diffusivity_c_ari = fin_vol.node_to_edge(c, method="arithmetic")
np.testing.assert_array_equal(
diffusivity_c_ari.evaluate(None, y_test), np.ones((n + 1, 1))
)
diffusivity_c_har = fin_vol.node_to_edge(c, method="harmonic")
np.testing.assert_array_equal(
diffusivity_c_har.evaluate(None, y_test), np.ones((n + 1, 1))
)
# edge to node
d = pybamm.StateVector(slice(0, n + 1), domain=["negative electrode"])
y_test = np.ones(n + 1)
diffusivity_d_ari = fin_vol.edge_to_node(d, method="arithmetic")
np.testing.assert_array_equal(
diffusivity_d_ari.evaluate(None, y_test), np.ones((n, 1))
)
diffusivity_d_har = fin_vol.edge_to_node(d, method="harmonic")
np.testing.assert_array_equal(
diffusivity_d_har.evaluate(None, y_test), np.ones((n, 1))
)
# bad shift key
with self.assertRaisesRegex(ValueError, "shift key"):
fin_vol.shift(c, "bad shift key", "arithmetic")
with self.assertRaisesRegex(ValueError, "shift key"):
fin_vol.shift(c, "bad shift key", "harmonic")
# bad method
with self.assertRaisesRegex(ValueError, "method"):
fin_vol.shift(c, "shift key", "bad method")
def test_concatenation(self):
mesh = get_mesh_for_testing()
fin_vol = pybamm.FiniteVolume()
fin_vol.build(mesh)
whole_cell = ["negative electrode", "separator", "positive electrode"]
edges = [pybamm.Vector(mesh[dom].edges, domain=dom) for dom in whole_cell]
# Concatenation of edges should get averaged to nodes first, using edge_to_node
v_disc = fin_vol.concatenation(edges)
np.testing.assert_array_equal(
v_disc.evaluate()[:, 0], mesh.combine_submeshes(*whole_cell).nodes
)
# test for bad shape
edges = [
pybamm.Vector(np.ones(mesh[dom].npts + 2), domain=dom) for dom in whole_cell
]
with self.assertRaisesRegex(pybamm.ShapeError, "child must have size n_nodes"):
fin_vol.concatenation(edges)
def test_discretise_diffusivity_times_spatial_operator(self):
# Setup mesh and discretisation
mesh = get_mesh_for_testing()
spatial_methods = {"macroscale": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
whole_cell = ["negative electrode", "separator", "positive electrode"]
combined_submesh = mesh.combine_submeshes(*whole_cell)
# Discretise some equations where averaging is needed
var = pybamm.Variable("var", domain=whole_cell)
disc.set_variable_slices([var])
y_test = np.ones_like(combined_submesh.nodes[:, np.newaxis])
for eqn in [
var * pybamm.grad(var),
var ** 2 * pybamm.grad(var),
var * pybamm.grad(var) ** 2,
var * (pybamm.grad(var) + 2),
(pybamm.grad(var) + 2) * (-var),
(pybamm.grad(var) + 2) * (2 * var),
pybamm.grad(var) * pybamm.grad(var),
(pybamm.grad(var) + 2) * pybamm.grad(var) ** 2,
pybamm.div(pybamm.grad(var)),
pybamm.div(pybamm.grad(var)) + 2,
pybamm.div(pybamm.grad(var)) + var,
pybamm.div(2 * pybamm.grad(var)),
pybamm.div(2 * pybamm.grad(var)) + 3 * var,
-2 * pybamm.div(var * pybamm.grad(var) + 2 * pybamm.grad(var)),
pybamm.laplacian(var),
]:
# Check that the equation can be evaluated for different combinations
# of boundary conditions
# Dirichlet
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(1), "Dirichlet"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
# Neumann
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(0), "Neumann"),
"right": (pybamm.Scalar(1), "Neumann"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
# One of each
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(0), "Dirichlet"),
"right": (pybamm.Scalar(1), "Neumann"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(0), "Neumann"),
"right": (pybamm.Scalar(1), "Dirichlet"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_disc.evaluate(None, y_test)
def test_discretise_spatial_variable(self):
# Create discretisation
mesh = get_mesh_for_testing()
spatial_methods = {
"macroscale": pybamm.FiniteVolume(),
"negative particle": pybamm.FiniteVolume(),
"positive particle": pybamm.FiniteVolume(),
}
disc = pybamm.Discretisation(mesh, spatial_methods)
# macroscale
x1 = pybamm.SpatialVariable("x", ["negative electrode"])
x1_disc = disc.process_symbol(x1)
self.assertIsInstance(x1_disc, pybamm.Vector)
np.testing.assert_array_equal(
x1_disc.evaluate(), disc.mesh["negative electrode"].nodes[:, np.newaxis]
)
# macroscale with concatenation
x2 = pybamm.SpatialVariable("x", ["negative electrode", "separator"])
x2_disc = disc.process_symbol(x2)
self.assertIsInstance(x2_disc, pybamm.Vector)
np.testing.assert_array_equal(
x2_disc.evaluate(),
disc.mesh.combine_submeshes("negative electrode", "separator").nodes[
:, np.newaxis
],
)
# microscale
r = 3 * pybamm.SpatialVariable("r", ["negative particle"])
r_disc = disc.process_symbol(r)
self.assertIsInstance(r_disc, pybamm.Vector)
np.testing.assert_array_equal(
r_disc.evaluate(), 3 * disc.mesh["negative particle"].nodes[:, np.newaxis]
)
def test_mass_matrix_shape(self):
# Create model
whole_cell = ["negative electrode", "separator", "positive electrode"]
c = pybamm.Variable("c", domain=whole_cell)
N = pybamm.grad(c)
model = pybamm.BaseModel()
model.rhs = {c: pybamm.div(N)}
model.initial_conditions = {c: pybamm.Scalar(0)}
model.boundary_conditions = {
c: {"left": (0, "Dirichlet"), "right": (0, "Dirichlet")}
}
model.variables = {"c": c, "N": N}
# Create discretisation
mesh = get_mesh_for_testing()
spatial_methods = {"macroscale": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
combined_submesh = mesh.combine_submeshes(*whole_cell)
disc.process_model(model)
# Mass matrix
mass = np.eye(combined_submesh.npts)
np.testing.assert_array_equal(mass, model.mass_matrix.entries.toarray())
def test_p2d_mass_matrix_shape(self):
# Create model
c = pybamm.Variable(
"c",
domain=["negative particle"],
auxiliary_domains={"secondary": "negative electrode"},
)
N = pybamm.grad(c)
model = pybamm.BaseModel()
model.rhs = {c: pybamm.div(N)}
model.initial_conditions = {c: pybamm.Scalar(0)}
model.boundary_conditions = {
c: {"left": (0, "Neumann"), "right": (0, "Dirichlet")}
}
model.variables = {"c": c, "N": N}
# Create discretisation
mesh = get_p2d_mesh_for_testing()
spatial_methods = {"negative particle": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
disc.process_model(model)
# Mass matrix
prim_pts = mesh["negative particle"].npts
sec_pts = mesh["negative electrode"].npts
mass_local = eye(prim_pts)
mass = kron(eye(sec_pts), mass_local)
np.testing.assert_array_equal(
mass.toarray(), model.mass_matrix.entries.toarray()
)
def test_jacobian(self):
# Create discretisation
whole_cell = ["negative electrode", "separator", "positive electrode"]
mesh = get_mesh_for_testing()
spatial_methods = {"macroscale": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
combined_submesh = mesh.combine_submeshes(*whole_cell)
spatial_method = pybamm.FiniteVolume()
spatial_method.build(mesh)
# Setup variable
var = pybamm.Variable("var", domain=whole_cell)
disc.set_variable_slices([var])
y = pybamm.StateVector(slice(0, combined_submesh.npts))
y_test = np.ones_like(combined_submesh.nodes[:, np.newaxis])
# grad
eqn = pybamm.grad(var)
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(1), "Dirichlet"),
"right": (pybamm.Scalar(2), "Dirichlet"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_jac = eqn_disc.jac(y)
jacobian = eqn_jac.evaluate(y=y_test)
grad_matrix = spatial_method.gradient_matrix(
whole_cell, {"primary": whole_cell}
).entries
np.testing.assert_array_equal(jacobian.toarray()[1:-1], grad_matrix.toarray())
np.testing.assert_array_equal(
jacobian.toarray()[0, 0], grad_matrix.toarray()[0][0] * -2
)
np.testing.assert_array_equal(
jacobian.toarray()[-1, -1], grad_matrix.toarray()[-1][-1] * -2
)
# grad with averaging
eqn = var * pybamm.grad(var)
eqn_disc = disc.process_symbol(eqn)
eqn_jac = eqn_disc.jac(y)
eqn_jac.evaluate(y=y_test)
# div(grad)
flux = pybamm.grad(var)
eqn = pybamm.div(flux)
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(1), "Neumann"),
"right": (pybamm.Scalar(2), "Neumann"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_jac = eqn_disc.jac(y)
eqn_jac.evaluate(y=y_test)
# div(grad) with averaging
flux = var * pybamm.grad(var)
eqn = pybamm.div(flux)
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(1), "Neumann"),
"right": (pybamm.Scalar(2), "Neumann"),
}
}
eqn_disc = disc.process_symbol(eqn)
eqn_jac = eqn_disc.jac(y)
eqn_jac.evaluate(y=y_test)
def test_boundary_value_domain(self):
mesh = get_p2d_mesh_for_testing()
spatial_methods = {
"macroscale": pybamm.FiniteVolume(),
"negative particle": pybamm.FiniteVolume(),
"positive particle": pybamm.FiniteVolume(),
}
disc = pybamm.Discretisation(mesh, spatial_methods)
c_s_n = pybamm.Variable(
"c_s_n",
domain=["negative particle"],
auxiliary_domains={"secondary": ["negative electrode"]},
)
c_s_p = pybamm.Variable(
"c_s_p",
domain=["positive particle"],
auxiliary_domains={"secondary": ["positive electrode"]},
)
disc.set_variable_slices([c_s_n, c_s_p])
# surface values
c_s_n_surf = pybamm.surf(c_s_n)
c_s_p_surf = pybamm.surf(c_s_p)
c_s_n_surf_disc = disc.process_symbol(c_s_n_surf)
c_s_p_surf_disc = disc.process_symbol(c_s_p_surf)
self.assertEqual(c_s_n_surf_disc.domain, ["negative electrode"])
self.assertEqual(c_s_p_surf_disc.domain, ["positive electrode"])
def test_delta_function(self):
mesh = get_mesh_for_testing()
spatial_methods = {"macroscale": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
var = pybamm.Variable("var")
delta_fn_left = pybamm.DeltaFunction(var, "left", "negative electrode")
delta_fn_right = pybamm.DeltaFunction(var, "right", "negative electrode")
disc.set_variable_slices([var])
delta_fn_left_disc = disc.process_symbol(delta_fn_left)
delta_fn_right_disc = disc.process_symbol(delta_fn_right)
# Basic shape and type tests
y = np.ones_like(mesh["negative electrode"].nodes[:, np.newaxis])
# Left
self.assertEqual(delta_fn_left_disc.domains, delta_fn_left.domains)
self.assertIsInstance(delta_fn_left_disc, pybamm.Multiplication)
self.assertIsInstance(delta_fn_left_disc.left, pybamm.Matrix)
np.testing.assert_array_equal(delta_fn_left_disc.left.evaluate()[:, 1:], 0)
self.assertEqual(delta_fn_left_disc.shape, y.shape)
# Right
self.assertEqual(delta_fn_right_disc.domains, delta_fn_right.domains)
self.assertIsInstance(delta_fn_right_disc, pybamm.Multiplication)
self.assertIsInstance(delta_fn_right_disc.left, pybamm.Matrix)
np.testing.assert_array_equal(delta_fn_right_disc.left.evaluate()[:, :-1], 0)
self.assertEqual(delta_fn_right_disc.shape, y.shape)
# Value tests
# Delta function should integrate to the same thing as variable
var_disc = disc.process_symbol(var)
x = pybamm.standard_spatial_vars.x_n
delta_fn_int_disc = disc.process_symbol(pybamm.Integral(delta_fn_left, x))
np.testing.assert_array_equal(
var_disc.evaluate(y=y) * mesh["negative electrode"].edges[-1],
np.sum(delta_fn_int_disc.evaluate(y=y)),
)
def test_heaviside(self):
mesh = get_mesh_for_testing()
spatial_methods = {"macroscale": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
var = pybamm.Variable("var", domain="negative electrode")
heav = var > 1
disc.set_variable_slices([var])
# process_binary_operators should work with heaviside
disc_heav = disc.process_symbol(heav * var)
nodes = mesh["negative electrode"].nodes
self.assertEqual(disc_heav.size, nodes.size)
np.testing.assert_array_equal(disc_heav.evaluate(y=2 * np.ones_like(nodes)), 2)
np.testing.assert_array_equal(disc_heav.evaluate(y=-2 * np.ones_like(nodes)), 0)
def test_upwind_downwind(self):
mesh = get_mesh_for_testing()
spatial_methods = {"macroscale": pybamm.FiniteVolume()}
disc = pybamm.Discretisation(mesh, spatial_methods)
n = mesh["negative electrode"].npts
var = pybamm.StateVector(slice(0, n), domain="negative electrode")
upwind = pybamm.upwind(var)
downwind = pybamm.downwind(var)
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(5), "Dirichlet"),
"right": (pybamm.Scalar(3), "Dirichlet"),
}
}
disc_upwind = disc.process_symbol(upwind)
disc_downwind = disc.process_symbol(downwind)
nodes = mesh["negative electrode"].nodes
n = mesh["negative electrode"].npts
self.assertEqual(disc_upwind.size, nodes.size + 1)
self.assertEqual(disc_downwind.size, nodes.size + 1)
y_test = 2 * np.ones_like(nodes)
np.testing.assert_array_equal(
disc_upwind.evaluate(y=y_test),
np.concatenate([np.array([5, 0.5]), 2 * np.ones(n - 1)])[:, np.newaxis],
)
np.testing.assert_array_equal(
disc_downwind.evaluate(y=y_test),
np.concatenate([2 * np.ones(n - 1), np.array([1.5, 3])])[:, np.newaxis],
)
# Remove boundary conditions and check error is raised
disc.bcs = {}
with self.assertRaisesRegex(pybamm.ModelError, "Boundary conditions"):
disc.process_symbol(upwind)
# Set wrong boundary conditions and check error is raised
disc.bcs = {
var.id: {
"left": (pybamm.Scalar(5), "Neumann"),
"right": (pybamm.Scalar(3), "Neumann"),
}
}
with self.assertRaisesRegex(pybamm.ModelError, "Dirichlet boundary conditions"):
disc.process_symbol(upwind)
with self.assertRaisesRegex(pybamm.ModelError, "Dirichlet boundary conditions"):
disc.process_symbol(downwind)
def test_grad_div_with_bcs_on_tab(self):
# Create discretisation
mesh = get_1p1d_mesh_for_testing()
spatial_methods = {
"macroscale": pybamm.FiniteVolume(),
"negative particle": pybamm.FiniteVolume(),
"positive particle": pybamm.FiniteVolume(),
"current collector": pybamm.FiniteVolume(),
}
disc = pybamm.Discretisation(mesh, spatial_methods)
# var
y_test = np.ones(mesh["current collector"].npts)
var = pybamm.Variable("var", domain="current collector")
disc.set_variable_slices([var])
# grad
grad_eqn = pybamm.grad(var)
# div
N = pybamm.grad(var)
div_eqn = pybamm.div(N)
# bcs (on each tab)
boundary_conditions = {
var.id: {
"negative tab": (pybamm.Scalar(1), "Dirichlet"),
"positive tab": (pybamm.Scalar(0), "Neumann"),
}
}
disc.bcs = boundary_conditions
grad_eqn_disc = disc.process_symbol(grad_eqn)
grad_eqn_disc.evaluate(None, y_test)
div_eqn_disc = disc.process_symbol(div_eqn)
div_eqn_disc.evaluate(None, y_test)
# bcs (one pos, one not tab)
boundary_conditions = {
var.id: {
"no tab": (pybamm.Scalar(1), "Dirichlet"),
"positive tab": (pybamm.Scalar(0), "Dirichlet"),
}
}
disc.bcs = boundary_conditions
grad_eqn_disc = disc.process_symbol(grad_eqn)
grad_eqn_disc.evaluate(None, y_test)
div_eqn_disc = disc.process_symbol(div_eqn)
div_eqn_disc.evaluate(None, y_test)
# bcs (one neg, one not tab)
boundary_conditions = {
var.id: {
"negative tab": (pybamm.Scalar(1), "Neumann"),
"no tab": (pybamm.Scalar(0), "Neumann"),
}
}
disc.bcs = boundary_conditions
grad_eqn_disc = disc.process_symbol(grad_eqn)
grad_eqn_disc.evaluate(None, y_test)
div_eqn_disc = disc.process_symbol(div_eqn)
div_eqn_disc.evaluate(None, y_test)
def test_neg_pos_bcs(self):
# Create discretisation
mesh = get_1p1d_mesh_for_testing()
spatial_methods = {
"macroscale": pybamm.FiniteVolume(),
"negative particle": pybamm.FiniteVolume(),
"positive particle": pybamm.FiniteVolume(),
"current collector": pybamm.FiniteVolume(),
}
disc = pybamm.Discretisation(mesh, spatial_methods)
# var
var = pybamm.Variable("var", domain="current collector")
disc.set_variable_slices([var])
# grad
grad_eqn = pybamm.grad(var)
# bcs (on each tab)
boundary_conditions = {
var.id: {
"negative tab": (pybamm.Scalar(1), "Dirichlet"),
"positive tab": (pybamm.Scalar(0), "Neumann"),
"no tab": (pybamm.Scalar(8), "Dirichlet"),
}
}
disc.bcs = boundary_conditions
# check after disc that negative tab goes to left and positive tab goes
# to right
disc.process_symbol(grad_eqn)
self.assertEqual(disc.bcs[var.id]["left"][0].id, pybamm.Scalar(1).id)
self.assertEqual(disc.bcs[var.id]["left"][1], "Dirichlet")
self.assertEqual(disc.bcs[var.id]["right"][0].id, pybamm.Scalar(0).id)
self.assertEqual(disc.bcs[var.id]["right"][1], "Neumann")
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
| [
"tests.get_1p1d_mesh_for_testing",
"pybamm.Discretisation",
"pybamm.upwind",
"pybamm.SpatialVariable",
"numpy.ones",
"scipy.sparse.eye",
"pybamm.Integral",
"unittest.main",
"pybamm.surf",
"pybamm.downwind",
"pybamm.laplacian",
"tests.get_mesh_for_testing",
"pybamm.Variable",
"numpy.ones_li... | [((20896, 20911), 'unittest.main', 'unittest.main', ([], {}), '()\n', (20909, 20911), False, 'import unittest\n'), ((367, 389), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (387, 389), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((408, 429), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (427, 429), False, 'import pybamm\n'), ((618, 628), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (625, 628), True, 'import numpy as np\n'), ((1139, 1153), 'numpy.ones', 'np.ones', (['(n + 1)'], {}), '(n + 1)\n', (1146, 1153), True, 'import numpy as np\n'), ((1991, 2013), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (2011, 2013), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((2032, 2053), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (2051, 2053), False, 'import pybamm\n'), ((2905, 2927), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (2925, 2927), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((3007, 3051), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (3028, 3051), False, 'import pybamm\n'), ((3271, 3312), 'pybamm.Variable', 'pybamm.Variable', (['"""var"""'], {'domain': 'whole_cell'}), "('var', domain=whole_cell)\n", (3286, 3312), False, 'import pybamm\n'), ((3370, 3421), 'numpy.ones_like', 'np.ones_like', (['combined_submesh.nodes[:, np.newaxis]'], {}), '(combined_submesh.nodes[:, np.newaxis])\n', (3382, 3421), True, 'import numpy as np\n'), ((5636, 5658), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (5656, 5658), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((5873, 5917), 'pybamm.Discretisation', 
'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (5894, 5917), False, 'import pybamm\n'), ((5953, 6004), 'pybamm.SpatialVariable', 'pybamm.SpatialVariable', (['"""x"""', "['negative electrode']"], {}), "('x', ['negative electrode'])\n", (5975, 6004), False, 'import pybamm\n'), ((6288, 6352), 'pybamm.SpatialVariable', 'pybamm.SpatialVariable', (['"""x"""', "['negative electrode', 'separator']"], {}), "('x', ['negative electrode', 'separator'])\n", (6310, 6352), False, 'import pybamm\n'), ((7127, 7166), 'pybamm.Variable', 'pybamm.Variable', (['"""c"""'], {'domain': 'whole_cell'}), "('c', domain=whole_cell)\n", (7142, 7166), False, 'import pybamm\n'), ((7179, 7193), 'pybamm.grad', 'pybamm.grad', (['c'], {}), '(c)\n', (7190, 7193), False, 'import pybamm\n'), ((7210, 7228), 'pybamm.BaseModel', 'pybamm.BaseModel', ([], {}), '()\n', (7226, 7228), False, 'import pybamm\n'), ((7533, 7555), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (7553, 7555), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((7635, 7679), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (7656, 7679), False, 'import pybamm\n'), ((7815, 7844), 'numpy.eye', 'np.eye', (['combined_submesh.npts'], {}), '(combined_submesh.npts)\n', (7821, 7844), True, 'import numpy as np\n'), ((8004, 8114), 'pybamm.Variable', 'pybamm.Variable', (['"""c"""'], {'domain': "['negative particle']", 'auxiliary_domains': "{'secondary': 'negative electrode'}"}), "('c', domain=['negative particle'], auxiliary_domains={\n 'secondary': 'negative electrode'})\n", (8019, 8114), False, 'import pybamm\n'), ((8169, 8183), 'pybamm.grad', 'pybamm.grad', (['c'], {}), '(c)\n', (8180, 8183), False, 'import pybamm\n'), ((8200, 8218), 'pybamm.BaseModel', 'pybamm.BaseModel', ([], {}), '()\n', (8216, 8218), False, 'import pybamm\n'), ((8521, 8547), 
'tests.get_p2d_mesh_for_testing', 'get_p2d_mesh_for_testing', ([], {}), '()\n', (8545, 8547), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((8634, 8678), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (8655, 8678), False, 'import pybamm\n'), ((8857, 8870), 'scipy.sparse.eye', 'eye', (['prim_pts'], {}), '(prim_pts)\n', (8860, 8870), False, 'from scipy.sparse import kron, eye\n'), ((9186, 9208), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (9206, 9208), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((9288, 9332), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (9309, 9332), False, 'import pybamm\n'), ((9421, 9442), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (9440, 9442), False, 'import pybamm\n'), ((9518, 9559), 'pybamm.Variable', 'pybamm.Variable', (['"""var"""'], {'domain': 'whole_cell'}), "('var', domain=whole_cell)\n", (9533, 9559), False, 'import pybamm\n'), ((9681, 9732), 'numpy.ones_like', 'np.ones_like', (['combined_submesh.nodes[:, np.newaxis]'], {}), '(combined_submesh.nodes[:, np.newaxis])\n', (9693, 9732), True, 'import numpy as np\n'), ((9763, 9779), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (9774, 9779), False, 'import pybamm\n'), ((10754, 10770), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (10765, 10770), False, 'import pybamm\n'), ((10785, 10801), 'pybamm.div', 'pybamm.div', (['flux'], {}), '(flux)\n', (10795, 10801), False, 'import pybamm\n'), ((11181, 11197), 'pybamm.div', 'pybamm.div', (['flux'], {}), '(flux)\n', (11191, 11197), False, 'import pybamm\n'), ((11547, 11573), 'tests.get_p2d_mesh_for_testing', 'get_p2d_mesh_for_testing', ([], {}), '()\n', (11571, 11573), False, 'from tests import get_mesh_for_testing, 
get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((11788, 11832), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (11809, 11832), False, 'import pybamm\n'), ((11850, 11966), 'pybamm.Variable', 'pybamm.Variable', (['"""c_s_n"""'], {'domain': "['negative particle']", 'auxiliary_domains': "{'secondary': ['negative electrode']}"}), "('c_s_n', domain=['negative particle'], auxiliary_domains={\n 'secondary': ['negative electrode']})\n", (11865, 11966), False, 'import pybamm\n'), ((12025, 12141), 'pybamm.Variable', 'pybamm.Variable', (['"""c_s_p"""'], {'domain': "['positive particle']", 'auxiliary_domains': "{'secondary': ['positive electrode']}"}), "('c_s_p', domain=['positive particle'], auxiliary_domains={\n 'secondary': ['positive electrode']})\n", (12040, 12141), False, 'import pybamm\n'), ((12281, 12299), 'pybamm.surf', 'pybamm.surf', (['c_s_n'], {}), '(c_s_n)\n', (12292, 12299), False, 'import pybamm\n'), ((12321, 12339), 'pybamm.surf', 'pybamm.surf', (['c_s_p'], {}), '(c_s_p)\n', (12332, 12339), False, 'import pybamm\n'), ((12653, 12675), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (12673, 12675), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((12755, 12799), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (12776, 12799), False, 'import pybamm\n'), ((12815, 12837), 'pybamm.Variable', 'pybamm.Variable', (['"""var"""'], {}), "('var')\n", (12830, 12837), False, 'import pybamm\n'), ((12862, 12917), 'pybamm.DeltaFunction', 'pybamm.DeltaFunction', (['var', '"""left"""', '"""negative electrode"""'], {}), "(var, 'left', 'negative electrode')\n", (12882, 12917), False, 'import pybamm\n'), ((12943, 12999), 'pybamm.DeltaFunction', 'pybamm.DeltaFunction', (['var', '"""right"""', '"""negative electrode"""'], {}), "(var, 'right', 'negative electrode')\n", 
(12963, 12999), False, 'import pybamm\n'), ((13220, 13281), 'numpy.ones_like', 'np.ones_like', (["mesh['negative electrode'].nodes[:, np.newaxis]"], {}), "(mesh['negative electrode'].nodes[:, np.newaxis])\n", (13232, 13281), True, 'import numpy as np\n'), ((14536, 14558), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (14556, 14558), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((14638, 14682), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (14659, 14682), False, 'import pybamm\n'), ((14698, 14749), 'pybamm.Variable', 'pybamm.Variable', (['"""var"""'], {'domain': '"""negative electrode"""'}), "('var', domain='negative electrode')\n", (14713, 14749), False, 'import pybamm\n'), ((15259, 15281), 'tests.get_mesh_for_testing', 'get_mesh_for_testing', ([], {}), '()\n', (15279, 15281), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((15361, 15405), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (15382, 15405), False, 'import pybamm\n'), ((15543, 15561), 'pybamm.upwind', 'pybamm.upwind', (['var'], {}), '(var)\n', (15556, 15561), False, 'import pybamm\n'), ((15581, 15601), 'pybamm.downwind', 'pybamm.downwind', (['var'], {}), '(var)\n', (15596, 15601), False, 'import pybamm\n'), ((17307, 17334), 'tests.get_1p1d_mesh_for_testing', 'get_1p1d_mesh_for_testing', ([], {}), '()\n', (17332, 17334), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((17605, 17649), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (17626, 17649), False, 'import pybamm\n'), ((17682, 17721), 'numpy.ones', 'np.ones', (["mesh['current collector'].npts"], {}), "(mesh['current collector'].npts)\n", (17689, 17721), 
True, 'import numpy as np\n'), ((17736, 17786), 'pybamm.Variable', 'pybamm.Variable', (['"""var"""'], {'domain': '"""current collector"""'}), "('var', domain='current collector')\n", (17751, 17786), False, 'import pybamm\n'), ((17861, 17877), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (17872, 17877), False, 'import pybamm\n'), ((17904, 17920), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (17915, 17920), False, 'import pybamm\n'), ((17939, 17952), 'pybamm.div', 'pybamm.div', (['N'], {}), '(N)\n', (17949, 17952), False, 'import pybamm\n'), ((19446, 19473), 'tests.get_1p1d_mesh_for_testing', 'get_1p1d_mesh_for_testing', ([], {}), '()\n', (19471, 19473), False, 'from tests import get_mesh_for_testing, get_p2d_mesh_for_testing, get_1p1d_mesh_for_testing\n'), ((19744, 19788), 'pybamm.Discretisation', 'pybamm.Discretisation', (['mesh', 'spatial_methods'], {}), '(mesh, spatial_methods)\n', (19765, 19788), False, 'import pybamm\n'), ((19818, 19868), 'pybamm.Variable', 'pybamm.Variable', (['"""var"""'], {'domain': '"""current collector"""'}), "('var', domain='current collector')\n", (19833, 19868), False, 'import pybamm\n'), ((19943, 19959), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (19954, 19959), False, 'import pybamm\n'), ((795, 814), 'numpy.ones', 'np.ones', (['(n + 1, 1)'], {}), '((n + 1, 1))\n', (802, 814), True, 'import numpy as np\n'), ((989, 1008), 'numpy.ones', 'np.ones', (['(n + 1, 1)'], {}), '((n + 1, 1))\n', (996, 1008), True, 'import numpy as np\n'), ((1320, 1335), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1327, 1335), True, 'import numpy as np\n'), ((1510, 1525), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (1517, 1525), True, 'import numpy as np\n'), ((2179, 2221), 'pybamm.Vector', 'pybamm.Vector', (['mesh[dom].edges'], {'domain': 'dom'}), '(mesh[dom].edges, domain=dom)\n', (2192, 2221), False, 'import pybamm\n'), ((2969, 2990), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', 
(2988, 2990), False, 'import pybamm\n'), ((4131, 4152), 'pybamm.laplacian', 'pybamm.laplacian', (['var'], {}), '(var)\n', (4147, 4152), False, 'import pybamm\n'), ((5713, 5734), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (5732, 5734), False, 'import pybamm\n'), ((5769, 5790), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (5788, 5790), False, 'import pybamm\n'), ((5825, 5846), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (5844, 5846), False, 'import pybamm\n'), ((6694, 6744), 'pybamm.SpatialVariable', 'pybamm.SpatialVariable', (['"""r"""', "['negative particle']"], {}), "('r', ['negative particle'])\n", (6716, 6744), False, 'import pybamm\n'), ((7253, 7266), 'pybamm.div', 'pybamm.div', (['N'], {}), '(N)\n', (7263, 7266), False, 'import pybamm\n'), ((7307, 7323), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (7320, 7323), False, 'import pybamm\n'), ((7597, 7618), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (7616, 7618), False, 'import pybamm\n'), ((8243, 8256), 'pybamm.div', 'pybamm.div', (['N'], {}), '(N)\n', (8253, 8256), False, 'import pybamm\n'), ((8297, 8313), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (8310, 8313), False, 'import pybamm\n'), ((8596, 8617), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (8615, 8617), False, 'import pybamm\n'), ((8891, 8903), 'scipy.sparse.eye', 'eye', (['sec_pts'], {}), '(sec_pts)\n', (8894, 8903), False, 'from scipy.sparse import kron, eye\n'), ((9250, 9271), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (9269, 9271), False, 'import pybamm\n'), ((10588, 10604), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (10599, 10604), False, 'import pybamm\n'), ((11150, 11166), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (11161, 11166), False, 'import pybamm\n'), ((11628, 11649), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (11647, 11649), False, 'import 
pybamm\n'), ((11684, 11705), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (11703, 11705), False, 'import pybamm\n'), ((11740, 11761), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (11759, 11761), False, 'import pybamm\n'), ((12717, 12738), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (12736, 12738), False, 'import pybamm\n'), ((14278, 14311), 'pybamm.Integral', 'pybamm.Integral', (['delta_fn_left', 'x'], {}), '(delta_fn_left, x)\n', (14293, 14311), False, 'import pybamm\n'), ((14600, 14621), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (14619, 14621), False, 'import pybamm\n'), ((15323, 15344), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (15342, 15344), False, 'import pybamm\n'), ((16126, 16145), 'numpy.ones_like', 'np.ones_like', (['nodes'], {}), '(nodes)\n', (16138, 16145), True, 'import numpy as np\n'), ((17389, 17410), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (17408, 17410), False, 'import pybamm\n'), ((17445, 17466), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (17464, 17466), False, 'import pybamm\n'), ((17501, 17522), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (17520, 17522), False, 'import pybamm\n'), ((17557, 17578), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (17576, 17578), False, 'import pybamm\n'), ((19528, 19549), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (19547, 19549), False, 'import pybamm\n'), ((19584, 19605), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (19603, 19605), False, 'import pybamm\n'), ((19640, 19661), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (19659, 19661), False, 'import pybamm\n'), ((19696, 19717), 'pybamm.FiniteVolume', 'pybamm.FiniteVolume', ([], {}), '()\n', (19715, 19717), False, 'import pybamm\n'), ((2581, 2608), 'numpy.ones', 'np.ones', (['(mesh[dom].npts + 2)'], {}), '(mesh[dom].npts + 
2)\n', (2588, 2608), True, 'import numpy as np\n'), ((3461, 3477), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3472, 3477), False, 'import pybamm\n'), ((3502, 3518), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3513, 3518), False, 'import pybamm\n'), ((3708, 3724), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3719, 3724), False, 'import pybamm\n'), ((3727, 3743), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3738, 3743), False, 'import pybamm\n'), ((3828, 3844), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3839, 3844), False, 'import pybamm\n'), ((20488, 20504), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (20501, 20504), False, 'import pybamm\n'), ((20634, 20650), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (20647, 20650), False, 'import pybamm\n'), ((3538, 3554), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3549, 3554), False, 'import pybamm\n'), ((3580, 3596), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3591, 3596), False, 'import pybamm\n'), ((3616, 3632), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3627, 3632), False, 'import pybamm\n'), ((3661, 3677), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3672, 3677), False, 'import pybamm\n'), ((3758, 3774), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3769, 3774), False, 'import pybamm\n'), ((3782, 3798), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3793, 3798), False, 'import pybamm\n'), ((3870, 3886), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3881, 3886), False, 'import pybamm\n'), ((3916, 3932), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3927, 3932), False, 'import pybamm\n'), ((3968, 3984), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (3979, 3984), False, 'import pybamm\n'), ((9848, 9864), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (9861, 9864), False, 'import pybamm\n'), ((9906, 9922), 
'pybamm.Scalar', 'pybamm.Scalar', (['(2)'], {}), '(2)\n', (9919, 9922), False, 'import pybamm\n'), ((10870, 10886), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (10883, 10886), False, 'import pybamm\n'), ((10926, 10942), 'pybamm.Scalar', 'pybamm.Scalar', (['(2)'], {}), '(2)\n', (10939, 10942), False, 'import pybamm\n'), ((11266, 11282), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (11279, 11282), False, 'import pybamm\n'), ((11322, 11338), 'pybamm.Scalar', 'pybamm.Scalar', (['(2)'], {}), '(2)\n', (11335, 11338), False, 'import pybamm\n'), ((15671, 15687), 'pybamm.Scalar', 'pybamm.Scalar', (['(5)'], {}), '(5)\n', (15684, 15687), False, 'import pybamm\n'), ((15729, 15745), 'pybamm.Scalar', 'pybamm.Scalar', (['(3)'], {}), '(3)\n', (15742, 15745), False, 'import pybamm\n'), ((16844, 16860), 'pybamm.Scalar', 'pybamm.Scalar', (['(5)'], {}), '(5)\n', (16857, 16860), False, 'import pybamm\n'), ((16900, 16916), 'pybamm.Scalar', 'pybamm.Scalar', (['(3)'], {}), '(3)\n', (16913, 16916), False, 'import pybamm\n'), ((18069, 18085), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (18082, 18085), False, 'import pybamm\n'), ((18134, 18150), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (18147, 18150), False, 'import pybamm\n'), ((18541, 18557), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (18554, 18557), False, 'import pybamm\n'), ((18606, 18622), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (18619, 18622), False, 'import pybamm\n'), ((19021, 19037), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (19034, 19037), False, 'import pybamm\n'), ((19078, 19094), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (19091, 19094), False, 'import pybamm\n'), ((20076, 20092), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (20089, 20092), False, 'import pybamm\n'), ((20141, 20157), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (20154, 20157), False, 'import pybamm\n'), ((20198, 
20214), 'pybamm.Scalar', 'pybamm.Scalar', (['(8)'], {}), '(8)\n', (20211, 20214), False, 'import pybamm\n'), ((4014, 4030), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (4025, 4030), False, 'import pybamm\n'), ((4388, 4404), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (4401, 4404), False, 'import pybamm\n'), ((4450, 4466), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (4463, 4466), False, 'import pybamm\n'), ((4708, 4724), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (4721, 4724), False, 'import pybamm\n'), ((4768, 4784), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (4781, 4784), False, 'import pybamm\n'), ((5028, 5044), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (5041, 5044), False, 'import pybamm\n'), ((5090, 5106), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (5103, 5106), False, 'import pybamm\n'), ((5324, 5340), 'pybamm.Scalar', 'pybamm.Scalar', (['(0)'], {}), '(0)\n', (5337, 5340), False, 'import pybamm\n'), ((5384, 5400), 'pybamm.Scalar', 'pybamm.Scalar', (['(1)'], {}), '(1)\n', (5397, 5400), False, 'import pybamm\n'), ((15093, 15112), 'numpy.ones_like', 'np.ones_like', (['nodes'], {}), '(nodes)\n', (15105, 15112), True, 'import numpy as np\n'), ((15182, 15201), 'numpy.ones_like', 'np.ones_like', (['nodes'], {}), '(nodes)\n', (15194, 15201), True, 'import numpy as np\n'), ((16257, 16275), 'numpy.array', 'np.array', (['[5, 0.5]'], {}), '([5, 0.5])\n', (16265, 16275), True, 'import numpy as np\n'), ((16457, 16475), 'numpy.array', 'np.array', (['[1.5, 3]'], {}), '([1.5, 3])\n', (16465, 16475), True, 'import numpy as np\n'), ((4077, 4093), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (4088, 4093), False, 'import pybamm\n'), ((4100, 4116), 'pybamm.grad', 'pybamm.grad', (['var'], {}), '(var)\n', (4111, 4116), False, 'import pybamm\n'), ((16281, 16295), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (16288, 16295), True, 'import numpy as np\n'), ((16441, 
16455), 'numpy.ones', 'np.ones', (['(n - 1)'], {}), '(n - 1)\n', (16448, 16455), True, 'import numpy as np\n')] |
import torch
import torch.cuda
import logging
import numpy as np
import pandas as pd
import random
from datetime import datetime
import os
import sys
sys.path.append('../../')
from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset
from src.models.ARAE import ARAE
from src.models.networks.AE_ResNet18_dual import AE_ResNet18
from src.utils.utils import summary_string
################################################################################
# Settings #
################################################################################
# Import Export
# Import Export: where the data is read from and where all artefacts are written.
Experiment_Name = 'ARAE_hand'
DATA_PATH = r'../../../data/PROCESSED/'
DATA_INFO_PATH = r'../../../data/data_info.csv'
# Timestamped output folder so repeated runs never overwrite each other.
OUTPUT_PATH = r'../../../Outputs/' + Experiment_Name + '_' + datetime.today().strftime('%Y_%m_%d_%Hh%M') + '/'
# make output dirs
# BUGFIX: the old existence check tested OUTPUT_PATH+'models/' while the directory
# actually created (and written to by arae.save_model) is 'model/', so a second run
# within the same minute raised FileExistsError. exist_ok=True makes this idempotent.
os.makedirs(OUTPUT_PATH + 'model/', exist_ok=True)
os.makedirs(OUTPUT_PATH + 'results/', exist_ok=True)
os.makedirs(OUTPUT_PATH + 'logs/', exist_ok=True)
# General
# run on the GPU when one is available, otherwise fall back to the CPU
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
n_thread = 0  # 0 -> leave torch's default intra-op thread count untouched
n_seeds = 1  # number of repetitions of the experiment (e.g. 4 to use all repunit seeds)
# repunit seeds: 1, 11, 111, ... -- one per repetition
seeds = [int(''.join(['1']*(i+1))) for i in range(n_seeds)]
print_batch_progress = True
# Datasets : Unsupervised method (no labelled samples used during training)
train_frac = 0.5  # fraction of the data used for training
ratio_known_normal = 0.00  # fraction of known-normal samples (0 -> fully unsupervised)
ratio_known_abnormal = 0.00  # fraction of known-abnormal samples (0 -> fully unsupervised)
n_jobs_dataloader = 8
batch_size = 16
img_size = 512  # side length (pixels) the images are resized to
# Training
gamma = 0.1 # adversarial loss weight, as suggested in Salehi et al. (2020)
epsilon = 0.05 # the allowed l-inf distance of the adversarial samples to the point
lr = 1e-4
lr_adv = epsilon * 1.25  # step size of the adversarial search, scaled from epsilon
lr_milestone = [40,80]  # epochs at which the learning rate is decayed
n_epoch = 100
n_epoch_adv = 1  # adversarial-search iterations per batch
use_PGD = False  # False -> single-step FGSM instead of multi-step PGD
weight_decay = 1e-6
model_path_to_load = None  # optional checkpoint to resume from
# Network
pretrain = False  # whether the ResNet18 encoder starts from ImageNet weights
# single-channel (grayscale) reconstruction of size img_size x img_size
ae_output_size = (1, img_size, img_size)
################################################################################
# Training #
################################################################################
def main(seed_i):
    """
    Implementation of the unsupervised ARAE model proposed by Salehi et al (2020).
    This unsupervised method applies a projected gradient descent algorithm to find
    a more meaningful latent space for the autoencoder. The encoder is composed of
    a ResNet18 encoder. The decoder is composed of a mirrored ResNet18. The latent
    space has dimension (16,16,512).

    Runs one full experiment (data split, training, validation, test, export)
    for the seed at index `seed_i` of the module-level `seeds` list; all other
    settings are taken from the module-level constants above.
    """
    # initialize logger: one fresh file handler per seed so each repetition
    # writes to its own log file
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger()
    try:
        # drop the file handler left over from a previous seed's run
        # (handlers[0] is the console handler installed by basicConfig)
        logger.handlers[1].stream.close()
        logger.removeHandler(logger.handlers[1])
    except IndexError:
        pass
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
    log_file = OUTPUT_PATH + 'logs/' + f'log_{seed_i+1}.txt'
    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # print path and main docstring with experiment summary
    logger.info('Brief summary of experiment : \n' + main.__doc__)
    logger.info(f'Log file : {log_file}')
    logger.info(f'Data path : {DATA_PATH}')
    logger.info(f'Outputs path : {OUTPUT_PATH}' + '\n')
    ############################## Make datasets ###############################
    # load data_info
    df_info = pd.read_csv(DATA_INFO_PATH)
    # drop the CSV's stored index column
    df_info = df_info.drop(df_info.columns[0], axis=1)
    # remove low contrast images (all black)
    df_info = df_info[df_info.low_contrast == 0]
    # keep only hands
    df_info = df_info[df_info.body_part == 'HAND']
    # Train Validation Test Split (fixed random_state so the split is reproducible
    # across seeds -- only weight init / shuffling varies between repetitions)
    spliter = MURA_TrainValidTestSplitter(df_info, train_frac=train_frac,
                        ratio_known_normal=ratio_known_normal,
                        ratio_known_abnormal=ratio_known_abnormal, random_state=42)
    spliter.split_data(verbose=False)
    train_df = spliter.get_subset('train')
    valid_df = spliter.get_subset('valid')
    test_df = spliter.get_subset('test')
    # make datasets
    train_dataset = MURA_Dataset(train_df, data_path=DATA_PATH, load_mask=True,
                    load_semilabels=True, output_size=img_size)
    valid_dataset = MURA_Dataset(valid_df, data_path=DATA_PATH, load_mask=True,
                    load_semilabels=True, output_size=img_size)
    test_dataset = MURA_Dataset(test_df, data_path=DATA_PATH, load_mask=True,
                    load_semilabels=True, output_size=img_size)
    # print info to logger
    logger.info(f'Train fraction : {train_frac:.0%}')
    logger.info(f'Fraction knonw normal : {ratio_known_normal:.0%}')
    logger.info(f'Fraction known abnormal : {ratio_known_abnormal:.0%}')
    logger.info('Split Summary \n' + str(spliter.print_stat(returnTable=True)))
    logger.info('Online preprocessing pipeline : \n' + str(train_dataset.transform) + '\n')
    ################################ Set Up ####################################
    # Set seed for python, numpy and torch (CPU + CUDA) so runs are reproducible
    seed = seeds[seed_i]
    if seed != -1:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        # trade cuDNN autotuning speed for deterministic results
        torch.backends.cudnn.deterministic = True
        logger.info(f'Set seed {seed_i+1:02}/{n_seeds:02} to {seed}')
    # set number of thread
    if n_thread > 0:
        torch.set_num_threads(n_thread)
    # print info in logger
    logger.info(f'Device : {device}')
    logger.info(f'Number of thread : {n_thread}')
    logger.info(f'Number of dataloader worker for {Experiment_Name} : {n_jobs_dataloader}' + '\n')
    ######################### Networks Initialization ##########################
    # ResNet18 encoder + mirrored decoder autoencoder
    net = AE_ResNet18(pretrain_ResNetEnc=pretrain, output_channels=ae_output_size[0])
    net = net.to(device)
    # add info to logger
    logger.info(f'Network : {net.__class__.__name__}')
    logger.info(f'ResNet18 pretrained on ImageNet : {pretrain}')
    logger.info('Network architecture: \n' + summary_string(net, (1, img_size, img_size), device=str(device), batch_size=batch_size) + '\n')
    # initialization of the Model
    arae = ARAE(net, gamma, epsilon)
    # optionally resume from a previously saved checkpoint
    if model_path_to_load:
        arae.load_model(model_path_to_load, map_location=device)
        logger.info(f'Model Loaded from {model_path_to_load}' + '\n')
    ################################ Training ##################################
    # add parameter info
    logger.info(f'{Experiment_Name} epsilon : {epsilon}')
    logger.info(f'{Experiment_Name} adversarial importance gamma : {gamma}')
    logger.info(f'{Experiment_Name} number of epoch : {n_epoch}')
    logger.info(f'{Experiment_Name} number of adversarial search epoch: {n_epoch_adv}')
    logger.info(f'{Experiment_Name} learning rate : {lr}')
    logger.info(f'{Experiment_Name} adversarial search learning rate : {lr_adv}')
    method_adv = 'PGD' if use_PGD else 'FGSM'
    logger.info(f'{Experiment_Name} adversarial search method : {method_adv}')
    logger.info(f'{Experiment_Name} learning rate milestone : {lr_milestone}')
    logger.info(f'{Experiment_Name} weight_decay : {weight_decay}')
    logger.info(f'{Experiment_Name} optimizer : Adam')
    logger.info(f'{Experiment_Name} batch_size {batch_size}')
    logger.info(f'{Experiment_Name} number of dataloader worker : {n_jobs_dataloader}')
    # train ARAE (validates on valid_dataset during training)
    arae.train(train_dataset, lr=lr, lr_adv=lr_adv, lr_milestone=lr_milestone,
               weight_decay=weight_decay, n_epoch=n_epoch, n_epoch_adv=n_epoch_adv, use_PGD=use_PGD,
               batch_size=batch_size, device=device, n_jobs_dataloader=n_jobs_dataloader,
               print_batch_progress=print_batch_progress, valid_dataset=valid_dataset)
    # validate ARAE
    arae.validate(valid_dataset, device=device,
                  n_jobs_dataloader=n_jobs_dataloader,
                  print_batch_progress=print_batch_progress)
    # test ARAE
    arae.test(test_dataset, device=device,
              n_jobs_dataloader=n_jobs_dataloader,
              print_batch_progress=print_batch_progress)
    # save results
    arae.save_results(OUTPUT_PATH + f'results/{Experiment_Name}_results_{seed_i+1}.json')
    logger.info('Test results saved at ' + OUTPUT_PATH + f'results/{Experiment_Name}_results_{seed_i+1}.json' + '\n')
    # save model
    arae.save_model(OUTPUT_PATH + f'model/{Experiment_Name}_model_{seed_i+1}.pt')
    logger.info('Model saved at ' + OUTPUT_PATH + f'model/{Experiment_Name}_model_{seed_i+1}.pt')
if __name__ == '__main__':
    # repeat the full experiment once per configured seed
    for seed_index in range(n_seeds):
        main(seed_index)
| [
"numpy.random.seed",
"pandas.read_csv",
"logging.Formatter",
"torch.set_num_threads",
"torch.device",
"src.models.ARAE.ARAE",
"sys.path.append",
"logging.FileHandler",
"random.seed",
"datetime.datetime.today",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.is_available",
"src.d... | [((150, 175), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (165, 175), False, 'import sys\n'), ((905, 943), 'os.path.isdir', 'os.path.isdir', (["(OUTPUT_PATH + 'models/')"], {}), "(OUTPUT_PATH + 'models/')\n", (918, 943), False, 'import os\n'), ((943, 978), 'os.makedirs', 'os.makedirs', (["(OUTPUT_PATH + 'model/')"], {}), "(OUTPUT_PATH + 'model/')\n", (954, 978), False, 'import os\n'), ((984, 1023), 'os.path.isdir', 'os.path.isdir', (["(OUTPUT_PATH + 'results/')"], {}), "(OUTPUT_PATH + 'results/')\n", (997, 1023), False, 'import os\n'), ((1023, 1060), 'os.makedirs', 'os.makedirs', (["(OUTPUT_PATH + 'results/')"], {}), "(OUTPUT_PATH + 'results/')\n", (1034, 1060), False, 'import os\n'), ((1066, 1102), 'os.path.isdir', 'os.path.isdir', (["(OUTPUT_PATH + 'logs/')"], {}), "(OUTPUT_PATH + 'logs/')\n", (1079, 1102), False, 'import os\n'), ((1102, 1136), 'os.makedirs', 'os.makedirs', (["(OUTPUT_PATH + 'logs/')"], {}), "(OUTPUT_PATH + 'logs/')\n", (1113, 1136), False, 'import os\n'), ((1179, 1204), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1202, 1204), False, 'import torch\n'), ((1155, 1175), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1167, 1175), False, 'import torch\n'), ((1210, 1229), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1222, 1229), False, 'import torch\n'), ((2542, 2581), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2561, 2581), False, 'import logging\n'), ((2595, 2614), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2612, 2614), False, 'import logging\n'), ((2801, 2863), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s | %(levelname)s | %(message)s"""'], {}), "('%(asctime)s | %(levelname)s | %(message)s')\n", (2818, 2863), False, 'import logging\n'), ((2944, 2973), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (2963, 
2973), False, 'import logging\n'), ((3478, 3505), 'pandas.read_csv', 'pd.read_csv', (['DATA_INFO_PATH'], {}), '(DATA_INFO_PATH)\n', (3489, 3505), True, 'import pandas as pd\n'), ((3777, 3944), 'src.datasets.MURADataset.MURA_TrainValidTestSplitter', 'MURA_TrainValidTestSplitter', (['df_info'], {'train_frac': 'train_frac', 'ratio_known_normal': 'ratio_known_normal', 'ratio_known_abnormal': 'ratio_known_abnormal', 'random_state': '(42)'}), '(df_info, train_frac=train_frac,\n ratio_known_normal=ratio_known_normal, ratio_known_abnormal=\n ratio_known_abnormal, random_state=42)\n', (3804, 3944), False, 'from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset\n'), ((4225, 4333), 'src.datasets.MURADataset.MURA_Dataset', 'MURA_Dataset', (['train_df'], {'data_path': 'DATA_PATH', 'load_mask': '(True)', 'load_semilabels': '(True)', 'output_size': 'img_size'}), '(train_df, data_path=DATA_PATH, load_mask=True, load_semilabels\n =True, output_size=img_size)\n', (4237, 4333), False, 'from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset\n'), ((4382, 4490), 'src.datasets.MURADataset.MURA_Dataset', 'MURA_Dataset', (['valid_df'], {'data_path': 'DATA_PATH', 'load_mask': '(True)', 'load_semilabels': '(True)', 'output_size': 'img_size'}), '(valid_df, data_path=DATA_PATH, load_mask=True, load_semilabels\n =True, output_size=img_size)\n', (4394, 4490), False, 'from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset\n'), ((4538, 4645), 'src.datasets.MURADataset.MURA_Dataset', 'MURA_Dataset', (['test_df'], {'data_path': 'DATA_PATH', 'load_mask': '(True)', 'load_semilabels': '(True)', 'output_size': 'img_size'}), '(test_df, data_path=DATA_PATH, load_mask=True, load_semilabels=\n True, output_size=img_size)\n', (4550, 4645), False, 'from src.datasets.MURADataset import MURA_TrainValidTestSplitter, MURA_Dataset\n'), ((5850, 5925), 'src.models.networks.AE_ResNet18_dual.AE_ResNet18', 'AE_ResNet18', ([], {'pretrain_ResNetEnc': 
'pretrain', 'output_channels': 'ae_output_size[0]'}), '(pretrain_ResNetEnc=pretrain, output_channels=ae_output_size[0])\n', (5861, 5925), False, 'from src.models.networks.AE_ResNet18_dual import AE_ResNet18\n'), ((6284, 6309), 'src.models.ARAE.ARAE', 'ARAE', (['net', 'gamma', 'epsilon'], {}), '(net, gamma, epsilon)\n', (6288, 6309), False, 'from src.models.ARAE import ARAE\n'), ((5218, 5235), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5229, 5235), False, 'import random\n'), ((5244, 5264), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5258, 5264), True, 'import numpy as np\n'), ((5273, 5296), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (5290, 5296), False, 'import torch\n'), ((5305, 5333), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (5327, 5333), False, 'import torch\n'), ((5511, 5542), 'torch.set_num_threads', 'torch.set_num_threads', (['n_thread'], {}), '(n_thread)\n', (5532, 5542), False, 'import torch\n'), ((832, 848), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (846, 848), False, 'from datetime import datetime\n')] |
"""
This module provides graphing functionality
for visualising level data over time.
"""
# pylint: disable=relative-beyond-top-level
import math
import os
import datetime
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter
try:
from .utils import flatten
from .analysis import polyfit, moving_average
from .station import MonitoringStation
except ImportError:
from utils import flatten
from analysis import polyfit, moving_average
from station import MonitoringStation
# Directory of bundled static assets, resolved relative to this module's location.
RESOURCES = os.path.join(os.path.dirname(__file__), 'assets')
# Matplotlib style sheet mimicking the ProPlot look; applied by the plotting helpers below.
PROPLOT_STYLE_SHEET = os.path.join(RESOURCES, 'proplot_style.mplstyle')
def plot_water_levels(stations: list, dates: dict, levels: dict, as_subplots: bool = True,
        use_proplot_style: bool = True, subplots_share_y_axis: bool = False):
    '''
    Plots graph(s) of the level data in stations (which may be a single
    MonitoringStation object or a list of them).
    Stations with an inconsistent typical range are removed in place from
    `stations`, `dates` and `levels` before plotting.
    #### Arguments
    `stations` (list): list of input stations
    `dates` (dict): dates where data is available, keyed by station name
    `levels` (dict): level data corresponding to the given dates, keyed by station name
    `as_subplots` (bool, default = True): whether to use multiple plots on the same figure
    `use_proplot_style` (bool, default = True): use ProPlot stylesheet
    `subplots_share_y_axis` (bool, default = False): if using subplots, all y-axes share same limits
    '''
    # remove all stations with inconsistent typical range
    # BUGFIX: iterate over a shallow copy -- removing items from the list that is
    # being iterated silently skips the element that follows each removal
    for s in list(stations):
        if not s.typical_range_consistent():
            levels.pop(s.name, None)
            dates.pop(s.name, None)
            stations.remove(s)
    assert len(levels) == len(stations)
    if use_proplot_style:
        plt.style.use(PROPLOT_STYLE_SHEET)
    else:
        plt.style.use('default')
    if as_subplots:
        # arrange stations on a grid of x rows by y columns (at most 2 rows)
        y = math.ceil(len(stations) / 2)
        x = round(len(stations) / y)
        # BUGFIX: squeeze=False guarantees a 2-D axes array even when x == 1 or
        # y == 1, so the axs[row][col] indexing below works for 1 or 2 stations
        fig, axs = plt.subplots(x, y, figsize=(12, 6), squeeze=False)
        # first row of subplots
        for i in range(y):
            axs[0][i].plot(list(dates.values())[i], list(levels.values())[i])
            axs[0][i].set_title(stations[i].name)
            axs[0][i].set_xlabel('dates')
            axs[0][i].set_ylabel('water level / $ m $')
            axs[0][i].tick_params(axis='x', rotation=30)
            axs[0][i].set_ylim(0, 1.3 * max(list(levels.values())[i]))
        # second row (one fewer subplot when the station count is odd)
        for i in range(y - (len(stations) % 2)):
            axs[1][i].plot(list(dates.values())[i + y], list(levels.values())[i + y])
            axs[1][i].set_title(stations[i + y].name)
            axs[1][i].set_xlabel('dates')
            axs[1][i].set_ylabel('water level / $ m $')
            axs[1][i].tick_params(axis='x', rotation=30)
            axs[1][i].set_ylim(0, 1.3 * max(list(levels.values())[i + y]))
        if subplots_share_y_axis:
            # give every subplot the same y-range, based on the global maximum
            plt.setp(axs, ylim=(0, 0.5 + max(flatten(list(levels.values())))))
        fig.tight_layout()
        fig.show()
    else:
        # single figure with one line per station
        for s in stations:
            plt.plot(dates[s.name], levels[s.name], label=s.name)
        plt.ylim(ymin=0)
        plt.title('Recorded water levels')
        plt.xlabel('date')
        plt.ylabel('water level / $ m $')
        plt.xticks(rotation=45)
        plt.legend(loc='upper left')
        plt.tight_layout()
    plt.show()
def plot_water_level_with_polyfit(station: MonitoringStation, dates: list, levels: list,
        poly_degree: int = 5, n_points: int = 100, format_dates: bool = True,
        y_axis_from_zero: bool = None, use_proplot_style: bool = True):
    '''
    Plot a station's raw water level readings together with a polynomial
    least-squares best-fit curve sampled over the same date range.
    #### Arguments
    `station` (MonitoringStation): the station whose data is plotted
    `dates` (list): dates where data is available
    `levels` (list): level data corresponding to the given dates
    `poly_degree` (int, default = 5): degree of polynomial fit
    `n_points` (int, default = 100): number of points to sample the polynomial curve
    `format_dates` (bool, default = True): format dates neater
    `y_axis_from_zero` (bool, default = None): whether to start the y-axis from the zero level
    `use_proplot_style` (bool, default = True): use ProPlot stylesheet
    '''
    plt.style.use(PROPLOT_STYLE_SHEET if use_proplot_style else 'default')
    if y_axis_from_zero is None:
        # tidal stations can legitimately dip below zero, so keep autoscaling for them
        y_axis_from_zero = not station.is_tidal
    # least-squares polynomial, its date offset, and the dates as floats
    fit_fn, offset, date_floats = polyfit(dates, levels, poly_degree)
    # raw readings
    plt.plot(dates, levels, '.', label=station.name)
    # best-fit curve sampled at n_points evenly spaced dates
    sample_x = np.linspace(date_floats[0], date_floats[-1], n_points)
    plt.plot(sample_x, fit_fn(sample_x - offset), label='Best-fit curve')
    # shade the station's typical range when it is known and consistent
    if station.typical_range_consistent():
        low, high = station.typical_range
        plt.fill_between([sample_x[0], sample_x[-1]], low, high,
            facecolor='green', alpha=0.2,
            label=f'Typical range: \n{low}-{high}')
    else:
        # dummy point, drawn purely to get an explanatory legend entry
        plt.plot(date_floats[-1], levels[-1], label='(typical range' + '\n' + 'unavailable)')
    # figure-level formatting
    plt.xlabel('date')
    plt.ylabel('water level / $ m $')
    plt.legend(loc='upper left')
    plt.xticks(rotation=45)
    plt.tight_layout()
    if y_axis_from_zero:
        plt.ylim(ymin=0)
    # axis-level formatting (string date formats: https://strftime.org/)
    ax = plt.gca()
    if format_dates:
        ax.xaxis.set_major_formatter(DateFormatter('%d %b, %I:%M %p'))
    plt.show()
def plot_water_level_with_moving_average(station: object, dates: list, levels: list, interval: int = 3,
        format_dates: bool = True, y_axis_from_zero: bool = None, use_proplot_style: bool = True):
    '''
    Plot a station's raw water level readings together with a symmetric
    moving average curve.
    #### Arguments
    `station` (MonitoringStation): the station whose data is plotted
    `dates` (list): dates where data is available
    `levels` (list): level data corresponding to the given dates
    `interval` (int, default = 3): window size for moving average
    `format_dates` (bool, default = True): format dates neater
    `y_axis_from_zero` (bool, default = None): whether to start the y-axis from the zero level
    `use_proplot_style` (bool, default = True): use ProPlot stylesheet
    '''
    plt.style.use(PROPLOT_STYLE_SHEET if use_proplot_style else 'default')
    if y_axis_from_zero is None:
        # tidal stations can legitimately dip below zero, so keep autoscaling for them
        y_axis_from_zero = not station.is_tidal
    # smoothed series: symmetric moving average over `interval` points
    smoothed_dates, smoothed_levels = moving_average(dates, levels, interval)
    # raw readings, then the smoothed curve on top
    plt.plot(dates, levels, '.', label=station.name)
    plt.plot(smoothed_dates, smoothed_levels, label=f'{interval}-point SMA')
    # shade the station's typical range when it is known and consistent
    if station.typical_range_consistent():
        low, high = station.typical_range
        plt.fill_between([dates[0], dates[-1]], low, high,
            facecolor='green', alpha=0.2,
            label=f'Typical range: \n{low}-{high}')
    else:
        # dummy point, drawn purely to get an explanatory legend entry
        plt.plot(smoothed_dates[-1], levels[-1], label='(typical range' + '\n' + 'unavailable)')
    # figure-level formatting
    plt.xlabel('date')
    plt.ylabel('water level / $ m $')
    plt.legend(loc='upper left')
    plt.xticks(rotation=45)
    plt.tight_layout()
    if y_axis_from_zero:
        plt.ylim(ymin=0)
    # axis-level formatting (string date formats: https://strftime.org/)
    ax = plt.gca()
    if format_dates:
        ax.xaxis.set_major_formatter(DateFormatter('%d %b, %I:%M %p'))
    plt.show()
def plot_predicted_water_levels(station: MonitoringStation, dates_future: list[datetime.datetime],
        levels_future_predicted: list[float], format_dates: bool = True, y_axis_from_zero: bool = None,
        use_proplot_style: bool = True, **kwargs):

    '''
    Plots the forecast of a station, including past predictions.
    #### Arguments
    `station` (MonitoringStation): the station which has the forecast to be plotted
    `dates_future` (list[datetime.datetime]): the dates into the future where a forecast is given
    `levels_future_predicted` (list[float]): the forecasted levels
    #### Optional Keywords Arguments
    `format_dates` (bool, default = True): format dates neater
    `y_axis_from_zero` (bool, default = None): whether to start the y-axis from the zero level
    `use_proplot_style` (bool, default = True): use ProPlot stylesheet
    `dates_to_now` (list, default = None): the dates where a past forecast exist
    `levels_to_now` (list, default = None): the true levels over the dates_to_now
    `levels_past_predicted` (list, default = None): the levels predicted by a past forecast
    `metadata` (dict, default = None): additional info about the forecast, including about the model used.
    {'has_past_forecast', 'dataset_size', 'lookback', 'iterations', 'batch_size', 'used_pretrained', 'epochs'}'''  # noqa
    # optional past-forecast data; all three are needed to plot the past forecast
    dates_to_now = kwargs.get('dates_to_now', None)
    levels_to_now = kwargs.get('levels_to_now', None)
    levels_past_predicted = kwargs.get('levels_past_predicted', None)
    _metadata = kwargs.get('metadata', None)
    station_name = station.name
    # metadata, when supplied, is authoritative on whether a past forecast exists;
    # otherwise fall back to checking that all three past-forecast kwargs were given
    if _metadata is not None:
        has_past_forecast = _metadata['has_past_forecast']
    else:
        has_past_forecast = all([i is not None for i in [dates_to_now, levels_to_now, levels_past_predicted]])
    if use_proplot_style:
        plt.style.use(PROPLOT_STYLE_SHEET)
    else:
        plt.style.use('default')
    if y_axis_from_zero is None:
        # tidal stations can legitimately dip below zero, so keep autoscaling for them
        y_axis_from_zero = not station.is_tidal
    # past data: true levels (solid black) vs what was previously forecast (dashed green)
    if has_past_forecast:
        plt.plot(dates_to_now, levels_to_now,
            label='Past levels', color='#000000')
        plt.plot(dates_to_now, levels_past_predicted,
            label='Past forecast', color='#29a762', linestyle='dashed')
    # future forecast (dashed pink)
    plt.plot(dates_future, levels_future_predicted, label='Forecast', color='#c12091', linestyle='dashed')
    # shade the typical range over the whole plotted date span
    if station.typical_range_consistent():
        plt.fill_between(
            [dates_to_now[0], dates_future[-1]] if has_past_forecast else [dates_future[0], dates_future[-1]],
            station.typical_range[0], station.typical_range[1], facecolor='green', alpha=0.2,
            label=f'Typical range: \n{station.typical_range[0]} - {station.typical_range[1]}')
    # graphical - main figure
    plt.xlabel('date')
    plt.ylabel('water level / $ m $')
    plt.legend(loc='upper left')
    plt.title('Water levels and forecast ' +  # noqa
        f'{"for " + station_name if station_name is not None else "(station unspecified)"}')
    plt.xticks(rotation=45)
    plt.tight_layout()
    if y_axis_from_zero:
        plt.ylim(ymin=0)
    # graphical - axes
    ax = plt.gca()
    if format_dates:  # string date formats: https://strftime.org/
        ax.xaxis.set_major_formatter(DateFormatter('%d %b, %I:%M %p'))
    plt.show()
def plot_model_loss(history: list, loss_name: str, model_name: str, use_logscale: bool = True,
        use_proplot_style: bool = True, batch_size: int = None, show_colors: bool = True):
    '''
    Show how the training loss of a model converged over its epochs.
    Called by `floodsystem.forecasts.train_model`.
    #### Arguments
    `history` (list): values of losses to be plotted
    `loss_name` (str): name of loss function used when training model
    `model_name` (str): name of the station which this model is based on
    #### Optional
    `use_logscale` (bool, default = True): whether to show loss on a vertical log axis
    `use_proplot_style` (bool, default = True): use ProPlot stylesheet
    `batch_size` (int, default = None): batch size used when training model; shown as 'unspecified' if None
    `show_colors` (bool, default = True): indicate quality based on loss with colours.
    Only shows when using mse loss (`loss_name == 'Mean Squared Error'`)
    '''
    if batch_size is None:
        batch_size = 'unspecified'
    num_epochs = len(history)
    final_loss = history[-1]
    plt.style.use(PROPLOT_STYLE_SHEET if use_proplot_style else 'default')
    if show_colors and loss_name == 'Mean Squared Error':
        # background bands indicating loss quality (good / borderline / poor)
        good, okay, poor = '#18b83d', '#e3b51e', '#b81818'
        plt.fill_between([1, num_epochs], 1e-5, 1e-3, facecolor=good, alpha=0.4)
        plt.fill_between([1, num_epochs], 1e-3, 1e-2, facecolor=okay, alpha=0.4)
        plt.fill_between([1, num_epochs], 1e-2, 2e-0, facecolor=poor, alpha=0.4)
        # colour the converged-loss line by which band the final loss falls in
        if final_loss < 1e-3:
            loss_color = good
        elif final_loss < 1e-2:
            loss_color = okay
        else:
            loss_color = poor
    else:
        loss_color = None
    plt.title(f'Loss convergence of training for {model_name}')
    plt.plot(np.arange(1, num_epochs + 1), history, label='loss',
        color='#999999', linestyle='dotted', marker='x', markeredgecolor='#000000')
    # horizontal line marking the final (converged) loss value
    plt.plot((1, num_epochs), (final_loss, final_loss),
        label=f'converged on {round(final_loss, 6)}', color=loss_color, linestyle='dashed', zorder=-1)
    plt.xticks(range(num_epochs + 1))
    plt.xlim(left=1, right=num_epochs)
    plt.ylim(bottom=7e-5, top=1.5e-0)
    plt.legend()
    plt.xlabel(f'Epoch number (batch size {batch_size}), out of {num_epochs}')
    plt.ylabel(f'Loss ({loss_name})')
    if use_logscale:
        plt.yscale('log')
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.yscale",
"analysis.moving_average",
"matplotlib.pyplot.style.use",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"os.path.dirname",
"matplotlib.dates.DateFormatter",
"nu... | [((631, 680), 'os.path.join', 'os.path.join', (['RESOURCES', '"""proplot_style.mplstyle"""'], {}), "(RESOURCES, 'proplot_style.mplstyle')\n", (643, 680), False, 'import os\n'), ((572, 597), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (587, 597), False, 'import os\n'), ((3309, 3319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3317, 3319), True, 'from matplotlib import pyplot as plt\n'), ((4532, 4567), 'analysis.polyfit', 'polyfit', (['dates', 'levels', 'poly_degree'], {}), '(dates, levels, poly_degree)\n', (4539, 4567), False, 'from analysis import polyfit, moving_average\n'), ((4595, 4643), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels', '"""."""'], {'label': 'station.name'}), "(dates, levels, '.', label=station.name)\n", (4603, 4643), True, 'from matplotlib import pyplot as plt\n'), ((4706, 4756), 'numpy.linspace', 'np.linspace', (['date_nums[0]', 'date_nums[-1]', 'n_points'], {}), '(date_nums[0], date_nums[-1], n_points)\n', (4717, 4756), True, 'import numpy as np\n'), ((5263, 5281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (5273, 5281), True, 'from matplotlib import pyplot as plt\n'), ((5286, 5319), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level / $ m $"""'], {}), "('water level / $ m $')\n", (5296, 5319), True, 'from matplotlib import pyplot as plt\n'), ((5324, 5352), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (5334, 5352), True, 'from matplotlib import pyplot as plt\n'), ((5357, 5380), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (5367, 5380), True, 'from matplotlib import pyplot as plt\n'), ((5385, 5403), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5401, 5403), True, 'from matplotlib import pyplot as plt\n'), ((5487, 5496), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5494, 5496), True, 'from matplotlib 
import pyplot as plt\n'), ((5640, 5650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5648, 5650), True, 'from matplotlib import pyplot as plt\n'), ((6673, 6712), 'analysis.moving_average', 'moving_average', (['dates', 'levels', 'interval'], {}), '(dates, levels, interval)\n', (6687, 6712), False, 'from analysis import polyfit, moving_average\n'), ((6759, 6807), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels', '"""."""'], {'label': 'station.name'}), "(dates, levels, '.', label=station.name)\n", (6767, 6807), True, 'from matplotlib import pyplot as plt\n'), ((6812, 6874), 'matplotlib.pyplot.plot', 'plt.plot', (['date_nums', 'avg_levels'], {'label': 'f"""{interval}-point SMA"""'}), "(date_nums, avg_levels, label=f'{interval}-point SMA')\n", (6820, 6874), True, 'from matplotlib import pyplot as plt\n'), ((7365, 7383), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (7375, 7383), True, 'from matplotlib import pyplot as plt\n'), ((7388, 7421), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level / $ m $"""'], {}), "('water level / $ m $')\n", (7398, 7421), True, 'from matplotlib import pyplot as plt\n'), ((7426, 7454), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (7436, 7454), True, 'from matplotlib import pyplot as plt\n'), ((7459, 7482), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (7469, 7482), True, 'from matplotlib import pyplot as plt\n'), ((7487, 7505), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7503, 7505), True, 'from matplotlib import pyplot as plt\n'), ((7589, 7598), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7596, 7598), True, 'from matplotlib import pyplot as plt\n'), ((7742, 7752), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7750, 7752), True, 'from matplotlib import pyplot as plt\n'), ((10023, 10130), 'matplotlib.pyplot.plot', 'plt.plot', 
(['dates_future', 'levels_future_predicted'], {'label': '"""Forecast"""', 'color': '"""#c12091"""', 'linestyle': '"""dashed"""'}), "(dates_future, levels_future_predicted, label='Forecast', color=\n '#c12091', linestyle='dashed')\n", (10031, 10130), True, 'from matplotlib import pyplot as plt\n'), ((10531, 10549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (10541, 10549), True, 'from matplotlib import pyplot as plt\n'), ((10554, 10587), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level / $ m $"""'], {}), "('water level / $ m $')\n", (10564, 10587), True, 'from matplotlib import pyplot as plt\n'), ((10592, 10620), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (10602, 10620), True, 'from matplotlib import pyplot as plt\n'), ((10625, 10759), 'matplotlib.pyplot.title', 'plt.title', (['(\'Water levels and forecast \' +\n f"{\'for \' + station_name if station_name is not None else \'(station unspecified)\'}"\n )'], {}), '(\'Water levels and forecast \' +\n f"{\'for \' + station_name if station_name is not None else \'(station unspecified)\'}"\n )\n', (10634, 10759), True, 'from matplotlib import pyplot as plt\n'), ((10773, 10796), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (10783, 10796), True, 'from matplotlib import pyplot as plt\n'), ((10801, 10819), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10817, 10819), True, 'from matplotlib import pyplot as plt\n'), ((10903, 10912), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10910, 10912), True, 'from matplotlib import pyplot as plt\n'), ((11056, 11066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11064, 11066), True, 'from matplotlib import pyplot as plt\n'), ((12794, 12853), 'matplotlib.pyplot.title', 'plt.title', (['f"""Loss convergence of training for {model_name}"""'], {}), "(f'Loss convergence of training for 
{model_name}')\n", (12803, 12853), True, 'from matplotlib import pyplot as plt\n'), ((13185, 13214), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(1)', 'right': 'epoch'}), '(left=1, right=epoch)\n', (13193, 13214), True, 'from matplotlib import pyplot as plt\n'), ((13219, 13250), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(7e-05)', 'top': '(1.5)'}), '(bottom=7e-05, top=1.5)\n', (13227, 13250), True, 'from matplotlib import pyplot as plt\n'), ((13257, 13269), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13267, 13269), True, 'from matplotlib import pyplot as plt\n'), ((13274, 13343), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Epoch number (batch size {batch_size}), out of {epoch}"""'], {}), "(f'Epoch number (batch size {batch_size}), out of {epoch}')\n", (13284, 13343), True, 'from matplotlib import pyplot as plt\n'), ((13348, 13381), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Loss ({loss_name})"""'], {}), "(f'Loss ({loss_name})')\n", (13358, 13381), True, 'from matplotlib import pyplot as plt\n'), ((13434, 13444), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13442, 13444), True, 'from matplotlib import pyplot as plt\n'), ((1768, 1802), 'matplotlib.pyplot.style.use', 'plt.style.use', (['PROPLOT_STYLE_SHEET'], {}), '(PROPLOT_STYLE_SHEET)\n', (1781, 1802), True, 'from matplotlib import pyplot as plt\n'), ((1821, 1845), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (1834, 1845), True, 'from matplotlib import pyplot as plt\n'), ((1966, 2001), 'matplotlib.pyplot.subplots', 'plt.subplots', (['x', 'y'], {'figsize': '(12, 6)'}), '(x, y, figsize=(12, 6))\n', (1978, 2001), True, 'from matplotlib import pyplot as plt\n'), ((3079, 3095), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (3087, 3095), True, 'from matplotlib import pyplot as plt\n'), ((3104, 3138), 'matplotlib.pyplot.title', 'plt.title', (['"""Recorded water levels"""'], {}), "('Recorded 
water levels')\n", (3113, 3138), True, 'from matplotlib import pyplot as plt\n'), ((3147, 3165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""date"""'], {}), "('date')\n", (3157, 3165), True, 'from matplotlib import pyplot as plt\n'), ((3174, 3207), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level / $ m $"""'], {}), "('water level / $ m $')\n", (3184, 3207), True, 'from matplotlib import pyplot as plt\n'), ((3216, 3239), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (3226, 3239), True, 'from matplotlib import pyplot as plt\n'), ((3248, 3276), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (3258, 3276), True, 'from matplotlib import pyplot as plt\n'), ((3285, 3303), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3301, 3303), True, 'from matplotlib import pyplot as plt\n'), ((4257, 4291), 'matplotlib.pyplot.style.use', 'plt.style.use', (['PROPLOT_STYLE_SHEET'], {}), '(PROPLOT_STYLE_SHEET)\n', (4270, 4291), True, 'from matplotlib import pyplot as plt\n'), ((4310, 4334), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (4323, 4334), True, 'from matplotlib import pyplot as plt\n'), ((4913, 5127), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[x1[0], x1[-1]]', 'station.typical_range[0]', 'station.typical_range[1]'], {'facecolor': '"""green"""', 'alpha': '(0.2)', 'label': 'f"""Typical range: \n{station.typical_range[0]}-{station.typical_range[1]}"""'}), '([x1[0], x1[-1]], station.typical_range[0], station.\n typical_range[1], facecolor=\'green\', alpha=0.2, label=\n f"""Typical range: \n{station.typical_range[0]}-{station.typical_range[1]}"""\n )\n', (4929, 5127), True, 'from matplotlib import pyplot as plt\n'), ((5144, 5231), 'matplotlib.pyplot.plot', 'plt.plot', (['date_nums[-1]', 'levels[-1]'], {'label': "('(typical range' + '\\n' + 'unavailable)')"}), "(date_nums[-1], levels[-1], 
label='(typical range' + '\\n' +\n 'unavailable)')\n", (5152, 5231), True, 'from matplotlib import pyplot as plt\n'), ((5437, 5453), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (5445, 5453), True, 'from matplotlib import pyplot as plt\n'), ((6461, 6495), 'matplotlib.pyplot.style.use', 'plt.style.use', (['PROPLOT_STYLE_SHEET'], {}), '(PROPLOT_STYLE_SHEET)\n', (6474, 6495), True, 'from matplotlib import pyplot as plt\n'), ((6514, 6538), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (6527, 6538), True, 'from matplotlib import pyplot as plt\n'), ((6975, 7195), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[dates[0], dates[-1]]', 'station.typical_range[0]', 'station.typical_range[1]'], {'facecolor': '"""green"""', 'alpha': '(0.2)', 'label': 'f"""Typical range: \n{station.typical_range[0]}-{station.typical_range[1]}"""'}), '([dates[0], dates[-1]], station.typical_range[0], station.\n typical_range[1], facecolor=\'green\', alpha=0.2, label=\n f"""Typical range: \n{station.typical_range[0]}-{station.typical_range[1]}"""\n )\n', (6991, 7195), True, 'from matplotlib import pyplot as plt\n'), ((7246, 7333), 'matplotlib.pyplot.plot', 'plt.plot', (['date_nums[-1]', 'levels[-1]'], {'label': "('(typical range' + '\\n' + 'unavailable)')"}), "(date_nums[-1], levels[-1], label='(typical range' + '\\n' +\n 'unavailable)')\n", (7254, 7333), True, 'from matplotlib import pyplot as plt\n'), ((7539, 7555), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (7547, 7555), True, 'from matplotlib import pyplot as plt\n'), ((9609, 9643), 'matplotlib.pyplot.style.use', 'plt.style.use', (['PROPLOT_STYLE_SHEET'], {}), '(PROPLOT_STYLE_SHEET)\n', (9622, 9643), True, 'from matplotlib import pyplot as plt\n'), ((9662, 9686), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (9675, 9686), True, 'from matplotlib import pyplot as plt\n'), ((9804, 9879), 
'matplotlib.pyplot.plot', 'plt.plot', (['dates_to_now', 'levels_to_now'], {'label': '"""Past levels"""', 'color': '"""#000000"""'}), "(dates_to_now, levels_to_now, label='Past levels', color='#000000')\n", (9812, 9879), True, 'from matplotlib import pyplot as plt\n'), ((9900, 10010), 'matplotlib.pyplot.plot', 'plt.plot', (['dates_to_now', 'levels_past_predicted'], {'label': '"""Past forecast"""', 'color': '"""#29a762"""', 'linestyle': '"""dashed"""'}), "(dates_to_now, levels_past_predicted, label='Past forecast', color=\n '#29a762', linestyle='dashed')\n", (9908, 10010), True, 'from matplotlib import pyplot as plt\n'), ((10178, 10480), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['([dates_to_now[0], dates_future[-1]] if has_past_forecast else [\n dates_future[0], dates_future[-1]])', 'station.typical_range[0]', 'station.typical_range[1]'], {'facecolor': '"""green"""', 'alpha': '(0.2)', 'label': 'f"""Typical range: \n{station.typical_range[0]} - {station.typical_range[1]}"""'}), '([dates_to_now[0], dates_future[-1]] if has_past_forecast else\n [dates_future[0], dates_future[-1]], station.typical_range[0], station.\n typical_range[1], facecolor=\'green\', alpha=0.2, label=\n f"""Typical range: \n{station.typical_range[0]} - {station.typical_range[1]}"""\n )\n', (10194, 10480), True, 'from matplotlib import pyplot as plt\n'), ((10853, 10869), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (10861, 10869), True, 'from matplotlib import pyplot as plt\n'), ((12223, 12257), 'matplotlib.pyplot.style.use', 'plt.style.use', (['PROPLOT_STYLE_SHEET'], {}), '(PROPLOT_STYLE_SHEET)\n', (12236, 12257), True, 'from matplotlib import pyplot as plt\n'), ((12276, 12300), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (12289, 12300), True, 'from matplotlib import pyplot as plt\n'), ((12445, 12515), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[1, epoch]', '(1e-05)', '(0.001)'], {'facecolor': 
'GREEN', 'alpha': '(0.4)'}), '([1, epoch], 1e-05, 0.001, facecolor=GREEN, alpha=0.4)\n', (12461, 12515), True, 'from matplotlib import pyplot as plt\n'), ((12522, 12592), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[1, epoch]', '(0.001)', '(0.01)'], {'facecolor': 'YELLOW', 'alpha': '(0.4)'}), '([1, epoch], 0.001, 0.01, facecolor=YELLOW, alpha=0.4)\n', (12538, 12592), True, 'from matplotlib import pyplot as plt\n'), ((12600, 12665), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[1, epoch]', '(0.01)', '(2.0)'], {'facecolor': 'RED', 'alpha': '(0.4)'}), '([1, epoch], 0.01, 2.0, facecolor=RED, alpha=0.4)\n', (12616, 12665), True, 'from matplotlib import pyplot as plt\n'), ((12867, 12890), 'numpy.arange', 'np.arange', (['(1)', '(epoch + 1)'], {}), '(1, epoch + 1)\n', (12876, 12890), True, 'import numpy as np\n'), ((13411, 13428), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (13421, 13428), True, 'from matplotlib import pyplot as plt\n'), ((3016, 3069), 'matplotlib.pyplot.plot', 'plt.plot', (['dates[s.name]', 'levels[s.name]'], {'label': 's.name'}), '(dates[s.name], levels[s.name], label=s.name)\n', (3024, 3069), True, 'from matplotlib import pyplot as plt\n'), ((5601, 5633), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%d %b, %I:%M %p"""'], {}), "('%d %b, %I:%M %p')\n", (5614, 5633), False, 'from matplotlib.dates import DateFormatter\n'), ((7703, 7735), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%d %b, %I:%M %p"""'], {}), "('%d %b, %I:%M %p')\n", (7716, 7735), False, 'from matplotlib.dates import DateFormatter\n'), ((11017, 11049), 'matplotlib.dates.DateFormatter', 'DateFormatter', (['"""%d %b, %I:%M %p"""'], {}), "('%d %b, %I:%M %p')\n", (11030, 11049), False, 'from matplotlib.dates import DateFormatter\n')] |
from planer import read_net, resize
import numpy as np
import scipy.ndimage as ndimg
import random, math, itertools
from tqdm import tqdm
import os.path as osp
root = osp.abspath(osp.dirname(__file__))
def progress(i, n, bar=[None]):
if bar[0] is None:
bar[0] = tqdm()
bar[0].total = n
bar[0].update(1)
if n==i: bar[0] = None
def load_model(names='cyto_0'):
if isinstance(names, str): return read_net(root+'/models/'+names)
return [read_net(root+'/models/'+i) for i in names]
def count_flow(nets, img, cn=[0,0], size=512, work=1):
if not isinstance(nets, list): nets = [nets]
img = np.asarray(img)[None, cn, :, :]
h, w = img.shape[-2:]
if not(h==w==size): img = resize(img, (size,size))
y = np.zeros((1,3)+img.shape[2:], img.dtype)
style = np.zeros((1,256), img.dtype)
def one(net, img):
i, s = net(img)
y[:] += i; style[:] += s
if work>1 and len(nets)>1:
from concurrent.futures import ThreadPoolExecutor
pool = ThreadPoolExecutor(max_workers=work, thread_name_prefix="flow")
for net in nets: pool.submit(one, net, img)
pool.shutdown(wait=True)
else:
for net in nets: one(net, img)
if len(nets)>0:
y /= len(nets); style /= len(nets)
if not(h==w==size): y = resize(y, (h, w))
return y[0].transpose(1,2,0), style
def make_slice(l, w, mar):
r = np.linspace(w//2, l-w//2, math.ceil((l-mar)/(w-mar))).astype(int)
return [slice(i-w//2, i+w//2) for i in r.tolist()]
def grid_slice(H, W, size, mar):
a, b = make_slice(H, size, mar), make_slice(W, size, mar)
return list(itertools.product(a, b))
def get_flow(nets, img, cn=[0,0], sample=1, size=512, tile=True, work=1, callback=progress):
if not isinstance(nets, list): nets = [nets]
if img.ndim==2: img = np.asarray(img[None,:,:])[cn]
else: img = np.asarray(img.transpose(2,0,1))[cn]
(_, H, W), k = img.shape, sample;
h, w = (size, size) if not tile else (max(size,int(H*k)), max(int(W*k), size))
needresize = ((k!=1 or min(H, W)<size) and tile) or (not(H==W==size) and not tile)
simg = img if not needresize else resize(img[:,:,:], (h, w))
rcs = grid_slice(h, w, size, size//10)
flow = np.zeros((3, h, w), dtype=simg.dtype)
style = np.zeros((1, 256), dtype=simg.dtype)
count = np.zeros(simg.shape[1:], 'uint8')
def one(sr, sc, sz, wk, s=[0]):
flw_prb, sty = count_flow(nets, simg[:,sr,sc], slice(None), sz, wk)
flow[:, sr, sc] += flw_prb.transpose(2,0,1)
style[:] += sty
count[sr, sc] += 1
s[0] += 1
callback(s[0], len(rcs))
if work>1 and len(rcs)>1:
from concurrent.futures import ThreadPoolExecutor
pool = ThreadPoolExecutor(max_workers=work, thread_name_prefix="net")
for i in range(len(rcs)): pool.submit(one, *rcs[i], size, 1)
pool.shutdown(wait=True)
else:
for slr, slc in rcs: one(slr, slc, size, work)
flow /= count; style /= len(rcs)
if needresize: flow = resize(flow, (H, W))
flow[2]*=-1; np.exp(flow[2], out=flow[2]);
flow[2]+=1; np.divide(1, flow[2], out=flow[2])
return flow.transpose(1,2,0), style
def estimate_volumes(arr, sigma=3):
msk = arr > 50;
idx = np.arange(len(arr), dtype=np.uint32)
idx, arr = idx[msk], arr[msk]
for k in np.linspace(5, sigma, 5):
std = arr.std()
dif = np.abs(arr - arr.mean())
msk = dif < std * k
idx, arr = idx[msk], arr[msk]
return arr.mean(), arr.std()
def flow2msk(flowp, level=0.5, grad=0.5, area=None, volume=None):
flowp = np.asarray(flowp)
shp, dim = flowp.shape[:-1], flowp.ndim - 1
l = np.linalg.norm(flowp[:,:,:2], axis=-1)
flow = flowp[:,:,:2]/l.reshape(shp+(1,))
flow[(flowp[:,:,2]<level)|(l<grad)] = 0
ss = ((slice(None),) * (dim) + ([0,-1],)) * 2
for i in range(dim):flow[ss[dim-i:-i-2]+(i,)]=0
sn = np.sign(flow); sn *= 0.5; flow += sn;
dn = flow.astype(np.int32).reshape(-1, dim)
strides = np.cumprod(np.array((1,)+shp[::-1]))
dn = (strides[-2::-1] * dn).sum(axis=-1)
rst = np.arange(flow.size//dim); rst += dn
for i in range(10): rst = rst[rst]
hist = np.bincount(rst, None, len(rst))
hist = hist.astype(np.uint32).reshape(shp)
lab, n = ndimg.label(hist, np.ones((3,)*dim))
volumes = ndimg.sum(hist, lab, np.arange(n+1))
areas = np.bincount(lab.ravel())
mean, std = estimate_volumes(volumes, 2)
if not volume: volume = max(mean-std*3, 50)
if not area: area = volumes // 3
msk = (areas<area) & (volumes>volume)
lut = np.zeros(n+1, np.uint32)
lut[msk] = np.arange(1, msk.sum()+1)
return lut[lab].ravel()[rst].reshape(shp)
return hist, lut[lab], mask
if __name__ == '__main__':
a = np.random.rand(100)
a[3] = 2
idx = filter(a)
| [
"numpy.divide",
"tqdm.tqdm",
"planer.resize",
"math.ceil",
"numpy.random.rand",
"os.path.dirname",
"numpy.asarray",
"numpy.zeros",
"planer.read_net",
"numpy.ones",
"numpy.linalg.norm",
"numpy.exp",
"numpy.linspace",
"numpy.sign",
"numpy.arange",
"itertools.product",
"concurrent.futur... | [((180, 201), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (191, 201), True, 'import os.path as osp\n'), ((734, 777), 'numpy.zeros', 'np.zeros', (['((1, 3) + img.shape[2:])', 'img.dtype'], {}), '((1, 3) + img.shape[2:], img.dtype)\n', (742, 777), True, 'import numpy as np\n'), ((786, 815), 'numpy.zeros', 'np.zeros', (['(1, 256)', 'img.dtype'], {}), '((1, 256), img.dtype)\n', (794, 815), True, 'import numpy as np\n'), ((2188, 2225), 'numpy.zeros', 'np.zeros', (['(3, h, w)'], {'dtype': 'simg.dtype'}), '((3, h, w), dtype=simg.dtype)\n', (2196, 2225), True, 'import numpy as np\n'), ((2237, 2273), 'numpy.zeros', 'np.zeros', (['(1, 256)'], {'dtype': 'simg.dtype'}), '((1, 256), dtype=simg.dtype)\n', (2245, 2273), True, 'import numpy as np\n'), ((2285, 2318), 'numpy.zeros', 'np.zeros', (['simg.shape[1:]', '"""uint8"""'], {}), "(simg.shape[1:], 'uint8')\n", (2293, 2318), True, 'import numpy as np\n'), ((2991, 3019), 'numpy.exp', 'np.exp', (['flow[2]'], {'out': 'flow[2]'}), '(flow[2], out=flow[2])\n', (2997, 3019), True, 'import numpy as np\n'), ((3036, 3070), 'numpy.divide', 'np.divide', (['(1)', 'flow[2]'], {'out': 'flow[2]'}), '(1, flow[2], out=flow[2])\n', (3045, 3070), True, 'import numpy as np\n'), ((3262, 3286), 'numpy.linspace', 'np.linspace', (['(5)', 'sigma', '(5)'], {}), '(5, sigma, 5)\n', (3273, 3286), True, 'import numpy as np\n'), ((3525, 3542), 'numpy.asarray', 'np.asarray', (['flowp'], {}), '(flowp)\n', (3535, 3542), True, 'import numpy as np\n'), ((3599, 3639), 'numpy.linalg.norm', 'np.linalg.norm', (['flowp[:, :, :2]'], {'axis': '(-1)'}), '(flowp[:, :, :2], axis=-1)\n', (3613, 3639), True, 'import numpy as np\n'), ((3838, 3851), 'numpy.sign', 'np.sign', (['flow'], {}), '(flow)\n', (3845, 3851), True, 'import numpy as np\n'), ((4030, 4057), 'numpy.arange', 'np.arange', (['(flow.size // dim)'], {}), '(flow.size // dim)\n', (4039, 4057), True, 'import numpy as np\n'), ((4517, 4543), 'numpy.zeros', 'np.zeros', 
(['(n + 1)', 'np.uint32'], {}), '(n + 1, np.uint32)\n', (4525, 4543), True, 'import numpy as np\n'), ((4699, 4718), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (4713, 4718), True, 'import numpy as np\n'), ((273, 279), 'tqdm.tqdm', 'tqdm', ([], {}), '()\n', (277, 279), False, 'from tqdm import tqdm\n'), ((416, 451), 'planer.read_net', 'read_net', (["(root + '/models/' + names)"], {}), "(root + '/models/' + names)\n", (424, 451), False, 'from planer import read_net, resize\n'), ((459, 490), 'planer.read_net', 'read_net', (["(root + '/models/' + i)"], {}), "(root + '/models/' + i)\n", (467, 490), False, 'from planer import read_net, resize\n'), ((616, 631), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (626, 631), True, 'import numpy as np\n'), ((702, 727), 'planer.resize', 'resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (708, 727), False, 'from planer import read_net, resize\n'), ((989, 1052), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'work', 'thread_name_prefix': '"""flow"""'}), "(max_workers=work, thread_name_prefix='flow')\n", (1007, 1052), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((1267, 1284), 'planer.resize', 'resize', (['y', '(h, w)'], {}), '(y, (h, w))\n', (1273, 1284), False, 'from planer import read_net, resize\n'), ((1593, 1616), 'itertools.product', 'itertools.product', (['a', 'b'], {}), '(a, b)\n', (1610, 1616), False, 'import random, math, itertools\n'), ((2109, 2137), 'planer.resize', 'resize', (['img[:, :, :]', '(h, w)'], {}), '(img[:, :, :], (h, w))\n', (2115, 2137), False, 'from planer import read_net, resize\n'), ((2670, 2732), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'work', 'thread_name_prefix': '"""net"""'}), "(max_workers=work, thread_name_prefix='net')\n", (2688, 2732), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((2954, 2974), 'planer.resize', 'resize', (['flow', '(H, 
W)'], {}), '(flow, (H, W))\n', (2960, 2974), False, 'from planer import read_net, resize\n'), ((3949, 3975), 'numpy.array', 'np.array', (['((1,) + shp[::-1])'], {}), '((1,) + shp[::-1])\n', (3957, 3975), True, 'import numpy as np\n'), ((4228, 4247), 'numpy.ones', 'np.ones', (['((3,) * dim)'], {}), '((3,) * dim)\n', (4235, 4247), True, 'import numpy as np\n'), ((4282, 4298), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (4291, 4298), True, 'import numpy as np\n'), ((1785, 1812), 'numpy.asarray', 'np.asarray', (['img[None, :, :]'], {}), '(img[None, :, :])\n', (1795, 1812), True, 'import numpy as np\n'), ((1386, 1418), 'math.ceil', 'math.ceil', (['((l - mar) / (w - mar))'], {}), '((l - mar) / (w - mar))\n', (1395, 1418), False, 'import random, math, itertools\n')] |
import os
from torchvision import datasets
import numpy as np
from scipy.io import loadmat
from .base_load_data import base_load_data
import wget
class dynamic_mnist_loader(base_load_data):
def __init__(self, args, use_fixed_validation=False, no_binarization=False):
super(dynamic_mnist_loader, self).__init__(args, use_fixed_validation, no_binarization=no_binarization)
def obtain_data(self):
train = datasets.MNIST(os.path.join('datasets', self.args.dataset_name), train=True, download=True)
test = datasets.MNIST(os.path.join('datasets', self.args.dataset_name), train=False)
return train, test
class fashion_mnist_loader(base_load_data):
def __init__(self, args, use_fixed_validation=False, no_binarization=False):
super(fashion_mnist_loader, self).__init__(args, use_fixed_validation, no_binarization=no_binarization)
def obtain_data(self):
train = datasets.FashionMNIST(os.path.join('datasets', self.args.dataset_name), train=True, download=True)
test = datasets.FashionMNIST(os.path.join('datasets', self.args.dataset_name), train=False)
return train, test
class svhn_loader(base_load_data):
def __init__(self, args, use_fixed_validation=False, no_binarization=False):
super(svhn_loader, self).__init__(args, use_fixed_validation, no_binarization=no_binarization)
def obtain_data(self):
train = datasets.SVHN(os.path.join('datasets', self.args.dataset_name), split='train', download=True)
test = datasets.SVHN(os.path.join('datasets', self.args.dataset_name), split='test', download=True)
return train, test
def seperate_data_from_label(self, train_dataset, test_dataset):
x_train = train_dataset.data
y_train = train_dataset.labels.astype(dtype=int)
x_test = test_dataset.data
y_test = test_dataset.labels.astype(dtype=int)
return x_train, y_train, x_test, y_test
class static_mnist_loader(base_load_data):
def __init__(self, args, use_fixed_validation=False, no_binarization=False):
super(static_mnist_loader, self).__init__(args, use_fixed_validation, no_binarization=no_binarization)
def obtain_data(self):
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('datasets', self.args.dataset_name, 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('datasets', self.args.dataset_name, 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('datasets', self.args.dataset_name, 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
y_train = np.zeros((x_train.shape[0], 1)).astype(int)
y_val = np.zeros((x_val.shape[0], 1)).astype(int)
y_test = np.zeros((x_test.shape[0], 1)).astype(int)
return (x_train, x_val, y_train, y_val), (x_test, y_test)
def seperate_data_from_label(self, train_dataset, test_dataset):
x_train, x_val, y_train, y_val = train_dataset
x_test, y_test = test_dataset
return (x_train, x_val), (y_train, y_val), x_test, y_test
def preprocessing_(self, x_train, x_test):
return x_train, x_test
class omniglot_loader(base_load_data):
def __init__(self, args, use_fixed_validation=False, no_binarization=False):
super(omniglot_loader, self).__init__(args, use_fixed_validation, no_binarization=no_binarization)
def obtain_data(self):
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28*28), order='F')
dataset_file = os.path.join('datasets', self.args.dataset_name, 'chardata.mat')
if not os.path.exists(dataset_file):
url = "https://raw.githubusercontent.com/yburda/iwae/master/datasets/OMNIGLOT/chardata.mat"
wget.download(url, dataset_file)
omni_raw = loadmat(os.path.join('datasets', self.args.dataset_name, 'chardata.mat'))
x_train = reshape_data(omni_raw['data'].T.astype('float32'))
x_test = reshape_data(omni_raw['testdata'].T.astype('float32'))
y_train = omni_raw['targetchar'].reshape((-1, 1))
y_test = omni_raw['testtargetchar'].reshape((-1, 1))
return (x_train, y_train), (x_test, y_test)
def seperate_data_from_label(self, train_dataset, test_dataset):
x_train, y_train = train_dataset
x_test, y_test = test_dataset
return x_train, y_train, x_test, y_test
def preprocessing_(self, x_train, x_test):
return x_train, x_test
class cifar10_loader(base_load_data):
def __init__(self, args, use_fixed_validation=False, no_binarization=False):
super(cifar10_loader, self).__init__(args, use_fixed_validation, no_binarization=no_binarization)
def obtain_data(self):
training_dataset = datasets.CIFAR10(os.path.join('datasets', self.args.dataset_name), train=True, download=True)
test_dataset = datasets.CIFAR10(os.path.join('datasets', self.args.dataset_name), train=False)
return training_dataset, test_dataset
def seperate_data_from_label(self, train_dataset, test_dataset):
train_data = np.swapaxes(np.swapaxes(train_dataset.data, 1, 2), 1, 3)
y_train = np.zeros((train_data.shape[0], 1)).astype(int)
test_data = np.swapaxes(np.swapaxes(test_dataset.data, 1, 2), 1, 3)
y_test = np.zeros((test_data.shape[0], 1)).astype(int)
return train_data, y_train, test_data, y_test
def load_dataset(args, training_num=None, use_fixed_validation=False, no_binarization=False, **kwargs):
if training_num is not None:
args.training_set_size = training_num
if args.dataset_name == 'static_mnist':
args.input_size = [1, 28, 28]
args.input_type = 'binary'
train_loader, val_loader, test_loader, args = static_mnist_loader(args).load_dataset(**kwargs)
elif args.dataset_name == 'dynamic_mnist':
if training_num is None:
args.training_set_size = 50000
args.input_size = [1, 28, 28]
if args.continuous is True:
args.input_type = 'gray'
args.dynamic_binarization = False
no_binarization = True
else:
args.input_type = 'binary'
args.dynamic_binarization = True
train_loader, val_loader, test_loader, args = \
dynamic_mnist_loader(args, use_fixed_validation, no_binarization=no_binarization).load_dataset(**kwargs)
elif args.dataset_name == 'fashion_mnist':
if training_num is None:
args.training_set_size = 50000
args.input_size = [1, 28, 28]
if args.continuous is True:
print("*****Continuous Data*****")
args.input_type = 'gray'
args.dynamic_binarization = False
no_binarization = True
else:
args.input_type = 'binary'
args.dynamic_binarization = True
train_loader, val_loader, test_loader, args = \
fashion_mnist_loader(args, use_fixed_validation, no_binarization=no_binarization).load_dataset(**kwargs)
elif args.dataset_name == 'omniglot':
if training_num is None:
args.training_set_size = 23000
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = True
train_loader, val_loader, test_loader, args = omniglot_loader(args).load_dataset(**kwargs)
elif args.dataset_name == 'svhn':
args.training_set_size = 60000
args.input_size = [3, 32, 32]
args.input_type = 'continuous'
train_loader, val_loader, test_loader, args = svhn_loader(args).load_dataset(**kwargs)
elif args.dataset_name == 'cifar10':
args.training_set_size = 40000
args.input_size = [3, 32, 32]
args.input_type = 'continuous'
train_loader, val_loader, test_loader, args = cifar10_loader(args).load_dataset(**kwargs)
else:
raise Exception('Wrong name of the dataset!')
print('train size', len(train_loader.dataset))
if val_loader is not None:
print('val size', len(val_loader.dataset))
print('test size', len(test_loader.dataset))
return train_loader, val_loader, test_loader, args
| [
"numpy.zeros",
"os.path.exists",
"wget.download",
"numpy.swapaxes",
"os.path.join"
] | [((3867, 3931), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name', '"""chardata.mat"""'], {}), "('datasets', self.args.dataset_name, 'chardata.mat')\n", (3879, 3931), False, 'import os\n'), ((443, 491), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (455, 491), False, 'import os\n'), ((550, 598), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (562, 598), False, 'import os\n'), ((945, 993), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (957, 993), False, 'import os\n'), ((1059, 1107), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (1071, 1107), False, 'import os\n'), ((1428, 1476), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (1440, 1476), False, 'import os\n'), ((1537, 1585), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (1549, 1585), False, 'import os\n'), ((3947, 3975), 'os.path.exists', 'os.path.exists', (['dataset_file'], {}), '(dataset_file)\n', (3961, 3975), False, 'import os\n'), ((4093, 4125), 'wget.download', 'wget.download', (['url', 'dataset_file'], {}), '(url, dataset_file)\n', (4106, 4125), False, 'import wget\n'), ((4154, 4218), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name', '"""chardata.mat"""'], {}), "('datasets', self.args.dataset_name, 'chardata.mat')\n", (4166, 4218), False, 'import os\n'), ((5109, 5157), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (5121, 5157), False, 'import os\n'), ((5226, 5274), 'os.path.join', 'os.path.join', 
(['"""datasets"""', 'self.args.dataset_name'], {}), "('datasets', self.args.dataset_name)\n", (5238, 5274), False, 'import os\n'), ((5438, 5475), 'numpy.swapaxes', 'np.swapaxes', (['train_dataset.data', '(1)', '(2)'], {}), '(train_dataset.data, 1, 2)\n', (5449, 5475), True, 'import numpy as np\n'), ((5580, 5616), 'numpy.swapaxes', 'np.swapaxes', (['test_dataset.data', '(1)', '(2)'], {}), '(test_dataset.data, 1, 2)\n', (5591, 5616), True, 'import numpy as np\n'), ((2347, 2425), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name', '"""binarized_mnist_train.amat"""'], {}), "('datasets', self.args.dataset_name, 'binarized_mnist_train.amat')\n", (2359, 2425), False, 'import os\n'), ((2546, 2624), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name', '"""binarized_mnist_valid.amat"""'], {}), "('datasets', self.args.dataset_name, 'binarized_mnist_valid.amat')\n", (2558, 2624), False, 'import os\n'), ((2743, 2820), 'os.path.join', 'os.path.join', (['"""datasets"""', 'self.args.dataset_name', '"""binarized_mnist_test.amat"""'], {}), "('datasets', self.args.dataset_name, 'binarized_mnist_test.amat')\n", (2755, 2820), False, 'import os\n'), ((2941, 2972), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0], 1)'], {}), '((x_train.shape[0], 1))\n', (2949, 2972), True, 'import numpy as np\n'), ((3001, 3030), 'numpy.zeros', 'np.zeros', (['(x_val.shape[0], 1)'], {}), '((x_val.shape[0], 1))\n', (3009, 3030), True, 'import numpy as np\n'), ((3060, 3090), 'numpy.zeros', 'np.zeros', (['(x_test.shape[0], 1)'], {}), '((x_test.shape[0], 1))\n', (3068, 3090), True, 'import numpy as np\n'), ((5501, 5535), 'numpy.zeros', 'np.zeros', (['(train_data.shape[0], 1)'], {}), '((train_data.shape[0], 1))\n', (5509, 5535), True, 'import numpy as np\n'), ((5641, 5674), 'numpy.zeros', 'np.zeros', (['(test_data.shape[0], 1)'], {}), '((test_data.shape[0], 1))\n', (5649, 5674), True, 'import numpy as np\n')] |
"""
data generator for feeding data into pytorch models
"""
import os, sys
import json
import math
from random import shuffle, randint, uniform, sample
from copy import deepcopy
from functools import reduce
from typing import Optional, List, Tuple, Sequence, NoReturn
import numpy as np
# Global numpy print formatting for this module: 5 significant digits,
# no scientific notation (affects any array printed during training/debug).
np.set_printoptions(precision=5, suppress=True)
try:
from tqdm.auto import tqdm
except ModuleNotFoundError:
from tqdm import tqdm
import torch
from torch.utils.data.dataset import Dataset
from sklearn.preprocessing import StandardScaler
try:
import torch_ecg
except ModuleNotFoundError:
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from torch_ecg.cfg import CFG
from torch_ecg.databases import CPSC2019 as CR
from torch_ecg._preprocessors import PreprocManager
from cfg import TrainCfg, ModelCfg
# Make new torch tensors default to float64 when the model config asks for it,
# so the dataset dtype below matches the model's parameters.
if ModelCfg.torch_dtype == torch.float64:
    torch.set_default_tensor_type(torch.DoubleTensor)
# Public API of this module.
__all__ = [
    "CPSC2019",
]
class CPSC2019(Dataset):
    """PyTorch ``Dataset`` over the CPSC2019 QRS-detection database.

    Each item is the ``(signal, label)`` pair produced by ``FastDataReader``;
    with ``lazy=False`` all pairs are preloaded into numpy arrays up front.
    """
    __DEBUG__ = False
    __name__ = "CPSC2019"
    def __init__(self,
                 config:CFG,
                 training:bool=True,
                 lazy:bool=False,) -> NoReturn:
        """ finished, checked,
        Parameters
        ----------
        config: dict,
            configurations for the Dataset,
            ref. `cfg.TrainCfg`
        training: bool, default True,
            if True, the training set will be loaded, otherwise the test set
        lazy: bool, default False,
            if True, the data will not be loaded immediately,
        """
        super().__init__()
        self.config = deepcopy(config)
        self.reader = CR(db_dir=config.db_dir)
        self.training = training
        self.n_classes = 1
        self.lazy = lazy
        # numpy dtype mirrors the configured torch dtype
        if self.config.torch_dtype == torch.float64:
            self.dtype = np.float64
        else:
            self.dtype = np.float32
        self.siglen = self.config.input_len # alias, for simplicity
        self.records = []
        # populate self.records with the train (or test) record names
        self._train_test_split(
            train_ratio=self.config.train_ratio,
            force_recompute=False,
        )
        self.ppm = PreprocManager.from_config(self.config)
        self.fdr = FastDataReader(self.reader, self.records, self.config, self.ppm)
        # caches filled by _load_all_data() when lazy is False
        self._signals = None
        self._labels = None
        if not self.lazy:
            self._load_all_data()
    def __getitem__(self, index:int) -> Tuple[np.ndarray, np.ndarray]:
        """Return the ``(signal, label)`` pair for record ``index``,
        either from the preloaded cache or read on demand."""
        if self.lazy:
            signal, label = self.fdr[index]
        else:
            signal, label = self._signals[index], self._labels[index]
        return signal, label
    def __len__(self) -> int:
        """Number of records in the selected split."""
        return len(self.fdr)
    def _load_all_data(self) -> NoReturn:
        """Eagerly load every record into ``self._signals`` / ``self._labels``
        (stacked into numpy arrays) with a progress bar."""
        self._signals, self._labels = [], []
        with tqdm(self.fdr, desc="loading data", unit="records") as pbar:
            for sig, lab in pbar:
                self._signals.append(sig)
                self._labels.append(lab)
        self._signals = np.array(self._signals)
        self._labels = np.array(self._labels)
    @property
    def signals(self) -> np.ndarray:
        """Preloaded signals (``None`` when constructed with ``lazy=True``)."""
        return self._signals
    @property
    def labels(self) -> np.ndarray:
        """Preloaded labels (``None`` when constructed with ``lazy=True``)."""
        return self._labels
    def _train_test_split(self, train_ratio:float=0.8, force_recompute:bool=False) -> List[str]:
        """
        do train test split,
        it is ensured that both the train and the test set contain all classes
        Parameters
        ----------
        train_ratio: float, default 0.8,
            ratio of the train set in the whole dataset (or the whole tranche(s))
        force_recompute: bool, default False,
            if True, force redo the train-test split,
            regardless of the existing ones stored in json files
        NOTE(review): despite the ``List[str]`` annotation, this method returns
        ``None`` on every path and stores the split in ``self.records`` instead.
        """
        assert 0 < train_ratio < 100
        # NOTE(review): train_ratio == 1 is interpreted as 1% (1/100), not
        # 100% -- confirm this edge case is intended.
        _train_ratio = train_ratio if train_ratio < 1 else train_ratio/100
        split_fn = os.path.join(self.reader.db_dir, f"train_test_split_{_train_ratio:.2f}.json")
        # reuse a cached split for this ratio if one exists
        if os.path.isfile(split_fn) and not force_recompute:
            with open(split_fn, "r") as f:
                split_res = json.load(f)
            if self.training:
                self.records = split_res["train"]
                shuffle(self.records)
            else:
                self.records = split_res["test"]
            return
        # otherwise compute a fresh split and cache it to json
        records = deepcopy(self.reader.all_records)
        shuffle(records)
        split_num = int(_train_ratio*len(records))
        train = sorted(records[:split_num])
        test = sorted(records[split_num:])
        split_res = {"train":train, "test":test}
        with open(split_fn, "w") as f:
            json.dump(split_res, f, ensure_ascii=False)
        if self.training:
            self.records = train
            shuffle(self.records)
        else:
            self.records = test
class FastDataReader(Dataset):
    """Lightweight per-record loader: turns a CPSC2019 record into a
    ``(signal, qrs_mask)`` training pair on demand.
    """
    def __init__(self, reader:CR, records:Sequence[str], config:CFG, ppm:Optional[PreprocManager]=None) -> NoReturn:
        """Store the data reader, the record names, the configuration and
        the (optional) preprocessing manager."""
        self.reader = reader
        self.records = records
        self.config = config
        self.ppm = ppm
        self.siglen = self.config.input_len  # alias, for simplicity
    def __len__(self) -> int:
        """Number of records served by this reader."""
        return len(self.records)
    def __getitem__(self, index:int) -> Tuple[np.ndarray, np.ndarray]:
        """Load record ``index`` and build its binary R-peak mask."""
        rec_name = self.records[index]
        signal = self.reader.load_data(rec_name, units="mV", keep_dim=False)
        rpeaks = self.reader.load_ann(rec_name, keep_dim=False)
        # no down-sampling of the mask when the model recovers full length
        reduction = 1 if self.config.get("recover_length", False) else self.config.reduction
        mask = np.zeros(self.siglen // reduction)
        lower = self.config.skip_dist
        upper = self.siglen - self.config.skip_dist
        for rpeak in rpeaks:
            # ignore peaks too close to either end of the signal
            if not (lower <= rpeak < upper):
                continue
            onset = math.floor((rpeak - self.config.bias_thr) / reduction)
            offset = math.ceil((rpeak + self.config.bias_thr) / reduction)
            mask[onset:offset] = 1
        signal = signal.reshape((self.config.n_leads, self.siglen))
        mask = mask[..., np.newaxis]
        signal, _ = self.ppm(signal, self.config.fs)
        return signal, mask
| [
"json.dump",
"copy.deepcopy",
"torch_ecg.databases.CPSC2019",
"numpy.set_printoptions",
"tqdm.tqdm",
"json.load",
"math.ceil",
"os.path.abspath",
"random.shuffle",
"numpy.zeros",
"torch.set_default_tensor_type",
"torch_ecg._preprocessors.PreprocManager.from_config",
"math.floor",
"os.path.... | [((288, 335), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'suppress': '(True)'}), '(precision=5, suppress=True)\n', (307, 335), True, 'import numpy as np\n'), ((926, 975), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (955, 975), False, 'import torch\n'), ((1678, 1694), 'copy.deepcopy', 'deepcopy', (['config'], {}), '(config)\n', (1686, 1694), False, 'from copy import deepcopy\n'), ((1717, 1741), 'torch_ecg.databases.CPSC2019', 'CR', ([], {'db_dir': 'config.db_dir'}), '(db_dir=config.db_dir)\n', (1719, 1741), True, 'from torch_ecg.databases import CPSC2019 as CR\n'), ((2208, 2247), 'torch_ecg._preprocessors.PreprocManager.from_config', 'PreprocManager.from_config', (['self.config'], {}), '(self.config)\n', (2234, 2247), False, 'from torch_ecg._preprocessors import PreprocManager\n'), ((3156, 3179), 'numpy.array', 'np.array', (['self._signals'], {}), '(self._signals)\n', (3164, 3179), True, 'import numpy as np\n'), ((3203, 3225), 'numpy.array', 'np.array', (['self._labels'], {}), '(self._labels)\n', (3211, 3225), True, 'import numpy as np\n'), ((4255, 4332), 'os.path.join', 'os.path.join', (['self.reader.db_dir', 'f"""train_test_split_{_train_ratio:.2f}.json"""'], {}), "(self.reader.db_dir, f'train_test_split_{_train_ratio:.2f}.json')\n", (4267, 4332), False, 'import os, sys\n'), ((4720, 4753), 'copy.deepcopy', 'deepcopy', (['self.reader.all_records'], {}), '(self.reader.all_records)\n', (4728, 4753), False, 'from copy import deepcopy\n'), ((4762, 4778), 'random.shuffle', 'shuffle', (['records'], {}), '(records)\n', (4769, 4778), False, 'from random import shuffle, randint, uniform, sample\n'), ((6100, 6134), 'numpy.zeros', 'np.zeros', (['(self.siglen // reduction)'], {}), '(self.siglen // reduction)\n', (6108, 6134), True, 'import numpy as np\n'), ((2954, 3005), 'tqdm.tqdm', 'tqdm', (['self.fdr'], {'desc': '"""loading data"""', 'unit': 
'"""records"""'}), "(self.fdr, desc='loading data', unit='records')\n", (2958, 3005), False, 'from tqdm import tqdm\n'), ((4344, 4368), 'os.path.isfile', 'os.path.isfile', (['split_fn'], {}), '(split_fn)\n', (4358, 4368), False, 'import os, sys\n'), ((5017, 5060), 'json.dump', 'json.dump', (['split_res', 'f'], {'ensure_ascii': '(False)'}), '(split_res, f, ensure_ascii=False)\n', (5026, 5060), False, 'import json\n'), ((5132, 5153), 'random.shuffle', 'shuffle', (['self.records'], {}), '(self.records)\n', (5139, 5153), False, 'from random import shuffle, randint, uniform, sample\n'), ((6329, 6379), 'math.floor', 'math.floor', (['((r - self.config.bias_thr) / reduction)'], {}), '((r - self.config.bias_thr) / reduction)\n', (6339, 6379), False, 'import math\n'), ((6402, 6451), 'math.ceil', 'math.ceil', (['((r + self.config.bias_thr) / reduction)'], {}), '((r + self.config.bias_thr) / reduction)\n', (6411, 6451), False, 'import math\n'), ((4465, 4477), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4474, 4477), False, 'import json\n'), ((4586, 4607), 'random.shuffle', 'shuffle', (['self.records'], {}), '(self.records)\n', (4593, 4607), False, 'from random import shuffle, randint, uniform, sample\n'), ((691, 708), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (698, 708), False, 'from os.path import dirname, abspath\n')] |
import os
import tempfile
import zipfile
import shutil
from urllib.request import urlretrieve
from sktime.utils.data_io import load_from_tsfile_to_dataframe
from sklearn.utils.multiclass import class_distribution
import pandas as pd
import numpy as np
# Sentinel returned by get_dist_key() when a label has no assigned key yet.
NonExistentKey = -1
# Downloaded datasets are cached under <this module's directory>/database.
DIRNAME = "database"
MODULE = os.path.dirname(__file__)
def get_dist_key(dist, value):
    """Reverse lookup: find the key under which *value* is stored.

    Args:
        dist: dict
            Mapping of integer keys to class labels.
        value: string
            The class label to look up.
    Returns:
        output: int
            The key whose stored label equals ``value``, or ``NonExistentKey``
            (-1) when no entry matches.
    """
    for candidate_key, stored_label in dist.items():
        if stored_label == value:
            return candidate_key
    return NonExistentKey
def pre_processing(y, dist=None):
    """Encode class labels as consecutive integer keys.

    Args:
        y: sequence
            Raw labels (strings or integers).
        dist: dict, optional
            Existing key -> label mapping; when given, labels are encoded
            with it as-is, otherwise a fresh mapping is built.
    Returns:
        output: ndarray, dict
            The encoded labels and the (possibly newly built) mapping.
    """
    distinct_label = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
    unused_keys = list(range(len(distinct_label)))
    encoded = []
    if dist is None:
        # build the mapping on the fly, assigning keys in first-seen order
        dist = {}
        for label in y:
            key = get_dist_key(dist, label)
            if key is NonExistentKey:
                key = unused_keys.pop(0)
                dist[key] = label
            encoded.append(key)
    else:
        # reuse the caller-supplied mapping (unknown labels encode as -1)
        for label in y:
            encoded.append(get_dist_key(dist, label))
    return np.asarray(encoded), dist
def load_dataset(name, split, return_X_y, dist=None):
    """A static method to load dataset by name.
    This mirrors the sktime implementation.
    Args:
        name: string
            The name of dataset.
        split: string
            Which split to load; must be "train" or "test".
        return_X_y: bool
            If True return ``(X, y, dist)``; otherwise return the dataframe
            with a "class_val" column and ``dist``.
        dist: dist, optional
            A dist that may contain some key-value pairs.
    Returns:
        output: ndarray, ndarray, dist
    """
    extract_path = os.path.join(MODULE, DIRNAME)
    local_module = MODULE
    local_dirname = DIRNAME
    # make sure the local cache directory exists
    if not os.path.exists(extract_path):
        os.makedirs(extract_path)
    # download and unzip the archive on first use
    if name not in _list_downloaded_datasets(extract_path):
        url = "http://timeseriesclassification.com/Downloads/%s.zip" % name
        # This also tests the validity of the URL, can't rely on the html
        # status code as it always returns 200
        try:
            _download_and_extract(url, extract_path)
        except zipfile.BadZipFile as e:
            raise ValueError(
                "Invalid dataset name. Please make sure the dataset is "
                "available on http://timeseriesclassification.com/."
            ) from e
    if split in ("train", "test"):
        fname = name + "_" + split.upper() + ".ts"
        abspath = os.path.join(local_module, local_dirname, name, fname)
        X, y = load_from_tsfile_to_dataframe(abspath)
        # NOTE(review): `y is float` / `y is int` compares y against the
        # *type objects* and is therefore always False -- this branch is dead
        # code; the intent was probably an isinstance()-style check. Confirm
        # before changing, since the else-branch always runs today.
        if y is float or y is int:
            y = [int(label) for label in y]
        else:
            y, dist = pre_processing(y, dist)
            # shift 1-based integer labels to 0-based
            if np.amin(y) > 0:
                y = y - 1
    else:
        raise ValueError("Invalid `split` value")
    # Return appropriately
    if return_X_y:
        return X, y, dist
    else:
        X["class_val"] = pd.Series(y)
        return X, dist
def _list_downloaded_datasets(extract_path):
"""
Returns a list of all the currently downloaded datasets.
This mirrors the sktime implementation.
Args:
extract_path: string
The specific extract path.
Returns:
datasets : list
List of the names of datasets downloaded.
"""
if extract_path is None:
data_dir = os.path.join(MODULE, DIRNAME)
else:
data_dir = extract_path
datasets = [
path
for path in os.listdir(data_dir)
if os.path.isdir(os.path.join(data_dir, path))
]
return datasets
def _download_and_extract(url, extract_path=None):
"""Helper function for downloading and unzipping datasets
This mirrors the sktime implementation.
Args:
url : string
Url pointing to file to download
extract_path : string, optional (default: None)
path to extract downloaded zip to, None defaults
to sktime/datasets/data
Returns:
extract_path : string or None
if successful, string containing the path of the extracted file, None
if it wasn't succesful.
"""
file_name = os.path.basename(url)
dl_dir = tempfile.mkdtemp()
zip_file_name = os.path.join(dl_dir, file_name)
urlretrieve(url, zip_file_name)
if extract_path is None:
extract_path = os.path.join(MODULE, DIRNAME + "/%s/" % file_name.split(".")[0])
else:
extract_path = os.path.join(extract_path, "%s/" % file_name.split(".")[0])
try:
if not os.path.exists(extract_path):
os.makedirs(extract_path)
zipfile.ZipFile(zip_file_name, "r").extractall(extract_path)
shutil.rmtree(dl_dir)
return extract_path
except zipfile.BadZipFile:
shutil.rmtree(dl_dir)
if os.path.exists(extract_path):
shutil.rmtree(extract_path)
raise zipfile.BadZipFile(
"Could not unzip dataset. Please make sure the URL is valid."
) | [
"zipfile.ZipFile",
"os.makedirs",
"numpy.amin",
"os.path.basename",
"os.path.dirname",
"numpy.asarray",
"os.path.exists",
"urllib.request.urlretrieve",
"tempfile.mkdtemp",
"pandas.Series",
"shutil.rmtree",
"sktime.utils.data_io.load_from_tsfile_to_dataframe",
"os.path.join",
"os.listdir",
... | [((306, 331), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (321, 331), False, 'import os\n'), ((2421, 2450), 'os.path.join', 'os.path.join', (['MODULE', 'DIRNAME'], {}), '(MODULE, DIRNAME)\n', (2433, 2450), False, 'import os\n'), ((4938, 4959), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (4954, 4959), False, 'import os\n'), ((4973, 4991), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4989, 4991), False, 'import tempfile\n'), ((5012, 5043), 'os.path.join', 'os.path.join', (['dl_dir', 'file_name'], {}), '(dl_dir, file_name)\n', (5024, 5043), False, 'import os\n'), ((5048, 5079), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'zip_file_name'], {}), '(url, zip_file_name)\n', (5059, 5079), False, 'from urllib.request import urlretrieve\n'), ((1842, 1865), 'numpy.asarray', 'np.asarray', (['transform_y'], {}), '(transform_y)\n', (1852, 1865), True, 'import numpy as np\n'), ((2517, 2545), 'os.path.exists', 'os.path.exists', (['extract_path'], {}), '(extract_path)\n', (2531, 2545), False, 'import os\n'), ((2555, 2580), 'os.makedirs', 'os.makedirs', (['extract_path'], {}), '(extract_path)\n', (2566, 2580), False, 'import os\n'), ((3243, 3297), 'os.path.join', 'os.path.join', (['local_module', 'local_dirname', 'name', 'fname'], {}), '(local_module, local_dirname, name, fname)\n', (3255, 3297), False, 'import os\n'), ((3313, 3351), 'sktime.utils.data_io.load_from_tsfile_to_dataframe', 'load_from_tsfile_to_dataframe', (['abspath'], {}), '(abspath)\n', (3342, 3351), False, 'from sktime.utils.data_io import load_from_tsfile_to_dataframe\n'), ((3708, 3720), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (3717, 3720), True, 'import pandas as pd\n'), ((4131, 4160), 'os.path.join', 'os.path.join', (['MODULE', 'DIRNAME'], {}), '(MODULE, DIRNAME)\n', (4143, 4160), False, 'import os\n'), ((5461, 5482), 'shutil.rmtree', 'shutil.rmtree', (['dl_dir'], {}), '(dl_dir)\n', (5474, 5482), False, 'import 
shutil\n'), ((3502, 3512), 'numpy.amin', 'np.amin', (['y'], {}), '(y)\n', (3509, 3512), True, 'import numpy as np\n'), ((4253, 4273), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (4263, 4273), False, 'import os\n'), ((5316, 5344), 'os.path.exists', 'os.path.exists', (['extract_path'], {}), '(extract_path)\n', (5330, 5344), False, 'import os\n'), ((5358, 5383), 'os.makedirs', 'os.makedirs', (['extract_path'], {}), '(extract_path)\n', (5369, 5383), False, 'import os\n'), ((5550, 5571), 'shutil.rmtree', 'shutil.rmtree', (['dl_dir'], {}), '(dl_dir)\n', (5563, 5571), False, 'import shutil\n'), ((5583, 5611), 'os.path.exists', 'os.path.exists', (['extract_path'], {}), '(extract_path)\n', (5597, 5611), False, 'import os\n'), ((5667, 5753), 'zipfile.BadZipFile', 'zipfile.BadZipFile', (['"""Could not unzip dataset. Please make sure the URL is valid."""'], {}), "(\n 'Could not unzip dataset. Please make sure the URL is valid.')\n", (5685, 5753), False, 'import zipfile\n'), ((4299, 4327), 'os.path.join', 'os.path.join', (['data_dir', 'path'], {}), '(data_dir, path)\n', (4311, 4327), False, 'import os\n'), ((5392, 5427), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file_name', '"""r"""'], {}), "(zip_file_name, 'r')\n", (5407, 5427), False, 'import zipfile\n'), ((5625, 5652), 'shutil.rmtree', 'shutil.rmtree', (['extract_path'], {}), '(extract_path)\n', (5638, 5652), False, 'import shutil\n'), ((1266, 1279), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1276, 1279), True, 'import numpy as np\n')] |
from scipy import *
from pylab import *
import numpy as np
import cv2 as cv2
# NOTE(review): the two star imports above are unused in this script
# (cv2/np do all the work); kept as-is to avoid any behavior change.
# image = imread("img/me1.jpg")[:, :, 0]
# Load the demo image (BGR order) and report its dimensions.
image = cv2.imread("img//test.jpg")
print(f'width: {image.shape[1]} pixels')
print(f'height: {image.shape[0]} pixels')
print(f'channels: {image.shape[2]}')
cv2.imshow('image', image)
cv2.waitKey(0)
# cv2.imwrite('new_image.png', image)
# patch1 = image[:100, :100]
# cv2.imshow("patch1", patch1)
# cv2.waitKey(0)
# Paint 25 filled circles with random centre, radius and BGR colour
# onto a black 300x300 canvas, then display it.
canvas = np.zeros((300, 300, 3), dtype="uint8")
for _ in range(25):
    radius = np.random.randint(5, 200)
    colour = np.random.randint(0, 256, size=(3,)).tolist()
    centre = np.random.randint(0, 200, size=(2,))
    cv2.circle(canvas, tuple(centre), radius, colour, -1)
cv2.imshow('canvas', canvas)
cv2.waitKey(0)
#shift position to right down
# M = np.float32([[1, 0, 25], [0, 1, 50]])
# shifted_image = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
# cv2.imshow('shifted_image', shifted_image)
# cv2.waitKey(0)
#rotate
# (h, w) = image.shape[:2]
# center = (w // 2, h // 2)
#
# M = cv2.getRotationMatrix2D(center, 135, 1.0)
# Rotated_iamge = cv2.warpAffine(image, M, (w, h))
# cv2.imshow('Rotated_iamge', Rotated_iamge)
# cv2.waitKey(0)
# new_w, new_h = 100, 200
# resize = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)
# cv2.imshow('resize', resize)
# cv2.waitKey(0)
# flip
# flipped_image = cv2.flip(image, -1)
# cv2.imshow('flipped_image', flipped_image)
# cv2.waitKey(0)
# 位运算
# rectangle = np.zeros((100, 100), dtype='uint8')
# cv2.rectangle(rectangle, (30, 30), (70, 70), 255, -1)
# cv2.imshow('rectangle', rectangle)
# cv2.waitKey(0)
#
# circle = np.zeros((100, 100), dtype='uint8')
# cv2.circle(circle, (50, 50), 25, 255, -1)
# cv2.imshow('circle', circle)
# cv2.waitKey(0)
# bitwiseAnd = cv2.bitwise_and(rectangle, circle)
# cv2.imshow('And', bitwiseAnd)
# cv2.waitKey(0)
#
# bitwiseOr = cv2.bitwise_or(rectangle, circle)
# cv2.imshow('And', bitwiseOr)
# cv2.waitKey(0)
#
# bitwiseXOR = cv2.bitwise_xor(rectangle, circle)
# cv2.imshow('And', bitwiseXOR)
# cv2.waitKey(0)
#
# bitwiseNot = cv2.bitwise_not(circle)
# cv2.imshow('And', bitwiseNot)
# cv2.waitKey(0)
#masking
# mask = np.zeros(image.shape[:2], dtype='uint8')
# (cX, cY) = (image.shape[1] // 2, image.shape[0] // 2)
# cv2.rectangle(mask, (cX - 75, cY - 75), (cX + 75, cY + 75), 255, -1)
# cv2.imshow('mask', mask)
# cv2.waitKey(0)
#
# masked = cv2.bitwise_and(image, image, mask = mask)
# cv2.imshow('masked image', masked)
# cv2.waitKey(0)
# split RGB channels and merge
# (B, G, R) = cv2.split(image)
# merged = cv2.merge([B, G, R])
# cv2.imshow('RED', R)
# cv2.imshow('Blue', B)
# cv2.imshow('Green', G)
# cv2.imshow('Merged', merged)
# cv2.waitKey(0)
# change color to gray/HSV/LAB
# Grayscale copy of the image (single channel) for histogram/edge demos below.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# LAB = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
# cv2.imshow('gray', gray)
# cv2.imshow('HSV', HSV)
# cv2.imshow('LAB', LAB)
# cv2.waitKey(0)
# colorful plt hist
# from matplotlib import pyplot as plt
# hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
# plt.figure()
#
# p1 = plt.subplot(121)
# p2 = plt.subplot(122)
#
# p1.plot(hist)
# chans = cv2.split(image)
# colors = ('b', 'g', 'r')
# for (chan, color) in zip(chans, colors):
# hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
# p2.plot(hist, color=color)
# plt.show()
# blur 平滑 模糊
# blurred = np.hstack([cv2.blur(image, (3, 3)), cv2.blur(image, (5, 5)), cv2.blur(image, (7, 7))])
# cv2.imshow('Averaged', blurred)
#
# blurred = np.hstack([cv2.GaussianBlur(image, (3, 3), 0), cv2.GaussianBlur(image, (5, 5), 0), cv2.GaussianBlur(image, (7, 7), 0)])
# cv2.imshow('GaussianBlur', blurred)
#
# blurred = np.hstack([cv2.medianBlur(image, 3), cv2.medianBlur(image, 5), cv2.medianBlur(image, 7)])
# cv2.imshow('Median', blurred)
#
# blurred = np.hstack([cv2.bilateralFilter(image, 5, 21, 21), cv2.bilateralFilter(image, 7, 31, 31), cv2.bilateralFilter(image, 9, 41, 41)])
# cv2.imshow('Bilateral', blurred)
# cv2.waitKey(0)
# 边缘检测
# blured = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# blured = cv2.GaussianBlur(blured, (5, 5), 0)
# cv2.imshow('blurred', blured)
#
# canny = cv2.Canny(blured, 30, 150)
# cv2.imshow('canny', canny)
# cv2.waitKey(0)
# openCV for face detection
# Face + eye detection with Haar cascades.
# NOTE(review): the cascade XML files are loaded from the current working
# directory; run the script from the folder that contains them.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eyes_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
# Detect faces on the colour image; scaleFactor/minNeighbors tuned for the demo.
faces_rects = faceCascade.detectMultiScale(image, scaleFactor=1.02, minNeighbors=5, minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in faces_rects:
    # Draw the face bounding box, then search for eyes inside that region.
    image = cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = image[y:y+h, x:x+w]
    eyes = eyes_cascade.detectMultiScale(roi_gray)
    for (ex, ey, ew, eh) in eyes:
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
# NOTE(review): the window title 'iamge' is a typo, but it is runtime text
# and is deliberately left unchanged here.
cv2.imshow('iamge', image)
cv2.imwrite('new_test.jpg', image)
cv2.waitKey(0)
| [
"cv2.waitKey",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.zeros",
"cv2.rectangle",
"cv2.imread",
"numpy.random.randint",
"cv2.CascadeClassifier",
"cv2.imshow"
] | [((131, 158), 'cv2.imread', 'cv2.imread', (['"""img//test.jpg"""'], {}), "('img//test.jpg')\n", (141, 158), True, 'import cv2 as cv2\n'), ((289, 315), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (299, 315), True, 'import cv2 as cv2\n'), ((317, 331), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (328, 331), True, 'import cv2 as cv2\n'), ((461, 499), 'numpy.zeros', 'np.zeros', (['(300, 300, 3)'], {'dtype': '"""uint8"""'}), "((300, 300, 3), dtype='uint8')\n", (469, 499), True, 'import numpy as np\n'), ((740, 768), 'cv2.imshow', 'cv2.imshow', (['"""canvas"""', 'canvas'], {}), "('canvas', canvas)\n", (750, 768), True, 'import cv2 as cv2\n'), ((770, 784), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (781, 784), True, 'import cv2 as cv2\n'), ((2846, 2885), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2858, 2885), True, 'import cv2 as cv2\n'), ((4437, 4476), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (4449, 4476), True, 'import cv2 as cv2\n'), ((4494, 4554), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (4515, 4554), True, 'import cv2 as cv2\n'), ((4571, 4615), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_eye.xml"""'], {}), "('haarcascade_eye.xml')\n", (4592, 4615), True, 'import cv2 as cv2\n'), ((5098, 5124), 'cv2.imshow', 'cv2.imshow', (['"""iamge"""', 'image'], {}), "('iamge', image)\n", (5108, 5124), True, 'import cv2 as cv2\n'), ((5126, 5160), 'cv2.imwrite', 'cv2.imwrite', (['"""new_test.jpg"""', 'image'], {}), "('new_test.jpg', image)\n", (5137, 5160), True, 'import cv2 as cv2\n'), ((5162, 5176), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5173, 5176), True, 'import cv2 as cv2\n'), ((548, 573), 'numpy.random.randint', 'np.random.randint', (['(5)', '(200)'], 
{}), '(5, 200)\n', (565, 573), True, 'import numpy as np\n'), ((646, 682), 'numpy.random.randint', 'np.random.randint', (['(0)', '(200)'], {'size': '(2,)'}), '(0, 200, size=(2,))\n', (663, 682), True, 'import numpy as np\n'), ((4797, 4857), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (4810, 4857), True, 'import cv2 as cv2\n'), ((5026, 5096), 'cv2.rectangle', 'cv2.rectangle', (['roi_color', '(ex, ey)', '(ex + ew, ey + eh)', '(0, 255, 0)', '(2)'], {}), '(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)\n', (5039, 5096), True, 'import cv2 as cv2\n'), ((587, 623), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(3,)'}), '(0, 256, size=(3,))\n', (604, 623), True, 'import numpy as np\n')] |
'''Underwater Detection Experiment.'''
from lrp import Linear_Reward_Penalty as LRP
from mse import MSE
from environment import Environment
from pinger import Pinger
import numpy as np
# Define the number of discrete depths between the surface and seabed.
num_actions = 6
n = 10000
interval = 1
# Index of the last experiment in each ensemble; the environment switches
# after it. NOTE(review): this is a float compared with an int below
# (j == time_between) -- it works for these values but is fragile.
time_between = (n / interval) - 1
# Define the environment with the number of discrete depths for the detectable
# object.
env = Environment(num_actions)
# Define the LRI automata with the same number of actions. This number does
# not correspond to the number of receivers on the array. It is merely the
# representation of the array's ability to detect the object at that depth.
lrp = LRP(num_actions) # The learning automata.
# The most probable depth that the object exists at, as calculated by the
# learner (histogram of converged depths over all runs).
bestdepth = np.zeros(num_actions)
# Define the Markovian Switching Environment that will feed probabilities to
# the Pinger object.
Es = [
    [0.1, 0.2, 0.4, 0.2, 0.01, 0.09],
    [0, 0, 0.8, 0.1, 0, 0.1],
    [0, 0, 0, 1, 0, 0],
    [0.1, 0.1, 0.6, 0.05, 0.01, 0.04]]
mse = MSE(Es)
det_obj = Pinger(mse.env_now()) # Create the detectable object.
# Run one ensemble per environment in Es (len(Es) ensembles in total).
for k in range(len(Es)):
    # Generate an ensemble of n experiments
    for j in range(n):
        # reset the action probabilities.
        lrp.reset_actions()
        # Run a single experiment until the automaton converges.
        while(True):
            # Define m as the next action predicting the depth of the object.
            m = lrp.next_action()
            # Define req as the next detectable object depth.
            req = det_obj.request()
            # reward if m = req.
            resp = env.response(m, req)
            if(not resp):
                lrp.do_reward(m)
            else:
                lrp.do_penalty(m)
            if(max(lrp.p) > 0.98):
                # The best depth counting from 0.
                # Break at 98% convergence to a single depth.
                bestdepth[np.argmax(lrp.p)] += 1
                break
        # After the last run of the ensemble, switch the Markovian environment.
        if(j == time_between):
            mse.next_env()
            det_obj.set_env(mse.env_now())
            print("The desired vector is now: " + str(mse.env_now()))
print("The probability vector is: " + str(bestdepth / sum(bestdepth)))
| [
"mse.MSE",
"numpy.argmax",
"numpy.zeros",
"environment.Environment",
"lrp.Linear_Reward_Penalty"
] | [((426, 450), 'environment.Environment', 'Environment', (['num_actions'], {}), '(num_actions)\n', (437, 450), False, 'from environment import Environment\n'), ((684, 700), 'lrp.Linear_Reward_Penalty', 'LRP', (['num_actions'], {}), '(num_actions)\n', (687, 700), True, 'from lrp import Linear_Reward_Penalty as LRP\n'), ((824, 845), 'numpy.zeros', 'np.zeros', (['num_actions'], {}), '(num_actions)\n', (832, 845), True, 'import numpy as np\n'), ((1092, 1099), 'mse.MSE', 'MSE', (['Es'], {}), '(Es)\n', (1095, 1099), False, 'from mse import MSE\n'), ((2037, 2053), 'numpy.argmax', 'np.argmax', (['lrp.p'], {}), '(lrp.p)\n', (2046, 2053), True, 'import numpy as np\n')] |
from __future__ import absolute_import
# Interface to various QP solvers
from builtins import object
import numpy as np
import mathprogbasepy.quadprog.solvers.solvers as s
# Solver Constants
OPTIMAL = "optimal"
OPTIMAL_INACCURATE = "optimal inaccurate"
PRIMAL_INFEASIBLE = "primal infeasible"
PRIMAL_INFEASIBLE_INACCURATE = "primal infeasible inaccurate"
DUAL_INFEASIBLE = "dual infeasible"
DUAL_INFEASIBLE_INACCURATE = "dual infeasible inaccurate"
PRIMAL_OR_DUAL_INFEASIBLE = "primal or dual infeasible"
SOLVER_ERROR = "solver_error"
MAX_ITER_REACHED = "max_iter_reached"
TIME_LIMIT = "time_limit"
# Statuses that indicate a solution was found.
SOLUTION_PRESENT = [OPTIMAL, OPTIMAL_INACCURATE]
class QuadprogProblem(object):
    """
    Defines QP problem of the form

    minimize 1/2 x' P x + q' x
    subject to l <= A x <= u
               i_l[i] <= x_i <= i_u[i] for i \\in i_idx
               x_i \\in Z for i \\in i_idx

    Attributes
    ----------
    P: scipy sparse matrix
        quadratic cost matrix (n x n)
    q: numpy vector
        linear cost vector (length n)
    A: scipy sparse matrix
        constraints matrix (m x n)
    l: numpy vector
        constraints lower bound (length m)
    u: numpy vector
        constraints upper bound (length m)
    i_l: numpy vector
        lower bound on integer variables
    i_u: numpy vector
        upper bound on integer variables
    i_idx: numpy vector
        index of integer variables
    x0: numpy vector, optional
        initial guess (length n)
    """
    def __init__(self, P=None, q=None, A=None,
                 l=None, u=None, i_idx=None,
                 i_l=None, i_u=None,
                 x0=None):
        """Store the problem data and infer the dimensions.

        Raises
        ------
        ValueError
            if no variables can be inferred, if the integer-variable bounds
            do not match ``i_idx``, or if ``x0`` has the wrong length.
        """
        # ----- infer the number of variables n -----
        if P is None:
            if q is not None:
                self.n = len(q)
            elif A is not None:
                self.n = A.shape[1]
            else:
                raise ValueError("The problem does not have any variables")
        else:
            self.n = P.shape[0]
        # ----- infer the number of constraints m -----
        self.m = 0 if A is None else A.shape[0]
        # ----- validate integer-variable bounds against i_idx -----
        if i_idx is not None:
            if i_l is not None and len(i_l) != len(i_idx):
                raise ValueError("Wrong number of integer variables lower bounds")
            if i_u is not None and len(i_u) != len(i_idx):
                raise ValueError("Wrong number of integer variables upper bounds")
        self.P = P
        self.q = q
        self.A = A
        # Default bounds are +/- infinity for every *constraint* row.
        # Bug fix: the previous code sized these with P.shape[0] (the number
        # of variables) and crashed when P was None; since the bounds belong
        # to l <= A x <= u, their length must be m, not n.
        self.l = l if l is not None else -np.inf*np.ones(self.m)
        self.u = u if u is not None else np.inf*np.ones(self.m)
        self.i_idx = i_idx
        self.i_u = i_u
        self.i_l = i_l
        self.x0 = x0
        if x0 is not None and len(x0) != self.n:
            raise ValueError('Initial guess has wrong dimensions!')
    def solve(self, solver=s.GUROBI, **kwargs):
        """
        Solve Quadratic Program with desired solver

        Parameters
        ----------
        solver:
            one of the solver constants in
            ``mathprogbasepy.quadprog.solvers.solvers``
        **kwargs:
            options forwarded to the chosen solver interface

        Returns
        -------
        results object produced by the chosen solver interface

        Raises
        ------
        ValueError
            if ``solver`` is not a recognized solver constant
        """
        # Dispatch lazily so that only the chosen solver package is imported.
        if solver == s.GUROBI:
            from .solvers.gurobi_qpif import GUROBI
            solver = GUROBI(**kwargs)  # Initialize solver
        elif solver == s.CPLEX:
            from .solvers.cplex_qpif import CPLEX
            solver = CPLEX(**kwargs)  # Initialize solver
        elif solver == s.OSQP:
            from .solvers.osqp_qpif import OSQP
            solver = OSQP(**kwargs)  # Initialize solver
        elif solver == s.MOSEK:
            from .solvers.mosek_qpif import MOSEK
            solver = MOSEK(**kwargs)  # Initialize solver
        elif solver == s.qpOASES:
            from .solvers.qpoases_qpif import qpOASES
            solver = qpOASES(**kwargs)  # Initialize solver
        elif solver == s.OSQP_PUREPY:
            from .solvers.osqp_purepy_qpif import OSQP_PUREPY
            solver = OSQP_PUREPY(**kwargs)  # Initialize solver
        else:
            # Previously an unknown solver value fell through and failed with
            # an opaque AttributeError; fail fast with a clear message.
            raise ValueError("Unknown solver: %s" % solver)
        # Solve problem
        results = solver.solve(self)  # Solve problem
        return results
| [
"numpy.ones"
] | [((2495, 2514), 'numpy.ones', 'np.ones', (['P.shape[0]'], {}), '(P.shape[0])\n', (2502, 2514), True, 'import numpy as np\n'), ((2563, 2582), 'numpy.ones', 'np.ones', (['P.shape[0]'], {}), '(P.shape[0])\n', (2570, 2582), True, 'import numpy as np\n')] |
import os
from pathlib import Path
import artm
import numpy as np
class RankingByModel:
    def __init__(self, model_path, metric):
        """
        Class for ranking document search between language pairs.
        Parameters
        ----------
        model_path: str
            a path to the artm model directory
        metric: callable
            a way to measure norm of a matrix of vectors
            for example init as:
            metric = np.linalg.norm
        """
        self._model = artm.load_artm_model(model_path)
        self._metric = metric

    def _get_document_embeddings(self, path_to_data):
        """
        Vectorize the documents at ``path_to_data`` (a vowpal-wabbit file or
        a directory of artm batches) and return their topic-space embeddings.

        Returns
        -------
        (pandas.Index, pandas.DataFrame)
            Document identifiers and the theta (document x topic) matrix.
        """
        if os.path.isfile(path_to_data):
            # Vowpal-wabbit input: batches are written next to the file.
            path_to_batches = os.path.join(
                os.path.dirname(path_to_data), Path(path_to_data).stem + "_rank_batches"
            )
            bv = artm.BatchVectorizer(
                data_path=path_to_data,
                data_format="vowpal_wabbit",
                target_folder=path_to_batches,
            )
        elif len(os.listdir(path_to_data)) > 0:
            bv = artm.BatchVectorizer(data_path=path_to_data, data_format="batches",)
        else:
            raise ValueError("Unknown data format")
        theta = self._model.transform(batch_vectorizer=bv)
        return theta.columns, theta

    def _rank(self, search_indices, vectors_first, vectors_secnd, kwargs=None):
        """
        For every query document in ``vectors_first``, rank all documents in
        ``vectors_secnd`` by metric distance and record the (1-based)
        position at which the matching document appears.

        Returns
        -------
        float
            The mean rank of the correct match over all queries.
        """
        # BUG FIX: the default used to be passed straight to
        # ``self._metric(difference, **kwargs)``; ``**None`` raises
        # TypeError, so normalize to an empty dict first.
        if kwargs is None:
            kwargs = {}
        average_position = []
        for search_num in range(len(search_indices)):
            difference = vectors_secnd - vectors_first[search_num]
            vectors_norm = self._metric(difference, **kwargs)
            rating = search_indices[np.argsort(vectors_norm)].copy()
            average_position.append(
                np.argwhere(rating == search_indices[search_num])[0][0] + 1
            )
        return np.mean(average_position)

    def get_ranking(self, data_lang_one, data_lang_two, kwargs=None):
        """
        Function returning average position of search documents in the other language.
        Parameters
        ----------
        data_lang_one: str
            path to folder with batches or path to vw file
        data_lang_two: str
            path to folder with batches or path to vw file
        """
        idx_one, theta_one = self._get_document_embeddings(data_lang_one)
        idx_two, theta_two = self._get_document_embeddings(data_lang_two)
        # document identifiers must be unique within each language
        assert len(idx_one) == len(set(idx_one))
        assert len(idx_two) == len(set(idx_two))
        # rank only documents present in both languages
        search_indices = idx_one.intersection(idx_two)
        vectors_one = theta_one[search_indices].values.T
        vectors_two = theta_two[search_indices].values.T
        del theta_one, theta_two
        ranking_first = self._rank(search_indices, vectors_one, vectors_two, kwargs)
        ranking_secnd = self._rank(search_indices, vectors_two, vectors_one, kwargs)
        return ranking_first, ranking_secnd, len(search_indices)
| [
"artm.BatchVectorizer",
"os.path.dirname",
"artm.load_artm_model",
"numpy.argsort",
"os.path.isfile",
"numpy.mean",
"pathlib.Path",
"numpy.argwhere",
"os.listdir"
] | [((509, 541), 'artm.load_artm_model', 'artm.load_artm_model', (['model_path'], {}), '(model_path)\n', (529, 541), False, 'import artm\n'), ((638, 666), 'os.path.isfile', 'os.path.isfile', (['path_to_data'], {}), '(path_to_data)\n', (652, 666), False, 'import os\n'), ((1802, 1827), 'numpy.mean', 'np.mean', (['average_position'], {}), '(average_position)\n', (1809, 1827), True, 'import numpy as np\n'), ((832, 940), 'artm.BatchVectorizer', 'artm.BatchVectorizer', ([], {'data_path': 'path_to_data', 'data_format': '"""vowpal_wabbit"""', 'target_folder': 'path_to_batches'}), "(data_path=path_to_data, data_format='vowpal_wabbit',\n target_folder=path_to_batches)\n", (852, 940), False, 'import artm\n'), ((728, 757), 'os.path.dirname', 'os.path.dirname', (['path_to_data'], {}), '(path_to_data)\n', (743, 757), False, 'import os\n'), ((1066, 1133), 'artm.BatchVectorizer', 'artm.BatchVectorizer', ([], {'data_path': 'path_to_data', 'data_format': '"""batches"""'}), "(data_path=path_to_data, data_format='batches')\n", (1086, 1133), False, 'import artm\n'), ((1018, 1042), 'os.listdir', 'os.listdir', (['path_to_data'], {}), '(path_to_data)\n', (1028, 1042), False, 'import os\n'), ((759, 777), 'pathlib.Path', 'Path', (['path_to_data'], {}), '(path_to_data)\n', (763, 777), False, 'from pathlib import Path\n'), ((1627, 1651), 'numpy.argsort', 'np.argsort', (['vectors_norm'], {}), '(vectors_norm)\n', (1637, 1651), True, 'import numpy as np\n'), ((1713, 1762), 'numpy.argwhere', 'np.argwhere', (['(rating == search_indices[search_num])'], {}), '(rating == search_indices[search_num])\n', (1724, 1762), True, 'import numpy as np\n')] |
from typing import Union, Tuple, List, Optional, Dict
from pathlib import Path
import sqlite3
import numpy as np
from numpy.typing import NDArray
import pandas as pd
import cv2
from lxml import etree
import h5py
from napari_imsmicrolink.utils.points import apply_rotmat_points
from napari_imsmicrolink.utils.ims_coords import parse_tsf_coordinates
class PixelMapIMS:
    """
    Container for IMS (imaging mass spectrometry) pixel coordinates.

    Accumulates region labels and x/y pixel indices from one or more IMS
    datasets and maintains three coordinate variants:
    ``*_orig`` (as read), ``*_min`` (shifted so the minimum is 0) and
    ``*_pad`` (minimized plus the current edge padding). From these it
    builds 2D pixel-map images and approximate polygon outlines per region.
    """

    def __init__(
        self,
        data: Union[str, np.ndarray, Path],
        infer_regions: bool = True,
    ):
        """
        Container for IMS pixel coordinates for manipulation
        Parameters
        ----------
        data : n_pix x 3 (regions, x , y) np.ndarray or file path as string
        infer_regions : for imzML import, regions will be infered from non connected groups of pixels
        """
        # normalize single input to a list so multiple datasets are uniform
        if isinstance(data, (str, Path, np.ndarray)):
            data_imported = [data]
        else:
            data_imported = data
        # source datasets (file paths and/or arrays) accumulated so far
        self.data: List[Union[str, NDArray]] = data_imported
        # IMS pixel size in microns; default 1 until set by the caller
        self.ims_res: int = 1
        # per-pixel region labels and the three coordinate variants,
        # filled in by read_pixel_data()
        self.regions: Optional[NDArray] = None
        self.x_coords_orig: Optional[NDArray] = None
        self.y_coords_orig: Optional[NDArray] = None
        self.x_coords_min: Optional[NDArray] = None
        self.y_coords_min: Optional[NDArray] = None
        self.x_coords_pad: Optional[NDArray] = None
        self.y_coords_pad: Optional[NDArray] = None
        self.x_extent_pad: Optional[int] = None
        self.y_extent_pad: Optional[int] = None
        # current padding on each edge, in IMS pixels
        self._padding: Dict[str, int] = {
            "x_left": 0,
            "x_right": 0,
            "y_top": 0,
            "y_bottom": 0,
        }
        # same padding expressed in microns (pixels * ims_res)
        self.padding_microns: Dict[str, Union[int, float]] = {
            "x_left": 0,
            "x_right": 0,
            "y_top": 0,
            "y_bottom": 0,
        }
        for data in self.data:
            self.read_pixel_data(data, infer_regions=infer_regions)
        self._pixelmap_minimized: np.ndarray = self._make_pixel_map_at_ims(
            map_type="minimized", randomize=True
        )
        # no padding yet, so the padded map starts as the minimized map
        self.pixelmap_padded: NDArray = self._pixelmap_minimized
        self._shape_map_minimized = self._make_shape_map(map_type="minimized")

    def _check_bruker_sl(self, data_fp: str) -> bool:
        """Return True if the first line of ``data_fp`` looks like a Bruker
        flexImaging (or R-exported) spotlist header."""
        with open(data_fp) as f:
            first_line = f.readline()
        return any(e in first_line for e in ["flexImaging", "from R"])

    def _read_bruker_sl_rxy(
        self, data_fp: str
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Parse a Bruker flexImaging spotlist text file.

        Returns
        -------
        (regions, x, y) arrays; x/y are decoded from the ``...X<i>Y<j>``
        spot-name column.
        """
        sl = pd.read_table(
            data_fp,
            header=None,
            skiprows=2,
            sep=" ",
            names=["X-pos", "Y-pos", "spot-name", "region"],
        )
        # spot names encode coordinates as "...X<col>Y<row>"
        rxy = sl["spot-name"].str.split("X|Y", expand=True)
        # handle named regions
        regions = np.asarray(sl["region"])
        # TODO: defer to rxy regions over factorized if discrepancy exists
        x = np.asarray(rxy.iloc[:, 1], dtype=np.int32)
        y = np.asarray(rxy.iloc[:, 2], dtype=np.int32)
        return regions, x, y

    def _read_sqlite_rxy(
        self, data_fp: str
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Read (region, x, y) per spectrum from a Bruker ``.sqlite`` file.

        NOTE(review): the sqlite connection is never closed — consider a
        ``with contextlib.closing(...)`` here.
        """
        sqlite_db = sqlite3.connect(data_fp)
        c = sqlite_db.cursor()
        c.execute("SELECT RegionNumber, XIndexPos, YIndexPos FROM Spectra")
        rxy = np.array(c.fetchall())
        regions, x, y = rxy[:, 0], rxy[:, 1], rxy[:, 2]
        return regions, x, y

    def _read_h5(self, data_fp):
        """Read ``region``, ``x`` and ``y`` datasets from an HDF5 file."""
        with h5py.File(data_fp) as f:
            regions, x, y = (
                np.asarray(f["region"]),
                np.asarray(f["x"]),
                np.asarray(f["y"]),
            )
        return regions, x, y

    def _read_imzml_rxy(
        self, data_fp: str, infer_regions: bool = True
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Extract pixel coordinates from an imzML file.

        imzML carries no region labels, so all pixels start as region 0;
        if ``infer_regions`` is True, connected components of the pixel
        mask are relabelled as separate regions.
        """
        elements = etree.iterparse(data_fp)
        coordinates = []
        for event, element in elements:
            if element.tag == "{http://psi.hupo.org/ms/mzml}spectrum":
                scan_elem = element.find(
                    "%sscanList/%sscan"
                    % ("{http://psi.hupo.org/ms/mzml}", "{http://psi.hupo.org/ms/mzml}")
                )
                # IMS:1000050 / IMS:1000051 are the x / y position accessions
                x = scan_elem.find(
                    '%scvParam[@accession="IMS:1000050"]'
                    % "{http://psi.hupo.org/ms/mzml}"
                ).attrib["value"]
                y = scan_elem.find(
                    '%scvParam[@accession="IMS:1000051"]'
                    % "{http://psi.hupo.org/ms/mzml}"
                ).attrib["value"]
                coordinates.append((int(x), int(y)))
        # prepend a zero region column to match the (region, x, y) layout
        rxy = np.column_stack(
            [
                np.zeros(len(coordinates), dtype=np.int32),
                np.asarray(coordinates, dtype=np.int32),
            ]
        )
        regions, x, y = rxy[:, 0], rxy[:, 1], rxy[:, 2]
        if infer_regions:
            # rasterize the pixels, then label connected groups as regions
            pix_map_arr = np.zeros((np.max(y) + 1, np.max(x) + 1), dtype=np.uint8)
            pix_map_arr[y, x] = 255
            n_cc, cc = cv2.connectedComponents(pix_map_arr)
            cc_rs = []
            cc_xs = []
            cc_ys = []
            for roi in np.arange(1, n_cc, 1):
                cc_y, cc_x = np.where(cc == roi)
                cc_r = np.ones_like(cc_x, dtype=np.int32) * roi
                cc_rs.append(cc_r)
                cc_xs.append(cc_x)
                cc_ys.append(cc_y)
            cc_x = np.concatenate(cc_xs)
            cc_y = np.concatenate(cc_ys)
            cc_r = np.concatenate(cc_rs)
            infer_rxy = np.column_stack([cc_r, cc_x, cc_y])
            # join on (x, y) to re-order inferred labels to spectrum order
            regions = np.asarray(
                pd.merge(pd.DataFrame(rxy), pd.DataFrame(infer_rxy), on=[1, 2])["0_y"]
            )
        return regions, x, y

    def _read_ims_microlink_csv(
        self, data_fp: str
    ) -> Tuple[
        pd.Series, pd.Series, pd.Series, pd.Series, pd.Series, pd.Series, pd.Series
    ]:
        """Read a previously exported ims-microlink CSV (round-trip import);
        it already contains all three coordinate variants."""
        allcoords = pd.read_csv(data_fp, comment="#")
        return (
            allcoords["regions"],
            allcoords["x_original"],
            allcoords["y_original"],
            allcoords["x_minimized"],
            allcoords["y_minimized"],
            allcoords["x_padded"],
            allcoords["y_padded"],
        )

    def read_pixel_data(
        self, data: Union[str, np.ndarray], infer_regions: bool
    ) -> None:
        """Read one dataset (array or file, dispatched by suffix) and merge
        its coordinates into the accumulated attributes.

        When data already exist, new region numbers are offset past the
        current maximum so labels from different datasets never collide.

        NOTE(review): an unrecognized file suffix leaves ``regions``/``x``/
        ``y`` unbound and raises NameError below — consider an explicit
        error for unsupported inputs.
        """
        # True only for the CSV round-trip path, which fills the attributes
        # directly instead of going through the merge logic below
        data_round_trip = False
        if isinstance(data, np.ndarray):
            regions = data[:, 0]
            x = data[:, 1]
            y = data[:, 2]
        elif Path(data).suffix.lower() == ".txt":
            if self._check_bruker_sl(data) is False:
                raise ValueError(
                    "{} doesn't appear to be a bruker flexImaging spotlist".format(data)
                )
            regions, x, y = self._read_bruker_sl_rxy(data)
        elif Path(data).suffix.lower() == ".sqlite":
            regions, x, y = self._read_sqlite_rxy(data)
        elif Path(data).suffix.lower() == ".imzml":
            regions, x, y = self._read_imzml_rxy(str(data), infer_regions=infer_regions)
        elif Path(data).suffix.lower() == ".h5":
            regions, x, y = self._read_h5(str(data))
        elif Path(data).suffix.lower() == ".tsf":
            regions, x, y = parse_tsf_coordinates(str(data))
        elif Path(data).suffix.lower() == ".csv":
            (
                self.regions,
                self.x_coords_orig,
                self.y_coords_orig,
                self.x_coords_min,
                self.y_coords_min,
                self.x_coords_pad,
                self.y_coords_pad,
            ) = self._read_ims_microlink_csv(data)
            data_round_trip = True
        if data_round_trip is False:
            # assume there is no data if regions is None
            if self.regions is None:
                self.regions = regions
                self.x_coords_orig = x
                self.y_coords_orig = y
            else:
                # shift the new dataset's region numbers past existing ones
                offset_roi_no = int(np.max(self.regions) + 1)
                regions = regions + offset_roi_no
                self.regions = np.concatenate([self.regions, regions])
                self.x_coords_orig = np.concatenate([self.x_coords_orig, x])
                self.y_coords_orig = np.concatenate([self.y_coords_orig, y])
            # re-derive minimized coordinates; padding resets to minimized
            self.x_coords_min = self.x_coords_orig - np.min(self.x_coords_orig)
            self.y_coords_min = self.y_coords_orig - np.min(self.y_coords_orig)
            self.x_coords_pad = self.x_coords_min
            self.y_coords_pad = self.y_coords_min

    def add_pixel_data(
        self,
        data: Union[str, np.ndarray],
        infer_regions: bool = True,
    ):
        """Append one or more additional datasets after construction and
        rebuild the minimized pixel map and shape outlines."""
        if isinstance(data, (str, np.ndarray)):
            data_imported = [data]
        else:
            data_imported = data
        for data in data_imported:
            self.read_pixel_data(data, infer_regions=infer_regions)
        self.data.extend(data_imported)
        self._pixelmap_minimized: np.ndarray = self._make_pixel_map_at_ims(
            map_type="minimized", randomize=True
        )
        self.pixelmap_padded: NDArray = self._pixelmap_minimized
        self._shape_map_minimized = self._make_shape_map(map_type="minimized")

    def _get_xy_extents_coords(
        self, map_type: str = "minimized"
    ) -> Tuple[int, int, NDArray, NDArray]:
        """Return (y_extent, x_extent, y_coords, x_coords) for the requested
        coordinate variant ("minimized", "padded" or "original").

        NOTE(review): an unknown ``map_type`` leaves all four locals unbound
        (NameError on return) — hence the ``type: ignore`` below.
        """
        if map_type == "minimized":
            y_extent = int(np.max(self.y_coords_min) + 1)
            x_extent = int(np.max(self.x_coords_min) + 1)
            y_coords = self.y_coords_min
            x_coords = self.x_coords_min
        elif map_type == "padded":
            y_extent = int(np.max(self.y_coords_pad) + 1)
            x_extent = int(np.max(self.x_coords_pad) + 1)
            y_coords = self.y_coords_pad
            x_coords = self.x_coords_pad
        elif map_type == "original":
            y_extent = int(np.max(self.y_coords_orig) + 1)
            x_extent = int(np.max(self.x_coords_orig) + 1)
            y_coords = self.y_coords_orig
            x_coords = self.x_coords_orig
        return y_extent, x_extent, y_coords, x_coords  # type:ignore

    def approx_polygon_contour(
        self, mask: np.ndarray, percent_arc_length: float = 0.01
    ) -> np.ndarray:
        """Approximate binary mask contours to polygon vertices using cv2.
        Parameters
        ----------
        mask : numpy.ndarray
            2-d numpy array of datatype np.uint8.
        percent_arc_length : float
            scaling of epsilon for polygon approximate vertices accuracy.
            maximum distance of new vertices from original.
        Returns
        -------
        numpy.ndarray
            returns an 2d array of vertices, rows: points, columns: y,x
        """
        contours, hierarchy = cv2.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )
        # if multiple contours are found, keep only the largest one
        if len(contours) > 1:
            contours = [contours[np.argmax([cnt.shape[0] for cnt in contours])]]
        epsilon = percent_arc_length * cv2.arcLength(contours[0], True)
        if len(contours[0]) > 1000:
            contours = cv2.approxPolyDP(contours[0], epsilon, True)
        elif len(contours[0]) == 1:
            # degenerate single-point contour: duplicate it so downstream
            # consumers always get at least two vertices
            ct1 = contours[0]
            ct2 = ct1 + 1
            contours = np.vstack([ct1, ct2])
        # swap columns from (x, y) to (y, x)
        return np.squeeze(contours)[:, [1, 0]]

    def _approximate_roi(self, pix_map_in: np.ndarray, roi: int) -> np.ndarray:
        """Build a binary mask for one region label and return its
        approximated polygon outline (rows: points, columns: y, x)."""
        pix_map = np.zeros_like(pix_map_in)
        pix_map[pix_map_in == roi] = 255
        pix_map[pix_map_in != roi] = 0
        pix_map = pix_map.astype(np.uint8)
        # dilate until the ROI mask is a single connected component
        # (background + one foreground component == 2 labels)
        while cv2.connectedComponents(pix_map)[0] > 2:
            pix_map = cv2.dilate(pix_map, np.ones((3, 3), np.uint8))
        return self.approx_polygon_contour(pix_map, 0.001)

    def _make_shape_map(
        self, map_type: str = "minimized"
    ) -> List[Tuple[str, np.ndarray]]:
        """Return one (region_name, polygon-vertices) pair per region for
        the requested coordinate variant."""
        y_extent, x_extent, y_coords, x_coords = self._get_xy_extents_coords(
            map_type=map_type
        )
        # factorize region labels so they rasterize as small integers
        region_names, region_indices = np.unique(self.regions, return_inverse=True)
        pix_map_arr = np.zeros((y_extent, x_extent), dtype=np.int32)
        # +1 keeps label 0 reserved for the background
        pix_map_arr[y_coords, x_coords] = region_indices + 1
        ims_rois = [
            (region_name, self._approximate_roi(pix_map_arr, roi + 1))
            for region_name, roi in zip(region_names, np.unique(region_indices))
        ]
        return ims_rois

    def _make_pixel_map_at_ims(
        self, map_type: str = "minimized", randomize: bool = True
    ) -> np.ndarray:
        """Rasterize the pixel coordinates into a uint8 image; occupied
        pixels get random intensities (85-255) when ``randomize`` is True,
        otherwise a constant 255."""
        y_extent, x_extent, y_coords, x_coords = self._get_xy_extents_coords(map_type)
        pix_map_arr = np.zeros((y_extent, x_extent), dtype=np.uint8)
        if randomize is True:
            pix_map_arr[y_coords, x_coords] = np.random.randint(
                85, 255, len(y_coords), dtype=np.uint8
            )
        else:
            pix_map_arr[y_coords, x_coords] = 255
        return pix_map_arr

    def make_pixel_map_mz_fill(
        self, mz_vals: Union[list, np.ndarray], map_type: str = "minimized"
    ) -> np.ndarray:
        """Rasterize per-pixel intensity values (``mz_vals``, one value per
        pixel in coordinate order) into a float32 image."""
        y_extent, x_extent, y_coords, x_coords = self._get_xy_extents_coords(map_type)
        pix_map_arr = np.zeros((y_extent, x_extent), dtype=np.float32)
        pix_map_arr[y_coords, x_coords] = mz_vals
        return pix_map_arr

    def delete_roi(self, roi_name: str, remove_padding: bool = True) -> None:
        """Remove all pixels belonging to one region.

        If ``remove_padding`` is True the minimized map and extents are
        rebuilt from scratch (padding is dropped); otherwise the removed
        pixels are just blanked out of the existing padded map.
        """
        roi_name_np = np.asarray(roi_name, dtype=self.regions.dtype)  # type:ignore
        # non_roi_idx selects the pixels to keep; roi_idx those to drop
        non_roi_idx = self.regions != roi_name_np
        roi_idx = np.invert(non_roi_idx)
        self.regions = self.regions[non_roi_idx]  # type:ignore
        self.x_coords_orig = self.x_coords_orig[non_roi_idx]  # type:ignore
        self.y_coords_orig = self.y_coords_orig[non_roi_idx]  # type:ignore
        # re-minimize after removal (the minimum may have changed)
        self.x_coords_min = self.x_coords_orig - np.min(self.x_coords_orig)
        self.y_coords_min = self.y_coords_orig - np.min(self.y_coords_orig)
        if remove_padding is True:
            self.x_coords_pad = self.x_coords_min
            self.y_coords_pad = self.y_coords_min
            self.x_extent_pad = np.max(self.x_coords_min) + 1
            self.y_extent_pad = np.max(self.y_coords_min) + 1
            self._pixelmap_minimized = self._make_pixel_map_at_ims(
                map_type="minimized", randomize=True
            )
            self.pixelmap_padded = self._pixelmap_minimized
        else:
            # keep the padded canvas; zero out only the removed pixels
            self.pixelmap_padded[
                self.y_coords_pad[roi_idx], self.x_coords_pad[roi_idx]  # type:ignore
            ] = 0
            self.x_coords_pad = self.x_coords_pad[non_roi_idx]  # type:ignore
            self.y_coords_pad = self.y_coords_pad[non_roi_idx]  # type:ignore

    @property
    def padding(self):
        # current per-edge padding in IMS pixels
        return self._padding

    @padding.setter
    def padding(self, pad_values: Dict[str, int]) -> None:
        # NOTE: the setter is incremental — values are ADDED to the current
        # padding (clamped at 0), not assigned
        for k in pad_values.keys():
            self._padding[k] += pad_values[k]
            if self._padding[k] < 0:
                self._padding[k] = 0
        self._refresh_coords_after_pad()
        self._get_padding_microns()

    def _get_padding_microns(self) -> None:
        """Recompute ``padding_microns`` from the pixel padding and
        ``ims_res``."""
        for k in self._padding.keys():
            self.padding_microns[k] = int(self._padding[k] * self.ims_res)

    def _refresh_coords_after_pad(self) -> None:
        """Rebuild the padded pixel map, coordinates and extents from the
        minimized map plus the current padding offsets."""
        self.pixelmap_padded = np.pad(
            self._pixelmap_minimized,
            (
                (self.padding.get("y_top"), self.padding.get("y_bottom")),
                (self.padding.get("x_left"), self.padding.get("x_right")),
            ),
        )
        self.x_coords_pad = self.x_coords_min + self.padding.get("x_left")
        self.y_coords_pad = self.y_coords_min + self.padding.get("y_top")
        self.y_extent_pad = self.pixelmap_padded.shape[0]
        self.x_extent_pad = self.pixelmap_padded.shape[1]

    def reset_padding(self) -> None:
        """Drop all padding: the padded map/extents revert to the minimized
        map and the padding dicts are zeroed."""
        self.pixelmap_padded = self._pixelmap_minimized
        self.y_extent_pad = self.pixelmap_padded.shape[0]
        self.x_extent_pad = self.pixelmap_padded.shape[1]
        self._padding = {"x_left": 0, "x_right": 0, "y_top": 0, "y_bottom": 0}
        self._get_padding_microns()

    def prepare_pmap_metadata(self) -> Dict:
        """Return a serializable dict describing the source datasets and the
        current padding (in pixels and microns)."""
        return {
            "Pixel Map Datasets Files": self.data,
            "padding": {
                "x_left_padding (px)": self.padding["x_left"],
                "x_right_padding (px)": self.padding["x_right"],
                "y_top_padding (px)": self.padding["y_top"],
                "y_bottom_padding (px)": self.padding["y_bottom"],
                "x_left_padding (um)": self.padding_microns["x_left"],
                "x_right_padding (um)": self.padding_microns["x_right"],
                "y_top_padding (um)": self.padding_microns["y_top"],
                "y_bottom_padding (um)": self.padding_microns["y_bottom"],
            },
        }

    def prepare_pmap_dataframe(self) -> pd.DataFrame:
        """Return a per-pixel DataFrame with region labels and all three
        coordinate variants (the format read back by the CSV importer)."""
        return pd.DataFrame(
            {
                "regions": self.regions,
                "x_original": self.x_coords_orig,
                "y_original": self.y_coords_orig,
                "x_minimized": self.x_coords_min,
                "y_minimized": self.y_coords_min,
                "x_padded": self.x_coords_pad,
                "y_padded": self.y_coords_pad,
            }
        )

    def rotate_coordinates(
        self, rotation_angle: int, fiducial_pts: Optional[np.ndarray] = None
    ) -> Optional[np.ndarray]:
        """Rotate the minimized coordinates (and optionally fiducial points)
        by a multiple of 90 degrees about the map center, remapping the
        edge padding to follow the rotation.

        Returns the rotated fiducial points when ``fiducial_pts`` is given,
        otherwise None.

        NOTE(review): only +/-90, +/-180 and +/-270 are handled; any other
        angle leaves ``rotmat``/``recenter_point`` unbound and raises
        NameError below — consider validating ``rotation_angle`` up front.
        """
        # translate center of mass to be near origin
        mean_x = np.max(self.x_coords_min) / 2
        mean_y = np.max(self.y_coords_min) / 2
        center_point = [mean_x, mean_y]
        # rotate around origin
        new_padding = dict()
        if rotation_angle in [90, -270]:
            rotmat = np.asarray([[0, 1], [-1, 0]])
            # edges move with the rotation: top -> left, bottom -> right, ...
            new_padding.update(
                {
                    "x_left": self._padding["y_top"],
                    "x_right": self._padding["y_bottom"],
                    "y_top": self._padding["x_right"],
                    "y_bottom": self._padding["x_left"],
                }
            )
            recenter_point = [mean_x, mean_y]
        elif rotation_angle in [-90, 270]:
            rotmat = np.asarray([[0, -1], [1, 0]])
            new_padding.update(
                {
                    "x_left": self._padding["y_bottom"],
                    "x_right": self._padding["y_top"],
                    "y_top": self._padding["x_left"],
                    "y_bottom": self._padding["x_right"],
                }
            )
            recenter_point = [mean_x, mean_y]
        elif rotation_angle in [-180, 180]:
            rotmat = np.asarray([[-1, 0], [0, -1]])
            new_padding.update(
                {
                    "x_left": self._padding["x_right"],
                    "x_right": self._padding["x_left"],
                    "y_top": self._padding["y_bottom"],
                    "y_bottom": self._padding["y_top"],
                }
            )
            # recenter_point = [mean_y, mean_x]
            recenter_point = [mean_x, mean_y]
        point_mat = np.column_stack([self.x_coords_min, self.y_coords_min])
        rotcoords = apply_rotmat_points(rotmat, point_mat, center_point, recenter_point)
        rotcoords = np.round(rotcoords).astype(np.uint32)
        self.x_coords_min = rotcoords[:, 0]
        self.y_coords_min = rotcoords[:, 1]
        self._pixelmap_minimized = self._make_pixel_map_at_ims(
            map_type="minimized", randomize=True
        )
        # reset then re-apply padding through the incremental setter so the
        # padded map/coords are rebuilt for the rotated orientation
        self.reset_padding()
        self.padding = new_padding
        self._shape_map_minimized = self._make_shape_map(map_type="minimized")
        if fiducial_pts is not None:
            return apply_rotmat_points(
                rotmat, fiducial_pts, center_point, recenter_point
            )
        else:
            return
| [
"numpy.invert",
"cv2.approxPolyDP",
"pandas.read_csv",
"cv2.arcLength",
"numpy.argmax",
"numpy.ones",
"pathlib.Path",
"lxml.etree.iterparse",
"numpy.arange",
"pandas.read_table",
"numpy.round",
"numpy.unique",
"pandas.DataFrame",
"napari_imsmicrolink.utils.points.apply_rotmat_points",
"n... | [((2509, 2618), 'pandas.read_table', 'pd.read_table', (['data_fp'], {'header': 'None', 'skiprows': '(2)', 'sep': '""" """', 'names': "['X-pos', 'Y-pos', 'spot-name', 'region']"}), "(data_fp, header=None, skiprows=2, sep=' ', names=['X-pos',\n 'Y-pos', 'spot-name', 'region'])\n", (2522, 2618), True, 'import pandas as pd\n'), ((2797, 2821), 'numpy.asarray', 'np.asarray', (["sl['region']"], {}), "(sl['region'])\n", (2807, 2821), True, 'import numpy as np\n'), ((2910, 2952), 'numpy.asarray', 'np.asarray', (['rxy.iloc[:, 1]'], {'dtype': 'np.int32'}), '(rxy.iloc[:, 1], dtype=np.int32)\n', (2920, 2952), True, 'import numpy as np\n'), ((2965, 3007), 'numpy.asarray', 'np.asarray', (['rxy.iloc[:, 2]'], {'dtype': 'np.int32'}), '(rxy.iloc[:, 2], dtype=np.int32)\n', (2975, 3007), True, 'import numpy as np\n'), ((3164, 3188), 'sqlite3.connect', 'sqlite3.connect', (['data_fp'], {}), '(data_fp)\n', (3179, 3188), False, 'import sqlite3\n'), ((3835, 3859), 'lxml.etree.iterparse', 'etree.iterparse', (['data_fp'], {}), '(data_fp)\n', (3850, 3859), False, 'from lxml import etree\n'), ((5926, 5959), 'pandas.read_csv', 'pd.read_csv', (['data_fp'], {'comment': '"""#"""'}), "(data_fp, comment='#')\n", (5937, 5959), True, 'import pandas as pd\n'), ((10764, 10828), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (10780, 10828), False, 'import cv2\n'), ((11426, 11451), 'numpy.zeros_like', 'np.zeros_like', (['pix_map_in'], {}), '(pix_map_in)\n', (11439, 11451), True, 'import numpy as np\n'), ((12025, 12069), 'numpy.unique', 'np.unique', (['self.regions'], {'return_inverse': '(True)'}), '(self.regions, return_inverse=True)\n', (12034, 12069), True, 'import numpy as np\n'), ((12093, 12139), 'numpy.zeros', 'np.zeros', (['(y_extent, x_extent)'], {'dtype': 'np.int32'}), '((y_extent, x_extent), dtype=np.int32)\n', (12101, 12139), True, 'import numpy as np\n'), ((12642, 12688), 
'numpy.zeros', 'np.zeros', (['(y_extent, x_extent)'], {'dtype': 'np.uint8'}), '((y_extent, x_extent), dtype=np.uint8)\n', (12650, 12688), True, 'import numpy as np\n'), ((13187, 13235), 'numpy.zeros', 'np.zeros', (['(y_extent, x_extent)'], {'dtype': 'np.float32'}), '((y_extent, x_extent), dtype=np.float32)\n', (13195, 13235), True, 'import numpy as np\n'), ((13416, 13462), 'numpy.asarray', 'np.asarray', (['roi_name'], {'dtype': 'self.regions.dtype'}), '(roi_name, dtype=self.regions.dtype)\n', (13426, 13462), True, 'import numpy as np\n'), ((13546, 13568), 'numpy.invert', 'np.invert', (['non_roi_idx'], {}), '(non_roi_idx)\n', (13555, 13568), True, 'import numpy as np\n'), ((16928, 17177), 'pandas.DataFrame', 'pd.DataFrame', (["{'regions': self.regions, 'x_original': self.x_coords_orig, 'y_original':\n self.y_coords_orig, 'x_minimized': self.x_coords_min, 'y_minimized':\n self.y_coords_min, 'x_padded': self.x_coords_pad, 'y_padded': self.\n y_coords_pad}"], {}), "({'regions': self.regions, 'x_original': self.x_coords_orig,\n 'y_original': self.y_coords_orig, 'x_minimized': self.x_coords_min,\n 'y_minimized': self.y_coords_min, 'x_padded': self.x_coords_pad,\n 'y_padded': self.y_coords_pad})\n", (16940, 17177), True, 'import pandas as pd\n'), ((19110, 19165), 'numpy.column_stack', 'np.column_stack', (['[self.x_coords_min, self.y_coords_min]'], {}), '([self.x_coords_min, self.y_coords_min])\n', (19125, 19165), True, 'import numpy as np\n'), ((19187, 19255), 'napari_imsmicrolink.utils.points.apply_rotmat_points', 'apply_rotmat_points', (['rotmat', 'point_mat', 'center_point', 'recenter_point'], {}), '(rotmat, point_mat, center_point, recenter_point)\n', (19206, 19255), False, 'from napari_imsmicrolink.utils.points import apply_rotmat_points\n'), ((3470, 3488), 'h5py.File', 'h5py.File', (['data_fp'], {}), '(data_fp)\n', (3479, 3488), False, 'import h5py\n'), ((5016, 5052), 'cv2.connectedComponents', 'cv2.connectedComponents', (['pix_map_arr'], {}), '(pix_map_arr)\n', 
(5039, 5052), False, 'import cv2\n'), ((5147, 5168), 'numpy.arange', 'np.arange', (['(1)', 'n_cc', '(1)'], {}), '(1, n_cc, 1)\n', (5156, 5168), True, 'import numpy as np\n'), ((5408, 5429), 'numpy.concatenate', 'np.concatenate', (['cc_xs'], {}), '(cc_xs)\n', (5422, 5429), True, 'import numpy as np\n'), ((5449, 5470), 'numpy.concatenate', 'np.concatenate', (['cc_ys'], {}), '(cc_ys)\n', (5463, 5470), True, 'import numpy as np\n'), ((5490, 5511), 'numpy.concatenate', 'np.concatenate', (['cc_rs'], {}), '(cc_rs)\n', (5504, 5511), True, 'import numpy as np\n'), ((5537, 5572), 'numpy.column_stack', 'np.column_stack', (['[cc_r, cc_x, cc_y]'], {}), '([cc_r, cc_x, cc_y])\n', (5552, 5572), True, 'import numpy as np\n'), ((11003, 11035), 'cv2.arcLength', 'cv2.arcLength', (['contours[0]', '(True)'], {}), '(contours[0], True)\n', (11016, 11035), False, 'import cv2\n'), ((11096, 11140), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['contours[0]', 'epsilon', '(True)'], {}), '(contours[0], epsilon, True)\n', (11112, 11140), False, 'import cv2\n'), ((11294, 11314), 'numpy.squeeze', 'np.squeeze', (['contours'], {}), '(contours)\n', (11304, 11314), True, 'import numpy as np\n'), ((13835, 13861), 'numpy.min', 'np.min', (['self.x_coords_orig'], {}), '(self.x_coords_orig)\n', (13841, 13861), True, 'import numpy as np\n'), ((13911, 13937), 'numpy.min', 'np.min', (['self.y_coords_orig'], {}), '(self.y_coords_orig)\n', (13917, 13937), True, 'import numpy as np\n'), ((17523, 17548), 'numpy.max', 'np.max', (['self.x_coords_min'], {}), '(self.x_coords_min)\n', (17529, 17548), True, 'import numpy as np\n'), ((17570, 17595), 'numpy.max', 'np.max', (['self.y_coords_min'], {}), '(self.y_coords_min)\n', (17576, 17595), True, 'import numpy as np\n'), ((17763, 17792), 'numpy.asarray', 'np.asarray', (['[[0, 1], [-1, 0]]'], {}), '([[0, 1], [-1, 0]])\n', (17773, 17792), True, 'import numpy as np\n'), ((19726, 19797), 'napari_imsmicrolink.utils.points.apply_rotmat_points', 'apply_rotmat_points', (['rotmat', 
'fiducial_pts', 'center_point', 'recenter_point'], {}), '(rotmat, fiducial_pts, center_point, recenter_point)\n', (19745, 19797), False, 'from napari_imsmicrolink.utils.points import apply_rotmat_points\n'), ((3541, 3564), 'numpy.asarray', 'np.asarray', (["f['region']"], {}), "(f['region'])\n", (3551, 3564), True, 'import numpy as np\n'), ((3582, 3600), 'numpy.asarray', 'np.asarray', (["f['x']"], {}), "(f['x'])\n", (3592, 3600), True, 'import numpy as np\n'), ((3618, 3636), 'numpy.asarray', 'np.asarray', (["f['y']"], {}), "(f['y'])\n", (3628, 3636), True, 'import numpy as np\n'), ((4725, 4764), 'numpy.asarray', 'np.asarray', (['coordinates'], {'dtype': 'np.int32'}), '(coordinates, dtype=np.int32)\n', (4735, 4764), True, 'import numpy as np\n'), ((5199, 5218), 'numpy.where', 'np.where', (['(cc == roi)'], {}), '(cc == roi)\n', (5207, 5218), True, 'import numpy as np\n'), ((8084, 8123), 'numpy.concatenate', 'np.concatenate', (['[self.regions, regions]'], {}), '([self.regions, regions])\n', (8098, 8123), True, 'import numpy as np\n'), ((8161, 8200), 'numpy.concatenate', 'np.concatenate', (['[self.x_coords_orig, x]'], {}), '([self.x_coords_orig, x])\n', (8175, 8200), True, 'import numpy as np\n'), ((8238, 8277), 'numpy.concatenate', 'np.concatenate', (['[self.y_coords_orig, y]'], {}), '([self.y_coords_orig, y])\n', (8252, 8277), True, 'import numpy as np\n'), ((8332, 8358), 'numpy.min', 'np.min', (['self.x_coords_orig'], {}), '(self.x_coords_orig)\n', (8338, 8358), True, 'import numpy as np\n'), ((8412, 8438), 'numpy.min', 'np.min', (['self.y_coords_orig'], {}), '(self.y_coords_orig)\n', (8418, 8438), True, 'import numpy as np\n'), ((11256, 11277), 'numpy.vstack', 'np.vstack', (['[ct1, ct2]'], {}), '([ct1, ct2])\n', (11265, 11277), True, 'import numpy as np\n'), ((11590, 11622), 'cv2.connectedComponents', 'cv2.connectedComponents', (['pix_map'], {}), '(pix_map)\n', (11613, 11622), False, 'import cv2\n'), ((11673, 11698), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], 
{}), '((3, 3), np.uint8)\n', (11680, 11698), True, 'import numpy as np\n'), ((14106, 14131), 'numpy.max', 'np.max', (['self.x_coords_min'], {}), '(self.x_coords_min)\n', (14112, 14131), True, 'import numpy as np\n'), ((14168, 14193), 'numpy.max', 'np.max', (['self.y_coords_min'], {}), '(self.y_coords_min)\n', (14174, 14193), True, 'import numpy as np\n'), ((18210, 18239), 'numpy.asarray', 'np.asarray', (['[[0, -1], [1, 0]]'], {}), '([[0, -1], [1, 0]])\n', (18220, 18239), True, 'import numpy as np\n'), ((19276, 19295), 'numpy.round', 'np.round', (['rotcoords'], {}), '(rotcoords)\n', (19284, 19295), True, 'import numpy as np\n'), ((5242, 5276), 'numpy.ones_like', 'np.ones_like', (['cc_x'], {'dtype': 'np.int32'}), '(cc_x, dtype=np.int32)\n', (5254, 5276), True, 'import numpy as np\n'), ((9397, 9422), 'numpy.max', 'np.max', (['self.y_coords_min'], {}), '(self.y_coords_min)\n', (9403, 9422), True, 'import numpy as np\n'), ((9455, 9480), 'numpy.max', 'np.max', (['self.x_coords_min'], {}), '(self.x_coords_min)\n', (9461, 9480), True, 'import numpy as np\n'), ((10915, 10960), 'numpy.argmax', 'np.argmax', (['[cnt.shape[0] for cnt in contours]'], {}), '([cnt.shape[0] for cnt in contours])\n', (10924, 10960), True, 'import numpy as np\n'), ((12349, 12374), 'numpy.unique', 'np.unique', (['region_indices'], {}), '(region_indices)\n', (12358, 12374), True, 'import numpy as np\n'), ((18658, 18688), 'numpy.asarray', 'np.asarray', (['[[-1, 0], [0, -1]]'], {}), '([[-1, 0], [0, -1]])\n', (18668, 18688), True, 'import numpy as np\n'), ((4910, 4919), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4916, 4919), True, 'import numpy as np\n'), ((4925, 4934), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4931, 4934), True, 'import numpy as np\n'), ((5632, 5649), 'pandas.DataFrame', 'pd.DataFrame', (['rxy'], {}), '(rxy)\n', (5644, 5649), True, 'import pandas as pd\n'), ((5651, 5674), 'pandas.DataFrame', 'pd.DataFrame', (['infer_rxy'], {}), '(infer_rxy)\n', (5663, 5674), True, 'import pandas 
as pd\n'), ((7976, 7996), 'numpy.max', 'np.max', (['self.regions'], {}), '(self.regions)\n', (7982, 7996), True, 'import numpy as np\n'), ((9631, 9656), 'numpy.max', 'np.max', (['self.y_coords_pad'], {}), '(self.y_coords_pad)\n', (9637, 9656), True, 'import numpy as np\n'), ((9689, 9714), 'numpy.max', 'np.max', (['self.x_coords_pad'], {}), '(self.x_coords_pad)\n', (9695, 9714), True, 'import numpy as np\n'), ((6520, 6530), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (6524, 6530), False, 'from pathlib import Path\n'), ((9867, 9893), 'numpy.max', 'np.max', (['self.y_coords_orig'], {}), '(self.y_coords_orig)\n', (9873, 9893), True, 'import numpy as np\n'), ((9926, 9952), 'numpy.max', 'np.max', (['self.x_coords_orig'], {}), '(self.x_coords_orig)\n', (9932, 9952), True, 'import numpy as np\n'), ((6824, 6834), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (6828, 6834), False, 'from pathlib import Path\n'), ((6935, 6945), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (6939, 6945), False, 'from pathlib import Path\n'), ((7077, 7087), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (7081, 7087), False, 'from pathlib import Path\n'), ((7180, 7190), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (7184, 7190), False, 'from pathlib import Path\n'), ((7292, 7302), 'pathlib.Path', 'Path', (['data'], {}), '(data)\n', (7296, 7302), False, 'from pathlib import Path\n')] |
# noqa: D100
from __future__ import annotations
import numpy as np
import xarray as xr
from numba import float32, float64, vectorize
from xclim.core.calendar import date_range, datetime_to_decimal_year
from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint
from xclim.indices.helpers import (
cosine_of_solar_zenith_angle,
day_lengths,
distance_from_sun,
extraterrestrial_solar_radiation,
solar_declination,
time_correction_for_solar_angle,
)
# Public names exported by ``from <this module> import *``.
__all__ = [
    "humidex",
    "heat_index",
    "tas",
    "uas_vas_2_sfcwind",
    "sfcwind_2_uas_vas",
    "saturation_vapor_pressure",
    "relative_humidity",
    "specific_humidity",
    "specific_humidity_from_dewpoint",
    "snowfall_approximation",
    "rain_approximation",
    "wind_chill_index",
    "clausius_clapeyron_scaled_precipitation",
    "potential_evapotranspiration",
    "universal_thermal_climate_index",
    "mean_radiant_temperature",
]
@declare_units(tas="[temperature]", tdps="[temperature]", hurs="[]")
def humidex(
    tas: xr.DataArray,
    tdps: xr.DataArray | None = None,
    hurs: xr.DataArray | None = None,
) -> xr.DataArray:
    r"""Humidex index.

    The humidex expresses how hot the air feels to an average person by folding the
    effect of humidity into the temperature. It can be read as the equivalent
    perceived temperature for dry air.

    Parameters
    ----------
    tas : xarray.DataArray
        Air temperature.
    tdps : xarray.DataArray,
        Dewpoint temperature.
    hurs : xarray.DataArray
        Relative humidity.

    Returns
    -------
    xarray.DataArray, [temperature]
        The humidex index.

    Notes
    -----
    Exactly one of `tdps` or `hurs` must be supplied. Following [masterton79]_:

    .. math::

        T + {\frac {5}{9}}\left[e - 10\right]

    where :math:`T` is the dry bulb air temperature (°C) and :math:`e` the water
    vapour pressure (hPa). When the dewpoint :math:`T_{dewpoint}` (°K) is given
    ([mekis15]_):

    .. math::

        e = 6.112 \times \exp(5417.7530\left({\frac {1}{273.16}}-{\frac {1}{T_{\text{dewpoint}}}}\right)

    and when the relative humidity :math:`h` (%) is given instead ([sirangelo20]_):

    .. math::

        e = \frac{h}{100} \times 6.112 * 10^{7.5 T/(T + 237.7)}.

    Humidex *comfort scale* ([eccc]_): 20–29 no discomfort; 30–39 some discomfort;
    40–45 great discomfort, avoid exertion; 46+ dangerous, possible heat stroke.

    Unlike the heat index, which balances many variables and uses a 14 °C dewpoint
    base, the humidex relies solely on vapour pressure with a 7 °C dewpoint base.

    References
    ----------
    .. [masterton79] <NAME>., & <NAME>. (1979). HUMIDEX, A method of quantifying human discomfort due to excessive heat and humidity, CLI 1-79. Downsview, Ontario: Environment Canada, Atmospheric Environment Service.
    .. [mekis15] <NAME>, <NAME>, <NAME> & <NAME> (2015) Observed Trends in Severe Weather Conditions Based on Humidex, Wind Chill, and Heavy Rainfall Events in Canada for 1953–2012, Atmosphere-Ocean, 53:4, 383-397, DOI: 10.1080/07055900.2015.1086970
    .. [sirangelo20] <NAME>., <NAME>. et al. Combining stochastic models of air temperature and vapour pressure for the analysis of the bioclimatic comfort through the Humidex. Sci Rep 10, 11395 (2020). https://doi.org/10.1038/s41598-020-68297-4
    .. [eccc] https://climate.weather.gc.ca/glossary_e.html
    """
    if (tdps is None) == (hurs is None):
        raise ValueError(
            "At least one of `tdps` or `hurs` must be given, and not both."
        )

    # Water vapour pressure in hPa, from whichever humidity variable was given.
    if tdps is not None:
        dewpoint_k = convert_units_to(tdps, "kelvin")
        vap_p = 6.112 * np.exp(5417.7530 * (1 / 273.16 - 1.0 / dewpoint_k))
    else:
        tas_c = convert_units_to(tas, "celsius")
        vap_p = hurs / 100 * 6.112 * 10 ** (7.5 * tas_c / (tas_c + 237.7))

    # Humidity contribution, expressed as a temperature delta.
    delta = 5 / 9 * (vap_p - 10)
    delta.attrs["units"] = "delta_degree_Celsius"

    # Subtracting two quantities in tas' units yields the matching *delta* unit,
    # which is what the addition below needs.
    delta_units = (1 * units2pint(tas) - 0 * units2pint(tas)).units
    delta = convert_units_to(delta, delta_units)

    result = delta + tas
    result.attrs["units"] = tas.units
    return result
@declare_units(tasmax="[temperature]", hurs="[]")
def heat_index(tasmax: xr.DataArray, hurs: xr.DataArray) -> xr.DataArray:
    r"""Daily heat index.

    Perceived temperature once relative humidity is accounted for ([Blazejczyk2012]_).
    The regression is only valid above 20°C; cooler days are returned as NaN.

    Parameters
    ----------
    tasmax : xr.DataArray
        Maximum daily temperature.
    hurs : xr.DataArray
        Relative humidity.

    Returns
    -------
    xr.DataArray, [time][temperature]
        Heat index for days with temperature above 20°C.

    References
    ----------
    .. [Blazejczyk2012] <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Comparison of UTCI to selected thermal indices. International journal of biometeorology, 56(3), 515-535.

    Notes
    -----
    Both the humidex and the heat index rest on dewpoint, but the heat index uses
    a 14 °C (57 °F) dewpoint base and heat-balance equations covering many
    variables, whereas the humidex uses a 7 °C (45 °F) base and vapour pressure
    alone.
    """
    # Mask out temperatures at or below the 20°C validity threshold.
    min_valid = convert_units_to("20.0 degC", "degC")
    temp = convert_units_to(tasmax, "degC")
    temp = temp.where(temp > min_valid)
    relh = convert_units_to(hurs, "%")

    # Precompute the cross terms of the Rothfusz regression.
    t_r = temp * relh
    t2 = temp * temp
    r2 = relh * relh

    hi = (
        -8.78469475556
        + 1.61139411 * temp
        + 2.33854883889 * relh
        - 0.14611605 * t_r
        - 0.012308094 * t2
        - 0.0164248277778 * r2
        + 0.002211732 * t2 * relh
        + 0.00072546 * temp * r2
        - 0.000003582 * t2 * r2
    )
    hi = hi.assign_attrs(units="degC")
    return convert_units_to(hi, tasmax.units)
@declare_units(tasmin="[temperature]", tasmax="[temperature]")
def tas(tasmin: xr.DataArray, tasmax: xr.DataArray) -> xr.DataArray:
    """Average temperature from minimum and maximum temperatures.

    Assuming a symmetrical temperature distribution, the daily mean is estimated
    as Tg = (Tx + Tn) / 2.

    Parameters
    ----------
    tasmin : xarray.DataArray
        Minimum (daily) temperature
    tasmax : xarray.DataArray
        Maximum (daily) temperature

    Returns
    -------
    xarray.DataArray
        Mean (daily) temperature [same units as tasmin]
    """
    # Align tasmax with tasmin's units before averaging.
    tmax = convert_units_to(tasmax, tasmin)
    mean_temp = (tasmin + tmax) / 2
    mean_temp.attrs["units"] = tasmin.attrs["units"]
    return mean_temp
@declare_units(uas="[speed]", vas="[speed]", calm_wind_thresh="[speed]")
def uas_vas_2_sfcwind(
    uas: xr.DataArray, vas: xr.DataArray, calm_wind_thresh: str = "0.5 m/s"
) -> tuple[xr.DataArray, xr.DataArray]:
    """Wind speed and direction from the eastward and northward wind components.

    Computes the magnitude and angle of the wind vector from its northward and
    eastward components, following the meteorological convention: calm winds get a
    direction of 0° and northerly winds 360°.

    Parameters
    ----------
    uas : xr.DataArray
        Eastward wind velocity
    vas : xr.DataArray
        Northward wind velocity
    calm_wind_thresh : str
        The threshold under which winds are considered "calm" and for which the direction
        is set to 0. On the Beaufort scale, calm winds are defined as < 0.5 m/s.

    Returns
    -------
    wind : xr.DataArray, [m s-1]
        Wind velocity
    wind_from_dir : xr.DataArray, [°]
        Direction from which the wind blows, following the meteorological convention where
        360 stands for North and 0 for calm winds.

    Notes
    -----
    Winds slower than `calm_wind_thresh` are assigned a direction of 0°, while
    stronger northerly winds are assigned 360°.
    """
    uas = convert_units_to(uas, "m/s")
    vas = convert_units_to(vas, "m/s")
    calm_thresh = convert_units_to(calm_wind_thresh, "m/s")

    # Speed is the magnitude of the (uas, vas) vector.
    wind = np.hypot(uas, vas)
    wind.attrs["units"] = "m s-1"

    # Angle in the mathematical convention (CCW from east), then converted to the
    # meteorological convention (CW from north, "blowing from").
    math_dir = np.degrees(np.arctan2(vas, uas))
    met_dir = (270 - math_dir) % 360.0

    # Northerly winds are reported as 360° so that 0° is reserved for calm winds
    # (below the Beaufort-scale calm threshold).
    met_dir = xr.where(met_dir.round() == 0, 360, met_dir)
    met_dir = xr.where(wind < calm_thresh, 0, met_dir)
    met_dir.attrs["units"] = "degree"
    return wind, met_dir
@declare_units(sfcWind="[speed]", sfcWindfromdir="[]")
def sfcwind_2_uas_vas(
    sfcWind: xr.DataArray, sfcWindfromdir: xr.DataArray  # noqa
) -> tuple[xr.DataArray, xr.DataArray]:
    """Eastward and northward wind components from the wind speed and direction.

    Compute the eastward and northward wind components from the wind speed and direction.

    Parameters
    ----------
    sfcWind : xr.DataArray
        Wind velocity
    sfcWindfromdir : xr.DataArray
        Direction from which the wind blows, following the meteorological convention
        where 360 stands for North.

    Returns
    -------
    uas : xr.DataArray, [m s-1]
        Eastward wind velocity.
    vas : xr.DataArray, [m s-1]
        Northward wind velocity.
    """
    sfcWind = convert_units_to(sfcWind, "m/s")  # noqa

    # Meteorological convention (CW from north, "blowing from") to the
    # mathematical convention (CCW from east), then to radians.
    # TODO: support resampling of sub-daily wind by averaging the direction
    # around the circle before decomposing (previous draft removed for clarity).
    math_dir_rad = np.radians((-sfcWindfromdir + 270) % 360.0)

    uas = sfcWind * np.cos(math_dir_rad)
    vas = sfcWind * np.sin(math_dir_rad)
    uas.attrs["units"] = "m s-1"
    vas.attrs["units"] = "m s-1"
    return uas, vas
@declare_units(tas="[temperature]", ice_thresh="[temperature]")
def saturation_vapor_pressure(
    tas: xr.DataArray, ice_thresh: str | None = None, method: str = "sonntag90"  # noqa
) -> xr.DataArray:
    """Saturation vapor pressure from temperature.

    Parameters
    ----------
    tas : xr.DataArray
        Temperature array.
    ice_thresh : str
        Threshold temperature under which to switch to equations in reference to ice instead of water.
        If None (default) everything is computed with reference to water.
    method : {"goffgratch46", "sonntag90", "tetens30", "wmo08", "its90"}
        Which method to use, see notes.

    Returns
    -------
    xarray.DataArray, [Pa]
        Saturation vapor pressure.

    Notes
    -----
    In all cases implemented here :math:`log(e_{sat})` is an empirically fitted function (usually a polynomial)
    where coefficients can be different when ice is taken as reference instead of water. Available methods are:

    - "goffgratch46" or "GG46", based on [goffgratch46]_, values and equation taken from [voemel]_.
    - "sonntag90" or "SO90", taken from [sonntag90]_.
    - "tetens30" or "TE30", based on [tetens30]_, values and equation taken from [voemel]_.
    - "wmo08" or "WMO08", taken from [wmo08]_.
    - "its90" or "ITS90", taken from [its90]_.

    References
    ----------
    .. [goffgratch46] <NAME>., and <NAME> (1946) Low-pressure properties of water from -160 to 212 °F, in Transactions of the American Society of Heating and Ventilating Engineers, pp 95-122, presented at the 52nd annual meeting of the American Society of Heating and Ventilating Engineers, New York, 1946.
    .. [sonntag90] <NAME>. (1990). Important new values of the physical constants of 1986, vapour pressure formulations based on the ITS-90, and psychrometer formulae. Zeitschrift für Meteorologie, 40(5), 340-344.
    .. [tetens30] <NAME>. 1930. Über einige meteorologische Begriffe. Z. Geophys 6: 207-309.
    .. [voemel] https://cires1.colorado.edu/~voemel/vp.html
    .. [wmo08] World Meteorological Organization. (2008). Guide to meteorological instruments and methods of observation. Geneva, Switzerland: World Meteorological Organization. https://www.weather.gov/media/epz/mesonet/CWOP-WMO8.pdf
    .. [its90] <NAME>. (1998). ITS-90 formulations for vapor pressure, frostpoint temperature, dewpoint temperature, and enhancement factors in the range–100 to+ 100 C. In The Proceedings of the Third International Symposium on Humidity & Moisture (pp. 1-8). https://www.thunderscientific.com/tech_info/reflibrary/its90formulas.pdf
    """
    # Points warmer than the threshold use the liquid-water formulation, colder
    # ones the ice formulation. With no threshold (0 K) everything is "water".
    if ice_thresh is not None:
        thresh = convert_units_to(ice_thresh, "degK")
    else:
        thresh = convert_units_to("0 K", "degK")
    ref_is_water = tas > thresh
    tas = convert_units_to(tas, "K")
    if method in ["sonntag90", "SO90"]:
        e_sat = xr.where(
            ref_is_water,
            100
            * np.exp(  # Where ref_is_water is True, x100 is to convert hPa to Pa
                -6096.9385 / tas  # type: ignore
                + 16.635794
                + -2.711193e-2 * tas  # type: ignore
                + 1.673952e-5 * tas**2
                + 2.433502 * np.log(tas)  # numpy's log is ln
            ),
            100
            * np.exp(  # Where ref_is_water is False (thus ref is ice)
                -6024.5282 / tas  # type: ignore
                + 24.7219
                + 1.0613868e-2 * tas  # type: ignore
                + -1.3198825e-5 * tas**2
                + -0.49382577 * np.log(tas)
            ),
        )
    elif method in ["tetens30", "TE30"]:
        e_sat = xr.where(
            ref_is_water,
            610.78 * np.exp(17.269388 * (tas - 273.16) / (tas - 35.86)),
            610.78 * np.exp(21.8745584 * (tas - 273.16) / (tas - 7.66)),
        )
    elif method in ["goffgratch46", "GG46"]:
        Tb = 373.16  # Water boiling temp [K]
        eb = 101325  # e_sat at Tb [Pa]
        Tp = 273.16  # Triple-point temperature [K]
        ep = 611.73  # e_sat at Tp [Pa]
        e_sat = xr.where(
            ref_is_water,
            eb
            * 10
            ** (
                -7.90298 * ((Tb / tas) - 1)  # type: ignore
                + 5.02808 * np.log10(Tb / tas)  # type: ignore
                + -1.3817e-7 * (10 ** (11.344 * (1 - tas / Tb)) - 1)
                + 8.1328e-3 * (10 ** (-3.49149 * ((Tb / tas) - 1)) - 1)  # type: ignore
            ),
            ep
            * 10
            ** (
                -9.09718 * ((Tp / tas) - 1)  # type: ignore
                + -3.56654 * np.log10(Tp / tas)  # type: ignore
                + 0.876793 * (1 - tas / Tp)
            ),
        )
    elif method in ["wmo08", "WMO08"]:
        e_sat = xr.where(
            ref_is_water,
            611.2 * np.exp(17.62 * (tas - 273.16) / (tas - 30.04)),
            611.2 * np.exp(22.46 * (tas - 273.16) / (tas - 0.54)),
        )
    elif method in ["its90", "ITS90"]:
        e_sat = xr.where(
            ref_is_water,
            np.exp(
                -2836.5744 / tas**2
                + -6028.076559 / tas
                + 19.54263612
                + -2.737830188e-2 * tas
                + 1.6261698e-5 * tas**2
                + 7.0229056e-10 * tas**3
                + -1.8680009e-13 * tas**4
                + 2.7150305 * np.log(tas)
            ),
            np.exp(
                -5866.6426 / tas
                + 22.32870244
                + 1.39387003e-2 * tas
                + -3.4262402e-5 * tas**2
                + 2.7040955e-8 * tas**3
                + 6.7063522e-1 * np.log(tas)
            ),
        )
    else:
        raise ValueError(
            f"Method {method} is not in ['sonntag90', 'tetens30', 'goffgratch46', 'wmo08', 'its90']"
        )
    e_sat.attrs["units"] = "Pa"
    return e_sat
@declare_units(
    tas="[temperature]",
    tdps="[temperature]",
    huss="[]",
    ps="[pressure]",
    ice_thresh="[temperature]",
)
def relative_humidity(
    tas: xr.DataArray,
    tdps: xr.DataArray | None = None,
    huss: xr.DataArray | None = None,
    ps: xr.DataArray | None = None,
    ice_thresh: str | None = None,
    method: str = "sonntag90",
    invalid_values: str = "clip",
) -> xr.DataArray:
    r"""Relative humidity.

    Compute relative humidity from temperature and either dewpoint temperature or specific humidity and pressure through
    the saturation vapor pressure.

    Parameters
    ----------
    tas : xr.DataArray
        Temperature array
    tdps : xr.DataArray
        Dewpoint temperature, if specified, overrides huss and ps.
    huss : xr.DataArray
        Specific humidity.
    ps : xr.DataArray
        Air Pressure.
    ice_thresh : str
        Threshold temperature under which to switch to equations in reference to ice instead of water.
        If None (default) everything is computed with reference to water. Does nothing if 'method' is "bohren98".
    method : {"bohren98", "goffgratch46", "sonntag90", "tetens30", "wmo08"}
        Which method to use, see notes of this function and of `saturation_vapor_pressure`.
    invalid_values : {"clip", "mask", None}
        What to do with values outside the 0-100 range. If "clip" (default), clips everything to 0 - 100,
        if "mask", replaces values outside the range by np.nan, and if `None`, does nothing.

    Returns
    -------
    xr.DataArray, [%]
        Relative humidity.

    Notes
    -----
    In the following, let :math:`T`, :math:`T_d`, :math:`q` and :math:`p` be the temperature,
    the dew point temperature, the specific humidity and the air pressure.

    **For the "bohren98" method** : This method does not use the saturation vapor pressure directly,
    but rather uses an approximation of the ratio of :math:`\frac{e_{sat}(T_d)}{e_{sat}(T)}`.
    With :math:`L` the enthalpy of vaporization of water and :math:`R_w` the gas constant for water vapor,
    the relative humidity is computed as:

    .. math::

        RH = e^{\frac{-L (T - T_d)}{R_wTT_d}}

    From [BohrenAlbrecht1998]_, formula taken from [Lawrence2005]_. :math:`L = 2.501\times 10^{6}` J kg-1, exact for :math:`T = 273.15` K, is used.

    **Other methods**: With :math:`w`, :math:`w_{sat}`, :math:`e_{sat}` the mixing ratio,
    the saturation mixing ratio and the saturation vapor pressure.
    If the dewpoint temperature is given, relative humidity is computed as:

    .. math::

        RH = 100\frac{e_{sat}(T_d)}{e_{sat}(T)}

    Otherwise, the specific humidity and the air pressure must be given so relative humidity can be computed as:

    .. math::

        RH = 100\frac{w}{w_{sat}}
        w = \frac{q}{1-q}
        w_{sat} = 0.622\frac{e_{sat}}{P - e_{sat}}

    The methods differ by how :math:`e_{sat}` is computed. See the doc of :py:meth:`xclim.core.utils.saturation_vapor_pressure`.

    References
    ----------
    .. [Lawrence2005] <NAME>. (2005). The Relationship between Relative Humidity and the Dewpoint Temperature in Moist Air: A Simple Conversion and Applications. Bull. Amer. Meteor. Soc., 86, 225–234, https://doi.org/10.1175/BAMS-86-2-225
    .. [BohrenAlbrecht1998] <NAME>, <NAME>. Atmospheric Thermodynamics. Oxford University Press, 1998.
    """
    # "BA98" matches Bohren & Albrecht (1998); "BA90" is kept as an accepted
    # alias for backward compatibility (likely a historical typo).
    if method in ("bohren98", "BA90", "BA98"):
        if tdps is None:
            raise ValueError("To use method 'bohren98' (BA98), dewpoint must be given.")
        tdps = convert_units_to(tdps, "degK")
        tas = convert_units_to(tas, "degK")
        L = 2.501e6  # enthalpy of vaporization of water [J kg-1], exact at 273.15 K
        # Gas constant for water vapor [J kg-1 K-1]. Was `(461.5,)`, a 1-tuple
        # that only worked through accidental numpy broadcasting.
        Rw = 461.5
        hurs = 100 * np.exp(-L * (tas - tdps) / (Rw * tas * tdps))  # type: ignore
    elif tdps is not None:
        # RH = 100 * e_sat(Td) / e_sat(T)
        e_sat_dt = saturation_vapor_pressure(
            tas=tdps, ice_thresh=ice_thresh, method=method
        )
        e_sat_t = saturation_vapor_pressure(
            tas=tas, ice_thresh=ice_thresh, method=method
        )
        hurs = 100 * e_sat_dt / e_sat_t  # type: ignore
    else:
        # RH from specific humidity and pressure through the mixing ratio.
        ps = convert_units_to(ps, "Pa")
        huss = convert_units_to(huss, "")
        tas = convert_units_to(tas, "degK")

        e_sat = saturation_vapor_pressure(tas=tas, ice_thresh=ice_thresh, method=method)

        w = huss / (1 - huss)
        w_sat = 0.62198 * e_sat / (ps - e_sat)  # type: ignore
        hurs = 100 * w / w_sat

    if invalid_values == "clip":
        hurs = hurs.clip(0, 100)
    elif invalid_values == "mask":
        hurs = hurs.where((hurs <= 100) & (hurs >= 0))
    hurs.attrs["units"] = "%"
    return hurs
@declare_units(
    tas="[temperature]",
    hurs="[]",
    ps="[pressure]",
    ice_thresh="[temperature]",
)
def specific_humidity(
    tas: xr.DataArray,
    hurs: xr.DataArray,
    ps: xr.DataArray,
    ice_thresh: str | None = None,
    method: str = "sonntag90",
    invalid_values: str | None = None,
) -> xr.DataArray:
    r"""Specific humidity from temperature, relative humidity and pressure.

    Specific humidity is the ratio between the mass of water vapour and the mass of moist air [WMO08]_.

    Parameters
    ----------
    tas : xr.DataArray
        Temperature array
    hurs : xr.DataArray
        Relative Humidity.
    ps : xr.DataArray
        Air Pressure.
    ice_thresh : str
        Threshold temperature under which to switch to equations in reference to ice instead of water.
        If None (default) everything is computed with reference to water.
    method : {"goffgratch46", "sonntag90", "tetens30", "wmo08"}
        Which method to use, see notes of this function and of `saturation_vapor_pressure`.
    invalid_values : {"clip", "mask", None}
        What to do with values larger than the saturation specific humidity and lower than 0.
        If "clip", clips everything to 0 - q_sat;
        if "mask", replaces values outside the range by np.nan;
        if None (default), does nothing.

    Returns
    -------
    xarray.DataArray, [dimensionless]
        Specific humidity.

    Notes
    -----
    In the following, let :math:`T`, :math:`hurs` (in %) and :math:`p` be the temperature,
    the relative humidity and the air pressure. With :math:`w`, :math:`w_{sat}`, :math:`e_{sat}` the mixing ratio,
    the saturation mixing ratio and the saturation vapor pressure, specific humidity :math:`q` is computed as:

    .. math::

        w_{sat} = 0.622\frac{e_{sat}}{P - e_{sat}}
        w = w_{sat} * hurs / 100
        q = w / (1 + w)

    The methods differ by how :math:`e_{sat}` is computed. See the doc of `xclim.core.utils.saturation_vapor_pressure`.

    If `invalid_values` is not `None`, the saturation specific humidity :math:`q_{sat}` is computed as:

    .. math::

        q_{sat} = w_{sat} / (1 + w_{sat})

    References
    ----------
    .. [WMO08] World Meteorological Organization. (2008). Guide to meteorological instruments and methods of observation. Geneva, Switzerland: World Meteorological Organization. https://www.weather.gov/media/epz/mesonet/CWOP-WMO8.pdf
    """
    ps = convert_units_to(ps, "Pa")
    hurs = convert_units_to(hurs, "")
    tas = convert_units_to(tas, "degK")
    e_sat = saturation_vapor_pressure(tas=tas, ice_thresh=ice_thresh, method=method)
    # Saturation mixing ratio, scaled by the (fractional) relative humidity.
    w_sat = 0.62198 * e_sat / (ps - e_sat)  # type: ignore
    w = w_sat * hurs
    q = w / (1 + w)
    if invalid_values is not None:
        # Physically admissible range is [0, q_sat].
        q_sat = w_sat / (1 + w_sat)
        if invalid_values == "clip":
            q = q.clip(0, q_sat)
        elif invalid_values == "mask":
            q = q.where((q <= q_sat) & (q >= 0))
    q.attrs["units"] = ""
    return q
@declare_units(
    tdps="[temperature]",
    ps="[pressure]",
)
def specific_humidity_from_dewpoint(
    tdps: xr.DataArray,
    ps: xr.DataArray,
    method: str = "sonntag90",
) -> xr.DataArray:
    r"""Specific humidity from dewpoint temperature and air pressure.

    Specific humidity is the ratio between the mass of water vapour and the mass of moist air [WMO08]_.

    Parameters
    ----------
    tdps : xr.DataArray
        Dewpoint temperature array.
    ps : xr.DataArray
        Air pressure array.
    method : {"goffgratch46", "sonntag90", "tetens30", "wmo08"}
        Method to compute the saturation vapor pressure.

    Returns
    -------
    xarray.DataArray, [dimensionless]
        Specific humidity.

    Notes
    -----
    With :math:`e` the water vapor pressure and :math:`p` the total air pressure,

    .. math::

        q = m_w e / ( m_a (p - e) + m_w e )

    where :math:`m_w` and :math:`m_a` are the molecular weights of water and dry air. Writing
    :math:`ε = m_w / m_a`, this simplifies to :math:`q = ε e / (p - e (1 - ε))`.

    References
    ----------
    .. [WMO08] World Meteorological Organization. (2008). Guide to meteorological instruments and methods of observation. Geneva, Switzerland: World Meteorological Organization. https://www.weather.gov/media/epz/mesonet/CWOP-WMO8.pdf
    """
    eps = 0.6219569  # weight of water vs dry air []
    press = convert_units_to(ps, "Pa")  # total air pressure
    # Vapor pressure [Pa] at the dewpoint, i.e. the actual vapor pressure.
    vap_p = saturation_vapor_pressure(tas=tdps, method=method)

    q = eps * vap_p / (press - vap_p * (1 - eps))
    q.attrs["units"] = ""
    return q
@declare_units(pr="[precipitation]", tas="[temperature]", thresh="[temperature]")
def snowfall_approximation(
    pr: xr.DataArray,
    tas: xr.DataArray,
    thresh: str = "0 degC",
    method: str = "binary",
) -> xr.DataArray:
    """Snowfall approximation from total precipitation and temperature.

    Solid precipitation estimated from precipitation and temperature according to a given method.

    Parameters
    ----------
    pr : xarray.DataArray
        Mean daily precipitation flux.
    tas : xarray.DataArray
        Mean, maximum, or minimum daily temperature.
    thresh : str
        Threshold temperature, used by method "binary".
    method : {"binary", "brown", "auer"}
        Which method to use when approximating snowfall from total precipitation. See notes.

    Returns
    -------
    xarray.DataArray, [same units as pr]
        Solid precipitation flux.

    Notes
    -----
    The following methods are available to approximate snowfall and are drawn from the
    Canadian Land Surface Scheme (CLASS, [Verseghy09]_).

    - ``'binary'`` : When the temperature is under the freezing threshold, precipitation
      is assumed to be solid. The method is agnostic to the type of temperature used
      (mean, maximum or minimum).
    - ``'brown'`` : The phase between the freezing threshold goes from solid to liquid linearly
      over a range of 2°C over the freezing point.
    - ``'auer'`` : The phase between the freezing threshold goes from solid to liquid as a degree six
      polynomial over a range of 6°C over the freezing point.

    References
    ----------
    .. [Verseghy09] <NAME> (2009), CLASS – The Canadian Land Surface Scheme (Version 3.4), Technical
        Documentation (Version 1.1), Environment Canada, Climate Research Division, Science and Technology Branch.
        https://gitlab.com/cccma/classic/-/blob/master/src/atmosphericVarsCalc.f90
    """
    if method == "binary":
        # All-or-nothing split at the freezing threshold.
        thresh = convert_units_to(thresh, tas)
        prsn = pr.where(tas <= thresh, 0)
    elif method == "brown":
        # Freezing point + 2C in the native units
        upper = convert_units_to(convert_units_to(thresh, "degC") + 2, tas)
        thresh = convert_units_to(thresh, tas)
        # Interpolate fraction over temperature (in units of tas)
        t = xr.DataArray(
            [-np.inf, thresh, upper, np.inf], dims=("tas",), attrs={"units": "degC"}
        )
        fraction = xr.DataArray([1.0, 1.0, 0.0, 0.0], dims=("tas",), coords={"tas": t})
        # Multiply precip by snowfall fraction
        prsn = pr * fraction.interp(tas=tas, method="linear")
    elif method == "auer":
        # Temperature relative to the threshold, in Kelvin degrees.
        dtas = convert_units_to(tas, "degK") - convert_units_to(thresh, "degK")
        # Create nodes for the snowfall fraction: -inf, thresh, ..., thresh+6, inf [degC]
        t = np.concatenate(
            [[-273.15], np.linspace(0, 6, 100, endpoint=False), [6, 1e10]]
        )
        t = xr.DataArray(t, dims="tas", name="tas", coords={"tas": t})
        # The polynomial coefficients, valid between thresh and thresh + 6 (defined in CLASS)
        coeffs = xr.DataArray(
            [100, 4.6664, -15.038, -1.5089, 2.0399, -0.366, 0.0202],
            dims=("degree",),
            coords={"degree": range(7)},
        )
        # Evaluate the polynomial at the nodes, clamp to [0, 1], then force the
        # endpoints: fully solid below threshold, fully liquid past thresh + 6.
        fraction = xr.polyval(t.tas, coeffs).clip(0, 100) / 100
        fraction[0] = 1
        fraction[-2:] = 0
        # Convert snowfall fraction coordinates to native tas units
        prsn = pr * fraction.interp(tas=dtas, method="linear")
    else:
        raise ValueError(f"Method {method} not one of 'binary', 'brown' or 'auer'.")
    prsn.attrs["units"] = pr.attrs["units"]
    return prsn
@declare_units(pr="[precipitation]", tas="[temperature]", thresh="[temperature]")
def rain_approximation(
    pr: xr.DataArray,
    tas: xr.DataArray,
    thresh: str = "0 degC",
    method: str = "binary",
) -> xr.DataArray:
    """Rainfall approximation from total precipitation and temperature.

    Liquid precipitation estimated from precipitation and temperature according to a given method.
    This is a convenience wrapper around :py:func:`snowfall_approximation`: the estimated
    solid fraction is simply removed from the total. See that function for details.

    Parameters
    ----------
    pr : xarray.DataArray
        Mean daily precipitation flux.
    tas : xarray.DataArray
        Mean, maximum, or minimum daily temperature.
    thresh : str
        Threshold temperature, used by method "binary".
    method : {"binary", "brown", "auer"}
        Which method to use when approximating snowfall from total precipitation. See notes.

    Returns
    -------
    xarray.DataArray, [same units as pr]
        Liquid precipitation rate.

    See Also
    --------
    :py:func:`xclim.indices.snowfall_approximation`
    """
    solid = snowfall_approximation(pr, tas, thresh=thresh, method=method)
    liquid = pr - solid
    liquid.attrs["units"] = pr.attrs["units"]
    return liquid
@declare_units(
    tas="[temperature]",
    sfcWind="[speed]",
)
def wind_chill_index(
    tas: xr.DataArray,
    sfcWind: xr.DataArray,
    method: str = "CAN",
    mask_invalid: bool = True,
):
    r"""Wind chill index.

    The Wind Chill Index estimates how cold the weather feels to the average person,
    from the air temperature and the 10-m wind speed. Environment and Climate Change
    Canada ([MVSZ2015]_) defines two equations: the conventional one, and a variant
    for slow winds (usually < 5 km/h), see Notes.

    Parameters
    ----------
    tas : xarray.DataArray
        Surface air temperature.
    sfcWind : xarray.DataArray
        Surface wind speed (10 m).
    method : {'CAN', 'US'}
        If "CAN" (default), a "slow wind" equation is used where winds are slower than 5 km/h, see Notes.
    mask_invalid : bool
        Whether to mask values when the inputs are outside their validity range. or not.
        If True (default), points where the temperature is above a threshold are masked.
        The threshold is 0°C for the canadian method and 50°F for the american one.
        With the latter method, points where sfcWind < 3 mph are also masked.

    Returns
    -------
    xarray.DataArray, [degC]
        Wind Chill Index.

    Notes
    -----
    The standard index, shared with USA's National Weather Service ([NWS]_), takes a
    surface temperature :math:`T` (°C) and a 10-m wind speed :math:`V` (km/h):

    .. math::

        W = 13.12 + 0.6215*T - 11.37*V^0.16 + 0.3965*T*V^0.16

    With the canadian method and slow winds (:math:`V < 5` km/h):

    .. math::

        W = T + \frac{-1.59 + 0.1345 * T}{5} * V

    Both equations are invalid for temperatures over 0°C in the canadian method.
    The american Wind Chill Temperature index (WCT), computed when `method='US'`,
    is valid up to 50°F (10 °C) and down to a 3 mph (4.8 km/h) wind.

    See Also
    --------
    National Weather Service FAQ: ([NWS]_).

    References
    ----------
    .. [MVSZ2015] <NAME>, <NAME>, <NAME> & <NAME> (2015) Observed Trends in Severe Weather Conditions Based on Humidex, Wind Chill, and Heavy Rainfall Events in Canada for 1953–2012, Atmosphere-Ocean, 53:4, 383-397, DOI: 10.1080/07055900.2015.1086970
    .. [Osczevski&Bluestein2005] <NAME>., & <NAME>. (2005). The New Wind Chill Equivalent Temperature Chart. Bulletin of the American Meteorological Society, 86(10), 1453–1458. https://doi.org/10.1175/BAMS-86-10-1453
    .. [NWS] Wind Chill Questions, Cold Resources, National Weather Service, retrieved 25-05-21. https://www.weather.gov/safety/cold-faqs
    """
    tas = convert_units_to(tas, "degC")
    sfcWind = convert_units_to(sfcWind, "km/h")

    # Standard (NWS) regression, shared by both methods.
    v16 = sfcWind**0.16
    W = 13.12 + 0.6215 * tas - 11.37 * v16 + 0.3965 * tas * v16

    method_uc = method.upper()
    if method_uc == "CAN":
        # Canadian slow-wind variant below 5 km/h.
        W = xr.where(sfcWind < 5, tas + sfcWind * (-1.59 + 0.1345 * tas) / 5, W)
    elif method_uc != "US":
        raise ValueError(f"`method` must be one of 'US' and 'CAN'. Got '{method}'.")

    if mask_invalid:
        # Validity domain of each method (4.828032 km/h == 3 mph).
        mask = {"CAN": tas <= 0, "US": (sfcWind > 4.828032) & (tas <= 10)}
        W = W.where(mask[method_uc])

    W.attrs["units"] = "degC"
    return W
@declare_units(
    delta_tas="[temperature]",
    pr_baseline="[precipitation]",
)
def clausius_clapeyron_scaled_precipitation(
    delta_tas: xr.DataArray,
    pr_baseline: xr.DataArray,
    cc_scale_factor: float = 1.07,
) -> xr.DataArray:
    r"""Scale precipitation according to the Clausius-Clapeyron relation.

    Parameters
    ----------
    delta_tas : xarray.DataArray
        Difference in temperature between a baseline climatology and another climatology.
    pr_baseline : xarray.DataArray
        Baseline precipitation to adjust with Clausius-Clapeyron.
    cc_scale_factor : float (default = 1.07)
        Clausius Clapeyron scale factor.

    Returns
    -------
    DataArray
        Baseline precipitation scaled to other climatology using Clausius-Clapeyron relationship.

    Notes
    -----
    Under typical atmospheric conditions, the Clausius-Clapeyron equation states that
    the saturation water vapor pressure :math:`e_s` grows roughly exponentially with
    temperature:

    .. math::

        \frac{\mathrm{d}e_s(T)}{\mathrm{d}T} \approx 1.07 e_s(T)

    This function assumes precipitation scales by the same per-degree factor.

    Warnings
    --------
    `delta_tas` must be computed over a baseline compatible with `pr_baseline`: if
    `delta_tas` is the climatological difference between a baseline and a future
    period, then `pr_baseline` should be precipitation over a period within that
    same baseline.
    """
    # Warming expressed as a temperature difference (delta degrees Celsius).
    warming = convert_units_to(delta_tas, "delta_degreeC")

    # Exponential scaling: one factor of `cc_scale_factor` per degree of warming.
    scaled = pr_baseline * cc_scale_factor**warming
    scaled.attrs["units"] = pr_baseline.attrs["units"]
    return scaled
@declare_units(
tasmin="[temperature]", tasmax="[temperature]", tas="[temperature]", lat="[]"
)
def potential_evapotranspiration(
tasmin: xr.DataArray | None = None,
tasmax: xr.DataArray | None = None,
tas: xr.DataArray | None = None,
lat: xr.DataArray | None = None,
method: str = "BR65",
peta: float | None = 0.00516409319477,
petb: float | None = 0.0874972822289,
) -> xr.DataArray:
r"""Potential evapotranspiration.
The potential for water evaporation from soil and transpiration by plants if the water supply is
sufficient, according to a given method.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature.
tasmax : xarray.DataArray
Maximum daily temperature.
tas : xarray.DataArray
Mean daily temperature.
lat : xarray.DataArray, optional
Latitude. If not given, it is sought on tasmin or tas with cf-xarray.
method : {"baierrobertson65", "BR65", "hargreaves85", "HG85", "thornthwaite48", "TW48", "mcguinnessbordne05", "MB05"}
Which method to use, see notes.
peta : float
Used only with method MB05 as :math:`a` for calculation of PET, see Notes section. Default value resulted from calibration of PET over the UK.
petb : float
Used only with method MB05 as :math:`b` for calculation of PET, see Notes section. Default value resulted from calibration of PET over the UK.
Returns
-------
xarray.DataArray
Notes
-----
Available methods are:
- "baierrobertson65" or "BR65", based on [BaierRobertson1965]_. Requires tasmin and tasmax, daily [D] freq.
- "hargreaves85" or "HG85", based on [Hargreaves1985]_. Requires tasmin and tasmax, daily [D] freq. (optional: tas can be given in addition of tasmin and tasmax).
- "mcguinnessbordne05" or "MB05", based on [Tanguy2018]_. Requires tas, daily [D] freq, with latitudes 'lat'.
- "thornthwaite48" or "TW48", based on [Thornthwaite1948]_. Requires tasmin and tasmax, monthly [MS] or daily [D] freq. (optional: tas can be given instead of tasmin and tasmax).
The McGuinness-Bordne [McGuinness1972]_ equation is:
.. math::
PET[mm day^{-1}] = a * \frac{S_0}{\lambda}T_a + b *\frsc{S_0}{\lambda}
where :math:`a` and :math:`b` are empirical parameters; :math:`S_0` is the extraterrestrial radiation [MJ m-2 day-1],
assuming a solar constant of 1367 W m-2; :math:`\\lambda` is the latent heat of vaporisation [MJ kg-1]
and :math:`T_a` is the air temperature [°C]. The equation was originally derived for the USA,
with :math:`a=0.0147` and :math:`b=0.07353`. The default parameters used here are calibrated for the UK,
using the method described in [Tanguy2018]_.
Methods "BR65", "HG85" and "MB05" use an approximation of the extraterrestrial
radiation. See :py:func:`~xclim.indices._helpers.extraterrestrial_solar_radiation`.
References
----------
.. [BaierRobertson1965] <NAME>., & <NAME>. (1965). Estimation of latent evaporation from simple weather observations. Canadian journal of plant science, 45(3), 276-284.
.. [Hargreaves1985] <NAME>., & <NAME>. (1985). Reference crop evapotranspiration from temperature. Applied engineering in agriculture, 1(2), 96-99.
.. [Tanguy2018] <NAME>., <NAME>., <NAME>., & <NAME>. (2018). Historical gridded reconstruction of potential evapotranspiration for the UK. Earth System Science Data, 10(2), 951-968.
.. [McGuinness1972] <NAME>., & <NAME>. (1972). A comparison of lysimeter-derived potential evapotranspiration with computed values (No. 1452). US Department of Agriculture.
.. [Thornthwaite1948] <NAME>. (1948). An approach toward a rational classification of climate. Geographical review, 38(1), 55-94.
"""
if lat is None:
lat = (tasmin if tas is None else tas).cf["latitude"]
if method in ["baierrobertson65", "BR65"]:
tasmin = convert_units_to(tasmin, "degF")
tasmax = convert_units_to(tasmax, "degF")
re = extraterrestrial_solar_radiation(tasmin.time, lat)
re = convert_units_to(re, "cal cm-2 day-1")
# Baier et Robertson(1965) formula
out = 0.094 * (
-87.03 + 0.928 * tasmax + 0.933 * (tasmax - tasmin) + 0.0486 * re
)
out = out.clip(0)
elif method in ["hargreaves85", "HG85"]:
tasmin = convert_units_to(tasmin, "degC")
tasmax = convert_units_to(tasmax, "degC")
if tas is None:
tas = (tasmin + tasmax) / 2
else:
tas = convert_units_to(tas, "degC")
lv = 2.5 # MJ/kg
ra = extraterrestrial_solar_radiation(tasmin.time, lat)
ra = convert_units_to(ra, "MJ m-2 d-1")
# Hargreaves and Samani(1985) formula
out = (0.0023 * ra * (tas + 17.8) * (tasmax - tasmin) ** 0.5) / lv
out = out.clip(0)
elif method in ["mcguinnessbordne05", "MB05"]:
if tas is None:
tasmin = convert_units_to(tasmin, "degC")
tasmax = convert_units_to(tasmax, "degC")
tas = (tasmin + tasmax) / 2
tas.attrs["units"] = "degC"
tas = convert_units_to(tas, "degC")
tasK = convert_units_to(tas, "K")
ext_rad = extraterrestrial_solar_radiation(
tas.time, lat, solar_constant="1367 W m-2"
)
latentH = 4185.5 * (751.78 - 0.5655 * tasK)
radDIVlat = ext_rad / latentH
# parameters from calibration provided by Dr <NAME> @ CEH
# (calibrated for PET over the UK)
a = peta
b = petb
out = radDIVlat * a * tas + radDIVlat * b
elif method in ["thornthwaite48", "TW48"]:
if tas is None:
tasmin = convert_units_to(tasmin, "degC")
tasmax = convert_units_to(tasmax, "degC")
tas = (tasmin + tasmax) / 2
else:
tas = convert_units_to(tas, "degC")
tas = tas.clip(0)
tas = tas.resample(time="MS").mean(dim="time")
start = "-".join(
[
str(tas.time[0].dt.year.values),
f"{tas.time[0].dt.month.values:02d}",
"01",
]
)
end = "-".join(
[
str(tas.time[-1].dt.year.values),
f"{tas.time[-1].dt.month.values:02d}",
str(tas.time[-1].dt.daysinmonth.values),
]
)
time_v = xr.DataArray(
date_range(start, end, freq="D", calendar="standard"),
dims="time",
name="time",
)
# Thornwaith measures half-days
dl = day_lengths(time_v, lat) / 12
dl_m = dl.resample(time="MS").mean(dim="time")
# annual heat index
id_m = (tas / 5) ** 1.514
id_y = id_m.resample(time="YS").sum(dim="time")
tas_idy_a = []
for base_time, indexes in tas.resample(time="YS").groups.items():
tas_y = tas.isel(time=indexes)
id_v = id_y.sel(time=base_time)
a = 6.75e-7 * id_v**3 - 7.71e-5 * id_v**2 + 0.01791 * id_v + 0.49239
frac = (10 * tas_y / id_v) ** a
tas_idy_a.append(frac)
tas_idy_a = xr.concat(tas_idy_a, dim="time")
# Thornthwaite(1948) formula
out = 1.6 * dl_m * tas_idy_a # cm/month
out = 10 * out # mm/month
else:
raise NotImplementedError(f"'{method}' method is not implemented.")
out.attrs["units"] = "mm"
return amount2rate(out, out_units="kg m-2 s-1")
@vectorize(
    [
        float64(float64, float64, float64, float64),
        float32(float32, float32, float32, float32),
    ],
)
def _utci(tas, sfcWind, dt, wvp):
    """Return the empirical polynomial function for UTCI. See :py:func:`universal_thermal_climate_index`."""
    # Taken directly from the original Fortran code by <NAME>.
    # http://www.utci.org/public/UTCI%20Program%20Code/UTCI_a002.f90
    # tas -> Ta (surface temperature, °C)
    # sfcWind -> va (surface wind speed, m/s)
    # dt -> D_Tmrt (tas - t_mrt, K)
    # wvp -> Pa (water vapor partial pressure, kPa)
    #
    # The UTCI offset is a 6th-order polynomial regression in the four
    # inputs.  The coefficients below are transcribed verbatim from the
    # reference implementation and MUST NOT be reformatted, rounded or
    # reordered; the terms are grouped by increasing power of `wvp`.
    return (
        # terms with no humidity dependence (powers of tas, sfcWind, dt)
        tas
        + 6.07562052e-1
        + -2.27712343e-2 * tas
        + 8.06470249e-4 * tas * tas
        + -1.54271372e-4 * tas * tas * tas
        + -3.24651735e-6 * tas * tas * tas * tas
        + 7.32602852e-8 * tas * tas * tas * tas * tas
        + 1.35959073e-9 * tas * tas * tas * tas * tas * tas
        + -2.25836520e0 * sfcWind
        + 8.80326035e-2 * tas * sfcWind
        + 2.16844454e-3 * tas * tas * sfcWind
        + -1.53347087e-5 * tas * tas * tas * sfcWind
        + -5.72983704e-7 * tas * tas * tas * tas * sfcWind
        + -2.55090145e-9 * tas * tas * tas * tas * tas * sfcWind
        + -7.51269505e-1 * sfcWind * sfcWind
        + -4.08350271e-3 * tas * sfcWind * sfcWind
        + -5.21670675e-5 * tas * tas * sfcWind * sfcWind
        + 1.94544667e-6 * tas * tas * tas * sfcWind * sfcWind
        + 1.14099531e-8 * tas * tas * tas * tas * sfcWind * sfcWind
        + 1.58137256e-1 * sfcWind * sfcWind * sfcWind
        + -6.57263143e-5 * tas * sfcWind * sfcWind * sfcWind
        + 2.22697524e-7 * tas * tas * sfcWind * sfcWind * sfcWind
        + -4.16117031e-8 * tas * tas * tas * sfcWind * sfcWind * sfcWind
        + -1.27762753e-2 * sfcWind * sfcWind * sfcWind * sfcWind
        + 9.66891875e-6 * tas * sfcWind * sfcWind * sfcWind * sfcWind
        + 2.52785852e-9 * tas * tas * sfcWind * sfcWind * sfcWind * sfcWind
        + 4.56306672e-4 * sfcWind * sfcWind * sfcWind * sfcWind * sfcWind
        + -1.74202546e-7 * tas * sfcWind * sfcWind * sfcWind * sfcWind * sfcWind
        + -5.91491269e-6 * sfcWind * sfcWind * sfcWind * sfcWind * sfcWind * sfcWind
        + 3.98374029e-1 * dt
        + 1.83945314e-4 * tas * dt
        + -1.73754510e-4 * tas * tas * dt
        + -7.60781159e-7 * tas * tas * tas * dt
        + 3.77830287e-8 * tas * tas * tas * tas * dt
        + 5.43079673e-10 * tas * tas * tas * tas * tas * dt
        + -2.00518269e-2 * sfcWind * dt
        + 8.92859837e-4 * tas * sfcWind * dt
        + 3.45433048e-6 * tas * tas * sfcWind * dt
        + -3.77925774e-7 * tas * tas * tas * sfcWind * dt
        + -1.69699377e-9 * tas * tas * tas * tas * sfcWind * dt
        + 1.69992415e-4 * sfcWind * sfcWind * dt
        + -4.99204314e-5 * tas * sfcWind * sfcWind * dt
        + 2.47417178e-7 * tas * tas * sfcWind * sfcWind * dt
        + 1.07596466e-8 * tas * tas * tas * sfcWind * sfcWind * dt
        + 8.49242932e-5 * sfcWind * sfcWind * sfcWind * dt
        + 1.35191328e-6 * tas * sfcWind * sfcWind * sfcWind * dt
        + -6.21531254e-9 * tas * tas * sfcWind * sfcWind * sfcWind * dt
        + -4.99410301e-6 * sfcWind * sfcWind * sfcWind * sfcWind * dt
        + -1.89489258e-8 * tas * sfcWind * sfcWind * sfcWind * sfcWind * dt
        + 8.15300114e-8 * sfcWind * sfcWind * sfcWind * sfcWind * sfcWind * dt
        + 7.55043090e-4 * dt * dt
        + -5.65095215e-5 * tas * dt * dt
        + -4.52166564e-7 * tas * tas * dt * dt
        + 2.46688878e-8 * tas * tas * tas * dt * dt
        + 2.42674348e-10 * tas * tas * tas * tas * dt * dt
        + 1.54547250e-4 * sfcWind * dt * dt
        + 5.24110970e-6 * tas * sfcWind * dt * dt
        + -8.75874982e-8 * tas * tas * sfcWind * dt * dt
        + -1.50743064e-9 * tas * tas * tas * sfcWind * dt * dt
        + -1.56236307e-5 * sfcWind * sfcWind * dt * dt
        + -1.33895614e-7 * tas * sfcWind * sfcWind * dt * dt
        + 2.49709824e-9 * tas * tas * sfcWind * sfcWind * dt * dt
        + 6.51711721e-7 * sfcWind * sfcWind * sfcWind * dt * dt
        + 1.94960053e-9 * tas * sfcWind * sfcWind * sfcWind * dt * dt
        + -1.00361113e-8 * sfcWind * sfcWind * sfcWind * sfcWind * dt * dt
        + -1.21206673e-5 * dt * dt * dt
        + -2.18203660e-7 * tas * dt * dt * dt
        + 7.51269482e-9 * tas * tas * dt * dt * dt
        + 9.79063848e-11 * tas * tas * tas * dt * dt * dt
        + 1.25006734e-6 * sfcWind * dt * dt * dt
        + -1.81584736e-9 * tas * sfcWind * dt * dt * dt
        + -3.52197671e-10 * tas * tas * sfcWind * dt * dt * dt
        + -3.36514630e-8 * sfcWind * sfcWind * dt * dt * dt
        + 1.35908359e-10 * tas * sfcWind * sfcWind * dt * dt * dt
        + 4.17032620e-10 * sfcWind * sfcWind * sfcWind * dt * dt * dt
        + -1.30369025e-9 * dt * dt * dt * dt
        + 4.13908461e-10 * tas * dt * dt * dt * dt
        + 9.22652254e-12 * tas * tas * dt * dt * dt * dt
        + -5.08220384e-9 * sfcWind * dt * dt * dt * dt
        + -2.24730961e-11 * tas * sfcWind * dt * dt * dt * dt
        + 1.17139133e-10 * sfcWind * sfcWind * dt * dt * dt * dt
        + 6.62154879e-10 * dt * dt * dt * dt * dt
        + 4.03863260e-13 * tas * dt * dt * dt * dt * dt
        + 1.95087203e-12 * sfcWind * dt * dt * dt * dt * dt
        + -4.73602469e-12 * dt * dt * dt * dt * dt * dt
        # terms linear in wvp
        + 5.12733497e0 * wvp
        + -3.12788561e-1 * tas * wvp
        + -1.96701861e-2 * tas * tas * wvp
        + 9.99690870e-4 * tas * tas * tas * wvp
        + 9.51738512e-6 * tas * tas * tas * tas * wvp
        + -4.66426341e-7 * tas * tas * tas * tas * tas * wvp
        + 5.48050612e-1 * sfcWind * wvp
        + -3.30552823e-3 * tas * sfcWind * wvp
        + -1.64119440e-3 * tas * tas * sfcWind * wvp
        + -5.16670694e-6 * tas * tas * tas * sfcWind * wvp
        + 9.52692432e-7 * tas * tas * tas * tas * sfcWind * wvp
        + -4.29223622e-2 * sfcWind * sfcWind * wvp
        + 5.00845667e-3 * tas * sfcWind * sfcWind * wvp
        + 1.00601257e-6 * tas * tas * sfcWind * sfcWind * wvp
        + -1.81748644e-6 * tas * tas * tas * sfcWind * sfcWind * wvp
        + -1.25813502e-3 * sfcWind * sfcWind * sfcWind * wvp
        + -1.79330391e-4 * tas * sfcWind * sfcWind * sfcWind * wvp
        + 2.34994441e-6 * tas * tas * sfcWind * sfcWind * sfcWind * wvp
        + 1.29735808e-4 * sfcWind * sfcWind * sfcWind * sfcWind * wvp
        + 1.29064870e-6 * tas * sfcWind * sfcWind * sfcWind * sfcWind * wvp
        + -2.28558686e-6 * sfcWind * sfcWind * sfcWind * sfcWind * sfcWind * wvp
        + -3.69476348e-2 * dt * wvp
        + 1.62325322e-3 * tas * dt * wvp
        + -3.14279680e-5 * tas * tas * dt * wvp
        + 2.59835559e-6 * tas * tas * tas * dt * wvp
        + -4.77136523e-8 * tas * tas * tas * tas * dt * wvp
        + 8.64203390e-3 * sfcWind * dt * wvp
        + -6.87405181e-4 * tas * sfcWind * dt * wvp
        + -9.13863872e-6 * tas * tas * sfcWind * dt * wvp
        + 5.15916806e-7 * tas * tas * tas * sfcWind * dt * wvp
        + -3.59217476e-5 * sfcWind * sfcWind * dt * wvp
        + 3.28696511e-5 * tas * sfcWind * sfcWind * dt * wvp
        + -7.10542454e-7 * tas * tas * sfcWind * sfcWind * dt * wvp
        + -1.24382300e-5 * sfcWind * sfcWind * sfcWind * dt * wvp
        + -7.38584400e-9 * tas * sfcWind * sfcWind * sfcWind * dt * wvp
        + 2.20609296e-7 * sfcWind * sfcWind * sfcWind * sfcWind * dt * wvp
        + -7.32469180e-4 * dt * dt * wvp
        + -1.87381964e-5 * tas * dt * dt * wvp
        + 4.80925239e-6 * tas * tas * dt * dt * wvp
        + -8.75492040e-8 * tas * tas * tas * dt * dt * wvp
        + 2.77862930e-5 * sfcWind * dt * dt * wvp
        + -5.06004592e-6 * tas * sfcWind * dt * dt * wvp
        + 1.14325367e-7 * tas * tas * sfcWind * dt * dt * wvp
        + 2.53016723e-6 * sfcWind * sfcWind * dt * dt * wvp
        + -1.72857035e-8 * tas * sfcWind * sfcWind * dt * dt * wvp
        + -3.95079398e-8 * sfcWind * sfcWind * sfcWind * dt * dt * wvp
        + -3.59413173e-7 * dt * dt * dt * wvp
        + 7.04388046e-7 * tas * dt * dt * dt * wvp
        + -1.89309167e-8 * tas * tas * dt * dt * dt * wvp
        + -4.79768731e-7 * sfcWind * dt * dt * dt * wvp
        + 7.96079978e-9 * tas * sfcWind * dt * dt * dt * wvp
        + 1.62897058e-9 * sfcWind * sfcWind * dt * dt * dt * wvp
        + 3.94367674e-8 * dt * dt * dt * dt * wvp
        + -1.18566247e-9 * tas * dt * dt * dt * dt * wvp
        + 3.34678041e-10 * sfcWind * dt * dt * dt * dt * wvp
        + -1.15606447e-10 * dt * dt * dt * dt * dt * wvp
        # terms quadratic in wvp
        + -2.80626406e0 * wvp * wvp
        + 5.48712484e-1 * tas * wvp * wvp
        + -3.99428410e-3 * tas * tas * wvp * wvp
        + -9.54009191e-4 * tas * tas * tas * wvp * wvp
        + 1.93090978e-5 * tas * tas * tas * tas * wvp * wvp
        + -3.08806365e-1 * sfcWind * wvp * wvp
        + 1.16952364e-2 * tas * sfcWind * wvp * wvp
        + 4.95271903e-4 * tas * tas * sfcWind * wvp * wvp
        + -1.90710882e-5 * tas * tas * tas * sfcWind * wvp * wvp
        + 2.10787756e-3 * sfcWind * sfcWind * wvp * wvp
        + -6.98445738e-4 * tas * sfcWind * sfcWind * wvp * wvp
        + 2.30109073e-5 * tas * tas * sfcWind * sfcWind * wvp * wvp
        + 4.17856590e-4 * sfcWind * sfcWind * sfcWind * wvp * wvp
        + -1.27043871e-5 * tas * sfcWind * sfcWind * sfcWind * wvp * wvp
        + -3.04620472e-6 * sfcWind * sfcWind * sfcWind * sfcWind * wvp * wvp
        + 5.14507424e-2 * dt * wvp * wvp
        + -4.32510997e-3 * tas * dt * wvp * wvp
        + 8.99281156e-5 * tas * tas * dt * wvp * wvp
        + -7.14663943e-7 * tas * tas * tas * dt * wvp * wvp
        + -2.66016305e-4 * sfcWind * dt * wvp * wvp
        + 2.63789586e-4 * tas * sfcWind * dt * wvp * wvp
        + -7.01199003e-6 * tas * tas * sfcWind * dt * wvp * wvp
        + -1.06823306e-4 * sfcWind * sfcWind * dt * wvp * wvp
        + 3.61341136e-6 * tas * sfcWind * sfcWind * dt * wvp * wvp
        + 2.29748967e-7 * sfcWind * sfcWind * sfcWind * dt * wvp * wvp
        + 3.04788893e-4 * dt * dt * wvp * wvp
        + -6.42070836e-5 * tas * dt * dt * wvp * wvp
        + 1.16257971e-6 * tas * tas * dt * dt * wvp * wvp
        + 7.68023384e-6 * sfcWind * dt * dt * wvp * wvp
        + -5.47446896e-7 * tas * sfcWind * dt * dt * wvp * wvp
        + -3.59937910e-8 * sfcWind * sfcWind * dt * dt * wvp * wvp
        + -4.36497725e-6 * dt * dt * dt * wvp * wvp
        + 1.68737969e-7 * tas * dt * dt * dt * wvp * wvp
        + 2.67489271e-8 * sfcWind * dt * dt * dt * wvp * wvp
        + 3.23926897e-9 * dt * dt * dt * dt * wvp * wvp
        # terms cubic in wvp
        + -3.53874123e-2 * wvp * wvp * wvp
        + -2.21201190e-1 * tas * wvp * wvp * wvp
        + 1.55126038e-2 * tas * tas * wvp * wvp * wvp
        + -2.63917279e-4 * tas * tas * tas * wvp * wvp * wvp
        + 4.53433455e-2 * sfcWind * wvp * wvp * wvp
        + -4.32943862e-3 * tas * sfcWind * wvp * wvp * wvp
        + 1.45389826e-4 * tas * tas * sfcWind * wvp * wvp * wvp
        + 2.17508610e-4 * sfcWind * sfcWind * wvp * wvp * wvp
        + -6.66724702e-5 * tas * sfcWind * sfcWind * wvp * wvp * wvp
        + 3.33217140e-5 * sfcWind * sfcWind * sfcWind * wvp * wvp * wvp
        + -2.26921615e-3 * dt * wvp * wvp * wvp
        + 3.80261982e-4 * tas * dt * wvp * wvp * wvp
        + -5.45314314e-9 * tas * tas * dt * wvp * wvp * wvp
        + -7.96355448e-4 * sfcWind * dt * wvp * wvp * wvp
        + 2.53458034e-5 * tas * sfcWind * dt * wvp * wvp * wvp
        + -6.31223658e-6 * sfcWind * sfcWind * dt * wvp * wvp * wvp
        + 3.02122035e-4 * dt * dt * wvp * wvp * wvp
        + -4.77403547e-6 * tas * dt * dt * wvp * wvp * wvp
        + 1.73825715e-6 * sfcWind * dt * dt * wvp * wvp * wvp
        + -4.09087898e-7 * dt * dt * dt * wvp * wvp * wvp
        # quartic terms in wvp
        + 6.14155345e-1 * wvp * wvp * wvp * wvp
        + -6.16755931e-2 * tas * wvp * wvp * wvp * wvp
        + 1.33374846e-3 * tas * tas * wvp * wvp * wvp * wvp
        + 3.55375387e-3 * sfcWind * wvp * wvp * wvp * wvp
        + -5.13027851e-4 * tas * sfcWind * wvp * wvp * wvp * wvp
        + 1.02449757e-4 * sfcWind * sfcWind * wvp * wvp * wvp * wvp
        + -1.48526421e-3 * dt * wvp * wvp * wvp * wvp
        + -4.11469183e-5 * tas * dt * wvp * wvp * wvp * wvp
        + -6.80434415e-6 * sfcWind * dt * wvp * wvp * wvp * wvp
        + -9.77675906e-6 * dt * dt * wvp * wvp * wvp * wvp
        # quintic and sextic terms in wvp
        + 8.82773108e-2 * wvp * wvp * wvp * wvp * wvp
        + -3.01859306e-3 * tas * wvp * wvp * wvp * wvp * wvp
        + 1.04452989e-3 * sfcWind * wvp * wvp * wvp * wvp * wvp
        + 2.47090539e-4 * dt * wvp * wvp * wvp * wvp * wvp
        + 1.48348065e-3 * wvp * wvp * wvp * wvp * wvp * wvp
    )
@declare_units(
    tas="[temperature]",
    hurs="[]",
    sfcWind="[speed]",
    mrt="[temperature]",
    rsds="[radiation]",
    rsus="[radiation]",
    rlds="[radiation]",
    rlus="[radiation]",
)
def universal_thermal_climate_index(
    tas: xr.DataArray,
    hurs: xr.DataArray,
    sfcWind: xr.DataArray,
    mrt: xr.DataArray = None,
    rsds: xr.DataArray = None,
    rsus: xr.DataArray = None,
    rlds: xr.DataArray = None,
    rlus: xr.DataArray = None,
    stat: str = "average",
    mask_invalid: bool = True,
) -> xr.DataArray:
    r"""Universal thermal climate index (UTCI).

    The UTCI is the equivalent temperature for the environment derived from a
    reference environment and is used to evaluate heat stress in outdoor spaces.

    Parameters
    ----------
    tas : xarray.DataArray
        Mean temperature.
    hurs : xarray.DataArray
        Relative humidity.
    sfcWind : xarray.DataArray
        Wind velocity.
    mrt : xarray.DataArray, optional
        Mean radiant temperature. When None, it is computed from the four
        radiation fluxes below.
    rsds : xr.DataArray, optional
        Surface downwelling shortwave radiation. Required when mrt is None.
    rsus : xr.DataArray, optional
        Surface upwelling shortwave radiation. Required when mrt is None.
    rlds : xr.DataArray, optional
        Surface downwelling longwave radiation. Required when mrt is None.
    rlus : xr.DataArray, optional
        Surface upwelling longwave radiation. Required when mrt is None.
    stat : {'average', 'instant', 'sunlit'}
        Statistic applied to the cosine of the solar zenith angle when mrt is
        computed from the radiation fluxes: the average over each interval,
        the instantaneous value, or the value over the sunlit period of each
        interval. Only used when mrt is None.
    mask_invalid : bool
        If True (default), UTCI values are NaN wherever any input falls
        outside the validity range of the regression:
        -50°C < tas < 50°C, -30°C < tas - mrt < 30°C and
        0.5 m/s < sfcWind < 17.0 m/s.

    Returns
    -------
    xarray.DataArray
        Universal Thermal Climate Index.

    Notes
    -----
    The water vapor partial pressure is derived from relative humidity and a
    saturation vapor pressure computed with the ITS-90 equation. This code
    was inspired by the `pythermalcomfort` and `thermofeel` packages.

    References
    ----------
    Program for calculating UTCI Temperature (UTCI), version a 0.002, http://www.utci.org/public/UTCI%20Program%20Code/UTCI_a002.f90
    An introduction to the Universal Thermal Climate Index (UTCI). DOI:10.7163/GPOL.2013.1

    See Also
    --------
    http://www.utci.org/utcineu/utcineu.php
    """
    # Saturation vapor pressure is needed to derive the water vapor partial
    # pressure fed into the regression polynomial.
    svp = saturation_vapor_pressure(tas=tas, method="its90")
    tas = convert_units_to(tas, "degC")
    sfcWind = convert_units_to(sfcWind, "m/s")
    if mrt is None:
        # Fall back to deriving mrt from the four radiation fluxes.
        mrt = mean_radiant_temperature(
            rsds=rsds, rsus=rsus, rlds=rlds, rlus=rlus, stat=stat
        )
    mrt = convert_units_to(mrt, "degC")
    tmrt_offset = mrt - tas
    vapor_pressure = convert_units_to(svp, "kPa") * convert_units_to(hurs, "1")
    out = _utci(tas, sfcWind, tmrt_offset, vapor_pressure)
    out = out.assign_attrs({"units": "degC"})
    if mask_invalid:
        # NaN out everything outside the regression's validity domain.
        valid = (
            (tas > -50.0)
            & (tas < 50.0)
            & (tmrt_offset > -30)
            & (tmrt_offset < 30)
            & (sfcWind > 0.5)
            & (sfcWind < 17.0)
        )
        out = out.where(valid)
    return out
def _fdir_ratio(
    dates: xr.DataArray,
    csza_i: xr.DataArray,
    csza_s: xr.DataArray,
    rsds: xr.DataArray,
) -> xr.DataArray:
    r"""Return the ratio of direct solar radiation.

    The ratio of direct solar radiation is the fraction of the total
    horizontal solar irradiance due to the direct beam of the sun.

    Parameters
    ----------
    dates : xr.DataArray
        Series of dates and time of day.
    csza_i : xr.DataArray
        Cosine of the solar zenith angle during each interval.
    csza_s : xr.DataArray
        Cosine of the solar zenith angle during the sunlit period of each interval.
    rsds : xr.DataArray
        Surface Downwelling Shortwave Radiation.

    Returns
    -------
    xarray.DataArray, [dimensionless]
        Ratio of direct solar radiation.

    Notes
    -----
    This code was inspired by the `PyWBGT` package.

    References
    ----------
    Modeling the Wet Bulb Globe Temperature Using Standard Meteorological Measurements, Journal of Occupational and Environmental Hygiene, 5:10, 645-655, https://doi.org/10.1080/15459620802310770
    Explicit Calculations of Wet Bulb Globe Temperature Compared with Approximations and Why It Matters for Labor Productivity. Earth's Future, January 31, 2022. https://doi.org/10.1029/2021EF002334.
    """
    sun_dist = distance_from_sun(dates)
    # Observed flux relative to the top-of-atmosphere flux on a horizontal
    # plane (solar constant 1367 W m-2, corrected for the Earth-sun
    # distance), capped at 0.85.
    rel_irradiance = rsds * ((1367 * csza_s * sun_dist**-2) ** -1)
    rel_irradiance = xr.where(rel_irradiance > 0.85, 0.85, rel_irradiance)
    # Empirical fit of the direct-beam fraction, capped at 0.9.
    ratio = np.exp(3 - 1.34 * rel_irradiance - 1.65 * rel_irradiance**-1)
    ratio = xr.where(ratio > 0.9, 0.9, ratio)
    # The fraction is zero at night (sun below a zenith angle of 89.5°),
    # for non-positive irradiance or for degenerate ratios.
    invalid = (ratio <= 0) | (csza_i <= np.cos(89.5 / 180 * np.pi)) | (rsds <= 0)
    return xr.where(invalid, 0, ratio)
@declare_units(
    rsds="[radiation]", rsus="[radiation]", rlds="[radiation]", rlus="[radiation]"
)
def mean_radiant_temperature(
    rsds: xr.DataArray,
    rsus: xr.DataArray,
    rlds: xr.DataArray,
    rlus: xr.DataArray,
    stat: str = "average",
) -> xr.DataArray:
    r"""Mean radiant temperature.

    The mean radiant temperature accounts for the incidence of radiation on
    the body from all directions, combining the four surface radiation fluxes.

    WARNING: There are some issues in the calculation of mrt in polar regions.

    Parameters
    ----------
    rsds : xr.DataArray
        Surface Downwelling Shortwave Radiation.
    rsus : xr.DataArray
        Surface Upwelling Shortwave Radiation.
    rlds : xr.DataArray
        Surface Downwelling Longwave Radiation.
    rlus : xr.DataArray
        Surface Upwelling Longwave Radiation.
    stat : {'average', 'instant', 'sunlit'}
        Statistic applied to the cosine of the solar zenith angle: the
        average over each interval, the instantaneous value, or the value
        over the sunlit period of each interval.

    Returns
    -------
    xarray.DataArray, [K]
        Mean radiant temperature.

    Notes
    -----
    This code was inspired by the `thermofeel` package.

    References
    ----------
    Mean radiant temperature from global-scale numerical weather prediction models. Int J Biometeorol 64, 1233–1245 (2020). https://doi.org/10.1007/s00484-020-01900-5
    thermofeel: a python thermal comfort indices library, https://doi.org/10.21957/mp6v-fd16
    """
    rsds = convert_units_to(rsds, "W m-2")
    rsus = convert_units_to(rsus, "W m-2")
    rlds = convert_units_to(rlds, "W m-2")
    rlus = convert_units_to(rlus, "W m-2")

    dates = rsds.time
    lat = rsds.lat
    lon = rsds.lon
    # Fractional hour of the day for each timestamp.
    hours = ((dates - dates.dt.floor("D")).dt.seconds / 3600).assign_attrs(units="h")
    decimal_year = datetime_to_decimal_year(times=dates, calendar=dates.dt.calendar)
    day_angle = ((decimal_year % 1) * 2 * np.pi).assign_attrs(units="rad")
    declination = solar_declination(day_angle)

    # Cosine of the solar zenith angle over the interval (csza_i) and over
    # its sunlit fraction (csza_s); the two are identical for the "instant"
    # and "average" statistics.
    if stat == "sunlit":
        interval = (dates.diff("time").dt.seconds / 3600).reindex(
            time=dates.time, method="bfill"
        )
        csza_i = cosine_of_solar_zenith_angle(
            declination=declination,
            lat=lat,
            lon=lon,
            hours=hours,
            interval=interval,
            stat="interval",
        )
        csza_s = cosine_of_solar_zenith_angle(
            declination=declination,
            lat=lat,
            lon=lon,
            hours=hours,
            interval=interval,
            stat=stat,
        )
    elif stat == "instant":
        time_correction = time_correction_for_solar_angle(day_angle)
        csza = cosine_of_solar_zenith_angle(
            declination=declination,
            lat=lat,
            lon=lon,
            time_correction=time_correction,
            hours=hours,
            stat="instant",
        )
        csza_i, csza_s = csza.copy(), csza.copy()
    elif stat == "average":
        csza = cosine_of_solar_zenith_angle(
            declination=declination,
            lat=lat,
            stat="average",
        )
        csza_i, csza_s = csza.copy(), csza.copy()
    else:
        raise NotImplementedError(
            "Argument 'stat' must be one of 'average', 'instant' or 'sunlit'."
        )

    # Partition the downwelling shortwave flux into direct and diffuse parts.
    direct_ratio = _fdir_ratio(dates, csza_i, csza_s, rsds)
    rsds_direct = direct_ratio * rsds
    rsds_diffuse = rsds - rsds_direct

    # Solar elevation angle and projected-area factor of the body.
    gamma = np.arcsin(csza_i)
    fp = 0.308 * np.cos(gamma * 0.988 - (gamma**2 / 50000))
    # Direct irradiance normal to the beam, guarded against division by a
    # near-zero zenith cosine.
    i_star = xr.where(csza_s > 0.001, rsds_direct / csza_s, 0)

    # Invert the Stefan-Boltzmann law (sigma = 5.67e-8 W m-2 K-4) on the
    # combined long- and shortwave contributions.
    absorbed = (
        0.5 * rlds
        + 0.5 * rlus
        + (0.7 / 0.97) * (0.5 * rsds_diffuse + 0.5 * rsus + fp * i_star)
    )
    mrt = np.power((1 / 5.67e-8) * absorbed, 0.25)
    return mrt.assign_attrs({"units": "K"})
| [
"numpy.arctan2",
"xclim.indices.helpers.extraterrestrial_solar_radiation",
"xclim.indices.helpers.time_correction_for_solar_angle",
"xclim.core.units.convert_units_to",
"numpy.exp",
"xclim.core.calendar.datetime_to_decimal_year",
"numpy.power",
"numpy.arcsin",
"numpy.linspace",
"numba.float64",
... | [((968, 1035), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'tdps': '"""[temperature]"""', 'hurs': '"""[]"""'}), "(tas='[temperature]', tdps='[temperature]', hurs='[]')\n", (981, 1035), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((4949, 4997), 'xclim.core.units.declare_units', 'declare_units', ([], {'tasmax': '"""[temperature]"""', 'hurs': '"""[]"""'}), "(tasmax='[temperature]', hurs='[]')\n", (4962, 4997), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((6758, 6819), 'xclim.core.units.declare_units', 'declare_units', ([], {'tasmin': '"""[temperature]"""', 'tasmax': '"""[temperature]"""'}), "(tasmin='[temperature]', tasmax='[temperature]')\n", (6771, 6819), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((7486, 7557), 'xclim.core.units.declare_units', 'declare_units', ([], {'uas': '"""[speed]"""', 'vas': '"""[speed]"""', 'calm_wind_thresh': '"""[speed]"""'}), "(uas='[speed]', vas='[speed]', calm_wind_thresh='[speed]')\n", (7499, 7557), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((9697, 9750), 'xclim.core.units.declare_units', 'declare_units', ([], {'sfcWind': '"""[speed]"""', 'sfcWindfromdir': '"""[]"""'}), "(sfcWind='[speed]', sfcWindfromdir='[]')\n", (9710, 9750), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((11610, 11672), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'ice_thresh': '"""[temperature]"""'}), "(tas='[temperature]', ice_thresh='[temperature]')\n", (11623, 11672), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((17415, 17532), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'tdps': '"""[temperature]"""', 
'huss': '"""[]"""', 'ps': '"""[pressure]"""', 'ice_thresh': '"""[temperature]"""'}), "(tas='[temperature]', tdps='[temperature]', huss='[]', ps=\n '[pressure]', ice_thresh='[temperature]')\n", (17428, 17532), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((21984, 22079), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'hurs': '"""[]"""', 'ps': '"""[pressure]"""', 'ice_thresh': '"""[temperature]"""'}), "(tas='[temperature]', hurs='[]', ps='[pressure]', ice_thresh=\n '[temperature]')\n", (21997, 22079), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((24958, 25010), 'xclim.core.units.declare_units', 'declare_units', ([], {'tdps': '"""[temperature]"""', 'ps': '"""[pressure]"""'}), "(tdps='[temperature]', ps='[pressure]')\n", (24971, 25010), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((26632, 26717), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""', 'tas': '"""[temperature]"""', 'thresh': '"""[temperature]"""'}), "(pr='[precipitation]', tas='[temperature]', thresh='[temperature]'\n )\n", (26645, 26717), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((30305, 30390), 'xclim.core.units.declare_units', 'declare_units', ([], {'pr': '"""[precipitation]"""', 'tas': '"""[temperature]"""', 'thresh': '"""[temperature]"""'}), "(pr='[precipitation]', tas='[temperature]', thresh='[temperature]'\n )\n", (30318, 30390), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((31686, 31739), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'sfcWind': '"""[speed]"""'}), "(tas='[temperature]', sfcWind='[speed]')\n", (31699, 31739), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, 
units2pint\n'), ((35246, 35317), 'xclim.core.units.declare_units', 'declare_units', ([], {'delta_tas': '"""[temperature]"""', 'pr_baseline': '"""[precipitation]"""'}), "(delta_tas='[temperature]', pr_baseline='[precipitation]')\n", (35259, 35317), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((37077, 37174), 'xclim.core.units.declare_units', 'declare_units', ([], {'tasmin': '"""[temperature]"""', 'tasmax': '"""[temperature]"""', 'tas': '"""[temperature]"""', 'lat': '"""[]"""'}), "(tasmin='[temperature]', tasmax='[temperature]', tas=\n '[temperature]', lat='[]')\n", (37090, 37174), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((57153, 57328), 'xclim.core.units.declare_units', 'declare_units', ([], {'tas': '"""[temperature]"""', 'hurs': '"""[]"""', 'sfcWind': '"""[speed]"""', 'mrt': '"""[temperature]"""', 'rsds': '"""[radiation]"""', 'rsus': '"""[radiation]"""', 'rlds': '"""[radiation]"""', 'rlus': '"""[radiation]"""'}), "(tas='[temperature]', hurs='[]', sfcWind='[speed]', mrt=\n '[temperature]', rsds='[radiation]', rsus='[radiation]', rlds=\n '[radiation]', rlus='[radiation]')\n", (57166, 57328), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((62733, 62830), 'xclim.core.units.declare_units', 'declare_units', ([], {'rsds': '"""[radiation]"""', 'rsus': '"""[radiation]"""', 'rlds': '"""[radiation]"""', 'rlus': '"""[radiation]"""'}), "(rsds='[radiation]', rsus='[radiation]', rlds='[radiation]',\n rlus='[radiation]')\n", (62746, 62830), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((4808, 4831), 'xclim.core.units.convert_units_to', 'convert_units_to', (['h', 'du'], {}), '(h, du)\n', (4824, 4831), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((6166, 6198), 'xclim.core.units.convert_units_to', 
'convert_units_to', (['thresh', '"""degC"""'], {}), "(thresh, 'degC')\n", (6182, 6198), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((6207, 6239), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmax', '"""degC"""'], {}), "(tasmax, 'degC')\n", (6223, 6239), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((6276, 6303), 'xclim.core.units.convert_units_to', 'convert_units_to', (['hurs', '"""%"""'], {}), "(hurs, '%')\n", (6292, 6303), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((6719, 6754), 'xclim.core.units.convert_units_to', 'convert_units_to', (['out', 'tasmax.units'], {}), '(out, tasmax.units)\n', (6735, 6754), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((7356, 7388), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmax', 'tasmin'], {}), '(tasmax, tasmin)\n', (7372, 7388), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((8795, 8823), 'xclim.core.units.convert_units_to', 'convert_units_to', (['uas', '"""m/s"""'], {}), "(uas, 'm/s')\n", (8811, 8823), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((8834, 8862), 'xclim.core.units.convert_units_to', 'convert_units_to', (['vas', '"""m/s"""'], {}), "(vas, 'm/s')\n", (8850, 8862), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((8881, 8922), 'xclim.core.units.convert_units_to', 'convert_units_to', (['calm_wind_thresh', '"""m/s"""'], {}), "(calm_wind_thresh, 'm/s')\n", (8897, 8922), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((8989, 9007), 'numpy.hypot', 'np.hypot', (['uas', 'vas'], {}), '(uas, vas)\n', (8997, 9007), True, 'import numpy as np\n'), ((9572, 9618), 
'xarray.where', 'xr.where', (['(wind < wind_thresh)', '(0)', 'wind_from_dir'], {}), '(wind < wind_thresh, 0, wind_from_dir)\n', (9580, 9618), True, 'import xarray as xr\n'), ((10491, 10523), 'xclim.core.units.convert_units_to', 'convert_units_to', (['sfcWind', '"""m/s"""'], {}), "(sfcWind, 'm/s')\n", (10507, 10523), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((14375, 14401), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""K"""'], {}), "(tas, 'K')\n", (14391, 14401), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((24394, 24420), 'xclim.core.units.convert_units_to', 'convert_units_to', (['ps', '"""Pa"""'], {}), "(ps, 'Pa')\n", (24410, 24420), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((24432, 24458), 'xclim.core.units.convert_units_to', 'convert_units_to', (['hurs', '""""""'], {}), "(hurs, '')\n", (24448, 24458), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((24469, 24498), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degK"""'], {}), "(tas, 'degK')\n", (24485, 24498), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((26505, 26531), 'xclim.core.units.convert_units_to', 'convert_units_to', (['ps', '"""Pa"""'], {}), "(ps, 'Pa')\n", (26521, 26531), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((34667, 34696), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degC"""'], {}), "(tas, 'degC')\n", (34683, 34696), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((34711, 34744), 'xclim.core.units.convert_units_to', 'convert_units_to', (['sfcWind', '"""km/h"""'], {}), "(sfcWind, 'km/h')\n", (34727, 34744), False, 'from xclim.core.units import 
amount2rate, convert_units_to, declare_units, units2pint\n'), ((36860, 36904), 'xclim.core.units.convert_units_to', 'convert_units_to', (['delta_tas', '"""delta_degreeC"""'], {}), "(delta_tas, 'delta_degreeC')\n", (36876, 36904), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((44529, 44569), 'xclim.core.units.amount2rate', 'amount2rate', (['out'], {'out_units': '"""kg m-2 s-1"""'}), "(out, out_units='kg m-2 s-1')\n", (44540, 44569), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((60261, 60290), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degC"""'], {}), "(tas, 'degC')\n", (60277, 60290), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((60305, 60337), 'xclim.core.units.convert_units_to', 'convert_units_to', (['sfcWind', '"""m/s"""'], {}), "(sfcWind, 'm/s')\n", (60321, 60337), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((60484, 60513), 'xclim.core.units.convert_units_to', 'convert_units_to', (['mrt', '"""degC"""'], {}), "(mrt, 'degC')\n", (60500, 60513), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((62324, 62348), 'xclim.indices.helpers.distance_from_sun', 'distance_from_sun', (['dates'], {}), '(dates)\n', (62341, 62348), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((62422, 62459), 'xarray.where', 'xr.where', (['(s_star > 0.85)', '(0.85)', 's_star'], {}), '(s_star > 0.85, 0.85, s_star)\n', (62430, 62459), True, 'import xarray as xr\n'), ((62477, 62524), 'numpy.exp', 'np.exp', (['(3 - 1.34 * s_star - 1.65 * s_star ** -1)'], {}), '(3 - 1.34 * s_star - 1.65 * s_star ** -1)\n', (62483, 62524), True, 'import numpy as np\n'), ((62546, 
62589), 'xarray.where', 'xr.where', (['(fdir_ratio > 0.9)', '(0.9)', 'fdir_ratio'], {}), '(fdir_ratio > 0.9, 0.9, fdir_ratio)\n', (62554, 62589), True, 'import xarray as xr\n'), ((64592, 64623), 'xclim.core.units.convert_units_to', 'convert_units_to', (['rsds', '"""W m-2"""'], {}), "(rsds, 'W m-2')\n", (64608, 64623), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((64635, 64666), 'xclim.core.units.convert_units_to', 'convert_units_to', (['rsus', '"""W m-2"""'], {}), "(rsus, 'W m-2')\n", (64651, 64666), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((64678, 64709), 'xclim.core.units.convert_units_to', 'convert_units_to', (['rlds', '"""W m-2"""'], {}), "(rlds, 'W m-2')\n", (64694, 64709), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((64721, 64752), 'xclim.core.units.convert_units_to', 'convert_units_to', (['rlus', '"""W m-2"""'], {}), "(rlus, 'W m-2')\n", (64737, 64752), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((64919, 64984), 'xclim.core.calendar.datetime_to_decimal_year', 'datetime_to_decimal_year', ([], {'times': 'dates', 'calendar': 'dates.dt.calendar'}), '(times=dates, calendar=dates.dt.calendar)\n', (64943, 64984), False, 'from xclim.core.calendar import date_range, datetime_to_decimal_year\n'), ((65070, 65098), 'xclim.indices.helpers.solar_declination', 'solar_declination', (['day_angle'], {}), '(day_angle)\n', (65087, 65098), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((66457, 66474), 'numpy.arcsin', 'np.arcsin', (['csza_i'], {}), '(csza_i)\n', (66466, 66474), True, 'import numpy as np\n'), ((66548, 66597), 'xarray.where', 'xr.where', (['(csza_s > 0.001)', '(rsds_direct / csza_s)', '(0)'], {}), 
'(csza_s > 0.001, rsds_direct / csza_s, 0)\n', (66556, 66597), True, 'import xarray as xr\n'), ((66609, 66732), 'numpy.power', 'np.power', (['(1 / 5.67e-08 * (0.5 * rlds + 0.5 * rlus + 0.7 / 0.97 * (0.5 * rsds_diffuse +\n 0.5 * rsus + fp * i_star)))', '(0.25)'], {}), '(1 / 5.67e-08 * (0.5 * rlds + 0.5 * rlus + 0.7 / 0.97 * (0.5 *\n rsds_diffuse + 0.5 * rsus + fp * i_star)), 0.25)\n', (66617, 66732), True, 'import numpy as np\n'), ((4287, 4319), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tdps', '"""kelvin"""'], {}), "(tdps, 'kelvin')\n", (4303, 4319), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((9105, 9125), 'numpy.arctan2', 'np.arctan2', (['vas', 'uas'], {}), '(vas, uas)\n', (9115, 9125), True, 'import numpy as np\n'), ((14237, 14273), 'xclim.core.units.convert_units_to', 'convert_units_to', (['ice_thresh', '"""degK"""'], {}), "(ice_thresh, 'degK')\n", (14253, 14273), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((14301, 14332), 'xclim.core.units.convert_units_to', 'convert_units_to', (['"""0 K"""', '"""degK"""'], {}), "('0 K', 'degK')\n", (14317, 14332), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((20916, 20946), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tdps', '"""degK"""'], {}), "(tdps, 'degK')\n", (20932, 20946), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((20961, 20990), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degK"""'], {}), "(tas, 'degK')\n", (20977, 20990), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((28579, 28608), 'xclim.core.units.convert_units_to', 'convert_units_to', (['thresh', 'tas'], {}), '(thresh, tas)\n', (28595, 28608), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, 
units2pint\n'), ((34873, 34941), 'xarray.where', 'xr.where', (['(sfcWind < 5)', '(tas + sfcWind * (-1.59 + 0.1345 * tas) / 5)', 'W'], {}), '(sfcWind < 5, tas + sfcWind * (-1.59 + 0.1345 * tas) / 5, W)\n', (34881, 34941), True, 'import xarray as xr\n'), ((40987, 41019), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmin', '"""degF"""'], {}), "(tasmin, 'degF')\n", (41003, 41019), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((41037, 41069), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmax', '"""degF"""'], {}), "(tasmax, 'degF')\n", (41053, 41069), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((41084, 41134), 'xclim.indices.helpers.extraterrestrial_solar_radiation', 'extraterrestrial_solar_radiation', (['tasmin.time', 'lat'], {}), '(tasmin.time, lat)\n', (41116, 41134), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((41148, 41186), 'xclim.core.units.convert_units_to', 'convert_units_to', (['re', '"""cal cm-2 day-1"""'], {}), "(re, 'cal cm-2 day-1')\n", (41164, 41186), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((44598, 44641), 'numba.float64', 'float64', (['float64', 'float64', 'float64', 'float64'], {}), '(float64, float64, float64, float64)\n', (44605, 44641), False, 'from numba import float32, float64, vectorize\n'), ((44651, 44694), 'numba.float32', 'float32', (['float32', 'float32', 'float32', 'float32'], {}), '(float32, float32, float32, float32)\n', (44658, 44694), False, 'from numba import float32, float64, vectorize\n'), ((60545, 60575), 'xclim.core.units.convert_units_to', 'convert_units_to', (['e_sat', '"""kPa"""'], {}), "(e_sat, 'kPa')\n", (60561, 60575), False, 'from xclim.core.units import amount2rate, 
convert_units_to, declare_units, units2pint\n'), ((60578, 60605), 'xclim.core.units.convert_units_to', 'convert_units_to', (['hurs', '"""1"""'], {}), "(hurs, '1')\n", (60594, 60605), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((65262, 65378), 'xclim.indices.helpers.cosine_of_solar_zenith_angle', 'cosine_of_solar_zenith_angle', ([], {'declination': 'dec', 'lat': 'lat', 'lon': 'lon', 'hours': 'hours', 'interval': 'interval', 'stat': '"""interval"""'}), "(declination=dec, lat=lat, lon=lon, hours=hours,\n interval=interval, stat='interval')\n", (65290, 65378), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((65475, 65585), 'xclim.indices.helpers.cosine_of_solar_zenith_angle', 'cosine_of_solar_zenith_angle', ([], {'declination': 'dec', 'lat': 'lat', 'lon': 'lon', 'hours': 'hours', 'interval': 'interval', 'stat': 'stat'}), '(declination=dec, lat=lat, lon=lon, hours=hours,\n interval=interval, stat=stat)\n', (65503, 65585), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((66492, 66534), 'numpy.cos', 'np.cos', (['(gamma * 0.988 - gamma ** 2 / 50000)'], {}), '(gamma * 0.988 - gamma ** 2 / 50000)\n', (66498, 66534), True, 'import numpy as np\n'), ((4340, 4384), 'numpy.exp', 'np.exp', (['(5417.753 * (1 / 273.16 - 1.0 / tdps))'], {}), '(5417.753 * (1 / 273.16 - 1.0 / tdps))\n', (4346, 4384), True, 'import numpy as np\n'), ((4479, 4511), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""celsius"""'], {}), "(tas, 'celsius')\n", (4495, 4511), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((11430, 11460), 'numpy.radians', 'np.radians', (['wind_from_dir_math'], {}), 
'(wind_from_dir_math)\n', (11440, 11460), True, 'import numpy as np\n'), ((11489, 11519), 'numpy.radians', 'np.radians', (['wind_from_dir_math'], {}), '(wind_from_dir_math)\n', (11499, 11519), True, 'import numpy as np\n'), ((21054, 21099), 'numpy.exp', 'np.exp', (['(-L * (tas - tdps) / (Rw * tas * tdps))'], {}), '(-L * (tas - tdps) / (Rw * tas * tdps))\n', (21060, 21099), True, 'import numpy as np\n'), ((21450, 21476), 'xclim.core.units.convert_units_to', 'convert_units_to', (['ps', '"""Pa"""'], {}), "(ps, 'Pa')\n", (21466, 21476), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((21492, 21518), 'xclim.core.units.convert_units_to', 'convert_units_to', (['huss', '""""""'], {}), "(huss, '')\n", (21508, 21518), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((21533, 21562), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degK"""'], {}), "(tas, 'degK')\n", (21549, 21562), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((28823, 28852), 'xclim.core.units.convert_units_to', 'convert_units_to', (['thresh', 'tas'], {}), '(thresh, tas)\n', (28839, 28852), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((28932, 29023), 'xarray.DataArray', 'xr.DataArray', (['[-np.inf, thresh, upper, np.inf]'], {'dims': "('tas',)", 'attrs': "{'units': 'degC'}"}), "([-np.inf, thresh, upper, np.inf], dims=('tas',), attrs={\n 'units': 'degC'})\n", (28944, 29023), True, 'import xarray as xr\n'), ((29060, 29128), 'xarray.DataArray', 'xr.DataArray', (['[1.0, 1.0, 0.0, 0.0]'], {'dims': "('tas',)", 'coords': "{'tas': t}"}), "([1.0, 1.0, 0.0, 0.0], dims=('tas',), coords={'tas': t})\n", (29072, 29128), True, 'import xarray as xr\n'), ((41432, 41464), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmin', '"""degC"""'], {}), "(tasmin, 'degC')\n", (41448, 41464), 
False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((41482, 41514), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmax', '"""degC"""'], {}), "(tasmax, 'degC')\n", (41498, 41514), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((41682, 41732), 'xclim.indices.helpers.extraterrestrial_solar_radiation', 'extraterrestrial_solar_radiation', (['tasmin.time', 'lat'], {}), '(tasmin.time, lat)\n', (41714, 41732), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((41746, 41780), 'xclim.core.units.convert_units_to', 'convert_units_to', (['ra', '"""MJ m-2 d-1"""'], {}), "(ra, 'MJ m-2 d-1')\n", (41762, 41780), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((65645, 65687), 'xclim.indices.helpers.time_correction_for_solar_angle', 'time_correction_for_solar_angle', (['day_angle'], {}), '(day_angle)\n', (65676, 65687), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((65703, 65819), 'xclim.indices.helpers.cosine_of_solar_zenith_angle', 'cosine_of_solar_zenith_angle', ([], {'declination': 'dec', 'lat': 'lat', 'lon': 'lon', 'time_correction': 'tc', 'hours': 'hours', 'stat': '"""instant"""'}), "(declination=dec, lat=lat, lon=lon,\n time_correction=tc, hours=hours, stat='instant')\n", (65731, 65819), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((4755, 4770), 'xclim.core.units.units2pint', 'units2pint', (['tas'], {}), '(tas)\n', (4765, 4770), False, 'from xclim.core.units import 
amount2rate, convert_units_to, declare_units, units2pint\n'), ((4777, 4792), 'xclim.core.units.units2pint', 'units2pint', (['tas'], {}), '(tas)\n', (4787, 4792), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((29563, 29621), 'xarray.DataArray', 'xr.DataArray', (['t'], {'dims': '"""tas"""', 'name': '"""tas"""', 'coords': "{'tas': t}"}), "(t, dims='tas', name='tas', coords={'tas': t})\n", (29575, 29621), True, 'import xarray as xr\n'), ((41611, 41640), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degC"""'], {}), "(tas, 'degC')\n", (41627, 41640), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42208, 42237), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degC"""'], {}), "(tas, 'degC')\n", (42224, 42237), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42253, 42279), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""K"""'], {}), "(tas, 'K')\n", (42269, 42279), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42299, 42375), 'xclim.indices.helpers.extraterrestrial_solar_radiation', 'extraterrestrial_solar_radiation', (['tas.time', 'lat'], {'solar_constant': '"""1367 W m-2"""'}), "(tas.time, lat, solar_constant='1367 W m-2')\n", (42331, 42375), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((66000, 66070), 'xclim.indices.helpers.cosine_of_solar_zenith_angle', 'cosine_of_solar_zenith_angle', ([], {'declination': 'dec', 'lat': 'lat', 'stat': '"""average"""'}), "(declination=dec, lat=lat, stat='average')\n", (66028, 66070), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, 
extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((15277, 15327), 'numpy.exp', 'np.exp', (['(17.269388 * (tas - 273.16) / (tas - 35.86))'], {}), '(17.269388 * (tas - 273.16) / (tas - 35.86))\n', (15283, 15327), True, 'import numpy as np\n'), ((15350, 15400), 'numpy.exp', 'np.exp', (['(21.8745584 * (tas - 273.16) / (tas - 7.66))'], {}), '(21.8745584 * (tas - 273.16) / (tas - 7.66))\n', (15356, 15400), True, 'import numpy as np\n'), ((28763, 28795), 'xclim.core.units.convert_units_to', 'convert_units_to', (['thresh', '"""degC"""'], {}), "(thresh, 'degC')\n", (28779, 28795), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((29282, 29311), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degK"""'], {}), "(tas, 'degK')\n", (29298, 29311), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((29314, 29346), 'xclim.core.units.convert_units_to', 'convert_units_to', (['thresh', '"""degK"""'], {}), "(thresh, 'degK')\n", (29330, 29346), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42026, 42058), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmin', '"""degC"""'], {}), "(tasmin, 'degC')\n", (42042, 42058), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42080, 42112), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmax', '"""degC"""'], {}), "(tasmax, 'degC')\n", (42096, 42112), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((44245, 44277), 'xarray.concat', 'xr.concat', (['tas_idy_a'], {'dim': '"""time"""'}), "(tas_idy_a, dim='time')\n", (44254, 44277), True, 'import xarray as xr\n'), ((62650, 62676), 'numpy.cos', 'np.cos', (['(89.5 / 180 * np.pi)'], {}), '(89.5 / 180 * np.pi)\n', (62656, 62676), True, 'import numpy as np\n'), 
((29490, 29528), 'numpy.linspace', 'np.linspace', (['(0)', '(6)', '(100)'], {'endpoint': '(False)'}), '(0, 6, 100, endpoint=False)\n', (29501, 29528), True, 'import numpy as np\n'), ((42776, 42808), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmin', '"""degC"""'], {}), "(tasmin, 'degC')\n", (42792, 42808), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42830, 42862), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tasmax', '"""degC"""'], {}), "(tasmax, 'degC')\n", (42846, 42862), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((42935, 42964), 'xclim.core.units.convert_units_to', 'convert_units_to', (['tas', '"""degC"""'], {}), "(tas, 'degC')\n", (42951, 42964), False, 'from xclim.core.units import amount2rate, convert_units_to, declare_units, units2pint\n'), ((43505, 43558), 'xclim.core.calendar.date_range', 'date_range', (['start', 'end'], {'freq': '"""D"""', 'calendar': '"""standard"""'}), "(start, end, freq='D', calendar='standard')\n", (43515, 43558), False, 'from xclim.core.calendar import date_range, datetime_to_decimal_year\n'), ((43674, 43698), 'xclim.indices.helpers.day_lengths', 'day_lengths', (['time_v', 'lat'], {}), '(time_v, lat)\n', (43685, 43698), False, 'from xclim.indices.helpers import cosine_of_solar_zenith_angle, day_lengths, distance_from_sun, extraterrestrial_solar_radiation, solar_declination, time_correction_for_solar_angle\n'), ((14790, 14801), 'numpy.log', 'np.log', (['tas'], {}), '(tas)\n', (14796, 14801), True, 'import numpy as np\n'), ((15126, 15137), 'numpy.log', 'np.log', (['tas'], {}), '(tas)\n', (15132, 15137), True, 'import numpy as np\n'), ((16384, 16430), 'numpy.exp', 'np.exp', (['(17.62 * (tas - 273.16) / (tas - 30.04))'], {}), '(17.62 * (tas - 273.16) / (tas - 30.04))\n', (16390, 16430), True, 'import numpy as np\n'), ((16452, 16497), 'numpy.exp', 'np.exp', (['(22.46 * (tas - 273.16) / (tas - 
0.54))'], {}), '(22.46 * (tas - 273.16) / (tas - 0.54))\n', (16458, 16497), True, 'import numpy as np\n'), ((29918, 29943), 'xarray.polyval', 'xr.polyval', (['t.tas', 'coeffs'], {}), '(t.tas, coeffs)\n', (29928, 29943), True, 'import xarray as xr\n'), ((16169, 16187), 'numpy.log10', 'np.log10', (['(Tp / tas)'], {}), '(Tp / tas)\n', (16177, 16187), True, 'import numpy as np\n'), ((16916, 16927), 'numpy.log', 'np.log', (['tas'], {}), '(tas)\n', (16922, 16927), True, 'import numpy as np\n'), ((17178, 17189), 'numpy.log', 'np.log', (['tas'], {}), '(tas)\n', (17184, 17189), True, 'import numpy as np\n'), ((15824, 15842), 'numpy.log10', 'np.log10', (['(Tb / tas)'], {}), '(Tb / tas)\n', (15832, 15842), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Find new relations between entities.
This file contains functions that find predicted relations from a logistic regression model given model embeddings
The model and embeddings are trained and created from a graph containing drugs, targets and side effects.
The graph used contained nodeIDs that can be mapped using a tsv file
"""
from dataclasses import dataclass
from operator import itemgetter
from typing import Any, List, Mapping, Optional, Tuple
import joblib
import networkx as nx
import numpy as np
import pandas as pd
from bionev.utils import load_embedding
from sklearn.linear_model import LogisticRegression
from .constants import RESULTS_TYPE_TO_NAMESPACE
__all__ = [
'Embeddings',
'Predictor',
]
NodeInfo = Mapping[str, str]
Embeddings = Mapping[str, np.ndarray]
RelationsResults = Tuple[List[NodeInfo], List[np.ndarray], List[bool]]
class MissingCurie(ValueError):
    """Signals that a queried CURIE could not be resolved to a known node."""
def _load_embedding(path: str) -> Embeddings:
    """Read an embedding file and normalize it: string keys, numpy vectors."""
    raw = load_embedding(path)
    normalized = {}
    for key, vector in raw.items():
        normalized[str(key)] = np.array(vector)
    return normalized
@dataclass
class Predictor:
    """Class for making predictions.

    Wraps a trained logistic regression classifier and node embeddings,
    scoring candidate edges built with the Hadamard (element-wise) product
    of two node vectors.
    """
    # Trained classifier that scores edge embeddings.
    model: LogisticRegression
    # Node identifier -> embedding vector.
    embeddings: Embeddings
    # Node identifier -> metadata dict (node_id, namespace, identifier, name, entity_type).
    node_id_to_info: Mapping[str, NodeInfo]
    # (namespace, identifier) pair -> node identifier.
    node_curie_to_id: Mapping[Tuple[str, str], str]
    # Entity name -> node identifier.
    node_name_to_id: Mapping[str, str]
    # Optional training graph; when present, predictions whose edge already
    # exists in it (in either direction) are flagged as not novel.
    graph: Optional[nx.Graph] = None
    # When False, non-novel (already known) relations are filtered from results.
    positive_control: bool = True
    #: The precision at which results are reported
    precision: int = 5
    @classmethod
    def from_paths(
        cls,
        *,
        model_path: str,
        embeddings_path: str,
        mapping_path: str,
        graph_path: Optional[str] = None,
        positive_control: Optional[bool] = True,
    ) -> 'Predictor':
        """Return the predictor for embeddings.

        :param model_path: path to a joblib-serialized LogisticRegression
        :param embeddings_path: path to the node embedding file
        :param mapping_path: TSV with columns node_id, namespace, identifier, name, entity_type
        :param graph_path: optional edge-list file used for novelty flags
        :param positive_control: when False, known relations are filtered out
        """
        model = joblib.load(model_path)
        mapping = pd.read_csv(mapping_path, sep='\t', dtype={'node_id': str})
        node_id_to_info = {}
        node_curie_to_id = {}
        node_name_to_id = {}
        # Build the three lookup tables from the mapping rows in one pass.
        for node_id, namespace, identifier, name, entity_type in mapping.values:
            node_id_to_info[node_id] = dict(
                node_id=node_id,
                namespace=namespace,
                identifier=identifier,
                name=name,
                entity_type=entity_type,
            )
            node_curie_to_id[namespace, identifier] = node_id
            node_name_to_id[name] = node_id
        embeddings = _load_embedding(embeddings_path)
        # If graph_path is falsy (None/empty), graph stays falsy and novelty
        # checks in _find_relations_helper treat every edge as novel.
        graph = graph_path and nx.read_edgelist(graph_path)
        return cls(
            model=model,
            graph=graph,
            embeddings=embeddings,
            positive_control=positive_control,
            node_id_to_info=node_id_to_info,
            node_curie_to_id=node_curie_to_id,
            node_name_to_id=node_name_to_id,
        )
    def find_new_relations(
        self,
        node_id: Optional[str] = None,
        node_name: Optional[str] = None,
        node_curie: Optional[str] = None,
        results_type: Optional[str] = None,
        k: Optional[int] = 30,
    ) -> Optional[Mapping[str, Any]]:
        """Find new relations to specific entity.
        Get all the relations of specific entity_type (if chosen) or all types (if None).
        Finds their probabilities from the saved_model, and return the top k predictions.
        :param node_id: the internal identifier of the node in the model
        :param node_name: the entity we want to find predictions with
        :param node_curie: the CURIE (namespace:identifier) of the entity we want to find predictions with
        :param results_type: can be 'phenotype', 'chemical', 'target', or None
        :param k: the amount of relations we want to find for the entity
        :return: a list of tuples containing the predicted entities and their probabilities
        :raises: MissingCurie
        """
        node_id = self._lookup_node(node_id=node_id, node_curie=node_curie, node_name=node_name)
        # NOTE(review): this error message mentions a CURIE even when the
        # lookup was done by name; the lookup can fail for either.
        if node_id is None:
            raise MissingCurie('The curie you input does not exist.')
        node_info = self._get_entity_json(node_id)
        # None results_type maps to None namespace, i.e. no filtering.
        namespace = RESULTS_TYPE_TO_NAMESPACE.get(results_type)
        relations_results = self._find_relations_helper(
            source_id=node_id,
            source_vector=self.embeddings[node_id],
            namespace=namespace,
        )
        return self._handle_relations_results(
            relations_results=relations_results,
            k=k,
            results_type=results_type,
            node_info=node_info,
        )
    def _handle_relations_results(
        self,
        *,
        relations_results: RelationsResults,
        k: Optional[int],
        results_type: Optional[str],
        node_info,
    ):
        """Score candidate relations and wrap them with the query metadata."""
        node_list, relations_list, relation_novelties = relations_results
        prediction_list = self.get_probabilities(
            nodes=node_list,
            relations=relations_list,
            relation_novelties=relation_novelties,
            k=k,
        )
        return {
            'query': {
                'entity': node_info,
                'k': k,
                'type': results_type,
            },
            'predictions': prediction_list,
        }
    def _lookup_node_id_by_name(self, entity_name: str) -> Optional[str]:
        # Returns None when the name is unknown.
        return self.node_name_to_id.get(entity_name)
    def _lookup_node_id_by_curie(self, entity_curie: str) -> Optional[str]:
        # Split only on the first colon so identifiers may themselves contain colons.
        namespace, identifier = entity_curie.split(':', 1)
        return self.node_curie_to_id.get((namespace, identifier))
    def _predict_helper(self, q):
        # NOTE(review): column 0 of predict_proba is the probability of the
        # classifier's first class; downstream code sorts these values
        # ascending — confirm the label encoding matches that ordering.
        return self.model.predict_proba(q)[:, 0]
    def _get_entity_json(self, node_id: str) -> Optional[NodeInfo]:
        # Returns None for unknown node identifiers.
        return self.node_id_to_info.get(node_id)
    def _lookup_node(
        self,
        node_id: Optional[str] = None,
        node_name: Optional[str] = None,
        node_curie: Optional[str] = None,
    ) -> Optional[str]:
        """Resolve a node id from whichever key was supplied (id > name > curie)."""
        if node_id is not None:
            return node_id
        elif node_name is not None:
            return self._lookup_node_id_by_name(node_name)
        elif node_curie is not None:
            return self._lookup_node_id_by_curie(node_curie)
        else:
            raise ValueError("You need to provide information about the entity (node_id, entity_id, or entity_name)")
    def find_new_relation(
        self,
        *,
        source_id: Optional[str] = None,
        source_curie: Optional[str] = None,
        source_name: Optional[str] = None,
        target_id: Optional[str] = None,
        target_curie: Optional[str] = None,
        target_name: Optional[str] = None,
    ) -> Mapping[str, Any]:
        """Get the probability of having a relation between two entities."""
        source_id = self._lookup_node(node_id=source_id, node_curie=source_curie, node_name=source_name)
        target_id = self._lookup_node(node_id=target_id, node_curie=target_curie, node_name=target_name)
        lor = self.get_edge_probability(source_id, target_id)
        return {
            'source': self._get_entity_json(source_id),
            'target': self._get_entity_json(target_id),
            'lor': round(lor, self.precision),
        }
    def get_edge_embedding(self, source_id: str, target_id: str) -> np.ndarray:
        """Get the embedding of the edge between the two nodes."""
        # Hadamard (element-wise) product of the two node vectors.
        return self.embeddings[source_id] * self.embeddings[target_id]
    def get_edge_probability(self, source_id: str, target_id: str) -> float:
        """Get the probability of the edge between the two nodes."""
        edge_embedding = self.get_edge_embedding(source_id, target_id)
        return self._predict_helper([edge_embedding.tolist()])[0]
    def _find_relations_helper(
        self,
        *,
        source_id: str,
        namespace: Optional[str] = None,
        source_vector: np.ndarray,
    ) -> RelationsResults:
        """Build candidate edges from the source to every other embedded node.

        Returns parallel lists of target metadata, Hadamard edge embeddings,
        and novelty flags (True when the edge is absent from the graph or no
        graph was supplied).
        """
        node_list, relations_list, relation_novelties = [], [], []
        for target_id, target_vector in self.embeddings.items():
            # Skip the self-edge.
            if source_id == target_id:
                continue
            # Novel unless the training graph contains the edge in either direction.
            novel = (
                self.graph is None or
                not (self.graph.has_edge(source_id, target_id) or self.graph.has_edge(target_id, source_id))
            )
            node_info = self._get_entity_json(target_id)
            # Optional namespace filter (e.g. restrict results to chemicals).
            if namespace is not None and node_info['namespace'] != namespace:
                continue
            node_list.append(node_info)
            relation_novelties.append(novel)
            # apply that hadamard operator
            relation = source_vector * target_vector
            relations_list.append(relation.tolist())
        return node_list, relations_list, relation_novelties
    def get_probabilities(
        self,
        *,
        nodes,
        relations: List[np.ndarray],
        relation_novelties: List[bool],
        k: Optional[int] = None,
    ) -> List[Mapping[str, Any]]:
        """Get probabilities from logistic regression classifier.
        Get the probabilities of all the relations in the list from the log model.
        Also sort the found probabilities by highest to lowest, then return the k highest probabilities.
        :param nodes: the list of the nodes with relations to the entity
        :param relations: the list of edge embedding of the two nodes
        :param k: the number of relations to be output
        :return: the k first probabilities in the list, type= list of tuples
        """
        probabilities = self._predict_helper(relations)
        results = [
            {
                'lor': round(lor, self.precision),
                'novel': novel,
                **node,
            }
            for node, lor, novel in zip(nodes, probabilities, relation_novelties)
        ]
        # NOTE(review): sorted ascending by 'lor' (no reverse=True) although
        # the docstring says highest-to-lowest; consistent with taking column
        # 0 of predict_proba, where smaller class-0 probability means a more
        # likely relation — confirm against the model's label encoding.
        results = sorted(results, key=itemgetter('lor'))
        if not self.positive_control:
            # Keep only novel relations (equivalent list-comprehension kept for reference):
            # results = [result for result in results if result['novel']]
            results = list(filter(itemgetter('novel'), results))
        if k is not None:
            return results[:k]
        else:
            return results
| [
"bionev.utils.load_embedding",
"pandas.read_csv",
"numpy.array",
"networkx.read_edgelist",
"joblib.load",
"operator.itemgetter"
] | [((1079, 1099), 'bionev.utils.load_embedding', 'load_embedding', (['path'], {}), '(path)\n', (1093, 1099), False, 'from bionev.utils import load_embedding\n'), ((1135, 1156), 'numpy.array', 'np.array', (['node_vector'], {}), '(node_vector)\n', (1143, 1156), True, 'import numpy as np\n'), ((1943, 1966), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (1954, 1966), False, 'import joblib\n'), ((1985, 2044), 'pandas.read_csv', 'pd.read_csv', (['mapping_path'], {'sep': '"""\t"""', 'dtype': "{'node_id': str}"}), "(mapping_path, sep='\\t', dtype={'node_id': str})\n", (1996, 2044), True, 'import pandas as pd\n'), ((2643, 2671), 'networkx.read_edgelist', 'nx.read_edgelist', (['graph_path'], {}), '(graph_path)\n', (2659, 2671), True, 'import networkx as nx\n'), ((9933, 9950), 'operator.itemgetter', 'itemgetter', (['"""lor"""'], {}), "('lor')\n", (9943, 9950), False, 'from operator import itemgetter\n'), ((10098, 10117), 'operator.itemgetter', 'itemgetter', (['"""novel"""'], {}), "('novel')\n", (10108, 10117), False, 'from operator import itemgetter\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import collections
from transformations import quaternion_multiply, quaternion_inverse, quaternion_slerp, quaternion_matrix, quaternion_from_matrix
from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, Direction2DConstraint, RelativeTransformConstraint
from morphablegraphs.constraints.spatial_constraints.keyframe_constraints.pose_constraint import PoseConstraint
from morphablegraphs.motion_model import MotionStateGraphLoader, NODE_TYPE_STANDARD, NODE_TYPE_END, NODE_TYPE_START, NODE_TYPE_IDLE, NODE_TYPE_SINGLE
from anim_utils.animation_data.skeleton_models import STANDARD_MIRROR_MAP
from morphablegraphs.constraints.motion_primitive_constraints import MotionPrimitiveConstraints
def unity_frame_to_mg_frame(skeleton, unity_frame, animated_joints, scale):
    """Convert a Unity pose dict into a flat morphable-graph frame vector.

    Layout: [tx, ty, tz, q0w, q0x, q0y, q0z, q1w, ...] with the x translation
    and the w/x quaternion components negated to switch handedness.
    Rotations are consumed in skeleton node order, skipping joints that are
    not in ``animated_joints``.  ``scale`` is currently unused.
    """
    frame = np.zeros(len(animated_joints) * 4 + 3)
    root = unity_frame["rootTranslation"]
    frame[:3] = [-root["x"], root["y"], root["z"]]
    rotation_idx = 0
    offset = 3
    for node_name in skeleton.nodes.keys():
        if node_name not in animated_joints:
            continue
        rot = unity_frame["rotations"][rotation_idx]
        frame[offset:offset + 4] = [-rot["w"], -rot["x"], rot["y"], rot["z"]]
        offset += 4
        rotation_idx += 1
    return frame
class MockActionConstraints(object):
    """Minimal stand-in for an action-constraints object.

    Only carries the graph reference and action name; the previous action
    is always None.
    """
    def __init__(self, action_name, mg):
        # mg: the motion state graph the action refers to
        self.motion_state_graph = mg
        self.action_name = action_name
        self.prev_action_name = None  # no predecessor action by default
class UnityFrameConstraint(object):
    """Keyframe constraint description received from the Unity client.

    Stores the target graph node, keyframe label(s) and the spatial target
    for one joint.  Keyframe labels are resolved to frame indices later
    (``keyframe``/``end_keyframe`` start as None).
    """
    def __init__(self, node, keyframe_label, joint, position, orientation, hold_frame=False, offset=None, end_keyframe_label=None):
        self.node = node  # (action name, motion primitive name) the constraint targets
        self.joint = joint
        self.position = position
        self.orientation = orientation
        self.hold_frame = hold_frame
        self.keyframe_events = list()
        self.offset = offset  # optional tool offset
        self.keyframe_label = keyframe_label
        self.keyframe = None  # resolved later from keyframe_label
        self.end_keyframe_label = end_keyframe_label
        self.end_keyframe = None  # resolved later from end_keyframe_label
        self.relative_joint_name = None
        # optional dynamic constraint keeping the mirrored joint at its original position
        self.mirror_joint_name = None
        # optional constraint on the parent joint position
        self.constrained_parent = None
        self.vector_to_parent = None
        # optional tool coordinate-system alignment
        self.src_tool_cos = None
        self.dest_tool_cos = None
        # when activated, enact the constraint over [keyframe, end_keyframe]
        self.constrain_position_in_region = False
        self.constrain_orientation_in_region = False
        # repetition counter for cyclic actions that revisit the same node
        self.cycle = 0
        self.look_at = False
class ConstraintBuilder(object):
    """Builds motion-graph constraint objects from client (Unity) constraint
    descriptions for the motion planner.

    Holds references to the skeleton and motion state graph, and converts
    global-space targets into the local frame of a motion primitive using an
    aligning transform.
    """
    def __init__(self, skeleton, graph, planner_settings, algorithm_config):
        """Cache skeleton/graph references and precompute joint lookup tables.

        Builds the end-effector joint list used by transition (pose)
        constraints, an inverse map from skeleton joint names back to
        standard joint names, and loads action definitions from the graph
        if it provides them.
        """
        self.skeleton = skeleton
        self._graph = graph
        self.settings = planner_settings
        self.algorithm_config = algorithm_config
        self.constrained_joints = []
        self.joint_weights_map = collections.OrderedDict()
        joint_map = self.skeleton.skeleton_model["joints"]
        # end effectors used for transition constraints, with their joint weights
        for idx, j in enumerate(["right_wrist", "left_wrist", "right_ankle", "left_ankle"]):
            node_name = joint_map[j]
            self.constrained_joints.append(node_name)
            self.joint_weights_map[idx] = self.skeleton.joint_weight_map[node_name]
        # inverse map: skeleton-specific joint name -> standard joint name
        self.inv_joint_map = dict()
        if self.skeleton.skeleton_model is not None:
            for j in self.skeleton.skeleton_model["joints"]:
                skel_j = self.skeleton.skeleton_model["joints"][j]
                if skel_j is not None:
                    self.inv_joint_map[skel_j] = j
        self.action_definitions = dict()
        if hasattr(self._graph, "action_definitions") and self._graph.action_definitions is not None:
            print("load action definitions from file")
            self.action_definitions = self._graph.action_definitions
            print("loaded actions", list(self.action_definitions.keys()))
    def generate_walk_dir_constraint(self, dir_vector, n_frames, aligning_transform, w=1.0):
        """Create a 2D direction constraint on the root for the last canonical
        frame, or return None if the projected direction is degenerate."""
        # project the direction into the ground plane and into the local frame
        local_dir_vec = np.dot(aligning_transform, [dir_vector[0], 0, dir_vector[2], 0])[:3]
        length = np.linalg.norm(local_dir_vec)
        if length <= 0:
            return None
        local_dir_vec /= length
        #print("dir", dir_vector, local_dir_vec)
        c_desc = {"joint": self.skeleton.root, "canonical_keyframe": n_frames-1,
                  "dir_vector": local_dir_vec,
                  "n_canonical_frames": n_frames, "semanticAnnotation": {"keyframeLabel": "none"}}
        return Direction2DConstraint(self.skeleton, c_desc, w, 1.0)
    def generate_walk_position_constraint(self, dir_vector, distance, n_frames, aligning_transform, w=1.0):
        """Create a root position constraint at dir_vector * distance (in the
        local frame) for the last canonical frame."""
        local_dir_vec = np.dot(aligning_transform, [dir_vector[0], 0, dir_vector[2], 0])[:3]
        local_dir_vec /= np.linalg.norm(local_dir_vec)
        position = local_dir_vec * distance
        c_desc = {"joint": self.skeleton.root, "canonical_keyframe": n_frames-1,
                  "position": position,
                  "n_canonical_frames": n_frames, "semanticAnnotation": {"keyframeLabel": "none"}}
        return GlobalTransformConstraint(self.skeleton, c_desc, w, 1.0)
    def generate_transform_constraint(self, node, keyframe, joint_name, position, orientation, n_frames, aligning_transform,
                                      offset=None, end_keyframe=None, keep_orientation=False, relative_joint_name=None):
        """Create a transform constraint for a joint at a canonical keyframe.

        Returns a RelativeTransformConstraint when a tool offset is given,
        otherwise a GlobalTransformConstraint.  Position and orientation are
        brought into the local frame via the aligning transform.
        """
        local_position = np.dot(aligning_transform, [position[0], position[1], position[2], 1])[:3]
        c_desc = {"joint": joint_name, "canonical_keyframe": keyframe,
                  "position": local_position,
                  "n_canonical_frames": n_frames, "semanticAnnotation": {"keyframeLabel": "none"},
                  "canonical_end_keyframe": end_keyframe,
                  "keep_orientation": keep_orientation,
                  "relative_joint_name": relative_joint_name}
        if orientation is not None:
            # rotate the target orientation into the local frame and renormalize
            local_orientation = np.dot(aligning_transform, quaternion_matrix(orientation))
            lq = quaternion_from_matrix(local_orientation)
            lq /= np.linalg.norm(lq)
            c_desc["qOrientation"] = lq
            #print("set orientation", c_desc["qOrientation"])
        if offset is not None:
            c_desc["offset"] = offset
            return RelativeTransformConstraint(self.skeleton, c_desc, 1.0, 1.0)
        else:
            return GlobalTransformConstraint(self.skeleton, c_desc, 1.0, 1.0)
    def generate_mg_constraint_from_unity_constraint(self, constraint, joint_name, n_frames, aligning_transform=None):
        """Convert a UnityFrameConstraint into a motion-graph keyframe constraint.

        Positions, orientations, parent vectors and tool axes are transformed
        into the local frame when an aligning transform is given; otherwise
        they are used as-is.
        """
        position = constraint.position
        if aligning_transform is not None:
            local_position = np.dot(aligning_transform, [position[0], position[1], position[2], 1])[:3]
        else:
            local_position = np.array([position[0], position[1], position[2]])
        c_desc = {"joint": joint_name, "canonical_keyframe": constraint.keyframe,
                  "position": local_position,
                  "n_canonical_frames": n_frames, "semanticAnnotation": {"keyframeLabel": "none"},
                  "canonical_end_keyframe": constraint.end_keyframe,
                  "relative_joint_name": constraint.relative_joint_name,
                  "mirror_joint_name": constraint.mirror_joint_name,
                  "constrained_parent": constraint.constrained_parent
                  }
        c_desc["constrain_position_in_region"] = constraint.constrain_position_in_region
        c_desc["constrain_orientation_in_region"] = constraint.constrain_orientation_in_region
        c_desc["look_at"] = constraint.look_at
        if constraint.orientation is not None:
            orientation = constraint.orientation
            if aligning_transform is not None:
                local_orientation = np.dot(aligning_transform, quaternion_matrix(orientation))
                lq = quaternion_from_matrix(local_orientation)
                lq /= np.linalg.norm(lq)
            else:
                lq = orientation
            c_desc["qOrientation"] = lq
            # print("set orientation", c_desc["qOrientation"])
        if constraint.vector_to_parent is not None:
            vector_to_parent = constraint.vector_to_parent
            if aligning_transform is not None:
                # direction vector: w component 0 so translation is ignored
                vector_to_parent = np.dot(aligning_transform, [vector_to_parent[0], vector_to_parent[1], vector_to_parent[2], 0])[:3]
            c_desc["vector_to_parent"] = vector_to_parent
        if constraint.src_tool_cos is not None and constraint.dest_tool_cos is not None:
            src_tool_cos = dict()
            dest_tool_cos = dict()
            for a in ["x", "y"]:
                if a in constraint.src_tool_cos and a in constraint.dest_tool_cos:
                    src_tool_cos[a]= constraint.src_tool_cos[a]
                    dest_tool_axis = constraint.dest_tool_cos[a]
                    if aligning_transform is not None:
                        dest_tool_axis = np.dot(aligning_transform, [dest_tool_axis[0], dest_tool_axis[1], dest_tool_axis[2], 0])[:3]
                    dest_tool_cos[a] = dest_tool_axis
            c_desc["src_tool_cos"]= src_tool_cos
            c_desc["dest_tool_cos"]= dest_tool_cos
        if constraint.offset is not None:
            # TODO check if offset needs to be brought into local coordinate system
            c_desc["offset"] = constraint.offset
            return RelativeTransformConstraint(self.skeleton, c_desc, 1.0, 1.0)
        else:
            return GlobalTransformConstraint(self.skeleton, c_desc, 1.0, 1.0)
    def map_keyframe_labels_to_frame_indices(self, frame_constraints):
        """Resolve keyframe (and optional end-keyframe) labels to frame indices."""
        for c in frame_constraints:
            c.keyframe = self._get_keyframe_from_label(c.node, c.keyframe_label)
            if c.end_keyframe_label is not None:
                c.end_keyframe = self._get_keyframe_from_label(c.node, c.end_keyframe_label)
    def _get_keyframe_from_label(self, node, keyframe_label):
        """Look up the canonical frame index for a label on the given graph node."""
        return self._graph.node_groups[node[0]].map_label_to_keyframe(node[1], keyframe_label)
    def generate_transition_constraint(self, pose_buffer, aligning_transform):
        """Build a pose constraint from the last frame of the pose buffer so the
        next primitive starts where the previous one ended."""
        last_pose = self.skeleton.convert_quaternion_frame_to_cartesian_frame(pose_buffer[-1], self.constrained_joints)
        # rotate the end-effector positions into the local frame (rotation only)
        for i,p in enumerate(last_pose):
            last_pose[i] = np.dot(aligning_transform[:3,:3], last_pose[i])
        c_desc = {"keyframeLabel": "start",
                  "frame_constraint": last_pose,
                  "semanticAnnotation": {"keyframeLabel": "start"},
                  "node_names": self.constrained_joints,
                  "weights": self.joint_weights_map,
                  "canonical_keyframe": 0}
        return PoseConstraint(self.skeleton, c_desc, 1.0, 1.0)
    def extract_tool_offset(self, joint_name, constraint_desc):
        """Extract an optional tool offset and tool coordinate-system axes from a
        client constraint description.

        Returns a tuple (tool_offset, src_cos, dest_cos); each element may be
        None when the corresponding data is absent or degenerate.
        """
        tool_offset = None
        src_cos = None
        dest_cos = None
        if "offset" in constraint_desc and "applyOffset" in constraint_desc and constraint_desc["applyOffset"]:
            tool_offset = constraint_desc["offset"]
            if "toolEndPoint" in constraint_desc and "currentPose" in constraint_desc:
                # recompute the offset from the current pose and the tool end point
                print("try to overwrite offset", tool_offset)
                tp = constraint_desc["toolEndPoint"]
                unity_frame = constraint_desc["currentPose"]
                frame = unity_frame_to_mg_frame(self.skeleton, unity_frame, self.skeleton.animated_joints, 1)
                m = self.skeleton.nodes[joint_name].get_global_matrix(frame)
                #p = self.skeleton.nodes[joint_name].get_global_position(frame)
                #g_offset = tp-p
                tp = np.array([tp[0],tp[1],tp[2],1])
                inv_m = np.linalg.inv(m)
                # bring the global tool end point into the joint's local frame
                l_offset = np.dot(inv_m, tp)
                print("overwrite offset", tool_offset, l_offset, tp)
                tool_offset = l_offset
        # NOTE(review): this branch reads `m` and `tool_offset[:3]`, which are only
        # bound when the offset/toolEndPoint branch above ran first -- confirm
        # callers always send applyOffset+toolEndPoint together with useToolCos.
        if "useToolCos" in constraint_desc and constraint_desc["useToolCos"]:
            src_cos = dict()
            dest_cos = dict()
            if "destToolCos" in constraint_desc and "srcToolCos" in constraint_desc:
                for a in ["x", "y"]:
                    if a in constraint_desc["srcToolCos"] and a in constraint_desc["destToolCos"]:
                        target_tool_vector = constraint_desc["destToolCos"][a]
                        magnitude = np.linalg.norm(target_tool_vector)
                        if magnitude <= 0:
                            # degenerate target axis: give up on the tool cos
                            src_cos = None
                            dest_cos = None
                            return tool_offset, None, None
                        target_tool_vector /= magnitude
                        dest_cos[a] = target_tool_vector
                        g_axis_point = constraint_desc["srcToolCos"][a]
                        tp = np.array([g_axis_point[0],g_axis_point[1],g_axis_point[2],1])
                        inv_m = np.linalg.inv(m)
                        tool_axis_offset = np.dot(inv_m, tp)[:3]
                        # remove tool offset to get relative axis
                        tool_axis_offset -= tool_offset[:3]
                        tool_axis_offset /= np.linalg.norm(tool_axis_offset)
                        tool_axis_offset = np.array([tool_axis_offset[0], tool_axis_offset[1],tool_axis_offset[2], 0])
                        src_cos[a] = tool_axis_offset
        return tool_offset, src_cos, dest_cos
    def create_frame_constraint(self, action_name, constraint_desc, look_at=False):
        """Build a UnityFrameConstraint from one client constraint description,
        resolving the target motion primitive from the action definition."""
        keyframe_label = constraint_desc["keyframe"]
        print("generate constraint", action_name, keyframe_label)
        joint_name = constraint_desc["joint"]
        position = constraint_desc["position"]
        constrain_orientation_in_region = False
        constrain_position_in_region = False
        if constraint_desc["constrainOrientation"]:
            orientation = constraint_desc["orientation"]
            if "constrainOrientationInRegion" in constraint_desc:
                constrain_orientation_in_region = constraint_desc["constrainOrientationInRegion"]
        else:
            orientation = None
        constraint_slots = self.action_definitions[action_name]["constraint_slots"]
        cycle = 0
        if "cycle" in constraint_desc:
            cycle = constraint_desc["cycle"]
        # cyclic actions list one node per repetition; clamp to the last entry
        if "cycle_nodes" in constraint_slots[keyframe_label]:
            if cycle < len(constraint_slots[keyframe_label]["cycle_nodes"]):
                mp_name = constraint_slots[keyframe_label]["cycle_nodes"][cycle]
            else:
                mp_name = constraint_slots[keyframe_label]["cycle_nodes"][-1]
        else:
            mp_name = constraint_slots[keyframe_label]["node"]
        print("set constraint", mp_name, cycle)
        if joint_name is None:
            joint_name = constraint_slots[keyframe_label]["joint"]
        hold_frame = False
        if "hold" in constraint_desc and constraint_desc["hold"]:
            hold_frame = True
        tool_offset, src_tool_cos, dest_tool_cos= self.extract_tool_offset(joint_name, constraint_desc)
        node = (action_name, mp_name)
        end_keyframe_label = None
        if "constrainPositionInRegion" in constraint_desc:
            constrain_position_in_region = constraint_desc["constrainPositionInRegion"]
        # NOTE(review): parses as ("endKeyframe" in desc and pos_flag) or ori_flag,
        # so the lookup below can KeyError when only the orientation flag is set
        # and no "endKeyframe" is sent -- confirm intended precedence.
        if "endKeyframe" in constraint_desc and constrain_position_in_region or constrain_orientation_in_region:
            if constraint_desc["endKeyframe"] != "":
                end_keyframe_label = constraint_desc["endKeyframe"]
        frame_constraint = UnityFrameConstraint(node, keyframe_label, joint_name, position, orientation, hold_frame, tool_offset, end_keyframe_label)
        frame_constraint.constrain_orientation_in_region = constrain_orientation_in_region
        frame_constraint.constrain_position_in_region = constrain_position_in_region
        frame_constraint.cycle = cycle
        frame_constraint.look_at = look_at
        if "keyframeEvents" in constraint_desc:
            frame_constraint.keyframe_events = constraint_desc["keyframeEvents"]
        if "keepOffsetBetweenBones" in constraint_desc and constraint_desc["keepOffsetBetweenBones"]:
            if constraint_desc["relativeBoneName"] in self.skeleton.nodes:
                frame_constraint.relative_joint_name = constraint_desc["relativeBoneName"]
        if "keepMirrorBoneStatic" in constraint_desc and constraint_desc["keepMirrorBoneStatic"]:
            mirror_joint_name = self.get_mirror_joint_name(joint_name)
            print("set mirror joint name", mirror_joint_name, joint_name)
            frame_constraint.mirror_joint_name = mirror_joint_name
        if "constrainedParent" in constraint_desc and constraint_desc["constrainedParent"] !="" and "vectorToParent" in constraint_desc:
            constraint_parent = constraint_desc["constrainedParent"]
            frame_constraint.vector_to_parent = constraint_desc["vectorToParent"]
            frame_constraint.constrained_parent = constraint_parent
            print("Found constrained parent", frame_constraint.constrained_parent, frame_constraint.vector_to_parent)
        frame_constraint.src_tool_cos = src_tool_cos
        frame_constraint.dest_tool_cos = dest_tool_cos
        return frame_constraint
    def extract_constraints_from_dict(self, action_desc, look_at_constraints=False):
        """Extract keyframe constraints, an optional end direction, and body
        orientation (look-at / spine) targets from an action description.

        Returns (frame_constraints, end_direction, body_orientation_targets).
        """
        action_name = action_desc["name"]
        end_direction = None
        if "orientationVector" in action_desc:
            end_direction = action_desc["orientationVector"]
        frame_constraints = list()
        if action_name in self.action_definitions and "constraint_slots" in self.action_definitions[action_name]:
            frame_constraints = self.create_frame_constraints(action_name, action_desc, look_at_constraints)
            print("frame constraints", action_name)
        look_at_target = None
        spine_target = None
        if "constrainLookAt" in action_desc and action_desc["constrainLookAt"]:
            # zero-length targets are treated as "not set"
            if "lookAtTarget" in action_desc and np.linalg.norm(action_desc["lookAtTarget"]) > 0:
                look_at_target = action_desc["lookAtTarget"]
            if "spineTarget" in action_desc and np.linalg.norm(action_desc["spineTarget"]) > 0:
                spine_target = action_desc["spineTarget"]
        body_orientation_targets = (look_at_target, spine_target)
        print("body orientation targets", body_orientation_targets)
        return frame_constraints, end_direction, body_orientation_targets
    def create_frame_constraints(self, action_name, action_desc, look_at_constraints):
        """Create one UnityFrameConstraint per entry in action_desc["frameConstraints"]."""
        frame_constraints = list()
        if "frameConstraints" in action_desc:
            for constraint_desc in action_desc["frameConstraints"]:
                frame_constraint = self.create_frame_constraint(action_name, constraint_desc, look_at_constraints)
                frame_constraints.append(frame_constraint)
        return frame_constraints
    def get_mirror_joint_name(self, joint_name):
        """Return the skeleton joint name mirrored across the body, or None if
        no mapping exists for this joint."""
        mirror_joint_name = None
        if joint_name in self.inv_joint_map:
            std_joint_name = self.inv_joint_map[joint_name]
            if std_joint_name in STANDARD_MIRROR_MAP:
                std_mirror_joint_name = STANDARD_MIRROR_MAP[std_joint_name]
                if std_mirror_joint_name in self.skeleton.skeleton_model["joints"]:
                    mirror_joint_name = self.skeleton.skeleton_model["joints"][std_mirror_joint_name]
        return mirror_joint_name
    def generate_walk_constraints(self, current_node, aligning_transform, direction_vector, distance, pose_buffer):
        """Build position + direction (+ optional transition) constraints for
        one locomotion step on the given graph node."""
        n_frames = self._graph.nodes[current_node].get_n_canonical_frames()
        mp_constraints = MotionPrimitiveConstraints()
        mp_constraints.skeleton = self.skeleton
        mp_constraints.constraints = list()
        pos_constraint = self.generate_walk_position_constraint(direction_vector, distance, n_frames, aligning_transform, self.settings.position_constraint_weight)
        mp_constraints.constraints.append(pos_constraint)
        dir_constraint = self.generate_walk_dir_constraint(direction_vector, n_frames, aligning_transform,
                                                           self.settings.direction_constraint_weight)
        if dir_constraint is not None:
            mp_constraints.constraints.append(dir_constraint)
        if self.settings.add_transition_constraint:
            c = self.generate_transition_constraint(pose_buffer, aligning_transform)
            mp_constraints.constraints.append(c)
        mp_constraints.is_local = True
        return mp_constraints
    def generate_motion_primitive_constraints(self, current_node, aligning_transform, action_constraints, pose_buffer):
        """Convert the action's frame constraints into local motion primitive
        constraints; returns (mp_constraints, apply_ik)."""
        apply_ik = False
        n_frames = self._graph.nodes[current_node].get_n_canonical_frames()
        mp_constraints = MotionPrimitiveConstraints()
        mp_constraints.skeleton = self.skeleton
        mp_constraints.constraints = list()
        for frame_constraint in action_constraints:
            # resolve the joint name: standard name first, then raw skeleton name
            joint_name = None
            if frame_constraint.joint in self.skeleton.skeleton_model["joints"]:
                joint_name = self.skeleton.skeleton_model["joints"][frame_constraint.joint]
            elif frame_constraint.joint in self.skeleton.nodes:
                joint_name = frame_constraint.joint
            else:
                print("Error: could not assign joint", frame_constraint.joint)
            if joint_name is not None:
                c = self.generate_mg_constraint_from_unity_constraint(frame_constraint, joint_name,
                                                                      n_frames, aligning_transform)
                mp_constraints.constraints.append(c)
                mp_constraints.use_local_optimization = self.algorithm_config["local_optimization_mode"] in ["keyframes",
                                                                                                            "all"]
                apply_ik = self.settings.activate_ik
                print("add end effector constraints", c.joint_name, c.relative_joint_name, c.keyframe_label, c.canonical_keyframe, c.canonical_end_keyframe)
        if self.settings.add_transition_constraint and not apply_ik:
            c = self.generate_transition_constraint(pose_buffer, aligning_transform)
            mp_constraints.constraints.append(c)
        mp_constraints.is_local = True
return mp_constraints, apply_ik | [
"morphablegraphs.constraints.spatial_constraints.keyframe_constraints.Direction2DConstraint",
"numpy.zeros",
"transformations.quaternion_matrix",
"morphablegraphs.constraints.spatial_constraints.keyframe_constraints.GlobalTransformConstraint",
"numpy.linalg.norm",
"numpy.array",
"morphablegraphs.constra... | [((2138, 2154), 'numpy.zeros', 'np.zeros', (['n_dims'], {}), '(n_dims)\n', (2146, 2154), True, 'import numpy as np\n'), ((4625, 4650), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (4648, 4650), False, 'import collections\n'), ((5827, 5856), 'numpy.linalg.norm', 'np.linalg.norm', (['local_dir_vec'], {}), '(local_dir_vec)\n', (5841, 5856), True, 'import numpy as np\n'), ((6229, 6281), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.Direction2DConstraint', 'Direction2DConstraint', (['self.skeleton', 'c_desc', 'w', '(1.0)'], {}), '(self.skeleton, c_desc, w, 1.0)\n', (6250, 6281), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, Direction2DConstraint, RelativeTransformConstraint\n'), ((6509, 6538), 'numpy.linalg.norm', 'np.linalg.norm', (['local_dir_vec'], {}), '(local_dir_vec)\n', (6523, 6538), True, 'import numpy as np\n'), ((6819, 6875), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.GlobalTransformConstraint', 'GlobalTransformConstraint', (['self.skeleton', 'c_desc', 'w', '(1.0)'], {}), '(self.skeleton, c_desc, w, 1.0)\n', (6844, 6875), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, Direction2DConstraint, RelativeTransformConstraint\n'), ((12424, 12471), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.pose_constraint.PoseConstraint', 'PoseConstraint', (['self.skeleton', 'c_desc', '(1.0)', '(1.0)'], {}), '(self.skeleton, c_desc, 1.0, 1.0)\n', (12438, 12471), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints.pose_constraint import PoseConstraint\n'), ((21570, 21598), 'morphablegraphs.constraints.motion_primitive_constraints.MotionPrimitiveConstraints', 'MotionPrimitiveConstraints', ([], {}), '()\n', (21596, 21598), False, 'from 
morphablegraphs.constraints.motion_primitive_constraints import MotionPrimitiveConstraints\n'), ((22726, 22754), 'morphablegraphs.constraints.motion_primitive_constraints.MotionPrimitiveConstraints', 'MotionPrimitiveConstraints', ([], {}), '()\n', (22752, 22754), False, 'from morphablegraphs.constraints.motion_primitive_constraints import MotionPrimitiveConstraints\n'), ((2477, 2521), 'numpy.array', 'np.array', (["[-q['w'], -q['x'], q['y'], q['z']]"], {}), "([-q['w'], -q['x'], q['y'], q['z']])\n", (2485, 2521), True, 'import numpy as np\n'), ((5741, 5805), 'numpy.dot', 'np.dot', (['aligning_transform', '[dir_vector[0], 0, dir_vector[2], 0]'], {}), '(aligning_transform, [dir_vector[0], 0, dir_vector[2], 0])\n', (5747, 5805), True, 'import numpy as np\n'), ((6415, 6479), 'numpy.dot', 'np.dot', (['aligning_transform', '[dir_vector[0], 0, dir_vector[2], 0]'], {}), '(aligning_transform, [dir_vector[0], 0, dir_vector[2], 0])\n', (6421, 6479), True, 'import numpy as np\n'), ((7148, 7218), 'numpy.dot', 'np.dot', (['aligning_transform', '[position[0], position[1], position[2], 1]'], {}), '(aligning_transform, [position[0], position[1], position[2], 1])\n', (7154, 7218), True, 'import numpy as np\n'), ((7760, 7801), 'transformations.quaternion_from_matrix', 'quaternion_from_matrix', (['local_orientation'], {}), '(local_orientation)\n', (7782, 7801), False, 'from transformations import quaternion_multiply, quaternion_inverse, quaternion_slerp, quaternion_matrix, quaternion_from_matrix\n'), ((7820, 7838), 'numpy.linalg.norm', 'np.linalg.norm', (['lq'], {}), '(lq)\n', (7834, 7838), True, 'import numpy as np\n'), ((8029, 8089), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.RelativeTransformConstraint', 'RelativeTransformConstraint', (['self.skeleton', 'c_desc', '(1.0)', '(1.0)'], {}), '(self.skeleton, c_desc, 1.0, 1.0)\n', (8056, 8089), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, 
Direction2DConstraint, RelativeTransformConstraint\n'), ((8123, 8181), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.GlobalTransformConstraint', 'GlobalTransformConstraint', (['self.skeleton', 'c_desc', '(1.0)', '(1.0)'], {}), '(self.skeleton, c_desc, 1.0, 1.0)\n', (8148, 8181), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, Direction2DConstraint, RelativeTransformConstraint\n'), ((8531, 8580), 'numpy.array', 'np.array', (['[position[0], position[1], position[2]]'], {}), '([position[0], position[1], position[2]])\n', (8539, 8580), True, 'import numpy as np\n'), ((11145, 11205), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.RelativeTransformConstraint', 'RelativeTransformConstraint', (['self.skeleton', 'c_desc', '(1.0)', '(1.0)'], {}), '(self.skeleton, c_desc, 1.0, 1.0)\n', (11172, 11205), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, Direction2DConstraint, RelativeTransformConstraint\n'), ((11239, 11297), 'morphablegraphs.constraints.spatial_constraints.keyframe_constraints.GlobalTransformConstraint', 'GlobalTransformConstraint', (['self.skeleton', 'c_desc', '(1.0)', '(1.0)'], {}), '(self.skeleton, c_desc, 1.0, 1.0)\n', (11264, 11297), False, 'from morphablegraphs.constraints.spatial_constraints.keyframe_constraints import GlobalTransformConstraint, Direction2DConstraint, RelativeTransformConstraint\n'), ((12055, 12103), 'numpy.dot', 'np.dot', (['aligning_transform[:3, :3]', 'last_pose[i]'], {}), '(aligning_transform[:3, :3], last_pose[i])\n', (12061, 12103), True, 'import numpy as np\n'), ((7711, 7741), 'transformations.quaternion_matrix', 'quaternion_matrix', (['orientation'], {}), '(orientation)\n', (7728, 7741), False, 'from transformations import quaternion_multiply, quaternion_inverse, quaternion_slerp, quaternion_matrix, quaternion_from_matrix\n'), ((8413, 8483), 
'numpy.dot', 'np.dot', (['aligning_transform', '[position[0], position[1], position[2], 1]'], {}), '(aligning_transform, [position[0], position[1], position[2], 1])\n', (8419, 8483), True, 'import numpy as np\n'), ((9610, 9651), 'transformations.quaternion_from_matrix', 'quaternion_from_matrix', (['local_orientation'], {}), '(local_orientation)\n', (9632, 9651), False, 'from transformations import quaternion_multiply, quaternion_inverse, quaternion_slerp, quaternion_matrix, quaternion_from_matrix\n'), ((9674, 9692), 'numpy.linalg.norm', 'np.linalg.norm', (['lq'], {}), '(lq)\n', (9688, 9692), True, 'import numpy as np\n'), ((13359, 13393), 'numpy.array', 'np.array', (['[tp[0], tp[1], tp[2], 1]'], {}), '([tp[0], tp[1], tp[2], 1])\n', (13367, 13393), True, 'import numpy as np\n'), ((13415, 13431), 'numpy.linalg.inv', 'np.linalg.inv', (['m'], {}), '(m)\n', (13428, 13431), True, 'import numpy as np\n'), ((13459, 13476), 'numpy.dot', 'np.dot', (['inv_m', 'tp'], {}), '(inv_m, tp)\n', (13465, 13476), True, 'import numpy as np\n'), ((9557, 9587), 'transformations.quaternion_matrix', 'quaternion_matrix', (['orientation'], {}), '(orientation)\n', (9574, 9587), False, 'from transformations import quaternion_multiply, quaternion_inverse, quaternion_slerp, quaternion_matrix, quaternion_from_matrix\n'), ((10040, 10138), 'numpy.dot', 'np.dot', (['aligning_transform', '[vector_to_parent[0], vector_to_parent[1], vector_to_parent[2], 0]'], {}), '(aligning_transform, [vector_to_parent[0], vector_to_parent[1],\n vector_to_parent[2], 0])\n', (10046, 10138), True, 'import numpy as np\n'), ((19899, 19942), 'numpy.linalg.norm', 'np.linalg.norm', (["action_desc['lookAtTarget']"], {}), "(action_desc['lookAtTarget'])\n", (19913, 19942), True, 'import numpy as np\n'), ((20057, 20099), 'numpy.linalg.norm', 'np.linalg.norm', (["action_desc['spineTarget']"], {}), "(action_desc['spineTarget'])\n", (20071, 20099), True, 'import numpy as np\n'), ((10702, 10794), 'numpy.dot', 'np.dot', 
(['aligning_transform', '[dest_tool_axis[0], dest_tool_axis[1], dest_tool_axis[2], 0]'], {}), '(aligning_transform, [dest_tool_axis[0], dest_tool_axis[1],\n dest_tool_axis[2], 0])\n', (10708, 10794), True, 'import numpy as np\n'), ((14123, 14157), 'numpy.linalg.norm', 'np.linalg.norm', (['target_tool_vector'], {}), '(target_tool_vector)\n', (14137, 14157), True, 'import numpy as np\n'), ((14626, 14690), 'numpy.array', 'np.array', (['[g_axis_point[0], g_axis_point[1], g_axis_point[2], 1]'], {}), '([g_axis_point[0], g_axis_point[1], g_axis_point[2], 1])\n', (14634, 14690), True, 'import numpy as np\n'), ((14728, 14744), 'numpy.linalg.inv', 'np.linalg.inv', (['m'], {}), '(m)\n', (14741, 14744), True, 'import numpy as np\n'), ((15012, 15044), 'numpy.linalg.norm', 'np.linalg.norm', (['tool_axis_offset'], {}), '(tool_axis_offset)\n', (15026, 15044), True, 'import numpy as np\n'), ((15096, 15172), 'numpy.array', 'np.array', (['[tool_axis_offset[0], tool_axis_offset[1], tool_axis_offset[2], 0]'], {}), '([tool_axis_offset[0], tool_axis_offset[1], tool_axis_offset[2], 0])\n', (15104, 15172), True, 'import numpy as np\n'), ((14796, 14813), 'numpy.dot', 'np.dot', (['inv_m', 'tp'], {}), '(inv_m, tp)\n', (14802, 14813), True, 'import numpy as np\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def test_label_nonzero_pixel_count_ratio_map():
    """Verify the nonzero-pixel overlap ratio map in both label orders."""
    labels_a = np.asarray([[1, 2, 2, 3]])
    labels_b = np.asarray([[1, 2, 0, 0]])
    # expected per-label overlap ratios for a->b and b->a
    reference_ab = np.asarray([[1, 0.5, 0.5, 0]])
    reference21 = np.asarray([[1, 1, 0, 0]])
    assert np.array_equal(cle.label_nonzero_pixel_count_ratio_map(labels_a, labels_b), reference_ab)
    result = cle.label_nonzero_pixel_count_ratio_map(labels_b, labels_a)
assert (np.array_equal(result, reference21)) | [
"pyclesperanto_prototype.label_nonzero_pixel_count_ratio_map",
"numpy.asarray",
"numpy.array_equal"
] | [((120, 146), 'numpy.asarray', 'np.asarray', (['[[1, 2, 2, 3]]'], {}), '([[1, 2, 2, 3]])\n', (130, 146), True, 'import numpy as np\n'), ((161, 187), 'numpy.asarray', 'np.asarray', (['[[1, 2, 0, 0]]'], {}), '([[1, 2, 0, 0]])\n', (171, 187), True, 'import numpy as np\n'), ((207, 237), 'numpy.asarray', 'np.asarray', (['[[1, 0.5, 0.5, 0]]'], {}), '([[1, 0.5, 0.5, 0]])\n', (217, 237), True, 'import numpy as np\n'), ((256, 282), 'numpy.asarray', 'np.asarray', (['[[1, 1, 0, 0]]'], {}), '([[1, 1, 0, 0]])\n', (266, 282), True, 'import numpy as np\n'), ((297, 354), 'pyclesperanto_prototype.label_nonzero_pixel_count_ratio_map', 'cle.label_nonzero_pixel_count_ratio_map', (['labels1', 'labels2'], {}), '(labels1, labels2)\n', (336, 354), True, 'import pyclesperanto_prototype as cle\n'), ((367, 402), 'numpy.array_equal', 'np.array_equal', (['result', 'reference12'], {}), '(result, reference12)\n', (381, 402), True, 'import numpy as np\n'), ((418, 475), 'pyclesperanto_prototype.label_nonzero_pixel_count_ratio_map', 'cle.label_nonzero_pixel_count_ratio_map', (['labels2', 'labels1'], {}), '(labels2, labels1)\n', (457, 475), True, 'import pyclesperanto_prototype as cle\n'), ((488, 523), 'numpy.array_equal', 'np.array_equal', (['result', 'reference21'], {}), '(result, reference21)\n', (502, 523), True, 'import numpy as np\n')] |
'''
Test Sciris math functions.
'''
import numpy as np
import pylab as pl
import sciris as sc
import pytest
# Allow 'doplot' to be predefined (e.g. in an interactive session); default to no plots
if 'doplot' not in locals(): doplot = False
def test_utils():
    """Exercise assorted sciris utility functions.

    Returns an objdict of intermediate results so they can be inspected
    interactively when this file is run as a script.
    """
    sc.heading('Testing utilities')
    o = sc.objdict()
    print('Testing sc.approx()')
    assert sc.approx(2*6, 11.9999999, eps=1e-6) # Returns True
    o.approx = sc.approx([3,12,11.9], 12) # Returns array([False, True, False], dtype=bool)
    assert not o.approx[0]
    print('Testing sc.safedivide()') # fixed typo: message previously said 'savedivide'
    assert sc.safedivide(numerator=0, denominator=0, default=1, eps=0) == 1 # Returns 1
    o.safedivide = sc.safedivide(3, np.array([1,3,0]),-1, warn=False) # Returns array([ 3, 1, -1])
    assert o.safedivide[-1] == -1
    print('Testing sc.isprime()')
    o.isprime = [[i**2+1,sc.isprime(i**2+1)] for i in range(10)]
    print('Testing sc.perturb()')
    o.perturb = sc.perturb(10, 0.3)
    print('Testing sc.normsum() and sc.normalize()')
    o.normsum = sc.normsum([2,5,3,6,2,6,7,2,3,4], 100) # Scale so sum equals 100
    o.normalize = sc.normalize([2,3,7,27])
    assert o.normsum[0] == 5.0
    assert o.normalize[2] == 0.2
    print('Testing sc.inclusiverange()')
    o.inclusiverange = sc.inclusiverange(3,5,0.2) # Unlike arange, includes the endpoint
    sc.inclusiverange()
    sc.inclusiverange(3)
    sc.inclusiverange(3,5)
    assert o.inclusiverange[-1] == 5
    print('Testing sc.randround()')
    base = np.random.randn(20)
    o.randround = sc.randround(base) # Also accepts lists and scalars, exercised below
    sc.randround(base.tolist())
    sc.randround(base[0])
    print('Testing sc.cat()')
    o.cat = sc.cat(np.array([1,2,3]), [4,5], 6, copy=True) # Concatenates arrays, lists, and scalars
    assert o.cat[3] == 4
    return o
return o
def test_find():
    """Exercise sciris search/indexing helpers and return an objdict of results."""
    sc.heading('Testing find functions')
    found = sc.objdict()
    print('Testing sc.findinds()')
    found.vals = sc.findinds([2, 3, 6, 3], 6)
    assert found.vals[0] == 2
    print('Testing sc.findfirst(), sc.findlast()')
    found.first = sc.findfirst(pl.rand(10))
    found.last = sc.findlast(pl.rand(10))
    sc.findlast([1, 2, 3], 4, die=False)  # a missing value is tolerated with die=False...
    with pytest.raises(IndexError):
        sc.findlast([1, 2, 3], 4)  # ...but raises by default
    print('Testing sc.findnearest()')
    found.nearest = sc.findnearest([0, 2, 5, 8], 3)
    sc.findnearest([0, 2, 5, 8], [3, 6])
    assert found.nearest == 1
    print('Testing sc.getvalidinds(), sc.getvaliddata()')
    found.inds = sc.getvalidinds([3, 5, 8, 13], [2000, np.nan, np.nan, 2004])  # indices with valid reference data
    found.data = sc.getvaliddata(np.array([3, 5, 8, 13]), np.array([2000, np.nan, np.nan, 2004]))  # values with valid reference data
    assert found.inds[-1] == 3
    assert found.data[-1] == 13
    print('Testing sc.sanitize()')
    nandata = [3, 4, np.nan, 8, 2, np.nan, np.nan, np.nan, 8]
    sanitized, inds = sc.sanitize(np.array(nandata), returninds=True)
    sanitized = sc.sanitize(np.array(nandata), replacenans=True)
    sanitized = sc.sanitize(np.array(nandata), replacenans=0)
    found.sanitized = sanitized
    return found
def test_smooth(doplot=doplot):
sc.heading('Testing smoothing')
print('Testing sc.smooth()')
data = pl.randn(200,100)
smoothdata = sc.smooth(data,10)
print('Testing sc.smoothinterp()')
n = 20
x1 = pl.arange(n)
y1 = pl.randn(n)
x2 = pl.linspace(-5,n+5,100)
y2 = sc.smoothinterp(x2, x1, y1)
if doplot:
pl.subplot(3,1,1)
pl.pcolor(data)
pl.subplot(3,1,2)
pl.pcolor(smoothdata)
pl.subplot(3,1,3)
pl.scatter(x1, y1)
pl.plot(x2, y2)
pl.show()
return smoothdata
#%% Run as a script
if __name__ == '__main__':
sc.tic()
doplot = True
other = test_utils()
found = test_find()
smoothed = test_smooth(doplot)
sc.toc()
print('Done.') | [
"sciris.safedivide",
"sciris.findinds",
"pylab.randn",
"sciris.smooth",
"pylab.linspace",
"sciris.tic",
"sciris.findnearest",
"sciris.objdict",
"sciris.randround",
"sciris.normsum",
"sciris.approx",
"numpy.random.randn",
"sciris.toc",
"pytest.raises",
"sciris.normalize",
"sciris.smooth... | [((179, 210), 'sciris.heading', 'sc.heading', (['"""Testing utilities"""'], {}), "('Testing utilities')\n", (189, 210), True, 'import sciris as sc\n'), ((220, 232), 'sciris.objdict', 'sc.objdict', ([], {}), '()\n', (230, 232), True, 'import sciris as sc\n'), ((278, 317), 'sciris.approx', 'sc.approx', (['(2 * 6)', '(11.9999999)'], {'eps': '(1e-06)'}), '(2 * 6, 11.9999999, eps=1e-06)\n', (287, 317), True, 'import sciris as sc\n'), ((345, 373), 'sciris.approx', 'sc.approx', (['[3, 12, 11.9]', '(12)'], {}), '([3, 12, 11.9], 12)\n', (354, 373), True, 'import sciris as sc\n'), ((861, 880), 'sciris.perturb', 'sc.perturb', (['(10)', '(0.3)'], {}), '(10, 0.3)\n', (871, 880), True, 'import sciris as sc\n'), ((953, 1000), 'sciris.normsum', 'sc.normsum', (['[2, 5, 3, 6, 2, 6, 7, 2, 3, 4]', '(100)'], {}), '([2, 5, 3, 6, 2, 6, 7, 2, 3, 4], 100)\n', (963, 1000), True, 'import sciris as sc\n'), ((1036, 1063), 'sciris.normalize', 'sc.normalize', (['[2, 3, 7, 27]'], {}), '([2, 3, 7, 27])\n', (1048, 1063), True, 'import sciris as sc\n'), ((1190, 1218), 'sciris.inclusiverange', 'sc.inclusiverange', (['(3)', '(5)', '(0.2)'], {}), '(3, 5, 0.2)\n', (1207, 1218), True, 'import sciris as sc\n'), ((1221, 1240), 'sciris.inclusiverange', 'sc.inclusiverange', ([], {}), '()\n', (1238, 1240), True, 'import sciris as sc\n'), ((1245, 1265), 'sciris.inclusiverange', 'sc.inclusiverange', (['(3)'], {}), '(3)\n', (1262, 1265), True, 'import sciris as sc\n'), ((1270, 1293), 'sciris.inclusiverange', 'sc.inclusiverange', (['(3)', '(5)'], {}), '(3, 5)\n', (1287, 1293), True, 'import sciris as sc\n'), ((1378, 1397), 'numpy.random.randn', 'np.random.randn', (['(20)'], {}), '(20)\n', (1393, 1397), True, 'import numpy as np\n'), ((1416, 1434), 'sciris.randround', 'sc.randround', (['base'], {}), '(base)\n', (1428, 1434), True, 'import sciris as sc\n'), ((1471, 1492), 'sciris.randround', 'sc.randround', (['base[0]'], {}), '(base[0])\n', (1483, 1492), True, 'import sciris as sc\n'), ((1645, 
1681), 'sciris.heading', 'sc.heading', (['"""Testing find functions"""'], {}), "('Testing find functions')\n", (1655, 1681), True, 'import sciris as sc\n'), ((1695, 1707), 'sciris.objdict', 'sc.objdict', ([], {}), '()\n', (1705, 1707), True, 'import sciris as sc\n'), ((1761, 1789), 'sciris.findinds', 'sc.findinds', (['[2, 3, 6, 3]', '(6)'], {}), '([2, 3, 6, 3], 6)\n', (1772, 1789), True, 'import sciris as sc\n'), ((1959, 1995), 'sciris.findlast', 'sc.findlast', (['[1, 2, 3]', '(4)'], {'die': '(False)'}), '([1, 2, 3], 4, die=False)\n', (1970, 1995), True, 'import sciris as sc\n'), ((2121, 2152), 'sciris.findnearest', 'sc.findnearest', (['[0, 2, 5, 8]', '(3)'], {}), '([0, 2, 5, 8], 3)\n', (2135, 2152), True, 'import sciris as sc\n'), ((2154, 2190), 'sciris.findnearest', 'sc.findnearest', (['[0, 2, 5, 8]', '[3, 6]'], {}), '([0, 2, 5, 8], [3, 6])\n', (2168, 2190), True, 'import sciris as sc\n'), ((2293, 2353), 'sciris.getvalidinds', 'sc.getvalidinds', (['[3, 5, 8, 13]', '[2000, np.nan, np.nan, 2004]'], {}), '([3, 5, 8, 13], [2000, np.nan, np.nan, 2004])\n', (2308, 2353), True, 'import sciris as sc\n'), ((3006, 3037), 'sciris.heading', 'sc.heading', (['"""Testing smoothing"""'], {}), "('Testing smoothing')\n", (3016, 3037), True, 'import sciris as sc\n'), ((3083, 3101), 'pylab.randn', 'pl.randn', (['(200)', '(100)'], {}), '(200, 100)\n', (3091, 3101), True, 'import pylab as pl\n'), ((3118, 3137), 'sciris.smooth', 'sc.smooth', (['data', '(10)'], {}), '(data, 10)\n', (3127, 3137), True, 'import sciris as sc\n'), ((3197, 3209), 'pylab.arange', 'pl.arange', (['n'], {}), '(n)\n', (3206, 3209), True, 'import pylab as pl\n'), ((3219, 3230), 'pylab.randn', 'pl.randn', (['n'], {}), '(n)\n', (3227, 3230), True, 'import pylab as pl\n'), ((3240, 3267), 'pylab.linspace', 'pl.linspace', (['(-5)', '(n + 5)', '(100)'], {}), '(-5, n + 5, 100)\n', (3251, 3267), True, 'import pylab as pl\n'), ((3273, 3300), 'sciris.smoothinterp', 'sc.smoothinterp', (['x2', 'x1', 'y1'], {}), '(x2, x1, 
y1)\n', (3288, 3300), True, 'import sciris as sc\n'), ((3594, 3602), 'sciris.tic', 'sc.tic', ([], {}), '()\n', (3600, 3602), True, 'import sciris as sc\n'), ((3712, 3720), 'sciris.toc', 'sc.toc', ([], {}), '()\n', (3718, 3720), True, 'import sciris as sc\n'), ((498, 557), 'sciris.safedivide', 'sc.safedivide', ([], {'numerator': '(0)', 'denominator': '(0)', 'default': '(1)', 'eps': '(0)'}), '(numerator=0, denominator=0, default=1, eps=0)\n', (511, 557), True, 'import sciris as sc\n'), ((611, 630), 'numpy.array', 'np.array', (['[1, 3, 0]'], {}), '([1, 3, 0])\n', (619, 630), True, 'import numpy as np\n'), ((1543, 1562), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1551, 1562), True, 'import numpy as np\n'), ((1900, 1911), 'pylab.rand', 'pl.rand', (['(10)'], {}), '(10)\n', (1907, 1911), True, 'import pylab as pl\n'), ((1942, 1953), 'pylab.rand', 'pl.rand', (['(10)'], {}), '(10)\n', (1949, 1953), True, 'import pylab as pl\n'), ((2003, 2028), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2016, 2028), False, 'import pytest\n'), ((2038, 2063), 'sciris.findlast', 'sc.findlast', (['[1, 2, 3]', '(4)'], {}), '([1, 2, 3], 4)\n', (2049, 2063), True, 'import sciris as sc\n'), ((2407, 2430), 'numpy.array', 'np.array', (['[3, 5, 8, 13]'], {}), '([3, 5, 8, 13])\n', (2415, 2430), True, 'import numpy as np\n'), ((2429, 2467), 'numpy.array', 'np.array', (['[2000, np.nan, np.nan, 2004]'], {}), '([2000, np.nan, np.nan, 2004])\n', (2437, 2467), True, 'import numpy as np\n'), ((2625, 2682), 'numpy.array', 'np.array', (['[3, 4, np.nan, 8, 2, np.nan, np.nan, np.nan, 8]'], {}), '([3, 4, np.nan, 8, 2, np.nan, np.nan, np.nan, 8])\n', (2633, 2682), True, 'import numpy as np\n'), ((2734, 2791), 'numpy.array', 'np.array', (['[3, 4, np.nan, 8, 2, np.nan, np.nan, np.nan, 8]'], {}), '([3, 4, np.nan, 8, 2, np.nan, np.nan, np.nan, 8])\n', (2742, 2791), True, 'import numpy as np\n'), ((2844, 2901), 'numpy.array', 'np.array', (['[3, 4, np.nan, 8, 2, 
np.nan, np.nan, np.nan, 8]'], {}), '([3, 4, np.nan, 8, 2, np.nan, np.nan, np.nan, 8])\n', (2852, 2901), True, 'import numpy as np\n'), ((3325, 3344), 'pylab.subplot', 'pl.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (3335, 3344), True, 'import pylab as pl\n'), ((3351, 3366), 'pylab.pcolor', 'pl.pcolor', (['data'], {}), '(data)\n', (3360, 3366), True, 'import pylab as pl\n'), ((3375, 3394), 'pylab.subplot', 'pl.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3385, 3394), True, 'import pylab as pl\n'), ((3401, 3422), 'pylab.pcolor', 'pl.pcolor', (['smoothdata'], {}), '(smoothdata)\n', (3410, 3422), True, 'import pylab as pl\n'), ((3431, 3450), 'pylab.subplot', 'pl.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3441, 3450), True, 'import pylab as pl\n'), ((3457, 3475), 'pylab.scatter', 'pl.scatter', (['x1', 'y1'], {}), '(x1, y1)\n', (3467, 3475), True, 'import pylab as pl\n'), ((3484, 3499), 'pylab.plot', 'pl.plot', (['x2', 'y2'], {}), '(x2, y2)\n', (3491, 3499), True, 'import pylab as pl\n'), ((3508, 3517), 'pylab.show', 'pl.show', ([], {}), '()\n', (3515, 3517), True, 'import pylab as pl\n'), ((770, 792), 'sciris.isprime', 'sc.isprime', (['(i ** 2 + 1)'], {}), '(i ** 2 + 1)\n', (780, 792), True, 'import sciris as sc\n')] |
#!/usr/bin/env python3
#################
# img_to_vec.py #
#################
import numpy as np
import imutils
import cv2
import os
import sys
ROOT_DIR = os.path.abspath("../")
sys.path.append(ROOT_DIR)
from webcam import pre_recognition
def img_to_vec(image_path, detector, embedder, min_prob_filter):
'''
Convert an image to a 128-d vector
Arguments:
1. image_path: Image path
2. detector: ResNet CNN
3. embedder: FaceNet CNN
4. min_prob_filter: Probability threshold to filter weak detection
from ResNet CNN
Returns:
1. vector: 128-d vector (can be None if no detection)
'''
# Initialize the 128-d vector
vector = np.nan
# Load the image and resize to width of 600 pixels,
# while maintaing the aspect ratio. Then, get the
# image dimension
image = cv2.imread(image_path)
image = imutils.resize(image, width=600)
(h, w) = image.shape[:2]
# Generate bounding boxes for face candidates
detections, _ = pre_recognition.locate_faces(image, detector, None)
# Ensure at least one face candidate
if len(detections) > 0:
# NOTE: Assume each image has only ONE face !!!!
# Select the bounding box with the largest probability
# Here, only ONE bounding box is chosen !
i = np.argmax(detections[0, 0, :, 2])
confidence = detections[0, 0, i, 2]
# Filter weak detection
if confidence > min_prob_filter:
# Compute the (x, y) coordinates of the bounding box
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(start_x, start_y, end_x, end_y) = box.astype("int")
# Crop the ROI and dimensions
face = image[start_y:end_y, start_x:end_x]
(face_h, face_w) = face.shape[:2]
# Filter small face candidates
if face_w >= 20 and face_h >= 20:
# Extract the 128-d vector from the ROI
vector, _ = pre_recognition.extract_vector(face, embedder, None)
return vector
| [
"sys.path.append",
"os.path.abspath",
"numpy.argmax",
"webcam.pre_recognition.extract_vector",
"cv2.imread",
"numpy.array",
"imutils.resize",
"webcam.pre_recognition.locate_faces"
] | [((157, 179), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (172, 179), False, 'import os\n'), ((180, 205), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (195, 205), False, 'import sys\n'), ((905, 927), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (915, 927), False, 'import cv2\n'), ((940, 972), 'imutils.resize', 'imutils.resize', (['image'], {'width': '(600)'}), '(image, width=600)\n', (954, 972), False, 'import imutils\n'), ((1073, 1124), 'webcam.pre_recognition.locate_faces', 'pre_recognition.locate_faces', (['image', 'detector', 'None'], {}), '(image, detector, None)\n', (1101, 1124), False, 'from webcam import pre_recognition\n'), ((1389, 1422), 'numpy.argmax', 'np.argmax', (['detections[0, 0, :, 2]'], {}), '(detections[0, 0, :, 2])\n', (1398, 1422), True, 'import numpy as np\n'), ((1651, 1673), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1659, 1673), True, 'import numpy as np\n'), ((2057, 2109), 'webcam.pre_recognition.extract_vector', 'pre_recognition.extract_vector', (['face', 'embedder', 'None'], {}), '(face, embedder, None)\n', (2087, 2109), False, 'from webcam import pre_recognition\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 <NAME> (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Fast Fourier Transform snippet
#
# Example usages:
# ./rfft2.py -t 0.0001 ./test.jpeg
# ./rfft2.py -t 0.001 ./test.jpeg
# ipython3 -- ./rfft2.py -t 0.0001 ./test.jpeg
#
# This snippet requires Numpy, Scipy, Matplotlib and PIL/Pillow Python libraries.
#
# Additional documentation:
# - Numpy implementation: http://docs.scipy.org/doc/numpy/reference/routines.fft.html
# - Scipy implementation: http://docs.scipy.org/doc/scipy/reference/fftpack.html
import argparse
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import PIL.Image as pil_img # PIL.Image is a module not a class...
# PARSE OPTIONS ######################################################
parser = argparse.ArgumentParser(description='An FFT snippet.')
parser.add_argument("--shift", "-s", help="Shift the zero to the center", action="store_true", default=False)
parser.add_argument("--threshold", "-t", help="The threshold value (between 0 and 1)", type=float, default=0, metavar="FLOAT")
parser.add_argument("fileargs", nargs=1, metavar="FILE", help="The file image to filter")
args = parser.parse_args()
shift = args.shift
threshold = args.threshold
file_path = args.fileargs[0]
# GET DATA ###########################################################
# Open the image and convert it to grayscale
signal = np.array(pil_img.open(file_path).convert('L'))
# Init plot
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 8))
# Plot
ax1.imshow(signal, interpolation='nearest', cmap=cm.gray)
ax1.set_title("Original image")
# FOURIER TRANSFORM WITH NUMPY #######################################
# Do the fourier transform #############
transformed_signal = np.fft.rfft2(signal)
if shift:
transformed_signal = np.fft.fftshift(transformed_signal)
ax2.imshow(np.log10(abs(transformed_signal)),
interpolation='nearest',
cmap=cm.gray)
ax2.set_title("Fourier coefficients before filtering")
# Filter ###############################
max_value = np.max(abs(transformed_signal))
filtered_transformed_signal = transformed_signal * (abs(transformed_signal) > max_value*threshold)
ax3.imshow(np.log10(abs(filtered_transformed_signal)),
interpolation='nearest',
cmap=cm.gray)
ax3.set_title("Fourier coefficients after filtering")
# Do the reverse transform #############
if shift:
filtered_transformed_signal = np.fft.ifftshift(filtered_transformed_signal)
filtered_signal = np.fft.irfft2(filtered_transformed_signal)
ax4.imshow(abs(filtered_signal), interpolation='nearest', cmap=cm.gray)
ax4.set_title("Filtered image")
# SAVE FILES ######################
#plt.savefig("rfft2.png")
#plt.savefig("rfft2.svg")
plt.savefig("rfft2.pdf")
# PLOT ############################
plt.show()
| [
"numpy.fft.irfft2",
"numpy.fft.ifftshift",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.subplots",
"PIL.Image.open",
"numpy.fft.fftshift",
"numpy.fft.rfft2",
"matplotlib.pyplot.savefig"
] | [((1857, 1911), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""An FFT snippet."""'}), "(description='An FFT snippet.')\n", (1880, 1911), False, 'import argparse\n'), ((2564, 2599), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(14, 8)'}), '(2, 2, figsize=(14, 8))\n', (2576, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2855), 'numpy.fft.rfft2', 'np.fft.rfft2', (['signal'], {}), '(signal)\n', (2847, 2855), True, 'import numpy as np\n'), ((3602, 3644), 'numpy.fft.irfft2', 'np.fft.irfft2', (['filtered_transformed_signal'], {}), '(filtered_transformed_signal)\n', (3615, 3644), True, 'import numpy as np\n'), ((3841, 3865), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rfft2.pdf"""'], {}), "('rfft2.pdf')\n", (3852, 3865), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3915), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3913, 3915), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2927), 'numpy.fft.fftshift', 'np.fft.fftshift', (['transformed_signal'], {}), '(transformed_signal)\n', (2907, 2927), True, 'import numpy as np\n'), ((3537, 3582), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['filtered_transformed_signal'], {}), '(filtered_transformed_signal)\n', (3553, 3582), True, 'import numpy as np\n'), ((2481, 2504), 'PIL.Image.open', 'pil_img.open', (['file_path'], {}), '(file_path)\n', (2493, 2504), True, 'import PIL.Image as pil_img\n')] |
import numpy as np
class Bat:
def __init__(self, min_x, max_x, min_v, max_v, fitness_func, f_min, f_max, dimensions, alpha, gamma):
self.min_velocity = min_v
self.max_velocity = max_v
self.min_position = min_x
self.max_position = max_x
self.min_frequency = f_min
self.max_frequency = f_max
self.alpha = alpha
self.gamma = gamma
self.counter = 1
self.number_of_dimensions = dimensions
self.A_loudness = np.random.uniform(1, 2)
self.r_tempo_0 = np.random.uniform(0, 1)
self.r_tempo_i = self.r_tempo_0
self.frequency = 0
self.X_positions = np.zeros(self.number_of_dimensions)
self.V_velocities = np.zeros(self.number_of_dimensions)
self.__init_position()
self.__init_velocities()
self.__update_frequency()
self.fitness = fitness_func
self.best_fitness = fitness_func(self.X_positions)
def __init_position(self):
self.X_positions = np.random.uniform(self.min_position, self.max_position, self.number_of_dimensions)
def __init_velocities(self):
self.V_velocities = np.random.uniform(self.min_velocity, self.max_velocity, self.number_of_dimensions)
def __update_frequency(self):
self.frequency = self.min_frequency + np.random.uniform(0, 1, self.number_of_dimensions) * (self.max_frequency - self.min_frequency)
def __update_velocities(self, G_best):
self.V_velocities = self.V_velocities + self.frequency * (self.X_positions - G_best)
self.V_velocities = np.clip(self.V_velocities, self.min_velocity, self.max_velocity)
def __update_positions(self):
self.X_positions = np.clip(self.X_positions + self.V_velocities, self.min_position, self.max_position)
def update_fitness(self):
if self.fitness(self.X_positions) < self.best_fitness:
self.best_fitness = self.fitness(self.X_positions)
def get_r_tempo(self):
return self.r_tempo_i
def get_a_loudness(self):
return self.A_loudness
def get_best_fitness(self):
return self.best_fitness
def update(self, G_best):
self.__update_velocities(G_best)
self.__update_positions()
def update_a_r(self):
self.A_loudness = self.alpha * self.A_loudness
self.r_tempo_i = self.r_tempo_0 * (1 - np.exp(-self.gamma * self.counter))
self.counter += 1
def new_position_with_a(self, loudness):
self.X_positions = self.X_positions + np.random.uniform(-1, 1) * loudness
| [
"numpy.random.uniform",
"numpy.zeros",
"numpy.exp",
"numpy.clip"
] | [((497, 520), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(2)'], {}), '(1, 2)\n', (514, 520), True, 'import numpy as np\n'), ((546, 569), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (563, 569), True, 'import numpy as np\n'), ((665, 700), 'numpy.zeros', 'np.zeros', (['self.number_of_dimensions'], {}), '(self.number_of_dimensions)\n', (673, 700), True, 'import numpy as np\n'), ((729, 764), 'numpy.zeros', 'np.zeros', (['self.number_of_dimensions'], {}), '(self.number_of_dimensions)\n', (737, 764), True, 'import numpy as np\n'), ((1019, 1106), 'numpy.random.uniform', 'np.random.uniform', (['self.min_position', 'self.max_position', 'self.number_of_dimensions'], {}), '(self.min_position, self.max_position, self.\n number_of_dimensions)\n', (1036, 1106), True, 'import numpy as np\n'), ((1164, 1251), 'numpy.random.uniform', 'np.random.uniform', (['self.min_velocity', 'self.max_velocity', 'self.number_of_dimensions'], {}), '(self.min_velocity, self.max_velocity, self.\n number_of_dimensions)\n', (1181, 1251), True, 'import numpy as np\n'), ((1588, 1652), 'numpy.clip', 'np.clip', (['self.V_velocities', 'self.min_velocity', 'self.max_velocity'], {}), '(self.V_velocities, self.min_velocity, self.max_velocity)\n', (1595, 1652), True, 'import numpy as np\n'), ((1715, 1803), 'numpy.clip', 'np.clip', (['(self.X_positions + self.V_velocities)', 'self.min_position', 'self.max_position'], {}), '(self.X_positions + self.V_velocities, self.min_position, self.\n max_position)\n', (1722, 1803), True, 'import numpy as np\n'), ((1328, 1378), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.number_of_dimensions'], {}), '(0, 1, self.number_of_dimensions)\n', (1345, 1378), True, 'import numpy as np\n'), ((2377, 2411), 'numpy.exp', 'np.exp', (['(-self.gamma * self.counter)'], {}), '(-self.gamma * self.counter)\n', (2383, 2411), True, 'import numpy as np\n'), ((2531, 2555), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', 
'(1)'], {}), '(-1, 1)\n', (2548, 2555), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import svd
from cv2 import cv2
import pickle
images = ["bear.jpg"]
def compress_show_color_images_reshape(name, k):
image = cv2.imread(name)
image = np.asarray(image)
original_shape = image.shape
image_reshaped = image.reshape((original_shape[0],original_shape[1]*3))
U,s,V = svd(image_reshaped,full_matrices=False)
U = U[:,:k]
s = s[:k]
V = V[:k,:]
lists = [U, s, V]
return lists
ks = [1, 10, 20, 30, 40, 50, 60, 70, 80, 90]
for i in images:
for k in ks:
lists = compress_show_color_images_reshape(i, k)
nam = i.split('.')[0]
if k>=10:
with open(nam+str(k)+'.pkl', 'wb') as f:
pickle.dump(lists, f)
else:
with open(nam+str(0)+str(k)+'.pkl', 'wb') as f:
pickle.dump(lists, f)
| [
"cv2.cv2.imread",
"numpy.asarray",
"numpy.linalg.svd",
"pickle.dump"
] | [((167, 183), 'cv2.cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (177, 183), False, 'from cv2 import cv2\n'), ((196, 213), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (206, 213), True, 'import numpy as np\n'), ((335, 375), 'numpy.linalg.svd', 'svd', (['image_reshaped'], {'full_matrices': '(False)'}), '(image_reshaped, full_matrices=False)\n', (338, 375), False, 'from numpy.linalg import svd\n'), ((715, 736), 'pickle.dump', 'pickle.dump', (['lists', 'f'], {}), '(lists, f)\n', (726, 736), False, 'import pickle\n'), ((827, 848), 'pickle.dump', 'pickle.dump', (['lists', 'f'], {}), '(lists, f)\n', (838, 848), False, 'import pickle\n')] |
import pathlib
import shutil
import meshzoo
import numpy as np
import pytest
from numba_celltree import CellTree2d
from numba_celltree.constants import MAX_N_VERTEX
@pytest.fixture
def datadir(tmpdir, request):
    """Copy the reference data files into a fresh temporary directory."""
    source_dir = pathlib.Path(__file__).parent / "data"
    for filename in ("triangles.txt", "xy.txt"):
        shutil.copy(source_dir / filename, tmpdir / filename)
    return tmpdir
# some very simple test data:
# two triangles:
# Four vertices; the two triangles share the edge between vertices 1 and 2.
nodes2 = [
    [0.0, 0.0],
    [2.0, 0.0],
    [1.0, 2.0],
    [3.0, 2.0],
]
# Vertex indices per triangle.
faces2 = [
    [0, 1, 2],
    [1, 3, 2],
]
# Sentinel used to pad ragged connectivity rows (no padding needed here).
fill_value = -1
# The same data as explicitly typed numpy arrays.
nodes = np.array(nodes2, dtype=np.float64)
faces = np.array(faces2, dtype=np.intc)
# A larger triangular mesh: 20 vertices and 21 triangles, used by
# test_multipoint.
nodes21 = [
    (5, 1),
    (10, 1),
    (3, 3),
    (7, 3),
    (9, 4),
    (12, 4),
    (5, 5),
    (3, 7),
    (5, 7),
    (7, 7),
    (9, 7),
    (11, 7),
    (5, 9),
    (8, 9),
    (11, 9),
    (9, 11),
    (11, 11),
    (7, 13),
    (9, 13),
    (7, 15),
]
# Vertex indices per triangle.
faces21 = [
    (0, 1, 3),
    (0, 2, 6),
    (0, 3, 6),
    (1, 3, 4),
    (1, 4, 5),
    (2, 6, 7),
    (6, 7, 8),
    (7, 8, 12),
    (6, 8, 9),
    (8, 9, 12),
    (9, 12, 13),
    (4, 5, 11),
    (4, 10, 11),
    (9, 10, 13),
    (10, 11, 14),
    (10, 13, 14),
    (13, 14, 15),
    (14, 15, 16),
    (15, 16, 18),
    (15, 17, 18),
    (17, 18, 19),
]
def test_init():
    """Trees can be constructed with any combination of optional arguments."""
    # defaults only
    CellTree2d(nodes, faces, fill_value)
    # only cells_per_leaf
    CellTree2d(nodes, faces, fill_value, cells_per_leaf=2)
    # only n_buckets
    CellTree2d(nodes, faces, fill_value, n_buckets=4)
    # everything spelled out
    CellTree2d(nodes, faces, fill_value, n_buckets=2, cells_per_leaf=1)
def test_init_larger_mesh(datadir):
    """Construct a tree on a mesh big enough to split a bucket.

    This mesh is large enough so that a bucket will get split during
    construction.
    """
    vertices = np.loadtxt(datadir / "xy.txt", dtype=float)
    triangles = np.loadtxt(datadir / "triangles.txt", dtype=int)
    CellTree2d(vertices, triangles, fill_value, n_buckets=2)
def test_lists():
    """Plain Python lists should be converted to numpy arrays internally."""
    CellTree2d(nodes2, faces2, fill_value)
def test_types():
    """Non-default input dtypes should be auto-cast for you."""
    vertices = np.array(nodes2, dtype=np.float32)
    triangles = np.array(faces2, dtype=np.int32)
    CellTree2d(vertices, triangles, fill_value)
def test_fill_value_conversion():
    """A custom fill value is translated to -1 in the stored faces."""
    padded = np.array([[0, 1, 2, -999], [1, 3, 2, -999]])
    tree = CellTree2d(nodes, padded, -999)
    # Both padded slots must now hold the canonical sentinel.
    assert tree.faces[0, -1] == -1
    assert tree.faces[1, -1] == -1
def test_shape_errors():
    """Malformed array shapes raise ValueError in every entry point."""
    bad_faces = [0, 1, 2, 1, 3, 2]  # 1D instead of (n, nmax)
    bad_nodes = [(1, 2, 3), (3, 4, 5), (4, 5, 6)]  # three coordinates per node
    flat_box = np.array([0.0, 1.0, 2.0, 3.0])  # 1D instead of (n, 4)
    flat_edges = np.array([[0.0, 1.0], [2.0, 3.0]])  # missing segment axis

    # Construction with bad vertices or bad connectivity.
    with pytest.raises(ValueError):
        CellTree2d(bad_nodes, faces2, -1)
    with pytest.raises(ValueError):
        CellTree2d(nodes2, bad_faces, -1)

    tree = CellTree2d(nodes2, faces2, -1)
    with pytest.raises(ValueError):
        tree.locate_points(bad_nodes)
    with pytest.raises(ValueError):
        tree.intersect_faces(bad_nodes, faces2, -1)
    with pytest.raises(ValueError):
        tree.intersect_faces(bad_faces, nodes2, -1)
    with pytest.raises(ValueError):
        tree.locate_boxes(flat_box)
    with pytest.raises(ValueError):
        tree.intersect_boxes(flat_box)
    with pytest.raises(ValueError):
        tree.intersect_edges(flat_edges)

    # Can't realistically test MAX_N_FACE: 2e9 faces requires enormous
    # allocation.
    too_wide = np.arange(MAX_N_VERTEX + 1).reshape((1, -1))
    with pytest.raises(ValueError):
        tree.intersect_faces(nodes2, too_wide, -1)
def test_bounds_errors():
    """Out-of-range construction parameters raise ValueError."""
    with pytest.raises(ValueError):
        CellTree2d(nodes, faces, fill_value, cells_per_leaf=-1)
    with pytest.raises(ValueError):
        CellTree2d(nodes, faces, fill_value, n_buckets=0)
def test_triangle_lookup():
    """locate_points returns the containing triangle, or -1 when outside."""
    tree = CellTree2d(nodes, faces, fill_value)
    points = np.array(
        [
            [1.0, 1.0],   # inside triangle 0
            [2.0, 1.0],   # inside triangle 1
            [-1.0, 1.0],  # outside the mesh
        ]
    )
    expected = np.array([0, 1, -1])
    assert np.array_equal(tree.locate_points(points), expected)
def test_poly_lookup():
    """Point lookup works for quadrilateral and pentagonal cells."""
    # A simple quad grid of 12 vertices spanning [0, 6] x [0, 4].
    vertices = np.array(
        [
            [0.0, 0.0],  # 0
            [0.0, 2.0],  # 1
            [2.0, 0.0],  # 2
            [2.0, 2.0],  # 3
            [4.0, 0.0],  # 4
            [4.0, 2.0],  # 5
            [6.0, 0.0],  # 6
            [6.0, 2.0],  # 7
            [0.0, 4.0],  # 8
            [2.0, 4.0],  # 9
            [4.0, 4.0],  # 10
            [6.0, 4.0],  # 11
        ]
    )
    quads = np.array([[0, 2, 3, 1], [4, 6, 7, 5]], dtype=np.intc)
    pentas = np.array([[0, 8, 9, 5, 2], [9, 11, 6, 2, 5]], dtype=np.intc)
    # One point per face, then one point outside the mesh.
    expected = np.array([0, 1, -1])

    quad_tree = CellTree2d(vertices, quads, fill_value, n_buckets=2, cells_per_leaf=1)
    quad_points = np.array([[1.0, 1.0], [5.0, 1.0], [-1.0, 1.0]])
    assert np.array_equal(quad_tree.locate_points(quad_points), expected)

    penta_tree = CellTree2d(vertices, pentas, fill_value, n_buckets=2, cells_per_leaf=1)
    penta_points = np.array([[1.0, 2.0], [5.0, 2.0], [-1.0, 2.0]])
    assert np.array_equal(penta_tree.locate_points(penta_points), expected)
def test_multi_poly_lookup():
    """Lookup in a mixed mesh: pentagon, quad, and triangle in one array."""
    # A simple quad grid of 12 vertices spanning [0, 6] x [0, 4].
    vertices = np.array(
        [
            [0.0, 0.0],  # 0
            [0.0, 2.0],  # 1
            [2.0, 0.0],  # 2
            [2.0, 2.0],  # 3
            [4.0, 0.0],  # 4
            [4.0, 2.0],  # 5
            [6.0, 0.0],  # 6
            [6.0, 2.0],  # 7
            [0.0, 4.0],  # 8
            [2.0, 4.0],  # 9
            [4.0, 4.0],  # 10
            [6.0, 4.0],  # 11
        ]
    )
    # Rows padded with -1: a pentagon, a quad, and a triangle.
    mixed = np.array(
        [[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]], dtype=np.intc
    )
    tree = CellTree2d(vertices, mixed, fill_value, n_buckets=2, cells_per_leaf=1)
    points = np.array(
        [
            [1.0, 1.0],   # pentagon (face 0)
            [5.0, 0.5],   # triangle (face 2)
            [5.0, 3.0],   # quad (face 1)
            [-1.0, 1.0],  # outside the mesh
        ]
    )
    expected = np.array([0, 2, 1, -1])
    assert np.array_equal(tree.locate_points(points), expected)
def test_multipoint():
    """Lookup in the larger triangular mesh; points outside give -1."""
    tree = CellTree2d(nodes21, faces21, fill_value)
    points = [
        (4.2, 3.0),
        (7.7, 13.5),
        (3.4, 7.000000001),
        # out of bounds points
        (7.0, 5.0),
        (8.66, 10.99),
        (7.3, 0.74),
        (2.5, 5.5),
        (9.8, 12.3),
    ]
    expected = (1, 20, 7, -1, -1, -1, -1, -1)
    assert np.array_equal(tree.locate_points(points), expected)
def test_box_lookup():
    """locate_boxes and intersect_boxes report the same box-face overlaps."""
    vertices = np.array(
        [
            [0.0, 0.0],  # 0
            [0.0, 2.0],  # 1
            [2.0, 0.0],  # 2
            [2.0, 2.0],  # 3
            [4.0, 0.0],  # 4
            [4.0, 2.0],  # 5
            [6.0, 0.0],  # 6
            [6.0, 2.0],  # 7
            [0.0, 4.0],  # 8
            [2.0, 4.0],  # 9
            [4.0, 4.0],  # 10
            [6.0, 4.0],  # 11
        ]
    )
    cells = np.array(
        [[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]], dtype=np.intc
    )
    tree = CellTree2d(vertices, cells, fill_value, n_buckets=2, cells_per_leaf=1)
    # Boxes as (xmin, xmax, ymin, ymax).
    boxes = np.array(
        [
            [1.0, 2.0, 1.0, 2.0],   # in face 0
            [4.0, 5.0, 0.0, 1.0],   # in face 2
            [4.0, 5.0, 2.0, 3.0],   # in face 1
            [-1.0, 0.0, 0.0, 4.0],  # out of bounds x
            [6.0, 8.0, 0.0, 4.0],   # out of bounds x
            [0.0, 6.0, -1.0, 0.0],  # out of bounds y
            [0.0, 6.0, 4.0, 5.0],   # out of bounds y
        ]
    )
    expected_i = np.array([0, 1, 2])
    expected_j = np.array([0, 2, 1])

    box_i, box_j = tree.locate_boxes(boxes)
    assert np.array_equal(box_i, expected_i)
    assert np.array_equal(box_j, expected_j)

    box_i, box_j, _ = tree.intersect_boxes(boxes)
    assert np.array_equal(box_i, expected_i)
    assert np.array_equal(box_j, expected_j)
def test_edge_lookup():
    """intersect_edges finds the intersected faces and the clipped segments."""
    vertices = np.array(
        [
            [0.0, 0.0],  # 0
            [0.0, 2.0],  # 1
            [2.0, 0.0],  # 2
            [2.0, 2.0],  # 3
            [4.0, 0.0],  # 4
            [4.0, 2.0],  # 5
            [6.0, 0.0],  # 6
            [6.0, 2.0],  # 7
            [0.0, 4.0],  # 8
            [2.0, 4.0],  # 9
            [4.0, 4.0],  # 10
            [6.0, 4.0],  # 11
        ]
    )
    cells = np.array(
        [[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]], dtype=np.intc
    )
    tree = CellTree2d(vertices, cells, fill_value, n_buckets=2, cells_per_leaf=1)
    edges = np.array(
        [
            [[1.0, 1.0], [2.0, 2.0]],      # 0
            [[4.0, 3.0], [5.0, 4.0]],      # 1
            [[5.0, 0.0], [6.0, 1.0]],      # 2
            [[-2.0, -1.0], [0.0, 1.0]],    # out of bounds
            [[-2.0, -1.0], [-2.0, -1.0]],  # out of bbox
        ]
    )
    expected_i = np.array([0, 1, 2])
    expected_j = np.array([0, 1, 2])
    expected_xy = edges[:3]

    edge_i, edge_j, xy = tree.intersect_edges(edges)
    assert np.array_equal(edge_i, expected_i)
    assert np.array_equal(edge_j, expected_j)
    assert np.array_equal(xy, expected_xy)

    # Flipping edge orientation gives the same faces, with reversed segments.
    edge_i, edge_j, xy = tree.intersect_edges(edges[:, ::-1])
    assert np.array_equal(edge_i, expected_i)
    assert np.array_equal(edge_j, expected_j)
    assert np.array_equal(xy, expected_xy[:, ::-1])
def test_example_material():
    """Regression test for the example/README material: build a CellTree2d over
    a translated and scaled meshzoo disk, then pin point, box, face, and edge
    query results against previously recorded indices.
    """
    # Note: the concatenation of lists to get 1D arrays is purely to keep black
    # from formatting everything into very long 1-element columns.
    vertices, faces = meshzoo.disk(5, 5)
    vertices += 1.0
    vertices *= 5.0
    tree = CellTree2d(vertices, faces, -1)
    # Point lookup: the first point lies outside the disk, so it maps to the
    # fill value (-1); the others map to recorded face indices.
    points = np.array(
        [
            [-5.0, 1.0],
            [4.5, 2.5],
            [6.5, 4.5],
        ]
    )
    expected = [-1, 56, 80]
    assert np.array_equal(tree.locate_points(points), expected)
    # Box lookup: expected_i holds the query-box index per hit, expected_j the
    # matching face index (recorded output; the third box matches nothing).
    box_coords = np.array(
        [
            [4.0, 8.0, 4.0, 6.0],
            [0.0, 8.0, 8.0, 10.0],
            [10.0, 13.0, 2.0, 8.0],
        ]
    )
    expected_i = np.concatenate(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]
    )
    expected_j = np.concatenate(
        [
            [107, 103, 94, 96, 91, 102, 101, 106, 100, 105, 92, 89, 85, 88, 80],
            [84, 81, 76, 50, 75, 55, 59, 0, 5, 9, 51, 25, 30, 26, 34, 118, 111],
            [115, 120, 122, 114, 117, 123, 124, 119, 121, 4, 8, 7, 3, 15, 12],
            [14, 11, 18, 17, 20, 22],
        ]
    )
    i, j = tree.locate_boxes(box_coords)
    assert np.array_equal(i, expected_i)
    assert np.array_equal(j, expected_j)
    # Face intersection: two query triangles against the disk faces.
    triangle_vertices = np.array(
        [
            [5.0, 3.0],
            [7.0, 3.0],
            [7.0, 5.0],
            [0.0, 6.0],
            [4.0, 4.0],
            [6.0, 10.0],
        ]
    )
    triangles = np.array(
        [
            [0, 1, 2],
            [3, 4, 5],
        ]
    )
    expected_i = np.concatenate(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1],
        ]
    )
    expected_j = np.concatenate(
        [
            [85, 88, 80, 84, 77, 81, 76, 59, 66, 63, 123, 124, 116, 119, 121, 4],
            [8, 7, 3, 2, 14, 11, 1, 6, 0, 5, 10, 13, 9, 27, 16, 17, 21, 19, 32],
            [28, 23, 24, 33, 29, 25, 30, 26, 34, 31],
        ]
    )
    i, j, _ = tree.intersect_faces(triangle_vertices, triangles, -1)
    assert np.array_equal(i, expected_i)
    assert np.array_equal(j, expected_j)
    # Edge intersection: the two diagonals of the bounding square.
    edge_coords = np.array(
        [
            [[0.0, 0.0], [10.0, 10.0]],
            [[0.0, 10.0], [10.0, 0.0]],
        ]
    )
    expected_i = np.concatenate(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]
    )
    expected_j = np.concatenate(
        [
            [112, 111, 115, 114, 110, 101, 106, 100, 105, 25, 30, 34, 41, 38, 46],
            [44, 48, 49, 83, 78, 82, 79, 80, 77, 81, 76, 75, 14, 0, 5, 10, 13, 9],
            [17, 20, 22],
        ]
    )
    i, j, _ = tree.intersect_edges(edge_coords)
    assert np.array_equal(i, expected_i)
    assert np.array_equal(j, expected_j)
| [
"numpy.concatenate",
"meshzoo.disk",
"numba_celltree.CellTree2d",
"pytest.raises",
"pathlib.Path",
"numpy.array",
"numpy.loadtxt",
"numpy.arange",
"numpy.array_equal",
"shutil.copy"
] | [((595, 629), 'numpy.array', 'np.array', (['nodes2'], {'dtype': 'np.float64'}), '(nodes2, dtype=np.float64)\n', (603, 629), True, 'import numpy as np\n'), ((638, 669), 'numpy.array', 'np.array', (['faces2'], {'dtype': 'np.intc'}), '(faces2, dtype=np.intc)\n', (646, 669), True, 'import numpy as np\n'), ((269, 330), 'shutil.copy', 'shutil.copy', (["(data / 'triangles.txt')", "(tmpdir / 'triangles.txt')"], {}), "(data / 'triangles.txt', tmpdir / 'triangles.txt')\n", (280, 330), False, 'import shutil\n'), ((335, 382), 'shutil.copy', 'shutil.copy', (["(data / 'xy.txt')", "(tmpdir / 'xy.txt')"], {}), "(data / 'xy.txt', tmpdir / 'xy.txt')\n", (346, 382), False, 'import shutil\n'), ((1385, 1421), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {}), '(nodes, faces, fill_value)\n', (1395, 1421), False, 'from numba_celltree import CellTree2d\n'), ((1458, 1525), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(2)', 'cells_per_leaf': '(1)'}), '(nodes, faces, fill_value, n_buckets=2, cells_per_leaf=1)\n', (1468, 1525), False, 'from numba_celltree import CellTree2d\n'), ((1551, 1600), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(4)'}), '(nodes, faces, fill_value, n_buckets=4)\n', (1561, 1600), False, 'from numba_celltree import CellTree2d\n'), ((1631, 1685), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'cells_per_leaf': '(2)'}), '(nodes, faces, fill_value, cells_per_leaf=2)\n', (1641, 1685), False, 'from numba_celltree import CellTree2d\n'), ((1827, 1870), 'numpy.loadtxt', 'np.loadtxt', (["(datadir / 'xy.txt')"], {'dtype': 'float'}), "(datadir / 'xy.txt', dtype=float)\n", (1837, 1870), True, 'import numpy as np\n'), ((1883, 1931), 'numpy.loadtxt', 'np.loadtxt', (["(datadir / 'triangles.txt')"], {'dtype': 'int'}), "(datadir / 'triangles.txt', dtype=int)\n", (1893, 1931), True, 'import numpy as np\n'), ((1936, 1985), 
'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(2)'}), '(nodes, faces, fill_value, n_buckets=2)\n', (1946, 1985), False, 'from numba_celltree import CellTree2d\n'), ((2080, 2118), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes2', 'faces2', 'fill_value'], {}), '(nodes2, faces2, fill_value)\n', (2090, 2118), False, 'from numba_celltree import CellTree2d\n'), ((2228, 2262), 'numpy.array', 'np.array', (['nodes2'], {'dtype': 'np.float32'}), '(nodes2, dtype=np.float32)\n', (2236, 2262), True, 'import numpy as np\n'), ((2275, 2307), 'numpy.array', 'np.array', (['faces2'], {'dtype': 'np.int32'}), '(faces2, dtype=np.int32)\n', (2283, 2307), True, 'import numpy as np\n'), ((2312, 2348), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {}), '(nodes, faces, fill_value)\n', (2322, 2348), False, 'from numba_celltree import CellTree2d\n'), ((2397, 2441), 'numpy.array', 'np.array', (['[[0, 1, 2, -999], [1, 3, 2, -999]]'], {}), '([[0, 1, 2, -999], [1, 3, 2, -999]])\n', (2405, 2441), True, 'import numpy as np\n'), ((2453, 2483), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', '(-999)'], {}), '(nodes, faces, -999)\n', (2463, 2483), False, 'from numba_celltree import CellTree2d\n'), ((2675, 2705), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0]'], {}), '([0.0, 1.0, 2.0, 3.0])\n', (2683, 2705), True, 'import numpy as np\n'), ((2724, 2758), 'numpy.array', 'np.array', (['[[0.0, 1.0], [2.0, 3.0]]'], {}), '([[0.0, 1.0], [2.0, 3.0]])\n', (2732, 2758), True, 'import numpy as np\n'), ((2919, 2949), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes2', 'faces2', '(-1)'], {}), '(nodes2, faces2, -1)\n', (2929, 2949), False, 'from numba_celltree import CellTree2d\n'), ((3912, 3948), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {}), '(nodes, faces, fill_value)\n', (3922, 3948), False, 'from numba_celltree import CellTree2d\n'), ((3961, 4008), 'numpy.array', 
'np.array', (['[[1.0, 1.0], [2.0, 1.0], [-1.0, 1.0]]'], {}), '([[1.0, 1.0], [2.0, 1.0], [-1.0, 1.0]])\n', (3969, 4008), True, 'import numpy as np\n'), ((4141, 4161), 'numpy.array', 'np.array', (['[0, 1, -1]'], {}), '([0, 1, -1])\n', (4149, 4161), True, 'import numpy as np\n'), ((4173, 4205), 'numpy.array_equal', 'np.array_equal', (['result', 'expected'], {}), '(result, expected)\n', (4187, 4205), True, 'import numpy as np\n'), ((4269, 4431), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0, 2.0], [\n 6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0, 4.0]]'], {}), '([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0,\n 2.0], [6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0,\n 4.0]])\n', (4277, 4431), True, 'import numpy as np\n'), ((4681, 4734), 'numpy.array', 'np.array', (['[[0, 2, 3, 1], [4, 6, 7, 5]]'], {'dtype': 'np.intc'}), '([[0, 2, 3, 1], [4, 6, 7, 5]], dtype=np.intc)\n', (4689, 4734), True, 'import numpy as np\n'), ((4819, 4879), 'numpy.array', 'np.array', (['[[0, 8, 9, 5, 2], [9, 11, 6, 2, 5]]'], {'dtype': 'np.intc'}), '([[0, 8, 9, 5, 2], [9, 11, 6, 2, 5]], dtype=np.intc)\n', (4827, 4879), True, 'import numpy as np\n'), ((4951, 5019), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces1', 'fill_value'], {'n_buckets': '(2)', 'cells_per_leaf': '(1)'}), '(nodes, faces1, fill_value, n_buckets=2, cells_per_leaf=1)\n', (4961, 5019), False, 'from numba_celltree import CellTree2d\n'), ((5032, 5079), 'numpy.array', 'np.array', (['[[1.0, 1.0], [5.0, 1.0], [-1.0, 1.0]]'], {}), '([[1.0, 1.0], [5.0, 1.0], [-1.0, 1.0]])\n', (5040, 5079), True, 'import numpy as np\n'), ((5196, 5216), 'numpy.array', 'np.array', (['[0, 1, -1]'], {}), '([0, 1, -1])\n', (5204, 5216), True, 'import numpy as np\n'), ((5228, 5260), 'numpy.array_equal', 'np.array_equal', (['result', 'expected'], {}), '(result, expected)\n', (5242, 5260), True, 'import numpy as np\n'), ((5274, 5342), 
'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces2', 'fill_value'], {'n_buckets': '(2)', 'cells_per_leaf': '(1)'}), '(nodes, faces2, fill_value, n_buckets=2, cells_per_leaf=1)\n', (5284, 5342), False, 'from numba_celltree import CellTree2d\n'), ((5355, 5402), 'numpy.array', 'np.array', (['[[1.0, 2.0], [5.0, 2.0], [-1.0, 2.0]]'], {}), '([[1.0, 2.0], [5.0, 2.0], [-1.0, 2.0]])\n', (5363, 5402), True, 'import numpy as np\n'), ((5519, 5539), 'numpy.array', 'np.array', (['[0, 1, -1]'], {}), '([0, 1, -1])\n', (5527, 5539), True, 'import numpy as np\n'), ((5551, 5583), 'numpy.array_equal', 'np.array_equal', (['result', 'expected'], {}), '(result, expected)\n', (5565, 5583), True, 'import numpy as np\n'), ((5653, 5815), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0, 2.0], [\n 6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0, 4.0]]'], {}), '([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0,\n 2.0], [6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0,\n 4.0]])\n', (5661, 5815), True, 'import numpy as np\n'), ((6052, 6137), 'numpy.array', 'np.array', (['[[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]]'], {'dtype': 'np.intc'}), '([[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]], dtype=np.intc\n )\n', (6060, 6137), True, 'import numpy as np\n'), ((6158, 6225), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(2)', 'cells_per_leaf': '(1)'}), '(nodes, faces, fill_value, n_buckets=2, cells_per_leaf=1)\n', (6168, 6225), False, 'from numba_celltree import CellTree2d\n'), ((6238, 6297), 'numpy.array', 'np.array', (['[[1.0, 1.0], [5.0, 0.5], [5.0, 3.0], [-1.0, 1.0]]'], {}), '([[1.0, 1.0], [5.0, 0.5], [5.0, 3.0], [-1.0, 1.0]])\n', (6246, 6297), True, 'import numpy as np\n'), ((6425, 6448), 'numpy.array', 'np.array', (['[0, 2, 1, -1]'], {}), '([0, 2, 1, -1])\n', (6433, 6448), True, 'import numpy as np\n'), ((6460, 6492), 
'numpy.array_equal', 'np.array_equal', (['result', 'expected'], {}), '(result, expected)\n', (6474, 6492), True, 'import numpy as np\n'), ((6529, 6569), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes21', 'faces21', 'fill_value'], {}), '(nodes21, faces21, fill_value)\n', (6539, 6569), False, 'from numba_celltree import CellTree2d\n'), ((6886, 6918), 'numpy.array_equal', 'np.array_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (6900, 6918), True, 'import numpy as np\n'), ((6956, 7118), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0, 2.0], [\n 6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0, 4.0]]'], {}), '([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0,\n 2.0], [6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0,\n 4.0]])\n', (6964, 7118), True, 'import numpy as np\n'), ((7355, 7440), 'numpy.array', 'np.array', (['[[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]]'], {'dtype': 'np.intc'}), '([[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]], dtype=np.intc\n )\n', (7363, 7440), True, 'import numpy as np\n'), ((7461, 7528), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(2)', 'cells_per_leaf': '(1)'}), '(nodes, faces, fill_value, n_buckets=2, cells_per_leaf=1)\n', (7471, 7528), False, 'from numba_celltree import CellTree2d\n'), ((7546, 7721), 'numpy.array', 'np.array', (['[[1.0, 2.0, 1.0, 2.0], [4.0, 5.0, 0.0, 1.0], [4.0, 5.0, 2.0, 3.0], [-1.0, \n 0.0, 0.0, 4.0], [6.0, 8.0, 0.0, 4.0], [0.0, 6.0, -1.0, 0.0], [0.0, 6.0,\n 4.0, 5.0]]'], {}), '([[1.0, 2.0, 1.0, 2.0], [4.0, 5.0, 0.0, 1.0], [4.0, 5.0, 2.0, 3.0],\n [-1.0, 0.0, 0.0, 4.0], [6.0, 8.0, 0.0, 4.0], [0.0, 6.0, -1.0, 0.0], [\n 0.0, 6.0, 4.0, 5.0]])\n', (7554, 7721), True, 'import numpy as np\n'), ((8009, 8028), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (8017, 8028), True, 'import numpy as np\n'), ((8046, 8065), 
'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (8054, 8065), True, 'import numpy as np\n'), ((8077, 8113), 'numpy.array_equal', 'np.array_equal', (['actual_i', 'expected_i'], {}), '(actual_i, expected_i)\n', (8091, 8113), True, 'import numpy as np\n'), ((8125, 8161), 'numpy.array_equal', 'np.array_equal', (['actual_j', 'expected_j'], {}), '(actual_j, expected_j)\n', (8139, 8161), True, 'import numpy as np\n'), ((8235, 8271), 'numpy.array_equal', 'np.array_equal', (['actual_i', 'expected_i'], {}), '(actual_i, expected_i)\n', (8249, 8271), True, 'import numpy as np\n'), ((8283, 8319), 'numpy.array_equal', 'np.array_equal', (['actual_j', 'expected_j'], {}), '(actual_j, expected_j)\n', (8297, 8319), True, 'import numpy as np\n'), ((8358, 8520), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0, 2.0], [\n 6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0, 4.0]]'], {}), '([[0.0, 0.0], [0.0, 2.0], [2.0, 0.0], [2.0, 2.0], [4.0, 0.0], [4.0,\n 2.0], [6.0, 0.0], [6.0, 2.0], [0.0, 4.0], [2.0, 4.0], [4.0, 4.0], [6.0,\n 4.0]])\n', (8366, 8520), True, 'import numpy as np\n'), ((8757, 8842), 'numpy.array', 'np.array', (['[[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]]'], {'dtype': 'np.intc'}), '([[0, 8, 9, 5, 2], [9, 11, 7, 5, -1], [4, 7, 6, -1, -1]], dtype=np.intc\n )\n', (8765, 8842), True, 'import numpy as np\n'), ((8863, 8930), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(2)', 'cells_per_leaf': '(1)'}), '(nodes, faces, fill_value, n_buckets=2, cells_per_leaf=1)\n', (8873, 8930), False, 'from numba_celltree import CellTree2d\n'), ((8949, 9099), 'numpy.array', 'np.array', (['[[[1.0, 1.0], [2.0, 2.0]], [[4.0, 3.0], [5.0, 4.0]], [[5.0, 0.0], [6.0, 1.0\n ]], [[-2.0, -1.0], [0.0, 1.0]], [[-2.0, -1.0], [-2.0, -1.0]]]'], {}), '([[[1.0, 1.0], [2.0, 2.0]], [[4.0, 3.0], [5.0, 4.0]], [[5.0, 0.0],\n [6.0, 1.0]], [[-2.0, -1.0], [0.0, 1.0]], [[-2.0, -1.0], 
[-2.0, -1.0]]])\n', (8957, 9099), True, 'import numpy as np\n'), ((9319, 9338), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (9327, 9338), True, 'import numpy as np\n'), ((9356, 9375), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (9364, 9375), True, 'import numpy as np\n'), ((9432, 9468), 'numpy.array_equal', 'np.array_equal', (['actual_i', 'expected_i'], {}), '(actual_i, expected_i)\n', (9446, 9468), True, 'import numpy as np\n'), ((9480, 9516), 'numpy.array_equal', 'np.array_equal', (['actual_j', 'expected_j'], {}), '(actual_j, expected_j)\n', (9494, 9516), True, 'import numpy as np\n'), ((9528, 9581), 'numpy.array_equal', 'np.array_equal', (['intersections', 'expected_intersections'], {}), '(intersections, expected_intersections)\n', (9542, 9581), True, 'import numpy as np\n'), ((9705, 9741), 'numpy.array_equal', 'np.array_equal', (['actual_i', 'expected_i'], {}), '(actual_i, expected_i)\n', (9719, 9741), True, 'import numpy as np\n'), ((9753, 9789), 'numpy.array_equal', 'np.array_equal', (['actual_j', 'expected_j'], {}), '(actual_j, expected_j)\n', (9767, 9789), True, 'import numpy as np\n'), ((9801, 9863), 'numpy.array_equal', 'np.array_equal', (['intersections', 'expected_intersections[:, ::-1]'], {}), '(intersections, expected_intersections[:, ::-1])\n', (9815, 9863), True, 'import numpy as np\n'), ((10064, 10082), 'meshzoo.disk', 'meshzoo.disk', (['(5)', '(5)'], {}), '(5, 5)\n', (10076, 10082), False, 'import meshzoo\n'), ((10135, 10166), 'numba_celltree.CellTree2d', 'CellTree2d', (['vertices', 'faces', '(-1)'], {}), '(vertices, faces, -1)\n', (10145, 10166), False, 'from numba_celltree import CellTree2d\n'), ((10180, 10227), 'numpy.array', 'np.array', (['[[-5.0, 1.0], [4.5, 2.5], [6.5, 4.5]]'], {}), '([[-5.0, 1.0], [4.5, 2.5], [6.5, 4.5]])\n', (10188, 10227), True, 'import numpy as np\n'), ((10399, 10478), 'numpy.array', 'np.array', (['[[4.0, 8.0, 4.0, 6.0], [0.0, 8.0, 8.0, 10.0], [10.0, 13.0, 2.0, 8.0]]'], {}), 
'([[4.0, 8.0, 4.0, 6.0], [0.0, 8.0, 8.0, 10.0], [10.0, 13.0, 2.0, 8.0]])\n', (10407, 10478), True, 'import numpy as np\n'), ((10557, 10747), 'numpy.concatenate', 'np.concatenate', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, \n 1, 1, 1, 1, 1, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n 1], [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n', (10571, 10747), True, 'import numpy as np\n'), ((10817, 11076), 'numpy.concatenate', 'np.concatenate', (['[[107, 103, 94, 96, 91, 102, 101, 106, 100, 105, 92, 89, 85, 88, 80], [84, \n 81, 76, 50, 75, 55, 59, 0, 5, 9, 51, 25, 30, 26, 34, 118, 111], [115, \n 120, 122, 114, 117, 123, 124, 119, 121, 4, 8, 7, 3, 15, 12], [14, 11, \n 18, 17, 20, 22]]'], {}), '([[107, 103, 94, 96, 91, 102, 101, 106, 100, 105, 92, 89, 85,\n 88, 80], [84, 81, 76, 50, 75, 55, 59, 0, 5, 9, 51, 25, 30, 26, 34, 118,\n 111], [115, 120, 122, 114, 117, 123, 124, 119, 121, 4, 8, 7, 3, 15, 12],\n [14, 11, 18, 17, 20, 22]])\n', (10831, 11076), True, 'import numpy as np\n'), ((11190, 11219), 'numpy.array_equal', 'np.array_equal', (['i', 'expected_i'], {}), '(i, expected_i)\n', (11204, 11219), True, 'import numpy as np\n'), ((11231, 11260), 'numpy.array_equal', 'np.array_equal', (['j', 'expected_j'], {}), '(j, expected_j)\n', (11245, 11260), True, 'import numpy as np\n'), ((11286, 11373), 'numpy.array', 'np.array', (['[[5.0, 3.0], [7.0, 3.0], [7.0, 5.0], [0.0, 6.0], [4.0, 4.0], [6.0, 10.0]]'], {}), '([[5.0, 3.0], [7.0, 3.0], [7.0, 5.0], [0.0, 6.0], [4.0, 4.0], [6.0,\n 10.0]])\n', (11294, 11373), True, 'import numpy as np\n'), ((11483, 11515), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 4, 5]]'], {}), '([[0, 1, 2], [3, 4, 5]])\n', (11491, 11515), True, 'import numpy as np\n'), ((11583, 11749), 'numpy.concatenate', 'np.concatenate', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n 1], [1]])\n', (11597, 11749), True, 'import numpy as np\n'), ((11819, 12024), 'numpy.concatenate', 'np.concatenate', (['[[85, 88, 80, 84, 77, 81, 76, 59, 66, 63, 123, 124, 116, 119, 121, 4], [8, \n 7, 3, 2, 14, 11, 1, 6, 0, 5, 10, 13, 9, 27, 16, 17, 21, 19, 32], [28, \n 23, 24, 33, 29, 25, 30, 26, 34, 31]]'], {}), '([[85, 88, 80, 84, 77, 81, 76, 59, 66, 63, 123, 124, 116, 119,\n 121, 4], [8, 7, 3, 2, 14, 11, 1, 6, 0, 5, 10, 13, 9, 27, 16, 17, 21, 19,\n 32], [28, 23, 24, 33, 29, 25, 30, 26, 34, 31]])\n', (11833, 12024), True, 'import numpy as np\n'), ((12159, 12188), 'numpy.array_equal', 'np.array_equal', (['i', 'expected_i'], {}), '(i, expected_i)\n', (12173, 12188), True, 'import numpy as np\n'), ((12200, 12229), 'numpy.array_equal', 'np.array_equal', (['j', 'expected_j'], {}), '(j, expected_j)\n', (12214, 12229), True, 'import numpy as np\n'), ((12249, 12315), 'numpy.array', 'np.array', (['[[[0.0, 0.0], [10.0, 10.0]], [[0.0, 10.0], [10.0, 0.0]]]'], {}), '([[[0.0, 0.0], [10.0, 10.0]], [[0.0, 10.0], [10.0, 0.0]]])\n', (12257, 12315), True, 'import numpy as np\n'), ((12382, 12514), 'numpy.concatenate', 'np.concatenate', (['[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,\n 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n', (12396, 12514), True, 'import numpy as np\n'), ((12577, 12758), 'numpy.concatenate', 'np.concatenate', (['[[112, 111, 115, 114, 110, 101, 106, 100, 105, 25, 30, 34, 41, 38, 46], [44,\n 48, 49, 83, 78, 82, 79, 80, 77, 81, 76, 75, 14, 0, 5, 10, 13, 9], [17, \n 20, 22]]'], {}), '([[112, 111, 115, 114, 110, 101, 106, 100, 105, 25, 30, 34, \n 41, 38, 46], [44, 
48, 49, 83, 78, 82, 79, 80, 77, 81, 76, 75, 14, 0, 5,\n 10, 13, 9], [17, 20, 22]])\n', (12591, 12758), True, 'import numpy as np\n'), ((12870, 12899), 'numpy.array_equal', 'np.array_equal', (['i', 'expected_i'], {}), '(i, expected_i)\n', (12884, 12899), True, 'import numpy as np\n'), ((12911, 12940), 'numpy.array_equal', 'np.array_equal', (['j', 'expected_j'], {}), '(j, expected_j)\n', (12925, 12940), True, 'import numpy as np\n'), ((2768, 2793), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2781, 2793), False, 'import pytest\n'), ((2803, 2832), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces2', '(-1)'], {}), '(nodes, faces2, -1)\n', (2813, 2832), False, 'from numba_celltree import CellTree2d\n'), ((2842, 2867), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2855, 2867), False, 'import pytest\n'), ((2877, 2906), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes2', 'faces', '(-1)'], {}), '(nodes2, faces, -1)\n', (2887, 2906), False, 'from numba_celltree import CellTree2d\n'), ((2959, 2984), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2972, 2984), False, 'import pytest\n'), ((3029, 3054), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3042, 3054), False, 'import pytest\n'), ((3113, 3138), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3126, 3138), False, 'import pytest\n'), ((3197, 3222), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3210, 3222), False, 'import pytest\n'), ((3271, 3296), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3284, 3296), False, 'import pytest\n'), ((3348, 3373), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3361, 3373), False, 'import pytest\n'), ((3573, 3598), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3586, 3598), False, 'import pytest\n'), ((3685, 3710), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3698, 3710), False, 'import pytest\n'), ((3720, 3775), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'cells_per_leaf': '(-1)'}), '(nodes, faces, fill_value, cells_per_leaf=-1)\n', (3730, 3775), False, 'from numba_celltree import CellTree2d\n'), ((3786, 3811), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3799, 3811), False, 'import pytest\n'), ((3821, 3870), 'numba_celltree.CellTree2d', 'CellTree2d', (['nodes', 'faces', 'fill_value'], {'n_buckets': '(0)'}), '(nodes, faces, fill_value, n_buckets=0)\n', (3831, 3870), False, 'from numba_celltree import CellTree2d\n'), ((226, 248), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'import pathlib\n'), ((3519, 3546), 'numpy.arange', 'np.arange', (['(MAX_N_VERTEX + 1)'], {}), '(MAX_N_VERTEX + 1)\n', (3528, 3546), True, 'import numpy as np\n')] |
#############################################################################
#
# Author: <NAME>, <NAME>
#
# Copyright: <NAME> TSRI 2000
#
#############################################################################
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/autoflexCommands.py,v 1.69.2.2 2016/02/11 09:24:07 annao Exp $
#
# $Id: autoflexCommands.py,v 1.69.2.2 2016/02/11 09:24:07 annao Exp $
#
#
#
#
#
#
#
"""
This module facilitates producing files for AutoDock. The steps in this process are:
* Set the macromolecule:
o Read a PDBQT Macromolecule
o Choose Macromol...
* Select which residues are to be flexible in macromolecule using Pmv selection tools:
o ICOM Select
o SelectFromString
o Select Spherical Region
* The results of the previous steps are written to a file. The user selects a filename via a filebrowser.
"""
import numpy
from DejaVu import viewerConst
from ViewerFramework.VFCommand import CommandGUI
## from ViewerFramework.gui import InputFormDescr
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
from mglutil.gui.BasicWidgets.Tk.thumbwheel import ThumbWheel
from mglutil.gui.BasicWidgets.Tk.customizedWidgets import ListChooser,\
ExtendedSliderWidget
from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM
from Pmv.deleteCommands import BeforeDeleteMoleculesEvent
from Pmv.moleculeViewer import EditAtomsEvent
from MolKit.tree import TreeNode, TreeNodeSet
from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet
from MolKit.protein import Residue, ResidueSet, Chain
from MolKit.pdbWriter import PdbqtWriter
from MolKit.bondSelector import RotatableBondSelector, AmideBondSelector
from MolKit.bondSelector import GuanidiniumBondSelector, LeafBondSelector
from Pmv.guiTools import MoleculeChooser
import types, _py2k_string as string, tkinter, os, Pmw
from AutoDockTools.autotorsCommands import MAXTORS, SetRotatableBonds
from AutoDockTools.atomTypeTools import AutoDock4_AtomTyper
from tkinter.simpledialog import SimpleDialog
# Labels for the AutoFlex menu-bar entries, keyed by command name.
menuText = {
    'AutoFlexMB': 'Flexible Residues',
    'InputMB': 'Input',
    'Read Macro': 'Open Macromolecule...',
    'Choose Macro': 'Choose Macromolecule...',
    'Set Residues': 'Choose Torsions in Currently Selected Residues...',
    'Setup Covalent Residue': 'Set up Covalent Residue...',
    'Set Hinge': 'Set up Hinge...',
    'Edit Hinge': 'Edit Hinge...',
    'Step Back': 'Redisplay Macromolecule',
    'WriteMB': 'Output',
    'writeFlexible': 'Save Flexible PDBQT...',
    'writeRigid': 'Save Rigid PDBQT...',
    'writeDir': 'Save Multiple Flexible PDBQTS...',
}
class AF_MacroReader(MVCommand):
    """Let the user pick a PDBQT file to load as the rigid macromolecule.

    On success the molecule is read via readPDBQT and recorded in
    self.vf.flexDict under 'macrofilename', 'macroname' and 'macromol'.
    """
    def onRemoveObjectFromViewer(self, obj): #remove the covalent ligand
        # If the molecule being removed is the one recorded as the
        # macromolecule, drop the stale 'macrofilename' entry from flexDict.
        if hasattr(self.vf, 'flexDict'):
            dict = self.vf.flexDict
            if 'macrofilename' in dict:
                macrofilename = dict['macrofilename']
                if hasattr(obj, 'parser') and obj.parser.filename == macrofilename:
                    # NOTE(review): the comma builds a tuple, not a string —
                    # possibly intended as concatenation; confirm warningMsg
                    # accepts a tuple before changing.
                    msg = "removing flexDict macrofilename entry: ", dict['macrofilename']
                    self.vf.warningMsg(msg)
                    del dict['macrofilename']
        #if dict.has_key('macroname') and dict['macroname'] not in self.vf.Mols.name:
        #    del dict['macroname']
        #    if dict.has_key('macromol') and dict['macromol']:
        #        del dict['macromol']
    def onAddCmdToViewer(self):
        # Ensure the shared flexDict exists and the readPDBQT command is loaded.
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict={}
        if 'readPDBQT' not in self.vf.commands:
            self.vf.loadCommand('fileCommands', 'readPDBQT', 'Pmv')
    def guiCallback(self):
        """called each time the 'select pdbqt macromolecule' button is pressed"""
        macroFile = self.vf.askFileOpen(types=[('PDBQT files', '*.pdbqt')],
                        title = 'PDBQT Macromolecule File:')
        if macroFile:
            # Only .pdbqt files are accepted; anything else is rejected.
            filename=os.path.split(macroFile)[-1]
            ext = os.path.splitext(filename)[1]
            if ext!='.pdbqt':
                msg = 'File can only be PDBQT format'
                self.warningMsg(msg)
                return 'ERROR'
            self.doitWrapper(macroFile)
    def __call__(self, macroFile, **kw):
        """None<-ADflex_readMacro(macroFile)

        macroFile: path to a .pdbqt macromolecule file; returns 'ERROR' on
        a missing path or wrong extension.
        """
        if not macroFile: return 'ERROR'
        ext = os.path.splitext(macroFile)[1]
        if ext!='.pdbqt':
            msg = 'File must be PDBQT format'
            self.warningMsg(msg)
            return 'ERROR'
        self.doitWrapper(*(macroFile,), **kw)
    def doit(self, macroFile):
        # Read the file, reset per-atom 'used' flags, and record the result
        # as the current macromolecule in flexDict.
        mollist = self.vf.readPDBQT(macroFile, topCommand=0)
        if not len(mollist): return 'ERROR'
        mol = mollist[0]
        mol.allAtoms.used = 0
        dict = self.vf.flexDict
        dict['macrofilename'] = macroFile
        dict['macroname'] = mol.name
        dict['macromol'] = mol
# Register the "Open Macromolecule..." entry under Flexible Residues -> Input
# in the AutoTools menu bar.
AF_MacroReaderGUI=CommandGUI()
AF_MacroReaderGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'], \
        menuText['Read Macro'], cascadeName = menuText['InputMB'])
class AF_MacroChooser(MVCommand):
    """Let the user pick an already-loaded molecule as the macromolecule.

    The chosen molecule is prepared for AutoDock4 if needed (gasteiger
    charges, autodock_element types, optional non-polar hydrogen merging)
    and recorded in self.vf.flexDict.
    """
    def __init__(self, mode='single', title = 'Choose Macromolecule'):
        MVCommand.__init__(self)
        # mode: selection mode passed to MoleculeChooser ('single' by default)
        # title: window title of the chooser form
        self.mode = mode
        self.title = title
    def onRemoveObjectFromViewer(self, obj):
        # Purge flexDict entries that refer to a molecule no longer loaded.
        if hasattr(self.vf, 'flexDict'):
            dict = self.vf.flexDict
            if 'macrofilename' in dict:
                # Keep the entry only if some loaded molecule still has
                # this filename.
                ok = False
                macrofilename = dict['macrofilename']
                for m in self.vf.Mols:
                    if m.parser.filename==macrofilename:
                        ok = True
                        break
                if not ok:
                    del dict['macrofilename']
            if 'macroname' in dict and dict['macroname'] not in self.vf.Mols.name:
                del dict['macroname']
                if 'macromol' in dict and dict['macromol']:
                    del dict['macromol']
    def chooseMolecule_cb(self, event = None):
        """called each time the 'choose Molecule' button is pressed"""
        mols = self.chooser.getMolSet()
        kw = {'redraw':0}
        if mols: self.doitWrapper(*(mols,), **kw)
        self.chooser.form.withdraw()
    def guiCallback(self):
        # Build and display the molecule-chooser form; double-click or the
        # Select button both invoke chooseMolecule_cb.
        self.chooser = MoleculeChooser(self.vf, self.mode, self.title)
        self.chooser.ipf.append({'name':'Select Button',
                                 'widgetType':tkinter.Button,
                                 'text':'Select Molecule',
                                 'wcfg':{'bd':6},
                                 'gridcfg':{'sticky':tkinter.E+tkinter.W},
                                 'command': self.chooseMolecule_cb})
        self.form = self.chooser.go(modal=0, blocking=0)
        lb = self.chooser.ipf.entryByName['Molecule']['widget'].lb
        lb.bind("<Double-Button-1>",self.chooseMolecule_cb)
    def __call__(self, nodes, **kw):
        """None<-ADflex_chooseMacro(nodes)

        nodes: molecule specification expandable by the viewer; the first
        molecule becomes the macromolecule.
        """
        nodes = self.vf.expandNodes(nodes)
        if not len(nodes): return 'ERROR'
        self.doitWrapper(*(nodes,), **kw)
    def doit(self, nodes, **kw):
        nodes = self.vf.expandNodes(nodes)
        if not len(nodes): return 'ERROR'
        mol = nodes[0]
        mol.allAtoms.used=0
        #if mol is from a pdbqt file, do not need to add 'q' or 't'
        filetype = os.path.splitext(os.path.basename(mol.parser.filename))[1]
        msg = ""
        chargeMsg = ""
        typeMsg = ""
        if filetype!='.pdbqt':
            #make sure all atoms have charge: 'q'
            ats = mol.allAtoms.get(lambda x: x.chargeSet==None)
            if len(ats):
                mol.buildBondsByDistance()
                self.vf.computeGasteiger(mol, topCommand=0)
                chargeMsg = "added gasteiger charges "
            #make sure that all atoms have autodock_element: 't'
            ats = mol.allAtoms.get(lambda x: hasattr(x, 'autodock_element'))
            if len(ats)!= len(mol.allAtoms):
                ad4_typer = AutoDock4_AtomTyper()
                mol.buildBondsByDistance()
                ad4_typer.setAutoDockElements(mol)
                typeMsg = " added autodock4 atom types "
            # Assemble a single summary message from whichever steps ran.
            if len(chargeMsg):
                msg += chargeMsg
                if len(typeMsg):
                    msg = msg + ' and ' + typeMsg + ' to ' + mol.name
            elif len(typeMsg):
                msg = typeMsg + ' to ' + mol.name
        # Detect non-polar hydrogens (H bonded to C) and offer to merge them.
        hs = mol.allAtoms.get(lambda x: x.element=='H' and len(x.bonds))
        nphs = hs.get(lambda x: x.bonds[0].atom1.element=='C' or x.bonds[0].atom2.element=='C')
        if len(nphs):
            lenNPHS = 0
            beforeLen = len(mol.allAtoms)
            if 'automerge_nphs' in list(kw.keys()):
                # Caller requested merging without a confirmation dialog.
                self.vf.mergeNPHSGC(mol.allAtoms)
            else:
                nphs_msg="There appear to be some nonpolar hydrogen in "+ mol.name+ " Do you wish to merge them to conform to AutoDock4 atom types? "
                d = SimpleDialog(self.vf.GUI.ROOT, text=nphs_msg,
                buttons=['No', 'Yes'], default=1,
                title="Merge Non-polar Hydrogens?")
                mergeNPHS = d.go()
                if mergeNPHS:
                    self.vf.mergeNPHSGC(mol.allAtoms)
            # Report how many hydrogens were actually merged, if any.
            lenNPHS = beforeLen - len(mol.allAtoms)
            if lenNPHS:
                msg = msg + " and merged " + str(lenNPHS) + " non-polar hydrogens"
        if len(msg):
            self.warningMsg(msg)
        # Record the prepared molecule as the current macromolecule.
        dict = self.vf.flexDict
        dict['macroname'] = mol.name
        dict['macrofilename'] = mol.parser.filename
        dict['macromol'] = mol
        if hasattr(self.vf, 'gpo') and hasattr(self.vf.gpo, 'receptor') and self.vf.gpo.receptor==mol:
            msg = mol.name + " is currently the 'macromolecule'\nin the Grid menu. Make sure the macromolecule specified in the gpf does not include the flexible residues!!"
            self.warningMsg(msg)
    def onPick(self,event):
        # Sync the list-chooser selection with a molecule picked in the 3D view.
        listChooser = self.ipf.entryByName['Molecule']['widget']
        tkListBox = listChooser.lb
        atom,geom = self.vf.findPickedAtom(event)
        if atom:
            pickedMol = atom.top
            #then need to make pickedMol the selection in self.lc
            for i in range(len(listChooser.entries)):
                listChooserlist=string.split(listChooser.entries[i][0])
                if pickedMol.name == listChooserlist[0]:
                    self.pickedMolIndex= i
                    tkListBox.select_clear(0,'end')
                    listChooser.select(i)
                    return
            t= "error: %s not in mv.Mols" %pickedMol.name
            self.vf.warningMsg(t)
# GUI hook: places the macromolecule chooser under AutoFlex > Input in the toolbar.
AF_MacroChooserGUI=CommandGUI()
AF_MacroChooserGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'],
        menuText['Choose Macro'], cascadeName = menuText['InputMB'])
class AF_SelectResidues(MVCommand):
    """ allows user to set up a set of residues in macromolecule whose sidechains are to be flexed in an autodock run"""

    def onAddCmdToViewer(self):
        """Create shared flexibility state and load the Pmv commands used here."""
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict={}
        if not hasattr(self.vf, 'colorByAtomType'):
            self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
        self.torscount = 0

    def guiCallback(self):
        """called each time the 'Set Selected Residues' button is pressed"""
        if 'macroname' not in self.vf.flexDict:
            t = 'select protein first'
            self.vf.warningMsg(t)
            return 'ERROR'
        if len(self.vf.selection) == 0:
            msg = "Please select residues to be modelled as flexible first!"
            self.warningMsg(msg)
            return 'ERROR'
        nodes = self.vf.getSelection()
        nodes = nodes.findType(Residue).uniq()
        if not len(nodes):
            t = 'no current residues selected'
            self.vf.warningMsg(t)
            return 'ERROR'
        # warn when there is no specific sub-selection: processing every
        # residue of the molecule is very time consuming
        mol = nodes[0].top
        title = "Process ALL residues in %s?" % mol.name
        if len(nodes) == len(mol.chains.residues):
            msg = "CAUTION: currently processing all the residues in " + mol.name + " will be very time consuming. Do you wish to continue? "
            d = SimpleDialog(self.vf.GUI.ROOT, text=msg,
                             buttons=['No', 'Yes'], default=1,
                             title=title)
            useAll = d.go()
            if not useAll:
                return "ERROR"
        if not nodes.__class__ == ResidueSet:
            self.vf.setIcomLevel(Residue, topCommand=0)
            nodes = nodes.findType(Residue).uniq()
        mol = nodes[0].top
        mol.allAtoms.used = 0
        kw = {'redraw': 0}
        self.torscount = 0
        self.doitWrapper(*(nodes,), **kw)

    def __call__(self, nodes=None, **kw):
        """Programmatic entry point: expand *nodes* to residues and process them."""
        nodes = self.vf.expandNodes(nodes)
        if not len(nodes): return "ERROR"
        if nodes.__class__ != ResidueSet:
            nodes = nodes.findType(Residue).uniq()
        self.doitWrapper(*(nodes,), **kw)

    def doit(self, nodes):
        """Mark the selected residues flexible, skipping waters/prolines and
        residues without rotatable bonds, then open the torsion editor."""
        flex_residues = self.vf.expandNodes(nodes)
        if not len(flex_residues): return 'ERROR'
        # prolines have no rotatable sidechain torsions
        proList = ResidueSet([x for x in flex_residues if x.type == 'PRO'])
        if proList:
            flex_residues = flex_residues - proList
        # waters cannot be flexible residues
        h20List = ResidueSet([x for x in flex_residues if x.type == 'HOH'])
        if h20List:
            flex_residues = flex_residues - h20List
        if not len(flex_residues):
            t = 'No non-water and non-proline Residues selected!'
            self.vf.warningMsg(t)
            return 'ERROR'
        for res in flex_residues:
            self.setAutoFlexFields(res)
        # drop residues which ended up with no active torsions
        flexList = ResidueSet([x for x in flex_residues if x.torscount != 0])
        if not len(flexList):
            t = 'Current Selected Residues have no active torsions!'
            self.vf.warningMsg(t)
            return 'ERROR'
        self.torscount = numpy.add.reduce(flexList.torscount)
        mol = flex_residues[0].top
        allResidues = mol.findType(Residue)
        if len(allResidues) > len(flex_residues):
            rigidResidues = allResidues - flex_residues
        else:
            rigidResidues = ResidueSet()
        flex_dict = self.vf.flexDict
        flex_dict['flex_residues'] = flex_residues
        flex_dict['flex_residues_number'] = len(flex_residues)
        flex_dict['rigidResidues'] = rigidResidues
        self.vf.ADflex_processResidues.guiCallback()

    def setAutoFlexFields(self, res):
        """Initialize per-residue flexibility bookkeeping.

        Detects the sidechain, classifies its bonds (rotatable, amide,
        guanidinium, leaf), counts active torsions and chooses the root atom
        (CA when present, otherwise user-chosen via a form, otherwise the
        first atom of the residue).
        """
        if hasattr(res, 'setup'):
            return  # already processed
        res.setup = 1
        res.atoms.used = 0
        res.atoms.bonds[0].possibleTors = 0
        res.atoms.bonds[0].activeTors = 0
        backbone_names = ['C', 'N', 'O', 'HN', 'HN1', 'HN2', 'HA',
                          'H1', 'H2', 'H3', 'HO', 'H']
        # CA is deliberately NOT listed above so it stays in the sidechain set
        sidechain = res.atoms.get(lambda x: x.name not in backbone_names)
        res.sideChain = sidechain
        bondlist = res.bondlist = sidechain.bonds[0]
        bondlist.leaf = 0
        bondlist.possibleTors = 0
        bondlist.activeTors = 0
        rbs = RotatableBondSelector()
        rotatables = rbs.select(bondlist)
        for b in rotatables:
            b.possibleTors = 1
            b.activeTors = 1
        # amide and guanidinium bonds are possible but inactive by default
        amides = AmideBondSelector().select(bondlist)
        for b in amides:
            b.activeTors = 0
            b.possibleTors = 1
        guanidiniums = GuanidiniumBondSelector().select(bondlist)
        for b in guanidiniums:
            b.activeTors = 0
            b.possibleTors = 1
        # leaf bonds (to terminal atoms) can never rotate
        leaves = LeafBondSelector().select(bondlist)
        for b in leaves:
            b.activeTors = 0
            b.possibleTors = 0
        res.torscount = len(bondlist.get(lambda x: x.activeTors == 1))
        # torsdof is kept for compatibility; it is not used in AutoDock4.
        # (the original repeated both assignments twice; the duplicate is gone)
        res.torsdof = res.torscount
        caAtoms = res.atoms.get(lambda x: x.name == 'CA')
        # get returns an AtomSet; true when non-empty
        if caAtoms:
            res.rootlist = caAtoms
        elif self.vf.hasGui:
            # no CA atom: ask the user to choose the root atom interactively
            rootname = tkinter.StringVar(master=self.vf.GUI.ROOT)
            if hasattr(res, 'rootlist'):
                rootname.set(res.rootlist[0].name)
            else:
                at0 = res.atoms.get(lambda x: x._uniqIndex == 0)[0]
                rootname.set(at0.name)
            s = 'Set Root Atom for ' + res.name
            ifd = InputFormDescr(title=s)
            ifd.append({'name': 'sideChainLC',
                        'widgetType': 'ListChooser',
                        'mode': 'single',
                        'entries': res.atoms.name,
                        'title': 'Select Root Atom',
                        'lbwcfg': {'height': 20, 'selectforeground': 'red', 'exportselection': 0},
                        'gridcfg': {'sticky': tkinter.W + tkinter.E}})
            vals = self.vf.getUserInput(ifd, modal=1, blocking=1)
            if vals:
                try:
                    atList = vals['sideChainLC']
                    res.rootlist = res.atoms.get(lambda x, atList=atList: x.name == atList[0])
                    res.sideChain = res.atoms
                except Exception:
                    msg = 'rootatom not in res, using defaults'
                    self.vf.warningMsg(msg)
                    res.rootlist = AtomSet([res.atoms.get(lambda x: x._uniqIndex == 0)[0]])
                    res.sideChain = res.atoms
            else:
                msg = 'rootatom not in res, using defaults'
                self.vf.warningMsg(msg)
                res.rootlist = AtomSet([res.atoms.get(lambda x: x._uniqIndex == 0)[0]])
                res.sideChain = res.atoms
        else:
            # no GUI available: default to the first atom of the residue
            res.rootlist = AtomSet([res.atoms.get(lambda x: x._uniqIndex == 0)[0]])
            res.sideChain = res.atoms
# GUI hook: places the residue selector under the AutoFlex menu in the toolbar.
AF_SelectResiduesGUI = CommandGUI()
AF_SelectResiduesGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'],menuText['Set Residues'])
class AF_ProcessResidues(SetRotatableBonds, MVBondICOM):
    """ allows user to process interactively a set of residues in macromolecule whose sidechains are to be flexed in an autodock run"""
    def __init__(self, func=None):
        # Interactive command (ICOM): torsions are toggled by Shift-picking bonds.
        SetRotatableBonds.__init__(self)
        MVBondICOM.__init__(self)
        # save1/save2 hold the previously installed Shift_L pick/drag-select
        # commands; they are restored in close_cb()/dismiss()
        self.save1 = None
        self.save2 = None
        self.guiUp = 0
        self.pickLevel = 'parts'
    def onAddCmdToViewer(self):
        """Create shared flexibility state and load the Pmv commands used here."""
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict = {}
        if not hasattr(self.vf, 'colorByAtomType'):
            self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
        if not hasattr(self.vf, 'setICOM'):
            self.vf.loadCommand('interactiveCommands', 'setICOM', 'Pmv')
        # NOTE(review): self.torsStr is read here before this class visibly
        # assigns it; presumably a base class initializes it - TODO confirm
        if self.vf.hasGui and not self.torsStr:
            self.torsStr = tkinter.StringVar(master=self.vf.GUI.ROOT)
    def guiCallback(self):
        """called INDIRECTLY each time the 'Choose Torsions in Currently Selected Residues...' button is pressed"""
        if 'flex_residues' not in self.vf.flexDict:
            t='select residues first'
            self.vf.warningMsg(t)
            return 'ERROR'
        nodes = self.vf.flexDict['flex_residues']
        self.mol = nodes[0].top
        if not len(nodes):
            t='no residues selected'
            self.vf.warningMsg(t)
            return 'ERROR'
        if not nodes.__class__==ResidueSet:
            t='selection must be of type ResidueSet'
            self.vf.warningMsg(t)
            return 'ERROR'
        self.changeFlexResCt(0)
        if not hasattr(self, 'ifd'):
            # first call: build the torsion-count panel
            self.torsStr= tkinter.StringVar(master=self.vf.GUI.ROOT)
            s = 'Number of rotatable bonds ='+str(0)+ ' / '+str(MAXTORS)+'\n'
            self.torsStr.set(s)
            self.renameAromatic= tkinter.IntVar(master=self.vf.GUI.ROOT)
            infoStr = '"Shift+pick" or "shift+drag-&-pick bonds". \nGreen = rotatable, \nMagenta = non-rotatable, \nRed = unrotatable.\n\n'
            ifd = self.ifd=InputFormDescr(title='Torsion Count')
            ifd.append({'name': 'maxTorsLab',
                'widgetType':tkinter.Label,
                'wcfg':{'text':infoStr},
                'gridcfg':{'sticky':tkinter.W + tkinter.E}}),
            ifd.append({'name':'torsEntryLab',
                'widgetType':tkinter.Label,
                'wcfg':{'textvariable':self.torsStr},
                'gridcfg':{'sticky':tkinter.W +tkinter.E}}),
            ifd.append({'name': 'noAmideBut',
                'widgetType':tkinter.Checkbutton,
                'wcfg':{'text':'amide torsions are allowed',
                    #'activebackground':'white',
                    'selectcolor':'white',
                    'indicatoron':0,
                    'command':self.setNoAmideTors_cb},
                'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':2}}),
            ifd.append({'name': 'closeBut',
                'widgetType':tkinter.Button,
                'wcfg':{'text':'Close','command':self.close_cb},
                'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':2}})
            self.form= self.vf.getUserInput(self.ifd, modal=0, blocking=0)
        else:
            if hasattr(self,'form'):
                self.form.root.deiconify()
        ## !@@! Is THIS APPROPRIATE?!?!?
        if hasattr(self.vf.ADflex_setResidues,'torscount'):
            self.torscount = self.vf.ADflex_setResidues.torscount
        else:
            self.torscount = 0
        self.currentNodes=nodes.sideChain
        self.vf.displayLines(self.currentNodes, topCommand=0)
        self.vf.displayLines(self.currentNodes, only=1, topCommand=0, redraw=1)
        # label atoms on bonds whose torsions are currently active
        pTbndset = self.currentNodes.bonds[0].get(lambda x:x.activeTors==1)
        pTatomset = (pTbndset.atom1 + pTbndset.atom2).uniq()
        if len(pTatomset):
            self.vf.labelByProperty(pTatomset, ('name', ), topCommand=0,location='Last', redraw=1)
            geom = self.mol.geomContainer.geoms['AtomLabels']
            geom.Set(billboard=True, fontStyle='solid', fontScales=(.3,.3,.3,))
        self.buildCol(self.mol, self.torscount)
        self.pTatomset = pTatomset
        # install this command as the Shift_L interactive command, saving
        # the previous ones for later restoration
        self.save1 = self.vf.ICmdCaller.commands.value["Shift_L"]
        self.save2 = self.vf.ICmdCaller.commands2.value["Shift_L"]
        self.vf.setICOM(self, modifier="Shift_L", topCommand = 0)
        self.vf.setIcomLevel( Atom )
        self.vf.flexDict['torscount'] = self.torscount
        #need to set up display by torsion activity here
    def close_cb(self, event=None):
        """Finish editing: drop residues left with no active torsions, restore
        the previous Shift_L commands and hide the panel."""
        #at this point, remove any residues which have no active torsions left
        dict = self.vf.flexDict
        flex_residues = dict['flex_residues']
        badList = []
        for item in flex_residues:
            if not hasattr(item,'torscount'):
                badList.append(item)
            if not item.torscount:
                print("eliminating", item.name)
                badList.append(item)
        badSet = ResidueSet(badList)
        flex_residues = flex_residues - badSet
        dict['flex_residues'] = flex_residues
        dict['rigidResidues'] = dict['rigidResidues']+badSet
        dict['flex_residues_number'] = len(flex_residues)
        if len(badSet):
            badAtoms = badSet.atoms
            self.currentNodes = self.currentNodes-badAtoms
        self.changeFlexResCt(0)
        self.vf.displayLines(self.currentNodes, topCommand=0, only=1)
        self.vf.colorByAtomType(self.currentNodes)
        self.vf.labelByProperty(self.pTatomset, ('name', ), negate=True, topCommand=0, redraw=1)
        self.pTatomset = AtomSet()
        #set the PCOM to something else
        self.vf.setICOM(self.save1, modifier="Shift_L", mode='pick', topCommand = 0)
        self.vf.setICOM(self.save2, modifier="Shift_L", mode='drag select', topCommand = 0)
        self.vf.GUI.VIEWER.Redraw()
        self.form.root.withdraw()
        self.save1 = None
        self.save2 = None
    def setNoAmideTors_cb(self, event=None, log=1, redraw=0):
        """Checkbutton callback: toggle amide torsions and recolor bonds."""
        self.setNoAmideTors()
        self.buildCol(self.mol, self.torscount)
    def setNoAmideTors(self, log=0):
        """Toggle whether amide bonds may rotate in the flexible residues."""
        # NOTE(review): self.hasAmide is read before this class visibly
        # assigns it; presumably a base class initializes it - TODO confirm
        self.vf.flexDict['noAmides']=self.hasAmide
        if self.hasAmide:
            self.hasAmide = 0
            self.turnOffAmides()
            self.ifd.entryByName['noAmideBut']['widget'].config(text='amide torsions are not allowed')
        else:
            self.hasAmide = 1
            self.turnOnAmides()
            self.ifd.entryByName['noAmideBut']['widget'].config(text='amide torsions are allowed')
    def turnOffAmides(self):
        """Deactivate amide torsions in every flexible residue."""
        list(map(self.turnOffAmide, self.vf.flexDict['flex_residues']))
    def turnOnAmides(self):
        """Activate amide torsions in every flexible residue."""
        list(map(self.turnOnAmide, self.vf.flexDict['flex_residues']))
    def turnOnAmide(self, res):
        # Residues without amide bonds are ignored.
        if not hasattr(res, 'amidebonds'):
            return
        for item in res.amidebonds:
            #only turn on the ones which were turned off
            if item.possibleTors and not item.activeTors:
                item.activeTors = 1
                self.torscount = self.torscount + 1
    def turnOffAmide(self, res):
        # Residues without amide bonds are ignored.
        if not hasattr(res, 'amidebonds'):
            return
        for item in res.amidebonds:
            #only turn off the ones which were turned on
            if item.possibleTors and item.activeTors:
                item.activeTors = 0
                self.torscount = self.torscount - 1
    def buildCol(self, mol, torscount):
        """Color bonds by torsion state (green=active, magenta=possible but
        inactive, red=not rotatable) and update the rotatable-bond counter."""
        s = 'Number of rotatable bonds ='+str(torscount) + ' / '+str(MAXTORS)+'\n'
        self.torsStr.set(s)
        currentbonds=mol.geomContainer.atoms['bonded'].bonds[0]
        col = []
        for b in currentbonds:
            if b.possibleTors:
                if b.activeTors: col.append((0,1,0))
                else: col.append((1,0,1))
            else:
                col.append((1,0,0))
        mol.geomContainer.geoms['bonded'].Set(materials=col,
                                        inheritMaterial=False,
                                        matBind=viewerConst.PER_PART)
        self.vf.GUI.VIEWER.Redraw()
    def changeFlexResCt(self,delta):
        """Adjust the flexible-residue count by *delta* and show it in the GUI."""
        n=self.vf.flexDict['flex_residues_number']
        n=n+delta
        self.vf.flexDict['flex_residues_number']=n
        msg = 'current %d flexible residues'%n
        self.vf.GUI.pickLabel.configure(text=msg)
    def checkAromatic_cb(self, event=None):
        """Toggle aromatic-carbon renaming (the feature is no longer supported)."""
        if self.renameAromatic.get():
            self.ifd.entryByName['checkAromBut']['widget'].config(text='aromatic carbons named A..')
            list(map(self.nameA,self.vf.flexDict['flex_residues']))
        else:
            self.ifd.entryByName['checkAromBut']['widget'].config(text='aromatic carbons named C..')
            list(map(self.renameC,self.vf.flexDict['flex_residues']))
    def nameA(self, res):
        # retained for GUI compatibility; the renaming feature was removed
        print('changing the names of aromatic carbons is no longer supported')
        return
    def renameC(self,res):
        # retained for GUI compatibility; the renaming feature was removed
        print('changing the names of aromatic carbons is no longer supported')
        return
    def __call__(self, bonds, **kw):
        """Programmatic entry point: toggle the picked *bonds*."""
        kw['topCommand'] = 0
        kw['busyIdle'] = 1
        kw['log'] = 0
        #self.setUpDisplay()
        self.doitWrapper(*(bonds,), **kw)
    def doit(self, bonds):
        """Toggle the rotatable flag of each picked bond that can rotate."""
        if not self.currentNodes:
            msg='No residues currently processed'
            self.vf.warningMsg(msg)
            return 'ERROR'
        for bond in bonds:
            if not bond.possibleTors: continue
            atoms = AtomSet([bond.atom1, bond.atom2])
            self.vf.ADflex_setBondRotatableFlag(atoms, not bond.activeTors,
                        topCommand = 0, redraw = 1, log =1, setupUndo = 1)
    def stop(self):
        self.done_cb()
    def getObjects(self,pick):
        """Map a pick event to the BondSet of bonds hit on the flexible
        molecule's 'bonded' geometry."""
        flexMol = self.vf.flexDict['flex_residues'].top.uniq()[0]
        flexMolGeom = flexMol.geomContainer.geoms['bonded']
        for o, val in list(pick.hits.items()): #loop over geometries
            primInd = [x[0] for x in val]
            if o != flexMolGeom: continue
            else: g = o.mol.geomContainer
            if o.name in g.geomPickToBonds:
                func = g.geomPickToBonds[o.name]
                if func: return func(o, primInd)
            else:
                l = []
                bonds = g.atoms[o.name].bonds[0]
                for i in range(len(primInd)):
                    l.append(bonds[int(primInd[i])])
                return BondSet(l)
    def done_cb(self):
        """Hide the form and restore atom-type coloring."""
        self.guiUp = 0
        if self.form: self.form.withdraw()
        self.vf.colorByAtomType(self.vf.flexDict['flex_residues'],
                topCommand=0, redraw=1)
    def dismiss(self):
        """Restore the saved Shift_L commands and finish."""
        if self.save1 is not None:
            self.vf.setICOM(self.save1, modifier="Shift_L", mode='pick', topCommand = 0)
            self.save1 = None
        if self.save2 is not None:
            self.vf.setICOM(self.save2, modifier="Shift_L", mode='drag select', topCommand = 0)
            self.save2 = None
        self.done_cb()
AF_ProcessResiduesGUI = CommandGUI()
class AF_SetupCovalentFlexibleResidue(MVCommand):
    """
    allows user to setup covalent docking input interactively. This involves:
    Define input:
        1. designating 2 ordered sets of 3 atoms each for the superposition
    Change molecules:
        1. superposing the ligand atoms on the atoms in the receptor residue
        2. removing the 3 superposed-atoms from the receptor residue
        3. building a bond from the 'atom_to_attach' (the first atom in the covalent-ligand)
            to the receptor-residue atom
        4. updating the receptor data structures: residue.atoms, receptor.allAtoms
    """

    def onAddCmdToViewer(self):
        """Create shared flexibility state and load the Pmv commands used here."""
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict={}
        if not hasattr(self.vf, 'colorByAtomType'):
            self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
        self.torscount = 0

    def __init__(self):
        self.form = None
        MVCommand.__init__(self)
        # optional output file for the flexible-residue pdbqt (written in Ok_cb)
        self.resFilename = None
        self.writeFLEXRES = 0

    def buildForm(self, event=None):
        """Build the input form: flexible-residue spec, the receptor and
        ligand atom triples used for superposition, and the ligand name."""
        ifd = self.ifd = InputFormDescr(title = 'Setup Covalent Ligand')
        ifd.append({'widgetType':tkinter.Label,
            'wcfg':{'text': 'FlexRes'},
            'tooltip': 'ReceptorName:ChainID:ResidueName\neg: hsg1:A:ARG8',
            'gridcfg':{'sticky':'w','columnspan':2} })
        ifd.append({'name':'resWid',
            'widgetType':tkinter.Entry,
            'wcfg':{'highlightcolor':'white'},
            'gridcfg':{'sticky':'ew','row':-1, 'column':3, 'columnspan':3} })
        ifd.append({'widgetType':tkinter.Label,
            'wcfg':{'text': 'FlexRes'},
            'tooltip': 'Name 3 residue atoms in this order:\natom1-atom2-atom3-receptor\neg: HG,SG,CB',
            'gridcfg':{'sticky':'w', 'columnspan':2} })
        ifd.append({'name':'rec_atomWid',
            'widgetType':tkinter.Entry,
            'wcfg':{'highlightcolor':'white'},
            'gridcfg':{'sticky':'ew','row':-1, 'column':2, 'columnspan':2} })
        ifd.append({'widgetType':tkinter.Label,
            'wcfg':{'text': '--Receptor'},
            'tooltip': "overlap atom bond requirements:\nHG ONLY one bond ->SG\nSG two:->HG, ->CB\nCB two:->SG, ->some other atom in residue",
            'gridcfg':{'sticky':'w', 'row':-1, 'column':4} })
        ifd.append({'widgetType':tkinter.Label,
            'wcfg':{'text': 'Ligand ligand--'},
            'tooltip': 'Name 3 ligand atoms in this order:\nligand-atom1-atom2-atom3\neg: C1,S,CS\neach name must match only\na single atom in the ligand',
            'gridcfg':{'sticky':'w', 'columnspan':2} })
        ifd.append({'name':'lig_atomWid',
            'widgetType':tkinter.Entry,
            'wcfg':{'highlightcolor':'white'},
            'gridcfg':{'sticky':'ew','row':-1, 'column':2, 'columnspan':2} })
        ifd.append({'widgetType':tkinter.Label,
            'wcfg':{'text': 'Ligand'},
            'tooltip': 'name of small molecule to be covalently attached\n eg "ind"',
            'gridcfg':{'sticky':'w', 'columnspan':2} })
        ifd.append({'name':'ligWid',
            'widgetType':tkinter.Entry,
            'wcfg':{'highlightcolor':'white'},
            'gridcfg':{'sticky':'ew','row':-1, 'column':2, 'columnspan':2} })
        ifd.append({'name': 'OK Button',
            'widgetType':tkinter.Button,
            'wcfg':{'text': 'OK',
                'borderwidth':1, 'command': self.Ok_cb},
            'gridcfg':{'sticky':tkinter.W+tkinter.E,
                'columnspan':3},
            })
        ifd.append({'name': 'Cancel Button',
            'widgetType':tkinter.Button,
            'wcfg':{'text': 'Cancel',
                'borderwidth':1, 'command': self.Cancel_cb},
            'gridcfg':{'sticky':tkinter.W+tkinter.E,
                'row':-1, 'column':3, 'columnspan':2},
            })
        self.form = self.vf.getUserInput(self.ifd, modal = 0, blocking=0)
        self.form.root.protocol('WM_DELETE_WINDOW',self.Cancel_cb)
        self.okBut = self.ifd.entryByName['OK Button']['widget']
        self.cancelBut = self.ifd.entryByName['Cancel Button']['widget']

    def Cancel_cb(self):
        """Hide the form without applying anything."""
        self.form.withdraw()

    def Ok_cb(self, event=None):
        """Validate the form entries, superimpose the ligand onto the chosen
        residue atoms, form the covalent bond and update the receptor."""
        ldict = self.ifd.entryByName
        # ---- process the receptor specification 'mol:chain:res' ----
        res_in_receptor = ldict['resWid']['widget'].get() #mol:chain:res
        if not len(res_in_receptor):
            msg = "No flexible residue has been specified!"
            self.vf.warningMsg(msg)
            return 'ERROR'
        molName,cName,rName = res_in_receptor.split(':')
        if not len(molName):
            msg = res_in_receptor + ' did not specify a single molecule name!'
            self.vf.warningMsg(msg)
            return "ERROR"
        rMOLS = self.vf.Mols.get(molName)
        if not len(rMOLS) or len(rMOLS)>1: #@@
            msg = res_in_receptor + " did not specify a valid molecule!"
            self.warningMsg(msg)
            return "ERROR"
        rec = rMOLS[0]
        self.vf.flexDict['macromol'] = rec
        self.vf.flexDict['macroname'] = rec.name
        self.vf.flexDict['macrofilename'] = os.path.basename(rec.parser.filename)
        recMol_name = rec.name
        resPSet = rec.chains.get(cName).residues.get(rName)
        if not len(resPSet):
            # fixed: was built with a comma, producing a tuple instead of a string
            msg = res_in_receptor + " did not match any residue in " + rec.parser.filename
            self.vf.warningMsg(msg)
            return 'ERROR'
        if len(resPSet)>1:
            # fixed NameError: 'resSet' -> 'resPSet'
            msg = '%s matched %d residues: using first %s' %(res_in_receptor, len(resPSet), resPSet[0].name)
            self.vf.warningMsg(msg)
        resP = resPSet[0]
        # ---- find the 3 receptor atoms used for the superposition ----
        rec_ats_str = ldict['rec_atomWid']['widget'].get() #'HG,SG,CB'
        rec_ats = resP.atoms.get(rec_ats_str)
        # explicit check instead of assert (asserts vanish under python -O)
        if rec_ats is None or len(rec_ats)!=3:
            self.vf.warningMsg(rec_ats_str + " did not match exactly 3 atoms in receptor:" + molName)
            return 'ERROR'
        # find the bond to break in rec_ats[2]
        cb = rec_ats[2]
        found = False
        for b in cb.bonds:
            ra = b.neighborAtom(cb)
            if ra not in rec_ats:
                r_attach = ra
                cb.bonds.remove(b)
                ra.bonds.remove(b)
                print(" found atom to attach:" + r_attach.full_name())
                found = True
                break
        if found==False:
            print("Unable to find atom for covalent bond based on " + rec_ats_str)
            return
        self.vf.flexDict['macromol'] = rec
        self.vf.flexDict['macroname'] = recMol_name
        self.vf.flexDict['macrofilename'] = os.path.basename(rec.parser.filename)
        # ---- process the ligand ----
        ligName = ldict['ligWid']['widget'].get() #6a_cov1_out
        lMols = self.vf.Mols.get(ligName)
        if not len(lMols) or len(lMols)>1:#@@
            # fixed: previously warned with a stale msg from the receptor checks
            msg = ligName + " did not specify a valid ligand molecule!"
            self.warningMsg(msg)
            return "ERROR"
        lig = lMols[0]
        lig_at_str = ldict['lig_atomWid']['widget'].get() #'C1,S,CS'
        lig_ats = lig.allAtoms.get(lig_at_str) #@@
        #@@ (??) might need to specify also chain+/or residue for atoms ...
        msg = 'invalid atom set found with ' + lig_at_str + ". Please update it to match exactly 3 ligand atoms!"
        # explicit check instead of assert followed by unreachable code
        if lig_ats is None or len(lig_ats)!=3:
            self.vf.warningMsg(msg)
            return 'ERROR'
        # superimpose the whole ligand onto the receptor atom triple
        movAts = lig.allAtoms
        l_attach = lig_ats[2] # CS
        self.vf.superimposeCoords(rec_ats, lig_ats, movAts)
        event = EditAtomsEvent('coords', movAts)
        self.vf.dispatchEvent(event)
        # add bond between r_attach and l_attach
        rec.addBond(r_attach, l_attach)
        new_bond = r_attach.bonds[-1]
        new_bond.possibleTors = 1
        new_bond.activeTors = 1
        #UPDATE molkit data structures:
        # first remove atoms that were overlapped: CYS215-HG,SG,CB
        for name in rec_ats_str.split(','):
            ats = resP.atoms.get(name)
            resP.remove(ats[0])
        #next add all the covalent ligand atoms to resP
        for AT in lig.allAtoms:
            resP.adopt(AT)
            AT.top = rec
        resP.atoms.top = rec
        rec.allAtoms = rec.chains.residues.atoms
        num_allAtoms_plus_one = len(rec.allAtoms) + 1
        rec.allAtoms.number = list(range(1, num_allAtoms_plus_one))
        try:
            self.vf.ADflex_setResidues(resPSet)
        except Exception as err:
            # fixed: 'raise msg' raised a str, which is a TypeError in Python 3
            raise RuntimeError("problems using ADflex_setResidues with " + resPSet.full_name()) from err
        self.vf.flexDict['flex_residues'] = resPSet
        self.vf.flexDict['rigidResidues'] = rec.chains.residues -resPSet
        self.vf.flexDict['flex_residues_number'] = len(resPSet) #assert==1?@@
        self.vf.flexDict['macrofilename'] = os.path.basename(rec.parser.filename)
        if self.writeFLEXRES and self.resFilename:
            from MolKit.pdbWriter import PdbqtWriter
            writer = PdbqtWriter()
            writer.write(self.resFilename, nodes=resP.atoms,
                         sort=0, records=['ATOM', 'HETATM'],
                         bondOrigin = ('File', 'UserDefined'))
        msg = "SUCCESSFULLY attached ligand " + ligName + " to residue " + resP.name + "!"
        self.vf.warningMsg(msg)
        self.form.withdraw()
        # locate the ligand entry in the Dashboard and remove it: its atoms
        # are now part of the receptor
        db = self.vf.dashboard
        ligDB = None
        for child in db.tree.root.children:
            if child.object.name==lig.name:
                ligDB = child
        if ligDB is not None:
            ligDB.deleteMolecule()
        self.vf.displayLines(res_in_receptor,log=False)
        self.vf.select(res_in_receptor)

    def guiCallback(self):
        """Show (or raise) the setup form."""
        if not hasattr(self, 'ifd'):
            self.buildForm()
        else:
            self.form.deiconify()
            self.form.lift()
# GUI hook: places the covalent-residue setup under the AutoFlex menu.
AF_SetupCovalentResidueGUI = CommandGUI()
AF_SetupCovalentResidueGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'],\
                menuText['Setup Covalent Residue'])
class AF_ProcessHingeResidues(SetRotatableBonds, MVBondICOM):
    """ allows user to process interactively a set of hinge residues in macromolecule whose sidechains are to be flexed in an autodock run"""
    def __init__(self, func=None):
        # Interactive command (ICOM): torsions are toggled by Shift-picking bonds.
        SetRotatableBonds.__init__(self)
        MVBondICOM.__init__(self)
        # save holds the previously installed Shift_L command; restored in dismiss()
        self.save = None
        self.guiUp = 0
        self.pickLevel = 'parts'
    def onAddCmdToViewer(self):
        """Create shared flexibility state and load the Pmv commands used here."""
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict = {}
        if not hasattr(self.vf, 'colorByAtomType'):
            self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
        if not hasattr(self.vf, 'setICOM'):
            self.vf.loadCommand('interactiveCommands', 'setICOM', 'Pmv')
        # NOTE(review): self.torsStr is read here before this class visibly
        # assigns it; presumably a base class initializes it - TODO confirm
        if self.vf.hasGui and not self.torsStr:
            self.torsStr = tkinter.StringVar(master=self.vf.GUI.ROOT)
    def setAutoFlexFieldsHinge(self, res, atomOne, atomTwo, atoms):
        """Classify the rotatable bonds of the hinge-moved part of *res* and
        count its active torsions (same bond scheme as AF_SelectResidues)."""
        if hasattr(res, 'processed'):
            print(res.full_name(), ' already has autoflexFields')
            return
        #PROCESS HINGE RESIDUES
        #backbone_names = ['CA', 'C','N','O','HN','HN1','HN2', 'HA',
        backbone_names = ['C','N','O','HN','HN1','HN2', 'HA',
                            'H1','H2','H3','HO', 'H']
        #only process the sidechains of atoms which are designated to be moved by hinge
        #resatoms = res.atoms - AtomSet([atomOne, atomTwo])
        resatoms = res.atoms.get(lambda x: x in atoms)
        sidechain = resatoms.get(lambda x: x.name not in backbone_names)
        res.sideChain = sidechain
        bondlist = res.bondlist = sidechain.bonds[0]
        bondlist.leaf = 0
        rbs = RotatableBondSelector()
        rotatables = rbs.select(bondlist)
        for b in rotatables:
            b.possibleTors = 1
            b.activeTors = 1
        # amide and guanidinium bonds are possible but inactive by default
        amides = AmideBondSelector().select(bondlist)
        for b in amides:
            b.activeTors = 0
            b.possibleTors = 1
        guanidiniums = GuanidiniumBondSelector().select(bondlist)
        for b in guanidiniums:
            b.activeTors = 0
            b.possibleTors = 1
        # leaf bonds (to terminal atoms) can never rotate
        leaves = LeafBondSelector().select(bondlist)
        for b in leaves:
            b.activeTors = 0
            b.possibleTors = 0
        res.torscount = len(bondlist.get(lambda x: x.activeTors==1))
        #this field is not used in AutoDock4
        res.torsdof = res.torscount
        #res.rootlist = AtomSet([resatoms[0]]) ??is this necessary?
        #res.rootlist = AtomSet([resatoms.get(lambda x: x._uniqIndex == 0)[0]])
        res.processed = True
    def changeHingeFlexResCt(self, delta):
        """Adjust the flexible-residue count by *delta* and show it in the GUI."""
        n = self.vf.flexDict.get('flex_residues_number',0)
        n = n + delta
        self.vf.flexDict['flex_residues_number'] = n
        msg = 'current %d flexible residues'%n
        self.vf.GUI.pickLabel.configure(text=msg)
    def guiCallback(self):
        """called INDIRECTLY each time the 'Set Selected Residues' button is pressed"""
        if 'hinge_list' not in self.vf.flexDict:
            t='set hinge first'
            self.vf.warningMsg(t)
            return 'ERROR'
        h_list = self.vf.flexDict['hinge_list']
        if not len(h_list):
            t='currently hinge list is empty!'
            self.vf.warningMsg(t)
            return 'ERROR'
        # NOTE(review): tests attribute 'hinge' but only 'hinge_to_process'
        # is ever assigned here, so this branch appears to always run - confirm
        if not hasattr(self, 'hinge'):
            self.hinge_to_process = self.vf.flexDict['hinge_list'][-1]
        hinge = self.hinge_to_process
        # a hinge is ((atomOne, atomTwo), atoms-to-move)
        (atomOne,atomTwo), atoms = hinge
        self.mol = atoms[0].top
        #self.mol.allAtoms.bonds[0].possibleTors = 0
        #self.mol.allAtoms.bonds[0].activeTors = 0
        if not len(atoms):
            t='no hinge atoms specified'
            self.vf.warningMsg(t)
            return 'ERROR'
        self.currentNodes = atoms
        hinge_resSet = atoms.parent.uniq()
        for res in hinge_resSet:
            self.setAutoFlexFieldsHinge(res, atomOne, atomTwo, atoms)
        self.torscount = numpy.add.reduce(hinge_resSet.torscount)
        self.changeHingeFlexResCt(len(hinge_resSet))
        if not hasattr(self, 'ifdX'):
            # first call: build the torsion-count panel
            self.torsStr= tkinter.StringVar(master=self.vf.GUI.ROOT)
            s = 'Number of rotatable bonds ='+str(0)+ ' / '+str(MAXTORS)+'\n'
            self.torsStr.set(s)
            self.renameAromatic= tkinter.IntVar(master=self.vf.GUI.ROOT)
            infoStr = '"Shift + pick" or "shift + drag-&-pick" bonds. \nGreen = rotatable, \nMagenta = non-rotatable, \nRed = unrotatable.\n\n'
            ifdX = self.ifdX=InputFormDescr(title='Torsion Count')
            ifdX.append({'name': 'maxTorsLab',
                'widgetType':tkinter.Label,
                'wcfg':{'text':infoStr},
                'gridcfg':{'sticky':tkinter.W + tkinter.E}}),
            ifdX.append({'name':'torsEntryLab',
                'widgetType':tkinter.Label,
                'wcfg':{'textvariable':self.torsStr},
                'gridcfg':{'sticky':tkinter.W +tkinter.E}}),
            ifdX.append({'name': 'closeBut',
                'widgetType':tkinter.Button,
                'wcfg':{'text':'Close','command':self.closeX_cb},
                'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':2}})
            self.formX= self.vf.getUserInput(self.ifdX, modal=0, blocking=0)
        else:
            if hasattr(self,'formX'):
                self.formX.root.deiconify()
        # install this command as the Shift_L interactive command, saving
        # the previous one for later restoration
        self.save = self.vf.ICmdCaller.commands.value["Shift_L"]
        self.vf.setICOM(self, modifier="Shift_L", topCommand = 0)
        self.vf.setIcomLevel( Atom )
        ####    #need to set up display by torsion activity here
        #could have preexisting flexible residues... or not
        self.vf.flexDict.setdefault('torscount', 0)
        if 'flex_residues' in list(self.vf.flexDict.keys()) and \
                len(self.vf.flexDict['flex_residues']):
            self.vf.flexDict['torscount'] += self.torscount
        else:
            self.vf.flexDict['torscount'] = self.torscount
        self.vf.displayLines(self.currentNodes, only=1, topCommand=0, redraw=1)
        self.buildCol(self.mol, self.torscount)
    def closeX_cb(self, event=None):
        """Hide the panel and recompute the overall torsion count."""
        dict = self.vf.flexDict
        self.vf.displayLines(self.currentNodes, topCommand=0, only=1)
        self.vf.colorByAtomType(self.currentNodes)
        self.vf.GUI.VIEWER.Redraw()
        self.formX.root.withdraw()
        self.vf.flexDict.setdefault('torscount', 0)
        #this is dangerous
        #!@@! fix this: what if two hinges... or if remove one... or all
        flex_residues_torscount = 0
        if hasattr(self.vf.ADflex_setResidues, 'torscount') and \
                self.vf.ADflex_setResidues.torscount>0:
            flex_residues_torscount = self.vf.ADflex_setResidues.torscount
        self.vf.flexDict['torscount'] = self.torscount + flex_residues_torscount
        self.dismiss()
    def buildCol(self, mol, torscount):
        """Color bonds by torsion state (green=active, magenta=possible but
        inactive, red=not rotatable) and update the rotatable-bond counter."""
        s = 'Number of rotatable bonds ='+str(torscount) + ' / '+str(MAXTORS)+'\n'
        self.torsStr.set(s)
        currentbonds = mol.geomContainer.atoms['bonded'].bonds[0]
        col = []
        for b in currentbonds:
            if hasattr(b, 'possibleTors') and b.possibleTors:
                if hasattr(b, 'activeTors') and b.activeTors: col.append((0,1,0))
                else: col.append((1,0,1))
            else:
                col.append((1,0,0))
        mol.geomContainer.geoms['bonded'].Set(materials=col,
                                        inheritMaterial=False,
                                        matBind=viewerConst.PER_PART)
        self.vf.GUI.VIEWER.Redraw()
    def changeFlexResCt(self, delta):
        """Adjust the flexible-residue count by *delta* and show it in the GUI."""
        self.vf.flexDict.setdefault('flex_residues_number',0)
        n = self.vf.flexDict['flex_residues_number']
        n = n + delta
        self.vf.flexDict['flex_residues_number'] = n
        msg = 'current %d flexible residues'%n
        self.vf.GUI.pickLabel.configure(text=msg)
    def __call__(self, bonds, **kw):
        """Programmatic entry point: toggle the picked *bonds*."""
        kw['topCommand'] = 0
        kw['busyIdle'] = 1
        kw['log'] = 0
        #kw['flexRes'] = False
        #self.setUpDisplay()
        self.doitWrapper(*(bonds,), **kw)
    def doit(self, bonds, **kw):
        """Toggle the rotatable flag of each picked bond that can rotate."""
        if not self.currentNodes:
            msg='No residues currently processed'
            self.vf.warningMsg(msg)
            return 'ERROR'
        for bond in bonds:
            if not bond.possibleTors:
                continue
            atoms = AtomSet([bond.atom1, bond.atom2])
            self.vf.ADflex_setBondRotatableFlag(atoms, not bond.activeTors,
                        topCommand = 0, redraw = 1, log =1, setupUndo = 1,
                        flexRes=True)
    def stop(self):
        self.done_cb()
    def getObjects(self,pick):
        """Map a pick event to the BondSet of bonds hit on the flexible
        molecule's 'bonded' geometry."""
        flexMol = self.mol
        flexMolGeom = flexMol.geomContainer.geoms['bonded']
        for o, val in list(pick.hits.items()): #loop over geometries
            primInd = [x[0] for x in val]
            if o != flexMolGeom: continue
            else: g = o.mol.geomContainer
            if o.name in g.geomPickToBonds:
                func = g.geomPickToBonds[o.name]
                if func: return func(o, primInd)
            else:
                l = []
                bonds = g.atoms[o.name].bonds[0]
                for i in range(len(primInd)):
                    l.append(bonds[int(primInd[i])])
                return BondSet(l)
    def done_cb(self):
        """Hide the form and redraw the viewer."""
        self.guiUp = 0
        # NOTE(review): this class builds self.formX, not self.form; confirm
        # self.form is provided elsewhere (base class?)
        if self.form: self.form.withdraw()
        #self.vf.ADflex_setHinge.spheres.visible=False
        self.vf.GUI.VIEWER.Redraw()
    def dismiss(self):
        """Restore the saved Shift_L command and finish."""
        if self.save is not None:
            self.vf.setICOM(self.save, modifier="Shift_L", topCommand = 0)
            self.save = None
        self.done_cb()
AF_ProcessHingeResiduesGUI = CommandGUI()
class AF_EditHinge(MVCommand, MVAtomICOM):
""" allows user to remove atoms to be moved from a hinge interactively"""
    def __init__(self, func=None):
        """Initialize command/ICOM bases and per-pick bookkeeping."""
        MVCommand.__init__(self, func)
        MVAtomICOM.__init__(self)
        # previously installed Shift_L command, restored when editing ends
        self.save = None
        # Tk variable ('add'/'remove'), created in onAddCmdToViewer when a GUI exists
        self.mode = None
        # atoms tracked for hinge-line display updates (see update())
        self.atomList = AtomSet([])
        #self.undoAtList = AtomSet([])
def onAddCmdToViewer(self):
if not hasattr(self.vf, 'flexDict'):
self.vf.flexDict = {}
if not hasattr(self.vf, 'colorByAtomType'):
self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
if not hasattr(self.vf, 'setICOM'):
self.vf.loadCommand('interactiveCommands', 'setICOM', 'Pmv')
if not hasattr(self.vf, 'ADflex_setHinge'):
self.vf.loadCommand('autoflexCommands', 'ADflex_setHinge', 'AutoDockTools')
if self.vf.hasGui:
self.mode = tkinter.StringVar(master=self.vf.GUI.ROOT)
def continuousUpdate_cb(self, name, oldval, newval):
if newval == 'yes':
self.continuousUpdate = 1
for event in ['<B2-Motion>', '<B3-Motion>', '<Shift-B3-Motion>']:
self.vf.GUI.addCameraCallback(event, self.update_cb)
else:
self.continuousUpdate = 0
for event in ['<B2-Motion>', '<B3-Motion>', '<Shift-B3-Motion>']:
self.vf.GUI.removeCameraCallback(event, self.update_cb)
def update_cb(self, event=None):
#print "in update_cb"
self.update()
def update(self, forward=1, event=None):
#print 'in update: self.atomList=', self.atomList
if not len(self.atomList):
print('Currently no atoms: resetting geoms')
return
for at in self.atomList:
c1 = self.getTransformedCoords(at)
self.vf.ADflex_setHinge.lineVertices.append(tuple(c1))
self.vf.ADflex_setHinge.update()
def guiCallback(self):
""" """
if not hasattr(self,'form'):
ifd = self.ifd = InputFormDescr(title="Edit Atoms to be moved by current Hinge by:")
#ifd.append({'name': 'testLab',
# 'widgetType':Tkinter.Label,
# 'wcfg':{'text':'Mode:'},
# 'gridcfg':{'columnspan':3,'sticky':'w'}})
ifd.append({'widgetType':tkinter.Radiobutton,
'wcfg':{'text':'adding Atoms',
'variable':self.mode,
'value': 'add'},
'gridcfg':{'sticky':'we'}})
ifd.append({'widgetType':tkinter.Radiobutton,
'wcfg':{'text':'removing Atoms',
'variable':self.mode,
'value': 'remove'},
'gridcfg':{'row':-1,'sticky':'we'}})
#ifd.append({'widgetType':Tkinter.Button,
# 'wcfg':{'text':'Close', 'command':self.close_cb},
# 'gridcfg':{'sticky':'we', 'columnspan':2}})
self.form = self.vf.getUserInput(ifd, modal=0, blocking=0)
self.form.root.protocol('WM_DELETE_WINDOW',self.close_cb)
else:
self.form.root.deiconify()
self.save = self.vf.ICmdCaller.commands.value["Shift_L"]
self.vf.setICOM(self, modifier="Shift_L", topCommand = 0)
def close_cb(self, event=None):
#at this point, just cleanup
self.stop()
self.form.withdraw()
self.vf.GUI.VIEWER.Redraw()
self.mode.set("")
#update hinge_atoms and non_hinge_atoms here?
flexDict = self.vf.flexDict
all = 'all_hinge_atoms'
opp = 'non_hinge_atoms'
#print 'before call to getAllHingeAtoms, ha=', len(flexDict[all]),
#print 'nha=', len(flexDict[opp]),
flexDict = self.vf.flexDict
flexDict[all], flexDict[opp] = self.vf.ADflex_setHinge.getAllHingeAtoms()
#print '2:after call to getAllHingeAtoms, ha=', len(flexDict.get(all,[])),
#print '2:nha=', len(flexDict.get(opp, []))
def doit(self, atoms):
if not hasattr(self.vf, 'flexDict'):
self.warningMsg('no flexDIct!')
return "ERROR"
dict = self.vf.flexDict
if not 'hinge_list' in list(dict.keys()):
self.warningMsg('no hinge_list in flexDict!')
return "ERROR"
hinges = self.vf.flexDict['hinge_list']
if not len(hinges):
self.warningMsg('current hinges in hinge_list !')
return "ERROR"
if len(hinges):
current_hinge = hinges[-1]
if not len(current_hinge)==2:
self.warningMsg('last hinge in hinge_list is ill-formed!')
return 'ERROR'
atoms_to_move = current_hinge[1]
if not len(atoms_to_move):
self.warningMsg("no atoms to move in current hinge!")
return "ERROR"
if self.mode.get()=='remove':
new_atoms_to_move = atoms_to_move - atoms
#remember to put these atoms into rigid
else:
new_atoms_to_move = atoms_to_move + atoms
#remember to remove these atoms from rigid
#print 'setting atoms to move to ', new_atoms_to_move
new_ats = new_atoms_to_move.uniq()
self.vf.flexDict['hinge_list'][-1][1] = new_ats
self.vf.ADflex_setHinge.atoms = new_ats
if self.vf.hasGui:
self.vf.ADflex_setHinge.hingeAtoms.Set(vertices = new_ats.coords)
self.vf.ADflex_setHinge.hingeAtoms.visible = 1
self.vf.GUI.VIEWER.Redraw()
def startICOM(self):
self.vf.setIcomLevel( Atom )
def dismiss(self):
if self.save is not None:
self.vf.setICOM(self.save, modifier="Shift_L", topCommand=0)
self.save = None
self.vf.GUI.VIEWER.Redraw()
self.done_cb()
def stop(self):
self.dismiss()
def done_cb(self):
self.guiUp = 0
if self.form: self.form.withdraw()
# GUI wrapper: register 'Edit Hinge' under the AutoFlex menu
AF_EditHingeGUI = CommandGUI()
AF_EditHingeGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'],\
        menuText['Edit Hinge'])
class AF_SetHinge(MVCommand, MVAtomICOM):
    """ allows user to setup a hinge interactively to be flexed in an autodock run"""
    def __init__(self, func=None):
        # initialize both the command and interactive-pick mixin bases
        MVCommand.__init__(self, func)
        MVAtomICOM.__init__(self)
        self.save = None            # previously installed Shift_L command, restored on dismiss
        self.guiUp = 0              # whether the form is currently posted
        self.atoms = []             # atoms to be moved by the hinge being built
        self.atomList = []          # picked hinge end-point atoms
        self.atomCenters = []
        self.atomOne = None #hinge pt 1
        self.atomTwo = None #hinge pt 2
        self.coordSlot = None       # conformation slot index allocated for hinge coords
        self.labelStrs = []
        self.labelCenters = []
        self.lineVertices = []      # transformed coords of the two hinge end points
        self.snakeLength = 2
        self.old_hinge_method = ""  # last value of the hinge-method radiobutton
        self.old_atoms_method = ""
        #self.pickLevel = 'parts'
def getTransformedCoords(self, atom):
# when there is no viewer, the geomContainer is None
if not atom.top.geomContainer:
return atom.coords
g = atom.top.geomContainer.geoms['master']
c = self.vf.transformedCoordinatesWithInstances(AtomSet([atom]))
return numpy.array(c[0], 'f')
#def setupUndoBefore(self, ats):
# self.addUndoCall((),{}, self.name+'.undo')
#def undo(self,*args, **kw):
# if len(self.lineVertices):
# self.atomList = self.atomList[:-1]
# self.update(forward=0)
    def onAddCmdToViewer(self):
        """Load dependent commands and, when a GUI exists, create the
        hinge marker geometries (end-point spheres, axis line, moved-atom
        crosses) and the Tk variables backing the form widgets."""
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict = {}
        if not hasattr(self.vf, 'colorByAtomType'):
            self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
        if not hasattr(self.vf, 'setICOM'):
            self.vf.loadCommand('interactiveCommands', 'setICOM', 'Pmv')
        if self.vf.hasGui:
            from DejaVu.Spheres import Spheres
            from DejaVu.Geom import Geom
            miscGeom = self.vf.GUI.miscGeom
            self.masterGeom = Geom("setAutoFlexHingeGeoms",
                                shape=(0,0), protected=True)
            self.vf.GUI.VIEWER.AddObject(self.masterGeom, parent=miscGeom)
            self.spheres = Spheres(name='AutoFlexHinge_spheres',
                            materials=((1.,1.,0),), shape=(0,3), radii=0.2,
                            quality=15, inheritMaterial=0, protected=True)
            from opengltk.OpenGL import GL
            self.spheres.frontPolyMode = GL.GL_LINE
            #marker for end points of hinge
            self.spheres.Set(visible=1, tagModified=False)
            self.spheres.pickable = 0
            from DejaVu.IndexedPolylines import IndexedPolylines
            #marker for axis of hinge
            self.line = IndexedPolylines('distLine', materials = ((1,1,0),),
                            inheritMaterial=0, lineWidth=3,
                            stippleLines=1, protected=True,
                            visible=True)
            self.vf.GUI.VIEWER.AddObject(self.line, parent=self.masterGeom)
            self.line.pickable = 0
            from DejaVu.Points import CrossSet
            #markers for atoms moved by hinge motion
            self.hingeAtoms = CrossSet('AutoFlexHinge_hingeAtoms',
                            inheritMaterial=0, materials=((1.,0.3,0),),
                            offset=0.1,lineWidth=2, protected=True, visible=True)
            self.hingeAtoms.Set(visible=0, tagModified=False)
            self.hingeAtoms.pickable = 0
            # Tk variables driving the form radiobuttons/checkbuttons
            self.selLevel = tkinter.StringVar(master=self.vf.GUI.ROOT)
            self.selectionBase = tkinter.StringVar(master=self.vf.GUI.ROOT)
            self.selectHingeMethod = tkinter.StringVar(master=self.vf.GUI.ROOT)
            self.selectionCenter = tkinter.StringVar(master=self.vf.GUI.ROOT)
            self.adjustPt = tkinter.IntVar(master=self.vf.GUI.ROOT)
            self.adjustPt.set(0)
            self.editAtomsToMove = tkinter.IntVar(master=self.vf.GUI.ROOT)
            self.editAtomsToMove.set(0)
            self.vf.GUI.VIEWER.AddObject(self.spheres, redo=0,
                                    parent=self.masterGeom)
            self.vf.GUI.VIEWER.AddObject(self.hingeAtoms, redo=0,
                                    parent=self.masterGeom)
    def continuousUpdate_cb(self, name, oldval, newval):
        """Attach/detach camera-motion callbacks so the hinge display
        tracks viewer transformations while dragging."""
        if newval == 'yes':
            self.continuousUpdate = 1
            for event in ['<B2-Motion>', '<B3-Motion>', '<Shift-B3-Motion>']:
                self.vf.GUI.addCameraCallback(event, self.update_cb)
        else:
            self.continuousUpdate = 0
            for event in ['<B2-Motion>', '<B3-Motion>', '<Shift-B3-Motion>']:
                self.vf.GUI.removeCameraCallback(event, self.update_cb)
    def update_cb(self, event=None):
        """Camera callback: refresh the display unless the whole scene (or
        nothing relevant) is being transformed."""
        if not len(self.atomList):
            return
        vi = self.vf.GUI.VIEWER
        if vi.redirectTransformToRoot:
            return
        if vi.currentObject==vi.rootObject:
            return
        self.update()
def update(self, forward=1, event=None):
if not len(self.atomList):
print('Currently no atoms: resetting geoms')
self.spheres.Set(vertices=[])
self.labels.Set(vertices=[])
self.line.Set(vertices=[])
#self.hingeAtoms.Set(vertices=[]) #???
return
self.lineVertices=[]
for at in self.atomList:
c1 = self.getTransformedCoords(at)
self.lineVertices.append(tuple(c1))
self.spheres.Set(vertices=self.lineVertices)
if len(self.lineVertices)==1:
#this fixes case of stepping back over 1st label
self.labels.Set(vertices=[])
self.line.Set(vertices=[])
elif len(self.atomList)>1:
self.line.Set(vertices=self.lineVertices, faces=[(0,1),] )
self.atomOne = self.atomList[0]
if self.coordSlot==None:
mol = self.atomOne.top
self.coordSlot = len(mol.allAtoms[0]._coords)
mol.allAtoms.addConformation(mol.allAtoms.coords[:])
mol.allAtoms.setConformation(self.coordSlot)
self.atomTwo = self.atomList[1]
if self.atomOne==self.atomTwo:
self.vf.warningMsg('ERROR:two hinge atoms are identical')
self.clear_cb()
return
#setting spheres doesn't trigger redraw so do it explicitly
self.vf.GUI.VIEWER.Redraw()
def buildForm(self):
#build ifd and form
ifd = self.ifd = InputFormDescr(title='Set Up Hinge')
ifd.append({'widgetType': tkinter.Label,
'name':'set hinge atoms label',
'wcfg':{'text':'Set Hinge Atoms by:',
'bd':2,'relief':'ridge'},
'gridcfg':{'sticky':tkinter.W+tkinter.E,
'columnspan':8}})
ifd.append({'widgetType':tkinter.Radiobutton,
'name':'pickingRB',
'wcfg':{'variable':self.selectHingeMethod,
'text':'picking',
'value':'picking',
'command':self.updateHinge}, #1
'gridcfg':{'sticky':tkinter.W}}),
ifd.append({'widgetType':tkinter.Radiobutton,
'name':'selectionRB',
'wcfg':{'variable':self.selectHingeMethod,
'text':'current selection',
'value':'cursel',
'command':self.updateHinge}, #1
'gridcfg':{'sticky':tkinter.W, 'row':-1, 'column':3}}),
# ifd.append({'widgetType':Tkinter.Checkbutton,
# 'name':'pt1CB',
# 'wcfg':{ 'text':'adjust x y z coords of hingePts ',
# 'command':self.adjustPt_cb},
# 'gridcfg':{'sticky':Tkinter.W}}),
# ifd.append({'widgetType':Tkinter.Checkbutton,
# 'name':'editLastHingeCB',
# 'wcfg':{'variable':self.editAtomsToMove,
# 'text':'edit atoms to move',
# 'command':self.editAtomsToMove_cb},
# 'gridcfg':{'sticky':Tkinter.W}}),
## 'gridcfg':{'sticky':Tkinter.W, 'row':-1, 'column':3}}),
# ifd.append({'name':'xval_tw',
# 'widgetType':ThumbWheel,
# 'wType':ThumbWheel,
# 'wcfg':{ 'labCfg':{
# 'text': 'x',
# },
# 'type':'float',
# 'precision':2,
# 'width':30,
# 'continuous':1,
# 'wheelPad':2,
# 'height':20,
# 'value':0.,
# 'callback':self.updatePt1,
# 'oneTurn':50.,},
# 'gridcfg':{'columnspan':1,'sticky':'e'}})
# ifd.append({'name':'yval_tw',
# 'widgetType':ThumbWheel,
# 'wType':ThumbWheel,
# 'wcfg':{ 'labCfg':{
# 'text': 'y',
# },
# 'type':'float',
# 'precision':2,
# 'width':30,
# 'continuous':1,
# 'wheelPad':2,
# 'height':20,
# 'value':0.,
# 'callback':self.updatePt1,
# 'oneTurn':50.,},
# 'gridcfg':{'columnspan':1,'sticky':'e', 'row':-1, 'column':1}})
# ifd.append({'name':'zval_tw',
# 'widgetType':ThumbWheel,
# 'wType':ThumbWheel,
# 'wcfg':{ 'labCfg':{
# 'text': 'z',
# },
# 'type':'float',
# 'precision':2,
# 'width':30,
# 'continuous':1,
# 'wheelPad':2,
# 'height':20,
# 'value':0.,
# 'callback':self.updatePt1,
# 'oneTurn':50.,},
# 'gridcfg':{'columnspan':1,'sticky':'e', 'row':-1, 'column':2}})
# ifd.append({'name':'xval2_tw',
# 'widgetType':ThumbWheel,
# 'wType':ThumbWheel,
# 'wcfg':{ 'labCfg':{
# 'text': 'x',
# },
# 'type':'float',
# 'precision':2,
# 'width':30,
# 'continuous':1,
# 'wheelPad':2,
# 'height':20,
# 'value':0.,
# 'callback':self.updatePt2,
# 'oneTurn':50.,},
# 'gridcfg':{'columnspan':1,'sticky':'e', 'row':-1, 'column':3}})
# ifd.append({'name':'yval2_tw',
# 'widgetType':ThumbWheel,
# 'wType':ThumbWheel,
# 'wcfg':{ 'labCfg':{
# 'text': 'y',
# },
# 'type':'float',
# 'precision':2,
# 'width':30,
# 'continuous':1,
# 'wheelPad':2,
# 'height':20,
# 'value':0.,
# 'callback':self.updatePt2,
# 'oneTurn':50.,},
# 'gridcfg':{'columnspan':1,'sticky':'e', 'row':-1, 'column':4}})
# ifd.append({'name':'zval2_tw',
# 'widgetType':ThumbWheel,
# 'wType':ThumbWheel,
# 'wcfg':{ 'labCfg':{
# 'text': 'z',
# },
# 'type':'float',
# 'precision':2,
# 'width':30,
# 'continuous':1,
# 'wheelPad':2,
# 'height':20,
# 'value':0.,
# 'callback':self.updatePt2,
# 'oneTurn':50.,},
# 'gridcfg':{'columnspan':1,'sticky':'e', 'row':-1, 'column':5}})
ifd.append({'name': 'atoms to move label',
'widgetType': tkinter.Label,
'wcfg':{'bd':2,'relief':'ridge',
'text':'Atoms to move:'},
'gridcfg':{'sticky':tkinter.W+tkinter.E,
'columnspan':8}})
ifd.append({'widgetType':tkinter.Radiobutton,
'name':'betweenRB',
'wcfg':{'variable':self.selectionBase,
'text':'between hinge points',
'value':'between',
'command':self.updateBase},
'gridcfg':{'sticky':tkinter.W, 'columnspan':2}})
ifd.append({'widgetType':tkinter.Radiobutton,
'name':'selectionRB',
'wcfg':{'text':'current selection',
'variable':self.selectionBase,
'value':'selection',
'command':self.updateBase},
'gridcfg':{'sticky':tkinter.W, 'row':-1, 'column':3}})
ifd.append({'widgetType':tkinter.Radiobutton,
'name':'savedSetRB',
'wcfg':{'text':'a saved set',
'variable':self.selectionBase,
'value':'set',
'command':self.updateBase},
'gridcfg':{'sticky':tkinter.W, 'row':-1, 'column':5}})
#'gridcfg':{'sticky':Tkinter.W}})
ifd.append({'widgetType':tkinter.Checkbutton,
'name':'editLastHingeCB',
'wcfg':{'variable':self.editAtomsToMove,
'text':'edit atoms to move',
'command':self.editAtomsToMove_cb},
'gridcfg':{'sticky':tkinter.W}})
#'gridcfg':{'sticky':Tkinter.W, 'row':-1, 'column':2}}),
ifd.append({'name':'selectB',
'widgetType':tkinter.Button,
'wcfg':{'text':'Set Hinge',
'command':self.setHinge_cb},
'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':2}})
ifd.append({'name':'selectTorsionsB',
'widgetType':tkinter.Button,
'wcfg':{'text':'Select Torsions in Last Hinge',
'command':self.selectTorsions_cb},
'gridcfg':{'sticky':tkinter.W+tkinter.E,'row':-1, 'column':2, 'columnspan':2}})
ifd.append({'name':'clearLastB',
'widgetType':tkinter.Button,
'wcfg':{'text':'Remove Last Hinge',
'command':self.removeLastHinge_cb},
'gridcfg':{'sticky':tkinter.W+tkinter.E, 'row':-1,'columnspan':2, 'column':4 }})
ifd.append({'name':'clearB',
'widgetType':tkinter.Button,
'wcfg':{'text':'Clear',
'command':self.clear_cb},
'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':2}})
ifd.append({'name':'clearAllB',
'widgetType':tkinter.Button,
'wcfg':{'text':'Clear Hinge List',
'command':self.clearAll_cb},
'gridcfg':{'sticky':tkinter.W+tkinter.E,
'row':-1, 'column':2, 'columnspan':2}})
ifd.append({'name':'closeB',
'widgetType':tkinter.Button,
'wcfg':{'text':'Close',
'command':self.close_cb},
'gridcfg':{'sticky':tkinter.W+tkinter.E,
'row':-1,'column':4, 'columnspan':2}})
self.form = self.vf.getUserInput(ifd, modal=0,blocking=0)
self.form.root.protocol('WM_DELETE_WINDOW',self.close_cb)
#self.selection_base = self.ifd.entryByName['atoms to move']['widget']
#self.lb = self.ifd.entryByName['baseMols']['widget'].lb
#self.lb.config({'selectmode':'multiple'})
#self.lb.bind('<Enter>',self.updateBase)
#self.lb.bind('<Leave>',self.updateBase)
# self.xval = self.ifd.entryByName['xval_tw']['widget']
# self.yval = self.ifd.entryByName['yval_tw']['widget']
# self.zval = self.ifd.entryByName['zval_tw']['widget']
# self.xval2 = self.ifd.entryByName['xval2_tw']['widget']
# self.yval2 = self.ifd.entryByName['yval2_tw']['widget']
# self.zval2 = self.ifd.entryByName['zval2_tw']['widget']
# self.hide_xyz()
self.old_val = ""
    def adjustPt_cb(self, event=None):
        """Show or hide the hinge-point xyz thumbwheels.

        NOTE(review): the xval/yval/... widgets referenced by repack_xyz
        and hide_xyz are only defined in commented-out form code, so this
        callback would raise AttributeError if triggered — confirm.
        """
        if self.adjustPt.get():
            self.repack_xyz()
        else:
            self.hide_xyz()
    def editAtomsToMove_cb(self, event=None):
        """Toggle the ADflex_editHinge helper GUI; on close, rebuild the
        aggregate hinge/non-hinge atom sets in flexDict."""
        flexDict = self.vf.flexDict
        if 'hinge_list' not in list(flexDict.keys()) or len(self.vf.flexDict['hinge_list'])==0:
            msg = 'no hinges to edit'
            self.warningMsg(msg)
            self.editAtomsToMove.set(0)
            return
        if self.editAtomsToMove.get():
            self.vf.ADflex_editHinge.guiCallback()
        else:
            self.vf.ADflex_editHinge.close_cb()
            #reset the flexDict entries here
            all = 'all_hinge_atoms'
            opp = 'non_hinge_atoms'
            flexDict[all], flexDict[opp] = self.getAllHingeAtoms()
# def updatePt1(self, event=None):
# print 'setting atomOne.x to', self.xval.get()
# self.atomOne.coords[0] = self.xval.get()
# print 'setting atomOne.y to', self.yval.get()
# self.atomOne.coords[1] = self.yval.get()
# print 'setting atomOne.z to', self.zval.get()
# self.atomOne.coords[2] = self.zval.get()
# #self.lineVertices.append(tuple(c1))
# self.spheres.Set(vertices=[self.atomOne.coords,])
# def updatePt2(self, event=None):
# print 'in updatePt2'
# print 'setting atomTwo.x to', self.xval2.get()
# self.atomTwo.coords[0] = self.xval2.get()
# print 'setting atomTwo.y to', self.yval2.get()
# self.atomTwo.coords[1] = self.yval2.get()
# print 'setting atomTwo.z to', self.zval2.get()
# self.atomTwo.coords[2] = self.zval2.get()
# self.spheres.Set(vertices=[self.atomOne.coords,])
# def update_xyz(self, val):
# if self.old_val=="":
# if val=='xyz':
# self.repack_xyz()
# elif val!=self.old_val:
# if self.old_val=='xyz':
# self.hide_xyz()
# elif val=='xyz':
# self.repack_xyz()
    def getReverseSubtree(self, at1, at2, debug=False):
        """Return the atoms on at1's side of the at1-at2 hinge.

        Marks lower-numbered neighbors of at1 plus both end points as
        blockers, lets MolKit's _MarkTree flood-fill from at1, then removes
        the blockers from the result. Both atoms must belong to the same
        molecule. The scratch _subtree_selector attribute is removed unless
        *debug* is true.
        """
        m1 = at1.top
        m2 = at2.top
        assert m1==m2
        m1.allAtoms._subtree_selector = 0
        for b in at1.bonds:
            a2 = b.neighborAtom(at1)
            if a2.number<at1.number:
                a2._subtree_selector = 1
        at1._subtree_selector = at2._subtree_selector = 1
        non_moving_ats = m1.allAtoms.get(lambda x: x._subtree_selector==1)
        m1._MarkTree(at1)
        non_moving_ats._subtree_selector = 0
        results = m1.allAtoms.get(lambda x: x._subtree_selector==1)
        if not debug:
            delattr(m1.allAtoms, '_subtree_selector')
        return results
    def updateBase(self, val=None):
        """Recompute self.atoms (atoms the hinge moves) according to the
        selectionBase radiobutton ('between', 'selection' or 'set') and
        show them with the cross markers."""
        base = self.selectionBase.get()
        if base == 'between':
            if self.atomOne is not None and self.atomTwo is not None:
                #print 'calling getReverseSubtree with ', self.atomOne.full_name(), '+', self.atomTwo.full_name()
                self.atoms = self.getReverseSubtree(self.atomOne, self.atomTwo)
                self.hingeAtoms.Set(vertices = self.atoms.coords)
                self.hingeAtoms.visible = 1
        elif base=='selection':
            self.atoms = self.vf.getSelection()[:].findType(Atom)
            self.hingeAtoms.Set(vertices = self.atoms.coords)
            self.hingeAtoms.visible = 1
        elif base=='set':
            self.atoms = self.getSet()
            if self.atoms:
                self.hingeAtoms.Set(vertices = self.atoms.coords)
                self.hingeAtoms.visible = 1
        self.vf.GUI.VIEWER.Redraw()
def getSet(self, event=None):
idf = InputFormDescr(title ='')
entries = []
names = list(self.vf.sets.keys())
names.sort()
for key, value in list(self.vf.sets.items()):
entries.append( (key, value.comments) )
idf.append({'name':'set',
'widgetType':ListChooser,
'wcfg':{'mode':'single',
'entries': entries,
'title':'Choose an item: '}})
vals = self.vf.getUserInput(idf)
if len(vals['set'])> 0:
setNames = vals['set']
print('returning ', setNames)
return self.vf.sets[setNames[0]]
    def selectTorsions_cb(self, event=None):
        """Let the user pick one of the stored hinges and, if the last
        hinge is well-formed, open the hinge-residue torsion processor."""
        hinge_list = self.vf.flexDict['hinge_list']
        if len(hinge_list):
            hinges = []
            for item in hinge_list:
                entry = str(item[0])[1:-1] # + AtomSet(item[1])?
                hinges.append(entry)
            s = 'Select Hinge to process '
            ifd = InputFormDescr(title=s)
            ifd.append({'name':'hingeLC',
                        'widgetType': 'ListChooser',
                        'mode': 'single',
                        'entries':hinges,
                        'title': s,
                        'lbwcfg':{'height':20,'selectforeground':'red','exportselection':0},
                        'gridcfg':{'sticky':tkinter.W +tkinter.E}}),
            vals= self.vf.getUserInput(ifd, modal=1, blocking=1)
            if vals:
                try:
                    hinge = vals['hingeLC'][0]
                    index = hinges.index(hinge)
                    self.hinge_to_process = hinge_list[index]
                except:
                    msg = "Unable to process hinge designated: ", hinge
                    self.warningMsg(msg)
                    return 'ERROR'
                last_hinge = self.vf.flexDict['hinge_list'][-1]
                if len(last_hinge)==2 and len(last_hinge[0])==2:
                    self.vf.ADflex_processHingeResidues.guiCallback()
                else:
                    self.warningMsg("last hinge is ill-formed")
                    return
        else:
            self.warningMsg("currently there are no specified hinges")
            return
        #if not len(self.atoms):
        #    print "currently no hinge atoms have been designated"
        #    return
        #self.vf.ADflex_processHingeResidues.guiCallback()
    def clear_cb(self, event=None):
        """Reset the hinge-in-progress: forget end points and moving atoms,
        subtract their residues' torsion counts from self.torscount, and
        empty every marker geometry."""
        #callback for clear button
        self.selectionBase.set("")
        self.atomOne = None
        self.atomTwo = None
        if len(self.atoms):
            # NOTE(review): assumes self.atoms is an AtomSet here (a plain
            # list would lack .parent) — confirm against updateBase paths
            resSet = self.atoms.parent.uniq()
            tors_ct = 0
            for r in resSet:
                if hasattr(r, 'torscount'):
                    tors_ct += r.torscount
            #remove these torsions from self.torscount
            if hasattr(self, 'torscount'):
                self.torscount = self.torscount - tors_ct
        self.atoms = []
        self.lineVertices = []
        self.hingeAtoms.Set(vertices=[])
        self.spheres.Set(vertices=[])
        self.line.Set(vertices=[])
        #self.selection_base.getcurselection()
        self.selectionBase.set("")
        self.selectHingeMethod.set("")
        self.vf.GUI.VIEWER.Redraw()
def getAllHingeAtoms(self):
#rebuild hinge_residues
d = {}
for h in self.vf.flexDict['hinge_list']:
d[h[0][0]] = 1
d[h[0][1]] = 1
for at in h[1]: d[at] = 1
all_hinge_atoms = AtomSet(list(d.keys()))
resSet = all_hinge_atoms.parent.uniq()
non_hinge_atoms = resSet.atoms - all_hinge_atoms
#caution: should this reorder the residues?
#resSet.sort()
return all_hinge_atoms, non_hinge_atoms
def removeLastHinge_cb(self, event=None):
flexDict = self.vf.flexDict
if len(flexDict['hinge_list'])>1:
flexDict['hinge_list'] = flexDict['hinge_list'][:-1]
flexDict['all_hinge_atoms'], flexDict['non_hinge_atoms'] = self.getAllHingeAtoms()
else:
flexDict['hinge_list']=[]
flexDict['all_hinge_atoms'] = AtomSet()
flexDict['non_hinge_atoms'] = AtomSet()
self.clear_cb()
def clearAll_cb(self, event=None):
self.vf.flexDict['hinge_list']=[]
#self.vf.flexDict['hinge_residues'] = ResidueSet()
self.vf.flexDict['all_hinge_atoms'] = AtomSet()
self.vf.flexDict['non_hinge_atoms'] = AtomSet()
self.torscount = 0
#reset gui components
self.clear_cb()
    def hide_xyz(self, event=None):
        """Unmap the six hinge-point coordinate thumbwheels.

        NOTE(review): self.xval etc. are only created by form code that is
        commented out in buildForm, so this would raise AttributeError if
        called — confirm before re-enabling adjustPt.
        """
        for b in [self.xval, self.yval, self.zval,
                  self.xval2, self.yval2, self.zval2]:
            b.grid_forget()
    def repack_xyz(self, event=None):
        """Re-grid the six coordinate thumbwheels, pre-loading them with
        the current hinge end-point coordinates.

        NOTE(review): same caveat as hide_xyz — the widgets and their ifd
        entries are commented out in buildForm.
        """
        cfgstrings = ['xval_tw', 'yval_tw', 'zval_tw', 'xval2_tw', 'yval2_tw', 'zval2_tw']
        buttons = [self.xval, self.yval, self.zval, self.xval2, self.yval2, self.zval2]
        if hasattr(self, 'atomOne'):
            self.xval.set(self.atomOne.coords[0])
            self.yval.set(self.atomOne.coords[1])
            self.zval.set(self.atomOne.coords[2])
        if hasattr(self, 'atomTwo'):
            self.xval2.set(self.atomTwo.coords[0])
            self.yval2.set(self.atomTwo.coords[1])
            self.zval2.set(self.atomTwo.coords[2])
        for cfg_name, b in zip(cfgstrings, buttons):
            b.grid(self.ifd.entryByName[cfg_name]['gridcfg'])
    def updateHinge(self, event=None):
        """React to the hinge-method radiobuttons.

        'picking' installs an immediate AtomPicker feeding setHingePt_cb;
        'cursel' takes the first two selected atoms as the hinge end points
        (and any further ones as atoms to move); 'xyz' (legacy, widgets
        currently disabled) takes coordinates from the thumbwheels.
        Switching methods clears any hinge in progress.
        """
        #callback for changes to radiobuttons for hinge
        val = self.selectHingeMethod.get()
        #if different method for setting hinge, clear geoms
        if val!=self.old_hinge_method:
            self.clear_cb()
            self.old_hinge_method = val
        #here manage picking an atom
        #self.update_xyz(val)
        if hasattr(self, 'ap'): # if there is an atom picker delete it
            self.ap.stop()
            del self.ap
        if val == 'picking':
            from Pmv.picker import AtomPicker
            self.ap=AtomPicker(self.vf, None, gui=0,
                        callbacks=[self.setHingePt_cb], immediate=1)
            self.ap.go(modal=0)
        elif val == 'cursel':
            #use current selection here
            selnodes = self.vf.getSelection()[:]
            len_nodes = len(selnodes)
            if len_nodes>=2:
                self.atomOne=selnodes[0]
                c1 = self.getTransformedCoords(self.atomOne)
                self.lineVertices.append(tuple(c1))
                if selnodes[1]!=self.atomOne:
                    self.atomTwo=selnodes[1]
                else:
                    return 'ERROR'
                c2 = self.getTransformedCoords(self.atomTwo)
                self.lineVertices.append(tuple(c2))
                self.line.Set(vertices=self.lineVertices, faces=[(0,1),])
                self.line.visible=1
                self.spheres.Set(vertices=self.lineVertices)
                self.vf.GUI.VIEWER.Redraw()
                if len_nodes>2:
                    self.atoms = selnodes[2:]
                    c2 = self.getTransformedCoords(self.atomTwo)
                self.setHingePt_cb(selnodes)
        elif val == 'xyz':
            # NOTE(review): this branch reads the xval/yval/zval widgets
            # whose creation is commented out in buildForm — confirm.
            pt = [float(self.xval.get()), float(self.yval.get()), float(self.zval.get())]
            self.spheres.Set(vertices = (pt,))
            if hasattr(self, 'ap'):
                self.ap.stop()
                del self.ap
            #self.hingeAtoms.Set(visible=0, tagModified=False)
            self.centerList = [list(map(float,[self.xval.get(),self.yval.get(),self.zval.get()])),]
            self.spheres.Set(vertices = self.centerList)
        else:
            msg = val + "is not a recognized center for SelectInSphere"
            self.vf.warningMsg(msg)
            return 'ERROR'
        #self.drawHinge()
        if hasattr(self, 'form') and self.form:
            self.form.lift()
        self.old_val=val
        self.vf.GUI.VIEWER.Redraw()
def get_state(self, event=None):
val = 'no first atom'
if self.atomOne is not None:
if self.atomTwo is not None:
if len(self.atoms):
val = 'ready'
else:
val = 'no atoms to move'
else:
val = 'no second atom'
return val
    def check_torscount(self, atoms):
        """Make sure every residue owning one of *atoms* carries torsion
        bookkeeping; residues seen for the first time get all their bonds
        marked non-rotatable and a zero torsion count."""
        res = atoms.parent.uniq()
        for r in res:
            if not hasattr(r, 'torscount'):
                r.atoms.bonds[0].possibleTors = 0
                r.atoms.bonds[0].activeTors = 0
                r.torscount = 0
    def saveHinge(self, newval, event=None):
        """Validate and store a hinge description.

        *newval* must be [(atom1, atom2), atoms_to_move]; anything else is
        reported and ignored. Ensures the hinge_list key exists and the
        torsion bookkeeping is initialized, then hands off to doitWrapper
        (which logs the call and invokes doit).
        """
        if len(newval)!=2:
            print(newval, ' not required format which is [(atom1, atom2), atoms_to_move]')
            return
        if len(newval[0])!=2:
            print(newval, ' not required format which is [(atom1, atom2), atoms_to_move]')
            return
        (atomOne, atomTwo), atoms = newval
        dict = self.vf.flexDict
        dict.setdefault('hinge_list', [])
        self.check_torscount(atoms)
        #print "flexDict['hinge_list']=", dict['hinge_list']
        kw = {'log': 1}
        self.doitWrapper(*((atomOne, atomTwo), atoms), **kw)
def setHinge_cb(self, event=None):
#callback for Set Hinge button
#print "in setHinge_cb with selectionBase= ", self.selectionBase.get()
state = self.get_state()
if state=='ready':
newval = [(self.atomOne, self.atomTwo), self.atoms]
#print "calling saveHinge with ", newval
self.saveHinge(newval)
elif state=='no atoms to move':
self.warningMsg('atoms to be moved by hinge must be specified first')
elif state=='no second atom':
self.warningMsg('second hinge atom for hinge must be specified first')
else:
self.warningMsg('two hinge points must be specified first!')
    def setHingePt_cb(self, atoms, event=None):
        """Picker callback: consume picked atoms as, successively, the
        first hinge point, the second hinge point, and finally (once atoms
        to move exist) trigger saving the hinge."""
        if not len(atoms):
            return
        state = self.get_state()
        if state=='no first atom':
            self.atomOne=atoms[0]
            c1 = self.getTransformedCoords(self.atomOne)
            self.lineVertices.append(tuple(c1))
        elif state=='no second atom':
            self.atomTwo=atoms[0]
            c2 = self.getTransformedCoords(self.atomTwo)
            self.lineVertices.append(tuple(c2))
            self.line.Set(vertices = self.lineVertices, faces=[(0,1),])
        elif state=='no atoms to move':
            self.warningMsg('atoms to be moved by hinge must be specified first')
        else:
            newval = [(self.atomOne, self.atomTwo), self.atoms]
            self.saveHinge(newval)
            self.lineVertices = []
            self.spheres.Set(vertices=self.lineVertices)
            self.hingeAtoms.Set(vertices=atoms.coords,)
        self.vf.GUI.VIEWER.Redraw()
def guiCallback(self):
""" """
if not hasattr(self,'form'):
self.buildForm()
else:
self.form.root.deiconify()
def close_cb(self, event=None):
#at this point, just cleanup
self.line.Set(visible=False)
self.spheres.Set(visible=False)
self.hingeAtoms.Set(visible=False)
self.vf.GUI.VIEWER.Redraw()
self.form.root.withdraw()
self.stop()
def changeFlexResCt(self,delta):
n=self.vf.flexDict.get('flex_residues_number',0)
n=n+delta
self.vf.flexDict['flex_residues_number']=n
msg = 'current %d flexible residues'%n
self.vf.GUI.pickLabel.configure(text=msg)
    def __call__(self, xxx_todo_changeme, atoms, **kw):
        """
        atomOne: first atom in hinge
        atomTwo: second atom in hinge
        atoms: all atoms which will be moved by this hinge

        Each argument is expanded through the viewer's node machinery; an
        empty expansion aborts with "ERROR". Delegates to doitWrapper.
        """
        (atomOne, atomTwo) = xxx_todo_changeme
        kw['topCommand'] = 0
        kw['busyIdle'] = 1
        kw['log'] = 0
        #self.setUpDisplay()
        atomOne = self.vf.expandNodes(atomOne)
        if not len(atomOne):
            return "ERROR"
        atomOne = atomOne[0]
        atomTwo = self.vf.expandNodes(atomTwo)
        if not len(atomTwo):
            return "ERROR"
        atomTwo = atomTwo[0]
        atoms = self.vf.expandNodes(atoms)
        if not len(atoms):
            return "ERROR"
        self.doitWrapper(*((atomOne, atomTwo), atoms), **kw)
def doit(self, xxx_todo_changeme1, atoms, **kw):
"""
atomOne
"""
(atomOne, atomTwo) = xxx_todo_changeme1
dict = self.vf.flexDict
#if 'hinge_list' is not in dict.keys, add it with [] as its value
hinge_list = dict.setdefault('hinge_list', [])
newval = ((atomOne, atomTwo), atoms)
if not newval in hinge_list:
dict['hinge_list'].append(newval)
dict['all_hinge_atoms'], dict['non_hinge_atoms'] = self.getAllHingeAtoms()
#created a log string here
else:
print(newval, " already saved in flexDict['hinge_list']")
return "ERROR"
    def stop(self):
        """Alias for dismiss."""
        self.dismiss()
    def getObjects(self,pick):
        """Translate a viewer pick on the flexible molecule's 'bonded'
        geometry into the picked bonds; the molecule is located via
        flexDict's flex_residues or hinge_list. Returns a BondSet (or the
        geometry's registered translator result), 'ERROR' when no flexible
        molecule is known."""
        print("in get Objects with ", pick)
        if 'flex_residues' in list(self.vf.flexDict.keys()):
            flexMol = self.vf.flexDict['flex_residues'].top.uniq()[0]
        elif 'hinge_list' in list(self.vf.flexDict.keys()):
            #hinge_list= [[(at1,at2),atoms]]
            #get the first atom's top
            flexMol = self.vf.flexDict['hinge_list'][0][0][0].top
        else:
            return 'ERROR'
        flexMolGeom = flexMol.geomContainer.geoms['bonded']
##         flexMolGeom = flexMol.geomContainer.geoms['lines']
        for o, val in list(pick.hits.items()): #loop over geometries
            primInd = [x[0] for x in val]
            if o != flexMolGeom: continue
            else: g = o.mol.geomContainer
            if o.name in g.geomPickToBonds:
                func = g.geomPickToBonds[o.name]
                if func: return func(o, primInd)
            else:
                l = []
                bonds = g.atoms[o.name].bonds[0]
                for i in range(len(primInd)):
                    l.append(bonds[int(primInd[i])])
                return BondSet(l)
    def done_cb(self):
        """Withdraw the form, if any."""
        self.guiUp = 0
        if self.form: self.form.withdraw()
    def dismiss(self):
        """Restore the saved Shift_L command and close the GUI."""
        if self.save is not None:
            self.vf.setICOM(self.save, modifier="Shift_L", topCommand=0)
            self.save = None
        self.done_cb()
# GUI wrapper: register 'Set Hinge' under the AutoFlex menu
AF_SetHingeGUI = CommandGUI()
AF_SetHingeGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'],\
        menuText['Set Hinge'])
class AF_SetBondRotatableFlag(SetRotatableBonds):
    """set the flag that tells whether a bond is rotatable in an AutoDock
    ligand"""
    def onAddCmdToViewer(self):
        if not hasattr(self.vf, 'setICOM'):
            self.vf.loadCommand('interactiveCommands', 'setICOM', 'Pmv')
        # placeholder for the torsion-count display string
        self.torsStr = None
    def buildCol(self, mol, torscount):
        """Recolor *mol*'s 'bonded' geometry per bond: green = active
        torsion, magenta = possible but inactive, red = not rotatable; also
        update the torsion-count readout.

        NOTE(review): ADflex_processResidues.torsStr is initialized to None
        above but called here — presumably replaced with a callable before
        this runs; confirm.
        """
        self.vf.ADflex_processResidues.torsStr('Current Active Torsions: ' + str(torscount))
        currentbonds=mol.geomContainer.atoms['bonded'].bonds[0]
##         currentbonds=mol.geomContainer.atoms['lines'].bonds[0]
        col = []
        for b in currentbonds:
            if b.possibleTors:
                if b.activeTors: col.append((0,1,0))
                else: col.append((1,0,1))
            else:
                col.append((1,0,0))
        mol.geomContainer.geoms['bonded'].Set(materials=col,
                            inheritMaterial=False,
                            matBind=viewerConst.PER_PART)
##         mol.geomContainer.geoms['lines'].Set(materials=col,
##                             matBind=viewerConst.PER_PART)
        self.vf.GUI.VIEWER.Redraw()
    def setupUndoBefore(self, atoms, rotatable, flexRes=False):
        """Undo simply re-applies the opposite rotatable flag."""
        self.addUndoCall( (atoms, not rotatable), {'redraw':1,'flexRes':flexRes},
                self.name )
    def doit(self, atoms, rotatable, flexRes=False):
        """arguments are the numbers of the two atoms"""
        assert rotatable in [ 0, 1 ]
        if len(atoms) < 2:
            return 'ERROR'
        bonds = atoms[:2].bonds
        if len(bonds[0])==0:
            print('ERROR: no bond between ...')
            return 'ERROR'
        bond = bonds[0][0]
        # only bonds flagged as possible torsions may be toggled
        if not hasattr(bond, 'possibleTors') or bond.possibleTors==0:
            return 'ERROR'
        # update torsion count
        res = atoms[0].parent
        mol = res.top
        torscount = res.torscount
        if bond.activeTors!=rotatable:
            if rotatable==0: inc = -1
            else: inc = 1
            torscount = torscount + inc
            bond.activeTors = rotatable
            res.torscount = torscount
        else:
            # already in the requested state: nothing to do
            return 'ERROR'
        # propagate the delta to the global total and the relevant command
        ttc = self.vf.flexDict['torscount']
        ttc = ttc + inc
        if not flexRes:
            self.vf.ADflex_setResidues.torscount = ttc
            self.vf.ADflex_processResidues.torscount = ttc
            self.vf.flexDict['torscount'] = ttc
            if self.vf.hasGui:
                self.vf.ADflex_processResidues.buildCol(mol, ttc)
        else:
            #print 'ttc=', ttc, ' inc=', inc, 'newtorscount =', ttc
            self.vf.ADflex_processHingeResidues.torscount = ttc
            self.vf.flexDict['torscount'] = ttc
            if self.vf.hasGui:
                self.vf.ADflex_processHingeResidues.buildCol(mol, ttc)
    def __call__(self, atoms, rotatable, **kw):
        """None <- setBondRotatableFlag(atoms, rotatable, **kw)
           rotatable can be either 1 or 0
        """
        atoms = self.vf.expandNodes(atoms)
        if len(atoms)<2: return
        assert isinstance( atoms[0], Atom )
        kw.setdefault('flexRes', False)
        self.doitWrapper(*(atoms, rotatable), **kw)
class AF_StepBack(MVCommand):
    """Redisplay the receptor as plain lines colored by atom type and zero
    all torsion counters, undoing the flexible-residue display state."""
    def onAddCmdToViewer(self):
        if not hasattr(self.vf, 'flexDict'):
            self.vf.flexDict = {}
        if not hasattr(self.vf, 'colorByAtomType'):
            self.vf.loadCommand('colorCommands', 'colorByAtomType', 'Pmv')
    def guiCallback(self):
        """called each time the 'Redisplay Macromolecule' button is pressed"""
        dict=self.vf.flexDict
        if 'macroname' not in dict or not dict['macroname']:
            msg='No Protein designated'
            self.vf.warningMsg(msg)
            return
        if 'flex_residues' not in dict or not dict['flex_residues']:
            msg='No residues designated'
            self.vf.warningMsg(msg)
            return
        self.doitWrapper(dict['macroname'])
    def doit(self, molName):
        """Look up *molName*, restore its line display + atom-type colors,
        and reset the torsion counters on the residue-processing commands.
        Returns 'ERROR' when the name does not resolve to one molecule."""
        mols = self.vf.Mols.NodesFromName(molName)
        if not len(mols)==1:
            msg='error in finding ' + molName+ ' in self.vf.Mols'
            self.vf.warningMsg(msg)
            return 'ERROR'
        mol=mols[0]
        #need to redraw the molecule and close the ProcessResidue panel
        self.vf.displayLines(mol, topCommand=0)
        self.vf.colorByAtomType(mol.allAtoms, topCommand=0, redraw=1)
        #need to reset total torscounts:
        self.vf.flexDict['torscount'] = 0
        self.vf.ADflex_setResidues.torscount = 0
        c = self.vf.ADflex_processResidues
        c.torscount = 0
        c.dismiss()
# GUI wrapper: register 'Step Back' under the AutoFlex menu
AF_StepBackGUI = CommandGUI()
AF_StepBackGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'], menuText['Step Back'])
class AF_FlexFileWriter(MVCommand):
    """ allows user to choose an output filename and write autoflex stuff"""

    def onAddCmdToViewer(self):
        """Create the pdbqt writer shared by all write* methods."""
        self.writer = PdbqtWriter()
        self.writer.recType = 'none'
        self.debug = False

    def guiCallback(self):
        """called each time the 'writeFlexFile' button is pressed"""
        outfile = self.vf.askFileSave(types=[('autoflex file:', '*.pdbqt')],
                        title = 'AutoFlex File:')
        if outfile:
            self.doitWrapper(outfile)

    def __call__(self, outfile=None, **kw):
        """None <- ADflex_writeFlexFile(outfile)"""
        if not outfile: return 'ERROR'
        self.doitWrapper(*(outfile,), **kw)

    def doit(self, outfile):
        """Write the flexible-residue (and hinge) pdbqt file to outfile.

        Requires self.vf.flexDict to hold 'macrofilename' (or 'macromol')
        plus 'flex_residues' and/or 'hinge_list'.  On success records
        'flex_filename' and 'flex_types' back into flexDict.
        """
        self.writtenAtoms = []
        flexdict = self.vf.flexDict
        # validate BEFORE opening the output file so error paths do not
        # leave behind an empty, truncated, leaked file (fixes leak)
        if 'macrofilename' not in flexdict:
            if 'macromol' in flexdict:
                # remember the receptor filename for later commands
                flexdict['macrofilename'] = os.path.basename(flexdict['macromol'].parser.filename)
            else:
                self.vf.warningMsg("No Protein File Selected!")
                return 'ERROR'
        if 'flex_residues' not in flexdict and 'hinge_list' not in flexdict:
            self.vf.warningMsg("Must Specify Residues for Flexible Side Chains or set Hinge First")
            return 'ERROR'
        flex_residues = flexdict.get('flex_residues', [])
        flex_types = {}
        self.outatom_counter = 1
        self.torsionCtr = 0
        outfileptr = open(outfile, 'w')
        for item in flex_residues:
            self.writeResidue(item, outfileptr)
            # NOTE: the residue's torsdof is deliberately NOT accumulated
            for a in item.atoms:
                flex_types[a.autodock_element] = 1
        if 'hinge_list' in flexdict:
            for item in flexdict['hinge_list']:
                self.writeHinge(item, outfileptr)
                # include hinge atoms' types, also
                for a in item[0]:
                    flex_types[a.autodock_element] = 1
                for a in item[1]:
                    flex_types[a.autodock_element] = 1
        outfileptr.close()
        flexdict['flex_filename'] = outfile
        flexdict['flex_types'] = list(flex_types.keys())

    def writeHinge(self, item, outfileptr):
        """Write one hinge; item format is [(atomOne, atomTwo), atoms_to_move]."""
        (atomOne, atomTwo), atoms_to_move = item
        atomOne.rnum = 0
        atomTwo.rnum = 0
        atoms_to_move.rnum = -1
        res = atomOne.parent
        outfileptr.write("BEGIN_RES %s %s%4s\n" %(res.type, res.parent.id, res.number))
        # total number of active torsions plus one for the hinge itself.
        # BUGFIX: the per-residue loop below used to reuse the name 'res',
        # clobbering the residue written in BEGIN_RES so END_RES could name
        # a different residue; a distinct loop variable keeps them matched.
        parents_atoms_to_move = atoms_to_move.parent.uniq()
        for pres in parents_atoms_to_move:
            if not hasattr(pres, 'torscount'):
                pres.torscount = 0
        ntors = numpy.add.reduce(parents_atoms_to_move.torscount) + 1
        oStr = "REMARK %d active torsions:\nREMARK status: ('A' for Active; 'I' for Inactive)\n" %(ntors)
        outfileptr.write(oStr)
        atomOne.number = self.outatom_counter
        self.outatom_counter = self.outatom_counter + 1
        atomTwo.number = self.outatom_counter
        remark_counter = 1
        oStr = "REMARK %d A between atoms: %s_%d and %s_%d\n" %(remark_counter, atomOne.name, atomOne.number, atomTwo.name, atomTwo.number)
        outfileptr.write(oStr)
        remark_counter += 1
        # write remarks about all the nested torsions
        all_moving = AtomSet([atomOne , atomTwo]) + atoms_to_move
        bondset = AtomSet(all_moving).bonds[0].get(lambda x: hasattr(x, 'activeTors') and x.activeTors)
        bondset.written = 0
        bTors = 'I'
        for b in bondset:
            if not (b.atom1 in all_moving and b.atom2 in all_moving):
                continue
            if hasattr(b, 'activeTors') and b.activeTors:
                outstring = "REMARK " +" %3d A between atoms: %-3s and %-3s \n" %(remark_counter, b.atom1.name + '_' + b.atom1.parent.name+'_'+b.atom1.name, b.atom2.name + '_' + b.atom2.parent.name + '_'+b.atom2.name)
                outfileptr.write(outstring)
                remark_counter += 1
        outfileptr.write("ROOT\n")
        self.writer.write_atom(outfileptr, atomOne)
        atomOne.used = 1
        outfileptr.write("ENDROOT\n")
        oStr = "BRANCH %d %d\n" %(atomOne.number, atomTwo.number)
        outfileptr.write(oStr)
        self.writer.write_atom(outfileptr, atomTwo)
        self.outatom_counter = self.outatom_counter + 1
        atomTwo.used = 1
        self.writtenAtoms = []
        # start with atomOne in order to proceed by writing atoms bonded to it
        atoms_to_write = AtomSet([atomOne])
        atoms_to_write.extend(atoms_to_move)
        for at1 in atoms_to_write:
            if not at1.used:
                at1.number = self.outatom_counter
                self.writer.write_atom(outfileptr, at1)
                self.outatom_counter = self.outatom_counter + 1
                at1.used = 1
            for bond in at1.bonds:
                at2 = bond.atom1
                if at2==at1:
                    at2 = bond.atom2
                if at2.used or at2 not in atoms_to_move:
                    continue
                if not hasattr(bond, 'activeTors') or not bond.activeTors:
                    # nothing to do for atoms joined by non-rotatable bonds
                    continue
                else:
                    # write the rotatable subtree hanging off this bond
                    marker = self.outatom_counter
                    outstring = "BRANCH %3d %3d\n"%(at1.number, marker)
                    outfileptr.write(outstring)
                    self.writeSubtree(outfileptr, at1, at2)
                    outstring = "ENDBRANCH %3d %3d\n"%(at1.number, marker)
                    outfileptr.write(outstring)
        outfileptr.write( "ENDBRANCH %3d %3d\n"%(atomOne.number, atomTwo.number))
        outfileptr.write("END_RES %s %s%4s\n" %(res.type, res.parent.id, res.number))

    def writeResidue(self, res, outfileptr):
        """Write one flexible residue in flexres pdbqt format.

        The residue must already carry the autotors bookkeeping set up by
        the residue-processing commands: torscount, bondlist, rootlist,
        sideChain; bonds must know activeTors/possibleTors and atoms the
        charges to use.
        """
        outfileptr.write("BEGIN_RES %s %s%4s\n" %(res.type, res.parent.id, res.number))
        # first write out remarks about torsions
        outfileptr.write("REMARK " + "%d" %(res.torscount) + " active torsions:\n")
        outfileptr.write("REMARK status: ('A' for Active; 'I' for Inactive)\n")
        # only want to process bonds pertaining to sideChain atoms
        for b in res.bondlist:
            # hack for covalent docking issues:
            if not hasattr(b, 'possibleTors'):
                b.possibleTors=1
            if not hasattr(b, 'activeTors'):
                b.activeTors=1
            if b.activeTors == 1: bTors = 'A'
            else: bTors = 'I'
            if b.possibleTors:
                if bTors=='A':
                    self.torsionCtr = self.torsionCtr + 1
                    outstring = "REMARK " +" %3d %s between atoms: %-3s and %-3s \n" %(self.torsionCtr,bTors,b.atom1.name,b.atom2.name)
                else:
                    outstring = "REMARK " +" %s between atoms: %-3s and %-3s \n" %(bTors,b.atom1.name,b.atom2.name)
                outfileptr.write(outstring)
        # next write out root which is always the CA atom
        outfileptr.write("ROOT\n")
        assert hasattr(res, 'rootlist')
        # reset used field to serve as visited flag
        res.atoms.used = 0
        # rootlist grows to include atoms up to first active tors in each subtree
        for at in res.rootlist:
            at.used = 1
            for bond in at.bonds:
                if bond.activeTors and bond.possibleTors: continue
                at2 = bond.atom1
                if at2==at: at2 = bond.atom2
                # only track and write out bonds to the sideChain atoms
                if at2 not in res.sideChain: continue
                if at2.used: continue
                if at2 not in res.rootlist:
                    res.rootlist.append(at2)
                    at2.rnum = len(res.rootlist)
        # remove all atoms which have no rotatable bonds from BOTH the
        # rootlist and the sideChain atoms: they will be written in the
        # rigid portion instead
        badList = AtomSet([])
        for at in res.rootlist:
            hasTorsion=0
            for b in at.bonds:
                if b.activeTors:
                    hasTorsion=1
                    break
            if not hasTorsion:
                badList.append(at)
        if len(badList) and len(badList)!=len(res.rootlist):
            res.rootlist = res.rootlist - badList
            res.sideChain = res.sideChain - badList
        # now visit atoms connected to expanded rootlist
        for at in res.rootlist:
            at.number = self.outatom_counter
            self.writer.write_atom(outfileptr, at)
            at.newindex = self.outatom_counter
            at.used = 1
            self.outatom_counter = self.outatom_counter + 1
        outfileptr.write("ENDROOT\n")
        # last write out the rest of the stuff, using writeSubtree
        for at in res.rootlist:
            for b in at.bonds:
                at2 = b.atom1
                if at2 == at: at2 = b.atom2
                if at2 in res.rootlist:
                    continue
                if at2 not in res.sideChain:
                    continue
                if at2.used:
                    continue
                self.process(at, at2, outfileptr)
        outfileptr.write("END_RES %s %s%4s\n" %(res.type, res.parent.id, res.number))

    def process(self, fromAtom, nextAtom, outfileptr):
        """Emit a BRANCH/ENDBRANCH pair for the subtree rooted at nextAtom,
        recursing into any queued deeper branches."""
        startIndex = fromAtom.number
        endIndex = self.outatom_counter
        outstring = "BRANCH %3d %3d\n"%(startIndex, endIndex)
        outfileptr.write(outstring)
        queue = self.writeBreadthFirst(outfileptr, fromAtom, nextAtom)
        if self.debug: print(fromAtom.name, ':', nextAtom.name, ': queue=', queue)
        if len(queue):
            for fromAtom, nextAtom in queue:
                if self.debug: print(" processing queue entry: ", fromAtom.name, '-', nextAtom.name)
                self.process(fromAtom, nextAtom, outfileptr)
        outstring = "ENDBRANCH %3d %3d\n"%(startIndex, endIndex)
        outfileptr.write(outstring)

    def writeLevel(self, atom, outfptr):
        """
        write all atoms bonded to atoms bonded to this atom by non-rotatable
        bonds; returns a queue of (atom, at2) pairs for rotatable bonds that
        still need their own BRANCH blocks
        """
        if self.debug:
            print("\n\nin writeLevel with ", atom.name, " outatom_counter=", self.outatom_counter)
            print("len(", atom.name, ").bonds=", len(atom.bonds))
        queue = []
        nextAts = []
        for b in atom.bonds:
            if self.debug:
                print("processing b=", b.atom1.name, '-', b.atom2.name, ' activeTors=', b.activeTors)
                print('atom1 in writtenAtoms=', b.atom1 in self.writtenAtoms)
                print('atom2 in writtenAtoms=', b.atom2 in self.writtenAtoms)
            if b.activeTors:
                # rotatable: defer to a later BRANCH block
                at2 = b.atom1
                if at2==atom: at2=b.atom2
                queue.append((atom, at2))
                if self.debug: print(atom.name, 'wL: queue=', queue)
                continue
            a2 = b.atom1
            if a2==atom:
                a2 = b.atom2
            if a2.used:
                if self.debug: print("!!a2 is already used!!", a2.name)
                continue
            if a2 not in self.writtenAtoms:
                a2.number = a2.newindex = self.outatom_counter
                if self.debug: print("writeLevel: wrote bonded atom named=", a2.name, 'a2.used=', a2.used)
                self.writer.write_atom(outfptr, a2)
                self.writtenAtoms.append(a2)
                a2.used = 1
                self.outatom_counter+=1
                nextAts.append(a2)
        # recurse one level deeper for each newly written atom
        for a2 in nextAts:
            if self.debug:
                print('in for nextAts loop with a2=', a2.name)
                print('calling wL')
            nq = self.writeLevel(a2, outfptr)
            if len(nq):
                if self.debug: print("extending queue with", nq)
                queue.extend(nq)
        if self.debug:
            print('returning queue=', queue)
        return queue

    def writeBreadthFirst(self, outfptr, fromAtom, startAtom):
        """
        queue <- writeBreadthFirst(outfptr, fromAtom, startAtom)
        writeBreadthFirst visits all the atoms in the current level
        then the first level down etc in a Breadth First Order traversal,
        writing atoms joined by non-rotatable bonds and queueing
        (parent, child) pairs for rotatable ones.  It is used to write out
        the molecule with the correct format for AutoDock: appropriate
        BRANCH/ENDBRANCH statements are added by the caller (process).
        """
        if self.debug:
            print("in wBF with fromAtom=", fromAtom.name, '+ startAtom=', startAtom.name, 'startAtom.used=', startAtom.used)
        queue = []
        if startAtom.used==0:
            startAtom.used = 1
            startAtom.number = startAtom.newindex = self.outatom_counter
            self.writer.write_atom(outfptr,startAtom)
            if self.debug: print('wBF: wrote ', startAtom.name)
            self.writtenAtoms.append(startAtom)
            self.outatom_counter += 1
            if self.debug: print("self.outatom_counter=", self.outatom_counter)
            for bond in startAtom.bonds:
                if not hasattr(bond, 'activeTors'):
                    continue
                at2 = bond.atom1
                if at2==startAtom: at2 = bond.atom2
                if at2==fromAtom: continue   #this is current bond
                elif not at2.used:
                    if bond.activeTors:
                        queue.append((startAtom,at2))
                    else:
                        at2.number = at2.newindex = self.outatom_counter
                        if self.debug:
                            print("\n\nwriting and calling wL with nA=", at2.name, '-', at2.number)
                        self.writer.write_atom(outfptr, at2)
                        if self.debug: print('wBF2: wrote ', at2.name)
                        at2.written = 1
                        self.writtenAtoms.append(at2)
                        at2.newindex = self.outatom_counter
                        self.outatom_counter = self.outatom_counter + 1
                        if self.debug: print('!!!2:calling wL')
                        newQ = self.writeLevel(at2, outfptr)
                        if self.debug: print("newQ=", newQ)
                        at2.used = 1
                        if len(newQ):
                            if self.debug: print("@@@@len(newq)=", len(newQ))
                            queue.extend(newQ)
                            if self.debug: print("queue=", queue)
        if self.debug:
            print(" currently queue=", end=' ')
            for atom1, atom2 in queue:
                print(atom1.name, '-', atom2.name, ',', end=' ')
            print()
        return queue

    def writeSubtree(self, outfptr, fromAtom, startAtom):
        """
        None <-writeSubtree(outfptr, fromAtom, startAtom)
        writeSubtree recursively visits the atoms in the current
        'subtree' of the molecule in a Depth First Order traversal.
        It is used to write out the molecule with the correct format
        for AutoDock. Specifically, appropriate BRANCH/ENDBRANCH
        statements are added.
        """
        if startAtom.used==0:
            startAtom.used = 1
            at = startAtom
            # first pass: write the atoms joined by non-rotatable bonds
            for bond in startAtom.bonds:
                if bond.activeTors:
                    continue
                marker = self.outatom_counter
                nextAtom = bond.atom1
                if nextAtom==startAtom:
                    nextAtom = bond.atom2
                if nextAtom==fromAtom:
                    continue
                if not nextAtom.used:
                    if hasattr(bond,'incycle'):
                        # a ring bond: only write the atom once per cycle
                        if not hasattr(nextAtom, 'cycleout'):
                            nextAtom.cycleout = 1
                            nextAtom.newindex = self.outatom_counter
                            nextAtom.number = self.outatom_counter
                            self.writer.write_atom(outfptr,nextAtom)
                            self.writtenAtoms.append(nextAtom)
                            self.outatom_counter = self.outatom_counter+1
                    else:
                        nextAtom.newindex = self.outatom_counter
                        nextAtom.number = self.outatom_counter
                        self.writer.write_atom(outfptr,nextAtom)
                        self.writtenAtoms.append(nextAtom)
                        self.outatom_counter = self.outatom_counter+1
            # second pass: recurse down rotatable bonds inside BRANCH blocks
            for bond in startAtom.bonds:
                marker = self.outatom_counter
                nextAtom = bond.atom1
                if nextAtom==startAtom:
                    nextAtom = bond.atom2
                if nextAtom==fromAtom:
                    continue
                if not nextAtom.used:
                    testcond = len(nextAtom.bonds)
                    if bond.activeTors and bond.possibleTors:
                        if testcond >0:
                            outstring = "BRANCH %3d %3d\n"%(at.newindex,marker)
                            outfptr.write(outstring)
                    nextAtom.newindex = self.outatom_counter
                    nextAtom.number = self.outatom_counter
                    self.writer.write_atom(outfptr,nextAtom)
                    self.writtenAtoms.append(nextAtom)
                    self.outatom_counter = self.outatom_counter+1
                    # BUGFIX: was self.WriteSubtree(startAtom, nextAtom) --
                    # no such attribute (wrong case) and wrong arity, which
                    # raised AttributeError whenever this path was reached
                    self.writeSubtree(outfptr, startAtom, nextAtom)
                    if bond.activeTors and bond.possibleTors:
                        if testcond >0:
                            outstring = "ENDBRANCH %3d %3d\n"%(at.newindex,marker)
                            outfptr.write(outstring)
        return

    def writeFile_cb(self):
        """This callback function allows the user to select an output file, if none has
        been designated before. It opens that file and calls the write method of
        AutoTors to write pdbqt-formatted for AutoDock4, including appropriate keywords
        such as ROOT/ENDROOT, BRANCH/ENDBRANCH etc.

        NOTE(review): this method references AutoTors-era attributes
        (self.rootnum, self.message, self.ROOT, self.write) that this class
        does not define -- it appears to be legacy code; verify before use."""
        if self.rootnum > 0:
            if self.outfile == None:
                self.message( "no output file specified, please select:\n")
                from tkinter.dialog import Dialog
                from tkinter.filedialog import SaveFileDialog
                fd2 = SaveFileDialog(self.ROOT,title="File for AutoTors Formatted Output")
                self.outfile = fd2.go (key = "test")
            if self.outfile != None:
                # BUGFIX: the file object used to be opened and leaked;
                # close it immediately (the open still truncates the file,
                # preserving the original side effect)
                f = open(self.outfile, 'w')
                f.close()
                self.write()
        else:
            self.message( "WRITE ERROR:no root atoms specified yet!\n")
# GUI wrapper: adds the flexible-file writer under the AutoFlex 'Write' cascade.
AF_FlexFileWriterGUI = CommandGUI()
AF_FlexFileWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'], \
    menuText['writeFlexible'], cascadeName = menuText['WriteMB'])
class AF_RigidFileWriter(MVCommand):
    """ allows user to choose an output filename and write reduced macromolecule"""

    def onAddCmdToViewer(self):
        """Load the Pmv pdbqt writer command and set up a record writer."""
        if not hasattr(self.vf, 'writePDBQT'):
            self.vf.loadCommand('fileCommands','writePDBQT', 'Pmv')
        self.PdbqtWriter = PdbqtWriter()
        self.PdbqtWriter.recType = 'none'

    def __call__(self, outfile=None, **kw):
        """None<-ADflex_writeRigidFile(outfile)"""
        if not outfile: return 'ERROR'
        self.doitWrapper(*(outfile,), **kw)

    def doit(self, outfile):
        """Write every atom NOT in a flexible sidechain or hinge to outfile
        (pdbqt); records 'rigid_filename'/'rigid_types' in flexDict."""
        d = self.vf.flexDict
        if not ('flex_residues' in d or 'all_hinge_atoms' in d) :
            msg = 'flexible residues or hinge must be specified first!'
            self.vf.warningMsg(msg)
            return 'ERROR'
        flex_residues = d.get('flex_residues', ResidueSet())
        all_hinge_atoms = d.get('all_hinge_atoms', AtomSet())
        mol = d['macromol']
        rigidResidues = d.get('rigidResidues', mol.chains.residues)
        if len(rigidResidues):
            # start from the whole molecule; flexible sidechain/hinge atoms
            # are subtracted below.  (A dead 'rigidAtoms' computation that
            # was never used has been removed here.)
            outatoms = mol.allAtoms
        else:
            outatoms = AtomSet()
        if len(flex_residues):
            flexSideChain = flex_residues.sideChain
            outatoms = outatoms - flexSideChain
        if len(all_hinge_atoms):
            # remove the hinge atoms, also
            outatoms = outatoms - all_hinge_atoms
        if len(outatoms):
            # renumber the remaining rigid atoms consecutively from 1
            outatoms.number = list(range(1, len(outatoms)+1))
            outatoms_type = {}
            for a in outatoms:
                outatoms_type[a.autodock_element] = 1
            outptr = open(outfile, 'w')
            for at in outatoms:
                self.PdbqtWriter.write_atom(outptr, at)
            outptr.close()
            d['rigid_filename'] = outfile
            d['rigid_types'] = list(outatoms_type.keys())
        else:
            d['rigid_filename'] = ""
            d['rigid_types'] = ""
            msg = "no rigid atoms to write: all atoms are in flexible residue"
            self.vf.warningMsg(msg)

    def guiCallback(self):
        """Ask for an output filename, then write the rigid receptor file."""
        outfile = self.vf.askFileSave(types=[('pdbqt file', '*.pdbqt')],
                        title = 'Autoflex Non-Flexible Residue Output File:')
        if outfile:
            self.doitWrapper(outfile)
# GUI wrapper: adds the rigid-file writer under the AutoFlex 'Write' cascade.
AF_RigidFileWriterGUI = CommandGUI()
AF_RigidFileWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'], \
    menuText['writeRigid'], cascadeName = menuText['WriteMB'])
class AF_LigandDirectoryWriter(MVCommand):
    """ allows user to choose a directory of formatted ligands and write flexible output for each ligand"""
    def onAddCmdToViewer(self):
        # make sure the Pmv pdbq writer command is available
        if not hasattr(self.vf, 'writePDBQ'):
            self.vf.loadCommand('fileCommands','writePDBQ', 'Pmv')
    def __call__(self, ligDir=None, **kw):
        """None<-ADflex_writeFlexDir(ligDir)"""
        if not ligDir: return 'ERROR'
        self.doitWrapper(*(ligDir,), **kw)
    def doit(self, ligDir):
        # process every '*.pdbqt' file found in ligDir
        fileList= os.listdir(ligDir)
        for item in fileList:
            xx=string.split(item,'.')
            if len(xx)>1 and xx[-1]=='pdbqt':
                # NOTE(review): ligFileFullName is computed but never used;
                # ADflex_readLigand is given the bare filename, which only
                # works if the process cwd is ligDir -- verify intent
                ligFileFullName=string.join((ligDir, item),'/')
                self.vf.ADflex_readLigand(item,1)
                ##9/11 FIX THIS STUPID NAME
                outputFile='autoflex_'+item
                self.vf.ADflex_writeFlexFile(outputFile)
    def guiCallback(self):
        """called each time the 'writeFlexDir' button is pressed"""
        outfile = self.vf.askFileOpen(types=[('select any formatted autotors file:', '*.out.pdbqt'),('formatted pdbqt file:', '*.pdbqt')],
                        title = 'To Set Directory: Select any autotors formatted file:')
        if outfile:
            # use the directory of the chosen file as the ligand directory
            ligDir=os.path.split(outfile)[0]
            self.doitWrapper(ligDir)
# GUI wrapper for the (currently disabled, see commandList) directory writer.
AF_LigandDirectoryWriterGUI = CommandGUI()
AF_LigandDirectoryWriterGUI.addMenuCommand('AutoToolsBar', menuText['AutoFlexMB'], \
    menuText['writeDir'], cascadeName = menuText['WriteMB'])
# Registry consumed by initModule: each entry binds a command instance to
# the viewer under 'name', with an optional 'gui' menu wrapper (None = no
# menu entry).  Commented-out entries are deliberately disabled.
commandList = [
    {'name':'ADflex_readMacro','cmd':AF_MacroReader(),'gui':AF_MacroReaderGUI},
    {'name':'ADflex_chooseMacro','cmd':AF_MacroChooser(),'gui':AF_MacroChooserGUI},
    {'name':'ADflex_setResidues','cmd':AF_SelectResidues(),'gui':AF_SelectResiduesGUI},
    {'name':'ADflex_processResidues','cmd':AF_ProcessResidues(),'gui':None},
    {'name':'ADflex_setupCovalentResidue','cmd':AF_SetupCovalentFlexibleResidue(),'gui':AF_SetupCovalentResidueGUI},
    #{'name':'ADflex_processHingeResidues','cmd':AF_ProcessHingeResidues(),'gui':None},
    {'name':'ADflex_setBondRotatableFlag','cmd':AF_SetBondRotatableFlag(),'gui':None},
    #{'name':'ADflex_setHinge','cmd':AF_SetHinge(),'gui':AF_SetHingeGUI},
    #{'name':'ADflex_editHinge','cmd':AF_EditHinge(),'gui':None},
    {'name':'ADflex_stepBack','cmd':AF_StepBack(),'gui':AF_StepBackGUI},
    {'name':'ADflex_writeFlexFile','cmd':AF_FlexFileWriter(),'gui':AF_FlexFileWriterGUI},
    {'name':'ADflex_writeRigidFile','cmd':AF_RigidFileWriter(),'gui':AF_RigidFileWriterGUI},
    #{'name':'ADflex_writeFlexDir','cmd':AF_LigandDirectoryWriter(),'gui':AF_LigandDirectoryWriterGUI}
    ]
def initModule(vf):
    """Register every AutoFlex command from commandList on viewer vf and,
    when a GUI is present, style the AutoTools menu bar."""
    for dict in commandList:
        vf.addCommand(dict['cmd'], dict['name'], dict['gui'])
    if vf.hasGui:
        vf.GUI.menuBars['AutoToolsBar'].menubuttons[menuText['AutoFlexMB']].config(bg='tan',underline='-1')
        if not hasattr(vf.GUI, 'adtBar'):
            # cache the menu bar and its containing frame for other modules
            vf.GUI.adtBar = vf.GUI.menuBars['AutoToolsBar']
            vf.GUI.adtFrame = list(vf.GUI.adtBar.menubuttons.values())[0].master
#        if hasattr(vf.GUI, 'ligandLabel'):
#            vf.GUI.ligandLabelLabel.pack_forget()
#            vf.GUI.ligandLabelLabel.pack(side='left')
#            vf.GUI.ligandLabel.pack_forget()
#            vf.GUI.ligandLabel.pack(side='left')
#        else:
#            vf.GUI.ligandLabelLabel = Tkinter.Label(vf.GUI.adtFrame, \
#                            text="Ligand:", bg='tan')
#            vf.GUI.ligandLabelLabel.pack(side='left')
#            vf.GUI.ligandLabel=Tkinter.Label(vf.GUI.adtFrame, text="None", width=4,
#                            relief='sunken', borderwidth=1,
#                            anchor='w' )
#            vf.GUI.ligandLabel.pack(side='left')
#        if hasattr(vf.GUI, 'receptorLabel'):
#            vf.GUI.receptorLabelLabel.pack_forget()
#            vf.GUI.receptorLabelLabel.pack(side='left')
#            vf.GUI.receptorLabel.pack_forget()
#            vf.GUI.receptorLabel.pack(side='left')
#        else:
#            vf.GUI.receptorLabelLabel = Tkinter.Label(vf.GUI.adtFrame, \
#                            text="Receptor:", bg='tan')
#            vf.GUI.receptorLabelLabel.pack(side='left')
#            vf.GUI.receptorLabel=Tkinter.Label(vf.GUI.adtFrame, text="None", width=4,
#                            relief='sunken', borderwidth=1,
#                            anchor='w' )
#            vf.GUI.receptorLabel.pack(side='left')
| [
"tkinter.StringVar",
"DejaVu.IndexedPolylines.IndexedPolylines",
"MolKit.protein.ResidueSet",
"DejaVu.Points.CrossSet",
"Pmv.moleculeViewer.EditAtomsEvent",
"MolKit.bondSelector.RotatableBondSelector",
"tkinter.simpledialog.SimpleDialog",
"AutoDockTools.atomTypeTools.AutoDock4_AtomTyper",
"MolKit.bo... | [((5162, 5174), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (5172, 5174), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((10989, 11001), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (10999, 11001), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((18638, 18650), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (18648, 18650), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((29836, 29848), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (29846, 29848), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((40632, 40644), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (40642, 40644), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((50565, 50577), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (50575, 50577), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((56532, 56544), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (56542, 56544), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((91259, 91271), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (91269, 91271), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((96051, 96063), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (96061, 96063), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((116389, 116401), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (116399, 116401), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((119286, 119298), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (119296, 119298), False, 'from ViewerFramework.VFCommand import CommandGUI\n'), ((120816, 120828), 'ViewerFramework.VFCommand.CommandGUI', 'CommandGUI', ([], {}), '()\n', (120826, 120828), False, 'from 
ViewerFramework.VFCommand import CommandGUI\n'), ((5517, 5541), 'Pmv.mvCommand.MVCommand.__init__', 'MVCommand.__init__', (['self'], {}), '(self)\n', (5535, 5541), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((6620, 6667), 'Pmv.guiTools.MoleculeChooser', 'MoleculeChooser', (['self.vf', 'self.mode', 'self.title'], {}), '(self.vf, self.mode, self.title)\n', (6635, 6667), False, 'from Pmv.guiTools import MoleculeChooser\n'), ((13572, 13629), 'MolKit.protein.ResidueSet', 'ResidueSet', (["[x for x in flex_residues if x.type == 'PRO']"], {}), "([x for x in flex_residues if x.type == 'PRO'])\n", (13582, 13629), False, 'from MolKit.protein import Residue, ResidueSet, Chain\n'), ((13746, 13803), 'MolKit.protein.ResidueSet', 'ResidueSet', (["[x for x in flex_residues if x.type == 'HOH']"], {}), "([x for x in flex_residues if x.type == 'HOH'])\n", (13756, 13803), False, 'from MolKit.protein import Residue, ResidueSet, Chain\n'), ((14452, 14472), 'MolKit.protein.ResidueSet', 'ResidueSet', (['flexList'], {}), '(flexList)\n', (14462, 14472), False, 'from MolKit.protein import Residue, ResidueSet, Chain\n'), ((14656, 14692), 'numpy.add.reduce', 'numpy.add.reduce', (['flexList.torscount'], {}), '(flexList.torscount)\n', (14672, 14692), False, 'import numpy\n'), ((15814, 15837), 'MolKit.bondSelector.RotatableBondSelector', 'RotatableBondSelector', ([], {}), '()\n', (15835, 15837), False, 'from MolKit.bondSelector import RotatableBondSelector, AmideBondSelector\n'), ((18993, 19025), 'AutoDockTools.autotorsCommands.SetRotatableBonds.__init__', 'SetRotatableBonds.__init__', (['self'], {}), '(self)\n', (19019, 19025), False, 'from AutoDockTools.autotorsCommands import MAXTORS, SetRotatableBonds\n'), ((19034, 19059), 'Pmv.mvCommand.MVBondICOM.__init__', 'MVBondICOM.__init__', (['self'], {}), '(self)\n', (19053, 19059), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((23748, 23767), 'MolKit.protein.ResidueSet', 'ResidueSet', 
(['badList'], {}), '(badList)\n', (23758, 23767), False, 'from MolKit.protein import Residue, ResidueSet, Chain\n'), ((24374, 24383), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (24381, 24383), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((30795, 30819), 'Pmv.mvCommand.MVCommand.__init__', 'MVCommand.__init__', (['self'], {}), '(self)\n', (30813, 30819), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((30975, 31020), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': '"""Setup Covalent Ligand"""'}), "(title='Setup Covalent Ligand')\n", (30989, 31020), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((35580, 35617), 'os.path.basename', 'os.path.basename', (['rec.parser.filename'], {}), '(rec.parser.filename)\n', (35596, 35617), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((37143, 37180), 'os.path.basename', 'os.path.basename', (['rec.parser.filename'], {}), '(rec.parser.filename)\n', (37159, 37180), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((38096, 38128), 'Pmv.moleculeViewer.EditAtomsEvent', 'EditAtomsEvent', (['"""coords"""', 'movAts'], {}), "('coords', movAts)\n", (38110, 38128), False, 'from Pmv.moleculeViewer import EditAtomsEvent\n'), ((39461, 39498), 'os.path.basename', 'os.path.basename', (['rec.parser.filename'], {}), '(rec.parser.filename)\n', (39477, 39498), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((41024, 41056), 'AutoDockTools.autotorsCommands.SetRotatableBonds.__init__', 'SetRotatableBonds.__init__', (['self'], {}), '(self)\n', (41050, 41056), False, 'from AutoDockTools.autotorsCommands import MAXTORS, SetRotatableBonds\n'), ((41065, 41090), 'Pmv.mvCommand.MVBondICOM.__init__', 'MVBondICOM.__init__', (['self'], {}), '(self)\n', (41084, 41090), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((42452, 42475), 
'MolKit.bondSelector.RotatableBondSelector', 'RotatableBondSelector', ([], {}), '()\n', (42473, 42475), False, 'from MolKit.bondSelector import RotatableBondSelector, AmideBondSelector\n'), ((44746, 44786), 'numpy.add.reduce', 'numpy.add.reduce', (['hinge_resSet.torscount'], {}), '(hinge_resSet.torscount)\n', (44762, 44786), False, 'import numpy\n'), ((50747, 50777), 'Pmv.mvCommand.MVCommand.__init__', 'MVCommand.__init__', (['self', 'func'], {}), '(self, func)\n', (50765, 50777), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((50786, 50811), 'Pmv.mvCommand.MVAtomICOM.__init__', 'MVAtomICOM.__init__', (['self'], {}), '(self)\n', (50805, 50811), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((50886, 50897), 'MolKit.molecule.AtomSet', 'AtomSet', (['[]'], {}), '([])\n', (50893, 50897), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((56826, 56856), 'Pmv.mvCommand.MVCommand.__init__', 'MVCommand.__init__', (['self', 'func'], {}), '(self, func)\n', (56844, 56856), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((56865, 56890), 'Pmv.mvCommand.MVAtomICOM.__init__', 'MVAtomICOM.__init__', (['self'], {}), '(self)\n', (56884, 56890), False, 'from Pmv.mvCommand import MVCommand, MVBondICOM, MVAtomICOM\n'), ((57672, 57694), 'numpy.array', 'numpy.array', (['c[0]', '"""f"""'], {}), "(c[0], 'f')\n", (57683, 57694), False, 'import numpy\n'), ((63011, 63047), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': '"""Set Up Hinge"""'}), "(title='Set Up Hinge')\n", (63025, 63047), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((76744, 76768), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': '""""""'}), "(title='')\n", (76758, 76768), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((81122, 81131), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (81129, 81131), False, 
'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((81178, 81187), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (81185, 81187), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((96329, 96342), 'MolKit.pdbWriter.PdbqtWriter', 'PdbqtWriter', ([], {}), '()\n', (96340, 96342), False, 'from MolKit.pdbWriter import PdbqtWriter\n'), ((101493, 101511), 'MolKit.molecule.AtomSet', 'AtomSet', (['[atomOne]'], {}), '([atomOne])\n', (101500, 101511), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((105455, 105466), 'MolKit.molecule.AtomSet', 'AtomSet', (['[]'], {}), '([])\n', (105462, 105466), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((116854, 116867), 'MolKit.pdbWriter.PdbqtWriter', 'PdbqtWriter', ([], {}), '()\n', (116865, 116867), False, 'from MolKit.pdbWriter import PdbqtWriter\n'), ((119973, 119991), 'os.listdir', 'os.listdir', (['ligDir'], {}), '(ligDir)\n', (119983, 119991), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((4598, 4625), 'os.path.splitext', 'os.path.splitext', (['macroFile'], {}), '(macroFile)\n', (4614, 4625), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((9299, 9418), 'tkinter.simpledialog.SimpleDialog', 'SimpleDialog', (['self.vf.GUI.ROOT'], {'text': 'nphs_msg', 'buttons': "['No', 'Yes']", 'default': '(1)', 'title': '"""Merge Non-polar Hydrogens?"""'}), "(self.vf.GUI.ROOT, text=nphs_msg, buttons=['No', 'Yes'],\n default=1, title='Merge Non-polar Hydrogens?')\n", (9311, 9418), False, 'from tkinter.simpledialog import SimpleDialog\n'), ((12623, 12714), 'tkinter.simpledialog.SimpleDialog', 'SimpleDialog', (['self.vf.GUI.ROOT'], {'text': 'msg', 'buttons': "['No', 'Yes']", 'default': '(1)', 'title': 'title'}), "(self.vf.GUI.ROOT, text=msg, buttons=['No', 'Yes'], default=1,\n title=title)\n", (12635, 12714), False, 'from tkinter.simpledialog import SimpleDialog\n'), ((14916, 14928), 
'MolKit.protein.ResidueSet', 'ResidueSet', ([], {}), '()\n', (14926, 14928), False, 'from MolKit.protein import Residue, ResidueSet, Chain\n'), ((19600, 19642), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (19617, 19642), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((20399, 20441), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (20416, 20441), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((20585, 20624), 'tkinter.IntVar', 'tkinter.IntVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (20599, 20624), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((20793, 20830), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': '"""Torsion Count"""'}), "(title='Torsion Count')\n", (20807, 20830), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((28304, 28337), 'MolKit.molecule.AtomSet', 'AtomSet', (['[bond.atom1, bond.atom2]'], {}), '([bond.atom1, bond.atom2])\n', (28311, 28337), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((39624, 39637), 'MolKit.pdbWriter.PdbqtWriter', 'PdbqtWriter', ([], {}), '()\n', (39635, 39637), False, 'from MolKit.pdbWriter import PdbqtWriter\n'), ((41604, 41646), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (41621, 41646), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((44904, 44946), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (44921, 44946), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((45090, 45129), 'tkinter.IntVar', 'tkinter.IntVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (45104, 45129), False, 'import types, _py2k_string as string, tkinter, os, 
Pmw\n'), ((45304, 45341), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': '"""Torsion Count"""'}), "(title='Torsion Count')\n", (45318, 45341), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((49235, 49268), 'MolKit.molecule.AtomSet', 'AtomSet', (['[bond.atom1, bond.atom2]'], {}), '([bond.atom1, bond.atom2])\n', (49242, 49268), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((51485, 51527), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (51502, 51527), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((52609, 52676), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': '"""Edit Atoms to be moved by current Hinge by:"""'}), "(title='Edit Atoms to be moved by current Hinge by:')\n", (52623, 52676), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((57639, 57654), 'MolKit.molecule.AtomSet', 'AtomSet', (['[atom]'], {}), '([atom])\n', (57646, 57654), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((58487, 58546), 'DejaVu.Geom.Geom', 'Geom', (['"""setAutoFlexHingeGeoms"""'], {'shape': '(0, 0)', 'protected': '(True)'}), "('setAutoFlexHingeGeoms', shape=(0, 0), protected=True)\n", (58491, 58546), False, 'from DejaVu.Geom import Geom\n'), ((58684, 58825), 'DejaVu.Spheres.Spheres', 'Spheres', ([], {'name': '"""AutoFlexHinge_spheres"""', 'materials': '((1.0, 1.0, 0),)', 'shape': '(0, 3)', 'radii': '(0.2)', 'quality': '(15)', 'inheritMaterial': '(0)', 'protected': '(True)'}), "(name='AutoFlexHinge_spheres', materials=((1.0, 1.0, 0),), shape=(0,\n 3), radii=0.2, quality=15, inheritMaterial=0, protected=True)\n", (58691, 58825), False, 'from DejaVu.Spheres import Spheres\n'), ((59212, 59346), 'DejaVu.IndexedPolylines.IndexedPolylines', 'IndexedPolylines', (['"""distLine"""'], {'materials': '((1, 1, 0),)', 'inheritMaterial': '(0)', 
'lineWidth': '(3)', 'stippleLines': '(1)', 'protected': '(True)', 'visible': '(True)'}), "('distLine', materials=((1, 1, 0),), inheritMaterial=0,\n lineWidth=3, stippleLines=1, protected=True, visible=True)\n", (59228, 59346), False, 'from DejaVu.IndexedPolylines import IndexedPolylines\n'), ((59698, 59841), 'DejaVu.Points.CrossSet', 'CrossSet', (['"""AutoFlexHinge_hingeAtoms"""'], {'inheritMaterial': '(0)', 'materials': '((1.0, 0.3, 0),)', 'offset': '(0.1)', 'lineWidth': '(2)', 'protected': '(True)', 'visible': '(True)'}), "('AutoFlexHinge_hingeAtoms', inheritMaterial=0, materials=((1.0, \n 0.3, 0),), offset=0.1, lineWidth=2, protected=True, visible=True)\n", (59706, 59841), False, 'from DejaVu.Points import CrossSet\n'), ((59996, 60038), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (60013, 60038), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((60072, 60114), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (60089, 60114), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((60152, 60194), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (60169, 60194), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((60230, 60272), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (60247, 60272), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((60301, 60340), 'tkinter.IntVar', 'tkinter.IntVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (60315, 60340), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((60409, 60448), 'tkinter.IntVar', 'tkinter.IntVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (60423, 60448), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), 
((77736, 77759), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': 's'}), '(title=s)\n', (77750, 77759), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((80848, 80857), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (80855, 80857), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((80900, 80909), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (80907, 80909), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((82746, 82823), 'Pmv.picker.AtomPicker', 'AtomPicker', (['self.vf', 'None'], {'gui': '(0)', 'callbacks': '[self.setHingePt_cb]', 'immediate': '(1)'}), '(self.vf, None, gui=0, callbacks=[self.setHingePt_cb], immediate=1)\n', (82756, 82823), False, 'from Pmv.picker import AtomPicker\n'), ((99510, 99559), 'numpy.add.reduce', 'numpy.add.reduce', (['parents_atoms_to_move.torscount'], {}), '(parents_atoms_to_move.torscount)\n', (99526, 99559), False, 'import numpy\n'), ((100221, 100248), 'MolKit.molecule.AtomSet', 'AtomSet', (['[atomOne, atomTwo]'], {}), '([atomOne, atomTwo])\n', (100228, 100248), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((117445, 117457), 'MolKit.protein.ResidueSet', 'ResidueSet', ([], {}), '()\n', (117455, 117457), False, 'from MolKit.protein import Residue, ResidueSet, Chain\n'), ((117510, 117519), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (117517, 117519), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((117996, 118005), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (118003, 118005), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((120037, 120060), '_py2k_string.split', 'string.split', (['item', '"""."""'], {}), "(item, '.')\n", (120049, 120060), True, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((4183, 4207), 'os.path.split', 'os.path.split', (['macroFile'], {}), '(macroFile)\n', (4196, 
4207), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((4230, 4256), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (4246, 4256), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((7711, 7748), 'os.path.basename', 'os.path.basename', (['mol.parser.filename'], {}), '(mol.parser.filename)\n', (7727, 7748), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((8357, 8378), 'AutoDockTools.atomTypeTools.AutoDock4_AtomTyper', 'AutoDock4_AtomTyper', ([], {}), '()\n', (8376, 8378), False, 'from AutoDockTools.atomTypeTools import AutoDock4_AtomTyper\n'), ((10615, 10654), '_py2k_string.split', 'string.split', (['listChooser.entries[i][0]'], {}), '(listChooser.entries[i][0])\n', (10627, 10654), True, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((15986, 16005), 'MolKit.bondSelector.AmideBondSelector', 'AmideBondSelector', ([], {}), '()\n', (16003, 16005), False, 'from MolKit.bondSelector import RotatableBondSelector, AmideBondSelector\n'), ((16131, 16156), 'MolKit.bondSelector.GuanidiniumBondSelector', 'GuanidiniumBondSelector', ([], {}), '()\n', (16154, 16156), False, 'from MolKit.bondSelector import GuanidiniumBondSelector, LeafBondSelector\n'), ((16282, 16300), 'MolKit.bondSelector.LeafBondSelector', 'LeafBondSelector', ([], {}), '()\n', (16298, 16300), False, 'from MolKit.bondSelector import GuanidiniumBondSelector, LeafBondSelector\n'), ((16957, 16999), 'tkinter.StringVar', 'tkinter.StringVar', ([], {'master': 'self.vf.GUI.ROOT'}), '(master=self.vf.GUI.ROOT)\n', (16974, 16999), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((17281, 17304), 'mglutil.gui.InputForm.Tk.gui.InputFormDescr', 'InputFormDescr', ([], {'title': 's'}), '(title=s)\n', (17295, 17304), False, 'from mglutil.gui.InputForm.Tk.gui import InputFormDescr\n'), ((29234, 29244), 'MolKit.molecule.BondSet', 'BondSet', (['l'], {}), '(l)\n', (29241, 29244), False, 'from MolKit.molecule import 
Atom, AtomSet, BondSet, MoleculeSet\n'), ((42624, 42643), 'MolKit.bondSelector.AmideBondSelector', 'AmideBondSelector', ([], {}), '()\n', (42641, 42643), False, 'from MolKit.bondSelector import RotatableBondSelector, AmideBondSelector\n'), ((42769, 42794), 'MolKit.bondSelector.GuanidiniumBondSelector', 'GuanidiniumBondSelector', ([], {}), '()\n', (42792, 42794), False, 'from MolKit.bondSelector import GuanidiniumBondSelector, LeafBondSelector\n'), ((42920, 42938), 'MolKit.bondSelector.LeafBondSelector', 'LeafBondSelector', ([], {}), '()\n', (42936, 42938), False, 'from MolKit.bondSelector import GuanidiniumBondSelector, LeafBondSelector\n'), ((50156, 50166), 'MolKit.molecule.BondSet', 'BondSet', (['l'], {}), '(l)\n', (50163, 50166), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((90954, 90964), 'MolKit.molecule.BondSet', 'BondSet', (['l'], {}), '(l)\n', (90961, 90964), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n'), ((97127, 97177), 'os.path.basename', 'os.path.basename', (["dict['macromol'].parser.filename"], {}), "(dict['macromol'].parser.filename)\n", (97143, 97177), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((116048, 116117), 'tkinter.filedialog.SaveFileDialog', 'SaveFileDialog', (['self.ROOT'], {'title': '"""File for AutoTors Formatted Output"""'}), "(self.ROOT, title='File for AutoTors Formatted Output')\n", (116062, 116117), False, 'from tkinter.filedialog import SaveFileDialog\n'), ((120138, 120170), '_py2k_string.join', 'string.join', (['(ligDir, item)', '"""/"""'], {}), "((ligDir, item), '/')\n", (120149, 120170), True, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((120721, 120743), 'os.path.split', 'os.path.split', (['outfile'], {}), '(outfile)\n', (120734, 120743), False, 'import types, _py2k_string as string, tkinter, os, Pmw\n'), ((117861, 117870), 'MolKit.molecule.AtomSet', 'AtomSet', ([], {}), '()\n', (117868, 117870), False, 'from MolKit.molecule 
import Atom, AtomSet, BondSet, MoleculeSet\n'), ((100284, 100303), 'MolKit.molecule.AtomSet', 'AtomSet', (['all_moving'], {}), '(all_moving)\n', (100291, 100303), False, 'from MolKit.molecule import Atom, AtomSet, BondSet, MoleculeSet\n')] |
# this is a follow up on face-recognition2.py.
# attempt to use a video file based on face-recognition2.py
import os
from model import create_model
import numpy as np
import os.path
import matplotlib.pyplot as plt
from align import AlignDlib
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC
import warnings
import time
from utils import IdentityMetadata, load_image, align_image
import pickle
import cv2

image_folder = 'images'

# Pickle files are binary: they must be opened in 'rb' mode (text mode
# raises UnicodeDecodeError / corrupts the stream on Python 3).  Context
# managers also guarantee the file handles are closed deterministically.
with open('metadata.p', 'rb') as f:
    metadata = pickle.load(f)
with open('embedded.p', 'rb') as f:
    embedded = pickle.load(f)

targets = np.array([m.name for m in metadata])

encoder = LabelEncoder()
encoder.fit(targets)

# Numerical encoding of identities
y = encoder.transform(targets)

# Alternate odd/even split of the dataset into train/test halves.
train_idx = np.arange(metadata.shape[0]) % 2 != 0
test_idx = np.arange(metadata.shape[0]) % 2 == 0

# 50 train examples of 10 identities (5 examples each)
x_train = embedded[train_idx]
y_train = y[train_idx]

# Linear SVM on top of the face embeddings.
svc = LinearSVC()
svc.fit(x_train, y_train)

example_idx = 0
example_prediction = svc.predict([embedded[test_idx][example_idx]])
print(example_prediction)

example_identity = encoder.inverse_transform(example_prediction)[0]
print('Recognized as ' + example_identity)
| [
"sklearn.svm.LinearSVC",
"numpy.array",
"numpy.arange",
"sklearn.preprocessing.LabelEncoder"
] | [((558, 594), 'numpy.array', 'np.array', (['[m.name for m in metadata]'], {}), '([m.name for m in metadata])\n', (566, 594), True, 'import numpy as np\n'), ((605, 619), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (617, 619), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((924, 935), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (933, 935), False, 'from sklearn.svm import LinearSVC\n'), ((721, 749), 'numpy.arange', 'np.arange', (['metadata.shape[0]'], {}), '(metadata.shape[0])\n', (730, 749), True, 'import numpy as np\n'), ((770, 798), 'numpy.arange', 'np.arange', (['metadata.shape[0]'], {}), '(metadata.shape[0])\n', (779, 798), True, 'import numpy as np\n')] |
from unittest import TestCase
from unittest.mock import Mock, create_autospec
import numpy as np
import pandas as pd
from acnportal.acnsim import Simulator
from acnportal.acnsim.network import ChargingNetwork
from acnportal.algorithms import BaseAlgorithm
from acnportal.acnsim.events import EventQueue, Event
from datetime import datetime
from acnportal.acnsim.models import EVSE
class TestSimulator(TestCase):
    """Unit tests for Simulator construction and its schedule/DataFrame helpers."""

    def setUp(self):
        """Build a Simulator over a three-EVSE network with a mocked scheduler."""
        start_time = Mock(datetime)
        net = ChargingNetwork()
        for station_id in ('PS-001', 'PS-002', 'PS-003'):
            net.register_evse(EVSE(station_id, max_rate=32), 240, 0)
        algorithm = create_autospec(BaseAlgorithm)
        algorithm.max_recompute = None
        queue = EventQueue(events=[Event(1), Event(2)])
        self.simulator = Simulator(net, algorithm, queue, start_time)

    def _zero_matrix(self):
        """Zeros with one row per station and one column per timestep."""
        n_stations = len(self.simulator.network.station_ids)
        n_steps = self.simulator.event_queue.get_last_timestamp() + 1
        return np.zeros((n_stations, n_steps))

    def test_correct_on_init_pilot_signals(self):
        np.testing.assert_allclose(self.simulator.pilot_signals, self._zero_matrix())

    def test_correct_on_init_charging_rates(self):
        np.testing.assert_allclose(self.simulator.charging_rates, self._zero_matrix())

    def test_update_schedules_not_in_network(self):
        # 'PS-004' is not registered, so the update must fail loudly.
        schedule = {'PS-001': [24, 16], 'PS-004': [16, 24]}
        with self.assertRaises(KeyError):
            self.simulator._update_schedules(schedule)

    def test_update_schedules_valid_schedule(self):
        schedule = {'PS-001': [24, 16], 'PS-002': [16, 24]}
        self.simulator._update_schedules(schedule)
        expected = np.array([[24, 16], [16, 24], [0, 0]])
        np.testing.assert_allclose(self.simulator.pilot_signals[:, :2], expected)

    def test_index_of_evse_error(self):
        with self.assertRaises(KeyError):
            _ = self.simulator.index_of_evse('PS-004')

    def test_index_of_evse(self):
        self.assertEqual(self.simulator.index_of_evse('PS-002'), 1)

    def test_pilot_signals_as_df(self):
        self.simulator.pilot_signals = np.array([[1, 2], [3, 4], [5, 6]])
        frame = self.simulator.pilot_signals_as_df()
        expected = pd.DataFrame(np.array([[1, 3, 5], [2, 4, 6]]),
                                columns=['PS-001', 'PS-002', 'PS-003'])
        pd.testing.assert_frame_equal(frame, expected)

    def test_charging_rates_as_df(self):
        self.simulator.charging_rates = np.array([[1.1, 2.1], [3.1, 4.1], [5.1, 6.1]])
        frame = self.simulator.charging_rates_as_df()
        expected = pd.DataFrame(np.array([[1.1, 3.1, 5.1], [2.1, 4.1, 6.1]]),
                                columns=['PS-001', 'PS-002', 'PS-003'])
        pd.testing.assert_frame_equal(frame, expected)
"unittest.mock.create_autospec",
"acnportal.acnsim.network.ChargingNetwork",
"unittest.mock.Mock",
"acnportal.acnsim.models.EVSE",
"numpy.array",
"acnportal.acnsim.Simulator",
"acnportal.acnsim.events.Event"
] | [((453, 467), 'unittest.mock.Mock', 'Mock', (['datetime'], {}), '(datetime)\n', (457, 467), False, 'from unittest.mock import Mock, create_autospec\n'), ((486, 503), 'acnportal.acnsim.network.ChargingNetwork', 'ChargingNetwork', ([], {}), '()\n', (501, 503), False, 'from acnportal.acnsim.network import ChargingNetwork\n'), ((520, 547), 'acnportal.acnsim.models.EVSE', 'EVSE', (['"""PS-001"""'], {'max_rate': '(32)'}), "('PS-001', max_rate=32)\n", (524, 547), False, 'from acnportal.acnsim.models import EVSE\n'), ((609, 636), 'acnportal.acnsim.models.EVSE', 'EVSE', (['"""PS-002"""'], {'max_rate': '(32)'}), "('PS-002', max_rate=32)\n", (613, 636), False, 'from acnportal.acnsim.models import EVSE\n'), ((698, 725), 'acnportal.acnsim.models.EVSE', 'EVSE', (['"""PS-003"""'], {'max_rate': '(32)'}), "('PS-003', max_rate=32)\n", (702, 725), False, 'from acnportal.acnsim.models import EVSE\n'), ((791, 821), 'unittest.mock.create_autospec', 'create_autospec', (['BaseAlgorithm'], {}), '(BaseAlgorithm)\n', (806, 821), False, 'from unittest.mock import Mock, create_autospec\n'), ((943, 987), 'acnportal.acnsim.Simulator', 'Simulator', (['network', 'scheduler', 'events', 'start'], {}), '(network, scheduler, events, start)\n', (952, 987), False, 'from acnportal.acnsim import Simulator\n'), ((2303, 2337), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (2311, 2337), True, 'import numpy as np\n'), ((2640, 2686), 'numpy.array', 'np.array', (['[[1.1, 2.1], [3.1, 4.1], [5.1, 6.1]]'], {}), '([[1.1, 2.1], [3.1, 4.1], [5.1, 6.1]])\n', (2648, 2686), True, 'import numpy as np\n'), ((1924, 1962), 'numpy.array', 'np.array', (['[[24, 16], [16, 24], [0, 0]]'], {}), '([[24, 16], [16, 24], [0, 0]])\n', (1932, 1962), True, 'import numpy as np\n'), ((2467, 2499), 'numpy.array', 'np.array', (['[[1, 3, 5], [2, 4, 6]]'], {}), '([[1, 3, 5], [2, 4, 6]])\n', (2475, 2499), True, 'import numpy as np\n'), ((2817, 2861), 'numpy.array', 'np.array', (['[[1.1, 3.1, 
5.1], [2.1, 4.1, 6.1]]'], {}), '([[1.1, 3.1, 5.1], [2.1, 4.1, 6.1]])\n', (2825, 2861), True, 'import numpy as np\n'), ((897, 905), 'acnportal.acnsim.events.Event', 'Event', (['(1)'], {}), '(1)\n', (902, 905), False, 'from acnportal.acnsim.events import EventQueue, Event\n'), ((907, 915), 'acnportal.acnsim.events.Event', 'Event', (['(2)'], {}), '(2)\n', (912, 915), False, 'from acnportal.acnsim.events import EventQueue, Event\n')] |
import chess
import numpy as np
root = None
# MCTS class
class MctsNode():
    """One node of a Monte-Carlo Tree Search over a chess position.

    Wraps a FEN string in a ``chess.Board`` and tracks the visit and
    win/loss statistics used by the UCT selection rule in ``best_child``.
    """

    def __init__(self, state, parent=None, parent_action=None):
        '''
        Initialise a board state.

        state: FEN string describing the position.
        parent: parent MctsNode, or None for the root.
        parent_action: the action that led from the parent to this node.
        '''
        self.state = state
        self.board = chess.Board(state)
        self.parent = parent
        # Force alternating sides: if the parent just played as BLACK, this
        # node plays WHITE, otherwise BLACK.
        # NOTE(review): this overrides the side-to-move encoded in the FEN,
        # and a root (no parent) is always set to BLACK -- confirm this is
        # the intended convention.
        if self.parent and self.parent.board.turn == chess.BLACK:
            self.board.turn = chess.WHITE
        else:
            self.board.turn = chess.BLACK
        self.parent_action = parent_action
        self.children = []
        # MCTS bookkeeping: visit count and win/loss tallies.
        self._num_visits = 0
        self._num_wins = 0
        self._num_losses = 0
        # Legal moves not yet expanded into child nodes.
        self._available_actions = self.get_available_actions()

    def get_q(self):
        '''
        Returns the expected reward (q value) of this node:
        wins minus losses accumulated through backpropagation.
        '''
        return self._num_wins - self._num_losses

    def get_n(self):
        '''
        Returns number of visits to this node so far.
        '''
        return self._num_visits

    def expand(self):
        '''
        Expands this node by one child: pops an untried legal action,
        applies it, and returns the newly created child node.
        '''
        action = self._available_actions.pop()
        new_state = self.move(action)
        new_child_node = MctsNode(new_state, parent=self, parent_action=action)
        self.children.append(new_child_node)
        return new_child_node

    def select(self):
        '''
        Standard MCTS tree policy: descend via best_child while the node
        is fully expanded, and return either a newly expanded node or a
        terminal node to start the simulation from.
        '''
        curr = self
        while not curr.is_terminal():
            if len(curr._available_actions) == 0:
                curr = curr.best_child()
            else:
                return curr.expand() # expandable node
        return curr # terminal node

    def simulate(self):
        '''
        Game simulated from the expanded node with a uniform-random
        rollout policy until an outcome is returned.
        '''
        # we use rollout policy here
        curr = self
        while not curr.is_game_over():
            possible_moves = curr.get_available_actions()
            chosen_move = np.random.randint(len(possible_moves))
            new_board = curr.move(possible_moves[chosen_move])
            # NOTE(review): parent_action here receives the integer index
            # into possible_moves, not the Move object -- confirm that
            # parent_action is never read on rollout nodes.
            curr = MctsNode(state=new_board, parent=curr,
                            parent_action=chosen_move)
        return curr.get_result()

    def backpropagate(self, result):
        '''
        Once we have the result, the win/loss counters and visit count
        are updated on every node up to (and including) the root.
        '''
        if result == 1:
            self._num_wins += 1
        elif result == -1:
            self._num_losses += 1
        self._num_visits += 1
        if self.parent != None:
            self.parent.backpropagate(result)

    def is_terminal(self):
        '''
        Returns true if the node is a terminal (game-over) node.
        '''
        return self.is_game_over()

    def best_child(self, c_param=0.1):
        '''
        Returns the child maximizing the UCT score
        q/n + c_param * sqrt(2 * ln(N) / n).
        '''
        # print(len(self.children))
        weights = [(child.get_q() / child.get_n()) + c_param * np.sqrt((2 *
                   np.log(self.get_n()) / child.get_n())) for child in self.children]
        best_c = np.argmax(weights)
        return self.children[best_c]

    def is_game_over(self):
        # Any of python-chess's draw/mate conditions ends the game.
        result = (self.board.is_checkmate() or self.board.is_stalemate(
        ) or self.board.is_seventyfive_moves() or self.board.is_fivefold_repetition() or self.board.is_insufficient_material())
        return result

    def get_available_actions(self):
        # All currently legal moves for the side to move.
        actions_list = list(self.board.legal_moves)
        return actions_list

    def move(self, action):
        '''
        Returns the FEN of the board state after applying `action`
        (the node's own board is left untouched).
        '''
        next_state = self.board.copy()
        next_state.push(action)
        return next_state.fen()

    def get_result(self):
        '''
        Returns result of the game:
        1 for win, -1 for loss, and 0 for tie.
        '''
        '''
        hardcoded white to human, black to computer
        '''
        if self.board.outcome().winner == chess.WHITE:
            res = -1
        elif self.board.outcome().winner == chess.BLACK:
            res = 1
        elif self.board.outcome().winner == None:
            res = 0
        return res

    def get_best_move(self, num_iter):
        '''
        Runs num_iter select/simulate/backpropagate iterations and
        returns the action of the most valuable child (exploration
        constant 0 => pure exploitation).
        '''
        for i in range(int(num_iter)):
            node = self.select()
            result = node.simulate()
            node.backpropagate(result)
        # print(len(self.children))
        return self.best_child(c_param=0.0).parent_action
def run_mcts(root_state, num_iter):
    """Entry point for gameplay.

    Builds a fresh search tree rooted at ``root_state`` (stored in the
    module-global ``root``).  If the position is already terminal the
    root node itself is returned; otherwise the best move found after
    ``num_iter`` search iterations.
    """
    global root
    print(num_iter)
    root = MctsNode(state=root_state)
    if not root.is_game_over():
        return root.get_best_move(num_iter)
    return root
| [
"chess.Board",
"numpy.argmax"
] | [((249, 267), 'chess.Board', 'chess.Board', (['state'], {}), '(state)\n', (260, 267), False, 'import chess\n'), ((3189, 3207), 'numpy.argmax', 'np.argmax', (['weights'], {}), '(weights)\n', (3198, 3207), True, 'import numpy as np\n')] |
import random
import numpy as np
import json
from tqdm import tqdm
class IUR_Dataset:
    """Episode sampler over a jsonl corpus of per-author documents.

    Each sample packs an author id followed by the flattened, padded
    token ids and attention masks of one randomly-positioned episode.
    """

    def __init__(
        self,
        data_path,
        num_sample_per_author=4,
        episode_length=16,
        max_token_length=32,
        num_authors=None,
    ):
        self.dataset_path = data_path
        self.num_sample_per_author = num_sample_per_author
        self.episode_length = episode_length
        self.max_token_length = max_token_length
        self.num_authors = num_authors
        self.load_data()

    def load_data(self):
        """Parse the jsonl file into column-wise per-author feature lists."""
        print("Loading dataset file: {}".format(self.dataset_path))
        with open(self.dataset_path) as fp:
            records = [json.loads(line.strip()) for line in tqdm(fp.readlines())]
        feature_names = [k for k in records[0].keys() if k != "author_id"]
        # Optionally keep only a prefix of authors -- this only serves to
        # reduce the GPU memory requirements of integration tests.
        if self.num_authors is not None:
            records = records[: self.num_authors]
        self.num_authors = len(records)
        self.author_id = list(range(self.num_authors))
        self.data = {name: [rec[name] for rec in records] for name in feature_names}
        self.num_docs = [len(docs) for docs in self.data["syms"]]

    def sample_random_episode(self, index, episode_length):
        """Slice a contiguous run of episode_length documents at a random offset."""
        offset = random.randint(0, self.num_docs[index] - episode_length)
        episode = {
            key: col[index][offset : offset + episode_length]
            for key, col in self.data.items()
            if len(col) > 0
        }
        episode["author_id"] = self.author_id[index]
        return episode

    def __len__(self):
        return self.num_authors

    def sample_size(self):
        """Length of one flattened sample vector (author id + ids + mask)."""
        return 1 + self.max_token_length * self.episode_length * 2

    def __getitem__(self, index):
        length = min(self.episode_length, self.num_docs[index])
        episode = self.sample_random_episode(index, length)

        def _fit(rows, pad_value):
            # Truncate each row to max_token_length, then right-pad.
            return [
                row[: self.max_token_length]
                + [pad_value] * max(0, self.max_token_length - len(row))
                for row in rows
            ]

        input_ids = _fit(episode["syms_input_ids"], -1)
        attn_mask = _fit(episode["syms_attention_mask"], 0)
        author_id = episode["author_id"]
        flat_ids = np.asarray(input_ids).ravel()
        flat_mask = np.asarray(attn_mask).ravel()
        return np.concatenate((np.array([author_id]), flat_ids, flat_mask))
# Module-level dataset instance used by the exported helper functions below.
# NOTE(review): the path is hard-coded to a cluster filesystem, so importing
# this module fails anywhere that path does not exist -- confirm intended.
test_data = IUR_Dataset(
    "/p/vast1/brain/iur_dataset/bert_tokenization/validation.jsonl", num_authors=1000
)


def sample_dims():
    """Shape of one flattened sample, as a 1-tuple."""
    return (test_data.sample_size(),)


def num_test_samples():
    """Number of samples available (one per author)."""
    return len(test_data)


def get_test_sample(i):
    """Return the i-th author's packed episode vector."""
    return test_data[i]
| [
"numpy.array",
"random.randint"
] | [((1376, 1401), 'random.randint', 'random.randint', (['(0)', 'maxval'], {}), '(0, maxval)\n', (1390, 1401), False, 'import random\n'), ((2598, 2619), 'numpy.array', 'np.array', (['[author_id]'], {}), '([author_id])\n', (2606, 2619), True, 'import numpy as np\n'), ((2456, 2475), 'numpy.array', 'np.array', (['input_ids'], {}), '(input_ids)\n', (2464, 2475), True, 'import numpy as np\n'), ((2521, 2540), 'numpy.array', 'np.array', (['attn_mask'], {}), '(attn_mask)\n', (2529, 2540), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 14:51, 17/03/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
import numpy as np
from math import gamma
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseHHO(Optimizer):
    """
    The original version of: Harris Hawks Optimization (HHO)
        (Harris Hawks Optimization: Algorithm and Applications)
    Link:
        https://doi.org/10.1016/j.future.2019.02.028
    """

    def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
        """
        Args:
            problem: problem definition, forwarded to the Optimizer base class
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
        """
        super().__init__(problem, kwargs)
        # On average each epoch evaluates the full population plus the extra
        # trial points of the rapid-dive branches, hence the 1.5 factor.
        self.nfe_per_epoch = 1.5*pop_size
        self.sort_flag = False

        self.epoch = epoch
        self.pop_size = pop_size

    def evolve(self, epoch):
        """
        Produce the next population for one HHO iteration.

        Args:
            epoch (int): The current iteration
        """
        pop_new = []
        for idx in range(0, self.pop_size):
            # -1 < E0 < 1: initial escaping energy of the rabbit (prey)
            E0 = 2 * np.random.uniform() - 1
            # factor to show the decreasing energy of rabbit
            E = 2 * E0 * (1 - (epoch + 1) * 1.0 / self.epoch)
            # random jump strength J in [0, 2]
            J = 2 * (1 - np.random.uniform())

            # -------- Exploration phase Eq. (1) in paper -------------------
            if (np.abs(E) >= 1):
                # Harris' hawks perch randomly based on 2 strategy:
                if np.random.rand() >= 0.5:        # perch based on other family members
                    X_rand = deepcopy(self.pop[np.random.randint(0, self.pop_size)][self.ID_POS])
                    pos_new = X_rand - np.random.uniform() * np.abs(X_rand - 2 * np.random.uniform() * self.pop[idx][self.ID_POS])
                else:            # perch on a random tall tree (random site inside group's home range)
                    X_m = np.mean([x[self.ID_POS] for x in self.pop])
                    pos_new = (self.g_best[self.ID_POS] - X_m) - np.random.uniform() * \
                        (self.problem.lb + np.random.uniform() * (self.problem.ub - self.problem.lb))
                pos_new = self.amend_position_faster(pos_new)
                pop_new.append([pos_new, None])
            # -------- Exploitation phase -------------------
            else:
                # Attacking the rabbit using 4 strategies regarding the behavior of the rabbit
                # phase 1: ----- surprise pounce (seven kills) ----------
                # surprise pounce (seven kills): multiple, short rapid dives by different hawks
                if (np.random.rand() >= 0.5):
                    delta_X = self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS]
                    if np.abs(E) >= 0.5:  # Hard besiege Eq. (6) in paper
                        pos_new = delta_X - E * np.abs(J * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
                    else:  # Soft besiege Eq. (4) in paper
                        pos_new = self.g_best[self.ID_POS] - E * np.abs(delta_X)
                    pos_new = self.amend_position_faster(pos_new)
                    pop_new.append([pos_new, None])
                else:
                    # Levy-flight step scale (beta = 1.5).
                    # NOTE(review): the denominator below differs from the
                    # usual Levy formula gamma((1+beta)/2)*beta*2^((beta-1)/2)
                    # -- verify against the reference implementation.
                    xichma = np.power((gamma(1 + 1.5) * np.sin(np.pi * 1.5 / 2.0)) /
                                      (gamma((1 + 1.5) * 1.5 * np.power(2, (1.5 - 1) / 2)) / 2.0), 1.0 / 1.5)
                    LF_D = 0.01 * np.random.uniform() * xichma / np.power(np.abs(np.random.uniform()), 1.0 / 1.5)
                    if np.abs(E) >= 0.5:  # Soft besiege Eq. (10) in paper
                        Y = self.g_best[self.ID_POS] - E * np.abs(J * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])
                    else:  # Hard besiege Eq. (11) in paper
                        X_m = np.mean([x[self.ID_POS] for x in self.pop])
                        Y = self.g_best[self.ID_POS] - E * np.abs(J * self.g_best[self.ID_POS] - X_m)
                    # Trial point Y (dive) and Levy-perturbed trial point Z;
                    # keep whichever improves on the current hawk, else keep it.
                    pos_Y = self.amend_position_faster(Y)
                    fit_Y = self.get_fitness_position(pos_Y)
                    Z = Y + np.random.uniform(self.problem.lb, self.problem.ub) * LF_D
                    pos_Z = self.amend_position_faster(Z)
                    fit_Z = self.get_fitness_position(pos_Z)
                    if self.compare_agent([pos_Y, fit_Y], self.pop[idx]):
                        pop_new.append([pos_Y, fit_Y])
                        continue
                    if self.compare_agent([pos_Z, fit_Z], self.pop[idx]):
                        pop_new.append([pos_Z, fit_Z])
                        continue
                    pop_new.append(deepcopy(self.pop[idx]))
        self.pop = self.update_fitness_population(pop_new)
| [
"numpy.random.uniform",
"copy.deepcopy",
"numpy.abs",
"numpy.power",
"numpy.mean",
"numpy.random.randint",
"math.gamma",
"numpy.sin",
"numpy.random.rand"
] | [((2036, 2045), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (2042, 2045), True, 'import numpy as np\n'), ((1748, 1767), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1765, 1767), True, 'import numpy as np\n'), ((1920, 1939), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1937, 1939), True, 'import numpy as np\n'), ((2140, 2156), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2154, 2156), True, 'import numpy as np\n'), ((2553, 2596), 'numpy.mean', 'np.mean', (['[x[self.ID_POS] for x in self.pop]'], {}), '([x[self.ID_POS] for x in self.pop])\n', (2560, 2596), True, 'import numpy as np\n'), ((3269, 3285), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3283, 3285), True, 'import numpy as np\n'), ((3402, 3411), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (3408, 3411), True, 'import numpy as np\n'), ((4179, 4188), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (4185, 4188), True, 'import numpy as np\n'), ((4446, 4489), 'numpy.mean', 'np.mean', (['[x[self.ID_POS] for x in self.pop]'], {}), '([x[self.ID_POS] for x in self.pop])\n', (4453, 4489), True, 'import numpy as np\n'), ((5277, 5300), 'copy.deepcopy', 'deepcopy', (['self.pop[idx]'], {}), '(self.pop[idx])\n', (5285, 5300), False, 'from copy import deepcopy\n'), ((2341, 2360), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2358, 2360), True, 'import numpy as np\n'), ((2662, 2681), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2679, 2681), True, 'import numpy as np\n'), ((4739, 4790), 'numpy.random.uniform', 'np.random.uniform', (['self.problem.lb', 'self.problem.ub'], {}), '(self.problem.lb, self.problem.ub)\n', (4756, 4790), True, 'import numpy as np\n'), ((2251, 2286), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.pop_size'], {}), '(0, self.pop_size)\n', (2268, 2286), True, 'import numpy as np\n'), ((3501, 3566), 'numpy.abs', 'np.abs', (['(J * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])'], {}), '(J 
* self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])\n', (3507, 3566), True, 'import numpy as np\n'), ((3691, 3706), 'numpy.abs', 'np.abs', (['delta_X'], {}), '(delta_X)\n', (3697, 3706), True, 'import numpy as np\n'), ((3886, 3900), 'math.gamma', 'gamma', (['(1 + 1.5)'], {}), '(1 + 1.5)\n', (3891, 3900), False, 'from math import gamma\n'), ((3903, 3928), 'numpy.sin', 'np.sin', (['(np.pi * 1.5 / 2.0)'], {}), '(np.pi * 1.5 / 2.0)\n', (3909, 3928), True, 'import numpy as np\n'), ((4076, 4095), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4093, 4095), True, 'import numpy as np\n'), ((4123, 4142), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4140, 4142), True, 'import numpy as np\n'), ((4290, 4355), 'numpy.abs', 'np.abs', (['(J * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])'], {}), '(J * self.g_best[self.ID_POS] - self.pop[idx][self.ID_POS])\n', (4296, 4355), True, 'import numpy as np\n'), ((4549, 4591), 'numpy.abs', 'np.abs', (['(J * self.g_best[self.ID_POS] - X_m)'], {}), '(J * self.g_best[self.ID_POS] - X_m)\n', (4555, 4591), True, 'import numpy as np\n'), ((2735, 2754), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2752, 2754), True, 'import numpy as np\n'), ((3995, 4021), 'numpy.power', 'np.power', (['(2)', '((1.5 - 1) / 2)'], {}), '(2, (1.5 - 1) / 2)\n', (4003, 4021), True, 'import numpy as np\n'), ((2383, 2402), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2400, 2402), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""
Change the HICO-DET detection results to the right format.
"""
import pickle
import numpy as np
import scipy.io as sio
import os
# HICO = None
from ult.tools import get_convert_matrix
def save_HICO(HICO, HICO_dir, classid, begin, finish, fuse_type='spho'):
    """
    Collect all detections whose annotated object class (element[2]) equals
    ``classid`` and write them to 'detections_XX.mat' in the HICO-DET
    evaluation format.

    Parameters
    ----------
    HICO : dict       image id -> list of detection elements
    HICO_dir : str    output directory (must end with a path separator)
    classid : int     1-based object class id (1-80)
    begin, finish : int
                      1-based HOI category range belonging to this object class
    fuse_type : str   branch-fusion strategy, see ``obtain_fuse_preds``

    Returns
    -------
    list of rows [human_box, object_box, image_id, action_index, score]
    """
    # The fused action scores of a detection do not depend on the action
    # index, so compute them once per detection instead of once per action
    # (the previous version recomputed them inside the action loop).
    entries = []
    for image_id, detections in HICO.items():
        for element in detections:
            if element[2] == classid:
                entries.append((image_id, element,
                                obtain_fuse_preds(element, fuse_type)))
    all_boxes = []
    for action in range(finish - begin + 1):
        total = []
        score = []
        for image_id, element, preds in entries:
            # score = action score * human score * object score
            s = preds[begin - 1 + action] * element[4] * element[5]
            total.append([element[0].tolist(),   # Human box
                          element[1].tolist(),   # Object box
                          int(image_id),         # image id
                          int(action),           # action index within class
                          s])
            score.append(s)
        # Keep at most the 19999 highest-scoring detections per action.
        order = np.argsort(score, axis=0)[::-1]
        for rank in range(min(len(order), 19999)):
            all_boxes.append(total[order[rank]])
    savefile = HICO_dir + 'detections_' + str(classid).zfill(2) + '.mat'
    sio.savemat(savefile, {'all_boxes': all_boxes})
    return all_boxes
verb_to_HO_matrix, obj_to_HO_matrix = get_convert_matrix()
# Map each of the 600 HOI categories to its 0-based object category: the
# object whose row in obj_to_HO_matrix has a positive entry for that HOI.
hoi_2_obj = {}
for hoi_id in range(600):
    for obj_id in range(80):
        if obj_to_HO_matrix[obj_id][hoi_id] > 0:
            hoi_2_obj[hoi_id] = obj_id
def obtain_fuse_preds(element, fuse_type):
    """
    Fuse the per-branch scores of one human-object detection.

    Element indices used: 3 = raw network predictions, 6 = human-branch
    scores (pH), 7 = object-branch scores (pO), 8 = spatial-branch scores
    (pSp), 9 = verb-branch scores (pHoi).

    Raises
    ------
    Exception if ``fuse_type`` is not one of
    {preds, spho, ho, spv, sp, v}.
    """
    if fuse_type == 'preds':
        return element[3]
    pH = element[6]
    pO = element[7]
    pSp = element[8]
    pHoi = element[9]
    if fuse_type == 'spho':
        return pSp * (pO + pH)
    if fuse_type == 'ho':
        return pO + pH
    if fuse_type == 'spv':
        return pSp * pHoi
    if fuse_type == 'sp':
        return pSp
    if fuse_type == 'v':
        return pHoi
    # The previous message advertised a wrong option set ({spho, spv, sp,
    # sphov}); list the options that are actually handled above.
    raise Exception('fuse_type error, you must select one of '
                    '{preds, spho, ho, spv, sp, v}')
def save_HICO3(HICO, HICO_dir, classid, begin, finish, fuse_type='spho'):
    """
    Collect all detections of one object class, using the detector's own
    0-based object id (element[13]) and detector score (element[12]), and
    write them to 'detections_XX.mat' in the HICO-DET evaluation format.

    Side effect: element[5] (object score) is overwritten with the
    detector score element[12], matching the previous behaviour.

    Parameters
    ----------
    HICO : dict       image id -> list of detection elements
    HICO_dir : str    output directory (must end with a path separator)
    classid : int     1-based object class id (1-80)
    begin, finish : int
                      1-based HOI category range of this object class
    fuse_type : str   branch-fusion strategy; 'spv' reads the pre-fused
                      scores at element[11], everything else goes through
                      ``obtain_fuse_preds``

    Returns
    -------
    list of rows [human_box, object_box, image_id, action_index, score]
    """
    # Fused scores, the element[5] overwrite and the class match are all
    # invariant w.r.t. the action loop below, so do them once instead of
    # once per action (the previous version redid them every iteration).
    # Also removed: an unused ``np.ones(600)`` buffer and two no-op
    # ``global`` declarations.
    entries = []
    for image_id, detections in HICO.items():
        for element in detections:
            if fuse_type == 'spv':
                preds = element[11]
            else:
                preds = obtain_fuse_preds(element, fuse_type)
            element[5] = element[12]  # replace object score by detector score
            if element[13] + 1 == classid:  # element[13] is 0-based
                entries.append((image_id, element, preds))
    all_boxes = []
    for action in range(finish - begin + 1):
        total = []
        score = []
        for image_id, element, preds in entries:
            s = preds[begin - 1 + action] * element[4] * element[5]
            total.append([element[0].tolist(),   # Human box
                          element[1].tolist(),   # Object box
                          int(image_id),         # image id
                          int(action),           # action index within class
                          s])
            score.append(s)
        # Keep at most the 19999 highest-scoring detections per action.
        order = np.argsort(score, axis=0)[::-1]
        for rank in range(min(len(order), 19999)):
            all_boxes.append(total[order[rank]])
    savefile = HICO_dir + 'detections_' + str(classid).zfill(2) + '.mat'
    sio.savemat(savefile, {'all_boxes': all_boxes})
    return all_boxes
def Generate_HICO_detection3(HICO, HICO_dir, fuse_type, gpool, func_type = 0):
    """
    Write per-object-class detection files (detections_01.mat ...
    detections_80.mat) for HICO-DET evaluation, using the detector's own
    object ids (see ``save_HICO3``).

    Parameters
    ----------
    HICO : dict        image id -> list of detection elements
    HICO_dir : str     output directory (created if missing, then emptied)
    fuse_type : str    branch-fusion strategy, see ``obtain_fuse_preds``
    gpool, func_type : unused; kept for backward compatibility

    Removed from the previous version: unused function-local imports
    (``itertools.repeat``, ``sys.version_info``) and dead, commented-out
    multiprocessing code.
    """
    import datetime
    if not os.path.exists(HICO_dir):
        os.makedirs(HICO_dir)
    # Remove any results from a previous run.
    for leftover in os.listdir(HICO_dir):
        os.remove(os.path.join(HICO_dir, leftover))
    # 1-based HOI category range [begin, finish] for each of the 80 object
    # classes (list index + 1 = object class id, COCO ordering).
    hoi_ranges = [
        (161, 170),  # 1 person
        (11, 24),    # 2 bicycle
        (66, 76),    # 3 car
        (147, 160),  # 4 motorcycle
        (1, 10),     # 5 airplane
        (55, 65),    # 6 bus
        (187, 194),  # 7 train
        (568, 576),  # 8 truck
        (32, 46),    # 9 boat
        (563, 567),  # 10 traffic light
        (326, 330),  # 11 fire_hydrant
        (503, 506),  # 12 stop_sign
        (415, 418),  # 13 parking_meter
        (244, 247),  # 14 bench
        (25, 31),    # 15 bird
        (77, 86),    # 16 cat
        (112, 129),  # 17 dog
        (130, 146),  # 18 horse
        (175, 186),  # 19 sheep
        (97, 107),   # 20 cow
        (314, 325),  # 21 elephant
        (236, 239),  # 22 bear
        (596, 600),  # 23 zebra
        (343, 348),  # 24 giraffe
        (209, 214),  # 25 backpack
        (577, 584),  # 26 umbrella
        (353, 356),  # 27 handbag
        (539, 546),  # 28 tie
        (507, 516),  # 29 suitcase
        (337, 342),  # 30 Frisbee
        (464, 474),  # 31 skis
        (475, 483),  # 32 snowboard
        (489, 502),  # 33 sports_ball
        (369, 376),  # 34 kite
        (225, 232),  # 35 baseball_bat
        (233, 235),  # 36 baseball_glove
        (454, 463),  # 37 skateboard
        (517, 528),  # 38 surfboard
        (534, 538),  # 39 tennis_racket
        (47, 54),    # 40 bottle
        (589, 595),  # 41 wine_glass
        (296, 305),  # 42 cup
        (331, 336),  # 43 fork
        (377, 383),  # 44 knife
        (484, 488),  # 45 spoon
        (253, 257),  # 46 bowl
        (215, 224),  # 47 banana
        (199, 208),  # 48 apple
        (439, 445),  # 49 sandwich
        (398, 407),  # 50 orange
        (258, 264),  # 51 broccoli
        (274, 283),  # 52 carrot
        (357, 363),  # 53 hot_dog
        (419, 429),  # 54 pizza
        (306, 313),  # 55 donut
        (265, 273),  # 56 cake
        (87, 92),    # 57 chair
        (93, 96),    # 58 couch
        (171, 174),  # 59 potted_plant
        (240, 243),  # 60 bed
        (108, 111),  # 61 dining_table
        (551, 558),  # 62 toilet
        (195, 198),  # 63 TV
        (384, 389),  # 64 laptop
        (394, 397),  # 65 mouse
        (435, 438),  # 66 remote
        (364, 368),  # 67 keyboard
        (284, 290),  # 68 cell_phone
        (390, 393),  # 69 microwave
        (408, 414),  # 70 oven
        (547, 550),  # 71 toaster
        (450, 453),  # 72 sink
        (430, 434),  # 73 refrigerator
        (248, 252),  # 74 book
        (291, 295),  # 75 clock
        (585, 588),  # 76 vase
        (446, 449),  # 77 scissors
        (529, 533),  # 78 teddy_bear
        (349, 352),  # 79 hair_drier
        (559, 562),  # 80 toothbrush
    ]
    print('Load HICO sucessfully', datetime.datetime.now())
    for obj_class, (begin, finish) in enumerate(hoi_ranges, start=1):
        save_HICO3(HICO, HICO_dir, obj_class, begin, finish, fuse_type)
    print("Finish save HICO", datetime.datetime.now())
def Generate_HICO_detection(output_file, HICO_dir, fuse_type, gpool):
    """
    Load pickled detection results from ``output_file`` and write
    per-object-class detection files (detections_01.mat ...
    detections_80.mat) for HICO-DET evaluation (see ``save_HICO``).

    Parameters
    ----------
    output_file : str  path to a pickle of {image id: [detection elements]}
    HICO_dir : str     output directory (created if missing, then emptied)
    fuse_type : str    branch-fusion strategy, see ``obtain_fuse_preds``
    gpool :            unused; kept for backward compatibility

    Fixed: the previous version leaked the pickle file handle by calling
    ``pickle.load(open(...))``; it is now opened via a context manager.
    Dead, commented-out multiprocessing code was removed.
    """
    import datetime
    from sys import version_info
    if not os.path.exists(HICO_dir):
        os.makedirs(HICO_dir)
    # Remove any results from a previous run.
    for leftover in os.listdir(HICO_dir):
        os.remove(os.path.join(HICO_dir, leftover))
    # 1-based HOI category range [begin, finish] for each of the 80 object
    # classes (list index + 1 = object class id, COCO ordering).
    hoi_ranges = [
        (161, 170),  # 1 person
        (11, 24),    # 2 bicycle
        (66, 76),    # 3 car
        (147, 160),  # 4 motorcycle
        (1, 10),     # 5 airplane
        (55, 65),    # 6 bus
        (187, 194),  # 7 train
        (568, 576),  # 8 truck
        (32, 46),    # 9 boat
        (563, 567),  # 10 traffic light
        (326, 330),  # 11 fire_hydrant
        (503, 506),  # 12 stop_sign
        (415, 418),  # 13 parking_meter
        (244, 247),  # 14 bench
        (25, 31),    # 15 bird
        (77, 86),    # 16 cat
        (112, 129),  # 17 dog
        (130, 146),  # 18 horse
        (175, 186),  # 19 sheep
        (97, 107),   # 20 cow
        (314, 325),  # 21 elephant
        (236, 239),  # 22 bear
        (596, 600),  # 23 zebra
        (343, 348),  # 24 giraffe
        (209, 214),  # 25 backpack
        (577, 584),  # 26 umbrella
        (353, 356),  # 27 handbag
        (539, 546),  # 28 tie
        (507, 516),  # 29 suitcase
        (337, 342),  # 30 Frisbee
        (464, 474),  # 31 skis
        (475, 483),  # 32 snowboard
        (489, 502),  # 33 sports_ball
        (369, 376),  # 34 kite
        (225, 232),  # 35 baseball_bat
        (233, 235),  # 36 baseball_glove
        (454, 463),  # 37 skateboard
        (517, 528),  # 38 surfboard
        (534, 538),  # 39 tennis_racket
        (47, 54),    # 40 bottle
        (589, 595),  # 41 wine_glass
        (296, 305),  # 42 cup
        (331, 336),  # 43 fork
        (377, 383),  # 44 knife
        (484, 488),  # 45 spoon
        (253, 257),  # 46 bowl
        (215, 224),  # 47 banana
        (199, 208),  # 48 apple
        (439, 445),  # 49 sandwich
        (398, 407),  # 50 orange
        (258, 264),  # 51 broccoli
        (274, 283),  # 52 carrot
        (357, 363),  # 53 hot_dog
        (419, 429),  # 54 pizza
        (306, 313),  # 55 donut
        (265, 273),  # 56 cake
        (87, 92),    # 57 chair
        (93, 96),    # 58 couch
        (171, 174),  # 59 potted_plant
        (240, 243),  # 60 bed
        (108, 111),  # 61 dining_table
        (551, 558),  # 62 toilet
        (195, 198),  # 63 TV
        (384, 389),  # 64 laptop
        (394, 397),  # 65 mouse
        (435, 438),  # 66 remote
        (364, 368),  # 67 keyboard
        (284, 290),  # 68 cell_phone
        (390, 393),  # 69 microwave
        (408, 414),  # 70 oven
        (547, 550),  # 71 toaster
        (450, 453),  # 72 sink
        (430, 434),  # 73 refrigerator
        (248, 252),  # 74 book
        (291, 295),  # 75 clock
        (585, 588),  # 76 vase
        (446, 449),  # 77 scissors
        (529, 533),  # 78 teddy_bear
        (349, 352),  # 79 hair_drier
        (559, 562),  # 80 toothbrush
    ]
    with open(output_file, "rb") as results_file:
        if version_info.major == 3:
            # latin1 lets Python 3 read pickles produced under Python 2
            # (needed for numpy payloads) -- presumably why it was used here.
            HICO = pickle.load(results_file, encoding='latin1')
        else:
            HICO = pickle.load(results_file)
    print('Load HICO sucessfully', datetime.datetime.now())
    for obj_class, (begin, finish) in enumerate(hoi_ranges, start=1):
        save_HICO(HICO, HICO_dir, obj_class, begin, finish, fuse_type)
    print("Finish save HICO", datetime.datetime.now())
| [
"os.makedirs",
"os.path.join",
"os.path.exists",
"numpy.ones",
"scipy.io.savemat",
"datetime.datetime.now",
"numpy.argsort",
"ult.tools.get_convert_matrix",
"os.listdir"
] | [((2192, 2212), 'ult.tools.get_convert_matrix', 'get_convert_matrix', ([], {}), '()\n', (2210, 2212), False, 'from ult.tools import get_convert_matrix\n'), ((2084, 2131), 'scipy.io.savemat', 'sio.savemat', (['savefile', "{'all_boxes': all_boxes}"], {}), "(savefile, {'all_boxes': all_boxes})\n", (2095, 2131), True, 'import scipy.io as sio\n'), ((3195, 3207), 'numpy.ones', 'np.ones', (['(600)'], {}), '(600)\n', (3202, 3207), True, 'import numpy as np\n'), ((4571, 4618), 'scipy.io.savemat', 'sio.savemat', (['savefile', "{'all_boxes': all_boxes}"], {}), "(savefile, {'all_boxes': all_boxes})\n", (4582, 4618), True, 'import scipy.io as sio\n'), ((4733, 4757), 'os.path.exists', 'os.path.exists', (['HICO_dir'], {}), '(HICO_dir)\n', (4747, 4757), False, 'import os\n'), ((4767, 4788), 'os.makedirs', 'os.makedirs', (['HICO_dir'], {}), '(HICO_dir)\n', (4778, 4788), False, 'import os\n'), ((8232, 8255), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8253, 8255), False, 'import datetime\n'), ((8573, 8596), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8594, 8596), False, 'import datetime\n'), ((8683, 8707), 'os.path.exists', 'os.path.exists', (['HICO_dir'], {}), '(HICO_dir)\n', (8697, 8707), False, 'import os\n'), ((8717, 8738), 'os.makedirs', 'os.makedirs', (['HICO_dir'], {}), '(HICO_dir)\n', (8728, 8738), False, 'import os\n'), ((12314, 12337), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12335, 12337), False, 'import datetime\n'), ((12642, 12665), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12663, 12665), False, 'import datetime\n'), ((1829, 1854), 'numpy.argsort', 'np.argsort', (['score'], {'axis': '(0)'}), '(score, axis=0)\n', (1839, 1854), True, 'import numpy as np\n'), ((4316, 4341), 'numpy.argsort', 'np.argsort', (['score'], {'axis': '(0)'}), '(score, axis=0)\n', (4326, 4341), True, 'import numpy as np\n'), ((4848, 4868), 'os.listdir', 'os.listdir', (['HICO_dir'], {}), 
'(HICO_dir)\n', (4858, 4868), False, 'import os\n'), ((4911, 4936), 'os.path.join', 'os.path.join', (['HICO_dir', 'f'], {}), '(HICO_dir, f)\n', (4923, 4936), False, 'import os\n'), ((8798, 8818), 'os.listdir', 'os.listdir', (['HICO_dir'], {}), '(HICO_dir)\n', (8808, 8818), False, 'import os\n'), ((8861, 8886), 'os.path.join', 'os.path.join', (['HICO_dir', 'f'], {}), '(HICO_dir, f)\n', (8873, 8886), False, 'import os\n')] |
'''
Set of functions useful in some modules
2021 - <NAME> & <NAME>
'''
import inspect
import numpy as np
from numpy import dot
from numpy.linalg import inv
from scipy.stats import gaussian_kde
import matplotlib.pyplot as plt
import deepdish as dd
# Prepare matrices for spline reconstruction
def spline_preparation(z_knots):
    '''
    Build the interval widths and band matrices needed for the spline
    interpolation.

    Returns
    -------
    h : array of knot spacings, h[i] = z_knots[i+1] - z_knots[i]
    R : (n-2, n-2) matrix of the smoothing-spline system
    Q : (n, n-2) matrix of the smoothing-spline system
    '''
    n = len(z_knots)
    # Knot spacings (length n-1).
    h = np.zeros(n - 1)
    for k in range(n - 1):
        h[k] = z_knots[k + 1] - z_knots[k]
    # Band matrices R [(n-2) x (n-2)] and Q [n x (n-2)].
    R = np.zeros([n - 2, n - 2])
    Q = np.zeros([n, n - 2])
    for k in range(n - 2):
        R[k, k] = 2. / 3. * (h[k] + h[k + 1])
        Q[k, k] = 1. / h[k]
        Q[k + 1, k] = -(1. / h[k] + 1. / h[k + 1])
        Q[k + 2, k] = 1. / h[k + 1]
        if k < n - 3:
            R[k, k + 1] = h[k + 1] / 3.
            R[k + 1, k] = h[k + 1] / 3.
    return h, R, Q
# Obtain the spline reconstruction from the nodes y and spacing h
def spline_reconstruction(y, h, R, Q):
    '''
    Compute the cubic-spline coefficients (a, b, c, d) on each interval
    from the values ``y`` at the knots, with interval widths ``h`` and
    the band matrices ``R`` and ``Q`` from ``spline_preparation``.
    '''
    # Smoothing parameter of the smoothing-spline formulation; zero gives
    # an exact interpolating spline.
    smoothing = 0.
    n = len(y)
    A = dot(Q.T, Q) * smoothing + R
    # Second-derivative-related coefficients at the interior knots.
    interior = dot(dot(inv(A), Q.T), y)
    b = np.zeros(n)
    b[1:n - 1] = interior
    d = y - smoothing * dot(Q, interior)
    a = np.zeros(n)
    c = np.zeros(n)
    for p in range(n - 1):
        a[p] = (b[p + 1] - b[p]) / 3. / h[p]
        c[p] = (d[p + 1] - d[p]) / h[p] - 1. / 3. * (b[p + 1] + 2. * b[p]) * h[p]
    return a, b, c, d
def get_default_params(func):
    '''
    Return a dict mapping the parameter names of ``func`` to their default
    values, omitting ``self`` and any parameter without a default.

    Fixes two defects of the previous ``inspect.getargspec`` version:
    ``getargspec`` was removed in Python 3.11, and
    ``dict(zip(param_names, defaults))`` paired the defaults with the
    WRONG parameters whenever some parameters had no default (defaults
    align with the END of the signature, not the beginning) and crashed
    (``zip(None)``) when there were none at all.
    '''
    signature = inspect.signature(func)
    return {
        name: param.default
        for name, param in signature.parameters.items()
        if name != 'self' and param.default is not inspect.Parameter.empty
    }
def check_params(input_params, default_params):
    '''
    Validate user-supplied parameters against the defaults: every key must
    exist in ``default_params`` and carry a value of exactly the same type
    as the default. The sampler, if given, must be 'zeus' or 'emcee'.
    '''
    for name, value in input_params.items():
        # Unknown parameter name.
        if name not in default_params.keys():
            raise AttributeError(name + " is not a valid parameter")
        expected = default_params[name]
        # Strict type match against the default's type.
        if type(value) != type(expected):
            raise TypeError("Parameter " + name + " must be a " +
                            str(type(expected)))
        # Special requirement for the sampler choice.
        if name == 'sampler':
            if value != 'zeus' and value != 'emcee':
                raise AttributeError('Please, choose a sampler between zeus and emcee')
    return
def check_lkls(lkls):
    '''
    Reject combinations of likelihoods that must not be used together.
    '''
    # A prior on rd is only meaningful alongside the BAO likelihood.
    rd_without_bao = lkls['rdprior'] and not lkls['BAO']
    if rd_without_bao:
        raise AttributeError('Please use only a prior in rd ("rdprior" likelihood) when also using the "BAO" likelihood.')
    return
def check_zmax(self):
    '''
    Verify that self.zmax covers the maximum redshift of every active
    data set (BAO, SN, Clocks); raise ValueError otherwise.
    '''
    if self.lkls['BAO']:
        z_needed = self.data['BAO'][-1][0]
        if self.zmax < z_needed:
            raise ValueError('You have input zmax = {}, but BAO requires zmax >= {}.'.format(self.zmax, z_needed))
    if self.lkls['SN']:
        z_needed = np.max(self.data['SN'][:, 0])
        if self.zmax < z_needed:
            raise ValueError('You have input zmax = {}, but SN requires zmax >= {}.'.format(self.zmax, z_needed))
    if self.lkls['Clocks']:
        z_needed = np.max(self.data['Clocks'][0])
        if self.zmax < z_needed:
            raise ValueError('You have input zmax = {}, but Clocks require zmax >= {}.'.format(self.zmax, z_needed))
    return
def gaussian(x, mu, sigma):
    '''
    Gaussian PDF with mean ``mu`` and standard deviation ``sigma``,
    evaluated at ``x``.
    '''
    normalization = sigma * np.sqrt(2 * np.pi)
    z = (x - mu) / sigma
    return np.exp(-0.5 * z ** 2) / normalization
def assign_params(self, theta):
    '''
    Assign the parameters at each point in the MCMC (e.g., theta) to a
    params dictionary (and updates self.z_knots for flexknot).
    To be used while running.

    Returns the params dictionary, or -np.inf when the flexknot positions
    are not strictly increasing (used as a log-prior sentinel by the
    caller).
    '''
    params = {}
    if self.expansion == 'flexknot':
        # Knot positions: first knot fixed at z = 0, last at zmax, the
        # interior ones sampled as the first Nknots-2 entries of theta.
        self.z_knots = np.zeros(self.Nknots)
        self.z_knots[-1] = self.zmax
        self.z_knots[1:-1] = theta[:self.Nknots - 2]
        # H(z) values at the knots; the first one is H0.
        y = theta[self.Nknots-2:2*self.Nknots-2]
        H0 = y[0]
        #check all knots in order (strictly increasing)
        if np.any(np.diff(self.z_knots) <= 0.):
            return -np.inf
        else:
            # Prepare spline matrices:
            h, R, Q = spline_preparation(self.z_knots)
            a,b,c,d = spline_reconstruction(y,h,R,Q)
            params['coeffs'] = a,b,c,d
            params['H0'] = H0
    elif self.expansion == 'spline':
        # Fixed knot positions: the spline matrices (self.h, self.R,
        # self.Q) were precomputed once on the instance.
        y = theta[:self.Nknots]
        H0 = y[0]
        a,b,c,d = spline_reconstruction(y,self.h,self.R,self.Q)
        params['coeffs'] = a,b,c,d
        params['H0'] = H0
    else:
        # Parametric expansion histories.
        params['H0'] = theta[0]
        params['Omega_m'] = theta[1]
        if self.expansion == "flatLCDM":
            pass
        elif self.expansion == "flatwCDM":
            params['w0'] = theta[2]
        elif self.expansion == "flatw0waCDM":
            params['w0'] = theta[2]
            params['wa'] = theta[3]
        elif self.expansion == "LCDM":
            pass
        elif self.expansion == "wCDM":
            params['w0'] = theta[2]
        elif self.expansion == "w0waCDM":
            params['w0'] = theta[2]
            params['wa'] = theta[3]
    #Assign the nuisance and additional parameteres, read from the END of
    # theta in a fixed order: M (when SN is active), then rs (when BAO is
    # active), then Omega_k (when curvature is sampled).
    counter = -1
    if self.lkls['SN']:
        params['M'] = theta[counter]
        counter += -1
    if self.lkls['BAO']:
        params['rs'] = theta[counter]
        counter += -1
    if not self.flat or not 'flat' in self.expansion:
        params['Omega_k'] = theta[counter]
    else:
        params['Omega_k'] = 0.
    return params
def open_MCMC(path):
    '''
    Load an MCMC run saved with deepdish and return the chain, the
    log-probabilities (including the prior), and the run summary.
    '''
    contents = dd.io.load(path)
    return contents['samples'], contents['log_prob_samples'], contents['Summary']
def find_max_mean_CLregions(samples, bandwidth=0, CL=0.6829,
                            ranges=None, printing=False, visual_check=False):
    '''
    Computes the maximum, mean and limits at a given CL for a 1D marginalized posterior.
    input parameters:
        -samples: MCMC samples for the parameter of interest
        -bandwidth: bandwidth factor for the Gaussian KDE of the histogram.
                    Any falsy value (default: 0) falls back to scipy's
                    automatic (Scott's rule) bandwidth -- passing the
                    literal 0 to gaussian_kde would make the kernel
                    covariance singular and crash.
        -CL: Confidence level (over 1) at which compute the errors.
                    (default: 0.6829, 1sigma)
        -ranges: cuts in 1D posterior before computing everything
                    (default: None)
        -printing: whether you want the results printed,
                    if False they're returned as output of the function
                    (default: False)
        -visual_check: Check visually whether the bandwidth is suitable or not.
                    (default: False)
    '''
    if not ranges:
        vmin, vmax = np.min(samples), np.max(samples)
    else:
        vmin, vmax = ranges
    dat = np.linspace(vmin, vmax, 1000)
    # bandwidth == 0 (the default) would yield a singular KDE covariance,
    # so defer to scipy's automatic bandwidth in that case.
    dist = gaussian_kde(samples, bandwidth if bandwidth else None)(dat)
    dist *= 1. / np.trapz(dist, dat)
    if visual_check:
        plt.hist(samples, bins=100, density=True, range=[vmin, vmax])
        plt.plot(dat, dist)
        plt.show()
    # Get the maximum (mode of the smoothed posterior):
    maxi = dat[np.argmax(dist)]
    # Get the mean:
    mean = np.trapz(dat * dist, dat)
    # Get the CL limits: bisect on the density threshold until the
    # enclosed probability mass matches CL (within eps).
    lim_up = np.max(dist)
    lim_down = 0
    eps = 0.001
    for _ in range(0, 512):
        lim = (lim_up + lim_down) / 2
        ind = np.where(dist >= lim)
        dens = np.trapz(dist[ind], dat[ind])
        if dens >= CL + eps:
            lim_down = lim
        elif dens <= CL - eps:
            lim_up = lim
        else:
            break
    low, high = dat[ind[0][0]], dat[ind[0][-1]]
    if printing:
        print('maxi = {}, mean = {}, 1sigma_low = {}, 1sigma_high = {}'.format(maxi, mean, maxi - low, high - maxi))
        return
    else:
        return maxi, mean, maxi - low, high - maxi
| [
"numpy.trapz",
"deepdish.io.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.zeros",
"scipy.stats.gaussian_kde",
"numpy.max",
"numpy.where",
"inspect.getargspec",
"numpy.exp",
"numpy.linspace",
"numpy.linalg.inv",
"numpy.min",
... | [((555, 574), 'numpy.zeros', 'np.zeros', (['(n_mat - 1)'], {}), '(n_mat - 1)\n', (563, 574), True, 'import numpy as np\n'), ((714, 746), 'numpy.zeros', 'np.zeros', (['[n_mat - 2, n_mat - 2]'], {}), '([n_mat - 2, n_mat - 2])\n', (722, 746), True, 'import numpy as np\n'), ((755, 783), 'numpy.zeros', 'np.zeros', (['[n_mat, n_mat - 2]'], {}), '([n_mat, n_mat - 2])\n', (763, 783), True, 'import numpy as np\n'), ((1576, 1586), 'numpy.dot', 'dot', (['Q', 'bi'], {}), '(Q, bi)\n', (1579, 1586), False, 'from numpy import dot\n'), ((2080, 2104), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (2098, 2104), False, 'import inspect\n'), ((6687, 6703), 'deepdish.io.load', 'dd.io.load', (['path'], {}), '(path)\n', (6697, 6703), True, 'import deepdish as dd\n'), ((7981, 8010), 'numpy.linspace', 'np.linspace', (['vmin', 'vmax', '(1000)'], {}), '(vmin, vmax, 1000)\n', (7992, 8010), True, 'import numpy as np\n'), ((8308, 8333), 'numpy.trapz', 'np.trapz', (['(dat * dist)', 'dat'], {}), '(dat * dist, dat)\n', (8316, 8333), True, 'import numpy as np\n'), ((8367, 8379), 'numpy.max', 'np.max', (['dist'], {}), '(dist)\n', (8373, 8379), True, 'import numpy as np\n'), ((4494, 4532), 'numpy.exp', 'np.exp', (['(-0.5 * ((x - mu) / sigma) ** 2)'], {}), '(-0.5 * ((x - mu) / sigma) ** 2)\n', (4500, 4532), True, 'import numpy as np\n'), ((4852, 4873), 'numpy.zeros', 'np.zeros', (['self.Nknots'], {}), '(self.Nknots)\n', (4860, 4873), True, 'import numpy as np\n'), ((8020, 8052), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['samples', 'bandwidth'], {}), '(samples, bandwidth)\n', (8032, 8052), False, 'from scipy.stats import gaussian_kde\n'), ((8072, 8091), 'numpy.trapz', 'np.trapz', (['dist', 'dat'], {}), '(dist, dat)\n', (8080, 8091), True, 'import numpy as np\n'), ((8120, 8181), 'matplotlib.pyplot.hist', 'plt.hist', (['samples'], {'bins': '(100)', 'density': '(True)', 'range': '[vmin, vmax]'}), '(samples, bins=100, density=True, range=[vmin, vmax])\n', (8128, 8181), 
True, 'import matplotlib.pyplot as plt\n'), ((8186, 8205), 'matplotlib.pyplot.plot', 'plt.plot', (['dat', 'dist'], {}), '(dat, dist)\n', (8194, 8205), True, 'import matplotlib.pyplot as plt\n'), ((8213, 8223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8221, 8223), True, 'import matplotlib.pyplot as plt\n'), ((8261, 8276), 'numpy.argmax', 'np.argmax', (['dist'], {}), '(dist)\n', (8270, 8276), True, 'import numpy as np\n'), ((8490, 8511), 'numpy.where', 'np.where', (['(dist >= lim)'], {}), '(dist >= lim)\n', (8498, 8511), True, 'import numpy as np\n'), ((8527, 8556), 'numpy.trapz', 'np.trapz', (['dist[ind]', 'dat[ind]'], {}), '(dist[ind], dat[ind])\n', (8535, 8556), True, 'import numpy as np\n'), ((1437, 1448), 'numpy.dot', 'dot', (['Q.T', 'Q'], {}), '(Q.T, Q)\n', (1440, 1448), False, 'from numpy import dot\n'), ((1474, 1480), 'numpy.linalg.inv', 'inv', (['A'], {}), '(A)\n', (1477, 1480), False, 'from numpy.linalg import inv\n'), ((3969, 3998), 'numpy.max', 'np.max', (["self.data['SN'][:, 0]"], {}), "(self.data['SN'][:, 0])\n", (3975, 3998), True, 'import numpy as np\n'), ((4183, 4213), 'numpy.max', 'np.max', (["self.data['Clocks'][0]"], {}), "(self.data['Clocks'][0])\n", (4189, 4213), True, 'import numpy as np\n'), ((4532, 4550), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (4539, 4550), True, 'import numpy as np\n'), ((7893, 7908), 'numpy.min', 'np.min', (['samples'], {}), '(samples)\n', (7899, 7908), True, 'import numpy as np\n'), ((7909, 7924), 'numpy.max', 'np.max', (['samples'], {}), '(samples)\n', (7915, 7924), True, 'import numpy as np\n'), ((5083, 5104), 'numpy.diff', 'np.diff', (['self.z_knots'], {}), '(self.z_knots)\n', (5090, 5104), True, 'import numpy as np\n'), ((4101, 4130), 'numpy.max', 'np.max', (["self.data['SN'][:, 0]"], {}), "(self.data['SN'][:, 0])\n", (4107, 4130), True, 'import numpy as np\n'), ((4320, 4350), 'numpy.max', 'np.max', (["self.data['Clocks'][0]"], {}), "(self.data['Clocks'][0])\n", (4326, 4350), 
True, 'import numpy as np\n')] |
from copy import deepcopy
import numpy as np
from khan.common import get_mauna_kea_summit_extinction
from khan.pipeline.images import CCDImage
def correct_for_airmass_extinction(data: list[list[CCDImage]]) \
        -> list[list[CCDImage]]:
    """
    Apply the wavelength- and airmass-dependent flux correction. For a
    complete description of the data used, see the function
    `khan.pipeline.files.get_mauna_kea_summit_extinction`.

    Parameters
    ----------
    data : list[list[CCDImage]]
        A set of rectified data. Can be before or after cosmic ray removal.

    Returns
    -------
    The data with flux counts corrected for airmass extinction.
    """
    extinction_table = get_mauna_kea_summit_extinction(value=True)
    n_observations, n_orders = np.shape(data)
    corrected_frames = []
    for obs_index in range(n_observations):
        corrected_orders = []
        for order_index in range(n_orders):
            ccd = data[obs_index][order_index]
            anc = deepcopy(ccd.anc)
            if 'pixel_center_wavelengths' not in anc.keys():
                raise Exception('No wavelength solution in the ancillary data '
                                'dictionary. Make sure you run '
                                '`select_orders_with_solutions` on these data '
                                'first!')
            wavelengths = anc['pixel_center_wavelengths'].value
            # Interpolate the summit extinction curve onto this order's
            # wavelength grid.
            extinction_curve = np.interp(wavelengths,
                                         extinction_table['wavelength'],
                                         extinction_table['extinction'])
            scale = anc['airmass'].value / 100 ** (1 / 5)
            correction = np.tile(10 ** (extinction_curve * scale),
                                  (ccd.data.shape[0], 1))
            anc['reductions_applied'].append('airmass_ext_corrected')
            corrected_orders.append(CCDImage(ccd.data * correction, anc))
        corrected_frames.append(corrected_orders)
    return corrected_frames
| [
"copy.deepcopy",
"khan.common.get_mauna_kea_summit_extinction",
"numpy.shape",
"numpy.tile",
"numpy.interp",
"khan.pipeline.images.CCDImage"
] | [((702, 745), 'khan.common.get_mauna_kea_summit_extinction', 'get_mauna_kea_summit_extinction', ([], {'value': '(True)'}), '(value=True)\n', (733, 745), False, 'from khan.common import get_mauna_kea_summit_extinction\n'), ((814, 828), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (822, 828), True, 'import numpy as np\n'), ((1006, 1036), 'copy.deepcopy', 'deepcopy', (['data[obs][order].anc'], {}), '(data[obs][order].anc)\n', (1014, 1036), False, 'from copy import deepcopy\n'), ((1461, 1552), 'numpy.interp', 'np.interp', (['wavelengths', "calibration_data['wavelength']", "calibration_data['extinction']"], {}), "(wavelengths, calibration_data['wavelength'], calibration_data[\n 'extinction'])\n", (1470, 1552), True, 'import numpy as np\n'), ((1716, 1780), 'numpy.tile', 'np.tile', (['(10 ** (interp_extinction * factor))', '(image.shape[0], 1)'], {}), '(10 ** (interp_extinction * factor), (image.shape[0], 1))\n', (1723, 1780), True, 'import numpy as np\n'), ((2007, 2047), 'khan.pipeline.images.CCDImage', 'CCDImage', (['extinction_corrected_data', 'anc'], {}), '(extinction_corrected_data, anc)\n', (2015, 2047), False, 'from khan.pipeline.images import CCDImage\n')] |
import carla
import numpy as np
class Vehicle:
    """
    Wrapper around a CARLA vehicle actor, spawned at a random recommended
    spawn point, optionally with a dash camera and a third-person camera.
    """

    def __init__(self, controller, vehicle_id, auto_pilot=True, dashcam=True, third_camera=True, color=None):
        self.controller = controller
        self.world = self.controller.world
        self.blueprint = self.controller.world.get_blueprint_library().find(vehicle_id)
        # Spawn the vehicle at a random recommended spawn point.
        spawn_candidates = self.world.get_map().get_spawn_points()
        spawn_point = np.random.choice(spawn_candidates)
        if color is None:
            color = np.random.choice(
                self.blueprint.get_attribute('color').recommended_values)
        self.blueprint.set_attribute('color', color)
        self.entity = self.world.spawn_actor(
            self.blueprint, spawn_point)
        self.set_autopilot(auto_pilot)
        # Shared RGB camera blueprint for both optional cameras.
        camera_blueprint = self.world.get_blueprint_library().find('sensor.camera.rgb')
        camera_blueprint.set_attribute('image_size_x', str(1028))
        camera_blueprint.set_attribute('image_size_y', str(720))
        self.dash_camera = None
        if dashcam:
            # Rigidly mounted just behind the windshield.
            self.dash_camera = self.world.spawn_actor(
                camera_blueprint,
                carla.Transform(carla.Location(x=1.5, y=0, z=1.2)),
                self.entity,
                carla.AttachmentType.Rigid)
        self.third_camera = None
        if third_camera:
            # Chase camera on a spring arm behind and above the vehicle.
            self.third_camera = self.world.spawn_actor(
                camera_blueprint,
                carla.Transform(carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0)),
                self.entity,
                carla.AttachmentType.SpringArm)

    def set_autopilot(self, value):
        """Enable or disable the CARLA autopilot for this vehicle."""
        self.entity.set_autopilot(value)
| [
"carla.Location",
"carla.Rotation",
"numpy.random.choice"
] | [((431, 471), 'numpy.random.choice', 'np.random.choice', (['recommend_spawn_points'], {}), '(recommend_spawn_points)\n', (447, 471), True, 'import numpy as np\n'), ((1187, 1220), 'carla.Location', 'carla.Location', ([], {'x': '(1.5)', 'y': '(0)', 'z': '(1.2)'}), '(x=1.5, y=0, z=1.2)\n', (1201, 1220), False, 'import carla\n'), ((1683, 1712), 'carla.Location', 'carla.Location', ([], {'x': '(-5.5)', 'z': '(2.5)'}), '(x=-5.5, z=2.5)\n', (1697, 1712), False, 'import carla\n'), ((1774, 1799), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(8.0)'}), '(pitch=8.0)\n', (1788, 1799), False, 'import carla\n')] |
import matplotlib
import matplotlib.cm
import numpy as np
import matplotlib.pyplot as plt
import torch
def ShowMissclassifiedImages(model, data, class_id, device,dataType='val', num_images=12,save_as="misclassified.jpg"):
    """
    Plot a (num_images/4 x 4) grid of images whose true class is
    ``class_id`` but which the model misclassifies, each titled with the
    predicted vs. actual class name, then save the figure to ``save_as``.

    Note: the figure is shown and saved only once ``num_images``
    misclassified examples have been found; if fewer exist in the split,
    the function returns without saving.
    """
    dataloaders, class_names = data.dataloaders, data.class_names
    # Remember the current train/eval mode so it can be restored on exit.
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig, axs = plt.subplots(int(num_images/4),4,figsize=(12,12))
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders[dataType]):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                # Only keep misclassified samples of the requested class.
                if((preds[j] != labels[j]) and (labels[j] == class_id)):
                    row = int((images_so_far)/4)
                    col = (images_so_far)%4
                    imagex = inputs.cpu().data[j]
                    # CHW -> HWC layout for matplotlib.
                    imagex = np.transpose(imagex, (1, 2, 0))
                    imagex=imagex.numpy()
                    # Undo the input normalization (constants assumed to
                    # match the training transform -- TODO confirm against
                    # the data pipeline).
                    mean = np.array([0.53713346, 0.58979464, 0.62127595])
                    std = np.array([0.27420551, 0.25534403, 0.29759673])
                    imagex = std*imagex + mean
                    imagex = np.clip(imagex, 0, 1)
                    axs[row,col].imshow(imagex)
                    axs[row,col].axis('off')
                    fig.tight_layout(pad=2.0)
                    axs[row,col].set_title('Predicted: {} \n Actual: {}'.format(class_names[preds[j]],class_names[labels[j]]))
                    images_so_far += 1
                    if images_so_far == num_images:
                        # Restore the original mode before the early exit.
                        model.train(mode=was_training)
                        plt.show()
                        fig.savefig(save_as)
                        return
    model.train(mode=was_training)
def ShowCustomDataFaces_plot(model, data, class_id, device,dataType='val', num_images=6,save_as="misclassified.jpg"):
    """
    Plot up to ``num_images`` images of class ``class_id`` from the given
    split in a single row, each titled with the model's predicted class
    name, then save the figure to ``save_as``.

    Note: the figure is shown and saved only once ``num_images`` samples
    of the class have been found; if fewer exist in the split, the
    function returns without saving.
    """
    dataloaders, class_names = data.dataloaders, data.class_names
    # Remember the current train/eval mode so it can be restored on exit.
    was_training = model.training
    model.eval()
    images_so_far = 0
    fig = plt.figure(figsize=(12, 6))
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(dataloaders[dataType]):
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            for j in range(inputs.size()[0]):
                if labels[j] == class_id:
                    row = 0
                    col = images_so_far+1
                    imagex = inputs.cpu().data[j]
                    # CHW -> HWC layout for matplotlib.
                    imagex = np.transpose(imagex, (1, 2, 0))
                    imagex=imagex.numpy()
                    # Undo the (standard ImageNet) input normalization.
                    mean = np.array([0.485, 0.456, 0.406])
                    std = np.array([0.229, 0.224, 0.225])
                    imagex = std*imagex + mean
                    imagex = np.clip(imagex, 0, 1)
                    ax = fig.add_subplot(1, 6, col, xticks=[], yticks=[])
                    ax.imshow(imagex)
                    ax.set_title('{}'.format(class_names[preds[j]]))
                    images_so_far += 1
                    if images_so_far == num_images:
                        # Restore the original mode before the early exit.
                        model.train(mode=was_training)
                        fig.tight_layout()
                        plt.show()
                        fig.savefig(save_as)
                        return
    model.train(mode=was_training)
def ShowCustomDataFaces(model, data, device, dataType='val', num_images=6):
    """Render one prediction grid per celebrity class in the fixed table below.

    ``num_images`` is accepted for interface compatibility; the per-class
    helper is invoked with its own default, exactly as before.
    """
    # (class index, display name, output filename) for each person shown.
    people = (
        (0, 'Aishwarya Rai', 'Predictions_AishwaryaRai.jpg'),
        (14, 'Elon Musk', 'Predictions_ElonMusk.jpg'),
        (43, '<NAME>', 'Predictions_MahendraSinghDhoni.jpg'),
        (45, 'Malala Yousafzai', 'Predictions_MalalaYousafzai.jpg'),
        (49, 'Narendra Modi', 'Predictions_NarendraModi.jpg'),
        (53, 'Priyanka Chopra', 'Predictions_PriyankaChopra.jpg'),
        (54, 'Rahul Gandhi', 'Predictions_RahulGandhi.jpg'),
        (59, 'Sachin Tendulkar', 'Predictions_SachinTendulkar.jpg'),
        (62, 'Shahrukh Khan', 'Predictions_ShahrukhKhan.jpg'),
        (63, 'Shreya Ghoshal', 'Predictions_ShreyaGhoshal.jpg'),
    )
    for class_id, name, out_file in people:
        print("------------ Prediction on Validation set of Images for {} -------------------".format(name))
        ShowCustomDataFaces_plot(model, data, class_id, device, dataType, save_as=out_file)
"matplotlib.pyplot.show",
"numpy.transpose",
"numpy.clip",
"matplotlib.pyplot.figure",
"numpy.array",
"torch.max",
"torch.no_grad"
] | [((2107, 2134), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2117, 2134), True, 'import matplotlib.pyplot as plt\n'), ((437, 452), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (450, 452), False, 'import torch\n'), ((2144, 2159), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2157, 2159), False, 'import torch\n'), ((660, 681), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (669, 681), False, 'import torch\n'), ((2367, 2388), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (2376, 2388), False, 'import torch\n'), ((980, 1011), 'numpy.transpose', 'np.transpose', (['imagex', '(1, 2, 0)'], {}), '(imagex, (1, 2, 0))\n', (992, 1011), True, 'import numpy as np\n'), ((1077, 1123), 'numpy.array', 'np.array', (['[0.53713346, 0.58979464, 0.62127595]'], {}), '([0.53713346, 0.58979464, 0.62127595])\n', (1085, 1123), True, 'import numpy as np\n'), ((1148, 1194), 'numpy.array', 'np.array', (['[0.27420551, 0.25534403, 0.29759673]'], {}), '([0.27420551, 0.25534403, 0.29759673])\n', (1156, 1194), True, 'import numpy as np\n'), ((1268, 1289), 'numpy.clip', 'np.clip', (['imagex', '(0)', '(1)'], {}), '(imagex, 0, 1)\n', (1275, 1289), True, 'import numpy as np\n'), ((2633, 2664), 'numpy.transpose', 'np.transpose', (['imagex', '(1, 2, 0)'], {}), '(imagex, (1, 2, 0))\n', (2645, 2664), True, 'import numpy as np\n'), ((2730, 2761), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2738, 2761), True, 'import numpy as np\n'), ((2786, 2817), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2794, 2817), True, 'import numpy as np\n'), ((2891, 2912), 'numpy.clip', 'np.clip', (['imagex', '(0)', '(1)'], {}), '(imagex, 0, 1)\n', (2898, 2912), True, 'import numpy as np\n'), ((1717, 1727), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1725, 1727), True, 'import matplotlib.pyplot as plt\n'), ((3298, 
3308), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3306, 3308), True, 'import matplotlib.pyplot as plt\n')] |
from typing import List
import os
import numpy as np
from data import VECTORS_DIR
from transformers import AutoTokenizer
class Tokenizer(object):
    """Abstract base class for turning token sequences into model inputs."""

    def __init__(self):
        pass

    def tokenize(self, sequences: List[List[str]], max_sequence_size=100, **kwargs):
        """Encode ``sequences``; concrete subclasses must override this."""
        raise NotImplementedError
class BERTTokenizer(Tokenizer):
    """Tokenizer backed by a HuggingFace ``transformers`` tokenizer."""

    def __init__(self, bert_version):
        super().__init__()
        # e.g. 'bert-base-uncased'; downloads/loads the matching vocab.
        self.tokenizer = AutoTokenizer.from_pretrained(bert_version)

    def tokenize(self, sequences: List[List[str]], max_sequence_size=100, **kwargs):
        """Return a 2-D array of input ids, padded/truncated to max_sequence_size."""
        texts = [' '.join(seq) for seq in sequences]
        encoded = self.tokenizer.batch_encode_plus(texts,
                                                   max_length=max_sequence_size,
                                                   padding='max_length',
                                                   truncation=True)
        return np.asarray(encoded['input_ids'])
class ELMoTokenizer(Tokenizer):
    """Tokenizer for ELMo: each sequence becomes one whitespace-joined string,
    padded with '#' placeholder tokens up to ``max_sequence_size``."""

    def __init__(self):
        super().__init__()

    def tokenize(self, sequences: List[List[str]], max_sequence_size=100, **kwargs):
        """Return an array of shape (n_sequences, 1) holding padded strings."""
        encoded = []
        for tokens in sequences:
            text = ' '.join(tokens[:max_sequence_size])
            pad_count = max_sequence_size - len(tokens)
            if pad_count > 0:
                # Append '#' placeholders so every entry has the same token count.
                text = text + ' ' + ' '.join(['#'] * pad_count)
            encoded.append([text])
        return np.asarray(encoded)
class W2VTokenizer(Tokenizer):
    """Maps tokens to row indices of a pretrained word-vector file.

    Index 0 is reserved for padding ('PAD'); vocabulary rows are numbered
    from 1 in file order. Tokens missing from the vocabulary fall back to
    the index of the literal word 'unknown'.
    """

    def __init__(self, w2v_model='glove.6B.200d.txt'):
        super().__init__()
        self.w2v_model = w2v_model
        self.word_indices = {'PAD': 0}
        count = 1
        # NOTE(review): the first line of the vector file is skipped —
        # presumably a word2vec-style header; verify for GloVe files.
        with open(os.path.join(VECTORS_DIR, w2v_model)) as file:
            for line in file.readlines()[1:]:
                self.word_indices[line.split()[0]] = count
                count += 1

    def tokenize(self, sequences: List[List[str]], max_sequence_size=100, **kwargs):
        """
        Produce W2V indices for each token in the list of tokens
        :param sequences: list of lists of tokens
        :param max_sequence_size: maximum padding
        :return: int32 array of shape (len(sequences), max_sequence_size),
            zero-padded on the right.
        """
        word_inputs = np.zeros((len(sequences), max_sequence_size), dtype=np.int32)
        for i, sentence in enumerate(sequences):
            for j, token in enumerate(sentence[:max_sequence_size]):
                # Single dict lookup instead of the previous 'in' check
                # followed by a second indexing; the 'unknown' fallback is
                # still resolved lazily, only when a token is missing.
                idx = self.word_indices.get(token.lower())
                if idx is None:
                    idx = self.word_indices['unknown']
                word_inputs[i, j] = idx
        return word_inputs
| [
"numpy.asarray",
"transformers.AutoTokenizer.from_pretrained",
"os.path.join"
] | [((432, 475), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['bert_version'], {}), '(bert_version)\n', (461, 475), False, 'from transformers import AutoTokenizer\n'), ((1550, 1573), 'numpy.asarray', 'np.asarray', (['word_inputs'], {}), '(word_inputs)\n', (1560, 1573), True, 'import numpy as np\n'), ((1801, 1837), 'os.path.join', 'os.path.join', (['VECTORS_DIR', 'w2v_model'], {}), '(VECTORS_DIR, w2v_model)\n', (1813, 1837), False, 'import os\n')] |
import numpy as np
import torch
class Rocket:
    """
    Planar rocket model. The state is [x, z, theta, xdot, zdot, thetadot],
    where x/z locate the CENTER of the rocket and theta is the angle between
    the rocket axis and the vertical line.
    """

    def __init__(self):
        self.length = 0.2
        self.mass = 0.5
        self.inertia = 0.005
        self.gravity = 9.81

    @property
    def hover_thrust(self):
        """Thrust magnitude that exactly balances gravity."""
        return self.mass * self.gravity

    def dynamics(self, x, u):
        """Continuous-time dynamics xdot = f(x, u).

        Works on both numpy arrays and torch tensors; u = [u0, u1] are the
        two thrust components in the body frame.
        """
        assert (x.shape == (6, ))
        assert (u.shape == (2, ))
        pose = x[:3]
        vel = x[3:]
        if isinstance(x, torch.Tensor):
            cos_t = torch.cos(pose[2])
            sin_t = torch.sin(pose[2])
            accel = torch.stack(
                ((cos_t * u[0] - sin_t * u[1]) / self.mass,
                 (sin_t * u[0] + cos_t * u[1]) / self.mass - self.gravity,
                 self.length / 2 * u[0] / self.inertia))
            return torch.cat((vel, accel))
        elif isinstance(x, np.ndarray):
            cos_t = np.cos(pose[2])
            sin_t = np.sin(pose[2])
            accel = np.array([
                (cos_t * u[0] - sin_t * u[1]) / self.mass,
                (sin_t * u[0] + cos_t * u[1]) / self.mass - self.gravity,
                self.length / 2 * u[0] / self.inertia
            ])
            return np.concatenate((vel, accel))

    def linearized_dynamics(self, x, u):
        """Jacobians (A, B) of the dynamics with respect to x and u at (x, u)."""
        cos_t = np.cos(x[2])
        sin_t = np.sin(x[2])
        A = np.zeros((6, 6))
        A[0:3, 3:6] = np.eye(3)
        A[3, 2] = (-sin_t * u[0] - cos_t * u[1]) / self.mass
        A[4, 2] = (cos_t * u[0] - sin_t * u[1]) / self.mass
        B = np.zeros((6, 2))
        B[3, 0] = cos_t / self.mass
        B[3, 1] = -sin_t / self.mass
        B[4, 0] = sin_t / self.mass
        B[4, 1] = cos_t / self.mass
        B[5, 0] = self.length / (2 * self.inertia)
        return A, B
class Rocket2(Rocket):
    """
    This rocket has state [x, z, theta, xdot, zdot, thetadot], where x/z are
    the position of the BOTTOM of the rocket. theta is the angle between the
    rocket and the vertical line.

    Because the tracked point is the bottom, the center-of-mass acceleration
    picks up extra terms in length/2 * d^2/dt^2 of cos(theta)/sin(theta).
    """
    def __init__(self):
        super(Rocket2, self).__init__()

    def dynamics(self, x, u):
        """Continuous-time dynamics xdot = f(x, u) for the bottom-tracked state.

        Accepts either a numpy array or a torch tensor for x (u must match);
        returns the time derivative in the same framework. For any other
        input type, falls through and returns None.
        """
        assert (x.shape == (6, ))
        assert (u.shape == (2, ))
        if isinstance(x, np.ndarray):
            c_theta = np.cos(x[2])
            s_theta = np.sin(x[2])
            theta_dot = x[5]
            # Angular acceleration from the lateral thrust component u[0]
            # acting at half the rocket length.
            theta_ddot = self.length * u[0] / (2 * self.inertia)
            # The double time derivative of cos(theta)
            c_theta_ddot = -s_theta * theta_ddot - c_theta * (theta_dot**2)
            # The double time derivative of sin(theta)
            s_theta_ddot = c_theta * theta_ddot - s_theta * (theta_dot**2)
            # Bottom-point acceleration = COM acceleration plus the rigid-body
            # correction length/2 * (d^2/dt^2 of the direction terms).
            xddot = (c_theta * u[0] - s_theta *
                     u[1]) / self.mass + self.length / 2 * s_theta_ddot
            zddot = (
                s_theta * u[0] + c_theta * u[1]
            ) / self.mass - self.gravity - self.length / 2 * c_theta_ddot
            return np.array([x[3], x[4], x[5], xddot, zddot, theta_ddot])
        elif isinstance(x, torch.Tensor):
            c_theta = torch.cos(x[2])
            s_theta = torch.sin(x[2])
            theta_dot = x[5]
            theta_ddot = self.length * u[0] / (2 * self.inertia)
            # The double time derivative of cos(theta)
            c_theta_ddot = -s_theta * theta_ddot - c_theta * (theta_dot**2)
            s_theta_ddot = c_theta * theta_ddot - s_theta * (theta_dot**2)
            xddot = (c_theta * u[0] - s_theta *
                     u[1]) / self.mass + self.length / 2 * s_theta_ddot
            zddot = (
                s_theta * u[0] + c_theta * u[1]
            ) / self.mass - self.gravity - self.length / 2 * c_theta_ddot
            return torch.stack((x[3], x[4], x[5], xddot, zddot, theta_ddot))

    def linearized_dynamics(self, x, u):
        """Jacobians (A, B) of the bottom-tracked dynamics at (x, u).

        The entries are the hand-derived partial derivatives of the
        expressions in ``dynamics``; note the extra theta/thetadot coupling
        terms caused by the length/2 correction.
        """
        assert (x.shape == (6, ))
        assert (u.shape == (2, ))
        s_theta = np.sin(x[2])
        c_theta = np.cos(x[2])
        thetadot = x[5]
        A = np.zeros((6, 6))
        B = np.zeros((6, 2))
        # d(position)/d(velocity) block.
        A[:3, 3:] = np.eye(3)
        # d(xddot)/d(theta): thrust rotation plus derivative of s_theta_ddot.
        A[3, 2] = (-s_theta * u[0] -
                   c_theta * u[1]) / self.mass + self.length / 2 * (
                       -s_theta * self.length * u[0] /
                       (2 * self.inertia) - c_theta * (thetadot**2))
        # d(xddot)/d(thetadot) from the -s_theta * thetadot^2 term.
        A[3, 5] = self.length / 2 * (-s_theta) * 2 * thetadot
        # d(zddot)/d(theta): analogous terms for the vertical component.
        A[4, 2] = (c_theta * u[0] -
                   s_theta * u[1]) / self.mass + self.length / 2 * (
                       c_theta * self.length * u[0] /
                       (2 * self.inertia) - s_theta * (thetadot**2))
        A[4, 5] = self.length / 2 * c_theta * 2 * thetadot
        # d(xddot)/du: direct thrust term plus the theta_ddot contribution
        # routed through s_theta_ddot (hence the length^2 factor).
        B[3, 0] = c_theta / self.mass + self.length**2 / 4 * c_theta / (
            self.inertia)
        B[3, 1] = -s_theta / self.mass
        B[4,
          0] = s_theta / self.mass + self.length**2 * s_theta / (4 *
                                                                 self.inertia)
        B[4, 1] = c_theta / self.mass
        B[5, 0] = self.length / (2 * self.inertia)
        return A, B
class RocketVisualizer:
    """Matplotlib renderer for the planar rocket: three filled polygons
    (body, nose cone, nozzle) whose vertices are repositioned by draw()."""

    def __init__(self, ax, x_lim, y_lim, length):
        self.ax = ax
        self.ax.set_aspect("equal")
        self.ax.set_xlim(x_lim[0], x_lim[1])
        self.ax.set_ylim(y_lim[0], y_lim[1])
        self.length = length
        # Polygon outlines in body coordinates (row 0 = x, row 1 = y),
        # scaled by the rocket length.
        self.body = length * np.array([[-0.05, 0.05, 0.05, -0.05, -0.05],
                                       [-0.5, -0.5, 0.5, 0.5, -0.5]])
        self.head = length * np.array([[-0.05, 0.05, 0.0, -0.05],
                                       [0.5, 0.5, 0.6, 0.5]])
        self.bottom = length * np.array([[-0.05, -0.08, 0.08, 0.05, -0.05],
                                         [-0.5, -0.55, -0.55, -0.5, -0.5]])
        self.body_fill = self.ax.fill(self.body[0, :], self.body[1, :],
                                      zorder=1, edgecolor='k',
                                      facecolor=[.6, .6, .6])
        self.head_fill = self.ax.fill(self.head[0, :], self.head[1, :],
                                      zorder=0, edgecolor="k",
                                      facecolor=[0, 0, 0])
        self.bottom_fill = self.ax.fill(self.bottom[0, :], self.bottom[1, :],
                                        zorder=0, edgecolor="k",
                                        facecolor=[0, 0, 0])

    def draw(self, x):
        """Move the polygons to pose x = [x, z, theta, ...] by rotating the
        body-frame outlines by theta and translating to (x[0], x[1])."""
        cos_t = np.cos(x[2])
        sin_t = np.sin(x[2])
        rot = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
        for outline, fill in ((self.body, self.body_fill),
                              (self.head, self.head_fill),
                              (self.bottom, self.bottom_fill)):
            pts = rot @ outline
            verts = fill[0].get_path().vertices
            verts[:, 0] = x[0] + pts[0, :]
            verts[:, 1] = x[1] + pts[1, :]
| [
"numpy.eye",
"torch.stack",
"numpy.zeros",
"torch.cat",
"torch.cos",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot",
"torch.sin",
"numpy.concatenate"
] | [((1451, 1467), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (1459, 1467), True, 'import numpy as np\n'), ((1480, 1496), 'numpy.zeros', 'np.zeros', (['(6, 2)'], {}), '((6, 2))\n', (1488, 1496), True, 'import numpy as np\n'), ((1515, 1527), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (1521, 1527), True, 'import numpy as np\n'), ((1546, 1558), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (1552, 1558), True, 'import numpy as np\n'), ((1581, 1590), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1587, 1590), True, 'import numpy as np\n'), ((3969, 3981), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (3975, 3981), True, 'import numpy as np\n'), ((4000, 4012), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (4006, 4012), True, 'import numpy as np\n'), ((4049, 4065), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (4057, 4065), True, 'import numpy as np\n'), ((4078, 4094), 'numpy.zeros', 'np.zeros', (['(6, 2)'], {}), '((6, 2))\n', (4086, 4094), True, 'import numpy as np\n'), ((4115, 4124), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4121, 4124), True, 'import numpy as np\n'), ((6841, 6861), 'numpy.dot', 'np.dot', (['R', 'self.body'], {}), '(R, self.body)\n', (6847, 6861), True, 'import numpy as np\n'), ((7013, 7033), 'numpy.dot', 'np.dot', (['R', 'self.head'], {}), '(R, self.head)\n', (7019, 7033), True, 'import numpy as np\n'), ((7185, 7207), 'numpy.dot', 'np.dot', (['R', 'self.bottom'], {}), '(R, self.bottom)\n', (7191, 7207), True, 'import numpy as np\n'), ((664, 676), 'numpy.cos', 'np.cos', (['q[2]'], {}), '(q[2])\n', (670, 676), True, 'import numpy as np\n'), ((699, 711), 'numpy.sin', 'np.sin', (['q[2]'], {}), '(q[2])\n', (705, 711), True, 'import numpy as np\n'), ((732, 899), 'numpy.array', 'np.array', (['[(c_theta * u[0] - s_theta * u[1]) / self.mass, (s_theta * u[0] + c_theta *\n u[1]) / self.mass - self.gravity, self.length / 2 * u[0] / self.inertia]'], {}), '([(c_theta * u[0] - s_theta * u[1]) / self.mass, 
(s_theta * u[0] + \n c_theta * u[1]) / self.mass - self.gravity, self.length / 2 * u[0] /\n self.inertia])\n', (740, 899), True, 'import numpy as np\n'), ((972, 1001), 'numpy.concatenate', 'np.concatenate', (['(qdot, qddot)'], {}), '((qdot, qddot))\n', (986, 1001), True, 'import numpy as np\n'), ((2396, 2408), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (2402, 2408), True, 'import numpy as np\n'), ((2431, 2443), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (2437, 2443), True, 'import numpy as np\n'), ((3027, 3081), 'numpy.array', 'np.array', (['[x[3], x[4], x[5], xddot, zddot, theta_ddot]'], {}), '([x[3], x[4], x[5], xddot, zddot, theta_ddot])\n', (3035, 3081), True, 'import numpy as np\n'), ((1066, 1081), 'torch.cos', 'torch.cos', (['q[2]'], {}), '(q[2])\n', (1075, 1081), False, 'import torch\n'), ((1104, 1119), 'torch.sin', 'torch.sin', (['q[2]'], {}), '(q[2])\n', (1113, 1119), False, 'import torch\n'), ((1140, 1309), 'torch.stack', 'torch.stack', (['((c_theta * u[0] - s_theta * u[1]) / self.mass, (s_theta * u[0] + c_theta *\n u[1]) / self.mass - self.gravity, self.length / 2 * u[0] / self.inertia)'], {}), '(((c_theta * u[0] - s_theta * u[1]) / self.mass, (s_theta * u[0] +\n c_theta * u[1]) / self.mass - self.gravity, self.length / 2 * u[0] /\n self.inertia))\n', (1151, 1309), False, 'import torch\n'), ((1372, 1396), 'torch.cat', 'torch.cat', (['(qdot, qddot)'], {}), '((qdot, qddot))\n', (1381, 1396), False, 'import torch\n'), ((3146, 3161), 'torch.cos', 'torch.cos', (['x[2]'], {}), '(x[2])\n', (3155, 3161), False, 'import torch\n'), ((3184, 3199), 'torch.sin', 'torch.sin', (['x[2]'], {}), '(x[2])\n', (3193, 3199), False, 'import torch\n'), ((3783, 3840), 'torch.stack', 'torch.stack', (['(x[3], x[4], x[5], xddot, zddot, theta_ddot)'], {}), '((x[3], x[4], x[5], xddot, zddot, theta_ddot))\n', (3794, 3840), False, 'import torch\n'), ((5424, 5467), 'numpy.array', 'np.array', (['[-0.05, 0.05, 0.05, -0.05, -0.05]'], {}), '([-0.05, 0.05, 0.05, -0.05, 
-0.05])\n', (5432, 5467), True, 'import numpy as np\n'), ((5496, 5534), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.5, 0.5, -0.5]'], {}), '([-0.5, -0.5, 0.5, 0.5, -0.5])\n', (5504, 5534), True, 'import numpy as np\n'), ((5582, 5615), 'numpy.array', 'np.array', (['[-0.05, 0.05, 0, -0.05]'], {}), '([-0.05, 0.05, 0, -0.05])\n', (5590, 5615), True, 'import numpy as np\n'), ((5662, 5692), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.6, 0.5]'], {}), '([0.5, 0.5, 0.6, 0.5])\n', (5670, 5692), True, 'import numpy as np\n'), ((5755, 5798), 'numpy.array', 'np.array', (['[-0.05, -0.08, 0.08, 0.05, -0.05]'], {}), '([-0.05, -0.08, 0.08, 0.05, -0.05])\n', (5763, 5798), True, 'import numpy as np\n'), ((5827, 5869), 'numpy.array', 'np.array', (['[-0.5, -0.55, -0.55, -0.5, -0.5]'], {}), '([-0.5, -0.55, -0.55, -0.5, -0.5])\n', (5835, 5869), True, 'import numpy as np\n'), ((6746, 6758), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (6752, 6758), True, 'import numpy as np\n'), ((6799, 6811), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (6805, 6811), True, 'import numpy as np\n'), ((6813, 6825), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (6819, 6825), True, 'import numpy as np\n'), ((6761, 6773), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (6767, 6773), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""
SP8_tiff.py
<NAME>, 20 Sept 2020
mailto:<EMAIL>
required packages: matplotlib, numpy, scipy, czifile, roipoly, tifffile

Interactive script: reads three multi-channel TIFF stacks, concatenates them
along the time axis, subtracts a user-drawn background, extracts per-ROI
intensity traces and saves them to a user-chosen CSV file.
"""
from SZmicroscopy import *
import numpy as np
################ Set tiff format
# img_array = Read_1by1_TCYX(n_time=2, n_channel=3) # for 1by1 reading
# img_array = Read_by_channel(n_channel=3) # Time lapse from SP8
# Read three 3-channel stacks; [0:-1] drops the last frame of each
# (presumably a truncated/incomplete frame — verify against the acquisition).
img_array1 = Read_by_channel(3)[0:-1]
img_array2 = Read_by_channel(3)[0:-1]
img_array3 = Read_by_channel(3)[0:-1]
# Stitch the three recordings into one time series (axis 0 = time).
img_array = np.concatenate((img_array1, img_array2, img_array3), axis=0)
img_obj = imageStack(imgArray=img_array, meta='TCYX')
# (img_array, path) = load_msr()
# img_obj = imageStack(imgArray=img_array, meta='CYX')
# Interactively outline a background region, then subtract its mean intensity.
bg_roi = img_obj.draw_roi2(title="Please draw background")
b = img_obj.get_intensity_list(bg_roi, average_all_roi=True)
sub_bg_obj = subtract_background(img_obj, b)
# Outline the cells and extract per-ROI intensity traces (one trace per ROI).
cell_roi = sub_bg_obj.draw_roi2(title="Please draw ROIs for the cells.")
cell = sub_bg_obj.get_intensity_list(cell_roi, average_all_roi=False)
# Flatten to 2-D: channels side by side, one column per ROI per channel
# (assumes cell is indexed [time, roi, channel] — TODO confirm).
cell_flat = np.concatenate((cell[:,:,0].T, cell[:,:,1].T, cell[:,:,2].T), axis=1)
# Ask for an output path and write the traces as CSV.
file_path = tkFileDialog.asksaveasfilename()
np.savetxt(file_path,
           cell_flat, delimiter=",")
# ratio1 = np.nan_to_num(np.true_divide(sub_bg_obj.stack[:, 1], sub_bg_obj.stack[:, 0]))
# ratio2 = np.nan_to_num(np.true_divide(sub_bg_obj.stack[:, 1], sub_bg_obj.stack[:, 2]))
# ratio1.astype('float32')
# ratio2.astype('float32')
# # rArray = np.stack((ratio1, ratio2), axis=1).astype('float32')
# rArray = np.stack((ratio1, ratio2), axis=1)
#
# ratio_obj = imageStack(imgArray=rArray, meta='TCYX')
#
# file_path = sub_bg_obj.write_ome_tiff()
# ratio_obj.write_ome_tiff(dtype='float32',
#                          file_path=file_path.split(".")[0] + "_ratio." + file_path.split(".")[1])
#tifffile.imwrite("corrected.tiff", corrImg, metadata={'axes': 'CYX'})
"numpy.savetxt",
"numpy.concatenate"
] | [((519, 579), 'numpy.concatenate', 'np.concatenate', (['(img_array1, img_array2, img_array3)'], {'axis': '(0)'}), '((img_array1, img_array2, img_array3), axis=0)\n', (533, 579), True, 'import numpy as np\n'), ((1045, 1120), 'numpy.concatenate', 'np.concatenate', (['(cell[:, :, 0].T, cell[:, :, 1].T, cell[:, :, 2].T)'], {'axis': '(1)'}), '((cell[:, :, 0].T, cell[:, :, 1].T, cell[:, :, 2].T), axis=1)\n', (1059, 1120), True, 'import numpy as np\n'), ((1161, 1208), 'numpy.savetxt', 'np.savetxt', (['file_path', 'cell_flat'], {'delimiter': '""","""'}), "(file_path, cell_flat, delimiter=',')\n", (1171, 1208), True, 'import numpy as np\n')] |
import numpy as np
from pyearth import Earth
from timeit import Timer
# The robot arm example, as defined in:
# Fast MARS, <NAME>, Technical Report No.110, May 1993, section 6.2.
# Benchmark: fit the same MARS model twice (standard vs. fast heuristics)
# on the synthetic robot-arm distance data and compare MSE and wall time.
np.random.seed(2)
nb_examples = 400
# Random arm configurations: two joint angles, an elevation angle and
# two link lengths.
theta1 = np.random.uniform(0, 2 * np.pi, size=nb_examples)
theta2 = np.random.uniform(0, 2 * np.pi, size=nb_examples)
phi = np.random.uniform(-np.pi/2, np.pi/2, size=nb_examples)
l1 = np.random.uniform(0, 1, size=nb_examples)
l2 = np.random.uniform(0, 1, size=nb_examples)
# End-effector position in Cartesian coordinates.
x = l1 * np.cos(theta1) - l2 * np.cos(theta1 + theta2) * np.cos(phi)
y = l1 * np.sin(theta1) - l2 * np.sin(theta1 + theta2) * np.cos(phi)
z = l2 * np.sin(theta2) * np.sin(phi)
# Regression target: distance of the end effector from the origin.
d = np.sqrt(x**2 + y**2 + z**2)
inputs = np.concatenate([theta1[:, np.newaxis],
                    theta2[:, np.newaxis],
                    phi[:, np.newaxis],
                    l1[:, np.newaxis],
                    l2[:, np.newaxis]], axis=1)
outputs = d
# Hyperparameters shared by both fits.
hp = dict(
    max_degree=5,
    minspan=1,
    endspan=1,
    max_terms=100,
    allow_linear=False,
)
# Baseline: standard MARS forward pass.
model_normal = Earth(**hp)
t = Timer(lambda: model_normal.fit(inputs, outputs))
duration_normal = t.timeit(number=1)
print("Normal : MSE={0:.5f}, duration={1:.2f}s".
      format(model_normal.mse_, duration_normal))
# Fast MARS: restrict parent search (fast_K) and refresh cadence (fast_h).
model_fast = Earth(use_fast=True,
                   fast_K=5,
                   fast_h=1,
                   **hp)
t = Timer(lambda: model_fast.fit(inputs, outputs))
duration_fast = t.timeit(number=1)
print("Fast: MSE={0:.5f}, duration={1:.2f}s".
      format(model_fast.mse_, duration_fast))
# Summarize the accuracy/speed trade-off.
speedup = duration_normal / duration_fast
print("diagnostic : MSE goes from {0:.5f} to {1:.5f} but it "
      "is {2:.2f}x faster".
      format(model_normal.mse_, model_fast.mse_, speedup))
| [
"numpy.random.uniform",
"numpy.random.seed",
"pyearth.Earth",
"numpy.sin",
"numpy.cos",
"numpy.concatenate",
"numpy.sqrt"
] | [((181, 198), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (195, 198), True, 'import numpy as np\n'), ((226, 275), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {'size': 'nb_examples'}), '(0, 2 * np.pi, size=nb_examples)\n', (243, 275), True, 'import numpy as np\n'), ((285, 334), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)'], {'size': 'nb_examples'}), '(0, 2 * np.pi, size=nb_examples)\n', (302, 334), True, 'import numpy as np\n'), ((341, 399), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi / 2)', '(np.pi / 2)'], {'size': 'nb_examples'}), '(-np.pi / 2, np.pi / 2, size=nb_examples)\n', (358, 399), True, 'import numpy as np\n'), ((401, 442), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'nb_examples'}), '(0, 1, size=nb_examples)\n', (418, 442), True, 'import numpy as np\n'), ((448, 489), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'nb_examples'}), '(0, 1, size=nb_examples)\n', (465, 489), True, 'import numpy as np\n'), ((670, 703), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2 + z ** 2)'], {}), '(x ** 2 + y ** 2 + z ** 2)\n', (677, 703), True, 'import numpy as np\n'), ((709, 842), 'numpy.concatenate', 'np.concatenate', (['[theta1[:, np.newaxis], theta2[:, np.newaxis], phi[:, np.newaxis], l1[:, np\n .newaxis], l2[:, np.newaxis]]'], {'axis': '(1)'}), '([theta1[:, np.newaxis], theta2[:, np.newaxis], phi[:, np.\n newaxis], l1[:, np.newaxis], l2[:, np.newaxis]], axis=1)\n', (723, 842), True, 'import numpy as np\n'), ((1090, 1101), 'pyearth.Earth', 'Earth', ([], {}), '(**hp)\n', (1095, 1101), False, 'from pyearth import Earth\n'), ((1304, 1350), 'pyearth.Earth', 'Earth', ([], {'use_fast': '(True)', 'fast_K': '(5)', 'fast_h': '(1)'}), '(use_fast=True, fast_K=5, fast_h=1, **hp)\n', (1309, 1350), False, 'from pyearth import Earth\n'), ((654, 665), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (660, 665), True, 'import numpy as np\n'), ((499, 513), 
'numpy.cos', 'np.cos', (['theta1'], {}), '(theta1)\n', (505, 513), True, 'import numpy as np\n'), ((547, 558), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (553, 558), True, 'import numpy as np\n'), ((568, 582), 'numpy.sin', 'np.sin', (['theta1'], {}), '(theta1)\n', (574, 582), True, 'import numpy as np\n'), ((616, 627), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (622, 627), True, 'import numpy as np\n'), ((637, 651), 'numpy.sin', 'np.sin', (['theta2'], {}), '(theta2)\n', (643, 651), True, 'import numpy as np\n'), ((521, 544), 'numpy.cos', 'np.cos', (['(theta1 + theta2)'], {}), '(theta1 + theta2)\n', (527, 544), True, 'import numpy as np\n'), ((590, 613), 'numpy.sin', 'np.sin', (['(theta1 + theta2)'], {}), '(theta1 + theta2)\n', (596, 613), True, 'import numpy as np\n')] |
import numpy as np
import math
import dlib
def ExtractNormalLandMark(image):
    """Detect a face with dlib and return its normalized 68-point landmarks.

    Landmarks are centered on the mean of points 0..14, scaled by the face
    box width/height, and accompanied by a head-roll angle estimated from
    the nose-bridge points (27 and 29). When several faces are detected,
    the last one wins (matching the original loop behavior).

    :param image: image array accepted by dlib's frontal face detector.
    :return: (finalLands, biasdegree) where finalLands is a (2, 68) float64
        array of normalized x/y coordinates (all zeros when no face is
        found) and biasdegree is the roll angle in degrees (0 when no face).
    """
    landx = np.zeros((1, 68), np.float64)
    landy = np.zeros((1, 68), np.float64)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    detections = detector(image, 1)
    # Defaults so a frame with no detected face returns zeros instead of
    # raising NameError (landx2/landy2/biasdegree used to be unbound then).
    landx2 = np.zeros((1, 68), np.float64)
    landy2 = np.zeros((1, 68), np.float64)
    biasdegree = 0
    for k, d in enumerate(detections):
        shape = predictor(image, d)
        Xface = d.right() - d.left()
        Yface = d.bottom() - d.top()
        # Loading landmark coordinates.
        for i in range(0, 68):
            landx[0, i] = float(shape.part(i).x)
            landy[0, i] = float(shape.part(i).y)
        # Recenter on the mean of the jaw-line points and scale by face size.
        xmean = np.mean(landx[0, 0:15])
        ymean = np.mean(landy[0, 0:15])
        landx2 = (landx - xmean) / Xface
        landy2 = (landy - ymean) / Yface
        # Roll estimate from nose-bridge points 27 and 29; guard the
        # vertical case where the slope is undefined.
        if landx[0, 27] == landx[0, 29]:
            biasdegree = 0
        else:
            biasdegree = int(math.atan((landy[0, 27] - landy[0, 29]) / (landx[0, 27] - landx[0, 29])) * 180 / math.pi)
            biasdegree = biasdegree + 90 if biasdegree < 0 else biasdegree - 90
    finalLands = np.zeros((2, 68), np.float64)
    finalLands[0, :] = landx2
    finalLands[1, :] = landy2
    return finalLands, biasdegree
| [
"math.atan",
"numpy.zeros",
"numpy.mean",
"dlib.get_frontal_face_detector",
"dlib.shape_predictor"
] | [((119, 148), 'numpy.zeros', 'np.zeros', (['(1, 68)', 'np.float64'], {}), '((1, 68), np.float64)\n', (127, 148), True, 'import numpy as np\n'), ((162, 191), 'numpy.zeros', 'np.zeros', (['(1, 68)', 'np.float64'], {}), '((1, 68), np.float64)\n', (170, 191), True, 'import numpy as np\n'), ((210, 242), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (240, 242), False, 'import dlib\n'), ((260, 321), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (280, 321), False, 'import dlib\n'), ((774, 797), 'numpy.mean', 'np.mean', (['landx[0, 0:15]'], {}), '(landx[0, 0:15])\n', (781, 797), True, 'import numpy as np\n'), ((810, 833), 'numpy.mean', 'np.mean', (['landy[0, 0:15]'], {}), '(landy[0, 0:15])\n', (817, 833), True, 'import numpy as np\n'), ((1284, 1313), 'numpy.zeros', 'np.zeros', (['(2, 68)', 'np.float64'], {}), '((2, 68), np.float64)\n', (1292, 1313), True, 'import numpy as np\n'), ((1097, 1169), 'math.atan', 'math.atan', (['((landy[0, 27] - landy[0, 29]) / (landx[0, 27] - landx[0, 29]))'], {}), '((landy[0, 27] - landy[0, 29]) / (landx[0, 27] - landx[0, 29]))\n', (1106, 1169), False, 'import math\n')] |
from .. import moment
import numpy as np
import pandas as pd
import datetime as dt
import tzlocal
import pytz
import time
class TestMoment:
    """Unit tests for the project's ``moment`` time-wrapper module."""

    def test_moment(self):
        # Moment should accept every supported timestamp representation and
        # normalize it to the same POSIX value.
        test0 = float(1580780607)
        test = test0
        assert moment.Moment(test)._posix == test0
        test = dt.datetime.fromtimestamp(test0)
        assert moment.Moment(test)._posix == test0
        # Note: converts the datetime from the previous step, not test0.
        test = np.datetime64(test)
        assert moment.Moment(test)._posix == test0
        test = pd.Timestamp.fromtimestamp(test0)
        assert moment.Moment(test)._posix == test0
        test = pd.Timestamp(test0, unit='s', tz=tzlocal.get_localzone())
        assert moment.Moment(test)._posix == test0

    def test_to(self):
        # Round-trip conversions out of Moment into each representation,
        # with and without explicit timezones.
        test0 = float(1580780607)
        test = moment.Moment(test0)
        assert test.to_unix_timestamp() == test0
        assert test.to_datetime() == dt.datetime.fromtimestamp(test0)
        assert test.to_datetime(
            tz=moment.Moment.get_utc_timezone()) == dt.datetime.fromtimestamp(test0, moment.Moment.get_utc_timezone())
        assert test.to_datetime(
            tz=moment.Moment.get_local_timezone()) == dt.datetime.fromtimestamp(test0, moment.Moment.get_local_timezone())
        assert test.to_datetime64() == np.datetime64(dt.datetime.fromtimestamp(test0))
        assert test.to_pandas_timestamp() == pd.Timestamp.fromtimestamp(test0)
        assert test.to_pandas_timestamp(tz=moment.Moment.get_utc_timezone(
        )) == pd.Timestamp(test0, unit='s', tz=moment.Moment.get_utc_timezone())
        # String formatting: %z is empty for naive times, explicit for UTC.
        assert test.to_string(fmt='%Y') == '2020'
        assert test.to_string(fmt='%Y%z') == '2020'
        assert test.to_string(
            fmt='%Y%z', tz=moment.Moment.get_utc_timezone()) == '2020+0000'

    def test_time_zones(self):
        assert moment.Moment.get_timezone('UTC') == pytz.utc

    def test_get_duration(self):
        # Scalar and element-wise duration computations.
        ts = time.time()
        assert moment.Moment.get_duration(ts, ts + 1) == 1
        ts = time.time()
        a = [ts] * 10
        b = [ts + 10] * 10
        np.testing.assert_array_equal(
            moment.Moment.get_durations(a, b), 10)

    def test_get_sequence(self):
        # Generated sequences should be spaced at 1/sr seconds for several
        # sampling rates, in both the default and 'posix' output formats.
        st = time.time()
        result = moment.Moment.get_sequence(st, sr=1, N=2)
        assert moment.Moment.get_duration(result[0], result[1]) == 1
        result = moment.Moment.get_sequence(st, sr=100, N=2)
        assert round(moment.Moment.get_duration(
            result[0], result[1]), ndigits=2) == 1 / 100.0
        result = moment.Moment.get_sequence(st, sr=1000, N=2)
        np.testing.assert_almost_equal(moment.Moment.get_duration(
            result[0], result[1]), 1 / 1000.0, decimal=2)
        result = moment.Moment.get_sequence(st, sr=1, N=2, format='posix')
        assert moment.Moment.get_duration(result[0], result[1]) == 1
        result = moment.Moment.get_sequence(st, sr=100, N=2, format='posix')
        np.testing.assert_almost_equal(moment.Moment.get_duration(
            result[0], result[1]), 1 / 100.0, decimal=2)
        result = moment.Moment.get_sequence(st, sr=1000, N=2, format='posix')
        np.testing.assert_almost_equal(moment.Moment.get_duration(
            result[0], result[1]), 1 / 1000.0, decimal=2)
        # Without the extra endpoint the sequence has exactly N entries.
        result = moment.Moment.get_sequence(
            st, sr=1, N=2, endpoint_as_extra=False, format='posix')
        assert moment.Moment.get_duration(result[0], result[1]) == 1
        assert len(result) == 2

    def test_seq_to_unix_timestamp(self):
        # Sequences of floats, datetimes and numpy datetime64 values should
        # all convert to (approximately) the same POSIX timestamps.
        st0 = time.time()
        et = st0 + 1
        ts0 = np.arange(st0, et, step=1.0/50)
        result = moment.Moment.seq_to_unix_timestamp(ts0)
        np.testing.assert_array_equal(result, ts0)
        st = dt.datetime.fromtimestamp(st0)
        ts = []
        for n in range(0, 50):
            ts.append(st + dt.timedelta(milliseconds=1000.0/50 * n))
        result = moment.Moment.seq_to_unix_timestamp(ts)
        np.testing.assert_array_almost_equal(result, ts0, decimal=3)
        st = dt.datetime.fromtimestamp(st0)
        ts = []
        for n in range(0, 50):
            ts.append(st + dt.timedelta(milliseconds=1000.0/50 * n))
        ts = pd.to_datetime(ts).values
        result = moment.Moment.seq_to_unix_timestamp(ts)
        np.testing.assert_array_almost_equal(result, ts0, decimal=3)
| [
"tzlocal.get_localzone",
"pandas.Timestamp.fromtimestamp",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"time.time",
"numpy.arange",
"pandas.to_datetime",
"datetime.timedelta",
"datetime.datetime.fromtimestamp",
"numpy.testing.assert_array_almost_equal"
] | [((290, 322), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['test0'], {}), '(test0)\n', (315, 322), True, 'import datetime as dt\n'), ((389, 408), 'numpy.datetime64', 'np.datetime64', (['test'], {}), '(test)\n', (402, 408), True, 'import numpy as np\n'), ((475, 508), 'pandas.Timestamp.fromtimestamp', 'pd.Timestamp.fromtimestamp', (['test0'], {}), '(test0)\n', (501, 508), True, 'import pandas as pd\n'), ((1876, 1887), 'time.time', 'time.time', ([], {}), '()\n', (1885, 1887), False, 'import time\n'), ((1961, 1972), 'time.time', 'time.time', ([], {}), '()\n', (1970, 1972), False, 'import time\n'), ((2159, 2170), 'time.time', 'time.time', ([], {}), '()\n', (2168, 2170), False, 'import time\n'), ((3475, 3486), 'time.time', 'time.time', ([], {}), '()\n', (3484, 3486), False, 'import time\n'), ((3522, 3555), 'numpy.arange', 'np.arange', (['st0', 'et'], {'step': '(1.0 / 50)'}), '(st0, et, step=1.0 / 50)\n', (3531, 3555), True, 'import numpy as np\n'), ((3620, 3662), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'ts0'], {}), '(result, ts0)\n', (3649, 3662), True, 'import numpy as np\n'), ((3677, 3707), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['st0'], {}), '(st0)\n', (3702, 3707), True, 'import datetime as dt\n'), ((3889, 3949), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'ts0'], {'decimal': '(3)'}), '(result, ts0, decimal=3)\n', (3925, 3949), True, 'import numpy as np\n'), ((3964, 3994), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['st0'], {}), '(st0)\n', (3989, 3994), True, 'import datetime as dt\n'), ((4215, 4275), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result', 'ts0'], {'decimal': '(3)'}), '(result, ts0, decimal=3)\n', (4251, 4275), True, 'import numpy as np\n'), ((864, 896), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['test0'], {}), '(test0)\n', 
(889, 896), True, 'import datetime as dt\n'), ((1337, 1370), 'pandas.Timestamp.fromtimestamp', 'pd.Timestamp.fromtimestamp', (['test0'], {}), '(test0)\n', (1363, 1370), True, 'import pandas as pd\n'), ((4124, 4142), 'pandas.to_datetime', 'pd.to_datetime', (['ts'], {}), '(ts)\n', (4138, 4142), True, 'import pandas as pd\n'), ((608, 631), 'tzlocal.get_localzone', 'tzlocal.get_localzone', ([], {}), '()\n', (629, 631), False, 'import tzlocal\n'), ((1258, 1290), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['test0'], {}), '(test0)\n', (1283, 1290), True, 'import datetime as dt\n'), ((3782, 3824), 'datetime.timedelta', 'dt.timedelta', ([], {'milliseconds': '(1000.0 / 50 * n)'}), '(milliseconds=1000.0 / 50 * n)\n', (3794, 3824), True, 'import datetime as dt\n'), ((4069, 4111), 'datetime.timedelta', 'dt.timedelta', ([], {'milliseconds': '(1000.0 / 50 * n)'}), '(milliseconds=1000.0 / 50 * n)\n', (4081, 4111), True, 'import datetime as dt\n')] |
# Call this function after running gld_align to visualize the aligned data.
# Assumptions:
# uE Interface : Channel 1, microelectrode recording, sampled at 32 kHz.
# Lf Interface : Channel 1, EOG channel, sampled at 1 kHz.
# Sync Interface: Digin 1, TTL input, sampled at 32 kHZ.
import numpy as np
import matplotlib.pyplot as plt
def gld_view_aligned(t1, t2):
    """Plot per-segment MER/EOG/analog traces to visually check alignment.

    Call after running ``gld_align``. For every segment, the normalized MER
    and EOG traces are overlaid together with the detected signal edges; the
    residual MER-EOG delay (mean over the first 10 edge pairs) is collected
    per segment and summarized in a final stem plot (in milliseconds).

    NOTE(review): assumes t1 holds the MER-device recording and t2 the
    EOG/LF-device recording, per the channel layout described at the top of
    this file -- confirm against the acquisition setup.

    :param t1: recording with ``segments`` exposing MER, aux/analog and sync data
    :param t2: recording with ``segments`` exposing the EOG (lf) channel
    """
    n = len(t1.segments)
    eog_delays = np.zeros(n) # holds mean MER-EOG misalignment per segment.
    segs = list(range(0, n))  # segments to display; defaults to all of them
    # segs = [9]   # uncomment to restrict the plots to specific segments
    for k in range(0, n):
        nseg = k
        if nseg not in segs:
            continue
        plt.figure(nseg+1)
        sf_mer = t1.segments[nseg].sampling_rate_mer[0] # MER sampling rate recorded on the first device.
        sf_eog = t2.segments[nseg].sampling_rate_lf[0] # EOG data sampling rate recorded on the second device.
        sf_lfp = t1.segments[nseg].sampling_rate_lf  # NOTE(review): unused below
        sf_analog = t1.segments[nseg].aux.sampling_rate;
        # Normalize data to more easily visualize alignment
        mer = t1.segments[nseg].channels[1].continuous # MER data
        mean = np.mean(mer)
        maxim = max(mer)
        mer = [(x - mean) / maxim for x in mer]
        eog = t2.segments[nseg].channels[1].lf
        mean = np.mean(eog)
        maxim = max(eog)
        eog = [(x - mean) / maxim for x in eog]
        # Analog aux channel, mean-removed and peak-normalized as well.
        analog = t1.segments[nseg].aux.channels[0].continuous;
        analog = np.subtract(analog, np.mean(analog));
        analog = np.divide(analog,max(analog));
        tanalog = np.linspace(0, len(analog)/sf_analog, num=len(analog))
        # time base for the mer:
        tm = np.linspace(0, len(mer) / sf_mer, num=len(mer))
        teog = np.linspace(0, len(eog) / sf_eog, num=len(eog))
        # Candidate MER edges: consecutive samples jumping by more than 0.25
        # of the normalized amplitude.
        nixmeraux = []
        for i in range(0, len(mer) - 1 ):
            if abs(mer[i+1] - mer[i]) > 0.25:
                nixmeraux.append(i)
        # Keep only edges separated by more than 1 ms, dropping samples that
        # belong to the same transition.
        nixmer = []
        for i in range(0, len(nixmeraux) - 1 ):
            if tm[nixmeraux[i+1]] - tm[nixmeraux[i]] > 0.001:
                nixmer.append(nixmeraux[i])
        # Same two-stage edge detection for the EOG trace (2 ms refractory).
        nixeogaux = []
        for i in range(0, len(eog) - 1):
            if abs(eog[i+1] - eog[i]) > 0.25:
                nixeogaux.append(i)
        nixeog = []
        for i in range(0, len(nixeogaux) - 1 ):
            if teog[nixeogaux[i+1]] - teog[nixeogaux[i]] > 0.002:
                nixeog.append(nixeogaux[i])
        # With at least 10 edges on both traces, estimate the residual delay
        # as the mean time difference over the first 10 edge pairs.
        if len([tm[i] for i in nixmer]) >= 10 and len([teog[i] for i in nixeog]) >= 10:
            aux = []
            for i in range(0, 10):
                #print(F'\ndiff (MER-EOG) = {tm[nixmer[i]] - teog[nixeog[i]]}')
                aux.append(tm[nixmer[i]] - teog[nixeog[i]])
            #print(F'\n(MER-EOG): nseg = {nseg}, mean = {np.mean(aux)}')
            eog_delays[nseg] = np.mean(aux)
        merPlot, = plt.plot(tm, mer, label='MER')
        eogPlot, = plt.plot(teog, eog, label='LFP')
        analogPlot = plt.plot(tanalog, analog, label='Analog')
        # plot( tlf, lfp)
        meredgePlot, = plt.plot([tm[x] for x in nixmer], [mer[x] for x in nixmer], '*', label='MER-EDGE')
        eogedgePlot, = plt.plot([teog[x] for x in nixeog], [eog[x] for x in nixeog], 'o', label='EOG-EDGE')
        plt.title('Segment ' + str(nseg))
        # Overlay the digital sync input (when present) so the TTL edges can
        # be compared against the detected signal edges.
        if t1.segments[nseg].sync:
            if any(t1.segments[nseg].sync.digin):
                diginPlot, = plt.plot(t1.segments[nseg].sync.rt_timestamps, t1.segments[nseg].sync.digin, '-*',
                                     label='DIGIN')
                plt.legend(handles=[merPlot, eogPlot, meredgePlot, eogedgePlot, diginPlot])
            else:
                plt.legend(handles=[merPlot, eogPlot, meredgePlot, eogedgePlot])
        else:
            plt.legend(handles=[merPlot, eogPlot, meredgePlot, eogedgePlot])
        plt.xlabel('Seconds')
        plt.ylabel('Normalized amplitude')
        plt.ylim((-2.5, 1.5))
    # Summary figure: absolute per-segment delay in milliseconds.
    plt.figure(n)
    plt.title('EOG delays')
    plt.stem([1000*abs(x) for x in eog_delays], use_line_collection=True)
    plt.ylabel( 'ms')
    plt.xlabel('Segment')
    print(F'\nmean delay = {np.mean(eog_delays)}')
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((404, 415), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (412, 415), True, 'import numpy as np\n'), ((3883, 3896), 'matplotlib.pyplot.figure', 'plt.figure', (['n'], {}), '(n)\n', (3893, 3896), True, 'import matplotlib.pyplot as plt\n'), ((3901, 3924), 'matplotlib.pyplot.title', 'plt.title', (['"""EOG delays"""'], {}), "('EOG delays')\n", (3910, 3924), True, 'import matplotlib.pyplot as plt\n'), ((4003, 4019), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ms"""'], {}), "('ms')\n", (4013, 4019), True, 'import matplotlib.pyplot as plt\n'), ((4025, 4046), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Segment"""'], {}), "('Segment')\n", (4035, 4046), True, 'import matplotlib.pyplot as plt\n'), ((4103, 4113), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4111, 4113), True, 'import matplotlib.pyplot as plt\n'), ((613, 633), 'matplotlib.pyplot.figure', 'plt.figure', (['(nseg + 1)'], {}), '(nseg + 1)\n', (623, 633), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1118), 'numpy.mean', 'np.mean', (['mer'], {}), '(mer)\n', (1113, 1118), True, 'import numpy as np\n'), ((1255, 1267), 'numpy.mean', 'np.mean', (['eog'], {}), '(eog)\n', (1262, 1267), True, 'import numpy as np\n'), ((2811, 2841), 'matplotlib.pyplot.plot', 'plt.plot', (['tm', 'mer'], {'label': '"""MER"""'}), "(tm, mer, label='MER')\n", (2819, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2893), 'matplotlib.pyplot.plot', 'plt.plot', (['teog', 'eog'], {'label': '"""LFP"""'}), "(teog, eog, label='LFP')\n", (2869, 2893), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2956), 'matplotlib.pyplot.plot', 'plt.plot', (['tanalog', 'analog'], {'label': '"""Analog"""'}), "(tanalog, analog, label='Analog')\n", (2923, 2956), True, 'import matplotlib.pyplot as plt\n'), ((3007, 3094), 'matplotlib.pyplot.plot', 'plt.plot', (['[tm[x] for x in nixmer]', '[mer[x] for x in nixmer]', '"""*"""'], {'label': '"""MER-EDGE"""'}), "([tm[x] for x in nixmer], [mer[x] for x in nixmer], '*', label=\n 
'MER-EDGE')\n", (3015, 3094), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3202), 'matplotlib.pyplot.plot', 'plt.plot', (['[teog[x] for x in nixeog]', '[eog[x] for x in nixeog]', '"""o"""'], {'label': '"""EOG-EDGE"""'}), "([teog[x] for x in nixeog], [eog[x] for x in nixeog], 'o', label=\n 'EOG-EDGE')\n", (3121, 3202), True, 'import matplotlib.pyplot as plt\n'), ((3783, 3804), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Seconds"""'], {}), "('Seconds')\n", (3793, 3804), True, 'import matplotlib.pyplot as plt\n'), ((3813, 3847), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized amplitude"""'], {}), "('Normalized amplitude')\n", (3823, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3856, 3877), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2.5, 1.5)'], {}), '((-2.5, 1.5))\n', (3864, 3877), True, 'import matplotlib.pyplot as plt\n'), ((1442, 1457), 'numpy.mean', 'np.mean', (['analog'], {}), '(analog)\n', (1449, 1457), True, 'import numpy as np\n'), ((2778, 2790), 'numpy.mean', 'np.mean', (['aux'], {}), '(aux)\n', (2785, 2790), True, 'import numpy as np\n'), ((3709, 3773), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[merPlot, eogPlot, meredgePlot, eogedgePlot]'}), '(handles=[merPlot, eogPlot, meredgePlot, eogedgePlot])\n', (3719, 3773), True, 'import matplotlib.pyplot as plt\n'), ((3356, 3457), 'matplotlib.pyplot.plot', 'plt.plot', (['t1.segments[nseg].sync.rt_timestamps', 't1.segments[nseg].sync.digin', '"""-*"""'], {'label': '"""DIGIN"""'}), "(t1.segments[nseg].sync.rt_timestamps, t1.segments[nseg].sync.digin,\n '-*', label='DIGIN')\n", (3364, 3457), True, 'import matplotlib.pyplot as plt\n'), ((3508, 3583), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[merPlot, eogPlot, meredgePlot, eogedgePlot, diginPlot]'}), '(handles=[merPlot, eogPlot, meredgePlot, eogedgePlot, diginPlot])\n', (3518, 3583), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3682), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 
'[merPlot, eogPlot, meredgePlot, eogedgePlot]'}), '(handles=[merPlot, eogPlot, meredgePlot, eogedgePlot])\n', (3628, 3682), True, 'import matplotlib.pyplot as plt\n'), ((4075, 4094), 'numpy.mean', 'np.mean', (['eog_delays'], {}), '(eog_delays)\n', (4082, 4094), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from scipy import integrate, stats
from numpy import absolute, mean
from itertools import islice
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels.stats.multicomp
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.formula.api as smf
import os
## Note: MAD isn't working; can come back to it later. See behavanalysis_part3 line 50 for examples
## This calculates 2 different ways of looking at reaction time: the first trial after a switch, "first_switch_trial",
# and the average of the first three switch trials, "average_switch_trial."
def create_df3(raw_data_location3):
    """Compute per-(participant, task, occurence) reaction-time statistics.

    Two switch-cost measures are derived from the trial-level data:
    ``first_switch_rt`` -- the RT of the first trial after a task switch --
    and ``average_switch_rt`` -- the running mean over the first 2-3 trials
    after a switch (depending on group size). Every group is then labelled
    with a ``switch_type`` (e.g. 'SS-DS' for spatial-span -> digit-span),
    and LMEM / ANOVA / t-test summaries are printed. Side effects: writes
    ``WithSwitchType.csv`` and several figures under ``Figures/``.

    :param raw_data_location3: path or open file handle of the trial-level
        CSV; must contain at least the columns ``auto_participant_id``,
        ``type``, ``occurence``, ``response_time`` and ``changed``.
    :return: DataFrame with one row per group and the computed statistics.
    """
    # FIX: the parameter used to be overwritten by open() on a hardcoded,
    # user-specific Windows path, so the caller's argument was ignored.
    df = pd.read_csv(raw_data_location3, header = 0)
    df_behavstats = pd.DataFrame()
    df_behavstats1 = pd.DataFrame()
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # LOOP WHICH CALCULATES AND CONCATS MAD, SD, MRT, MED
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    df.set_index(['auto_participant_id', 'type', 'occurence'], inplace = True)
    df_switch_type = df
    df_rt = df
    for group_i, group_v in df_rt.groupby(level=[0, 1, 2]):
        group_v = group_v.apply(pd.to_numeric, errors = 'coerce').dropna(how = 'all')
        mrt = group_v['response_time'].mean()
        SD = group_v['response_time'].std()
        med = group_v['response_time'].median()
        # RT of the very first trial after the switch into this group.
        switchtrial0 = group_v['response_time'].iloc[0]
        ## The below can be used if you want to use more than the 1st switch trial to calculate switch cost
        # switchtrial1 = group_v['response_time'].iloc[1]
        # if n > 2:
        #     switchtrial2 = group_v['response_time'].iloc[2]
        group_v.at[group_i, 'mean_rt'] = mrt
        group_v.at[group_i, 'SD_rt'] = SD
        group_v.at[group_i, 'median_rt'] = med
        group_v.at[group_i, 'first_switch_rt'] = switchtrial0
        group_v.reset_index(drop = False, inplace = True)
        df_behavstats1 = pd.concat([df_behavstats1, group_v], sort=False)
    df_behavstats1.set_index(['auto_participant_id', 'type', 'occurence'], inplace = True)
    df_behavstats1.drop(df_behavstats1.columns[df_behavstats1.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # LOOP WHICH CALCULATES AND CONCATS SWITCH RT
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    for group_i, group_v in df_behavstats1.groupby(level=[0, 1, 2]):
        n = len(group_v)  # FIX: was a manual iterrows() counting loop
        # here dictates over how many trials the RT is averaged over (m), dependant on how many
        # trials are in the overall group (n).
        ##
        # eg, when the number of overall trials in the group is less than 3 (if n < 3), then
        # the number of trials to average over is 0 (m = 0), and the rows are left empty (np.nan).
        if n < 3:
            m = 0
            for index, row in group_v.iterrows():
                group_v.at[index, 'average_switch_rt'] = np.nan
        elif n >= 3 and n < 5:
            m = 2
        elif n >= 5:
            m = 3
        number_of_trials = 0
        overall_rt = 0
        # the 'islice' tells pandas to iterate with iterrows over the first 'm' rows, where 'm' is
        # dictated above and depends on the overall number of trials, 'n', in the group.
        for index, row in islice(group_v.iterrows(), m):
            number_of_trials = number_of_trials + 1
            overall_rt = overall_rt + row['response_time']
            j = (overall_rt/number_of_trials)
            group_v.at[index, 'average_switch_rt'] = j
        # NOTE(review): with inplace=False the next call is a no-op (result
        # discarded); left as-is to preserve the downstream concat indices.
        group_v.reset_index(drop = True, inplace = False)
        df_behavstats = pd.concat([df_behavstats, group_v], sort=True)
    df_behavstats = pd.concat([df_behavstats, df_switch_type.reindex(columns=df.columns)], axis=1)
    df_behavstats = df_behavstats.drop(columns=['response_time'])
    df_behavstats.drop_duplicates(subset="mean_rt", keep='first', inplace=True)
    df_behavstats.drop(df_behavstats.columns[df_behavstats.columns.str.contains('unnamed',case = False)],axis = 1, inplace = True)
    # when a group has less than 3 trials in it, the switch_rt is not calculated (m = 0).
    # if there are NaN values in any of the rows of a column, that column returns NaN as a t-test
    # value for any t-test calculations it is involved in. therefore i have excluded those rows below:
    print("")
    print("")
    print('BELOW DISPLAYS THE GROUP(S) WHICH HAVE BEEN EXCLUDED AS THERE WERE LESS THAN')
    print('3 TRIALS IN THE GROUP, CAUSING A NaN VALUE FOR THE T-TEST CALCULATIONS:')
    print("")
    print(df_behavstats[df_behavstats.isna().any(axis=1)].index)
    df_behavstats = df_behavstats[pd.notnull(df_behavstats['average_switch_rt'])]
    print("")
    print("")
    df_behavstats.reset_index(drop=False, inplace=True)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # SWITCH-TYPE COLUMN
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    df_behavstats = df_behavstats.loc[:,~df_behavstats.columns.duplicated()]
    df_behavstats.set_index(['auto_participant_id', 'occurence', 'type'], inplace = True)
    for group_index, group_value in df_behavstats.groupby(level=[0, 1]):
        group_value.reset_index(drop = False, inplace = True)
        row_iterator = group_value.iterrows()
        _, previous = next(row_iterator)
        # First row of the group: no previous task, so the label is NONE-<task>.
        for index, row in group_value.iterrows():
            if np.logical_and(row['changed'] == 1, index == 0):
                # FIX: j was previously unbound (NameError) when row['type']
                # was '' -- the empty-type branch only 'pass'ed.
                j = 'none'
                if row['type'] == 'ts-trial-digit-span':
                    j = 'NONE-DS'
                if row['type'] == 'ts-trial-spatial-span':
                    j = 'NONE-SS'
                if row['type'] == 'ts-trial-spatial-rotation':
                    j = 'NONE-SR'
                group_value.at[0, 'switch_type'] = j
        # Remaining rows: label each switch as <previous task>-<new task>.
        for index, row in row_iterator:
            j = 'none'
            if row['changed'] == 1:
                if row['type'] == 'ts-trial-digit-span' and previous['type'] == 'ts-trial-spatial-span':
                    j = 'SS-DS'
                if row['type'] == 'ts-trial-digit-span' and previous['type'] == 'ts-trial-spatial-rotation':
                    j = 'SR-DS'
                if row['type'] == 'ts-trial-spatial-span' and previous['type'] == 'ts-trial-digit-span':
                    j = 'DS-SS'
                if row['type'] == 'ts-trial-spatial-span' and previous['type'] == 'ts-trial-spatial-rotation':
                    j = 'SR-SS'
                if row['type'] == 'ts-trial-spatial-rotation' and previous['type'] == 'ts-trial-digit-span':
                    j = 'DS-SR'
                if row['type'] == 'ts-trial-spatial-rotation' and previous['type'] == 'ts-trial-spatial-span':
                    j = 'SS-SR'
            previous = row
            group_value.at[index, 'switch_type'] = j
        df_behavstats = pd.concat([df_behavstats, group_value], sort=True)
    df_behavstats = df_behavstats.dropna(subset=['switch_type'])
    df_behavstats.to_csv('WithSwitchType.csv')
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # LMEM
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    md1 = smf.mixedlm("first_switch_rt ~ auto_participant_id ", df_behavstats, groups=df_behavstats["type"])
    mdf1 = md1.fit()
    print('*************************************************************************************')
    print('LINEAR MIXED EFFECTS MODELS')
    print(mdf1.summary())
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # ANOVAs
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    model = ols(
        'first_switch_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)',
        data=df_behavstats
        ).fit()
    anova_table = sm.stats.anova_lm(model, typ=2)
    print('*************************************************************************************')
    print('ANOVA TABLE FIRST SWITCH RT')
    print(anova_table)
    model1 = ols(
        'mean_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)',
        data=df_behavstats
        ).fit()
    anova_table1 = sm.stats.anova_lm(model1, typ=2)
    print('*************************************************************************************')
    print('ANOVA TABLE MEAN RT')
    print(anova_table1)
    model2 = ols(
        'median_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)',
        data=df_behavstats
        ).fit()
    anova_table2 = sm.stats.anova_lm(model2, typ=2)
    print('*************************************************************************************')
    print('ANOVA TABLE MEDIAN SWITCH RT')
    print(anova_table2)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # T-TESTS
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    mean = df_behavstats['mean_rt']
    median = df_behavstats['median_rt']
    # Check here is mean or median is different from one another; if so, decide which to use. If not, move ahead with one or the other.
    g1 = stats.ttest_ind(median, mean, equal_var = False)
    print('*************************************************************************************')
    print('TTEST for difference between mean and median rt: All tasks, all occurences =', g1)
    rt1 = df_behavstats['first_switch_rt']
    rt123 = df_behavstats['average_switch_rt']
    f1 = stats.ttest_rel(rt1, rt123)
    print('*************************************************************************************')
    print('TTEST for difference between first and average rt: All tasks, all occurences =', f1)
    # Per-(task, occurence) comparison of first vs average switch RT.
    df_behavstats.set_index(['auto_participant_id', 'type', 'occurence'], inplace = True)
    for group_i, group_v in df_behavstats.groupby(level=[1,2]):
        group_v.reset_index(drop = False, inplace = True)
        # FIX: these four assignments were previously (pointlessly) repeated
        # inside an iterrows() loop; they do not depend on the loop variable.
        # NOTE(review): .loc[1] assumes every group has at least two rows.
        task = group_v['type'].loc[1]
        occurence = group_v['occurence'].loc[1]
        SRT = group_v['first_switch_rt']
        MRT = group_v['average_switch_rt']
        n = len(MRT)
        x = range(0,n,1)
        ttest = stats.ttest_rel(MRT, SRT)
        print('*************************************************************************************')
        print('TASK TYPE=', task, 'OCCURENCE =', occurence)
        print('TTEST BETWEEN FIRST AND AVERAGE RT=', ttest)
        # Twin-axis figure: mean RT (left, red) vs switch RT (right, blue).
        fig, axMRT = plt.subplots()
        color = 'tab:red'
        axMRT.set_xlabel('Number of trials')
        axMRT.set_ylabel('Mean RT', color=color)
        axMRT.plot(x, MRT, color=color)
        axMRT.set_ylim([0,3000])
        axMRT.tick_params(axis='y')
        axSRT = axMRT.twinx()  # instantiate a second axes that shares the same x-axis
        color = 'tab:blue'
        axSRT.set_ylabel('Switch RT', color=color)  # we already handled the x-label with ax1
        axSRT.plot(x, SRT, color=color)
        axSRT.set_ylim([0,3000])
        axSRT.tick_params(axis='y')
        t = str(task)
        o = str(occurence)
        name = 'Figures/ScatterPlot for' + t + 'Occurence=' + o +'.jpeg'
        plt.legend(loc='upper left')
        axSRT.text(0, 10, name, bbox={'facecolor': 'wheat', 'alpha': 0.5, 'pad': 10})
        fig.tight_layout()  # otherwise the right y-label is slightly clipped
        fig.savefig(name, dpi=400)
    df_behavstats.reset_index(drop = False, inplace = True)
    # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # # Testing for learning effects
    # # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # df_behavstats.set_index(['auto_participant_id', 'type', 'occurence'], inplace = True)
    # for group_i, group_v in df_behavstats.groupby(level=[1,2]):
    #     group_v.reset_index(drop = False, inplace = True)
    #     last_occ = []
    #     first_occ = []
    #     for index, row in group_v.iterrows():
    #         if group_v['occurence'].loc[1] == 8 :
    #             last_occ = group_v['average_switch_rt']
    #             print('last_occ',last_occ)
    #         elif group_v['occurence'].loc[1] == 0 :
    #             first_occ = group_v['average_switch_rt']
    #             print('first_occ',first_occ)
    #         else:
    #             continue
    #     task = group_v['type'].loc[1]
    #     occurence = group_v['occurence'].loc[1]
    #     ttest = stats.ttest_rel(last_occ, first_occ)
    #     print('*************************************************************************************')
    #     print('TASK TYPE=', task, 'OCCURENCE =', occurence)
    #     print('TTEST BETWEEN FIRST LAST AVERAGE RT=', ttest)
    # df_behavstats.reset_index(drop = False, inplace = True)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # Plots!
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    ax1 = sns.boxplot(x='type', y='mean_rt', data=df_behavstats)
    ax1 = sns.swarmplot(x='type', y='mean_rt', data=df_behavstats, color=".25")
    figure1 = ax1.get_figure()
    figure1.savefig('Figures/boxplot_Mean_ShowDataPoints.png', dpi=400)
    plt.close()
    ax2 = sns.boxplot(x='type', y='mean_rt', hue='occurence', data=df_behavstats)
    figure2 = ax2.get_figure()
    figure2.savefig('Figures/boxplot_Mean_byTaskType.png', dpi=400)
    plt.close()
    ax3 = sns.boxplot(x='type', y='first_switch_rt', data=df_behavstats)
    ax3 = sns.swarmplot(x='type', y='first_switch_rt', data=df_behavstats, color=".25")
    figure3 = ax3.get_figure()
    figure3.savefig('Figures/boxplot_Switch_ShowDataPoints.png', dpi=400)
    plt.close()
    ax4 = sns.boxplot(x='type', y='first_switch_rt', hue='occurence', data=df_behavstats)
    figure4 = ax4.get_figure()
    figure4.savefig('Figures/boxplot_Switch_byTaskType.png', dpi=400)
    plt.close()
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # Write ttests to a .csv file
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # a = stats.ttest_ind(mean, rt1)
    # MEANvsAVRT = stats.ttest_ind(mean, rt123)
    # d = stats.ttest_ind(median, rt1)
    # MEDvsAVRT = stats.ttest_ind(median, rt123)
    # standard_t_tests = [a,MEANvsAVRT,d,MEDvsAVRT]
    # a1 = stats.ttest_ind(mean, rt1, equal_var = False)
    # MEANvsAVRT1 = stats.ttest_ind(mean, rt123, equal_var = False)
    # d1 = stats.ttest_ind(median, rt1, equal_var = False)
    # MEDvsAVRT1 = stats.ttest_ind(median, rt123, equal_var = False)
    # welchs_t_tests = [a1,MEANvsAVRT1,d1,MEDvsAVRT1]
    # t_data = {'standard':standard_t_tests, 'welchs':welchs_t_tests}
    # t_rows = ['mean_vs_rt1', 'mean_vs_rt123', 'med_vs_rt1', 'med_vs_rt123']
    # df_t_tests = pd.DataFrame(data=t_data, index=t_rows)
    # name='TTests.csv'
    # dest = os.path.join(path, name)
    # df_t_tests.to_csv(dest)
    return df_behavstats
"pandas.DataFrame",
"numpy.logical_and",
"pandas.read_csv",
"statsmodels.formula.api.mixedlm",
"scipy.stats.ttest_rel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"scipy.stats.ttest_ind",
"statsmodels.api.stats.anova_lm",
"seaborn.swarmplot",
"pandas.notnull",
"seaborn.boxplot",
... | [((962, 1003), 'pandas.read_csv', 'pd.read_csv', (['raw_data_location3'], {'header': '(0)'}), '(raw_data_location3, header=0)\n', (973, 1003), True, 'import pandas as pd\n'), ((1027, 1041), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1039, 1041), True, 'import pandas as pd\n'), ((1063, 1077), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1075, 1077), True, 'import pandas as pd\n'), ((1102, 1116), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1114, 1116), True, 'import pandas as pd\n'), ((1138, 1152), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1150, 1152), True, 'import pandas as pd\n'), ((1174, 1188), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1186, 1188), True, 'import pandas as pd\n'), ((8221, 8324), 'statsmodels.formula.api.mixedlm', 'smf.mixedlm', (['"""first_switch_rt ~ auto_participant_id """', 'df_behavstats'], {'groups': "df_behavstats['type']"}), "('first_switch_rt ~ auto_participant_id ', df_behavstats, groups\n =df_behavstats['type'])\n", (8232, 8324), True, 'import statsmodels.formula.api as smf\n'), ((8992, 9023), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', (['model'], {'typ': '(2)'}), '(model, typ=2)\n', (9009, 9023), True, 'import statsmodels.api as sm\n'), ((9529, 9561), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', (['model1'], {'typ': '(2)'}), '(model1, typ=2)\n', (9546, 9561), True, 'import statsmodels.api as sm\n'), ((10061, 10093), 'statsmodels.api.stats.anova_lm', 'sm.stats.anova_lm', (['model2'], {'typ': '(2)'}), '(model2, typ=2)\n', (10078, 10093), True, 'import statsmodels.api as sm\n'), ((10666, 10712), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['median', 'mean'], {'equal_var': '(False)'}), '(median, mean, equal_var=False)\n', (10681, 10712), False, 'from scipy import integrate, stats\n'), ((11018, 11045), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['rt1', 'rt123'], {}), '(rt1, rt123)\n', (11033, 11045), False, 'from scipy import integrate, stats\n'), 
((14561, 14615), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""type"""', 'y': '"""mean_rt"""', 'data': 'df_behavstats'}), "(x='type', y='mean_rt', data=df_behavstats)\n", (14572, 14615), True, 'import seaborn as sns\n'), ((14626, 14695), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""type"""', 'y': '"""mean_rt"""', 'data': 'df_behavstats', 'color': '""".25"""'}), "(x='type', y='mean_rt', data=df_behavstats, color='.25')\n", (14639, 14695), True, 'import seaborn as sns\n'), ((14807, 14818), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14816, 14818), True, 'import matplotlib.pyplot as plt\n'), ((14830, 14901), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""type"""', 'y': '"""mean_rt"""', 'hue': '"""occurence"""', 'data': 'df_behavstats'}), "(x='type', y='mean_rt', hue='occurence', data=df_behavstats)\n", (14841, 14901), True, 'import seaborn as sns\n'), ((15009, 15020), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15018, 15020), True, 'import matplotlib.pyplot as plt\n'), ((15032, 15094), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""type"""', 'y': '"""first_switch_rt"""', 'data': 'df_behavstats'}), "(x='type', y='first_switch_rt', data=df_behavstats)\n", (15043, 15094), True, 'import seaborn as sns\n'), ((15105, 15182), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""type"""', 'y': '"""first_switch_rt"""', 'data': 'df_behavstats', 'color': '""".25"""'}), "(x='type', y='first_switch_rt', data=df_behavstats, color='.25')\n", (15118, 15182), True, 'import seaborn as sns\n'), ((15296, 15307), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15305, 15307), True, 'import matplotlib.pyplot as plt\n'), ((15319, 15398), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""type"""', 'y': '"""first_switch_rt"""', 'hue': '"""occurence"""', 'data': 'df_behavstats'}), "(x='type', y='first_switch_rt', hue='occurence', data=df_behavstats)\n", (15330, 15398), True, 'import seaborn as sns\n'), ((15508, 15519), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15517, 15519), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2462), 'pandas.concat', 'pd.concat', (['[df_behavstats1, group_v]'], {'sort': '(False)'}), '([df_behavstats1, group_v], sort=False)\n', (2423, 2462), True, 'import pandas as pd\n'), ((4233, 4279), 'pandas.concat', 'pd.concat', (['[df_behavstats, group_v]'], {'sort': '(True)'}), '([df_behavstats, group_v], sort=True)\n', (4242, 4279), True, 'import pandas as pd\n'), ((5269, 5315), 'pandas.notnull', 'pd.notnull', (["df_behavstats['average_switch_rt']"], {}), "(df_behavstats['average_switch_rt'])\n", (5279, 5315), True, 'import pandas as pd\n'), ((7900, 7950), 'pandas.concat', 'pd.concat', (['[df_behavstats, group_value]'], {'sort': '(True)'}), '([df_behavstats, group_value], sort=True)\n', (7909, 7950), True, 'import pandas as pd\n'), ((12074, 12088), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (12086, 12088), True, 'import matplotlib.pyplot as plt\n'), ((12766, 12794), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (12776, 12794), True, 'import matplotlib.pyplot as plt\n'), ((5995, 6042), 'numpy.logical_and', 'np.logical_and', (["(row['changed'] == 1)", '(index == 0)'], {}), "(row['changed'] == 1, index == 0)\n", (6009, 6042), True, 'import numpy as np\n'), ((8660, 8946), 'statsmodels.formula.api.ols', 'ols', (['"""first_switch_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)"""'], {'data': 'df_behavstats'}), "('first_switch_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)'\n , 
data=df_behavstats)\n", (8663, 8946), False, 'from statsmodels.formula.api import ols\n'), ((9202, 9481), 'statsmodels.formula.api.ols', 'ols', (['"""mean_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)"""'], {'data': 'df_behavstats'}), "('mean_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)'\n , data=df_behavstats)\n", (9205, 9481), False, 'from statsmodels.formula.api import ols\n'), ((9732, 10013), 'statsmodels.formula.api.ols', 'ols', (['"""median_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)"""'], {'data': 'df_behavstats'}), "('median_rt ~ +C(type)+C(auto_participant_id)+C(switch_type)+C(occurence)+C(occurence):C(type)+C(occurence):C(auto_participant_id)+C(occurence):C(switch_type)+C(type):C(switch_type)+C(auto_participant_id):C(switch_type)+C(type):C(auto_participant_id)'\n , data=df_behavstats)\n", (9735, 10013), False, 'from statsmodels.formula.api import ols\n'), ((11795, 11820), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['MRT', 'SRT'], {}), '(MRT, SRT)\n', (11810, 11820), False, 'from scipy import integrate, stats\n')] |
import numpy
from coopihc.policy.BasePolicy import BasePolicy
from coopihc.base.Space import Space
class BadlyDefinedLikelihoodError(Exception):
    """Raised when a bound likelihood model's probabilities do not sum to 1."""
    pass
class ELLDiscretePolicy(BasePolicy):
    """Policy driven by an Explicitly defined Likelihood model (ELL).

    Actions are drawn from a discrete distribution whose probabilities are
    produced by a user-supplied likelihood model. After construction, bind
    the model with :meth:`attach_likelihood_function`; the callable must use
    a bound-method signature, e.g.::

        def likelihood_model(self, action, observation, *args, **kwargs):
            # return P(action | observation)
            ...

        policy = ELLDiscretePolicy(action_state, seed=seed)
        policy.attach_likelihood_function(likelihood_model)

    The probabilities over the whole action set must sum to one; otherwise a
    :class:`BadlyDefinedLikelihoodError` is raised when sampling.

    :param action_state: see the BasePolicy keyword argument with the same name
    :type action_state: see the BasePolicy keyword argument with the same name
    :param seed: seed for the internal random number generator
    :type seed: int, optional
    """

    def __init__(self, action_state, *args, seed=None, **kwargs):
        super().__init__(*args, action_state=action_state, **kwargs)
        # Signals to the rest of coopihc that this policy carries an
        # explicit probabilistic model.
        self.explicit_likelihood = True
        # Dedicated generator so sampling is reproducible for a given seed.
        self.rng = numpy.random.default_rng(seed)

    def attach_likelihood_function(self, _function):
        """Bind ``_function`` to this policy as its likelihood model.

        :param _function: callable (bound-method signature) returning the
            probability of an action given an observation
        :type _function: function
        """
        self._bind(_function, "compute_likelihood")

    @BasePolicy.default_value
    def sample(self, agent_observation=None, agent_state=None):
        """Draw one action according to the likelihood model.

        Each candidate action is weighted by the probability that the bound
        likelihood model assigns to it under ``agent_observation`` (the
        agent's current observation is used when none is passed, which is
        handy e.g. for debugging the policy in isolation).

        :return: (selected action, reward of 0)
        :rtype: tuple(StateElement, float)
        """
        candidates, weights = self.forward_summary(agent_observation)
        selected = self.rng.choice(len(weights), p=weights)
        return candidates[selected], 0

    def forward_summary(self, observation):
        """Evaluate the likelihood of every candidate action.

        :param observation: agent observation the likelihoods are
            conditioned on
        :type observation: `State<coopihc.base.State.State>`
        :return: (candidate actions, their probabilities)
        :rtype: tuple(list, list)
        """
        space = self.action_state["action"].space
        candidates = list(Space.cartesian_product(space)[0])
        weights = [
            self.compute_likelihood(candidate, observation)
            for candidate in candidates
        ]
        ACCEPTABLE_ERROR = 1e-13
        # The bound model must define a proper probability distribution over
        # the whole action set (up to numerical round-off).
        if abs(1 - sum(weights)) > ACCEPTABLE_ERROR:
            raise BadlyDefinedLikelihoodError(
                "Likelihood does not sum to 1: {}".format(weights)
            )
        return candidates, weights
| [
"numpy.random.default_rng",
"coopihc.base.Space.Space.cartesian_product"
] | [((2069, 2099), 'numpy.random.default_rng', 'numpy.random.default_rng', (['seed'], {}), '(seed)\n', (2093, 2099), False, 'import numpy\n'), ((3870, 3907), 'coopihc.base.Space.Space.cartesian_product', 'Space.cartesian_product', (['action_space'], {}), '(action_space)\n', (3893, 3907), False, 'from coopihc.base.Space import Space\n')] |
import cv2
import numpy as np
def hough(orig_frame):
    """Detect bright line segments in a frame and draw them.

    Blurs the input, thresholds it in HSV space for bright pixels, runs
    Canny edge detection followed by a probabilistic Hough transform, and
    draws each detected segment in green on the blurred copy of the frame.

    Returns the annotated (blurred) frame.
    """
    blurred = cv2.GaussianBlur(orig_frame, (5, 5), 0)
    hsv_img = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    lower_bound = np.array([0, 0, 215])
    upper_bound = np.array([117, 255, 255])
    bright_mask = cv2.inRange(hsv_img, lower_bound, upper_bound)
    edge_map = cv2.Canny(bright_mask, 75, 150)
    segments = cv2.HoughLinesP(edge_map, 1, np.pi / 180, 50, maxLineGap=50)
    if segments is not None:
        for segment in segments:
            x1, y1, x2, y2 = segment[0]
            cv2.line(blurred, (x1, y1), (x2, y2), (0, 255, 0), 5)
    return blurred
return kluar
def nothing(x):
    """No-op trackbar callback (cv2.createTrackbar requires a callable)."""
    pass
# Window holding the HSV threshold sliders.
cv2.namedWindow('Trackbar')
cap = cv2.VideoCapture("danu1.mp4")
#cap = cv2.VideoCapture(0)
#cap.set(3,320);
#cap.set(4,240);
# Initial HSV threshold values ("bawah" = lower, "atas" = upper) and the
# minimum bounding-box area ("ukuran" = size) used to filter detections.
H_bawah = 20
H_atas = 48
S_bawah = 110
S_atas = 176
V_bawah = 130
V_atas = 255
ukuran = 0
cv2.createTrackbar('H_bawah','Trackbar',H_bawah,255,nothing)
cv2.createTrackbar('H_atas','Trackbar',H_atas,255,nothing)
cv2.createTrackbar('S_bawah','Trackbar',S_bawah,255,nothing)
cv2.createTrackbar('S_atas','Trackbar',S_atas,255,nothing)
cv2.createTrackbar('V_bawah','Trackbar',V_bawah,255,nothing)
cv2.createTrackbar('V_atas','Trackbar',V_atas,255,nothing)
cv2.createTrackbar('ukuran','Trackbar',ukuran,255,nothing)
# Recorded output video: XVID codec, 60 fps, 320x240 frames.
fou = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('fra.avi', fou, 60.0, (320, 240))
def my_mouse_callback(event,x,y,flags,param):
    """Mouse handler for the 'frame' window.

    Left-click: print the HSV colour under the cursor and re-centre the
    hue trackbars on it (+/- 25), using the clicked pixel's saturation and
    value as the lower bounds.
    Right-click: print the clicked (x, y) position.
    """
    global hsv
    if event == cv2.EVENT_LBUTTONUP:
        print("warna:")
        print(hsv[y,x])
        cv2.setTrackbarPos('H_bawah', 'Trackbar', hsv[y,x][0]-25)
        cv2.setTrackbarPos('H_atas', 'Trackbar', hsv[y,x][0]+25)
        cv2.setTrackbarPos('S_bawah', 'Trackbar', hsv[y,x][1])
        cv2.setTrackbarPos('V_bawah', 'Trackbar', hsv[y,x][2])
    if event == cv2.EVENT_RBUTTONUP:
        print("x,y", x, y)
        # NOTE(review): this blocks the UI for up to 2 s after a right-click.
        cv2.waitKey(2000)
cv2.namedWindow("frame")
cv2.setMouseCallback("frame",my_mouse_callback)
tr2 = 0
daa = []
# Main processing loop: read frames, threshold in HSV, track the largest
# blob's bounding box, draw its centre-point trail, and record the result.
while(1):
    ret,frame = cap.read()
    #cv2.rectangle(frame, (1086, 66), (1244, 114), (0, 0, 0), -1)
    ret,frame2 = cap.read()
    ret,frame3 = cap.read()
    # frame3 = hough(frame)
    # frame = cv2.imread('im2.jpeg',1)#cap.read()
    # frame2 = cv2.imread('im2.jpeg',1)#cap.read()
    # frame3 = cv2.imread('im2.jpeg',1)#cap.read()
    #frame = frame[400:100, 0:700]
    # frame2 = frame2[400:10, 0:700]
    # frame3 = frame3[400:0, 0:700]
    # Conversion fails once the video runs out of frames -> exit the loop.
    try :
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    except cv2.error:
        break
    # Read the current HSV thresholds from the trackbars each iteration.
    H_bawah = cv2.getTrackbarPos('H_bawah','Trackbar')
    S_bawah = cv2.getTrackbarPos('S_bawah','Trackbar')
    V_bawah = cv2.getTrackbarPos('V_bawah','Trackbar')
    H_atas = cv2.getTrackbarPos('H_atas','Trackbar')
    S_atas = cv2.getTrackbarPos('S_atas','Trackbar')
    V_atas = cv2.getTrackbarPos('V_atas','Trackbar')
    ukuran = cv2.getTrackbarPos('ukuran','Trackbar')
    batas_atas = np.array([H_atas,S_atas,V_atas])      # upper HSV bound
    batas_bawah = np.array([H_bawah,S_bawah,V_bawah])  # lower HSV bound
    mask = cv2.inRange(hsv, batas_bawah, batas_atas)
    # Two erosion passes ("hasil" = result) to clean the binary mask.
    kernel = np.ones((10,10), np.uint8)
    hasil_dilasi = cv2.erode(mask, kernel)
    kernel2 = np.ones((10,10), np.uint8)
    hasil_erosi = cv2.erode(hasil_dilasi, kernel2)
    # Bounding box of all remaining foreground pixels.
    x, y, w, h = cv2.boundingRect(hasil_erosi)
    #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 5)
    print(x,y)
    # Accept the detection only if its area is between the trackbar minimum
    # ("ukuran") and a fixed upper limit; record its centre point.
    if w*h>ukuran and (w*h<1000000):
        cv2.rectangle(frame, (x, y), (x+w, y+h), (10, 255, 0), 5)
        #res = cv2.bitwise_and(frame2,frame2, mask= hasil_dilasi)
        daa.append((x+int(w/2), y+int(h/2) ))
    print("panjang array : ", len(daa))
    #cv2.line(frame,daa[0],daa[len(daa)-1],(0,255,0),2)#ini
    '''
    #hs = daa[len(daa)-1][0] - daa[0][0] #nilai x
    #hs2 = daa[len(daa)-1][1] - daa[0][1] #milai y
    #if hs<0: hs = hs*-1
    if hs2<0: hs2 = hs2*-1
    hs = int(hs/1.8)
    hs2 = int(hs2/1.8)
    print(hs, hs2)
    '''
    # Draw the full trail of centre points collected so far.
    for ko in range(1,len(daa)):
        #if ko < 456:
        #print(ko)
        cv2.circle(frame,daa[ko], 5, (0,255,0), -1)
        cv2.line(frame,daa[ko],daa[ko-1],(0,0,255),2)
    # line drawing (garis) experiments, kept disabled:
    #gray = cv2.cvtColor(frame3,cv2.COLOR_BGR2GRAY)
    #edges = cv2.Canny(gray,50,150,apertureSize = 3)
    #cv2.imshow('edges',edges)
    # lines = cv2.HoughLinesP(edges,1,np.pi/180,100)
    # for x1,y1,x2,y2 in lines[0]:
    #     cv2.line(frame,(x1,y1),(x2,y2),(0,255,0),2)
    #cv2.line(frame,(x1,y1),(x2,y2),(0,0,255),2)
    hasil_erosi = cv2.resize(hasil_erosi, (940,640))
    cv2.imshow('mask',hasil_erosi)
    #cv2.imshow('res',res)
    # Record the annotated frame; a write error ends the loop.
    try :
        #frame3 = cv2.resize(frame, (100,100))
        #cv2.imshow('frame3',frame3)
        out.write(frame)
    except cv2.error:
        break
    # ESC (key code 27) quits.
    k = cv2.waitKey(1) & 0xFF
    if k == 27:
        break
    frame = cv2.resize(frame, (940,640))
    cv2.imshow('frame',frame)
    cv2.imwrite("da.jpg", frame)
cap.release()
out.release()
cv2.destroyAllWindows()
| [
"cv2.GaussianBlur",
"cv2.VideoWriter_fourcc",
"numpy.ones",
"cv2.VideoWriter",
"cv2.HoughLinesP",
"cv2.erode",
"cv2.rectangle",
"cv2.imshow",
"cv2.inRange",
"cv2.line",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.setMouseCallback",
"cv2.setTrackbarPos",
"cv2.getTrackbarPos",
"cv2.boundingRect"... | [((631, 658), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Trackbar"""'], {}), "('Trackbar')\n", (646, 658), False, 'import cv2\n'), ((668, 697), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""danu1.mp4"""'], {}), "('danu1.mp4')\n", (684, 697), False, 'import cv2\n'), ((874, 938), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""H_bawah"""', '"""Trackbar"""', 'H_bawah', '(255)', 'nothing'], {}), "('H_bawah', 'Trackbar', H_bawah, 255, nothing)\n", (892, 938), False, 'import cv2\n'), ((936, 998), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""H_atas"""', '"""Trackbar"""', 'H_atas', '(255)', 'nothing'], {}), "('H_atas', 'Trackbar', H_atas, 255, nothing)\n", (954, 998), False, 'import cv2\n'), ((998, 1062), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""S_bawah"""', '"""Trackbar"""', 'S_bawah', '(255)', 'nothing'], {}), "('S_bawah', 'Trackbar', S_bawah, 255, nothing)\n", (1016, 1062), False, 'import cv2\n'), ((1060, 1122), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""S_atas"""', '"""Trackbar"""', 'S_atas', '(255)', 'nothing'], {}), "('S_atas', 'Trackbar', S_atas, 255, nothing)\n", (1078, 1122), False, 'import cv2\n'), ((1122, 1186), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""V_bawah"""', '"""Trackbar"""', 'V_bawah', '(255)', 'nothing'], {}), "('V_bawah', 'Trackbar', V_bawah, 255, nothing)\n", (1140, 1186), False, 'import cv2\n'), ((1184, 1246), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""V_atas"""', '"""Trackbar"""', 'V_atas', '(255)', 'nothing'], {}), "('V_atas', 'Trackbar', V_atas, 255, nothing)\n", (1202, 1246), False, 'import cv2\n'), ((1246, 1308), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""ukuran"""', '"""Trackbar"""', 'ukuran', '(255)', 'nothing'], {}), "('ukuran', 'Trackbar', ukuran, 255, nothing)\n", (1264, 1308), False, 'import cv2\n'), ((1314, 1345), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (1336, 1345), False, 'import cv2\n'), ((1353, 1402), 'cv2.VideoWriter', 
'cv2.VideoWriter', (['"""fra.avi"""', 'fou', '(60.0)', '(320, 240)'], {}), "('fra.avi', fou, 60.0, (320, 240))\n", (1368, 1402), False, 'import cv2\n'), ((1913, 1937), 'cv2.namedWindow', 'cv2.namedWindow', (['"""frame"""'], {}), "('frame')\n", (1928, 1937), False, 'import cv2\n'), ((1939, 1987), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""frame"""', 'my_mouse_callback'], {}), "('frame', my_mouse_callback)\n", (1959, 1987), False, 'import cv2\n'), ((5098, 5121), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5119, 5121), False, 'import cv2\n'), ((70, 109), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['orig_frame', '(5, 5)', '(0)'], {}), '(orig_frame, (5, 5), 0)\n', (86, 109), False, 'import cv2\n'), ((121, 159), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (133, 159), False, 'import cv2\n'), ((178, 199), 'numpy.array', 'np.array', (['[0, 0, 215]'], {}), '([0, 0, 215])\n', (186, 199), True, 'import numpy as np\n'), ((217, 242), 'numpy.array', 'np.array', (['[117, 255, 255]'], {}), '([117, 255, 255])\n', (225, 242), True, 'import numpy as np\n'), ((255, 294), 'cv2.inRange', 'cv2.inRange', (['hsv', 'low_yellow', 'up_yellow'], {}), '(hsv, low_yellow, up_yellow)\n', (266, 294), False, 'import cv2\n'), ((308, 332), 'cv2.Canny', 'cv2.Canny', (['mask', '(75)', '(150)'], {}), '(mask, 75, 150)\n', (317, 332), False, 'import cv2\n'), ((346, 403), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['edges', '(1)', '(np.pi / 180)', '(50)'], {'maxLineGap': '(50)'}), '(edges, 1, np.pi / 180, 50, maxLineGap=50)\n', (361, 403), False, 'import cv2\n'), ((2598, 2639), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""H_bawah"""', '"""Trackbar"""'], {}), "('H_bawah', 'Trackbar')\n", (2616, 2639), False, 'import cv2\n'), ((2654, 2695), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""S_bawah"""', '"""Trackbar"""'], {}), "('S_bawah', 'Trackbar')\n", (2672, 2695), False, 'import cv2\n'), ((2710, 2751), 
'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""V_bawah"""', '"""Trackbar"""'], {}), "('V_bawah', 'Trackbar')\n", (2728, 2751), False, 'import cv2\n'), ((2771, 2811), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""H_atas"""', '"""Trackbar"""'], {}), "('H_atas', 'Trackbar')\n", (2789, 2811), False, 'import cv2\n'), ((2825, 2865), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""S_atas"""', '"""Trackbar"""'], {}), "('S_atas', 'Trackbar')\n", (2843, 2865), False, 'import cv2\n'), ((2879, 2919), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""V_atas"""', '"""Trackbar"""'], {}), "('V_atas', 'Trackbar')\n", (2897, 2919), False, 'import cv2\n'), ((2935, 2975), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""ukuran"""', '"""Trackbar"""'], {}), "('ukuran', 'Trackbar')\n", (2953, 2975), False, 'import cv2\n'), ((2995, 3029), 'numpy.array', 'np.array', (['[H_atas, S_atas, V_atas]'], {}), '([H_atas, S_atas, V_atas])\n', (3003, 3029), True, 'import numpy as np\n'), ((3047, 3084), 'numpy.array', 'np.array', (['[H_bawah, S_bawah, V_bawah]'], {}), '([H_bawah, S_bawah, V_bawah])\n', (3055, 3084), True, 'import numpy as np\n'), ((3101, 3142), 'cv2.inRange', 'cv2.inRange', (['hsv', 'batas_bawah', 'batas_atas'], {}), '(hsv, batas_bawah, batas_atas)\n', (3112, 3142), False, 'import cv2\n'), ((3157, 3184), 'numpy.ones', 'np.ones', (['(10, 10)', 'np.uint8'], {}), '((10, 10), np.uint8)\n', (3164, 3184), True, 'import numpy as np\n'), ((3204, 3227), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {}), '(mask, kernel)\n', (3213, 3227), False, 'import cv2\n'), ((3243, 3270), 'numpy.ones', 'np.ones', (['(10, 10)', 'np.uint8'], {}), '((10, 10), np.uint8)\n', (3250, 3270), True, 'import numpy as np\n'), ((3289, 3321), 'cv2.erode', 'cv2.erode', (['hasil_dilasi', 'kernel2'], {}), '(hasil_dilasi, kernel2)\n', (3298, 3321), False, 'import cv2\n'), ((3340, 3369), 'cv2.boundingRect', 'cv2.boundingRect', (['hasil_erosi'], {}), '(hasil_erosi)\n', (3356, 3369), False, 'import cv2\n'), ((4625, 
4660), 'cv2.resize', 'cv2.resize', (['hasil_erosi', '(940, 640)'], {}), '(hasil_erosi, (940, 640))\n', (4635, 4660), False, 'import cv2\n'), ((4665, 4696), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'hasil_erosi'], {}), "('mask', hasil_erosi)\n", (4675, 4696), False, 'import cv2\n'), ((4969, 4998), 'cv2.resize', 'cv2.resize', (['frame', '(940, 640)'], {}), '(frame, (940, 640))\n', (4979, 4998), False, 'import cv2\n'), ((5003, 5029), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (5013, 5029), False, 'import cv2\n'), ((5034, 5062), 'cv2.imwrite', 'cv2.imwrite', (['"""da.jpg"""', 'frame'], {}), "('da.jpg', frame)\n", (5045, 5062), False, 'import cv2\n'), ((1565, 1625), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""H_bawah"""', '"""Trackbar"""', '(hsv[y, x][0] - 25)'], {}), "('H_bawah', 'Trackbar', hsv[y, x][0] - 25)\n", (1583, 1625), False, 'import cv2\n'), ((1632, 1691), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""H_atas"""', '"""Trackbar"""', '(hsv[y, x][0] + 25)'], {}), "('H_atas', 'Trackbar', hsv[y, x][0] + 25)\n", (1650, 1691), False, 'import cv2\n'), ((1698, 1753), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""S_bawah"""', '"""Trackbar"""', 'hsv[y, x][1]'], {}), "('S_bawah', 'Trackbar', hsv[y, x][1])\n", (1716, 1753), False, 'import cv2\n'), ((1762, 1817), 'cv2.setTrackbarPos', 'cv2.setTrackbarPos', (['"""V_bawah"""', '"""Trackbar"""', 'hsv[y, x][2]'], {}), "('V_bawah', 'Trackbar', hsv[y, x][2])\n", (1780, 1817), False, 'import cv2\n'), ((1892, 1909), 'cv2.waitKey', 'cv2.waitKey', (['(2000)'], {}), '(2000)\n', (1903, 1909), False, 'import cv2\n'), ((2506, 2544), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2518, 2544), False, 'import cv2\n'), ((3496, 3557), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(10, 255, 0)', '(5)'], {}), '(frame, (x, y), (x + w, y + h), (10, 255, 0), 5)\n', (3509, 3557), False, 'import cv2\n'), ((4121, 
4167), 'cv2.circle', 'cv2.circle', (['frame', 'daa[ko]', '(5)', '(0, 255, 0)', '(-1)'], {}), '(frame, daa[ko], 5, (0, 255, 0), -1)\n', (4131, 4167), False, 'import cv2\n'), ((4174, 4227), 'cv2.line', 'cv2.line', (['frame', 'daa[ko]', 'daa[ko - 1]', '(0, 0, 255)', '(2)'], {}), '(frame, daa[ko], daa[ko - 1], (0, 0, 255), 2)\n', (4182, 4227), False, 'import cv2\n'), ((4902, 4916), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4913, 4916), False, 'import cv2\n'), ((508, 559), 'cv2.line', 'cv2.line', (['frame', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(5)'], {}), '(frame, (x1, y1), (x2, y2), (0, 255, 0), 5)\n', (516, 559), False, 'import cv2\n')] |
"""
What is the grid world is less grid like - sort of like zelda?
"""
from enum import Enum
import numpy as np
import random
import gym
from gym.spaces import Box, Discrete, Tuple
class Direction(Enum):
    """Discrete movement actions: NOOP plus the eight compass directions."""
    NOOP = 0
    N = 1
    S = 2
    W = 3
    E = 4
    NE = 5
    NW = 6
    SE = 7
    SW = 8


# (row, col) deltas for each non-NOOP direction; the row axis grows downward,
# so "S" (south) increases the row index.
direction_delta = dict(
    N=(-1, 0),
    S=(1, 0),
    W=(0, -1),
    E=(0, 1),
    NE=(-1, 1),
    NW=(-1, -1),
    SE=(1, 1),
    SW=(1, -1),
)


class Block(object):
    """
    An axis-aligned rectangular block addressed by its top-left corner
    (x = row, y = col). It can be moved one step at a time, optionally
    checking bounds and collisions against an occupancy mask.
    """

    def __init__(self, x, y, height=1, width=1):
        """Store the top-left corner and the block's extent."""
        self.x = x
        self.y = y
        self.height = height
        self.width = width

    def try_move(self, direction, mask=None):
        """Attempt a one-step move in `direction`.

        `direction` is a Direction member or its integer value. If `mask`
        is given (a 2-D array where non-zero cells are occupied), the move
        is committed only when the destination footprint is fully inside
        the mask and collision-free; otherwise the block stays put.

        Returns self in every case, so calls can be chained.
        """
        if Direction(direction) == Direction["NOOP"]:
            return self
        dx, dy = direction_delta[Direction(direction).name]
        x, y = self.x + dx, self.y + dy
        if mask is None:
            # Unconstrained move: no bounds or collision checking.
            self.x = x
            self.y = y
            return self
        # The block occupies rows [x, x + height) and cols [y, y + width),
        # so "<=" (not "<") lets it touch the far edge of the mask.
        # (The previous "<" comparison made the last row/col unreachable.)
        if not (
            x >= 0
            and y >= 0
            and x + self.height <= mask.shape[0]
            and y + self.width <= mask.shape[1]
        ):
            # Out of bounds: stay put.
            return self
        if np.sum(mask[x : x + self.height, y : y + self.width]) == 0:
            # Destination footprint is free: commit the move.
            self.x = x
            self.y = y
        # Otherwise we'd collide with something: stay put.
        return self

    def create_mask(self, height, width):
        """Return a (height, width) array with 1s on the block's footprint."""
        base_mask = np.zeros((height, width))
        base_mask[self.x : self.x + self.height, self.y : self.y + self.width] = 1
        return base_mask
return base_mask
maze = """000000000000000000000
000000000000000000000
000000000000000000000
000000000000000000000
000000001111111000000
000000000000001000000
000000000000001000000
000000000000001000000
000000000000001000000
000000000000001000000
000000000000001000000
000000000000001000000
000000000000001000000
000000000000000000000
000000000000000000000""".split(
"\n"
)
maze = np.array([list(x) for x in maze]).astype(int)
class GridWorld(gym.Env):
    """A minimal grid-world gym environment: a rectangular Block moves
    around a static maze of walls."""

    def __init__(self, maze=maze, block=None):
        """Set up the maze, the movable block (default 2x2 at (1, 1)),
        and the gym action/observation spaces."""
        self.maze = maze
        self.height, self.width = maze.shape
        if block is None:
            self.block = Block(1, 1, 2, 2)
        else:
            self.block = block
        self.last_action = None
        # 9 discrete actions: NOOP plus the eight compass directions.
        self.action_space = Discrete(9)
        self.observation_space = Box(0, 10, (self.height, self.width))

    def step(self, action):
        """Apply one action and move the block.

        Accepts either a Direction integer value or one of the keyboard
        characters 'w'/'a'/'s'/'d' (mapped to N/W/S/E).

        NOTE(review): returns (None, None, None, None) rather than the
        usual gym (obs, reward, done, info) tuple.
        """
        if type(action) is int:
            # Padded name kept for fixed-width rendering in get_board_info.
            self.last_action = Direction(action).name.ljust(5, " ")
            self.last_direction = Direction(action)
        else:
            map_action = {"w": "N", "s": "S", "a": "W", "d": "E"}
            self.last_action = Direction[map_action[action]].name.ljust(5, " ")
            self.last_direction = Direction[map_action[action]]
        self.block.try_move(self.last_direction.value, self.maze)
        return None, None, None, None

    def get_board(self):
        """Return the maze with the block's footprint marked as 2."""
        maze_render = self.maze.copy()
        block_mask = self.block.create_mask(self.height, self.width).astype(int)
        maze_render[block_mask > 0] = 2
        # The block must always be somewhere on the board.
        assert np.sum(block_mask) > 0
        return maze_render

    def get_board_info(self):
        """Return the cell-value -> display-character mapping plus a status
        line describing the last action taken."""
        chr_mapping = {
            0: "ꞏ",
            1: "#",
            2: "@",
            "text": "last action taken is: {}".format(self.last_action),
        }
        return chr_mapping
if __name__ == "__main__":
from castle.base import play_blocking, play_random
from castle.ascii import AsciiWrapper
env = AsciiWrapper(GridWorld(block=Block(1, 1, 4, 4)))
# play_blocking(env, ['w', 'a', 's', 'd'])
play_random(env)
| [
"numpy.sum",
"gym.spaces.Discrete",
"numpy.zeros",
"castle.base.play_random",
"gym.spaces.Box"
] | [((3862, 3878), 'castle.base.play_random', 'play_random', (['env'], {}), '(env)\n', (3873, 3878), False, 'from castle.base import play_blocking, play_random\n'), ((1688, 1713), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (1696, 1713), True, 'import numpy as np\n'), ((2557, 2568), 'gym.spaces.Discrete', 'Discrete', (['(9)'], {}), '(9)\n', (2565, 2568), False, 'from gym.spaces import Box, Discrete, Tuple\n'), ((2602, 2639), 'gym.spaces.Box', 'Box', (['(0)', '(10)', '(self.height, self.width)'], {}), '(0, 10, (self.height, self.width))\n', (2605, 2639), False, 'from gym.spaces import Box, Discrete, Tuple\n'), ((3350, 3368), 'numpy.sum', 'np.sum', (['block_mask'], {}), '(block_mask)\n', (3356, 3368), True, 'import numpy as np\n'), ((1311, 1360), 'numpy.sum', 'np.sum', (['mask[x:x + self.height, y:y + self.width]'], {}), '(mask[x:x + self.height, y:y + self.width])\n', (1317, 1360), True, 'import numpy as np\n')] |
##
# create Features form live data and compare them with trained SVM Model
#
##
import json
import numpy as np
import pickle
import sys
from p300Functions import filterDownsampleData
from sklearn import preprocessing
debug = False # enable/disable verbose debug output
avgChannel = False # if True, average (median over) all channels per feature
def main():
    """Classify live EEG epochs against a pre-trained P300 SVM model.

    Reads a JSON payload from stdin (keys: 'cmdIdx', 'volts', 'baseline'),
    filters/downsamples the data, extracts features, and prints either the
    index of the command with the most P300 classifications or "nop".
    """
    # Load the SVM model matching the configured feature mode.
    if(avgChannel):
        with open('data/p300/model/svm_model_avg.txt', 'rb') as e:
            clf = pickle.load(e)
    else:
        with open('data/p300/model/svm_model.txt', 'rb') as e:
            clf = pickle.load(e)
    # Read the JSON input payload from stdin.
    datainput = json.loads(sys.stdin.read())
    cmdIdx = datainput['cmdIdx']
    volts = datainput['volts']
    baseline = datainput['baseline']
    # Convert to float32 numpy arrays.
    volts = np.array(volts, dtype='f')
    baseline = np.array(baseline, dtype='f')
    # Active EEG channels.
    channels = [0,1,2,3,4,5,6,7] # 0-7 channels
    if(debug):
        print("\n------ Filter and Downsample Data ------")
    ## 1. Filter and downsample the training data
    [dataDownSampleP300, dataBaseline] = filterDownsampleData(volts, baseline, cmdIdx, channels, debug)
    if (debug):
        print("\n------ Create Features ------")
    ## 2. Extract features
    X_test = extractXFeature(dataDownSampleP300)
    ## 3. Compare data with the model
    if (debug):
        print("\n------ Model Accuracy ------")
    y_pred = np.array(clf.predict(X_test)) #Predict the response for dataset
    # 4. Pick the command with the most P300 detections.
    if(np.any(y_pred == 1)):
        cmdP300 = np.zeros(len(cmdIdx))
        cmdCount = len(cmdIdx)
        for y in range(len(y_pred)):
            if(y_pred[y] == 1):
                # increment cmd counter if classified as p300
                # (predictions are ordered cmd-major, cmdCount cycles each)
                cmdP300[int(y/cmdCount)] += 1
        # return cmd idx with most found p300 classifications
        maxIdx = np.argmax(cmdP300)
        # Require at least two detections to avoid spurious single hits.
        if(cmdP300[maxIdx]>1):
            print(np.argmax(cmdP300))
        else:
            print("nop")
    else:
        print("nop")
def extractXFeature(dataDownSample):
    """Build the standardized feature matrix X for the SVM classifier.

    `dataDownSample` is a nested structure indexed [cmd][cycle][channel][sample]
    (assumed from the indexing below -- TODO confirm against
    filterDownsampleData). Depending on the module-level `avgChannel` flag,
    each cycle is reduced either by flattening all channels into one vector
    or by taking the per-sample median over channels. The rows are then
    standardized with sklearn's preprocessing.scale.

    Returns the standardized feature matrix (one row per cmd/cycle).
    """
    cmdCount = len(dataDownSample)
    cycles = len(dataDownSample[0])
    # One feature list per command. Built with a comprehension so each entry
    # is a distinct list and the code works for any number of commands
    # (the previous hard-coded [[], [], [], [], []] only supported 5).
    reshapedData = [[] for _ in range(cmdCount)]
    if(avgChannel == False):
        ## Reshape: flatten (channels x samples) into one vector per cycle.
        for cmd in range(cmdCount):
            cmdData = np.array(dataDownSample[cmd])
            cycle, nx, ny = cmdData.shape
            reshapedData[cmd] = cmdData.reshape((cycle, nx * ny))
    else:
        ## Reduce channels: per-sample median over channels, per cycle.
        for cmd in range(cmdCount):
            for cycle in range(cycles):
                median = np.median(dataDownSample[cmd][cycle], axis=0)
                reshapedData[cmd].append(median)
    if (debug):
        print("\n-- Reshaped Data ---")
        print("len(reshapedData) aka 5 cmds: " + str(len(reshapedData)))
        print("len(reshapedData[0]) aka 3 cycles : " + str(len(reshapedData[0])))
        print("len(reshapedData[0][0]) aka 8 channels and 20 samples : " + str(len(reshapedData[0][0])))
    ## Create X data for SVM training
    X = []
    for cmd in range(cmdCount):
        for cycle in range(cycles):
            X.append(reshapedData[cmd][cycle])
    if (debug):
        print("\n-- X and Y Data ---")
        print("len(X) cycles x cmd = "+str(cycles)+" * "+(str(cmdCount))+" = "+str(cycles*cmdCount)+" : " + str(len(X)))
    ## Feature Standardization (zero mean, unit variance per column)
    X = preprocessing.scale(X)
    return X
# Script entry point: read JSON from stdin, classify, print the result.
if __name__ == '__main__':
    main()
| [
"sys.stdin.read",
"sklearn.preprocessing.scale",
"numpy.argmax",
"numpy.median",
"numpy.any",
"pickle.load",
"numpy.array",
"p300Functions.filterDownsampleData"
] | [((812, 838), 'numpy.array', 'np.array', (['volts'], {'dtype': '"""f"""'}), "(volts, dtype='f')\n", (820, 838), True, 'import numpy as np\n'), ((854, 883), 'numpy.array', 'np.array', (['baseline'], {'dtype': '"""f"""'}), "(baseline, dtype='f')\n", (862, 883), True, 'import numpy as np\n'), ((1118, 1180), 'p300Functions.filterDownsampleData', 'filterDownsampleData', (['volts', 'baseline', 'cmdIdx', 'channels', 'debug'], {}), '(volts, baseline, cmdIdx, channels, debug)\n', (1138, 1180), False, 'from p300Functions import filterDownsampleData\n'), ((1547, 1566), 'numpy.any', 'np.any', (['(y_pred == 1)'], {}), '(y_pred == 1)\n', (1553, 1566), True, 'import numpy as np\n'), ((3943, 3965), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (3962, 3965), False, 'from sklearn import preprocessing\n'), ((653, 669), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (667, 669), False, 'import sys\n'), ((1898, 1916), 'numpy.argmax', 'np.argmax', (['cmdP300'], {}), '(cmdP300)\n', (1907, 1916), True, 'import numpy as np\n'), ((461, 475), 'pickle.load', 'pickle.load', (['e'], {}), '(e)\n', (472, 475), False, 'import pickle\n'), ((567, 581), 'pickle.load', 'pickle.load', (['e'], {}), '(e)\n', (578, 581), False, 'import pickle\n'), ((2326, 2355), 'numpy.array', 'np.array', (['dataDownSample[cmd]'], {}), '(dataDownSample[cmd])\n', (2334, 2355), True, 'import numpy as np\n'), ((1966, 1984), 'numpy.argmax', 'np.argmax', (['cmdP300'], {}), '(cmdP300)\n', (1975, 1984), True, 'import numpy as np\n'), ((3128, 3173), 'numpy.median', 'np.median', (['dataDownSample[cmd][cycle]'], {'axis': '(0)'}), '(dataDownSample[cmd][cycle], axis=0)\n', (3137, 3173), True, 'import numpy as np\n')] |
#################################################################################################
# Given two sorted arrays arr1[] and arr2[], each in non-decreasing order, with sizes n and m.  #
# The task is to merge the two sorted arrays into a single sorted (non-decreasing) array.       #
#################################################################################################
import numpy as np
import heapq
def main():
n_test = int(input())
for i in range(n_test):
size_arr1, size_arr2 = raw_input().split(' ')
size_arr1 = int(size_arr1)
size_arr2 = int(size_arr2)
arr1 = list(map(int, raw_input().split(' ')))
arr2 = list(map(int, raw_input().split(' ')))
merged_arr = np.concatenate((arr1, arr2), axis=1)
print(np.sort(merged_arr))
if __name__ == "__main__":
main()
| [
"numpy.sort",
"numpy.concatenate"
] | [((750, 786), 'numpy.concatenate', 'np.concatenate', (['(arr1, arr2)'], {'axis': '(1)'}), '((arr1, arr2), axis=1)\n', (764, 786), True, 'import numpy as np\n'), ((801, 820), 'numpy.sort', 'np.sort', (['merged_arr'], {}), '(merged_arr)\n', (808, 820), True, 'import numpy as np\n')] |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import pygame
import numpy as np
import cv2
from .action import Action
from .arrow import *
from .kiloBot import KiloBot
import os
class KiloBotEnv(gym.Env):
metadata={'render.modes':['human']}
BLACK=(0,0,0);WHITE=(255,255,255);BLUE=(0,0,255);RED=(255,0,0)
pygame.init()
def __init__(self,n=5,k=5,objective="graph",render=True,dupper=None,dlower=None,dthreshold=None,sigma=None,module_color=(0,255,0),radius=5,screen_width=250,screen_heigth=250):
super().__init__() ##Check it once never used before
self.n = n
self.k = k
self.modules = []
self.render_mode = render
self.target = (0,0)
if objective=="localization":
self.obj = True
self.target = (np.random.randint(0,screen_width-radius),np.random.randint(0,screen_heigth-radius))
else:
self.obj = False
self.module_color = module_color
self.screen_width = screen_width
self.screen_heigth = screen_heigth
self.target_color = self.BLUE
self.relation_color = self.RED
self.relationship_color = (255,0,0)
self.radius = radius
self.dummy_action = Action ## This is a class not a object that is stored
self.module_queue = []
self.graph_reward = 0
self.target_reward = 0
self.dupper = dupper or 14*self.radius
self.dlower = dlower or 4*self.radius
self.sigma = sigma or 0.025*self.screen_width
if self.obj:
self.dthreshold = dthreshold or 14*self.radius
else:
self.dthreshold = dthreshold or 16*self.radius
self.ring_radius = 0.5*self.dthreshold
self.epsilon = 1e-4
for i in range(n):
self.modules.append(KiloBot(module_color,
radius,
xinit=np.random.randint(0,screen_width-radius),
yinit=np.random.randint(0,screen_heigth-radius),
theta=(2*np.pi*np.random.random_sample()),
screen_width=self.screen_width,
screen_heigth=self.screen_heigth)
)
self.clock = pygame.time.Clock()
self.arrow = init_arrow(self.module_color)
self.action_space = spaces.Box(low = np.array([[0,0]]*self.n ,dtype=np.float32) ,
high=np.array([[self.radius, 2*np.pi]]*self.n, dtype=np.float32))
### This will change with respect to output if its the histogram or the graph or the localization###
####################################################################################################
self.observation_space = spaces.Box(low = np.zeros((self.n,self.k)) ,dtype=np.float32,
high = 2*np.ones((self.n,self.k) , dtype=np.float32))
def fetch_histogram(self):
self.module_queue
temphist = [ list([]) for i in range(self.n) ] ## Dont use [[]]*self.n as it uses same pointer for all the lists and hence they are identical at the end
stepsize = self.dthreshold/self.k
steps = [i*stepsize for i in range(1,self.k+1)]
for relation in self.module_queue:
temphist[relation[0]].append(relation[2])
temphist[relation[1]].append(relation[2])
histvalues = []
for histplot in temphist:
histplot = np.array(histplot,dtype=np.float32)
temp = []
for step in steps:
ans = np.sum(np.array(histplot<=self.dthreshold , dtype=np.float32)*(histplot*np.exp(-np.square(histplot - step)/(2*(self.sigma**2)))))
temp.append(ans)
temp = np.array(temp,dtype=np.float32)
temp /= (np.sum(temp)+self.epsilon)
histvalues.append(temp.copy())
del temp
return np.array(histvalues,dtype=np.float32)
def graph_obj_distances(self):
for i in range(self.n):
for j in range(i+1,self.n):
tempdst = (self.modules[i]-self.modules[j]).norm()
if tempdst<=self.dthreshold:
self.module_queue.append([i,j,tempdst])
if self.dlower<=tempdst<=self.dupper:
self.graph_reward += tempdst/10
return True
def step(self,actions):
if not pygame.display.get_init() and self.render_mode:
raise Exception("Some problem in the rendering contiivity of the code OpenAI Wrapper messing it up! or try running reset once at the beginning")
states=[]
reward = 0
self.screen.fill(self.BLACK)
for module,action in zip(self.modules,actions):
reward -= 0.0005 * module.update(action)
states.append(module.get_state())
if (not self.obj) or (module.l!=1):
pygame.draw.circle(self.screen,module.color,(module.rect.x,module.rect.y),module.radius)
pygame.draw.line(self.screen,module.color,(module.rect.x,module.rect.y),
(module.rect.x + self.radius*2*np.cos(module.theta),module.rect.y + self.radius*2*np.sin(module.theta)),1)
nar,nrect = rotate_arrow(self.arrow.copy(),
(module.rect.x + self.radius*2*np.cos(module.theta),module.rect.y + self.radius*2*np.sin(module.theta)),
module.theta)
self.screen.blit(nar,(nrect.x,nrect.y))
pygame.draw.circle(self.screen,(0,102,51),(module.rect.x,module.rect.y),int(self.ring_radius),2)## Draw A circle around it and draw the Region of interest
self.graph_obj_distances()
if self.obj:
mask = [0]*self.n
pygame.draw.circle(self.screen,self.target_color,self.target,self.radius) ## draw the blue dot
for i,module in enumerate(self.modules):
if module.dist(self.target).norm()<=self.dthreshold:
mask[i] = 1
if module.dist(self.target).norm()<=5*self.radius or module.l==1:
module.l=1
pygame.draw.circle(self.screen,(255,0,0),(module.rect.x,module.rect.y),module.radius)
self.target_reward += 1
reward += self.target_reward
neighbouring_bit = [0]*self.n
for relation in self.module_queue:
if relation[2]<=self.dthreshold:
if mask[relation[0]]==1:
neighbouring_bit[relation[1]]=1
if mask[relation[1]]==1:
neighbouring_bit[relation[0]]=1
else:
reward += self.graph_reward
for relation in self.module_queue:
if relation[2]<=self.dthreshold:
i ,j = relation[:2]
pygame.draw.line(self.screen,(255,0,0),self.modules[i].get_state()[:2],self.modules[j].get_state()[:2])
hist = self.fetch_histogram()
self.module_queue = []
if self.obj and reward>(self.n - 1):
done = True
else:
done = False
critic_input = np.array(pygame.surfarray.array3d(self.screen).swapaxes(0,1),dtype=np.uint8).reshape([self.screen_width,self.screen_heigth,3])
info = {"critic_input":critic_input,"localization_bit": [module.l for module in self.modules]}
if self.obj:
info["target_distance"]= [ module.dist(self.target).norm() if module.dist(self.target).norm()<self.dthreshold else -1 for module in self.modules]
info["neighbouring_bit"] = neighbouring_bit
self.graph_reward,self.target_reward = 0,0
return hist,reward,done,info
def reset(self):
if self.render_mode:
self.screen = pygame.display.set_mode((self.screen_width,self.screen_heigth))
pygame.display.set_caption("Swarm")
else:
self.screen = pygame.Surface((self.screen_width,self.screen_heigth))
self.screen.fill(self.BLACK)
if self.render_mode and (not pygame.display.get_init()):
pygame.display.init()
for module in self.modules:
module.spawn()
if self.obj:
self.target = (np.random.randint(self.radius,self.screen_width-self.radius),np.random.randint(self.radius,self.screen_heigth-self.radius))
pygame.draw.circle(self.screen,self.target_color,self.target,self.radius)
for module in self.modules:
module.l = 0
def render(self,mode='human',close=False):
if not pygame.display.get_init() and self.render_mode:
self.screen = pygame.display.set_mode((self.screen_width,self.screen_heigth))
pygame.display.set_caption("Swarm")
pygame.draw.circle(self.screen,module.color,(module.rect.x,module.rect.y),module.radius)
elif not self.render_mode:
raise Exception("You cant render if you have passed its arguement as False")
pygame.display.flip()
if mode=="human":
self.clock.tick(60)
    def close(self):
        """Shut down the pygame display (when one was opened) and quit pygame."""
        if self.render_mode:
            pygame.display.quit()
        pygame.quit()
| [
"numpy.sum",
"numpy.random.random_sample",
"numpy.ones",
"numpy.random.randint",
"numpy.sin",
"pygame.display.quit",
"pygame.surfarray.array3d",
"pygame.display.set_mode",
"pygame.display.set_caption",
"pygame.quit",
"pygame.Surface",
"numpy.square",
"pygame.init",
"pygame.display.get_init... | [((348, 361), 'pygame.init', 'pygame.init', ([], {}), '()\n', (359, 361), False, 'import pygame\n'), ((2350, 2369), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2367, 2369), False, 'import pygame\n'), ((4030, 4068), 'numpy.array', 'np.array', (['histvalues'], {'dtype': 'np.float32'}), '(histvalues, dtype=np.float32)\n', (4038, 4068), True, 'import numpy as np\n'), ((9138, 9159), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9157, 9159), False, 'import pygame\n'), ((9311, 9324), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (9322, 9324), False, 'import pygame\n'), ((3578, 3614), 'numpy.array', 'np.array', (['histplot'], {'dtype': 'np.float32'}), '(histplot, dtype=np.float32)\n', (3586, 3614), True, 'import numpy as np\n'), ((3871, 3903), 'numpy.array', 'np.array', (['temp'], {'dtype': 'np.float32'}), '(temp, dtype=np.float32)\n', (3879, 3903), True, 'import numpy as np\n'), ((5885, 5961), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', 'self.target_color', 'self.target', 'self.radius'], {}), '(self.screen, self.target_color, self.target, self.radius)\n', (5903, 5961), False, 'import pygame\n'), ((7924, 7988), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.screen_width, self.screen_heigth)'], {}), '((self.screen_width, self.screen_heigth))\n', (7947, 7988), False, 'import pygame\n'), ((8000, 8035), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Swarm"""'], {}), "('Swarm')\n", (8026, 8035), False, 'import pygame\n'), ((8076, 8131), 'pygame.Surface', 'pygame.Surface', (['(self.screen_width, self.screen_heigth)'], {}), '((self.screen_width, self.screen_heigth))\n', (8090, 8131), False, 'import pygame\n'), ((8245, 8266), 'pygame.display.init', 'pygame.display.init', ([], {}), '()\n', (8264, 8266), False, 'import pygame\n'), ((8514, 8590), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', 'self.target_color', 'self.target', 'self.radius'], {}), 
'(self.screen, self.target_color, self.target, self.radius)\n', (8532, 8590), False, 'import pygame\n'), ((8793, 8857), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.screen_width, self.screen_heigth)'], {}), '((self.screen_width, self.screen_heigth))\n', (8816, 8857), False, 'import pygame\n'), ((8869, 8904), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Swarm"""'], {}), "('Swarm')\n", (8895, 8904), False, 'import pygame\n'), ((8917, 9014), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', 'module.color', '(module.rect.x, module.rect.y)', 'module.radius'], {}), '(self.screen, module.color, (module.rect.x, module.rect.y\n ), module.radius)\n', (8935, 9014), False, 'import pygame\n'), ((9281, 9302), 'pygame.display.quit', 'pygame.display.quit', ([], {}), '()\n', (9300, 9302), False, 'import pygame\n'), ((827, 870), 'numpy.random.randint', 'np.random.randint', (['(0)', '(screen_width - radius)'], {}), '(0, screen_width - radius)\n', (844, 870), True, 'import numpy as np\n'), ((868, 912), 'numpy.random.randint', 'np.random.randint', (['(0)', '(screen_heigth - radius)'], {}), '(0, screen_heigth - radius)\n', (885, 912), True, 'import numpy as np\n'), ((2466, 2511), 'numpy.array', 'np.array', (['([[0, 0]] * self.n)'], {'dtype': 'np.float32'}), '([[0, 0]] * self.n, dtype=np.float32)\n', (2474, 2511), True, 'import numpy as np\n'), ((2556, 2619), 'numpy.array', 'np.array', (['([[self.radius, 2 * np.pi]] * self.n)'], {'dtype': 'np.float32'}), '([[self.radius, 2 * np.pi]] * self.n, dtype=np.float32)\n', (2564, 2619), True, 'import numpy as np\n'), ((2885, 2911), 'numpy.zeros', 'np.zeros', (['(self.n, self.k)'], {}), '((self.n, self.k))\n', (2893, 2911), True, 'import numpy as np\n'), ((3924, 3936), 'numpy.sum', 'np.sum', (['temp'], {}), '(temp)\n', (3930, 3936), True, 'import numpy as np\n'), ((4526, 4551), 'pygame.display.get_init', 'pygame.display.get_init', ([], {}), '()\n', (4549, 4551), False, 'import pygame\n'), ((5024, 
5121), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', 'module.color', '(module.rect.x, module.rect.y)', 'module.radius'], {}), '(self.screen, module.color, (module.rect.x, module.rect.y\n ), module.radius)\n', (5042, 5121), False, 'import pygame\n'), ((8205, 8230), 'pygame.display.get_init', 'pygame.display.get_init', ([], {}), '()\n', (8228, 8230), False, 'import pygame\n'), ((8378, 8441), 'numpy.random.randint', 'np.random.randint', (['self.radius', '(self.screen_width - self.radius)'], {}), '(self.radius, self.screen_width - self.radius)\n', (8395, 8441), True, 'import numpy as np\n'), ((8439, 8503), 'numpy.random.randint', 'np.random.randint', (['self.radius', '(self.screen_heigth - self.radius)'], {}), '(self.radius, self.screen_heigth - self.radius)\n', (8456, 8503), True, 'import numpy as np\n'), ((8719, 8744), 'pygame.display.get_init', 'pygame.display.get_init', ([], {}), '()\n', (8742, 8744), False, 'import pygame\n'), ((2983, 3026), 'numpy.ones', 'np.ones', (['(self.n, self.k)'], {'dtype': 'np.float32'}), '((self.n, self.k), dtype=np.float32)\n', (2990, 3026), True, 'import numpy as np\n'), ((6268, 6363), 'pygame.draw.circle', 'pygame.draw.circle', (['self.screen', '(255, 0, 0)', '(module.rect.x, module.rect.y)', 'module.radius'], {}), '(self.screen, (255, 0, 0), (module.rect.x, module.rect.y),\n module.radius)\n', (6286, 6363), False, 'import pygame\n'), ((1947, 1990), 'numpy.random.randint', 'np.random.randint', (['(0)', '(screen_width - radius)'], {}), '(0, screen_width - radius)\n', (1964, 1990), True, 'import numpy as np\n'), ((2031, 2075), 'numpy.random.randint', 'np.random.randint', (['(0)', '(screen_heigth - radius)'], {}), '(0, screen_heigth - radius)\n', (2048, 2075), True, 'import numpy as np\n'), ((3696, 3751), 'numpy.array', 'np.array', (['(histplot <= self.dthreshold)'], {'dtype': 'np.float32'}), '(histplot <= self.dthreshold, dtype=np.float32)\n', (3704, 3751), True, 'import numpy as np\n'), ((2125, 2150), 
'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (2148, 2150), True, 'import numpy as np\n'), ((5261, 5281), 'numpy.cos', 'np.cos', (['module.theta'], {}), '(module.theta)\n', (5267, 5281), True, 'import numpy as np\n'), ((5312, 5332), 'numpy.sin', 'np.sin', (['module.theta'], {}), '(module.theta)\n', (5318, 5332), True, 'import numpy as np\n'), ((5452, 5472), 'numpy.cos', 'np.cos', (['module.theta'], {}), '(module.theta)\n', (5458, 5472), True, 'import numpy as np\n'), ((5503, 5523), 'numpy.sin', 'np.sin', (['module.theta'], {}), '(module.theta)\n', (5509, 5523), True, 'import numpy as np\n'), ((7302, 7339), 'pygame.surfarray.array3d', 'pygame.surfarray.array3d', (['self.screen'], {}), '(self.screen)\n', (7326, 7339), False, 'import pygame\n'), ((3769, 3795), 'numpy.square', 'np.square', (['(histplot - step)'], {}), '(histplot - step)\n', (3778, 3795), True, 'import numpy as np\n')] |
import unittest
import cdms2
import numpy
import subprocess
import tempfile
import os
class CDMSNc3(unittest.TestCase):
    """Regression test: a file written after cdms2.useNetcdf3() must really
    be NetCDF-3, i.e. carry no hidden NetCDF-4 markers."""

    def testOutputNC3(self):
        """Write an array in NetCDF-3 mode and assert that ncdump shows no
        ``_IsNetcdf4`` special attribute in the header."""
        tempdir = tempfile.mkdtemp()
        data = numpy.random.random((12, 10))
        cdms2.useNetcdf3()
        fnm = os.path.join(tempdir, "temp_cdms2file.nc")
        with cdms2.open(fnm, "w") as f:
            f.write(data, id="data")
        # BUG FIX: the original called f.close() again after the ``with``
        # block had already closed the file (redundant double close).
        # ncdump -hs dumps the header plus the special (hidden) attributes.
        cmd = "ncdump -hs {0}".format(fnm)
        cmd = cmd.split()
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        o, e = p.communicate()
        # A NetCDF-4 file would expose an _IsNetcdf4 attribute; -1 means absent.
        self.assertEqual(str(o).find("_IsNetcdf4"), -1)
| [
"cdms2.open",
"cdms2.useNetcdf3",
"subprocess.Popen",
"tempfile.mkdtemp",
"numpy.random.random",
"os.path.join"
] | [((168, 186), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (184, 186), False, 'import tempfile\n'), ((202, 231), 'numpy.random.random', 'numpy.random.random', (['(12, 10)'], {}), '((12, 10))\n', (221, 231), False, 'import numpy\n'), ((239, 257), 'cdms2.useNetcdf3', 'cdms2.useNetcdf3', ([], {}), '()\n', (255, 257), False, 'import cdms2\n'), ((272, 314), 'os.path.join', 'os.path.join', (['tempdir', '"""temp_cdms2file.nc"""'], {}), "(tempdir, 'temp_cdms2file.nc')\n", (284, 314), False, 'import os\n'), ((327, 347), 'cdms2.open', 'cdms2.open', (['fnm', '"""w"""'], {}), "(fnm, 'w')\n", (337, 347), False, 'import cdms2\n'), ((504, 573), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n', (520, 573), False, 'import subprocess\n')] |
import pandas as pd
import numpy as np
from sklearn.linear_model import Lasso
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, OneHotEncoder, MinMaxScaler
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score
# Kaggle "House Prices" workflow: impute + one-hot + degree-2 polynomial
# features, then a bagged Lasso scored with 5-fold cross-validation.
# Import the data.
train = pd.read_csv('../train.csv', index_col='Id')
test = pd.read_csv('../test.csv', index_col='Id')
# The features that'll create too much noise in the imputting phase.
# (Columns with more than 100 missing training values are dropped from
# both the train and the test frames.)
mask = train.isnull().sum() > 100
noisy = train.columns[mask]
train.drop(noisy, axis=1, inplace=True)
test.drop(noisy, axis=1, inplace=True)
X = train.drop('SalePrice', axis=1)
y = train['SalePrice']
# Align the test columns with the training feature matrix.
test = test[X.columns]
# Getting the numerical and the categorical variables for polynomial expansion.
categorical_cols = [col for col in X.columns if
                    X[col].dtype == 'object']
numerical_cols = [col for col in X.columns if
                  col not in categorical_cols]
# Categorical branch: most-frequent imputation, then dense one-hot encoding
# (categories unseen at fit time are ignored instead of raising).
cat = Pipeline(
    steps=(
        ('impute', SimpleImputer(strategy='most_frequent')),
        ('encode', OneHotEncoder(sparse=False, handle_unknown='ignore'))
    )
)
transformer = ColumnTransformer(
    transformers=[
        ('cat_imputer', cat, categorical_cols),
        ('num_imputer', SimpleImputer(strategy='median'), numerical_cols)
    ]
)
# Best alpha is 40, so the GridSearch says.
preprocessor = Pipeline(
    steps=[
        ('preprocessor', transformer),
        ('poly', PolynomialFeatures(degree=2)),
        ('scaler', MinMaxScaler())
    ]
)
# NOTE(review): the preprocessor is fit on the full training matrix before
# cross-validation, so the CV score below is mildly optimistic (leakage).
X_ = preprocessor.fit_transform(X)
test_ = preprocessor.transform(test)
# Log-transform the target to reduce its skew.
y_ = np.log1p(y)
lasso = Lasso(alpha=40, max_iter=10000)
bag_reg = BaggingRegressor(lasso, n_estimators=50, bootstrap=True)
score = cross_val_score(bag_reg, X_, y_, cv=5, scoring='neg_mean_squared_error')
# Submission stuff.
# bag_reg.fit(X_, y)
# pred = bag_reg.predict(test_)
# pred = pd.DataFrame(pred, index=test.index, columns=['SalePrice'])
# pred.to_csv('../submission.csv') | [
"sklearn.impute.SimpleImputer",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.linear_model.Lasso",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.ensemble.BaggingRegressor",
"numpy.log1p"
] | [((412, 455), 'pandas.read_csv', 'pd.read_csv', (['"""../train.csv"""'], {'index_col': '"""Id"""'}), "('../train.csv', index_col='Id')\n", (423, 455), True, 'import pandas as pd\n'), ((463, 505), 'pandas.read_csv', 'pd.read_csv', (['"""../test.csv"""'], {'index_col': '"""Id"""'}), "('../test.csv', index_col='Id')\n", (474, 505), True, 'import pandas as pd\n'), ((1624, 1635), 'numpy.log1p', 'np.log1p', (['y'], {}), '(y)\n', (1632, 1635), True, 'import numpy as np\n'), ((1645, 1676), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(40)', 'max_iter': '(10000)'}), '(alpha=40, max_iter=10000)\n', (1650, 1676), False, 'from sklearn.linear_model import Lasso\n'), ((1687, 1743), 'sklearn.ensemble.BaggingRegressor', 'BaggingRegressor', (['lasso'], {'n_estimators': '(50)', 'bootstrap': '(True)'}), '(lasso, n_estimators=50, bootstrap=True)\n', (1703, 1743), False, 'from sklearn.ensemble import BaggingRegressor\n'), ((1753, 1825), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['bag_reg', 'X_', 'y_'], {'cv': '(5)', 'scoring': '"""neg_mean_squared_error"""'}), "(bag_reg, X_, y_, cv=5, scoring='neg_mean_squared_error')\n", (1768, 1825), False, 'from sklearn.model_selection import cross_val_score\n'), ((1079, 1118), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (1092, 1118), False, 'from sklearn.impute import SimpleImputer\n'), ((1134, 1186), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)', 'handle_unknown': '"""ignore"""'}), "(sparse=False, handle_unknown='ignore')\n", (1147, 1186), False, 'from sklearn.preprocessing import PolynomialFeatures, OneHotEncoder, MinMaxScaler\n'), ((1303, 1335), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (1316, 1335), False, 'from sklearn.impute import SimpleImputer\n'), ((1481, 1509), 'sklearn.preprocessing.PolynomialFeatures', 
'PolynomialFeatures', ([], {'degree': '(2)'}), '(degree=2)\n', (1499, 1509), False, 'from sklearn.preprocessing import PolynomialFeatures, OneHotEncoder, MinMaxScaler\n'), ((1525, 1539), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1537, 1539), False, 'from sklearn.preprocessing import PolynomialFeatures, OneHotEncoder, MinMaxScaler\n')] |
""" Generate Graeco-Latin Squares """
from .latin_square import latin_square
from .utils import _unroll
import numpy as np
_MAX_ITERATIONS = 10000
def greaco_latin_square(k, factor_1_labels=None, factor_2_labels=None, seed=None, unroll=None):
    """ Creates a k by k Graeco-Latin Square Design

    A graeco-latin square is a design comprised of two orthogonal latin
    squares. Note, there are no designs for k = 6.

    Arguments:
        k: the number of treatments.
        factor_1_labels: (optional) A list with k elements containing the
            labels applied to the levels of the first factor. The default
            are the first k uppercase Latin letters.
        factor_2_labels: (optional) A list with k elements containing the
            labels applied to the levels of the second factor. The default
            are the first k lowercase Greek letters.
        seed: (optional) The seed for the random number generation.
        unroll: (optional) When truthy, return the design unrolled via
            ``_unroll``.

    Raises:
        ValueError: if no design exists for k (k < 2 or k == 6) or if one of
            the label arguments does not have the correct number of labels.

    Returns:
        ndarray: the Graeco-Latin Square design

    Note:
        This is not compatible with Python 2 due to the use of ord('α').
    """
    if k < 2 or k == 6:
        raise ValueError('No Greaco-Latin Squares exist for k={}'.format(k))
    if factor_1_labels is None:
        factor_1_labels = [chr(ord('A') + i) for i in range(k)]
    elif not isinstance(factor_1_labels, list) or len(factor_1_labels) != k:
        # BUG FIX: the original did ``raise ValueError('...{}}').format(k)``,
        # which calls .format on the exception object (AttributeError) and
        # used a stray '}}' in the template.
        raise ValueError('factor_1_labels must be a list of length {}'.format(k))
    if factor_2_labels is None:
        factor_2_labels = [chr(ord('α') + i) for i in range(k)]
    elif not isinstance(factor_2_labels, list) or len(factor_2_labels) != k:
        raise ValueError('factor_2_labels must be a list of length {}'.format(k))
    if seed is None or seed == 0:
        # seed 0 would collapse the per-iteration seeds below, so remap it too.
        seed = 7172
    n_iter = 0
    while True:
        n_iter += 1
        # Draw two latin squares with decoupled seeds and keep them only
        # when they are mutually orthogonal.
        latin_square_1 = latin_square(k,
                                      factor_labels=factor_1_labels,
                                      seed=seed * n_iter)
        latin_square_2 = latin_square(k,
                                      factor_labels=factor_2_labels,
                                      seed=35 * seed * n_iter)
        if _is_orthoganal(k, latin_square_1, latin_square_2):
            break
        if n_iter > _MAX_ITERATIONS:
            raise Exception('Maximum number of iterations reached')
    # Concatenate the paired symbols cell by cell (e.g. 'A' + 'α' -> 'Aα').
    square = []
    for i in range(k):
        row = []
        for j in range(k):
            row.append((str(latin_square_1[i][j]) +
                        str(latin_square_2[i][j])))
        square.append(row)
    square = np.array(square)
    if unroll:
        square = _unroll(square)
    return square
def _is_orthoganal(k, latin_square_1, latin_square_2):
symbols = []
for i in range(k):
for j in range(k):
symbol = str(latin_square_1[i][j]) + str(latin_square_2[i][j])
if symbol in symbols:
return False
symbols.append(symbol)
return True
| [
"numpy.array"
] | [((2575, 2604), 'numpy.array', 'np.array', (['greaco_latin_square'], {}), '(greaco_latin_square)\n', (2583, 2604), True, 'import numpy as np\n')] |
import torch
import argparse
import numpy as np
import skimage.io as sio
from scipy.ndimage import zoom
import matplotlib.pylab as plt
from models.docs import DOCSNet
def load_image(filename, rgb_mean, input_size=512):
    """Read an RGB image, shrink it so its longer side fits ``input_size``,
    zero-pad it to an input_size x input_size square, subtract the channel
    means and convert to a 1x3xHxW float tensor.

    Returns:
        (resized image, padded mean-subtracted tensor, padding tuple).
    """
    im = sio.imread(filename)
    h, w = im.shape[:2]
    # Downscale (aspect ratio preserved) only when the longer side exceeds
    # input_size; assumes a 3-channel image — TODO confirm for grayscale input.
    if h >= w and h > input_size:
        im = zoom(im, (input_size / h, input_size / h, 1))
        h, w = im.shape[:2]
    elif w >= h and w > input_size:
        im = zoom(im, (input_size / w, input_size / w, 1))
        h, w = im.shape[:2]
    # Centre the image inside the square canvas.
    top = (input_size - h) // 2
    left = (input_size - w) // 2
    pad = ((top, input_size - h - top), (left, input_size - w - left), (0, 0))
    padded = np.pad(im, pad, 'constant', constant_values=0).astype(np.float32)
    padded -= rgb_mean
    # HWC -> 1CHW for the network.
    tensor = torch.from_numpy(padded.transpose((2, 0, 1))).unsqueeze(0)
    return im, tensor, pad
def remove_pad(a, pad):
    """Undo zero-padding: crop ``pad`` (((top, bottom), (left, right), ...))
    off the first two axes of array ``a``."""
    (top, bottom), (left, right) = pad[0], pad[1]
    rows, cols = a.shape[0], a.shape[1]
    return a[top:rows - bottom, left:cols - right]
def parse_args():
    """Define and parse the demo's command-line arguments (positional order
    matters: gpu, image A, image B, snapshot)."""
    ap = argparse.ArgumentParser(
        description='Deep Object Co-Segmentation (DOCS) Demo: Given two input '
                    'images, segments the common objects within two images.')
    ap.add_argument('gpu', metavar='GPU', type=int, help='gpu-id')
    ap.add_argument('image_a_path', metavar='IMG_A_PATH', help='path to first image.')
    ap.add_argument('image_b_path', metavar='IMG_B_PATH', help='path to second image.')
    ap.add_argument('snapshot', metavar='SNAPSHOT_PATH', help='paht to model\'s snapshot.')
    return ap.parse_args()
def main():
    """Entry point: load the DOCSNet snapshot, run it on the two input
    images and display the co-segmentation results in a 2x2 grid."""
    args = parse_args()
    rgb_means = [122.67892, 116.66877, 104.00699]
    # A CUDA device is mandatory for this demo.
    if not torch.cuda.is_available():
        raise RuntimeError('You need gpu for running this demo.')
    device = torch.device('cuda:%d' % args.gpu)
    print('Device:', device)
    print('Setting up the network...')
    state = torch.load(args.snapshot, map_location='cpu')
    net = DOCSNet(init_weights=False)
    net.load_state_dict(state['net_params'])
    net.eval()
    net.to(device)
    # Load and pad both inputs identically, then move them to the GPU.
    img_a, padded_a, pad_a = load_image(args.image_a_path, rgb_means)
    img_b, padded_b, pad_b = load_image(args.image_b_path, rgb_means)
    padded_a = padded_a.to(device)
    padded_b = padded_b.to(device)
    out_a, out_b = net.forward(padded_a, padded_b, softmax_out=True)
    # Foreground probability > 0.5 -> binary common-object masks.
    mask_a = remove_pad(out_a[0, 1].cpu().detach().numpy(), pad_a) > 0.5
    mask_b = remove_pad(out_b[0, 1].cpu().detach().numpy(), pad_b) > 0.5
    # Broadcast each mask over the three channels to blank the background.
    masked_a = img_a * np.tile(mask_a, (3, 1, 1)).transpose((1, 2, 0))
    masked_b = img_b * np.tile(mask_b, (3, 1, 1)).transpose((1, 2, 0))
    # 2x2 grid: originals on top, masked results below.
    for idx, image in enumerate((img_a, img_b, masked_a, masked_b), start=1):
        plt.subplot(2, 2, idx)
        plt.imshow(image)
    plt.show()
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"numpy.pad",
"argparse.ArgumentParser",
"matplotlib.pylab.subplot",
"matplotlib.pylab.imshow",
"models.docs.DOCSNet",
"torch.load",
"scipy.ndimage.zoom",
"torch.cuda.is_available",
"numpy.tile",
"torch.device",
"skimage.io.imread",
"matplotlib.pylab.show"
] | [((230, 250), 'skimage.io.imread', 'sio.imread', (['filename'], {}), '(filename)\n', (240, 250), True, 'import skimage.io as sio\n'), ((732, 778), 'numpy.pad', 'np.pad', (['im', 'pad', '"""constant"""'], {'constant_values': '(0)'}), "(im, pad, 'constant', constant_values=0)\n", (738, 778), True, 'import numpy as np\n'), ((1090, 1250), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Deep Object Co-Segmentation (DOCS) Demo: Given two input images, segments the common objects within two images."""'}), "(description=\n 'Deep Object Co-Segmentation (DOCS) Demo: Given two input images, segments the common objects within two images.'\n )\n", (1113, 1250), False, 'import argparse\n'), ((1859, 1893), 'torch.device', 'torch.device', (["('cuda:%d' % args.gpu)"], {}), "('cuda:%d' % args.gpu)\n", (1871, 1893), False, 'import torch\n'), ((1973, 2018), 'torch.load', 'torch.load', (['args.snapshot'], {'map_location': '"""cpu"""'}), "(args.snapshot, map_location='cpu')\n", (1983, 2018), False, 'import torch\n'), ((2029, 2056), 'models.docs.DOCSNet', 'DOCSNet', ([], {'init_weights': '(False)'}), '(init_weights=False)\n', (2036, 2056), False, 'from models.docs import DOCSNet\n'), ((2790, 2810), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (2801, 2810), True, 'import matplotlib.pylab as plt\n'), ((2813, 2830), 'matplotlib.pylab.imshow', 'plt.imshow', (['img_a'], {}), '(img_a)\n', (2823, 2830), True, 'import matplotlib.pylab as plt\n'), ((2835, 2855), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2846, 2855), True, 'import matplotlib.pylab as plt\n'), ((2858, 2875), 'matplotlib.pylab.imshow', 'plt.imshow', (['img_b'], {}), '(img_b)\n', (2868, 2875), True, 'import matplotlib.pylab as plt\n'), ((2880, 2900), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (2891, 2900), True, 'import matplotlib.pylab as plt\n'), ((2903, 2929), 
'matplotlib.pylab.imshow', 'plt.imshow', (['filtered_img_a'], {}), '(filtered_img_a)\n', (2913, 2929), True, 'import matplotlib.pylab as plt\n'), ((2934, 2954), 'matplotlib.pylab.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (2945, 2954), True, 'import matplotlib.pylab as plt\n'), ((2957, 2983), 'matplotlib.pylab.imshow', 'plt.imshow', (['filtered_img_b'], {}), '(filtered_img_b)\n', (2967, 2983), True, 'import matplotlib.pylab as plt\n'), ((2988, 2998), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2996, 2998), True, 'import matplotlib.pylab as plt\n'), ((317, 362), 'scipy.ndimage.zoom', 'zoom', (['im', '(input_size / h, input_size / h, 1)'], {}), '(im, (input_size / h, input_size / h, 1))\n', (321, 362), False, 'from scipy.ndimage import zoom\n'), ((1753, 1778), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1776, 1778), False, 'import torch\n'), ((427, 472), 'scipy.ndimage.zoom', 'zoom', (['im', '(input_size / w, input_size / w, 1)'], {}), '(im, (input_size / w, input_size / w, 1))\n', (431, 472), False, 'from scipy.ndimage import zoom\n'), ((2666, 2694), 'numpy.tile', 'np.tile', (['result_a', '(3, 1, 1)'], {}), '(result_a, (3, 1, 1))\n', (2673, 2694), True, 'import numpy as np\n'), ((2740, 2768), 'numpy.tile', 'np.tile', (['result_b', '(3, 1, 1)'], {}), '(result_b, (3, 1, 1))\n', (2747, 2768), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 15:32:25 2020

@author: sliakat

Opens one or more .spe files through a file dialog and displays the chosen
ROI of each: single-row data is plotted as a spectrum (against wavelength
when a calibration exists), multi-row data is shown as an auto-contrasted
grayscale image.
"""

from readSpe import readSpe
from matplotlib import pyplot as plt
import numpy as np
import tkinter as tk
from tkinter import filedialog

plt.close(fig='all')

root = tk.Tk()
root.withdraw()  # hide the empty tk root window; only the dialog is shown

regionToDisplay = 0  # this will display ROI #1

filePath = filedialog.askopenfilenames()  # can select multiple files

for i in range(0, len(filePath)):
    totalData = readSpe(filePath[i])
    waveCal = False
    try:
        xmlFooter = totalData.xmlFooter
        wavelengths = totalData.wavelengths
        waveCal = True
    # BUG FIX: was a bare ``except:`` -- only the missing-attribute case
    # (spe files without an XML footer / calibration) should be swallowed.
    except AttributeError:
        print('No wavelength calibration in spe.')
    dataList = totalData.data
    displayRange = range(0, 1)
    for k in displayRange:  # whatever range of figs dictated by displayRange
        data = dataList[regionToDisplay][k, :, :]
        fig = plt.figure('%s, Frame %d' % (filePath[i], k + 1))
        ax = fig.add_subplot(111)
        # image contrast adjustments: clip to the 5th-95th percentile range
        display_min = int(np.percentile(data.flatten(), 5))
        display_max = int(np.percentile(data.flatten(), 95))
        if display_min < 1:
            display_min = 1
        if np.size(data, 0) == 1:
            # Single-row data: plot as a spectrum.
            # BUG FIX: was try/except around the wavelength plot, so a stale
            # ``wavelengths`` from a previously-loaded calibrated file could
            # silently be applied to an uncalibrated one; test waveCal
            # explicitly instead (consistent with the image branch below).
            if waveCal:
                ax.plot(wavelengths, data[0])
                ax.set_xlabel('Wavelength (nm)')
            else:
                ax.plot(data[0])
            ax.set_ylabel('Intensity (counts)')
        else:
            if waveCal:
                # aspect=(np.size(data,0)/np.size(data,1))
                aspect = 0.05
                ax.imshow(data, vmin=display_min, vmax=display_max, cmap='gray',
                          extent=[wavelengths[0], wavelengths[-1], np.size(data, 0), 0],
                          aspect=aspect)
                ax.set(xlabel='Wavelength (nm)')
            else:
                ax.imshow(data, origin='upper', vmin=display_min, vmax=display_max, cmap='gray')
                ax.set(xlabel='Column')
            ax.set(ylabel='Row')
        if k + 1 >= np.size(dataList[regionToDisplay], 0):
            break
| [
"numpy.size",
"matplotlib.pyplot.close",
"tkinter.filedialog.askopenfilenames",
"matplotlib.pyplot.figure",
"readSpe.readSpe",
"tkinter.Tk"
] | [((237, 257), 'matplotlib.pyplot.close', 'plt.close', ([], {'fig': '"""all"""'}), "(fig='all')\n", (246, 257), True, 'from matplotlib import pyplot as plt\n'), ((266, 273), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (271, 273), True, 'import tkinter as tk\n'), ((352, 381), 'tkinter.filedialog.askopenfilenames', 'filedialog.askopenfilenames', ([], {}), '()\n', (379, 381), False, 'from tkinter import filedialog\n'), ((463, 483), 'readSpe.readSpe', 'readSpe', (['filePath[i]'], {}), '(filePath[i])\n', (470, 483), False, 'from readSpe import readSpe\n'), ((925, 974), 'matplotlib.pyplot.figure', 'plt.figure', (["('%s, Frame %d' % (filePath[i], k + 1))"], {}), "('%s, Frame %d' % (filePath[i], k + 1))\n", (935, 974), True, 'from matplotlib import pyplot as plt\n'), ((1251, 1267), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (1258, 1267), True, 'import numpy as np\n'), ((2037, 2074), 'numpy.size', 'np.size', (['dataList[regionToDisplay]', '(0)'], {}), '(dataList[regionToDisplay], 0)\n', (2044, 2074), True, 'import numpy as np\n'), ((1742, 1758), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (1749, 1758), True, 'import numpy as np\n')] |
#-----------------------------------------------------------------------------
# This file is part of the 'Simple-10GbE-RUDP-KCU105-Example'. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the 'Simple-10GbE-RUDP-KCU105-Example', including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import rogue.interfaces.stream as ris
import pyrogue as pr
import numpy as np
#################################################################
class FrameStrut(object):
    """Parsed frame container: 64-bit header word plus raw payload words."""
    def __init__(self):
        #############################
        # Header
        #############################
        self.header = None   # first 64-bit word of the frame; set by ParseFrame
        #############################
        # Payload
        #############################
        self.wrdData = None  # all complete 64-bit payload words (numpy uint64)
#################################################################
def ParseFrame(frame):
    """Copy a rogue stream frame into a FrameStrut.

    Arguments:
        frame: a stream frame providing getPayload() (payload size in
            bytes) and read(buffer, offset).

    Returns:
        FrameStrut with ``wrdData`` holding every complete 64-bit word of
        the payload and ``header`` set to the first word.
    """
    # Next we can get the size of the frame payload (in bytes)
    size = frame.getPayload()
    # To access the data we need to create a byte array to hold the data
    fullData = bytearray(size)
    # Next we read the frame data into the byte array, from offset 0
    frame.read(fullData,0)
    # Calculate the number of complete 64-bit words
    num64bWords = (size>>3)
    # Create the event
    eventFrame = FrameStrut()
    # BUG FIX: the original pre-filled wrdData with a throwaway Python list
    # ([None]*num64bWords) that was immediately overwritten by frombuffer;
    # the dead allocation has been removed.
    eventFrame.wrdData = np.frombuffer(fullData, dtype='uint64', count=num64bWords)
    # The data header is the first 64-bit word
    eventFrame.header = eventFrame.wrdData[0]
    # Return the results
    return eventFrame
#################################################################
# Refer to the following:
# https://slaclab.github.io/rogue/interfaces/stream/receiving.html
# https://github.com/slaclab/rogue/blob/master/python/pyrogue/_DataReceiver.py
#################################################################
# Class for streaming RX
class SwRx(pr.DataReceiver):
    """Software stream receiver that, when its DebugPrint variable is set,
    parses each incoming frame with ParseFrame and prints the header word."""
    # Init method must call the parent class init
    def __init__( self,**kwargs):
        super().__init__(**kwargs)
        # Runtime-togglable debug flag exposed through the pyrogue tree.
        self.add(pr.LocalVariable(
            name = 'DebugPrint',
            description = 'Flag to enable debug printing',
            value = False,
        ))
    # Method which is called when a frame is received
    def process(self,frame):
        """Handle one received frame; prints its header iff DebugPrint is True."""
        # Print out the event
        if self.DebugPrint.get():
            # Parse the frame
            eventFrame = ParseFrame(frame)
            # Print the header
            print( f'eventFrame.header = {eventFrame.header}' )
#################################################################
| [
"numpy.frombuffer",
"pyrogue.LocalVariable"
] | [((1683, 1741), 'numpy.frombuffer', 'np.frombuffer', (['fullData'], {'dtype': '"""uint64"""', 'count': 'num64bWords'}), "(fullData, dtype='uint64', count=num64bWords)\n", (1696, 1741), True, 'import numpy as np\n'), ((2362, 2460), 'pyrogue.LocalVariable', 'pr.LocalVariable', ([], {'name': '"""DebugPrint"""', 'description': '"""Flag to enable debug printing"""', 'value': '(False)'}), "(name='DebugPrint', description=\n 'Flag to enable debug printing', value=False)\n", (2378, 2460), True, 'import pyrogue as pr\n')] |
import csv
import threading
import os
import socket
import sys
import logging
import time
import argparse
import datetime
import numpy as np
import glob
import pandas
import cv2
from numpy import sin,sign,eye, zeros,cos, ones,vstack,hstack, matmul,transpose, quantile, mean, std , maximum, minimum, amax,amin
from numpy.linalg import norm,inv,det, matrix_power
from numpy.random import randn,randint,uniform
from math import pi,sqrt,atan2,sin,cos, floor
from controlpy.synthesis import controller_lqr_discrete_time as dlqr
import gurobipy as gp
from gurobipy import *
import scipy.sparse as sp
from scipy.linalg import block_diag
import matplotlib.pyplot as plt
from pytope import Polytope
from pytope.polytope import intersection, minkowski_sum
import multiprocessing
import scipy.special as sc
sc.seterr(all='ignore' )
def DefineStabilizingController(A, B, Q, R):
    """Return stabilizing discrete-time LQR gains (Ks, Ke) for x+ = A x + B u.

    Both gains solve the exact same LQR problem, so the Riccati equation is
    solved only once (the original solved it twice with identical
    arguments). The gain is negated so that u = K x is stabilizing.

    Returns:
        (Ks, Ke): two independent copies of the negated LQR gain, so a
        caller mutating one cannot affect the other.
    """
    K, _, _ = dlqr(A, B, Q, R)
    K = -K
    return K, K.copy()
def stackarrays(m, n, *args):
    """Stack m*n equally-sized blocks into an m-by-n block matrix.

    Blocks fill across columns first (row-major), as the original comments
    promised. BUG FIX / generalization: the original body hardcoded the
    2x2 case (it always indexed args[0..3] regardless of m and n); this
    version honours arbitrary m and n and is identical for 2x2.

    Arguments:
        m: number of block rows.
        n: number of block columns.
        *args: exactly m*n array blocks, row-major order.

    Returns:
        The stacked ndarray, or None (after printing a message) when the
        number of blocks is wrong -- preserving the original error path.
    """
    if len(args) != m * n:
        print("Invalid number of entries")
        return
    rows = [np.hstack(args[r * n:(r + 1) * n]) for r in range(m)]
    return np.vstack(rows)
def size(M, n):
    """MATLAB-style size helper: the length of a Python list, otherwise the
    n-th (1-based) dimension of an array-like with a ``shape`` attribute."""
    if isinstance(M, list):
        return len(M)
    return M.shape[n - 1]
def computeInvariantUgo( Acl, W ):
    # computeInvariant: if the algorithm in this function converges it returns
    # a Robust Positive Invariant (RPI) set for x+ = Acl x + w, w in W.
    # Acl: This matrix is the closed-loop matrix given by Acl = A - BK
    # W: This polytope defines the domain of the uncertainty. It is defined
    # using MPC. A 2d example is W = Polytope([1 1;1 -1; -1 1; -1 -1]);
    # NOTE(review): if no convergence within 10000 iterations the function
    # falls off the end and implicitly returns None -- callers must handle it.
    X = [zeros([size(Acl,1),1])]; # Initialize with the origin as a point
    for i in range(10000):
        if i == 0:
            # First iterate: X[0] is an ndarray point, so matrix-multiply with @.
            X.append(Acl@X[i] + W); # Propagate the uncertainty
        else:
            # Later iterates are Polytope objects; presumably pytope maps
            # them with `*` -- TODO confirm the operator semantics.
            X.append(Acl*X[i] + W); # Propagate the uncertainty
        # Check if the algorithm has converged (mutual containment => equality)
        if i > 1:
            if (X[i+1].contains(X[i])) and (X[i].contains(X[i+1])):
                invariantSet = X[i+1]; # Set invariant to the current iterate
                print('Invariant set computed in i = ',str(i),' iterations')
                return invariantSet
# v_constructGauss (below): sets a high confidence value and, using the
# collected data, constructs a support set. It then checks feasibility of
# the MPC problem from x_0. If infeasible, it lowers the confidence and
# repeats; if feasible, this support is rolled out to the main code to
# complete an iteration. Then data is collected and the process is
# repeated again.
def isEmptySet(P):
return any([dim==0 for dim in P.V.shape])
def v_constructGauss(v_samples, W, conf, nx,nu,A,B,Q, R, U,N, y_0, Ks, Ke, L):
bsSize = 1000; # Bootstrap copies
vbs = zeros([bsSize,nx,size(v_samples,2)]);
## First construct the convex hull vertices
mincvxH = amin(v_samples,axis=1).reshape(nx,1);
maxcvxH = amax(v_samples,axis=1).reshape(nx,1);
## Start Bootstrap here
meanEmp = mean(v_samples,1);
stdEmp = std(v_samples,1);
for j in range(bsSize):
for i in range(size(v_samples,2)):
entr = randint(0,size(v_samples,2));
vbs[j,:,i] = v_samples[:,entr];
meanBatch = zeros([nx,bsSize]);
stdBatch = zeros([nx,bsSize]);
for j in range(bsSize):
meanBatch[:,j] = mean(vbs[j,:,:],1);
stdBatch[:,j] = std(vbs[j,:,:],1);
###### Take the confidence set #########
minMu = zeros([nx,1]);
maxMu = zeros([nx,1]);
maxStd = zeros([nx,1]);
###########################
##
feas_conf = 0;
count = 0;
count2 = 0;
print("Adding Constraints")
while feas_conf == 0:
print(count,"=========================================================================================================================================================")
if conf >= 0.001:
conf = conf*(0.9999**count);
for i in range(nx):
minMu[i,0] = quantile(meanBatch[i,:],(1-conf)/2);
maxMu[i,0] = quantile(meanBatch[i,:],1-(1-conf)/2);
maxStd[i,0] = quantile(stdBatch[i,:],1-(1-conf)/2);
###########################
v_lb = minimum(mincvxH, minMu - 3.08*maxStd);
v_ub = maximum(maxcvxH, maxMu + 3.08*maxStd);
# v_lb = minMu - 3.08*maxStd;
# v_ub = maxMu + 3.08*maxStd; # NO CVX HULL UNION
Vmn = Polytope(lb = v_lb, ub = v_ub); # CVX HULL OF UNION
else :
v_lb = min(mincvxH, meanEmp - (3.08-0.1*count2)*stdEmp);
v_ub = max(maxcvxH, meanEmp + (3.08-0.1*count2)*stdEmp);
# v_lb = meanEmp - (3.08-0.1*count2)*stdEmp;
# v_ub = meanEmp + (3.08-0.1*count2)*stdEmp; # NO CVX HULL UNION
Vmn = Polytope(lb = v_lb, ub = v_ub); # CVX HULL OF UNION
count2 = count2 + 1;
### ALL CHECKS MUST GO THROUGH WITH THIS SET
####################### MAYNE APPROACH (1) ######################
ALcl = A-L; # FOLLOW MAYNE NOTATIONS HERE
LVsc = (-L)*Vmn;
DeltaTilde = W + LVsc;
minRTilde = computeInvariantUgo(ALcl, DeltaTilde);
### second piece
Acl = A + B@Ke;
DeltaBar = L*minRTilde + L*Vmn;
Vlist = DeltaBar.V;
tolV = 300; # after 100 vertices, max box outside
if size(Vlist,1) < tolV:
print('***** NOT FITTING BOX. EXACT MIN INVARIANT SET ATTEMPT******')
l = np.array([[min(Vlist[:,0])],[min(Vlist[:,1])]]);
u = np.array([[max(Vlist[:,0])],[max(Vlist[:,1])]]);
polOut = Polytope(lb = l, ub = u);
minRBar = computeInvariantUgo(Acl, polOut);
print("UGO COMPUTED")
else:
print('***** FITTING BOX. APPROX MIN INVARIANT SET ******')
minRBar = computeInvariantUgo(Acl, DeltaBar);
print("UGO COMPUTED")
minR = minRTilde + minRBar; # NET PIECE
print("minRTilde",minRTilde.V)
print("minRBar",minRBar.V)
print("minr",minR.V)
# Compute the Tightened Ubar
Ubar = U-Ke*minRBar;
Hubar = Ubar.A;
hubar = Ubar.b;
# Terminal Condition = 0
Xn_nom = Polytope(lb = zeros(nx), ub = zeros(nx))
Hxn_nom = Xn_nom.A
hxn_nom = Xn_nom.b
## Checking if x_hat exists
maxB = np.linalg.norm([v_ub, -v_lb],np.inf,0)
vmnN0 = Polytope(lb = -maxB, ub = maxB);
mthX0 = y_0 + (-vmnN0);
try:
polxhat0 = mthX0 - minRTilde;
except:
polxhat0 = -(minRTilde - mthX0)
if isEmptySet(polxhat0) == 0 and isEmptySet(Ubar) == 0:
gamma_hat = np.linalg.norm((polxhat0 + (-minRBar)).V,np.inf,0)
Xbar = Polytope(lb=-gamma_hat,ub=gamma_hat)
Hxbar = Xbar.A
hxbar = Xbar.b
if isEmptySet(Xbar) == 0:
print("Xbar",Xbar.V)
print("Ubar",Ubar.V)
print("Xn_nom",Xn_nom.V)
print("Vmn",Vmn.V)
feas_conf = 1
else:
feas_conf = 0;
else:
print("no v gauss - lower confidence")
feas_conf = 0;
count = count + 1;
return Vmn, v_lb, v_ub, minRTilde, minRBar, minR, Hxn_nom, hxn_nom, Hxbar, hxbar, Hubar, hubar , Xbar, Ubar, Xn_nom, polxhat0
def sys_load():
dt = 0.01
N = 25
A = eye(2)
B = dt*eye(2)
nx = A.shape[1]
nu = B.shape[1]
Q = np.diag([500,500])
R = np.diag([0.4,0.4])
ua = 8
uxmin = -1.0*ua
uxmax = 1.0*ua
uzmin = -1.0*ua
uzmax = 1.0*ua
U = Polytope(lb = np.array([[uxmin],[uzmin]]),ub = np.array([[uxmax],[uzmax]]))
# # # Defining Process Noise Bounds
wlb_true = -0.00; # Lower bound of additive noise value ######
wub_true = -wlb_true; # Upper bound of additive noise value ######
y_0 = Polytope(lb=np.array([-0.3492,-0.2457]),ub=np.array([-0.3158,-0.2095]))
_, Pinf,_ = dlqr(A,B,Q,R);
return A,B,U,nx,nu,wub_true,wlb_true, y_0, Q,R,N,dt ,Pinf
def construct_Models(Hxn_nom, hxn_nom, Hxbar, hxbar, Hubar, hubar, Pinf, nx,nu,A,B,Q,R,U,N,y_0, soft_flg, Wslack,env=gp.Env()):
env.setParam('OutputFlag', 0)
# Initialize each as a list in case we wanted time-varying cost matrices
m = []
epsilon = []
previous_u_cup = []
e_x = []
e_u = []
cost_stage = []
e_uprevious = []
U_penalty = []
U_between_penalty = []
polxnom_A = []
polxnom_b = []
for step in range(N):
m.append(gp.Model(env=env, name=str(step)+"matrix"))
m[step].setParam('OutputFlag', 0)
# Decision variables
e_x.append(m[step].addMVar(shape=(nx,N+1-step), lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS, name=str(step)+"e_x"))
e_u.append(m[step].addMVar(shape=(nu,N-step),lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS, name=str(step)+"e_u"))
e_uprevious.append(m[step].addMVar(shape=(nu,N),lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS, name=str(step)+"e_uprevious"))
previous_u_cup.append(m[step].addMVar(shape=(nu),lb=-GRB.INFINITY, vtype=GRB.CONTINUOUS, name=str(step)+"u_ball_nom"))
epsilon.append(m[step].addMVar(shape=(1), vtype=GRB.CONTINUOUS, name=str(step)+"epsilon"))
# Penalty for input between steps, does not change any guarantees
U_penalty.append(np.diag([50,500]))
U_between_penalty.append(np.diag([50,50]))
# Initialize cost
cost_stage.append(0)
# k=0
# cost_stage[step] = cost_stage[step] + e_u[step][:,k]@U_penalty[step]@e_u[step][:,k]-e_u[step][:,k]@U_penalty[step]@e_uprevious[step][:,k]*2+e_uprevious[step][:,k]@U_penalty[step]@e_uprevious[step][:,k]
for k in range(N-step):
# Dynamics
m[step].addConstr(e_x[step][:,k+1] == A@e_x[step][:,k]+ B@(e_u[step][:,k]), name=str(step)+"xpred"+str(k))
# State and input constraints
m[step].addConstr(Hxbar@e_x[step][:,k]<= hxbar.flatten(), name=str(step)+"Hubar1"+str(k))
m[step].addConstr(Hubar@e_u[step][:,k]<= hubar.flatten(), name=str(step)+"Hubar2"+str(k))
# Add stage cost
cost_stage[step] = cost_stage[step] + e_x[step][:,k]@Q@e_x[step][:,k]+e_u[step][:,k]@R@e_u[step][:,k]*k**2;
if k<N-1-step:
cost_stage[step] = cost_stage[step] + e_u[step][:,k+1]@U_between_penalty[step]@e_u[step][:,k+1]-e_u[step][:,k+1]@U_between_penalty[step]@e_u[step][:,k]*2+e_u[step][:,k]@U_between_penalty[step]@e_u[step][:,k];
cost_stage[step] = cost_stage[step] + e_u[step][:,k]@U_penalty[step]@e_u[step][:,k]-e_u[step][:,k]@U_penalty[step]@e_uprevious[step][:,k]*2+e_uprevious[step][:,k]@U_penalty[step]@e_uprevious[step][:,k]
# Terminal constraint (0 in this case)
m[step].addConstr(Hxn_nom@e_x[step][:,N-step]<= hxn_nom.flatten(), name=str(step)+"Hxnom")
m[step].setObjective(cost_stage[step], GRB.MINIMIZE)
m[step].update()
# Add terminal cost
cost_stage[step] = cost_stage[step] + e_x[step][:,N-step]@Pinf@e_x[step][:,N-step] + epsilon[step]@epsilon[step]*soft_flg*Wslack
return e_x,e_u,previous_u_cup,m,epsilon | [
"numpy.minimum",
"numpy.maximum",
"numpy.amin",
"numpy.quantile",
"numpy.std",
"pytope.Polytope",
"numpy.zeros",
"numpy.hstack",
"numpy.amax",
"controlpy.synthesis.controller_lqr_discrete_time",
"numpy.mean",
"numpy.linalg.norm",
"numpy.array",
"scipy.special.seterr",
"numpy.eye",
"gur... | [((807, 830), 'scipy.special.seterr', 'sc.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (816, 830), True, 'import scipy.special as sc\n'), ((978, 994), 'controlpy.synthesis.controller_lqr_discrete_time', 'dlqr', (['A', 'B', 'Q', 'R'], {}), '(A, B, Q, R)\n', (982, 994), True, 'from controlpy.synthesis import controller_lqr_discrete_time as dlqr\n'), ((1022, 1038), 'controlpy.synthesis.controller_lqr_discrete_time', 'dlqr', (['A', 'B', 'Q', 'R'], {}), '(A, B, Q, R)\n', (1026, 1038), True, 'from controlpy.synthesis import controller_lqr_discrete_time as dlqr\n'), ((3184, 3202), 'numpy.mean', 'mean', (['v_samples', '(1)'], {}), '(v_samples, 1)\n', (3188, 3202), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3216, 3233), 'numpy.std', 'std', (['v_samples', '(1)'], {}), '(v_samples, 1)\n', (3219, 3233), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3419, 3438), 'numpy.zeros', 'zeros', (['[nx, bsSize]'], {}), '([nx, bsSize])\n', (3424, 3438), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3459, 3478), 'numpy.zeros', 'zeros', (['[nx, bsSize]'], {}), '([nx, bsSize])\n', (3464, 3478), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3656, 3670), 'numpy.zeros', 'zeros', (['[nx, 1]'], {}), '([nx, 1])\n', (3661, 3670), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3683, 3697), 'numpy.zeros', 'zeros', (['[nx, 1]'], {}), '([nx, 1])\n', (3688, 3697), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, 
quantile, mean, std, maximum, minimum, amax, amin\n'), ((3711, 3725), 'numpy.zeros', 'zeros', (['[nx, 1]'], {}), '([nx, 1])\n', (3716, 3725), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((8063, 8069), 'numpy.eye', 'eye', (['(2)'], {}), '(2)\n', (8066, 8069), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((8141, 8160), 'numpy.diag', 'np.diag', (['[500, 500]'], {}), '([500, 500])\n', (8148, 8160), True, 'import numpy as np\n'), ((8170, 8189), 'numpy.diag', 'np.diag', (['[0.4, 0.4]'], {}), '([0.4, 0.4])\n', (8177, 8189), True, 'import numpy as np\n'), ((8651, 8667), 'controlpy.synthesis.controller_lqr_discrete_time', 'dlqr', (['A', 'B', 'Q', 'R'], {}), '(A, B, Q, R)\n', (8655, 8667), True, 'from controlpy.synthesis import controller_lqr_discrete_time as dlqr\n'), ((8849, 8857), 'gurobipy.Env', 'gp.Env', ([], {}), '()\n', (8855, 8857), True, 'import gurobipy as gp\n'), ((3534, 3555), 'numpy.mean', 'mean', (['vbs[j, :, :]', '(1)'], {}), '(vbs[j, :, :], 1)\n', (3538, 3555), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3579, 3599), 'numpy.std', 'std', (['vbs[j, :, :]', '(1)'], {}), '(vbs[j, :, :], 1)\n', (3582, 3599), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((6908, 6948), 'numpy.linalg.norm', 'np.linalg.norm', (['[v_ub, -v_lb]', 'np.inf', '(0)'], {}), '([v_ub, -v_lb], np.inf, 0)\n', (6922, 6948), True, 'import numpy as np\n'), ((6964, 6991), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-maxB)', 'ub': 'maxB'}), '(lb=-maxB, ub=maxB)\n', (6972, 6991), False, 'from pytope import Polytope\n'), ((8081, 8087), 'numpy.eye', 'eye', (['(2)'], {}), '(2)\n', (8084, 
8087), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((1309, 1338), 'numpy.hstack', 'np.hstack', (['(args[0], args[1])'], {}), '((args[0], args[1]))\n', (1318, 1338), True, 'import numpy as np\n'), ((1338, 1367), 'numpy.hstack', 'np.hstack', (['(args[2], args[3])'], {}), '((args[2], args[3]))\n', (1347, 1367), True, 'import numpy as np\n'), ((3051, 3074), 'numpy.amin', 'amin', (['v_samples'], {'axis': '(1)'}), '(v_samples, axis=1)\n', (3055, 3074), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((3103, 3126), 'numpy.amax', 'amax', (['v_samples'], {'axis': '(1)'}), '(v_samples, axis=1)\n', (3107, 3126), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((4440, 4479), 'numpy.minimum', 'minimum', (['mincvxH', '(minMu - 3.08 * maxStd)'], {}), '(mincvxH, minMu - 3.08 * maxStd)\n', (4447, 4479), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((4501, 4540), 'numpy.maximum', 'maximum', (['maxcvxH', '(maxMu + 3.08 * maxStd)'], {}), '(maxcvxH, maxMu + 3.08 * maxStd)\n', (4508, 4540), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((4702, 4728), 'pytope.Polytope', 'Polytope', ([], {'lb': 'v_lb', 'ub': 'v_ub'}), '(lb=v_lb, ub=v_ub)\n', (4710, 4728), False, 'from pytope import Polytope\n'), ((5137, 5163), 'pytope.Polytope', 'Polytope', ([], {'lb': 'v_lb', 'ub': 'v_ub'}), '(lb=v_lb, ub=v_ub)\n', (5145, 5163), False, 'from pytope import Polytope\n'), ((6087, 6107), 'pytope.Polytope', 'Polytope', ([], {'lb': 'l', 'ub': 'u'}), '(lb=l, ub=u)\n', (6095, 6107), False, 'from pytope 
import Polytope\n'), ((7292, 7342), 'numpy.linalg.norm', 'np.linalg.norm', (['(polxhat0 + -minRBar).V', 'np.inf', '(0)'], {}), '((polxhat0 + -minRBar).V, np.inf, 0)\n', (7306, 7342), True, 'import numpy as np\n'), ((7363, 7400), 'pytope.Polytope', 'Polytope', ([], {'lb': '(-gamma_hat)', 'ub': 'gamma_hat'}), '(lb=-gamma_hat, ub=gamma_hat)\n', (7371, 7400), False, 'from pytope import Polytope\n'), ((8303, 8331), 'numpy.array', 'np.array', (['[[uxmin], [uzmin]]'], {}), '([[uxmin], [uzmin]])\n', (8311, 8331), True, 'import numpy as np\n'), ((8336, 8364), 'numpy.array', 'np.array', (['[[uxmax], [uzmax]]'], {}), '([[uxmax], [uzmax]])\n', (8344, 8364), True, 'import numpy as np\n'), ((8571, 8599), 'numpy.array', 'np.array', (['[-0.3492, -0.2457]'], {}), '([-0.3492, -0.2457])\n', (8579, 8599), True, 'import numpy as np\n'), ((8602, 8630), 'numpy.array', 'np.array', (['[-0.3158, -0.2095]'], {}), '([-0.3158, -0.2095])\n', (8610, 8630), True, 'import numpy as np\n'), ((10029, 10047), 'numpy.diag', 'np.diag', (['[50, 500]'], {}), '([50, 500])\n', (10036, 10047), True, 'import numpy as np\n'), ((10083, 10100), 'numpy.diag', 'np.diag', (['[50, 50]'], {}), '([50, 50])\n', (10090, 10100), True, 'import numpy as np\n'), ((4194, 4235), 'numpy.quantile', 'quantile', (['meanBatch[i, :]', '((1 - conf) / 2)'], {}), '(meanBatch[i, :], (1 - conf) / 2)\n', (4202, 4235), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((4260, 4305), 'numpy.quantile', 'quantile', (['meanBatch[i, :]', '(1 - (1 - conf) / 2)'], {}), '(meanBatch[i, :], 1 - (1 - conf) / 2)\n', (4268, 4305), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((4329, 4373), 'numpy.quantile', 'quantile', (['stdBatch[i, :]', '(1 - (1 - conf) / 2)'], {}), '(stdBatch[i, :], 1 - (1 - conf) / 2)\n', (4337, 4373), False, 'from numpy import 
sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((6774, 6783), 'numpy.zeros', 'zeros', (['nx'], {}), '(nx)\n', (6779, 6783), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n'), ((6790, 6799), 'numpy.zeros', 'zeros', (['nx'], {}), '(nx)\n', (6795, 6799), False, 'from numpy import sin, sign, eye, zeros, cos, ones, vstack, hstack, matmul, transpose, quantile, mean, std, maximum, minimum, amax, amin\n')] |
"""
Unit tests for train_pg_f18.py
"""
import numpy as np
from mock import patch
from sklearn import preprocessing
from train_pg_f18 import Agent
class TestPolicyGradients(object):
def test_normalize(self):
with patch.object(Agent, "__init__", lambda p1, p2, p3, p4: None):
agent = Agent(None, None, None)
a = np.array([1, -13, 44, 100, 57, -20, 53, 53, 6, 0])
np.testing.assert_allclose(agent.norm(a), preprocessing.scale(a))
def test_sum_of_rewards_monte_carlo(self):
with patch.object(Agent, "__init__", lambda p1, p2, p3, p4: None):
agent = Agent(None, None, None)
agent.reward_to_go = False
agent.gamma = 0.5
rewards = np.array([np.ones(3), np.ones(3)])
expected = [1.75] * 6
actual = agent.sum_of_rewards(rewards)
assert len(expected) == len(actual)
np.testing.assert_allclose(expected, actual)
def test_sum_of_rewards_reward_to_go(self):
with patch.object(Agent, "__init__", lambda p1, p2, p3, p4: None):
agent = Agent(None, None, None)
agent.reward_to_go = True
agent.gamma = 0.5
rewards = np.array([np.ones(3), np.ones(3)])
expected = [1.75, 1.5, 1, 1.75, 1.5, 1]
actual = agent.sum_of_rewards(rewards)
assert len(expected) == len(actual)
np.testing.assert_allclose(expected, actual)
| [
"mock.patch.object",
"sklearn.preprocessing.scale",
"train_pg_f18.Agent",
"numpy.ones",
"numpy.array",
"numpy.testing.assert_allclose"
] | [((228, 288), 'mock.patch.object', 'patch.object', (['Agent', '"""__init__"""', '(lambda p1, p2, p3, p4: None)'], {}), "(Agent, '__init__', lambda p1, p2, p3, p4: None)\n", (240, 288), False, 'from mock import patch\n'), ((310, 333), 'train_pg_f18.Agent', 'Agent', (['None', 'None', 'None'], {}), '(None, None, None)\n', (315, 333), False, 'from train_pg_f18 import Agent\n'), ((350, 400), 'numpy.array', 'np.array', (['[1, -13, 44, 100, 57, -20, 53, 53, 6, 0]'], {}), '([1, -13, 44, 100, 57, -20, 53, 53, 6, 0])\n', (358, 400), True, 'import numpy as np\n'), ((540, 600), 'mock.patch.object', 'patch.object', (['Agent', '"""__init__"""', '(lambda p1, p2, p3, p4: None)'], {}), "(Agent, '__init__', lambda p1, p2, p3, p4: None)\n", (552, 600), False, 'from mock import patch\n'), ((622, 645), 'train_pg_f18.Agent', 'Agent', (['None', 'None', 'None'], {}), '(None, None, None)\n', (627, 645), False, 'from train_pg_f18 import Agent\n'), ((919, 963), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (945, 963), True, 'import numpy as np\n'), ((1026, 1086), 'mock.patch.object', 'patch.object', (['Agent', '"""__init__"""', '(lambda p1, p2, p3, p4: None)'], {}), "(Agent, '__init__', lambda p1, p2, p3, p4: None)\n", (1038, 1086), False, 'from mock import patch\n'), ((1108, 1131), 'train_pg_f18.Agent', 'Agent', (['None', 'None', 'None'], {}), '(None, None, None)\n', (1113, 1131), False, 'from train_pg_f18 import Agent\n'), ((1422, 1466), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (1448, 1466), True, 'import numpy as np\n'), ((455, 477), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['a'], {}), '(a)\n', (474, 477), False, 'from sklearn import preprocessing\n'), ((747, 757), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (754, 757), True, 'import numpy as np\n'), ((759, 769), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (766, 
769), True, 'import numpy as np\n'), ((1232, 1242), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1239, 1242), True, 'import numpy as np\n'), ((1244, 1254), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1251, 1254), True, 'import numpy as np\n')] |
import numpy as np
def centeredfft2(Z, FsX, FsY):
"""
Computes a 2D fft centered at 0. Or in other words, compute
the 2D fft, and then fftshift it.
"""
M = np.size(Z, 1)
N = np.size(Z, 0)
k = np.array(range(-M // 2, M // 2))
freqX = k / (M / FsX)
k = np.array(range(-N // 2, N // 2))
freqY = k / (N / FsY)
fft = np.fft.fft2(Z) / (M * N)
fft2 = (1 / (FsY * FsX)) * np.fft.fft2(Z)
fft = np.fft.fftshift(fft)
fft2 = np.fft.fftshift(fft2)
return (freqX, freqY, fft, fft2)
| [
"numpy.size",
"numpy.fft.fftshift",
"numpy.fft.fft2"
] | [((177, 190), 'numpy.size', 'np.size', (['Z', '(1)'], {}), '(Z, 1)\n', (184, 190), True, 'import numpy as np\n'), ((199, 212), 'numpy.size', 'np.size', (['Z', '(0)'], {}), '(Z, 0)\n', (206, 212), True, 'import numpy as np\n'), ((441, 461), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft'], {}), '(fft)\n', (456, 461), True, 'import numpy as np\n'), ((473, 494), 'numpy.fft.fftshift', 'np.fft.fftshift', (['fft2'], {}), '(fft2)\n', (488, 494), True, 'import numpy as np\n'), ((360, 374), 'numpy.fft.fft2', 'np.fft.fft2', (['Z'], {}), '(Z)\n', (371, 374), True, 'import numpy as np\n'), ((416, 430), 'numpy.fft.fft2', 'np.fft.fft2', (['Z'], {}), '(Z)\n', (427, 430), True, 'import numpy as np\n')] |
from math import ceil, sqrt
import numpy as np
def hoeffding_n_given_t_and_p_one_sided(t:np.double, p:np.double, C=0.5) -> int:
"""
Return n such that with probability at least p, P(E[X] < \bar X_n + t).
Where \bar X_n is the mean of n samples.
Parameters
----------
t : double
one sided confidence interval width
p : double
probability of bound holding
C : double
Width of sample support domain. E.g. 0.5 if all samples fall in
[0.5, 1.0]
Returns
-------
"""
return int(ceil(C ** 2 * np.log(1 - p) / (-2 * t ** 2)))
def hoeffding_n_given_t_and_p_two_sided(t:np.double, p:np.double, C=0.5) -> int:
"""
Return n such that with probability at least p, P(|E[X] - \bar X_n| <= t).
Where \bar X_n is the mean of n samples.
Parameters
----------
t : double
two sided confidence interval width
p : double
probability of bound holding
C : double
Width of sample support domain. E.g. 0.5 if all samples fall in
[0.5, 1.0]
Returns
-------
"""
return int(ceil(C ** 2 * np.log( 0.5*(1 - p) ) / (-2 * t ** 2)))
def chebyshev_k_from_upper_bound_prob(p_bound_holds:np.double) -> int:
"""
Return k such with with probability at least p_bound_holds X will be < mu + k*sigma
Parameters
----------
p_bound_holds : double
Returns
-------
"""
p_bound_violated = 1 - p_bound_holds
return int(ceil(sqrt(1 / p_bound_violated)))
def accuracy_to_statistical_distance(accuracy):
return (accuracy - 0.5) * 2
def statistical_distance_to_accuracy(statistical_distance):
return 0.5 + 0.5 * statistical_distance | [
"numpy.log",
"math.sqrt"
] | [((1501, 1527), 'math.sqrt', 'sqrt', (['(1 / p_bound_violated)'], {}), '(1 / p_bound_violated)\n', (1505, 1527), False, 'from math import ceil, sqrt\n'), ((576, 589), 'numpy.log', 'np.log', (['(1 - p)'], {}), '(1 - p)\n', (582, 589), True, 'import numpy as np\n'), ((1139, 1160), 'numpy.log', 'np.log', (['(0.5 * (1 - p))'], {}), '(0.5 * (1 - p))\n', (1145, 1160), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 12:48:00 2020
@author: kpmurphy
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LogisticRegression
import matplotlib.colors as mcol
import os
degree = 4
# C =1/lambda, so large C is large variance is small regularization
C_list = [1e0, 1e4]
plot_list = C_list
err_train_list = []
err_test_list = []
w_list = []
for i, C in enumerate(C_list):
transformer = PolynomialFeatures(degree)
name = 'Reg{:d}-Degree{}'.format(int(C), degree)
XXtrain = transformer.fit_transform(Xtrain)[:, 1:] # skip the first column of 1s
model = LogisticRegression(C=C)
model = model.fit(XXtrain, ytrain)
w = model.coef_[0]
w_list.append(w)
ytrain_pred = model.predict(XXtrain)
nerrors_train = np.sum(ytrain_pred != ytrain)
err_train_list.append(nerrors_train / ntrain)
XXtest = transformer.fit_transform(Xtest)[:, 1:] # skip the first column of 1s
ytest_pred = model.predict(XXtest)
nerrors_test = np.sum(ytest_pred != ytest)
err_test_list.append(nerrors_test / ntest)
if C in plot_list:
fig, ax = plt.subplots()
plot_predictions(ax, xx, yy, transformer, model)
plot_data(ax, Xtrain, ytrain, is_train=True)
#plot_data(ax, Xtest, ytest, is_train=False)
ax.set_title(name)
fname = 'logreg_poly_surface-{}.png'.format(name)
save_fig(fname)
plt.draw()
plt.figure()
plt.plot(C_list, err_train_list, 'x-', label='train')
plt.plot(C_list, err_test_list, 'o-', label='test')
plt.legend()
plt.xlabel('Inverse regularization')
plt.ylabel('error rate')
save_fig('logreg_poly_vs_reg.png') | [
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.draw",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.figure",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((1613, 1625), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1623, 1625), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1679), 'matplotlib.pyplot.plot', 'plt.plot', (['C_list', 'err_train_list', '"""x-"""'], {'label': '"""train"""'}), "(C_list, err_train_list, 'x-', label='train')\n", (1634, 1679), True, 'import matplotlib.pyplot as plt\n'), ((1680, 1731), 'matplotlib.pyplot.plot', 'plt.plot', (['C_list', 'err_test_list', '"""o-"""'], {'label': '"""test"""'}), "(C_list, err_test_list, 'o-', label='test')\n", (1688, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1744), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1742, 1744), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1781), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Inverse regularization"""'], {}), "('Inverse regularization')\n", (1755, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1806), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error rate"""'], {}), "('error rate')\n", (1792, 1806), True, 'import matplotlib.pyplot as plt\n'), ((590, 616), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['degree'], {}), '(degree)\n', (608, 616), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((768, 791), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C'}), '(C=C)\n', (786, 791), False, 'from sklearn.linear_model import LogisticRegression\n'), ((936, 965), 'numpy.sum', 'np.sum', (['(ytrain_pred != ytrain)'], {}), '(ytrain_pred != ytrain)\n', (942, 965), True, 'import numpy as np\n'), ((1180, 1207), 'numpy.sum', 'np.sum', (['(ytest_pred != ytest)'], {}), '(ytest_pred != ytest)\n', (1186, 1207), True, 'import numpy as np\n'), ((1301, 1315), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1313, 1315), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1606), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1604, 1606), True, 'import matplotlib.pyplot as 
plt\n')] |
from tqdm import tqdm
import os
import argparse
import logging
import numpy as np
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from model.networks import BaseNet
from model.losses import loss_fn
from model.dataset_utils import CenterCrop, Normalise, ToTensor
from model.datasets import CardiacMR_2D_UKBB, CardiacMR_2D_Eval_UKBB
from model.submodules import resample_transform
from eval import evaluate
from utils import xutils, flow_utils
def train(model, optimizer, loss_fn, dataloader, params, epoch, summary_writer):
"""
Train the model for one epoch
Args:
model: (torch.nn.Module instance) the neural network
optimizer: (torch.optim instance) optimizer for parameters of model
loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
dataloader: (DataLoader instance) a torch.utils.data.DataLoader object that fetches training data
params: (Params instance) configuration parameters
epoch: (int) number of epoch this is training (for the summary writer)
summary_writer: TensorBoardX SummaryWriter()
"""
# training mode
model.train()
with tqdm(total=len(dataloader)) as t:
for it, (target, source) in enumerate(dataloader):
# target shape (1, 1, H, W), source shape (1, seq_length, H, W)
# send input data and the model to device
# expand target and source images to a view of (seq_length, 1, H, W)
target = target.to(device=args.device).expand(source.size()[1], -1, -1, -1)
source = source.to(device=args.device).permute(1, 0, 2, 3)
# forward pass
dvf = model(target, source) # (N, 2, H, W)
loss, losses = loss_fn(dvf, target, source, params)
# backprop and update
optimizer.zero_grad()
loss.backward()
optimizer.step()
# save summary of loss every some steps
if it % params.save_summary_steps == 0:
summary_writer.add_scalar('loss', loss.data, global_step=epoch * len(dataloader) + it)
for loss_name, loss_value in losses.items():
summary_writer.add_scalar('losses/{}'.format(loss_name), loss_value.data, global_step=epoch * len(dataloader) + it)
# update tqdm, show the loss value after the progress bar
t.set_postfix(loss='{:05.3f}'.format(loss.data))
t.update()
# save visualisation of training results
if (epoch + 1) % params.save_result_epochs == 0 or (epoch + 1) == params.num_epochs:
if it == len(dataloader) - 1:
# warp source image with full resolution dvf
warped_source = resample_transform(source, dvf)
# [dvf and warped source] -> cpu -> numpy array
dvf_np = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)
warped_source = warped_source.data.cpu().numpy()[:, 0, :, :] * 255 # (N, H, W)
# [input images] -> cpu -> numpy array -> [0, 255]
target = target.data.cpu().numpy()[:, 0, :, :] * 255 # (N, H, W)
source = source.data.cpu().numpy()[:, 0, :, :] * 255 # (N, H, W), here N = frames -1
# set up the result dir for this epoch
save_result_dir = os.path.join(args.model_dir, "train_results", "epoch_{}".format(epoch + 1))
if not os.path.exists(save_result_dir):
os.makedirs(save_result_dir)
# NOTE: the following code saves all N frames in a batch
# save dvf (hsv + quiver), target, source, warped source and error
# flow_utils.save_flow_hsv(op_flow, target, save_result_dir, fps=params.fps)
flow_utils.save_warp_n_error(warped_source, target, source, save_result_dir, fps=params.fps)
flow_utils.save_flow_quiver(dvf_np * (target.shape[-1] / 2), source, save_result_dir,
fps=params.fps)
def train_and_validate(model, optimizer, loss_fn, dataloaders, params):
    """Train the model and evaluate every `params.val_epochs` epochs.

    Saves a checkpoint after every validation pass and keeps a separate copy
    of the metrics JSON for the best model (chosen by mean validation Dice).

    Args:
        model: (torch.nn.Module) the neural network
        dataloaders: (dict) train and val dataloaders
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch
        params: (instance of Params) configuration parameters
    """
    # reload weights from a specified file to resume training
    if args.restore_file is not None:
        restore_path = os.path.join(args.model_dir, args.restore_file)
        logging.info("Restoring parameters from {}".format(restore_path))
        xutils.load_checkpoint(restore_path, model, optimizer)
    # set up TensorboardX summary writers
    train_summary_writer = xutils.set_summary_writer(args.model_dir, 'train')
    val_summary_writer = xutils.set_summary_writer(args.model_dir, 'val')
    # unpack dataloaders
    train_dataloader = dataloaders['train']
    val_dataloader = dataloaders['val']
    # Best validation score seen so far. Initialising to -inf here (instead
    # of assigning it on the "first validation" inside the loop, as before)
    # fixes an UnboundLocalError that occurred when the first validation was
    # triggered by (epoch + 1) == params.num_epochs rather than by
    # params.val_epochs, and still guarantees the first validation result is
    # always saved as "best".
    best_one_metric = float("-inf")
    """
    Training loop
    """
    for epoch in range(params.num_epochs):
        logging.info('Epoch number {}/{}'.format(epoch + 1, params.num_epochs))
        # train the model for one epoch
        logging.info("Training...")
        train(model, optimizer, loss_fn, train_dataloader, params, epoch, train_summary_writer)
        # validation
        if (epoch + 1) % params.val_epochs == 0 or (epoch + 1) == params.num_epochs:
            logging.info("Validating at epoch: {} ...".format(epoch + 1))
            val_metrics = evaluate(model, val_dataloader, params, args, val=True)
            # save the most recent results in a JSON file
            save_path = os.path.join(args.model_dir, f"val_results_last_3slices_{not args.all_slices}.json")
            xutils.save_dict_to_json(val_metrics, save_path)
            # calculate the metrics mean & std across the three structures (LV / myocardium / RV)
            val_metrics['val_dice_mean'] = np.mean([val_metrics['dice_lv_mean'], val_metrics['dice_myo_mean'], val_metrics['dice_rv_mean']])
            val_metrics['val_mcd_mean'] = np.mean([val_metrics['mcd_lv_mean'], val_metrics['mcd_myo_mean'], val_metrics['mcd_rv_mean']])
            val_metrics['val_hd_mean'] = np.mean([val_metrics['hd_lv_mean'], val_metrics['hd_myo_mean'], val_metrics['hd_rv_mean']])
            val_metrics['val_dice_std'] = np.mean([val_metrics['dice_lv_std'], val_metrics['dice_myo_std'], val_metrics['dice_rv_std']])
            val_metrics['val_mcd_std'] = np.mean([val_metrics['mcd_lv_std'], val_metrics['mcd_myo_std'], val_metrics['mcd_rv_std']])
            val_metrics['val_hd_std'] = np.mean([val_metrics['hd_lv_std'], val_metrics['hd_myo_std'], val_metrics['hd_rv_std']])
            logging.info("Mean val dice: {:05.3f}".format(val_metrics['val_dice_mean']))
            logging.info("Mean val mcd: {:05.3f}".format(val_metrics['val_mcd_mean']))
            logging.info("Mean val hd: {:05.3f}".format(val_metrics['val_hd_mean']))
            logging.info("Mean val negative detJ: {:05.3f}".format(val_metrics['negative_detJ_mean']))
            logging.info("Mean val mag grad detJ: {:05.3f}".format(val_metrics['mean_mag_grad_detJ_mean']))
            assert val_metrics['negative_detJ_mean'] <= 1, "Invalid det Jac: Ratio of folding points > 1"  # sanity check
            # determine if this is the best model so far (use mean val dice to choose)
            current_one_metric = val_metrics['val_dice_mean']
            is_best = current_one_metric >= best_one_metric
            if is_best:
                best_one_metric = current_one_metric
            # save model checkpoint
            xutils.save_checkpoint({'epoch': epoch + 1,
                                    'state_dict': model.state_dict(),
                                    'optim_dict': optimizer.state_dict()},
                                   is_best=is_best,
                                   checkpoint=args.model_dir)
            for key, value in val_metrics.items():
                val_summary_writer.add_scalar('metrics/{}'.format(key), value, global_step=epoch * len(train_dataloader))
            # save the validation results for the best model separately
            if is_best:
                save_path = os.path.join(args.model_dir, f"val_results_best_3slices_{not args.all_slices}.json")
                xutils.save_dict_to_json(val_metrics, save_path)
    # close TensorBoard summary writers
    train_summary_writer.close()
    val_summary_writer.close()
if __name__ == '__main__':
    # -------- command-line arguments --------
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir',
                        default=None,
                        help="Main directory for the model (with params.json)")
    parser.add_argument('--restore_file',
                        default=None,
                        help="(Optional) Name of the file in --model_dir storing model to load before training")
    parser.add_argument('--all_slices',
                        action='store_true',
                        help="Evaluate metrics on all slices instead of only 3.")
    parser.add_argument('--no_cuda',
                        action='store_true')
    # NOTE: left untyped on purpose — str(args.gpu) below accepts either the
    # int default or a string such as "0,1" for multi-GPU visibility.
    parser.add_argument('--gpu',
                        default=0,
                        help='Choose GPU')
    # type=int is required here: without it a command-line value arrives as
    # a string and DataLoader(num_workers=...) rejects it.
    parser.add_argument('--num_workers',
                        default=8,
                        type=int,
                        help='Number of dataloader workers, 0 for main process only')
    args = parser.parse_args()
    """
    Setting up
    """
    # set device; CUDA_VISIBLE_DEVICES must be set before the first CUDA query
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        args.device = torch.device('cuda')
    else:
        args.device = torch.device('cpu')
    # set up model dir
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)
    # set up the logger
    xutils.set_logger(os.path.join(args.model_dir, 'train.log'))
    logging.info("Model: {}".format(args.model_dir))
    # load setting parameters from a JSON file
    json_path = os.path.join(args.model_dir, 'params.json')
    assert os.path.isfile(json_path), "No JSON configuration file found at {}".format(json_path)
    params = xutils.Params(json_path)
    """"""
    """
    Data
    """
    # set up dataset and DataLoader
    logging.info("Setting up data loaders...")
    dataloaders = {}
    # training dataset
    train_dataset = CardiacMR_2D_UKBB(params.train_data_path,
                                      seq=params.seq,
                                      seq_length=params.seq_length,
                                      transform=transforms.Compose([
                                          CenterCrop(params.crop_size),
                                          Normalise(),
                                          ToTensor()
                                      ]))
    # training dataloader
    dataloaders['train'] = DataLoader(train_dataset,
                                      batch_size=params.batch_size,
                                      shuffle=False,
                                      num_workers=args.num_workers,
                                      pin_memory=args.cuda)
    # validation dataset (labels are not normalised, only cropped)
    val_dataset = CardiacMR_2D_Eval_UKBB(params.val_data_path,
                                          seq=params.seq,
                                          label_prefix=params.label_prefix,
                                          transform=transforms.Compose([
                                              CenterCrop(params.crop_size),
                                              Normalise(),
                                              ToTensor()]),
                                          label_transform=transforms.Compose([
                                              CenterCrop(params.crop_size),
                                              ToTensor()])
                                          )
    dataloaders['val'] = DataLoader(val_dataset,
                                    batch_size=params.batch_size,
                                    shuffle=False,
                                    num_workers=args.num_workers,
                                    pin_memory=args.cuda)
    logging.info("- Done.")
    """"""
    """
    Model and Optimiser
    """
    # instantiate model and move to device
    model = BaseNet()
    model = model.to(device=args.device)
    # set up optimiser
    optimizer = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
    """"""
    """
    Run train and validate
    """
    logging.info("Starting training and validation for {} epochs.".format(params.num_epochs))
    train_and_validate(model, optimizer, loss_fn, dataloaders, params)
    logging.info("Training and validation complete.")
    """"""
| [
"argparse.ArgumentParser",
"os.path.isfile",
"numpy.mean",
"torch.device",
"utils.xutils.Params",
"utils.xutils.save_dict_to_json",
"os.path.join",
"model.dataset_utils.ToTensor",
"utils.xutils.set_summary_writer",
"torch.utils.data.DataLoader",
"model.networks.BaseNet",
"os.path.exists",
"m... | [((5072, 5122), 'utils.xutils.set_summary_writer', 'xutils.set_summary_writer', (['args.model_dir', '"""train"""'], {}), "(args.model_dir, 'train')\n", (5097, 5122), False, 'from utils import xutils, flow_utils\n'), ((5148, 5196), 'utils.xutils.set_summary_writer', 'xutils.set_summary_writer', (['args.model_dir', '"""val"""'], {}), "(args.model_dir, 'val')\n", (5173, 5196), False, 'from utils import xutils, flow_utils\n'), ((8966, 8991), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8989, 8991), False, 'import argparse\n'), ((10492, 10535), 'os.path.join', 'os.path.join', (['args.model_dir', '"""params.json"""'], {}), "(args.model_dir, 'params.json')\n", (10504, 10535), False, 'import os\n'), ((10547, 10572), 'os.path.isfile', 'os.path.isfile', (['json_path'], {}), '(json_path)\n', (10561, 10572), False, 'import os\n'), ((10646, 10670), 'utils.xutils.Params', 'xutils.Params', (['json_path'], {}), '(json_path)\n', (10659, 10670), False, 'from utils import xutils, flow_utils\n'), ((10749, 10791), 'logging.info', 'logging.info', (['"""Setting up data loaders..."""'], {}), "('Setting up data loaders...')\n", (10761, 10791), False, 'import logging\n'), ((11366, 11492), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'params.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': 'args.cuda'}), '(train_dataset, batch_size=params.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=args.cuda)\n', (11376, 11492), False, 'from torch.utils.data import DataLoader\n'), ((12407, 12531), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'params.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': 'args.cuda'}), '(val_dataset, batch_size=params.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=args.cuda)\n', (12417, 12531), False, 'from torch.utils.data import DataLoader\n'), ((12676, 12699), 
'logging.info', 'logging.info', (['"""- Done."""'], {}), "('- Done.')\n", (12688, 12699), False, 'import logging\n'), ((12808, 12817), 'model.networks.BaseNet', 'BaseNet', ([], {}), '()\n', (12815, 12817), False, 'from model.networks import BaseNet\n'), ((13188, 13237), 'logging.info', 'logging.info', (['"""Training and validation complete."""'], {}), "('Training and validation complete.')\n", (13200, 13237), False, 'import logging\n'), ((4817, 4864), 'os.path.join', 'os.path.join', (['args.model_dir', 'args.restore_file'], {}), '(args.model_dir, args.restore_file)\n', (4829, 4864), False, 'import os\n'), ((4947, 5001), 'utils.xutils.load_checkpoint', 'xutils.load_checkpoint', (['restore_path', 'model', 'optimizer'], {}), '(restore_path, model, optimizer)\n', (4969, 5001), False, 'from utils import xutils, flow_utils\n'), ((5514, 5541), 'logging.info', 'logging.info', (['"""Training..."""'], {}), "('Training...')\n", (5526, 5541), False, 'import logging\n'), ((10043, 10068), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10066, 10068), False, 'import torch\n'), ((10109, 10129), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (10121, 10129), False, 'import torch\n'), ((10162, 10181), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10174, 10181), False, 'import torch\n'), ((10217, 10247), 'os.path.exists', 'os.path.exists', (['args.model_dir'], {}), '(args.model_dir)\n', (10231, 10247), False, 'import os\n'), ((10257, 10284), 'os.makedirs', 'os.makedirs', (['args.model_dir'], {}), '(args.model_dir)\n', (10268, 10284), False, 'import os\n'), ((10332, 10373), 'os.path.join', 'os.path.join', (['args.model_dir', '"""train.log"""'], {}), "(args.model_dir, 'train.log')\n", (10344, 10373), False, 'import os\n'), ((1800, 1836), 'model.losses.loss_fn', 'loss_fn', (['dvf', 'target', 'source', 'params'], {}), '(dvf, target, source, params)\n', (1807, 1836), False, 'from model.losses import loss_fn\n'), ((5845, 
5900), 'eval.evaluate', 'evaluate', (['model', 'val_dataloader', 'params', 'args'], {'val': '(True)'}), '(model, val_dataloader, params, args, val=True)\n', (5853, 5900), False, 'from eval import evaluate\n'), ((5984, 6072), 'os.path.join', 'os.path.join', (['args.model_dir', 'f"""val_results_last_3slices_{not args.all_slices}.json"""'], {}), "(args.model_dir,\n f'val_results_last_3slices_{not args.all_slices}.json')\n", (5996, 6072), False, 'import os\n'), ((6081, 6129), 'utils.xutils.save_dict_to_json', 'xutils.save_dict_to_json', (['val_metrics', 'save_path'], {}), '(val_metrics, save_path)\n', (6105, 6129), False, 'from utils import xutils, flow_utils\n'), ((6221, 6322), 'numpy.mean', 'np.mean', (["[val_metrics['dice_lv_mean'], val_metrics['dice_myo_mean'], val_metrics[\n 'dice_rv_mean']]"], {}), "([val_metrics['dice_lv_mean'], val_metrics['dice_myo_mean'],\n val_metrics['dice_rv_mean']])\n", (6228, 6322), True, 'import numpy as np\n'), ((6361, 6459), 'numpy.mean', 'np.mean', (["[val_metrics['mcd_lv_mean'], val_metrics['mcd_myo_mean'], val_metrics[\n 'mcd_rv_mean']]"], {}), "([val_metrics['mcd_lv_mean'], val_metrics['mcd_myo_mean'],\n val_metrics['mcd_rv_mean']])\n", (6368, 6459), True, 'import numpy as np\n'), ((6497, 6593), 'numpy.mean', 'np.mean', (["[val_metrics['hd_lv_mean'], val_metrics['hd_myo_mean'], val_metrics[\n 'hd_rv_mean']]"], {}), "([val_metrics['hd_lv_mean'], val_metrics['hd_myo_mean'], val_metrics\n ['hd_rv_mean']])\n", (6504, 6593), True, 'import numpy as np\n'), ((6632, 6730), 'numpy.mean', 'np.mean', (["[val_metrics['dice_lv_std'], val_metrics['dice_myo_std'], val_metrics[\n 'dice_rv_std']]"], {}), "([val_metrics['dice_lv_std'], val_metrics['dice_myo_std'],\n val_metrics['dice_rv_std']])\n", (6639, 6730), True, 'import numpy as np\n'), ((6768, 6864), 'numpy.mean', 'np.mean', (["[val_metrics['mcd_lv_std'], val_metrics['mcd_myo_std'], val_metrics[\n 'mcd_rv_std']]"], {}), "([val_metrics['mcd_lv_std'], val_metrics['mcd_myo_std'], val_metrics\n 
['mcd_rv_std']])\n", (6775, 6864), True, 'import numpy as np\n'), ((6900, 6993), 'numpy.mean', 'np.mean', (["[val_metrics['hd_lv_std'], val_metrics['hd_myo_std'], val_metrics['hd_rv_std']]"], {}), "([val_metrics['hd_lv_std'], val_metrics['hd_myo_std'], val_metrics[\n 'hd_rv_std']])\n", (6907, 6993), True, 'import numpy as np\n'), ((8668, 8756), 'os.path.join', 'os.path.join', (['args.model_dir', 'f"""val_results_best_3slices_{not args.all_slices}.json"""'], {}), "(args.model_dir,\n f'val_results_best_3slices_{not args.all_slices}.json')\n", (8680, 8756), False, 'import os\n'), ((8769, 8817), 'utils.xutils.save_dict_to_json', 'xutils.save_dict_to_json', (['val_metrics', 'save_path'], {}), '(val_metrics, save_path)\n', (8793, 8817), False, 'from utils import xutils, flow_utils\n'), ((2825, 2856), 'model.submodules.resample_transform', 'resample_transform', (['source', 'dvf'], {}), '(source, dvf)\n', (2843, 2856), False, 'from model.submodules import resample_transform\n'), ((3949, 4045), 'utils.flow_utils.save_warp_n_error', 'flow_utils.save_warp_n_error', (['warped_source', 'target', 'source', 'save_result_dir'], {'fps': 'params.fps'}), '(warped_source, target, source, save_result_dir,\n fps=params.fps)\n', (3977, 4045), False, 'from utils import xutils, flow_utils\n'), ((4062, 4167), 'utils.flow_utils.save_flow_quiver', 'flow_utils.save_flow_quiver', (['(dvf_np * (target.shape[-1] / 2))', 'source', 'save_result_dir'], {'fps': 'params.fps'}), '(dvf_np * (target.shape[-1] / 2), source,\n save_result_dir, fps=params.fps)\n', (4089, 4167), False, 'from utils import xutils, flow_utils\n'), ((11132, 11160), 'model.dataset_utils.CenterCrop', 'CenterCrop', (['params.crop_size'], {}), '(params.crop_size)\n', (11142, 11160), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((11204, 11215), 'model.dataset_utils.Normalise', 'Normalise', ([], {}), '()\n', (11213, 11215), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), 
((11259, 11269), 'model.dataset_utils.ToTensor', 'ToTensor', ([], {}), '()\n', (11267, 11269), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((11980, 12008), 'model.dataset_utils.CenterCrop', 'CenterCrop', (['params.crop_size'], {}), '(params.crop_size)\n', (11990, 12008), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((12055, 12066), 'model.dataset_utils.Normalise', 'Normalise', ([], {}), '()\n', (12064, 12066), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((12113, 12123), 'model.dataset_utils.ToTensor', 'ToTensor', ([], {}), '()\n', (12121, 12123), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((12250, 12278), 'model.dataset_utils.CenterCrop', 'CenterCrop', (['params.crop_size'], {}), '(params.crop_size)\n', (12260, 12278), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((12325, 12335), 'model.dataset_utils.ToTensor', 'ToTensor', ([], {}), '()\n', (12333, 12335), False, 'from model.dataset_utils import CenterCrop, Normalise, ToTensor\n'), ((3581, 3612), 'os.path.exists', 'os.path.exists', (['save_result_dir'], {}), '(save_result_dir)\n', (3595, 3612), False, 'import os\n'), ((3638, 3666), 'os.makedirs', 'os.makedirs', (['save_result_dir'], {}), '(save_result_dir)\n', (3649, 3666), False, 'import os\n')] |
import numpy as np
import pytest
from gpflow.kernels import SquaredExponential
@pytest.fixture
def test_data():
    """Generate a random regression dataset with observed and latent inputs.

    Draws 31 points: observed inputs in [0, 5)^2, latent inputs whose first
    half is shifted into [5, 5.2)^2, and targets sampled from a multivariate
    normal whose covariance is a squared-exponential kernel evaluated on the
    concatenated (observed, latent) inputs.
    """
    n_points = 31
    obs_dim, target_dim, latent_dim = 2, 1, 2
    observed = np.random.random((n_points, obs_dim)) * 5
    latent = np.random.random((n_points, latent_dim))
    half = n_points // 2
    # shift the first half of the latent inputs to form a second cluster
    latent[:half, :] = 0.2 * latent[:half, :] + 5
    full_inputs = np.concatenate([observed, latent], axis=1)
    assert full_inputs.shape == (n_points, obs_dim + latent_dim)
    kernel = SquaredExponential(variance=0.1)
    targets = np.random.multivariate_normal(
        mean=np.zeros(n_points), cov=kernel(full_inputs), size=target_dim
    ).T
    assert targets.shape == (n_points, target_dim)
    return observed, targets
| [
"numpy.random.random",
"numpy.zeros",
"numpy.concatenate",
"gpflow.kernels.SquaredExponential"
] | [((233, 268), 'numpy.random.random', 'np.random.random', (['(num_data, w_dim)'], {}), '((num_data, w_dim))\n', (249, 268), True, 'import numpy as np\n'), ((361, 401), 'numpy.concatenate', 'np.concatenate', (['[x_data, w_data]'], {'axis': '(1)'}), '([x_data, w_data], axis=1)\n', (375, 401), True, 'import numpy as np\n'), ((180, 215), 'numpy.random.random', 'np.random.random', (['(num_data, x_dim)'], {}), '((num_data, x_dim))\n', (196, 215), True, 'import numpy as np\n'), ((516, 534), 'numpy.zeros', 'np.zeros', (['num_data'], {}), '(num_data)\n', (524, 534), True, 'import numpy as np\n'), ((540, 572), 'gpflow.kernels.SquaredExponential', 'SquaredExponential', ([], {'variance': '(0.1)'}), '(variance=0.1)\n', (558, 572), False, 'from gpflow.kernels import SquaredExponential\n')] |
import numpy as np
from subsbml import *
# Build a subsbml System named 'cell' and load the B1 circuit model into it.
cell = System('cell')
# B1 - promoter sigX - utr1 - tetR
# B1 - pLac - utr1 - sigmaX (constitutively expressed protein sigmaX - input plasmid)
B1 = cell.createSubsystem('models/B1.xml','B1')
# SBML model gets converted to Level 3 Version 1; write the converted copy to disk
libsbml.writeSBML(B1.getSBMLDocument(),'models/B1converted.xml')
# Simulate using bioscrape
# 1000 timepoints spanning 0 .. 14*60*60000 time units
timepoints = np.linspace(0,14*60*60000,1000)
# Plot the tetR-dimer and sigmaX trajectories from the bioscrape simulation.
B1.plotBioscrape(['protein tetRdimer','protein sigmaX'],timepoints)
| [
"numpy.linspace"
] | [((389, 426), 'numpy.linspace', 'np.linspace', (['(0)', '(14 * 60 * 60000)', '(1000)'], {}), '(0, 14 * 60 * 60000, 1000)\n', (400, 426), True, 'import numpy as np\n')] |
import functools
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as ly
from tensorflow.python.framework import ops
from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv
print('small')  # banner identifying this ("small") configuration variant
# Use plain residual units instead of bottleneck units inside the GRU stages.
USE_BOTTLENECK = False
# Base channel width; per-stage widths below are multiples of this.
SIZE = 64
# Number of stacked recurrent blocks per encoder/decoder stage.
NUM_BLOCKS = 1
# If True the critic emits a 256-d embedding (Cramer-style) instead of a scalar.
CRAMER = False
def one_hot_to_dense(labels):
    """Convert a batch of one-hot label vectors into dense class indices.

    Assumes exactly one entry per row equals 1; returns a 1-D tensor of
    length batch_size holding the column index of that entry.
    """
    num_rows = int(labels.get_shape()[0])
    # tf.where on the boolean mask yields (row, col) coordinate pairs;
    # the column coordinate is the dense class index.
    hot_coords = tf.where(tf.equal(labels, 1))
    class_indices = hot_coords[:, 1]
    return tf.reshape(class_indices, (num_rows,))
def batchnorm(inputs, data_format=None, activation_fn=None, labels=None, n_labels=None):
    """conditional batchnorm (dumoulin et al 2016) for BCHW conv filtermaps

    Keeps one (offset, scale) row per class and looks up the row for each
    sample's label, which is what makes the normalisation "conditional".
    NOTE(review): activation_fn is accepted but never applied here.
    """
    # only NCHW layout is supported; the axes used below assume it
    if data_format != 'NCHW':
        raise Exception('unsupported')
    # per-channel statistics over batch and spatial dims (N, H, W)
    mean, var = tf.nn.moments(inputs, (0, 2, 3), keep_dims=True)
    shape = mean.get_shape().as_list()  # shape is [1,n,1,1]
    # one learnable (offset, scale) row per class label
    offset_m = tf.get_variable('offset', initializer=np.zeros([n_labels, shape[1]], dtype='float32'))
    scale_m = tf.get_variable('scale', initializer=np.ones([n_labels, shape[1]], dtype='float32'))
    # pick each sample's row according to its label
    offset = tf.nn.embedding_lookup(offset_m, labels)
    scale = tf.nn.embedding_lookup(scale_m, labels)
    # broadcast the per-sample (offset, scale) over H and W; epsilon = 1e-5
    result = tf.nn.batch_normalization(inputs, mean, var, offset[:, :, None, None], scale[:, :, None, None], 1e-5)
    return result
def lrelu(x, leak=0.3, name="lrelu"):
    """Leaky ReLU: identity for positive inputs, slope `leak` otherwise."""
    with tf.variable_scope(name):
        damped = leak * x
        return tf.maximum(damped, x)
def prelu(x, name="prelu"):
    """Parametric ReLU with a single trainable negative slope (init 0.2)."""
    with tf.variable_scope(name):
        # one scalar slope shared across the whole tensor
        slope = tf.get_variable("param", shape=None, initializer=0.2, regularizer=None,
                                trainable=True, caching_device=None)
        negative_branch = slope * x
        return tf.maximum(negative_branch, x)
def miu_relu(x, miu=0.7, name="miu_relu"):
    """Smooth ReLU variant: (x + sqrt((1 - miu)^2 + x^2)) / 2."""
    with tf.variable_scope(name):
        smooth_term = tf.sqrt((1 - miu) ** 2 + x ** 2)
        return (x + smooth_term) / 2.
def p_miu_relu(x, name="p_miu_relu"):
    """Smooth ReLU variant with a trainable curvature parameter.

    Computes (x + sqrt((1 - miu)^2 + x^2)) / 2 where miu is a trainable
    scalar initialised to 0.7.
    """
    with tf.variable_scope(name):
        learned_miu = tf.get_variable("param_miu", shape=None, initializer=0.7, regularizer=None,
                                      trainable=True, caching_device=None)
        curvature = (1 - learned_miu) ** 2
        return (x + tf.sqrt(curvature + x ** 2)) / 2.
def matsushita_entropy(x, name="matsushita_entropy"):
    """Matsushita squashing: maps x smoothly into (0, 1) via (1 + x/sqrt(1+x^2))/2."""
    with tf.variable_scope(name):
        normalized = x / tf.sqrt(1 + x ** 2)
        return (1 + normalized) / 2.
def image_encoder_s1_gru(x, num_classes, reuse=False, data_format='NCHW', labels=None, scope_name=None):
    """Stage-1 image encoder built from stacked unrolled conv-GRU stages.

    Builds a pyramid of mean-pooled copies of `x` (full resolution plus four
    halvings), then runs them, coarsest first, through `unrolled_gru_conv`
    stages of increasing channel width (size*1 .. size*8, each stride 2).

    Args:
        x: input batch in NCHW layout.
        num_classes: number of label classes, forwarded to the conditional
            normaliser via `normalizer_params_e`.
        reuse: accepted but unused here — variable scoping is handled by the
            caller (see the commented-out scope block below).
        data_format: must be 'NCHW' (asserted).
        labels: per-sample class labels for conditional normalisation.
        scope_name: accepted but unused here (same reason as `reuse`).

    Returns:
        List of the final hidden state from each GRU stage, shallow to deep.
    """
    print("CONV_GRU")
    assert data_format == 'NCHW'
    size = SIZE
    num_blocks = NUM_BLOCKS
    # NOTE(review): resize_func is assigned but never used in this function.
    resize_func = tf.image.resize_bilinear
    # pass label info through to the conditional normaliser (module global)
    if normalizer_params_e is not None and normalizer_fn_e != ly.batch_norm and normalizer_fn_e != ly.layer_norm:
        normalizer_params_e['labels'] = labels
        normalizer_params_e['n_labels'] = num_classes
    if data_format == 'NCHW':
        # build the pyramid: x plus 4 successively mean-pooled copies
        resized_x = []
        resized_ = x
        resized_x.append(resized_)
        for i in range(4):
            resized_ = mean_pool(resized_, data_format=data_format)
            resized_x.append(resized_)
        # coarsest-first ordering
        resized_x = resized_x[::-1]
    else:
        raise NotImplementedError
    output_list = []
    # with tf.variable_scope(scope_name) as scope:
    #     if reuse:
    #         scope.reuse_variables()
    x_list = resized_x
    # stem convolution on the full-resolution input initialises the GRU state
    h0 = ly.conv2d(x_list[-1], size * 1, kernel_size=7, stride=2, data_format=data_format,
                   activation_fn=activation_fn_e,
                   normalizer_fn=normalizer_fn_e,
                   normalizer_params=normalizer_params_e,
                   weights_initializer=weight_initializer)
    # Initial memory state (extra blocks start from a learned zero-init state)
    hidden_state_shape = h0.get_shape().as_list()
    batch_size = hidden_state_shape[0]
    hidden_state_shape[0] = 1
    hts_0 = [h0]
    for i in range(1, num_blocks):
        h0 = tf.tile(tf.get_variable("initial_hidden_state_%d" % i, shape=hidden_state_shape, dtype=tf.float32,
                                        initializer=tf.zeros_initializer()), [batch_size, 1, 1, 1])
        hts_0.append(h0)
    hts_1 = unrolled_gru_conv(x_list[-2], hts_0,
                              size * 1, stride=2, dilate_rate=1,
                              data_format=data_format, num_blocks=num_blocks,
                              first_unit=True, last_unit=False,
                              activation_fn=activation_fn_e,
                              normalizer_fn=normalizer_fn_e,
                              normalizer_params=normalizer_params_e,
                              weights_initializer=weight_initializer,
                              use_bottleneck=USE_BOTTLENECK,
                              unit_num=1)
    output_list.append(hts_1[-1])
    hts_2 = unrolled_gru_conv(x_list[-3], hts_1,
                              size * 2, stride=2, dilate_rate=1,
                              data_format=data_format, num_blocks=num_blocks,
                              first_unit=False, last_unit=False,
                              activation_fn=activation_fn_e,
                              normalizer_fn=normalizer_fn_e,
                              normalizer_params=normalizer_params_e,
                              weights_initializer=weight_initializer,
                              use_bottleneck=USE_BOTTLENECK,
                              unit_num=2)
    output_list.append(hts_2[-1])
    hts_3 = unrolled_gru_conv(x_list[-4], hts_2,
                              size * 4, stride=2, dilate_rate=1,
                              data_format=data_format, num_blocks=num_blocks,
                              first_unit=False, last_unit=False,
                              activation_fn=activation_fn_e,
                              normalizer_fn=normalizer_fn_e,
                              normalizer_params=normalizer_params_e,
                              weights_initializer=weight_initializer,
                              use_bottleneck=USE_BOTTLENECK,
                              unit_num=3)
    output_list.append(hts_3[-1])
    hts_4 = unrolled_gru_conv(x_list[-5], hts_3,
                              size * 8, stride=2, dilate_rate=1,
                              data_format=data_format, num_blocks=num_blocks,
                              first_unit=False, last_unit=True,
                              activation_fn=activation_fn_e,
                              normalizer_fn=normalizer_fn_e,
                              normalizer_params=normalizer_params_e,
                              weights_initializer=weight_initializer,
                              use_bottleneck=USE_BOTTLENECK,
                              unit_num=4)
    output_list.append(hts_4[-1])
    return output_list
# GRU
def generator_l_s1_skip(z, output_channel, num_classes, reuse=False, data_format='NCHW',
                        labels=None, scope_name=None):
    """Stage-1 generator: encode `z` with the conv-GRU encoder, then decode
    with unrolled GRU deconv stages, U-Net-style skip connections from the
    encoder, and a noise injection at the bottleneck.

    Args:
        z: input image batch (the image to translate).
        output_channel: NOTE(review): unused — the final conv hard-codes 3
            output channels.
        num_classes: number of label classes for conditional normalisation.
        reuse: reuse variables inside `scope_name` when True.
        data_format: 'NCHW' or channels-last; both branches are implemented.
        labels: per-sample class labels for conditional normalisation.
        scope_name: variable scope wrapping encoder and decoder.

    Returns:
        Single-element list containing the final tanh image (3 channels,
        spatial size asserted to be 64).
    """
    print("DECONV_GRU")
    size = SIZE
    num_blocks = NUM_BLOCKS
    input_dims = z.get_shape().as_list()
    resize_func = tf.image.resize_area
    if data_format == 'NCHW':
        height = input_dims[2]
        width = input_dims[3]
        # resize ops want NHWC, so transpose, build the pyramid, restore
        z_orig = tf.identity(z)
        z = tf.transpose(z, [0, 2, 3, 1])
        # down-scaled copies of z: /32, /16, /8, /4, /2 (coarsest first)
        resized_z = [
            tf.transpose(resize_func(z, [int(height / 32), int(width / 32)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(z, [int(height / 16), int(width / 16)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(z, [int(height / 8), int(width / 8)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(z, [int(height / 4), int(width / 4)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(z, [int(height / 2), int(width / 2)]), [0, 3, 1, 2]),
        ]
        z = z_orig
    else:
        height = input_dims[1]
        width = input_dims[2]
        resized_z = [
            resize_func(z, [int(height / 32), int(width / 32)]),
            resize_func(z, [int(height / 16), int(width / 16)]),
            resize_func(z, [int(height / 8), int(width / 8)]),
            resize_func(z, [int(height / 4), int(width / 4)]),
            resize_func(z, [int(height / 2), int(width / 2)]),
        ]
    if data_format == 'NCHW':
        concat_axis = 1
    else:
        concat_axis = 3
    output_list = []
    # pass label info through to the conditional normaliser (module global)
    if normalizer_params_g is not None and normalizer_fn_g != ly.batch_norm and normalizer_fn_g != ly.layer_norm:
        normalizer_params_g['labels'] = labels
        normalizer_params_g['n_labels'] = num_classes
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        # encoder shares this scope, so its variables reuse together with ours
        z_encoded = image_encoder_s1_gru(z, num_classes=num_classes, reuse=reuse, data_format=data_format,
                                         labels=labels, scope_name=scope_name)
        # bottleneck noise: half the encoder's channel count, projected from
        # a 256-d Gaussian via a fully-connected layer
        input_e_dims = z_encoded[-1].get_shape().as_list()
        input_e_dims[concat_axis] = int(input_e_dims[concat_axis] / 2.)
        noise = tf.random_normal(shape=(input_e_dims[0], 256), dtype=tf.float32)
        noise = ly.fully_connected(noise, int(np.prod(input_e_dims[1:])), activation_fn=activation_fn_g)
        noise = tf.reshape(noise, shape=input_e_dims)
        # Initial memory state (extra blocks get a random-normal learned init)
        hidden_state_shape = z_encoded[-1].get_shape().as_list()
        batch_size = hidden_state_shape[0]
        hidden_state_shape[0] = 1
        hts_0 = [z_encoded[-1]]
        for i in range(1, num_blocks):
            h0 = tf.tile(tf.get_variable("initial_hidden_state_%d" % i, shape=hidden_state_shape, dtype=tf.float32,
                                            initializer=tf.random_normal_initializer()), [batch_size, 1, 1, 1])
            hts_0.append(h0)
        # bottleneck input: coarsest z pyramid level concatenated with noise
        input_0 = tf.concat([resized_z[0], noise], axis=concat_axis)
        hts_1 = unrolled_gru_deconv(input_0, hts_0,
                                    size * 6, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=True, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=0)
        # output_list.append(ly.conv2d(hts_1[-1], 3, 3, stride=1, data_format=data_format,
        #                              normalizer_fn=None, activation_fn=tf.nn.tanh,
        #                              weights_initializer=weight_initializer))
        # skip connection from the second-deepest encoder stage
        input_1 = tf.concat([resized_z[1], z_encoded[-2]], axis=concat_axis)
        hts_2 = unrolled_gru_deconv(input_1, hts_1,
                                    size * 4, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=2)
        # output_list.append(ly.conv2d(hts_2[-1], 3, 3, stride=1, data_format=data_format,
        #                              normalizer_fn=None, activation_fn=tf.nn.tanh,
        #                              weights_initializer=weight_initializer))
        input_2 = tf.concat([resized_z[2], z_encoded[-3]], axis=concat_axis)
        hts_3 = unrolled_gru_deconv(input_2, hts_2,
                                    size * 2, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=4)
        # output_list.append(ly.conv2d(hts_3[-1], 3, 3, stride=1, data_format=data_format,
        #                              normalizer_fn=None, activation_fn=tf.nn.tanh,
        #                              weights_initializer=weight_initializer))
        input_3 = tf.concat([resized_z[3], z_encoded[-4]], axis=concat_axis)
        hts_4 = unrolled_gru_deconv(input_3, hts_3,
                                    size * 2, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=6)
        # output_list.append(ly.conv2d(hts_4[-1], 3, 3, stride=1, data_format=data_format,
        #                              normalizer_fn=None, activation_fn=tf.nn.tanh,
        #                              weights_initializer=weight_initializer))
        # final (full-resolution) stage takes the /2 pyramid level directly
        hts_5 = unrolled_gru_deconv(resized_z[4], hts_4,
                                    size * 1, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=True,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=8)
        # project to 3 channels in [-1, 1] via tanh
        output_list.append(ly.conv2d(hts_5[-1], 3, 7, stride=1, data_format=data_format,
                                     normalizer_fn=None, activation_fn=tf.nn.tanh,
                                     weights_initializer=weight_initializer))
        # out = ly.conv2d(train, output_channel, 7, stride=1, data_format=data_format,
        #                 activation_fn=tf.nn.tanh, weights_initializer=weight_initializer)
        assert output_list[-1].get_shape().as_list()[2] == 64
        return output_list
# GRU
def generator_l_s2(z, extra, output_channel, num_classes, reuse=False, data_format='NCHW',
                   labels=None, scope_name=None):
    """Stage-2 generator: encode `z` with `image_encoder_s2`, then upsample
    with unrolled GRU deconv stages, feeding a down-scaled pyramid of the
    conditioning image `extra` into each stage.

    Args:
        z: stage-1 output (an image, or a list whose last element is used).
        extra: conditioning image whose resized copies drive each stage.
        output_channel: NOTE(review): unused — the final conv hard-codes 3
            output channels.
        num_classes: number of label classes for conditional normalisation.
        reuse: reuse variables inside `scope_name` when True.
        data_format: must be 'NCHW' (the other branch raises).
        labels: per-sample class labels for conditional normalisation.
        scope_name: variable scope wrapping encoder and decoder.

    Returns:
        Single-element list containing the final tanh image (3 channels).
    """
    print("DECONV_GRU")
    size = SIZE
    num_blocks = NUM_BLOCKS
    # stage-1 generators return a list of outputs; keep only the final one
    if type(z) is list:
        z = z[-1]
    input_dims = extra.get_shape().as_list()
    resize_func = tf.image.resize_area
    if data_format == 'NCHW':
        height = input_dims[2]
        width = input_dims[3]
        # resize ops want NHWC, so transpose, build the pyramid, restore
        extra_orig = tf.identity(extra)
        extra = tf.transpose(extra, [0, 2, 3, 1])
        # down-scaled copies of extra: /32, /16, /8, /4, /2 (coarsest first)
        resized_extra = [
            tf.transpose(resize_func(extra, [int(height / 32), int(width / 32)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(extra, [int(height / 16), int(width / 16)]), [0, 3, 1, 2]),
            # tf.transpose(resize_func(extra, [int(height / 8), int(width / 8)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(extra, [int(height / 8), int(width / 8)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(extra, [int(height / 4), int(width / 4)]), [0, 3, 1, 2]),
            tf.transpose(resize_func(extra, [int(height / 2), int(width / 2)]), [0, 3, 1, 2]),
        ]
        extra = extra_orig
    else:
        raise NotImplementedError
        # NOTE(review): everything below in this branch is unreachable — it
        # follows the raise above; kept as-is.
        height = input_dims[1]
        width = input_dims[2]
        resized_extra = [
            # resize_func(extra, [int(height / 32), int(width / 32)]),
            # resize_func(extra, [int(height / 16), int(width / 16)]),
            resize_func(extra, [int(height / 8), int(width / 8)]),
            resize_func(extra, [int(height / 4), int(width / 4)]),
            resize_func(extra, [int(height / 2), int(width / 2)]),
        ]
    if data_format == 'NCHW':
        concat_axis = 1
    else:
        concat_axis = 3
    output_list = []
    # pass label info through to the conditional normaliser (module global)
    if normalizer_params_g is not None and normalizer_fn_g != ly.batch_norm and normalizer_fn_g != ly.layer_norm:
        normalizer_params_g['labels'] = labels
        normalizer_params_g['n_labels'] = num_classes
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        # NOTE(review): image_encoder_s2 is defined elsewhere in this module;
        # unlike the stage-1 encoder it appears to return a single tensor.
        z_encoded = image_encoder_s2(z, num_classes=num_classes, reuse=reuse, data_format=data_format,
                                     labels=labels, scope_name=scope_name)
        # Initial memory state (extra blocks get a random-normal learned init)
        hidden_state_shape = z_encoded.get_shape().as_list()
        batch_size = hidden_state_shape[0]
        hidden_state_shape[0] = 1
        hts_0 = [z_encoded]
        for i in range(1, num_blocks):
            h0 = tf.tile(tf.get_variable("initial_hidden_state_%d" % i, shape=hidden_state_shape, dtype=tf.float32,
                                            initializer=tf.random_normal_initializer()), [batch_size, 1, 1, 1])
            hts_0.append(h0)
        hts_1 = unrolled_gru_deconv(resized_extra[0], hts_0,
                                    size * 8, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=True, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=0)
        # hts_1 = unrolled_gru_deconv(resized_extra[0], hts_1,
        #                             size * 8, stride=1, data_format=data_format, num_blocks=num_blocks,
        #                             first_unit=False, last_unit=False,
        #                             activation_fn=activation_fn_g,
        #                             normalizer_fn=normalizer_fn_g,
        #                             normalizer_params=normalizer_params_g,
        #                             weights_initializer=weight_initializer,
        #                             use_bottleneck=USE_BOTTLENECK,
        #                             unit_num=1)
        hts_1 = unrolled_gru_deconv(resized_extra[1], hts_1,
                                    size * 8, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=2)
        hts_2 = unrolled_gru_deconv(resized_extra[2], hts_1,
                                    size * 4, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=11)
        hts_3 = unrolled_gru_deconv(resized_extra[3], hts_2,
                                    size * 2, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=False,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=12)
        hts_4 = unrolled_gru_deconv(resized_extra[4], hts_3,
                                    size * 1, stride=2, data_format=data_format, num_blocks=num_blocks,
                                    first_unit=False, last_unit=True,
                                    activation_fn=activation_fn_g,
                                    normalizer_fn=normalizer_fn_g,
                                    normalizer_params=normalizer_params_g,
                                    weights_initializer=weight_initializer,
                                    use_bottleneck=USE_BOTTLENECK,
                                    unit_num=13)
        # project to 3 channels in [-1, 1] via tanh
        output_list.append(ly.conv2d(hts_4[-1], 3, 7, stride=1, data_format=data_format,
                                     normalizer_fn=None, activation_fn=tf.nn.tanh,
                                     weights_initializer=weight_initializer))
    print("G_s2 out: %d" % output_list[-1].get_shape().as_list()[2])
    return output_list
# GRU
def critic_l_multiple_s1(x, num_classes, reuse=False, data_format='NCHW', scope_name=None, cramer=CRAMER):
    """Stage-1 multi-scale convolutional-GRU critic.

    Builds a 5-level mean-pool pyramid of the input and feeds successive
    pyramid levels (finest to coarsest) into a stack of unrolled conv-GRU
    blocks, halving spatial resolution at each stage.  Two heads are
    attached to the final feature map: a 1x1-conv discriminator head and
    a globally-pooled fully-connected classification head.

    Args:
        x: input tensor in NCHW layout, or a list of tensors in which
            case only the last element is used.
        num_classes: output dimension of the classification head.
        reuse: if True, reuse variables from an existing scope of the
            same name.
        data_format: must be 'NCHW'.
        scope_name: name of the variable scope the critic is built in.
        cramer: if True, the discriminator head emits a 256-channel map
            (Cramer-GAN critic); otherwise a single channel.

    Returns:
        Tuple ``(disc, logits)``: per-location discriminator output and
        classification logits.
    """
    print("CONV_GRU")
    assert data_format == 'NCHW'
    size = SIZE
    num_blocks = NUM_BLOCKS
    if isinstance(x, list):
        x = x[-1]
    if data_format == 'NCHW':
        # 5-level pyramid: [x, 1/2, 1/4, 1/8, 1/16], stored coarsest-first
        # after the reversal so x_list[-1] is the full-resolution input.
        resized_x = []
        resized_ = x
        resized_x.append(resized_)
        for i in range(4):
            resized_ = mean_pool(resized_, data_format=data_format)
            resized_x.append(resized_)
        resized_x = resized_x[::-1]
    else:
        raise NotImplementedError
    output_dim = 256 if cramer else 1
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        x_list = resized_x
        # Stem convolution at full resolution; its output shape defines
        # the initial GRU hidden states.
        h0 = ly.conv2d(x_list[-1], 6, kernel_size=7, stride=1, data_format=data_format,
                       activation_fn=activation_fn_d,
                       normalizer_fn=normalizer_fn_d,
                       normalizer_params=normalizer_params_d,
                       weights_initializer=weight_initializer)
        # Initial memory state: block 0 starts from the stem output, the
        # remaining blocks from learned zero-initialized states tiled
        # over the batch dimension.
        hidden_state_shape = h0.get_shape().as_list()
        batch_size = hidden_state_shape[0]
        hidden_state_shape[0] = 1
        hts_0 = [h0]
        for i in range(1, num_blocks):
            h0 = tf.tile(tf.get_variable("initial_hidden_state_%d" % i, shape=hidden_state_shape, dtype=tf.float32,
                                         initializer=tf.zeros_initializer()), [batch_size, 1, 1, 1])
            hts_0.append(h0)
        hts_1 = unrolled_gru_conv(x_list[-1], hts_0,
                                  size * 2, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=True, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=1)
        hts_2 = unrolled_gru_conv(x_list[-2], hts_1,
                                  size * 4, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=2)
        hts_3 = unrolled_gru_conv(x_list[-3], hts_2,
                                  size * 8, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=3)
        hts_4 = unrolled_gru_conv(x_list[-4], hts_3,
                                  size * 16, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=True,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=4)
        img = hts_4[-1]
        # Discriminator head: 1x1 conv over the final feature map.
        disc = ly.conv2d(img, output_dim, kernel_size=1, stride=1, data_format=data_format,
                         activation_fn=None, normalizer_fn=None,
                         weights_initializer=weight_initializer)
        # Classification head: global average pool + fully connected.
        img = tf.reduce_mean(img, axis=(2, 3) if data_format == 'NCHW' else (1, 2))
        logits = ly.fully_connected(img, num_classes, activation_fn=None, normalizer_fn=None)
        return disc, logits
# GRU
def critic_l_multiple_s2(x, num_classes, reuse=False, data_format='NCHW', scope_name=None, cramer=CRAMER):
    """Stage-2 multi-scale convolutional-GRU critic (larger inputs).

    Variant of ``critic_l_multiple_s1`` intended for higher-resolution
    input: the pyramid skips one octave (the first mean-pool result is
    computed but not stored) and both stem convolutions stride by 2
    before the GRU stack.

    Args:
        x: input tensor in NCHW layout, or a list of tensors in which
            case only the last element is used.
        num_classes: output dimension of the classification head.
        reuse: if True, reuse variables from an existing scope of the
            same name.
        data_format: must be 'NCHW'.
        scope_name: name of the variable scope the critic is built in.
        cramer: if True, the discriminator head emits a 256-channel map
            (Cramer-GAN critic); otherwise a single channel.

    Returns:
        Tuple ``(disc, logits)``: per-location discriminator output and
        classification logits.
    """
    print("CONV_GRU")
    assert data_format == 'NCHW'
    size = SIZE
    num_blocks = NUM_BLOCKS
    if isinstance(x, list):
        x = x[-1]
    if data_format == 'NCHW':
        # Pyramid of [x, 1/4, 1/8, ..., 1/128]: the 1/2 scale is pooled
        # but not appended (the stride-2 stems cover that octave).
        resized_x = []
        resized_ = x
        resized_x.append(resized_)
        resized_ = mean_pool(resized_, data_format=data_format)
        for i in range(6):
            resized_ = mean_pool(resized_, data_format=data_format)
            resized_x.append(resized_)
        resized_x = resized_x[::-1]
    else:
        raise NotImplementedError
    output_dim = 256 if cramer else 1
    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()
        x_list = resized_x
        # Stride-2 stem; its output shape defines the GRU hidden states.
        h0 = ly.conv2d(x_list[-1], 6, kernel_size=7, stride=2, data_format=data_format,
                       activation_fn=activation_fn_d,
                       normalizer_fn=normalizer_fn_d,
                       normalizer_params=normalizer_params_d,
                       weights_initializer=weight_initializer)
        # Initial memory state: block 0 starts from the stem output, the
        # remaining blocks from learned zero-initialized states tiled
        # over the batch dimension.
        hidden_state_shape = h0.get_shape().as_list()
        batch_size = hidden_state_shape[0]
        hidden_state_shape[0] = 1
        hts_0 = [h0]
        for i in range(1, num_blocks):
            h0 = tf.tile(tf.get_variable("initial_hidden_state_%d" % i, shape=hidden_state_shape, dtype=tf.float32,
                                         initializer=tf.zeros_initializer()), [batch_size, 1, 1, 1])
            hts_0.append(h0)
        # Second stride-2 stem (separate weights from the hidden-state
        # stem) produces the actual input to the first GRU block.
        inp_0 = ly.conv2d(x_list[-1], 6, kernel_size=7, stride=2, data_format=data_format,
                          activation_fn=activation_fn_d,
                          normalizer_fn=normalizer_fn_d,
                          normalizer_params=normalizer_params_d,
                          weights_initializer=weight_initializer)
        hts_1 = unrolled_gru_conv(inp_0, hts_0,
                                  size * 1, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=True, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=1)
        hts_2 = unrolled_gru_conv(x_list[-2], hts_1,
                                  size * 2, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=2)
        hts_3 = unrolled_gru_conv(x_list[-3], hts_2,
                                  size * 4, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=3)
        hts_4 = unrolled_gru_conv(x_list[-4], hts_3,
                                  size * 8, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=4)
        # NOTE: last_unit=False here because a sixth (now removed) GRU
        # stage used to follow; kept as-is to preserve variable layout.
        hts_5 = unrolled_gru_conv(x_list[-5], hts_4,
                                  size * 16, stride=2, dilate_rate=1,
                                  data_format=data_format, num_blocks=num_blocks,
                                  first_unit=False, last_unit=False,
                                  activation_fn=activation_fn_d,
                                  normalizer_fn=normalizer_fn_d,
                                  normalizer_params=normalizer_params_d,
                                  weights_initializer=weight_initializer,
                                  use_bottleneck=USE_BOTTLENECK,
                                  unit_num=5)
        img = hts_5[-1]
        # Discriminator head: 1x1 conv over the final feature map.
        disc = ly.conv2d(img, output_dim, kernel_size=1, stride=1, data_format=data_format,
                         activation_fn=None, normalizer_fn=None,
                         weights_initializer=weight_initializer)
        # Classification head: global average pool + fully connected.
        img = tf.reduce_mean(img, axis=(2, 3) if data_format == 'NCHW' else (1, 2))
        logits = ly.fully_connected(img, num_classes, activation_fn=None, normalizer_fn=None)
        return disc, logits
# Shared weight initializer for every conv / fully-connected layer above:
# DCGAN-style normal init, N(mean=0, stddev=0.02).
weight_initializer = tf.random_normal_initializer(0, 0.02)
# weight_initializer = ly.xavier_initializer_conv2d()
def set_param(data_format='NCHW'):
    """Configure the module-level normalization settings.

    Records the chosen data format and points the encoder/generator
    normalizers at the custom ``batchnorm`` (parameterized by that
    format), while the critic ('d') and conditional encoder ('ce')
    run without any normalization.
    """
    global model_data_format, normalizer_fn_e, normalizer_fn_g, normalizer_fn_d, normalizer_fn_ce,\
        normalizer_params_e, normalizer_params_g, normalizer_params_d, normalizer_params_ce
    model_data_format = data_format
    # Encoder and generator share the same custom batch norm.
    normalizer_fn_e = batchnorm
    normalizer_fn_g = batchnorm
    normalizer_params_e = {'data_format': model_data_format}
    normalizer_params_g = {'data_format': model_data_format}
    # Critic and conditional encoder: no normalization.
    normalizer_fn_d = None
    normalizer_fn_ce = None
    normalizer_params_d = None
    normalizer_params_ce = None
# ---------------------------------------------------------------------------
# Import-time defaults (may be replaced later via set_param()):
# fused TF batch norm for encoder/generator, no normalization for critics.
# ---------------------------------------------------------------------------
model_data_format = None
normalizer_fn_e = ly.batch_norm
normalizer_params_e = dict(fused=True, data_format=model_data_format,
                           is_training=True)
normalizer_fn_g = ly.batch_norm
normalizer_params_g = dict(fused=True, data_format=model_data_format,
                           is_training=True)
normalizer_fn_d = None
normalizer_params_d = None
normalizer_fn_ce = None
normalizer_params_ce = None

# Activations: miu-relu for encoder/generator, PReLU for the critic.
activation_fn_e = miu_relu
activation_fn_g = miu_relu
activation_fn_d = prelu
print('prelu')
activation_fn_d_last = None

# Network entry points exported to the training code.
generator_s1 = generator_l_s1_skip
generator_s2 = generator_l_s2
critic_s1 = critic_l_multiple_s1
critic_s2 = critic_l_multiple_s2
# critic_e = critic_e_fc
| [
"tensorflow.nn.batch_normalization",
"tensorflow.identity",
"tensorflow.maximum",
"tensorflow.reshape",
"numpy.ones",
"tensorflow.sqrt",
"numpy.prod",
"tensorflow.get_variable",
"tensorflow.nn.moments",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.equal",
"tensorflow.contrib... | [((33383, 33420), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0)', '(0.02)'], {}), '(0, 0.02)\n', (33411, 33420), True, 'import tensorflow as tf\n'), ((849, 897), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs', '(0, 2, 3)'], {'keep_dims': '(True)'}), '(inputs, (0, 2, 3), keep_dims=True)\n', (862, 897), True, 'import tensorflow as tf\n'), ((1173, 1213), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['offset_m', 'labels'], {}), '(offset_m, labels)\n', (1195, 1213), True, 'import tensorflow as tf\n'), ((1226, 1265), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['scale_m', 'labels'], {}), '(scale_m, labels)\n', (1248, 1265), True, 'import tensorflow as tf\n'), ((1279, 1385), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (['inputs', 'mean', 'var', 'offset[:, :, None, None]', 'scale[:, :, None, None]', '(1e-05)'], {}), '(inputs, mean, var, offset[:, :, None, None],\n scale[:, :, None, None], 1e-05)\n', (1304, 1385), True, 'import tensorflow as tf\n'), ((3293, 3529), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['x_list[-1]', '(size * 1)'], {'kernel_size': '(7)', 'stride': '(2)', 'data_format': 'data_format', 'activation_fn': 'activation_fn_e', 'normalizer_fn': 'normalizer_fn_e', 'normalizer_params': 'normalizer_params_e', 'weights_initializer': 'weight_initializer'}), '(x_list[-1], size * 1, kernel_size=7, stride=2, data_format=\n data_format, activation_fn=activation_fn_e, normalizer_fn=\n normalizer_fn_e, normalizer_params=normalizer_params_e,\n weights_initializer=weight_initializer)\n', (3302, 3529), True, 'import tensorflow.contrib.layers as ly\n'), ((4038, 4396), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-2]', 'hts_0', '(size * 1)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(True)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_e', 
'normalizer_fn': 'normalizer_fn_e', 'normalizer_params': 'normalizer_params_e', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(1)'}), '(x_list[-2], hts_0, size * 1, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=True,\n last_unit=False, activation_fn=activation_fn_e, normalizer_fn=\n normalizer_fn_e, normalizer_params=normalizer_params_e,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=1)\n', (4055, 4396), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((4692, 5051), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-3]', 'hts_1', '(size * 2)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_e', 'normalizer_fn': 'normalizer_fn_e', 'normalizer_params': 'normalizer_params_e', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(2)'}), '(x_list[-3], hts_1, size * 2, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_e, normalizer_fn=\n normalizer_fn_e, normalizer_params=normalizer_params_e,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=2)\n', (4709, 5051), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((5347, 5706), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-4]', 'hts_2', '(size * 4)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': 
'(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_e', 'normalizer_fn': 'normalizer_fn_e', 'normalizer_params': 'normalizer_params_e', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(3)'}), '(x_list[-4], hts_2, size * 4, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_e, normalizer_fn=\n normalizer_fn_e, normalizer_params=normalizer_params_e,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=3)\n', (5364, 5706), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((6002, 6360), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-5]', 'hts_3', '(size * 8)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(True)', 'activation_fn': 'activation_fn_e', 'normalizer_fn': 'normalizer_fn_e', 'normalizer_params': 'normalizer_params_e', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(4)'}), '(x_list[-5], hts_3, size * 8, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=True, activation_fn=activation_fn_e, normalizer_fn=\n normalizer_fn_e, normalizer_params=normalizer_params_e,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=4)\n', (6019, 6360), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((1448, 1471), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1465, 1471), True, 'import tensorflow as tf\n'), ((1488, 1511), 
'tensorflow.maximum', 'tf.maximum', (['(leak * x)', 'x'], {}), '(leak * x, x)\n', (1498, 1511), True, 'import tensorflow as tf\n'), ((1551, 1574), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1568, 1574), True, 'import tensorflow as tf\n'), ((1591, 1703), 'tensorflow.get_variable', 'tf.get_variable', (['"""param"""'], {'shape': 'None', 'initializer': '(0.2)', 'regularizer': 'None', 'trainable': '(True)', 'caching_device': 'None'}), "('param', shape=None, initializer=0.2, regularizer=None,\n trainable=True, caching_device=None)\n", (1606, 1703), True, 'import tensorflow as tf\n'), ((1746, 1769), 'tensorflow.maximum', 'tf.maximum', (['(leak * x)', 'x'], {}), '(leak * x, x)\n', (1756, 1769), True, 'import tensorflow as tf\n'), ((1824, 1847), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1841, 1847), True, 'import tensorflow as tf\n'), ((1957, 1980), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (1974, 1980), True, 'import tensorflow as tf\n'), ((1996, 2112), 'tensorflow.get_variable', 'tf.get_variable', (['"""param_miu"""'], {'shape': 'None', 'initializer': '(0.7)', 'regularizer': 'None', 'trainable': '(True)', 'caching_device': 'None'}), "('param_miu', shape=None, initializer=0.7, regularizer=None,\n trainable=True, caching_device=None)\n", (2011, 2112), True, 'import tensorflow as tf\n'), ((2263, 2286), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2280, 2286), True, 'import tensorflow as tf\n'), ((7078, 7092), 'tensorflow.identity', 'tf.identity', (['z'], {}), '(z)\n', (7089, 7092), True, 'import tensorflow as tf\n'), ((7105, 7134), 'tensorflow.transpose', 'tf.transpose', (['z', '[0, 2, 3, 1]'], {}), '(z, [0, 2, 3, 1])\n', (7117, 7134), True, 'import tensorflow as tf\n'), ((8404, 8433), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (8421, 8433), True, 'import tensorflow as tf\n'), ((8834, 
8898), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '(input_e_dims[0], 256)', 'dtype': 'tf.float32'}), '(shape=(input_e_dims[0], 256), dtype=tf.float32)\n', (8850, 8898), True, 'import tensorflow as tf\n'), ((9020, 9057), 'tensorflow.reshape', 'tf.reshape', (['noise'], {'shape': 'input_e_dims'}), '(noise, shape=input_e_dims)\n', (9030, 9057), True, 'import tensorflow as tf\n'), ((9576, 9626), 'tensorflow.concat', 'tf.concat', (['[resized_z[0], noise]'], {'axis': 'concat_axis'}), '([resized_z[0], noise], axis=concat_axis)\n', (9585, 9626), True, 'import tensorflow as tf\n'), ((9643, 9982), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['input_0', 'hts_0', '(size * 6)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(True)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(0)'}), '(input_0, hts_0, size * 6, stride=2, data_format=\n data_format, num_blocks=num_blocks, first_unit=True, last_unit=False,\n activation_fn=activation_fn_g, normalizer_fn=normalizer_fn_g,\n normalizer_params=normalizer_params_g, weights_initializer=\n weight_initializer, use_bottleneck=USE_BOTTLENECK, unit_num=0)\n', (9662, 9982), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((10527, 10585), 'tensorflow.concat', 'tf.concat', (['[resized_z[1], z_encoded[-2]]'], {'axis': 'concat_axis'}), '([resized_z[1], z_encoded[-2]], axis=concat_axis)\n', (10536, 10585), True, 'import tensorflow as tf\n'), ((10602, 10942), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['input_1', 'hts_1', '(size * 4)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 
'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(2)'}), '(input_1, hts_1, size * 4, stride=2, data_format=\n data_format, num_blocks=num_blocks, first_unit=False, last_unit=False,\n activation_fn=activation_fn_g, normalizer_fn=normalizer_fn_g,\n normalizer_params=normalizer_params_g, weights_initializer=\n weight_initializer, use_bottleneck=USE_BOTTLENECK, unit_num=2)\n', (10621, 10942), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((11487, 11545), 'tensorflow.concat', 'tf.concat', (['[resized_z[2], z_encoded[-3]]'], {'axis': 'concat_axis'}), '([resized_z[2], z_encoded[-3]], axis=concat_axis)\n', (11496, 11545), True, 'import tensorflow as tf\n'), ((11562, 11902), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['input_2', 'hts_2', '(size * 2)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(4)'}), '(input_2, hts_2, size * 2, stride=2, data_format=\n data_format, num_blocks=num_blocks, first_unit=False, last_unit=False,\n activation_fn=activation_fn_g, normalizer_fn=normalizer_fn_g,\n normalizer_params=normalizer_params_g, weights_initializer=\n weight_initializer, use_bottleneck=USE_BOTTLENECK, unit_num=4)\n', (11581, 11902), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, 
unrolled_gru_deconv\n'), ((12447, 12505), 'tensorflow.concat', 'tf.concat', (['[resized_z[3], z_encoded[-4]]'], {'axis': 'concat_axis'}), '([resized_z[3], z_encoded[-4]], axis=concat_axis)\n', (12456, 12505), True, 'import tensorflow as tf\n'), ((12522, 12862), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['input_3', 'hts_3', '(size * 2)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(6)'}), '(input_3, hts_3, size * 2, stride=2, data_format=\n data_format, num_blocks=num_blocks, first_unit=False, last_unit=False,\n activation_fn=activation_fn_g, normalizer_fn=normalizer_fn_g,\n normalizer_params=normalizer_params_g, weights_initializer=\n weight_initializer, use_bottleneck=USE_BOTTLENECK, unit_num=6)\n', (12541, 12862), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((13405, 13749), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['resized_z[4]', 'hts_4', '(size * 1)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(True)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(8)'}), '(resized_z[4], hts_4, size * 1, stride=2, data_format=\n data_format, num_blocks=num_blocks, first_unit=False, last_unit=True,\n activation_fn=activation_fn_g, normalizer_fn=normalizer_fn_g,\n normalizer_params=normalizer_params_g, weights_initializer=\n weight_initializer, 
use_bottleneck=USE_BOTTLENECK, unit_num=8)\n', (13424, 13749), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((14996, 15014), 'tensorflow.identity', 'tf.identity', (['extra'], {}), '(extra)\n', (15007, 15014), True, 'import tensorflow as tf\n'), ((15031, 15064), 'tensorflow.transpose', 'tf.transpose', (['extra', '[0, 2, 3, 1]'], {}), '(extra, [0, 2, 3, 1])\n', (15043, 15064), True, 'import tensorflow as tf\n'), ((16525, 16554), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (16542, 16554), True, 'import tensorflow as tf\n'), ((17306, 17657), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['resized_extra[0]', 'hts_0', '(size * 8)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(True)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(0)'}), '(resized_extra[0], hts_0, size * 8, stride=2,\n data_format=data_format, num_blocks=num_blocks, first_unit=True,\n last_unit=False, activation_fn=activation_fn_g, normalizer_fn=\n normalizer_fn_g, normalizer_params=normalizer_params_g,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=0)\n', (17325, 17657), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((18595, 18947), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['resized_extra[1]', 'hts_1', '(size * 8)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 
'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(2)'}), '(resized_extra[1], hts_1, size * 8, stride=2,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_g, normalizer_fn=\n normalizer_fn_g, normalizer_params=normalizer_params_g,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=2)\n', (18614, 18947), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((19231, 19584), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['resized_extra[2]', 'hts_1', '(size * 4)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(11)'}), '(resized_extra[2], hts_1, size * 4, stride=2,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_g, normalizer_fn=\n normalizer_fn_g, normalizer_params=normalizer_params_g,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=11)\n', (19250, 19584), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((19868, 20221), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['resized_extra[3]', 'hts_2', '(size * 2)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': 
'(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(12)'}), '(resized_extra[3], hts_2, size * 2, stride=2,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_g, normalizer_fn=\n normalizer_fn_g, normalizer_params=normalizer_params_g,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=12)\n', (19887, 20221), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((20505, 20857), 'resnet_rnn.unrolled_gru_deconv', 'unrolled_gru_deconv', (['resized_extra[4]', 'hts_3', '(size * 1)'], {'stride': '(2)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(True)', 'activation_fn': 'activation_fn_g', 'normalizer_fn': 'normalizer_fn_g', 'normalizer_params': 'normalizer_params_g', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(13)'}), '(resized_extra[4], hts_3, size * 1, stride=2,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=True, activation_fn=activation_fn_g, normalizer_fn=\n normalizer_fn_g, normalizer_params=normalizer_params_g,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=13)\n', (20524, 20857), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((22339, 22368), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (22356, 22368), True, 'import tensorflow as tf\n'), ((22475, 22703), 
'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['x_list[-1]', '(6)'], {'kernel_size': '(7)', 'stride': '(1)', 'data_format': 'data_format', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer'}), '(x_list[-1], 6, kernel_size=7, stride=1, data_format=data_format,\n activation_fn=activation_fn_d, normalizer_fn=normalizer_fn_d,\n normalizer_params=normalizer_params_d, weights_initializer=\n weight_initializer)\n', (22484, 22703), True, 'import tensorflow.contrib.layers as ly\n'), ((23269, 23627), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-1]', 'hts_0', '(size * 2)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(True)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(1)'}), '(x_list[-1], hts_0, size * 2, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=True,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=1)\n', (23286, 23627), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((23929, 24288), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-2]', 'hts_1', '(size * 4)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 
'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(2)'}), '(x_list[-2], hts_1, size * 4, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=2)\n', (23946, 24288), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((24590, 24949), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-3]', 'hts_2', '(size * 8)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(3)'}), '(x_list[-3], hts_2, size * 8, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=3)\n', (24607, 24949), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((25251, 25610), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-4]', 'hts_3', '(size * 16)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(True)', 'activation_fn': 'activation_fn_d', 
'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(4)'}), '(x_list[-4], hts_3, size * 16, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=True, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=4)\n', (25268, 25610), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((25965, 26130), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['img', 'output_dim'], {'kernel_size': '(1)', 'stride': '(1)', 'data_format': 'data_format', 'activation_fn': 'None', 'normalizer_fn': 'None', 'weights_initializer': 'weight_initializer'}), '(img, output_dim, kernel_size=1, stride=1, data_format=data_format,\n activation_fn=None, normalizer_fn=None, weights_initializer=\n weight_initializer)\n', (25974, 26130), True, 'import tensorflow.contrib.layers as ly\n'), ((26216, 26285), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['img'], {'axis': "((2, 3) if data_format == 'NCHW' else (1, 2))"}), "(img, axis=(2, 3) if data_format == 'NCHW' else (1, 2))\n", (26230, 26285), True, 'import tensorflow as tf\n'), ((26303, 26379), 'tensorflow.contrib.layers.fully_connected', 'ly.fully_connected', (['img', 'num_classes'], {'activation_fn': 'None', 'normalizer_fn': 'None'}), '(img, num_classes, activation_fn=None, normalizer_fn=None)\n', (26321, 26379), True, 'import tensorflow.contrib.layers as ly\n'), ((27003, 27047), 'resnet_rnn.mean_pool', 'mean_pool', (['resized_'], {'data_format': 'data_format'}), '(resized_, data_format=data_format)\n', (27012, 27047), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, 
resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((27332, 27361), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope_name'], {}), '(scope_name)\n', (27349, 27361), True, 'import tensorflow as tf\n'), ((27468, 27696), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['x_list[-1]', '(6)'], {'kernel_size': '(7)', 'stride': '(2)', 'data_format': 'data_format', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer'}), '(x_list[-1], 6, kernel_size=7, stride=2, data_format=data_format,\n activation_fn=activation_fn_d, normalizer_fn=normalizer_fn_d,\n normalizer_params=normalizer_params_d, weights_initializer=\n weight_initializer)\n', (27477, 27696), True, 'import tensorflow.contrib.layers as ly\n'), ((28262, 28490), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['x_list[-1]', '(6)'], {'kernel_size': '(7)', 'stride': '(2)', 'data_format': 'data_format', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer'}), '(x_list[-1], 6, kernel_size=7, stride=2, data_format=data_format,\n activation_fn=activation_fn_d, normalizer_fn=normalizer_fn_d,\n normalizer_params=normalizer_params_d, weights_initializer=\n weight_initializer)\n', (28271, 28490), True, 'import tensorflow.contrib.layers as ly\n'), ((28598, 28951), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['inp_0', 'hts_0', '(size * 1)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(True)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(1)'}), '(inp_0, hts_0, size * 1, 
stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=True,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=1)\n', (28615, 28951), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((29253, 29612), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-2]', 'hts_1', '(size * 2)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(2)'}), '(x_list[-2], hts_1, size * 2, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=2)\n', (29270, 29612), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((29914, 30273), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-3]', 'hts_2', '(size * 4)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 
'USE_BOTTLENECK', 'unit_num': '(3)'}), '(x_list[-3], hts_2, size * 4, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=3)\n', (29931, 30273), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((30575, 30934), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-4]', 'hts_3', '(size * 8)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(4)'}), '(x_list[-4], hts_3, size * 8, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=4)\n', (30592, 30934), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((31236, 31596), 'resnet_rnn.unrolled_gru_conv', 'unrolled_gru_conv', (['x_list[-5]', 'hts_4', '(size * 16)'], {'stride': '(2)', 'dilate_rate': '(1)', 'data_format': 'data_format', 'num_blocks': 'num_blocks', 'first_unit': '(False)', 'last_unit': '(False)', 'activation_fn': 'activation_fn_d', 'normalizer_fn': 'normalizer_fn_d', 'normalizer_params': 
'normalizer_params_d', 'weights_initializer': 'weight_initializer', 'use_bottleneck': 'USE_BOTTLENECK', 'unit_num': '(5)'}), '(x_list[-5], hts_4, size * 16, stride=2, dilate_rate=1,\n data_format=data_format, num_blocks=num_blocks, first_unit=False,\n last_unit=False, activation_fn=activation_fn_d, normalizer_fn=\n normalizer_fn_d, normalizer_params=normalizer_params_d,\n weights_initializer=weight_initializer, use_bottleneck=USE_BOTTLENECK,\n unit_num=5)\n', (31253, 31596), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((32920, 33085), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['img', 'output_dim'], {'kernel_size': '(1)', 'stride': '(1)', 'data_format': 'data_format', 'activation_fn': 'None', 'normalizer_fn': 'None', 'weights_initializer': 'weight_initializer'}), '(img, output_dim, kernel_size=1, stride=1, data_format=data_format,\n activation_fn=None, normalizer_fn=None, weights_initializer=\n weight_initializer)\n', (32929, 33085), True, 'import tensorflow.contrib.layers as ly\n'), ((33171, 33240), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['img'], {'axis': "((2, 3) if data_format == 'NCHW' else (1, 2))"}), "(img, axis=(2, 3) if data_format == 'NCHW' else (1, 2))\n", (33185, 33240), True, 'import tensorflow as tf\n'), ((33258, 33334), 'tensorflow.contrib.layers.fully_connected', 'ly.fully_connected', (['img', 'num_classes'], {'activation_fn': 'None', 'normalizer_fn': 'None'}), '(img, num_classes, activation_fn=None, normalizer_fn=None)\n', (33276, 33334), True, 'import tensorflow.contrib.layers as ly\n'), ((1012, 1059), 'numpy.zeros', 'np.zeros', (['[n_labels, shape[1]]'], {'dtype': '"""float32"""'}), "([n_labels, shape[1]], dtype='float32')\n", (1020, 1059), True, 'import numpy as np\n'), ((1112, 1158), 'numpy.ones', 'np.ones', (['[n_labels, shape[1]]'], {'dtype': '"""float32"""'}), 
"([n_labels, shape[1]], dtype='float32')\n", (1119, 1158), True, 'import numpy as np\n'), ((2964, 3008), 'resnet_rnn.mean_pool', 'mean_pool', (['resized_'], {'data_format': 'data_format'}), '(resized_, data_format=data_format)\n', (2973, 3008), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((14047, 14199), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['hts_5[-1]', '(3)', '(7)'], {'stride': '(1)', 'data_format': 'data_format', 'normalizer_fn': 'None', 'activation_fn': 'tf.nn.tanh', 'weights_initializer': 'weight_initializer'}), '(hts_5[-1], 3, 7, stride=1, data_format=data_format, normalizer_fn\n =None, activation_fn=tf.nn.tanh, weights_initializer=weight_initializer)\n', (14056, 14199), True, 'import tensorflow.contrib.layers as ly\n'), ((21152, 21304), 'tensorflow.contrib.layers.conv2d', 'ly.conv2d', (['hts_4[-1]', '(3)', '(7)'], {'stride': '(1)', 'data_format': 'data_format', 'normalizer_fn': 'None', 'activation_fn': 'tf.nn.tanh', 'weights_initializer': 'weight_initializer'}), '(hts_4[-1], 3, 7, stride=1, data_format=data_format, normalizer_fn\n =None, activation_fn=tf.nn.tanh, weights_initializer=weight_initializer)\n', (21161, 21304), True, 'import tensorflow.contrib.layers as ly\n'), ((22105, 22149), 'resnet_rnn.mean_pool', 'mean_pool', (['resized_'], {'data_format': 'data_format'}), '(resized_, data_format=data_format)\n', (22114, 22149), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((27098, 27142), 'resnet_rnn.mean_pool', 'mean_pool', (['resized_'], {'data_format': 'data_format'}), '(resized_, data_format=data_format)\n', (27107, 27142), False, 'from resnet_rnn import resnet_block, resnet_deconv_block, resnet_conv, resnet_deconv, 
upsample_conv, mean_pool, unrolled_lstm_conv, unrolled_lstm_deconv, unrolled_gru_conv, unrolled_gru_deconv\n'), ((551, 570), 'tensorflow.equal', 'tf.equal', (['labels', '(1)'], {}), '(labels, 1)\n', (559, 570), True, 'import tensorflow as tf\n'), ((1869, 1901), 'tensorflow.sqrt', 'tf.sqrt', (['((1 - miu) ** 2 + x ** 2)'], {}), '((1 - miu) ** 2 + x ** 2)\n', (1876, 1901), True, 'import tensorflow as tf\n'), ((2159, 2191), 'tensorflow.sqrt', 'tf.sqrt', (['((1 - miu) ** 2 + x ** 2)'], {}), '((1 - miu) ** 2 + x ** 2)\n', (2166, 2191), True, 'import tensorflow as tf\n'), ((8945, 8970), 'numpy.prod', 'np.prod', (['input_e_dims[1:]'], {}), '(input_e_dims[1:])\n', (8952, 8970), True, 'import numpy as np\n'), ((2312, 2331), 'tensorflow.sqrt', 'tf.sqrt', (['(1 + x ** 2)'], {}), '(1 + x ** 2)\n', (2319, 2331), True, 'import tensorflow as tf\n'), ((3952, 3974), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (3972, 3974), True, 'import tensorflow as tf\n'), ((9472, 9502), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (9500, 9502), True, 'import tensorflow as tf\n'), ((17204, 17234), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (17232, 17234), True, 'import tensorflow as tf\n'), ((23175, 23197), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (23195, 23197), True, 'import tensorflow as tf\n'), ((28168, 28190), 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (28188, 28190), True, 'import tensorflow as tf\n')] |
"""Inferer for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
import argparse
import functools
import paddle.fluid as fluid
from data_utils.data import DataGenerator
from model_utils.model import DeepSpeech2Model
from model_utils.model_check import check_cuda, check_version
from utils.error_rate import wer, cer
from utils.utility import add_arguments, print_arguments
import create_manifest
import json
import codecs
import soundfile
import time
import numpy as np
# Lazily-initialised globals so repeated main() calls reuse the loaded model.
ds2_model = None
data_generator = None
vocab_list = None

parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('num_samples', int, 128, "# of samples to infer.")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('num_proc_bsearch', int, 8, "# of CPUs for beam search.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 1024, "# of recurrent cells per layer.")
add_arg('alpha', float, 2.5, "Coef of LM for beam search.")
add_arg('beta', float, 0.3, "Coef of WC for beam search.")
add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.")
add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.")
add_arg('use_gru', bool, True, "Use GRUs instead of simple RNNs.")
add_arg('use_gpu', bool, True, "Use GPU or not.")
add_arg('share_rnn_weights', bool, False,
        "Share input-hidden weights across "
        "bi-directional RNNs. Not for GRU.")
add_arg('target_dir', str,
        '/content/Baidu-Deepspeech2-For-Python3/dataset/librispeech/test-clean/LibriSpeech/test-clean/',
        "Filepath of voice sample testing folder.")
add_arg('infer_manifest', str,
        'data/manifest.test-clean',
        "Filepath of manifest to infer.")
add_arg('mean_std_path', str,
        'models/baidu_en8k/mean_std.npz',
        "Filepath of normalizer's mean & std.")
add_arg('vocab_path', str,
        'models/baidu_en8k/vocab.txt',
        "Filepath of vocabulary.")
add_arg('lang_model_path', str,
        'models/lm/common_crawl_00.prune01111.trie.klm',
        "Filepath for language model.")
add_arg('model_path', str,
        'models/baidu_en8k',
        "If None, the training starts from scratch, "
        "otherwise, it resumes from the pre-trained model.")
add_arg('decoding_method', str,
        'ctc_beam_search',
        "Decoding method. Options: ctc_beam_search, ctc_greedy",
        choices=['ctc_beam_search', 'ctc_greedy'])
add_arg('error_rate_type', str,
        'wer',
        "Error rate type for evaluation.",
        choices=['wer', 'cer'])
add_arg('specgram_type', str,
        'linear',
        "Audio feature type. Options: linear, mfcc.",
        choices=['linear', 'mfcc'])
# FIX: the original carried a copy-pasted choices=['linear', 'mfcc'] here,
# which would reject any real audio path passed on the command line
# (assuming add_arguments forwards kwargs to argparse.add_argument).
add_arg('audio_path', str,
        '',
        "Audio path to test")
# yapf: disable
args = parser.parse_args()
def prepare_manifest():
    """Scan the configured dataset folder and write the inference manifest."""
    print("Preparing Manifest")
    create_manifest.prepare_dataset(
        target_dir=args.target_dir,
        manifest_path=args.infer_manifest)
def load_model():
    """Build the DeepSpeech2 model, its data generator and vocabulary.

    Returns:
        tuple: (ds2_model, data_generator, vocab_list) ready for inference.
    """
    # Fail fast if use_gpu was requested on a CPU-only paddle build.
    check_cuda(args.use_gpu)
    # Ensure the installed paddlepaddle version is recent enough.
    check_version()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()

    # Feature pipeline: no augmentation, keep raw transcription text.
    data_generator = DataGenerator(
        vocab_filepath=args.vocab_path,
        mean_std_filepath=args.mean_std_path,
        augmentation_config='{}',
        specgram_type=args.specgram_type,
        keep_transcription_text=True,
        place=place,
        is_training=False)
    ds2_model = DeepSpeech2Model(
        vocab_size=data_generator.vocab_size,
        num_conv_layers=args.num_conv_layers,
        num_rnn_layers=args.num_rnn_layers,
        rnn_layer_size=args.rnn_layer_size,
        use_gru=args.use_gru,
        share_rnn_weights=args.share_rnn_weights,
        place=place,
        init_from_pretrained_model=args.model_path)

    # decoders only accept string encoded in utf-8
    vocab_list = data_generator.vocab_list
    ds2_model.init_ext_scorer(args.alpha, args.beta, args.lang_model_path,
                              vocab_list)
    return ds2_model, data_generator, vocab_list
def infer(ds2_model, data_generator, vocab_list):
    """Run DeepSpeech2 inference and append results to TRANSCRIPTION.json.

    If ``args.audio_path`` is set, a one-entry manifest is written for that
    single file and its transcript is returned; otherwise the manifest is
    built from ``args.target_dir`` and all samples are transcribed.

    Args:
        ds2_model: trained DeepSpeech2Model with an initialised ext scorer.
        data_generator: DataGenerator matching the model's feature config.
        vocab_list: decoding vocabulary for the beam search.
    """
    if args.audio_path:
        # Build a single-entry manifest for the requested file.
        audio_data, samplerate = soundfile.read(args.audio_path)
        duration = float(len(audio_data)) / samplerate
        json_lines = [json.dumps({
            'audio_filepath': args.audio_path,
            'duration': duration,
            'text': 'NO TRANSCRIPT'
        })]
        with codecs.open(args.infer_manifest, 'w', 'utf-8') as out_file:
            for line in json_lines:
                out_file.write(line + '\n')
    else:
        prepare_manifest()

    batch_reader = data_generator.batch_reader_creator(
        manifest_path=args.infer_manifest,
        batch_size=args.num_samples,
        sortagrad=False,
        shuffle_method=None)

    error_rate_func = cer if args.error_rate_type == 'cer' else wer
    error_arr = []
    wer_arr = []  # kept outside the loop so the final average works even with no batches
    ds2_model.logger.info("\nEverything Prepared .. Starting inference ...\n")
    for infer_data in batch_reader():
        # infer_data: (padded_audios, texts, audio_lens, masks, audio_file_path)
        probs_split = ds2_model.infer_batch_probs(
            infer_data=infer_data,
            feeding_dict=data_generator.feeding)
        result_transcripts = ds2_model.decode_batch_beam_search(
            probs_split=probs_split,
            beam_alpha=args.alpha,
            beam_beta=args.beta,
            beam_size=args.beam_size,
            cutoff_prob=args.cutoff_prob,
            cutoff_top_n=args.cutoff_top_n,
            vocab_list=vocab_list,
            num_processes=args.num_proc_bsearch)
        target_transcripts = infer_data[1]
        audio_file_paths = infer_data[4]
        json_lines = []
        print("Writing Results on TRANSCRIPTION.json ...")
        for target, result, audio_file_path in zip(target_transcripts, result_transcripts, audio_file_paths):
            # Normalise the curly apostrophe so it does not inflate the error rate.
            target = target.replace("’", "'")
            erroris = error_rate_func(target, result)
            json_lines.append(
                json.dumps({
                    'Audio file path': audio_file_path,
                    'Target Transcription': target,
                    'Output Transcription': result,
                    'The {} '.format(args.error_rate_type): erroris,
                }, indent=4, ensure_ascii=False, sort_keys=True))
            error_arr.append(erroris)
        wer_arr = np.array(error_arr)
        print("Current Error Rate is : ", str(np.average(wer_arr)))
        with codecs.open('TRANSCRIPTION.json', 'a+', 'utf-8') as out_file:
            for line in json_lines:
                out_file.write(line + '\n')
    with codecs.open('TRANSCRIPTION.json', 'a+', 'utf-8') as out_file:
        out_file.write("Average Error Rate is : " + str(np.average(wer_arr)) + '\n')
    ds2_model.logger.info("Finished Inference.")
    if args.audio_path:
        return result_transcripts.pop()
def main():
    """Load the model once (cached in module globals) and run inference.

    Returns:
        The transcript(s) produced by :func:`infer`.
    """
    global ds2_model
    global data_generator
    global vocab_list
    print_arguments(args)
    if not ds2_model:
        # First call: build and cache model, generator and vocabulary.
        print("\nModel Loading Initiated ...")
        ds2_model, data_generator, vocab_list = load_model()
        print("\nModel Loaded Successfully ...\n")
    tic = time.time()
    result_transcripts = infer(ds2_model, data_generator, vocab_list)
    toc = time.time()
    # FIX: original printed toc - tic/60 (precedence bug); the elapsed
    # interval must be parenthesised before converting to minutes.
    print("{} Mins Required For Transcription".format((toc - tic) / 60))
    return result_transcripts
# Script entry point: run inference once and print the resulting transcript(s).
if __name__ == '__main__':
    print(main())
"functools.partial",
"paddle.fluid.CUDAPlace",
"soundfile.read",
"numpy.average",
"argparse.ArgumentParser",
"model_utils.model.DeepSpeech2Model",
"utils.utility.print_arguments",
"data_utils.data.DataGenerator",
"codecs.open",
"json.dumps",
"model_utils.model_check.check_cuda",
"time.time",
... | [((679, 723), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (702, 723), False, 'import argparse\n'), ((734, 784), 'functools.partial', 'functools.partial', (['add_arguments'], {'argparser': 'parser'}), '(add_arguments, argparser=parser)\n', (751, 784), False, 'import functools\n'), ((3415, 3514), 'create_manifest.prepare_dataset', 'create_manifest.prepare_dataset', ([], {'target_dir': 'args.target_dir', 'manifest_path': 'args.infer_manifest'}), '(target_dir=args.target_dir, manifest_path=\n args.infer_manifest)\n', (3446, 3514), False, 'import create_manifest\n'), ((3602, 3626), 'model_utils.model_check.check_cuda', 'check_cuda', (['args.use_gpu'], {}), '(args.use_gpu)\n', (3612, 3626), False, 'from model_utils.model_check import check_cuda, check_version\n'), ((3680, 3695), 'model_utils.model_check.check_version', 'check_version', ([], {}), '()\n', (3693, 3695), False, 'from model_utils.model_check import check_cuda, check_version\n'), ((3839, 4059), 'data_utils.data.DataGenerator', 'DataGenerator', ([], {'vocab_filepath': 'args.vocab_path', 'mean_std_filepath': 'args.mean_std_path', 'augmentation_config': '"""{}"""', 'specgram_type': 'args.specgram_type', 'keep_transcription_text': '(True)', 'place': 'place', 'is_training': '(False)'}), "(vocab_filepath=args.vocab_path, mean_std_filepath=args.\n mean_std_path, augmentation_config='{}', specgram_type=args.\n specgram_type, keep_transcription_text=True, place=place, is_training=False\n )\n", (3852, 4059), False, 'from data_utils.data import DataGenerator\n'), ((4127, 4427), 'model_utils.model.DeepSpeech2Model', 'DeepSpeech2Model', ([], {'vocab_size': 'data_generator.vocab_size', 'num_conv_layers': 'args.num_conv_layers', 'num_rnn_layers': 'args.num_rnn_layers', 'rnn_layer_size': 'args.rnn_layer_size', 'use_gru': 'args.use_gru', 'share_rnn_weights': 'args.share_rnn_weights', 'place': 'place', 'init_from_pretrained_model': 'args.model_path'}), 
'(vocab_size=data_generator.vocab_size, num_conv_layers=args\n .num_conv_layers, num_rnn_layers=args.num_rnn_layers, rnn_layer_size=\n args.rnn_layer_size, use_gru=args.use_gru, share_rnn_weights=args.\n share_rnn_weights, place=place, init_from_pretrained_model=args.model_path)\n', (4143, 4427), False, 'from model_utils.model import DeepSpeech2Model\n'), ((7998, 8019), 'utils.utility.print_arguments', 'print_arguments', (['args'], {}), '(args)\n', (8013, 8019), False, 'from utils.utility import add_arguments, print_arguments\n'), ((3734, 3752), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (3749, 3752), True, 'import paddle.fluid as fluid\n'), ((3779, 3795), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (3793, 3795), True, 'import paddle.fluid as fluid\n'), ((4959, 4990), 'soundfile.read', 'soundfile.read', (['args.audio_path'], {}), '(args.audio_path)\n', (4973, 4990), False, 'import soundfile\n'), ((7398, 7417), 'numpy.array', 'np.array', (['error_arr'], {}), '(error_arr)\n', (7406, 7417), True, 'import numpy as np\n'), ((7649, 7697), 'codecs.open', 'codecs.open', (['"""TRANSCRIPTION.json"""', '"""a+"""', '"""utf-8"""'], {}), "('TRANSCRIPTION.json', 'a+', 'utf-8')\n", (7660, 7697), False, 'import codecs\n'), ((8272, 8283), 'time.time', 'time.time', ([], {}), '()\n', (8281, 8283), False, 'import time\n'), ((8372, 8383), 'time.time', 'time.time', ([], {}), '()\n', (8381, 8383), False, 'import time\n'), ((8479, 8490), 'time.time', 'time.time', ([], {}), '()\n', (8488, 8490), False, 'import time\n'), ((8579, 8590), 'time.time', 'time.time', ([], {}), '()\n', (8588, 8590), False, 'import time\n'), ((5093, 5191), 'json.dumps', 'json.dumps', (["{'audio_filepath': args.audio_path, 'duration': duration, 'text':\n 'NO TRANSCRIPT'}"], {}), "({'audio_filepath': args.audio_path, 'duration': duration, 'text':\n 'NO TRANSCRIPT'})\n", (5103, 5191), False, 'import json\n'), ((5296, 5342), 'codecs.open', 'codecs.open', 
(['args.infer_manifest', '"""w"""', '"""utf-8"""'], {}), "(args.infer_manifest, 'w', 'utf-8')\n", (5307, 5342), False, 'import codecs\n'), ((7498, 7546), 'codecs.open', 'codecs.open', (['"""TRANSCRIPTION.json"""', '"""a+"""', '"""utf-8"""'], {}), "('TRANSCRIPTION.json', 'a+', 'utf-8')\n", (7509, 7546), False, 'import codecs\n'), ((7463, 7482), 'numpy.average', 'np.average', (['wer_arr'], {}), '(wer_arr)\n', (7473, 7482), True, 'import numpy as np\n'), ((7771, 7790), 'numpy.average', 'np.average', (['wer_arr'], {}), '(wer_arr)\n', (7781, 7790), True, 'import numpy as np\n')] |
import numpy as np
class AnchorParameters:
    """Container for the anchor-generation hyper-parameters of an FPN."""

    def __init__(self, sizes, strides, ratios, scales):
        self.sizes = sizes
        self.strides = strides
        self.ratios = ratios
        self.scales = scales

    def num_anchors(self):
        """Number of anchors per feature-map cell: one per (ratio, scale) pair."""
        return len(self.scales) * len(self.ratios)
# Default anchor configuration: one (size, stride) pair per pyramid level.
AnchorParameters.default = AnchorParameters(
    sizes   = [32, 64, 128, 256, 512],
    strides = [8, 16, 32, 64, 128],
    # FIX: the np.float alias was removed in NumPy 1.24; use the builtin
    # float dtype (identical semantics: float64).
    ratios  = np.array([0.5, 1, 2], dtype=float),
    scales  = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], dtype=float),
)
def generate_anchors(base_size=16, ratios=None, scales=None):
    """Generate origin-centred base anchors for one pyramid level.

    Args:
        base_size: reference anchor side length in pixels.
        ratios: aspect ratios (height/width) to enumerate; defaults to
            ``AnchorParameters.default.ratios``.
        scales: size multipliers to enumerate; defaults to
            ``AnchorParameters.default.scales``.

    Returns:
        (len(ratios) * len(scales), 4) float array of (x1, y1, x2, y2)
        anchors centred on the origin.
    """
    if ratios is None:
        ratios = AnchorParameters.default.ratios
    if scales is None:
        scales = AnchorParameters.default.scales
    # e.g. 3 ratios * 3 scales -> 9 anchors per cell
    num_anchors = len(ratios) * len(scales)
    anchors = np.zeros((num_anchors, 4))
    # Start each anchor as a base_size * scale square; cols 2:4 hold (w, h).
    anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
    # Area of each square before applying the aspect ratios.
    areas = anchors[:, 2] * anchors[:, 3]
    # np.repeat(ratios, len(scales)) pairs every ratio with every scale,
    # e.g. [0.5 0.5 0.5 1. 1. 1. 2. 2. 2.]; reshape each square to the
    # requested h/w ratio while preserving its area.
    anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
    anchors[:, 3] = np.sqrt(areas * np.repeat(ratios, len(scales)))
    # Convert from (0, 0, w, h) to a box centred on the origin.
    anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
    anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
    return anchors
def shift(shape, stride, anchors):
    """Tile a set of base anchors over every cell of a feature map.

    Args:
        shape: (height, width) of the feature map.
        stride: pixel stride of the feature map w.r.t. the input image.
        anchors: (A, 4) array of base anchors centred on the origin.

    Returns:
        (height * width * A, 4) array of shifted anchors in image coordinates.
    """
    # Cell centres in input-image coordinates.
    # FIX: the np.float alias was removed in NumPy 1.24; use builtin float.
    shift_x = (np.arange(0, shape[1], dtype=float) + 0.5) * stride
    shift_y = (np.arange(0, shape[0], dtype=float) + 0.5) * stride

    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shift_x = np.reshape(shift_x, [-1])
    shift_y = np.reshape(shift_y, [-1])

    # Stack the centres so each row shifts (x1, y1, x2, y2) alike.
    shifts = np.stack([
        shift_x,
        shift_y,
        shift_x,
        shift_y
    ], axis=0)
    shifts = np.transpose(shifts)

    number_of_anchors = np.shape(anchors)[0]
    k = np.shape(shifts)[0]

    # Broadcast (1, A, 4) + (k, 1, 4) -> (k, A, 4), then flatten to (k*A, 4).
    shifted_anchors = np.reshape(anchors, [1, number_of_anchors, 4]) + np.reshape(shifts, [k, 1, 4])
    shifted_anchors = np.reshape(shifted_anchors, [k * number_of_anchors, 4])

    return shifted_anchors
def get_anchors(image_size):
    """Build the full anchor set for a square input of side ``image_size``.

    Returns:
        (N, 4) array of anchors normalised by the image size (coordinates
        relative to [0, 1]).
    """
    # Widths/heights of the five pyramid feature maps (strides 8 .. 128).
    features = [image_size / 8, image_size / 16, image_size / 32,
                image_size / 64, image_size / 128]
    all_anchors = []
    for level in range(5):
        # Nine base anchors for this level, tiled over the level's grid.
        base = generate_anchors(AnchorParameters.default.sizes[level])
        level_anchors = shift([features[level], features[level]],
                              AnchorParameters.default.strides[level], base)
        all_anchors.append(level_anchors)
    # Stack every level and normalise to relative coordinates.
    return np.concatenate(all_anchors, axis=0) / image_size
| [
"numpy.stack",
"numpy.meshgrid",
"numpy.transpose",
"numpy.zeros",
"numpy.shape",
"numpy.array",
"numpy.reshape",
"numpy.tile",
"numpy.arange",
"numpy.concatenate"
] | [((777, 803), 'numpy.zeros', 'np.zeros', (['(num_anchors, 4)'], {}), '((num_anchors, 4))\n', (785, 803), True, 'import numpy as np\n'), ((1510, 1539), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (1521, 1539), True, 'import numpy as np\n'), ((1554, 1579), 'numpy.reshape', 'np.reshape', (['shift_x', '[-1]'], {}), '(shift_x, [-1])\n', (1564, 1579), True, 'import numpy as np\n'), ((1594, 1619), 'numpy.reshape', 'np.reshape', (['shift_y', '[-1]'], {}), '(shift_y, [-1])\n', (1604, 1619), True, 'import numpy as np\n'), ((1650, 1704), 'numpy.stack', 'np.stack', (['[shift_x, shift_y, shift_x, shift_y]'], {'axis': '(0)'}), '([shift_x, shift_y, shift_x, shift_y], axis=0)\n', (1658, 1704), True, 'import numpy as np\n'), ((1768, 1788), 'numpy.transpose', 'np.transpose', (['shifts'], {}), '(shifts)\n', (1780, 1788), True, 'import numpy as np\n'), ((2038, 2093), 'numpy.reshape', 'np.reshape', (['shifted_anchors', '[k * number_of_anchors, 4]'], {}), '(shifted_anchors, [k * number_of_anchors, 4])\n', (2048, 2093), True, 'import numpy as np\n'), ((2786, 2821), 'numpy.concatenate', 'np.concatenate', (['all_anchors'], {'axis': '(0)'}), '(all_anchors, axis=0)\n', (2800, 2821), True, 'import numpy as np\n'), ((435, 466), 'numpy.array', 'np.array', (['[0.5, 1, 2]', 'np.float'], {}), '([0.5, 1, 2], np.float)\n', (443, 466), True, 'import numpy as np\n'), ((482, 546), 'numpy.array', 'np.array', (['[2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]', 'np.float'], {}), '([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], np.float)\n', (490, 546), True, 'import numpy as np\n'), ((1171, 1207), 'numpy.tile', 'np.tile', (['(anchors[:, 2] * 0.5)', '(2, 1)'], {}), '(anchors[:, 2] * 0.5, (2, 1))\n', (1178, 1207), True, 'import numpy as np\n'), ((1235, 1271), 'numpy.tile', 'np.tile', (['(anchors[:, 3] * 0.5)', '(2, 1)'], {}), '(anchors[:, 3] * 0.5, (2, 1))\n', (1242, 1271), True, 'import numpy as np\n'), ((1813, 1830), 'numpy.shape', 'np.shape', (['anchors'], {}), 
'(anchors)\n', (1821, 1830), True, 'import numpy as np\n'), ((1843, 1859), 'numpy.shape', 'np.shape', (['shifts'], {}), '(shifts)\n', (1851, 1859), True, 'import numpy as np\n'), ((1927, 1973), 'numpy.reshape', 'np.reshape', (['anchors', '[1, number_of_anchors, 4]'], {}), '(anchors, [1, number_of_anchors, 4])\n', (1937, 1973), True, 'import numpy as np\n'), ((1362, 1400), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]'], {'dtype': 'np.float'}), '(0, shape[1], dtype=np.float)\n', (1371, 1400), True, 'import numpy as np\n'), ((1432, 1470), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]'], {'dtype': 'np.float'}), '(0, shape[0], dtype=np.float)\n', (1441, 1470), True, 'import numpy as np\n'), ((1985, 2014), 'numpy.reshape', 'np.reshape', (['shifts', '[k, 1, 4]'], {}), '(shifts, [k, 1, 4])\n', (1995, 2014), True, 'import numpy as np\n')] |
from __future__ import division
import os
import torch
import numpy as np
import torch.nn.functional as F
from mmcv.runner import load_checkpoint
from mmcv.parallel import MMDataParallel
from gae.datasets import build_dataset, build_dataloader
from gae.online_evaluation import online_evaluate
from utils import (clusters2labels, intdict2ndarray, get_cluster_idxs,
write_meta)
from proposals.graph import graph_clustering_dynamic_th
from evaluation import evaluate
def test(model, dataset, cfg, logger):
    """Run the GAE model over *dataset* and collect per-edge link scores.

    Optionally restores weights from ``cfg.load_from``. Only the
    single-GPU path (``cfg.gpus == 1``) is implemented.

    Returns:
        tuple: ``(edges, scores, n)`` where ``edges`` is an array of
        ``[center_node, neighbor_node]`` index pairs, ``scores`` the
        matching per-edge scores, and ``n`` the dataset size.
    """
    if cfg.load_from:
        print('load from {}'.format(cfg.load_from))
        load_checkpoint(model, cfg.load_from, strict=True, logger=logger)
    losses = []
    edges = []
    scores = []
    if cfg.gpus == 1:
        data_loader = build_dataloader(dataset,
                                         cfg.batch_size_per_gpu,
                                         cfg.workers_per_gpu,
                                         train=False)
        model = MMDataParallel(model, device_ids=range(cfg.gpus))
        if cfg.cuda:
            model.cuda()
        model.eval()
        for i, (data, cid, node_list) in enumerate(data_loader):
            with torch.no_grad():
                _, A, h1id, gtmat = data #x, A, one_hops_list, labels
                pred, loss = model(data, return_loss=True) # x, loss => A, loss
                losses += [loss.item()]
                # class probabilities per predicted edge; column 1 is used
                # as the edge score below
                pred = F.softmax(pred, dim=1)
            if i % cfg.log_config.interval == 0:
                if dataset.ignore_label:
                    logger.info('[Test] Iter {}/{}'.format(
                        i, len(data_loader)))
                else:
                    acc, p, r = online_evaluate(A, pred) #gtmat -> A
                    logger.info(
                        '[Test] Iter {}/{}: Loss {:.4f}, '
                        'Accuracy {:.4f}, Precision {:.4f}, Recall {:.4f}'.
                        format(i, len(data_loader), loss, acc, p, r))
            # map batch-local indices back to global node ids and record one
            # (center, one-hop neighbor) edge per prediction
            node_list = node_list.numpy()
            bs = len(cid)
            h1id_num = len(h1id[0])
            for b in range(bs):
                cidb = cid[b].int().item()
                nlst = node_list[b]
                center_idx = nlst[cidb]
                for j, n in enumerate(h1id[b]):
                    edges.append([center_idx, nlst[n.item()]])
                    scores.append(pred[b * h1id_num + j, 1].item())
    else:
        raise NotImplementedError
    if not dataset.ignore_label:
        avg_loss = sum(losses) / len(losses)
        logger.info('[Test] Overall Loss {:.4f}'.format(avg_loss))
    return np.array(edges), np.array(scores), len(dataset)
def test_gae(model, cfg, logger):
    """Run GAE inference, cluster the scored edges, and evaluate.

    Edge scores are loaded from a cached ``.npz`` when present (unless
    ``cfg.force``), otherwise computed via ``test()``. Clusters are then
    built with a dynamic-threshold graph clustering and, when ground-truth
    labels are available, evaluated with every metric in ``cfg.metrics``.
    """
    for key, value in cfg.model['kwargs'].items():
        setattr(cfg.test_data, key, value)
    dataset = build_dataset(cfg.test_data)

    ofn_pred = os.path.join(cfg.work_dir, 'pred_edges_scores.npz')
    cache_usable = os.path.isfile(ofn_pred) and not cfg.force
    if cache_usable:
        cached = np.load(ofn_pred)
        edges = cached['edges']
        scores = cached['scores']
        inst_num = cached['inst_num']
        if inst_num != len(dataset):
            logger.warn(
                'instance number in {} is different from dataset: {} vs {}'.
                format(ofn_pred, inst_num, len(dataset)))
    else:
        edges, scores, inst_num = test(model, dataset, cfg, logger)

    # derive predicted cluster labels from the scored edge list
    clusters = graph_clustering_dynamic_th(edges,
                                           scores,
                                           max_sz=cfg.max_sz,
                                           step=cfg.step,
                                           pool=cfg.pool)
    pred_idx2lb = clusters2labels(clusters)
    pred_labels = intdict2ndarray(pred_idx2lb)

    if cfg.save_output:
        print('save predicted edges and scores to {}'.format(ofn_pred))
        np.savez_compressed(ofn_pred,
                            edges=edges,
                            scores=scores,
                            inst_num=inst_num)
        ofn_meta = os.path.join(cfg.work_dir, 'pred_labels.txt')
        write_meta(ofn_meta, pred_idx2lb, inst_num=inst_num)

    # evaluation (skipped when the dataset carries no labels)
    if dataset.ignore_label:
        return
    print('==> evaluation')
    gt_labels = dataset.labels
    for metric in cfg.metrics:
        evaluate(gt_labels, pred_labels, metric)
    # re-evaluate with singleton clusters removed
    single_cluster_idxs = get_cluster_idxs(clusters, size=1)
    print('==> evaluation (removing {} single clusters)'.format(
        len(single_cluster_idxs)))
    remain_idxs = np.setdiff1d(np.arange(len(dataset)),
                               np.array(single_cluster_idxs))
    remain_idxs = np.array(remain_idxs)
    for metric in cfg.metrics:
        evaluate(gt_labels[remain_idxs], pred_labels[remain_idxs], metric)
| [
"utils.get_cluster_idxs",
"numpy.load",
"gae.datasets.build_dataset",
"gae.datasets.build_dataloader",
"utils.write_meta",
"utils.clusters2labels",
"evaluation.evaluate",
"torch.nn.functional.softmax",
"os.path.isfile",
"numpy.savez_compressed",
"numpy.array",
"proposals.graph.graph_clustering... | [((2864, 2892), 'gae.datasets.build_dataset', 'build_dataset', (['cfg.test_data'], {}), '(cfg.test_data)\n', (2877, 2892), False, 'from gae.datasets import build_dataset, build_dataloader\n'), ((2909, 2960), 'os.path.join', 'os.path.join', (['cfg.work_dir', '"""pred_edges_scores.npz"""'], {}), "(cfg.work_dir, 'pred_edges_scores.npz')\n", (2921, 2960), False, 'import os\n'), ((3465, 3560), 'proposals.graph.graph_clustering_dynamic_th', 'graph_clustering_dynamic_th', (['edges', 'scores'], {'max_sz': 'cfg.max_sz', 'step': 'cfg.step', 'pool': 'cfg.pool'}), '(edges, scores, max_sz=cfg.max_sz, step=cfg.step,\n pool=cfg.pool)\n', (3492, 3560), False, 'from proposals.graph import graph_clustering_dynamic_th\n'), ((3747, 3772), 'utils.clusters2labels', 'clusters2labels', (['clusters'], {}), '(clusters)\n', (3762, 3772), False, 'from utils import clusters2labels, intdict2ndarray, get_cluster_idxs, write_meta\n'), ((3791, 3819), 'utils.intdict2ndarray', 'intdict2ndarray', (['pred_idx2lb'], {}), '(pred_idx2lb)\n', (3806, 3819), False, 'from utils import clusters2labels, intdict2ndarray, get_cluster_idxs, write_meta\n'), ((611, 676), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['model', 'cfg.load_from'], {'strict': '(True)', 'logger': 'logger'}), '(model, cfg.load_from, strict=True, logger=logger)\n', (626, 676), False, 'from mmcv.runner import load_checkpoint\n'), ((770, 857), 'gae.datasets.build_dataloader', 'build_dataloader', (['dataset', 'cfg.batch_size_per_gpu', 'cfg.workers_per_gpu'], {'train': '(False)'}), '(dataset, cfg.batch_size_per_gpu, cfg.workers_per_gpu,\n train=False)\n', (786, 857), False, 'from gae.datasets import build_dataset, build_dataloader\n'), ((2684, 2699), 'numpy.array', 'np.array', (['edges'], {}), '(edges)\n', (2692, 2699), True, 'import numpy as np\n'), ((2701, 2717), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2709, 2717), True, 'import numpy as np\n'), ((2968, 2992), 'os.path.isfile', 
'os.path.isfile', (['ofn_pred'], {}), '(ofn_pred)\n', (2982, 2992), False, 'import os\n'), ((3027, 3044), 'numpy.load', 'np.load', (['ofn_pred'], {}), '(ofn_pred)\n', (3034, 3044), True, 'import numpy as np\n'), ((3925, 4001), 'numpy.savez_compressed', 'np.savez_compressed', (['ofn_pred'], {'edges': 'edges', 'scores': 'scores', 'inst_num': 'inst_num'}), '(ofn_pred, edges=edges, scores=scores, inst_num=inst_num)\n', (3944, 4001), True, 'import numpy as np\n'), ((4105, 4150), 'os.path.join', 'os.path.join', (['cfg.work_dir', '"""pred_labels.txt"""'], {}), "(cfg.work_dir, 'pred_labels.txt')\n", (4117, 4150), False, 'import os\n'), ((4159, 4211), 'utils.write_meta', 'write_meta', (['ofn_meta', 'pred_idx2lb'], {'inst_num': 'inst_num'}), '(ofn_meta, pred_idx2lb, inst_num=inst_num)\n', (4169, 4211), False, 'from utils import clusters2labels, intdict2ndarray, get_cluster_idxs, write_meta\n'), ((4449, 4483), 'utils.get_cluster_idxs', 'get_cluster_idxs', (['clusters'], {'size': '(1)'}), '(clusters, size=1)\n', (4465, 4483), False, 'from utils import clusters2labels, intdict2ndarray, get_cluster_idxs, write_meta\n'), ((4740, 4761), 'numpy.array', 'np.array', (['remain_idxs'], {}), '(remain_idxs)\n', (4748, 4761), True, 'import numpy as np\n'), ((4377, 4417), 'evaluation.evaluate', 'evaluate', (['gt_labels', 'pred_labels', 'metric'], {}), '(gt_labels, pred_labels, metric)\n', (4385, 4417), False, 'from evaluation import evaluate\n'), ((4687, 4716), 'numpy.array', 'np.array', (['single_cluster_idxs'], {}), '(single_cluster_idxs)\n', (4695, 4716), True, 'import numpy as np\n'), ((4809, 4875), 'evaluation.evaluate', 'evaluate', (['gt_labels[remain_idxs]', 'pred_labels[remain_idxs]', 'metric'], {}), '(gt_labels[remain_idxs], pred_labels[remain_idxs], metric)\n', (4817, 4875), False, 'from evaluation import evaluate\n'), ((1189, 1204), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1202, 1204), False, 'import torch\n'), ((1419, 1441), 'torch.nn.functional.softmax', 
'F.softmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (1428, 1441), True, 'import torch.nn.functional as F\n'), ((1716, 1740), 'gae.online_evaluation.online_evaluate', 'online_evaluate', (['A', 'pred'], {}), '(A, pred)\n', (1731, 1740), False, 'from gae.online_evaluation import online_evaluate\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
"""generate_data - Contains data processing functions for the Graph Generation
Component"""
# =============================================================================
# Imports
# =============================================================================
import torch
import pickle
import numpy as np
import networkx as nx
from os import listdir
from torch.utils import data
from os.path import isfile, join
def create_graphs(args):
    """Load every 'arch' GraphML file under ./dataset and keep each graph's
    largest connected component.

    Side effect: overwrites ``args.max_prev_node`` with 300 (the encoder
    truncation width used downstream).

    :param args: options object whose ``max_prev_node`` attribute is set
    :return: list of undirected networkx graphs, one per matching file
    """
    graphs = []
    graphlist = [f for f in listdir("./dataset") if isfile(join("./dataset", f))]
    for name in graphlist:
        if 'arch' not in name:
            continue
        G = nx.read_graphml('dataset/' + name).to_undirected()
        # nx.connected_component_subgraphs() was removed in NetworkX 2.4 and
        # G.nodes._nodes is a private attribute; use the public API instead:
        # take the node set of the largest component and induce its subgraph.
        largest_cc = max(nx.connected_components(G), key=len)
        graphs.append(G.subgraph(largest_cc).copy())
    args.max_prev_node = 300  # Original: 300
    return graphs
def get_graph(adj):
    '''
    Build a directed graph from a (possibly zero-padded) adjacency matrix.

    :param adj: square adjacency matrix (array-like)
    :return: networkx DiGraph with one node per row/column of adj
    '''
    matrix = np.asmatrix(adj)
    graph = nx.from_numpy_matrix(matrix, create_using=nx.DiGraph)
    return graph
def save_graph_list(G_list, fname):
    """Serialize the list of graphs *G_list* to the file *fname* via pickle."""
    with open(fname, "wb") as handle:
        pickle.dump(G_list, handle)
def bfs_seq(G, start_id):
    '''
    Return the BFS visiting order of G's nodes starting from start_id.

    Nodes unreachable from start_id are not included.

    :param G: networkx graph
    :param start_id: node to start the traversal from
    :return: list of node ids in BFS order (level by level)
    '''
    successors = dict(nx.bfs_successors(G, start_id))
    output = [start_id]
    frontier = [start_id]
    while frontier:
        # collect the whole next BFS level, preserving per-parent order
        next_level = []
        for current in frontier:
            children = successors.get(current)
            if children is not None:
                next_level.extend(children)
        output.extend(next_level)
        frontier = next_level
    return output
def encode_adj(adj, max_prev_node=10, is_full=False):
    '''
    Encode an adjacency matrix as a truncated, reversed edge sequence.

    Row i of the result records node i+1's connections to its (at most
    max_prev_node) immediate predecessors, stored in reverse order and
    zero-padded on the right.

    :param adj: n*n adjacency matrix; rows are time steps
    :param max_prev_node: number of previous nodes kept per row
    :param is_full: if True, keep all n-1 previous nodes
    :return: (n-1) x max_prev_node array
    '''
    if is_full:
        max_prev_node = adj.shape[0] - 1
    # keep the strict lower triangle, then drop the first row / last column
    lower = np.tril(adj, k=-1)
    size = lower.shape[0]
    lower = lower[1:size, 0:size - 1]
    encoded = np.zeros((lower.shape[0], max_prev_node))
    for row in range(lower.shape[0]):
        lo = max(0, row - max_prev_node + 1)
        hi = row + 1
        # write the truncated slice already reversed, left-aligned
        encoded[row, 0:hi - lo] = lower[row, lo:hi][::-1]
    return encoded
def decode_adj(adj_output):
    '''
    Recover a full adjacency matrix from its encode_adj() representation.

    :param adj_output: (n-1) x max_prev_node encoded sequence
    :return: n x n matrix whose strict lower triangle holds the edges
             (the upper triangle is left zero)
    '''
    max_prev_node = adj_output.shape[1]
    num_rows = adj_output.shape[0]
    adj = np.zeros((num_rows, num_rows))
    for i in range(num_rows):
        lo = max(0, i - max_prev_node + 1)
        hi = i + 1
        skip = max_prev_node - (hi - lo)
        # undo the per-row reversal while placing the slice back
        adj[i, lo:hi] = adj_output[i, ::-1][skip:max_prev_node]
    adj_full = np.zeros((num_rows + 1, num_rows + 1))
    adj_full[1:, :-1] = np.tril(adj, 0)
    # adj_full = adj_full + adj_full.T
    return adj_full
def encode_adj_flexible(adj):
    '''
    Encode an adjacency matrix as variable-length rows with no loss.

    Row i holds node i+1's connections back to the earliest predecessor
    still connected; decoding is exact.

    :param adj: n*n adjacency matrix
    :return: list of 1-D arrays, one per node after the first
    '''
    lower = np.tril(adj, k=-1)
    size = lower.shape[0]
    lower = lower[1:size, 0:size - 1]
    encoded = []
    start = 0
    for row in range(lower.shape[0]):
        end = row + 1
        piece = lower[row, start:end]
        encoded.append(piece)
        # advance the window to the first nonzero entry of this row
        nonzero = np.nonzero(piece)[0]
        start = end - len(piece) + np.amin(nonzero)
    return encoded
class Graph_sequence_sampler_pytorch(torch.utils.data.Dataset):
    """PyTorch dataset yielding BFS-ordered adjacency sequences.

    Each item applies a random node permutation, reorders via BFS from a
    random start node, and encodes the adjacency matrix with encode_adj().
    Items are dicts with keys 'x' (input sequence, first row all ones),
    'y' (target sequence) and 'len' (node count of the sampled graph).
    """
    def __init__(self, G_list, max_num_node=None, max_prev_node=None, iteration=20000):
        # adj_all: dense adjacency matrix per graph; len_all: node counts;
        # nodeatt: per-graph 'data' node attributes (not used elsewhere here)
        self.adj_all = []
        self.len_all = []
        self.nodeatt = []
        for G in G_list:
            self.adj_all.append(np.asarray(nx.to_numpy_matrix(G)))
            self.len_all.append(G.number_of_nodes())
            self.nodeatt.append(np.asarray(nx.get_node_attributes(G, 'data')))
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        if max_prev_node is None:
            # estimate the truncation width empirically by random sampling
            print('calculating max previous node, total iteration: {}'.format(iteration))
            self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
            print('max previous node: {}'.format(self.max_prev_node))
        else:
            self.max_prev_node = max_prev_node

    def __len__(self):
        return len(self.adj_all)

    def __getitem__(self, idx):
        """Sample one permuted, BFS-ordered, encoded sequence for graph *idx*."""
        adj_copy = self.adj_all[idx].copy()
        x_batch = np.zeros((self.n, self.max_prev_node))  # here zeros are padded for small graph
        x_batch[0,:] = 1 # the first input token is all ones
        y_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
        # generate input x, y pairs
        len_batch = adj_copy.shape[0]
        x_idx = np.random.permutation(adj_copy.shape[0])
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_copy_matrix = np.asmatrix(adj_copy)
        G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        start_idx = np.random.randint(adj_copy.shape[0])
        x_idx = np.array(bfs_seq(G, start_idx))
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
        # get x and y and adj
        # for small graph the rest are zero padded
        y_batch[0:adj_encoded.shape[0], :] = adj_encoded
        x_batch[1:adj_encoded.shape[0] + 1, :] = adj_encoded
        return {'x':x_batch,'y':y_batch, 'len':len_batch}

    def calc_max_prev_node(self, iter=20000,topk=10):
        """Estimate max_prev_node: sample *iter* random BFS orderings and
        return the top-k largest flexible-encoding row lengths seen."""
        max_prev_node = []
        for i in range(iter):
            if i % (iter / 5) == 0:
                print('iter {} times'.format(i))
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            # print('Graph size', adj_copy.shape[0])
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max([len(adj_encoded[i]) for i in range(len(adj_encoded))])
            max_prev_node.append(max_encoded_len)
        max_prev_node = sorted(max_prev_node)[-1*topk:]
        return max_prev_node
return max_prev_node | [
"pickle.dump",
"networkx.from_numpy_matrix",
"numpy.amin",
"numpy.tril",
"numpy.ix_",
"numpy.zeros",
"networkx.to_numpy_matrix",
"networkx.read_graphml",
"numpy.nonzero",
"networkx.bfs_successors",
"numpy.asmatrix",
"numpy.random.randint",
"networkx.get_node_attributes",
"numpy.random.perm... | [((1285, 1301), 'numpy.asmatrix', 'np.asmatrix', (['adj'], {}), '(adj)\n', (1296, 1301), True, 'import numpy as np\n'), ((1310, 1360), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj'], {'create_using': 'nx.DiGraph'}), '(adj, create_using=nx.DiGraph)\n', (1330, 1360), True, 'import networkx as nx\n'), ((2330, 2348), 'numpy.tril', 'np.tril', (['adj'], {'k': '(-1)'}), '(adj, k=-1)\n', (2337, 2348), True, 'import numpy as np\n'), ((2494, 2533), 'numpy.zeros', 'np.zeros', (['(adj.shape[0], max_prev_node)'], {}), '((adj.shape[0], max_prev_node))\n', (2502, 2533), True, 'import numpy as np\n'), ((3093, 3145), 'numpy.zeros', 'np.zeros', (['(adj_output.shape[0], adj_output.shape[0])'], {}), '((adj_output.shape[0], adj_output.shape[0]))\n', (3101, 3145), True, 'import numpy as np\n'), ((3494, 3554), 'numpy.zeros', 'np.zeros', (['(adj_output.shape[0] + 1, adj_output.shape[0] + 1)'], {}), '((adj_output.shape[0] + 1, adj_output.shape[0] + 1))\n', (3502, 3554), True, 'import numpy as np\n'), ((3604, 3619), 'numpy.tril', 'np.tril', (['adj', '(0)'], {}), '(adj, 0)\n', (3611, 3619), True, 'import numpy as np\n'), ((3914, 3932), 'numpy.tril', 'np.tril', (['adj'], {'k': '(-1)'}), '(adj, k=-1)\n', (3921, 3932), True, 'import numpy as np\n'), ((1452, 1474), 'pickle.dump', 'pickle.dump', (['G_list', 'f'], {}), '(G_list, f)\n', (1463, 1474), False, 'import pickle\n'), ((1616, 1646), 'networkx.bfs_successors', 'nx.bfs_successors', (['G', 'start_id'], {}), '(G, start_id)\n', (1633, 1646), True, 'import networkx as nx\n'), ((5367, 5405), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (5375, 5405), True, 'import numpy as np\n'), ((5526, 5564), 'numpy.zeros', 'np.zeros', (['(self.n, self.max_prev_node)'], {}), '((self.n, self.max_prev_node))\n', (5534, 5564), True, 'import numpy as np\n'), ((5696, 5736), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), 
'(adj_copy.shape[0])\n', (5717, 5736), True, 'import numpy as np\n'), ((5813, 5834), 'numpy.asmatrix', 'np.asmatrix', (['adj_copy'], {}), '(adj_copy)\n', (5824, 5834), True, 'import numpy as np\n'), ((5847, 5884), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_copy_matrix'], {}), '(adj_copy_matrix)\n', (5867, 5884), True, 'import networkx as nx\n'), ((5945, 5981), 'numpy.random.randint', 'np.random.randint', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (5962, 5981), True, 'import numpy as np\n'), ((612, 632), 'os.listdir', 'listdir', (['"""./dataset"""'], {}), "('./dataset')\n", (619, 632), False, 'from os import listdir\n'), ((737, 771), 'networkx.read_graphml', 'nx.read_graphml', (["('dataset/' + name)"], {}), "('dataset/' + name)\n", (752, 771), True, 'import networkx as nx\n'), ((4187, 4208), 'numpy.nonzero', 'np.nonzero', (['adj_slice'], {}), '(adj_slice)\n', (4197, 4208), True, 'import numpy as np\n'), ((4259, 4276), 'numpy.amin', 'np.amin', (['non_zero'], {}), '(non_zero)\n', (4266, 4276), True, 'import numpy as np\n'), ((5765, 5785), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (5771, 5785), True, 'import numpy as np\n'), ((6058, 6078), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (6064, 6078), True, 'import numpy as np\n'), ((6802, 6842), 'numpy.random.permutation', 'np.random.permutation', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (6823, 6842), True, 'import numpy as np\n'), ((6927, 6948), 'numpy.asmatrix', 'np.asmatrix', (['adj_copy'], {}), '(adj_copy)\n', (6938, 6948), True, 'import numpy as np\n'), ((6965, 7002), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_copy_matrix'], {}), '(adj_copy_matrix)\n', (6985, 7002), True, 'import networkx as nx\n'), ((7071, 7107), 'numpy.random.randint', 'np.random.randint', (['adj_copy.shape[0]'], {}), '(adj_copy.shape[0])\n', (7088, 7107), True, 'import numpy as np\n'), ((643, 663), 'os.path.join', 'join', 
(['"""./dataset"""', 'f'], {}), "('./dataset', f)\n", (647, 663), False, 'from os.path import isfile, join\n'), ((826, 861), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['G'], {}), '(G)\n', (858, 861), True, 'import networkx as nx\n'), ((6875, 6895), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (6881, 6895), True, 'import numpy as np\n'), ((7192, 7212), 'numpy.ix_', 'np.ix_', (['x_idx', 'x_idx'], {}), '(x_idx, x_idx)\n', (7198, 7212), True, 'import numpy as np\n'), ((4598, 4619), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['G'], {}), '(G)\n', (4616, 4619), True, 'import networkx as nx\n'), ((4718, 4751), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""data"""'], {}), "(G, 'data')\n", (4740, 4751), True, 'import networkx as nx\n')] |
from cvgutils.nn.jaxUtils.unet_model import UNet
import jax.numpy as jnp
import jax
import optax
from jaxopt import OptaxSolver
import tensorflow as tf
import tqdm
import numpy as np
from deepfnf_utils.dataset import Dataset
import cvgutils.Utils as cvgutil
import deepfnf_utils.tf_utils as tfu
import cvgutils.Viz as Viz
import cvgutils.Linalg as linalg
import argparse
from jaxopt import implicit_diff, linear_solve
from implicit_diff import diff_solver, fnf_regularizer
def parse_arguments(parser):
    """Register model/training hyper-parameter flags on *parser*.

    Returns the same parser to allow chained registration calls.
    """
    parser.add_argument('--model', type=str, default='overfit_unet',
    choices=['overfit_straight','interpolate_straight','overfit_unet','interpolate_unet','UNet_Hardcode'],help='Which model to use')
    # NOTE(review): several help strings previously read 'Maximum rotation'
    # (copy-paste from another file); corrected to describe the real option.
    parser.add_argument('--lr', default=1e-4, type=float,help='Learning rate')
    parser.add_argument('--display_freq', default=1000, type=int,help='Display frequency by iteration count')
    parser.add_argument('--val_freq', default=101, type=int,help='Validation frequency by iteration count')
    parser.add_argument('--save_param_freq', default=100,type=int, help='Parameter checkpoint frequency by iteration count')
    parser.add_argument('--max_iter', default=1000000, type=int,help='Maximum iteration count')
    parser.add_argument('--unet_depth', default=4, type=int,help='Depth of neural net')
    return parser
# Assemble the CLI: each component registers its own flags on the parser.
parser = argparse.ArgumentParser()
parser = parse_arguments(parser)
parser = Viz.logger.parse_arguments(parser)
parser = Dataset.parse_arguments(parser)
parser = diff_solver.parse_arguments(parser)
parser = UNet.parse_arguments(parser)
opts = parser.parse_args()
# NOTE(review): this line discards the just-parsed CLI options and replaces
# them with a pickled options object -- confirm the override is intentional.
opts = cvgutil.loadPickle('./params.pickle')
# cvgutil.savePickle('./params.pickle',opts)
# exit(0)
# Hide all GPUs from TensorFlow (the compute here runs through JAX).
tf.config.set_visible_devices([], device_type='GPU')
dataset = Dataset(opts)
logger = Viz.logger(opts,opts.__dict__)
# Pull one training batch up-front to size the network input.
batch = dataset.next_batch(False)
im = batch['net_input']
def init_model(rng, x, batch, test=False, group_norm=True):
    """Build the regularized UNet and initialize its parameters.

    Note: the network flags are read from the global `opts`, not from the
    `test`/`group_norm` arguments.
    """
    network = UNet(opts.in_features, opts.out_features, opts.bilinear,
                   opts.test, opts.group_norm)
    regularized = fnf_regularizer(network)
    return regularized.init(rng, fnf_regularizer.init_point(batch), batch)
# def model_test(params, im,group_norm=True):
# return UNet(n_channels,n_classes,bilinear,True,group_norm).apply(params, im)
def unet_train(params, im, batch, group_norm=True):
    """Apply the regularized UNet in training mode.

    With group_norm=False the batch-norm statistics are marked mutable.
    """
    module = fnf_regularizer(UNet(opts.in_features, opts.out_features,
                                  opts.bilinear, False, opts.group_norm))
    if not group_norm:
        return module.apply(params, im, batch, mutable=['batch_stats'])
    return module.apply(params, im, batch)
# Build the differentiable inner solver and bind it to the UNet regularizer.
diffable_solver = diff_solver()
diffable_solver.init(opts,unet_train,fnf_regularizer.init_point)
# @jax.jit
def model_test(params, batch, group_norm=True):
    """Run the differentiable inner solver for evaluation."""
    solution = diffable_solver.nonlinear_solver_id(params, batch)
    return solution
# @jax.jit
def model_train(params, batch, group_norm=True):
    """Run the differentiable inner solver during training."""
    solution = diffable_solver.nonlinear_solver_id(params, batch)
    return solution
# Deterministic PRNG seed; split off a key for parameter initialization.
rng = jax.random.PRNGKey(1)
rng, init_rng = jax.random.split(rng)
params = init_model(init_rng, jnp.array(im),batch)
# @jax.jit
def loss(params, batch, test=False):
    """Sum-of-squares reconstruction loss against the ambient image.

    Returns (scalar_loss, aux) where aux is the solver's auxiliary output.
    """
    prediction, aux = model_train(params, batch)
    residual = batch['ambient'] - prediction / batch['alpha']
    return (residual ** 2).sum(), aux
# @jax.jit
def update(params_p, state_p, batch_p):
    """Advance the optimizer one step on *batch_p*; returns (params, state)."""
    new_params, new_state = solver.update(params_p, state_p, batch=batch_p)
    return new_params, new_state
def camera_to_rgb(im, batch):
    """Convert a camera-space image to RGB using the batch's color matrices."""
    color = batch['color_matrix']
    adapt = batch['adapt_matrix']
    return tfu.camera_to_rgb_jax(im, color, adapt)
# Resume from a checkpoint when one exists.
data = logger.load_params()
start_idx=0
if(data is not None):
    # state = data['state']
    # NOTE(review): the checkpoint's 'state' is stored into `batch`, which is
    # overwritten by dataset.next_batch() below -- only params/idx survive.
    batch = data['state']
    params = data['params']
    start_idx = data['idx']
batch = dataset.next_batch(False)
# jaxopt wrapper around Adam; loss returns (value, aux) hence has_aux=True.
solver = OptaxSolver(fun=loss, opt=optax.adam(opts.lr),has_aux=True)
state = solver.init_state(params)
# Warm-up evaluations of the loss and its gradient (results unused below).
pred, aux = loss(params,batch)
grad = jax.grad(loss,has_aux=True)
grad_pred, aux_grad = grad(params,batch)
# Main optimization loop: one optimizer step per iteration, with periodic
# validation batches, image logging, and parameter checkpointing.
with tqdm.trange(start_idx,opts.max_iter) as t:
    for i in t:
        val_iter = i % opts.val_freq == 0
        mode = 'val' if val_iter else 'train'
        batch = dataset.next_batch(val_iter)
        # NOTE(review): l1/l2 are never used; this pre-update loss evaluation
        # looks redundant with the post-update one below.
        l1,l2 = loss(params,batch)
        params, state = update(params,state,batch)
        l,_ = loss(params,batch)
        t.set_description('loss '+str(np.array(l)))
        if(i % opts.display_freq == 0 or val_iter):
            # Convert everything to display RGB and log a side-by-side strip:
            # [ambient | prediction | noisy | flash]
            predicted = model_test(params, batch)
            g = camera_to_rgb(predicted[0]/batch['alpha'], batch)
            ambient = camera_to_rgb(batch['ambient'], batch)
            flash = camera_to_rgb(batch['flash'], batch)
            noisy = camera_to_rgb(batch['noisy']/batch['alpha'], batch)
            psnr = linalg.get_psnr_jax(jax.lax.stop_gradient(g),ambient)
            imshow = jnp.clip(jnp.concatenate((ambient,g,noisy,flash),axis=-2),0,1)
            logger.addImage(imshow[0],'image',mode=mode)
            logger.addScalar(psnr,'psnr',mode=mode)
        if(i % opts.save_param_freq == 0):
            logger.save_params(params,batch,i)
        logger.addScalar(l,'loss',mode=mode)
        logger.takeStep()
| [
"cvgutils.nn.jaxUtils.unet_model.UNet",
"optax.adam",
"argparse.ArgumentParser",
"jax.random.PRNGKey",
"cvgutils.Utils.loadPickle",
"implicit_diff.diff_solver.parse_arguments",
"tqdm.trange",
"jax.numpy.concatenate",
"deepfnf_utils.dataset.Dataset.parse_arguments",
"deepfnf_utils.tf_utils.camera_t... | [((1312, 1337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1335, 1337), False, 'import argparse\n'), ((1380, 1414), 'cvgutils.Viz.logger.parse_arguments', 'Viz.logger.parse_arguments', (['parser'], {}), '(parser)\n', (1406, 1414), True, 'import cvgutils.Viz as Viz\n'), ((1424, 1455), 'deepfnf_utils.dataset.Dataset.parse_arguments', 'Dataset.parse_arguments', (['parser'], {}), '(parser)\n', (1447, 1455), False, 'from deepfnf_utils.dataset import Dataset\n'), ((1465, 1500), 'implicit_diff.diff_solver.parse_arguments', 'diff_solver.parse_arguments', (['parser'], {}), '(parser)\n', (1492, 1500), False, 'from implicit_diff import diff_solver, fnf_regularizer\n'), ((1510, 1538), 'cvgutils.nn.jaxUtils.unet_model.UNet.parse_arguments', 'UNet.parse_arguments', (['parser'], {}), '(parser)\n', (1530, 1538), False, 'from cvgutils.nn.jaxUtils.unet_model import UNet\n'), ((1575, 1612), 'cvgutils.Utils.loadPickle', 'cvgutil.loadPickle', (['"""./params.pickle"""'], {}), "('./params.pickle')\n", (1593, 1612), True, 'import cvgutils.Utils as cvgutil\n'), ((1668, 1720), 'tensorflow.config.set_visible_devices', 'tf.config.set_visible_devices', (['[]'], {'device_type': '"""GPU"""'}), "([], device_type='GPU')\n", (1697, 1720), True, 'import tensorflow as tf\n'), ((1731, 1744), 'deepfnf_utils.dataset.Dataset', 'Dataset', (['opts'], {}), '(opts)\n', (1738, 1744), False, 'from deepfnf_utils.dataset import Dataset\n'), ((1754, 1785), 'cvgutils.Viz.logger', 'Viz.logger', (['opts', 'opts.__dict__'], {}), '(opts, opts.__dict__)\n', (1764, 1785), True, 'import cvgutils.Viz as Viz\n'), ((2583, 2596), 'implicit_diff.diff_solver', 'diff_solver', ([], {}), '()\n', (2594, 2596), False, 'from implicit_diff import diff_solver, fnf_regularizer\n'), ((2908, 2929), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(1)'], {}), '(1)\n', (2926, 2929), False, 'import jax\n'), ((2946, 2967), 'jax.random.split', 'jax.random.split', (['rng'], {}), 
'(rng)\n', (2962, 2967), False, 'import jax\n'), ((3801, 3829), 'jax.grad', 'jax.grad', (['loss'], {'has_aux': '(True)'}), '(loss, has_aux=True)\n', (3809, 3829), False, 'import jax\n'), ((2998, 3011), 'jax.numpy.array', 'jnp.array', (['im'], {}), '(im)\n', (3007, 3011), True, 'import jax.numpy as jnp\n'), ((3371, 3442), 'deepfnf_utils.tf_utils.camera_to_rgb_jax', 'tfu.camera_to_rgb_jax', (['im', "batch['color_matrix']", "batch['adapt_matrix']"], {}), "(im, batch['color_matrix'], batch['adapt_matrix'])\n", (3392, 3442), True, 'import deepfnf_utils.tf_utils as tfu\n'), ((3876, 3913), 'tqdm.trange', 'tqdm.trange', (['start_idx', 'opts.max_iter'], {}), '(start_idx, opts.max_iter)\n', (3887, 3913), False, 'import tqdm\n'), ((2023, 2056), 'implicit_diff.fnf_regularizer.init_point', 'fnf_regularizer.init_point', (['batch'], {}), '(batch)\n', (2049, 2056), False, 'from implicit_diff import diff_solver, fnf_regularizer\n'), ((3694, 3713), 'optax.adam', 'optax.adam', (['opts.lr'], {}), '(opts.lr)\n', (3704, 3713), False, 'import optax\n'), ((1931, 2020), 'cvgutils.nn.jaxUtils.unet_model.UNet', 'UNet', (['opts.in_features', 'opts.out_features', 'opts.bilinear', 'opts.test', 'opts.group_norm'], {}), '(opts.in_features, opts.out_features, opts.bilinear, opts.test, opts.\n group_norm)\n', (1935, 2020), False, 'from cvgutils.nn.jaxUtils.unet_model import UNet\n'), ((4647, 4671), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['g'], {}), '(g)\n', (4668, 4671), False, 'import jax\n'), ((4711, 4763), 'jax.numpy.concatenate', 'jnp.concatenate', (['(ambient, g, noisy, flash)'], {'axis': '(-2)'}), '((ambient, g, noisy, flash), axis=-2)\n', (4726, 4763), True, 'import jax.numpy as jnp\n'), ((2294, 2379), 'cvgutils.nn.jaxUtils.unet_model.UNet', 'UNet', (['opts.in_features', 'opts.out_features', 'opts.bilinear', '(False)', 'opts.group_norm'], {}), '(opts.in_features, opts.out_features, opts.bilinear, False, opts.group_norm\n )\n', (2298, 2379), False, 'from 
cvgutils.nn.jaxUtils.unet_model import UNet\n'), ((2437, 2522), 'cvgutils.nn.jaxUtils.unet_model.UNet', 'UNet', (['opts.in_features', 'opts.out_features', 'opts.bilinear', '(False)', 'opts.group_norm'], {}), '(opts.in_features, opts.out_features, opts.bilinear, False, opts.group_norm\n )\n', (2441, 2522), False, 'from cvgutils.nn.jaxUtils.unet_model import UNet\n'), ((4235, 4246), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (4243, 4246), True, 'import numpy as np\n')] |
import os, sys
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(PROJECT_PATH)
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import psi, polygamma
from keras.utils import to_categorical
from modules.data_loaders.base_line_loaders import load_fashion_mnist
from transformations import Transformer
from models.wide_residual_network import create_wide_residual_network
import time
import datetime
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
from tqdm import tqdm
from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr
if __name__ == "__main__":
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
sess = tf.Session(config=config)
set_session(sess)
single_class_ind = 1
(x_train, y_train), (x_test, y_test) = load_fashion_mnist()
print(x_train.shape)
print(x_test.shape)
transformer = Transformer(8, 8)
n, k = (10, 4)
mdl = create_wide_residual_network(input_shape=x_train.shape[1:],
num_classes=transformer.n_transforms,
depth=n, widen_factor=k)
mdl.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['acc'])
print(mdl.summary())
# get inliers of specific class
x_train_task = x_train[y_train.flatten() == single_class_ind]
print(x_train_task.shape)
# [0_i, ..., (N_transforms-1)_i, ..., ..., 0_N_samples, ...,
# (N_transforms-1)_N_samples] shape: (N_transforms*N_samples,)
transformations_inds = np.tile(np.arange(transformer.n_transforms),
len(x_train_task))
print(len(transformations_inds))
#
start_time = time.time()
x_train_task_transformed = transformer.transform_batch(
np.repeat(x_train_task, transformer.n_transforms, axis=0),
transformations_inds)
time_usage = str(datetime.timedelta(
seconds=int(round(time.time() - start_time))))
print("Time to perform transforms: " + time_usage)
print(x_train_task_transformed.shape)
batch_size = 128
start_time = time.time()
mdl.fit(x=x_train_task_transformed, y=to_categorical(transformations_inds),
batch_size=batch_size,
epochs=int(np.ceil(200 / transformer.n_transforms))
)
time_usage = str(datetime.timedelta(
seconds=int(round(time.time() - start_time))))
print("Time to train model: " + time_usage)
scores = np.zeros((len(x_test),))
observed_data = x_train_task
# # testing inside for
# t_ind = np.random.randint(transformer.n_transforms)
# observed_dirichlet = mdl.predict(
# transformer.transform_batch(observed_data, [t_ind] * len(observed_data)),
# batch_size=1024)
# predicted_labels = np.argmax(observed_dirichlet, axis=-1)
# print('index to predict: ', t_ind, '\nPredicted counts: ',
# np.unique(predicted_labels, return_counts=True))
# log_p_hat_train = np.log(observed_dirichlet).mean(axis=0)
# print('log_p_hat_train.shape: ', log_p_hat_train.shape)
# alpha_sum_approx = calc_approx_alpha_sum(observed_dirichlet)
# print('alpha_sum_approx.shape: ', alpha_sum_approx.shape)
# alpha_0 = observed_dirichlet.mean(axis=0) * alpha_sum_approx
# print('alpha_0.shape: ', alpha_0.shape)
# mle_alpha_t = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)
# print('mle_alpha_t.shape: ', mle_alpha_t.shape)
# x_test_p = mdl.predict(
# transformer.transform_batch(x_test, [t_ind] * len(x_test)),
# batch_size=1024)
# predicted_test_labels = np.argmax(x_test_p, axis=-1)
# print('index to predict: ', t_ind, '\nPredicted test counts: ',
# np.unique(predicted_test_labels, return_counts=True))
#
# score_for_specific_transform = dirichlet_normality_score(mle_alpha_t,
# x_test_p)
# print('score_for_specific_transform.shape: ',
# score_for_specific_transform.shape)
# Dirichlet transforms
for t_ind in tqdm(range(transformer.n_transforms)):
# predictions for a single transformation
observed_dirichlet = mdl.predict(
transformer.transform_batch(observed_data,
[t_ind] * len(observed_data)),
batch_size=1024)
log_p_hat_train = np.log(observed_dirichlet).mean(axis=0)
alpha_sum_approx = calc_approx_alpha_sum(observed_dirichlet)
alpha_0 = observed_dirichlet.mean(axis=0) * alpha_sum_approx
mle_alpha_t = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)
x_test_p = mdl.predict(
transformer.transform_batch(x_test, [t_ind] * len(x_test)),
batch_size=1024)
scores += dirichlet_normality_score(mle_alpha_t, x_test_p)
scores /= transformer.n_transforms
labels = y_test.flatten() == single_class_ind
plot_histogram_disc_loss_acc_thr(scores[labels], scores[~labels], path='../results',
x_label_name='Transformations_Dscores_fashion')
# Dirichlet transforms with arcsin
neg_scores = -scores
norm_scores = neg_scores - np.min(neg_scores)
norm_scores = norm_scores / np.max(norm_scores)
arcsinh_scores = np.arcsinh(norm_scores * 10000)
inlier_arcsinh_score = arcsinh_scores[labels]
outlier_arcsinh_score = arcsinh_scores[~labels]
plot_histogram_disc_loss_acc_thr(inlier_arcsinh_score, outlier_arcsinh_score,
'../results',
'Transformations_arcsinh*10000_Dscores_fashion')
# Transforms without dirichlet
plain_scores = np.zeros((len(x_test),))
for t_ind in tqdm(range(transformer.n_transforms)):
# predictions for a single transformation
x_test_p = mdl.predict(
transformer.transform_batch(x_test, [t_ind] * len(x_test)),
batch_size=1024)
plain_scores += x_test_p[:, t_ind]
plain_scores /= transformer.n_transforms
labels = y_test.flatten() == single_class_ind
plot_histogram_disc_loss_acc_thr(plain_scores[labels], plain_scores[~labels], path='../results',
x_label_name='Transformations_scores_fashion')
# Transforms without dirichlet arcsinh
plain_neg_scores = 1-plain_scores
plain_norm_scores = plain_neg_scores - np.min(plain_neg_scores)
plain_norm_scores = plain_norm_scores / plain_norm_scores.max()
plain_arcsinh_scores = np.arcsinh(plain_norm_scores * 10000)
plot_histogram_disc_loss_acc_thr(plain_arcsinh_scores[labels],
plain_arcsinh_scores[~labels], path='../results',
x_label_name='Transformations_arcsinh*10000_scores_fashion') | [
"tensorflow.ConfigProto",
"modules.data_loaders.base_line_loaders.load_fashion_mnist",
"numpy.arange",
"keras.backend.tensorflow_backend.set_session",
"sys.path.append",
"scripts.detached_transformer_od_hits.calc_approx_alpha_sum",
"os.path.dirname",
"scripts.detached_transformer_od_hits.fixed_point_d... | [((99, 128), 'sys.path.append', 'sys.path.append', (['PROJECT_PATH'], {}), '(PROJECT_PATH)\n', (114, 128), False, 'import os, sys\n'), ((770, 786), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (784, 786), True, 'import tensorflow as tf\n'), ((884, 909), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (894, 909), True, 'import tensorflow as tf\n'), ((912, 929), 'keras.backend.tensorflow_backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (923, 929), False, 'from keras.backend.tensorflow_backend import set_session\n'), ((996, 1016), 'modules.data_loaders.base_line_loaders.load_fashion_mnist', 'load_fashion_mnist', ([], {}), '()\n', (1014, 1016), False, 'from modules.data_loaders.base_line_loaders import load_fashion_mnist\n'), ((1079, 1096), 'transformations.Transformer', 'Transformer', (['(8)', '(8)'], {}), '(8, 8)\n', (1090, 1096), False, 'from transformations import Transformer\n'), ((1123, 1250), 'models.wide_residual_network.create_wide_residual_network', 'create_wide_residual_network', ([], {'input_shape': 'x_train.shape[1:]', 'num_classes': 'transformer.n_transforms', 'depth': 'n', 'widen_factor': 'k'}), '(input_shape=x_train.shape[1:], num_classes=\n transformer.n_transforms, depth=n, widen_factor=k)\n', (1151, 1250), False, 'from models.wide_residual_network import create_wide_residual_network\n'), ((1871, 1882), 'time.time', 'time.time', ([], {}), '()\n', (1880, 1882), False, 'import time\n'), ((2254, 2265), 'time.time', 'time.time', ([], {}), '()\n', (2263, 2265), False, 'import time\n'), ((4948, 5085), 'scripts.detached_transformer_od_hits.plot_histogram_disc_loss_acc_thr', 'plot_histogram_disc_loss_acc_thr', (['scores[labels]', 'scores[~labels]'], {'path': '"""../results"""', 'x_label_name': '"""Transformations_Dscores_fashion"""'}), "(scores[labels], scores[~labels], path=\n '../results', 
x_label_name='Transformations_Dscores_fashion')\n", (4980, 5085), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((5294, 5325), 'numpy.arcsinh', 'np.arcsinh', (['(norm_scores * 10000)'], {}), '(norm_scores * 10000)\n', (5304, 5325), True, 'import numpy as np\n'), ((5426, 5574), 'scripts.detached_transformer_od_hits.plot_histogram_disc_loss_acc_thr', 'plot_histogram_disc_loss_acc_thr', (['inlier_arcsinh_score', 'outlier_arcsinh_score', '"""../results"""', '"""Transformations_arcsinh*10000_Dscores_fashion"""'], {}), "(inlier_arcsinh_score,\n outlier_arcsinh_score, '../results',\n 'Transformations_arcsinh*10000_Dscores_fashion')\n", (5458, 5574), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((6068, 6216), 'scripts.detached_transformer_od_hits.plot_histogram_disc_loss_acc_thr', 'plot_histogram_disc_loss_acc_thr', (['plain_scores[labels]', 'plain_scores[~labels]'], {'path': '"""../results"""', 'x_label_name': '"""Transformations_scores_fashion"""'}), "(plain_scores[labels], plain_scores[~labels\n ], path='../results', x_label_name='Transformations_scores_fashion')\n", (6100, 6216), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((6482, 6519), 'numpy.arcsinh', 'np.arcsinh', (['(plain_norm_scores * 10000)'], {}), '(plain_norm_scores * 10000)\n', (6492, 6519), True, 'import numpy as np\n'), ((6523, 6705), 'scripts.detached_transformer_od_hits.plot_histogram_disc_loss_acc_thr', 'plot_histogram_disc_loss_acc_thr', (['plain_arcsinh_scores[labels]', 'plain_arcsinh_scores[~labels]'], {'path': '"""../results"""', 'x_label_name': '"""Transformations_arcsinh*10000_scores_fashion"""'}), 
"(plain_arcsinh_scores[labels],\n plain_arcsinh_scores[~labels], path='../results', x_label_name=\n 'Transformations_arcsinh*10000_scores_fashion')\n", (6555, 6705), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((65, 90), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (80, 90), False, 'import os, sys\n'), ((1728, 1763), 'numpy.arange', 'np.arange', (['transformer.n_transforms'], {}), '(transformer.n_transforms)\n', (1737, 1763), True, 'import numpy as np\n'), ((1947, 2004), 'numpy.repeat', 'np.repeat', (['x_train_task', 'transformer.n_transforms'], {'axis': '(0)'}), '(x_train_task, transformer.n_transforms, axis=0)\n', (1956, 2004), True, 'import numpy as np\n'), ((4496, 4537), 'scripts.detached_transformer_od_hits.calc_approx_alpha_sum', 'calc_approx_alpha_sum', (['observed_dirichlet'], {}), '(observed_dirichlet)\n', (4517, 4537), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((4622, 4673), 'scripts.detached_transformer_od_hits.fixed_point_dirichlet_mle', 'fixed_point_dirichlet_mle', (['alpha_0', 'log_p_hat_train'], {}), '(alpha_0, log_p_hat_train)\n', (4647, 4673), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((4810, 4858), 'scripts.detached_transformer_od_hits.dirichlet_normality_score', 'dirichlet_normality_score', (['mle_alpha_t', 'x_test_p'], {}), '(mle_alpha_t, x_test_p)\n', (4835, 4858), False, 'from scripts.detached_transformer_od_hits import calc_approx_alpha_sum, fixed_point_dirichlet_mle, dirichlet_normality_score, plot_histogram_disc_loss_acc_thr\n'), ((5206, 5224), 'numpy.min', 'np.min', (['neg_scores'], {}), '(neg_scores)\n', (5212, 5224), True, 
'import numpy as np\n'), ((5255, 5274), 'numpy.max', 'np.max', (['norm_scores'], {}), '(norm_scores)\n', (5261, 5274), True, 'import numpy as np\n'), ((6366, 6390), 'numpy.min', 'np.min', (['plain_neg_scores'], {}), '(plain_neg_scores)\n', (6372, 6390), True, 'import numpy as np\n'), ((2306, 2342), 'keras.utils.to_categorical', 'to_categorical', (['transformations_inds'], {}), '(transformations_inds)\n', (2320, 2342), False, 'from keras.utils import to_categorical\n'), ((2398, 2437), 'numpy.ceil', 'np.ceil', (['(200 / transformer.n_transforms)'], {}), '(200 / transformer.n_transforms)\n', (2405, 2437), True, 'import numpy as np\n'), ((4432, 4458), 'numpy.log', 'np.log', (['observed_dirichlet'], {}), '(observed_dirichlet)\n', (4438, 4458), True, 'import numpy as np\n'), ((2097, 2108), 'time.time', 'time.time', ([], {}), '()\n', (2106, 2108), False, 'import time\n'), ((2514, 2525), 'time.time', 'time.time', ([], {}), '()\n', (2523, 2525), False, 'import time\n')] |
"""
-*- binomial tree class for pricing options -*-
"""
class tree:
    def __init__(self, params = None, **kwargs):
        """
        Configure and (by default) immediately build a binomial option tree.

        Parameters can be supplied as a single dict (``params``), as keyword
        arguments, or both; keyword arguments override dict entries on key
        collisions. Required inputs: 'spot', 'strike', at least two of
        ('T', 'dt', 'periods'), and at least one of ('vola', 'u', 'd').
        Unless ``test=True`` is passed, ``self.calculate()`` runs at the end,
        and ``write=True`` additionally calls ``self.write()``.
        """
        import numpy as np
        import pathlib
        import os
        ################## unpacking of params dict and/or keyword arguments ######################
        # keyword arguments win over the params dict on key collisions
        kwunion = kwargs
        if isinstance(params, dict):
            kwunion = {**params, **kwargs}
        nothingPassed = [isinstance(params, type(None)), len(kwargs) == 0]
        wrongParams = [not isinstance(params, (type(None), dict)), len(kwargs) == 0]
        if all(nothingPassed) or all(wrongParams):
            self.help(['params', 'params_examples'])
            raise ValueError('See specifications above')
        self.kwunion = kwunion
        ########################## directory, filename specification ##############################
        direc = kwunion.get('direc', None) # if None -> current working directory
        folname = kwunion.get('folname', None) # 'OutputFolder' if direc is None
        fname = kwunion.get('fname', 'binotree')
        # resolve the output folder and .xlsx file path; a path that points at
        # a file (or carries a suffix) is stripped back to its parent directory
        def findmakedirec(direc = None, folname = None, fname = 'binotree', filetype = '.xlsx'):
            foldir = str()
            dirfile = str()
            if direc is None:
                curwd = os.path.abspath(os.getcwd())
                if folname is None:
                    foldir = os.path.join(curwd, 'OutputFolder')
                else:
                    foldir = os.path.join(curwd, str(folname))
                pdirec = pathlib.Path(foldir).resolve()
                if pdirec.is_file():
                    pdirec = pdirec.parent
                if pdirec.suffix != '':
                    pdirec = pdirec.parent
                foldir = str(pdirec)
                dirfile = os.path.join(foldir, str(fname) + filetype)
            elif direc is not None:
                if folname is not None:
                    pdirec = pathlib.Path(os.path.join(str(direc), str(folname))).resolve()
                else:
                    pdirec = pathlib.Path(str(direc)).resolve()
                if pdirec.is_file():
                    pdirec = pdirec.parent
                if pdirec.suffix != '':
                    pdirec = pdirec.parent
                foldir = str(pdirec)
                dirfile = os.path.join(foldir, str(fname) + filetype)
            filename = fname + filetype
            return foldir, dirfile, filename
        foldir, dirfile, filename = findmakedirec(direc = direc, folname = folname, fname = fname)
        self.foldir = foldir
        self.dirfile = dirfile
        self.filename = filename
        ############################## spot and strike specification ##############################
        spot = kwunion.get('spot', False)
        if spot is False:
            self.help(['params'])
            raise ValueError("'spot' parameter must be specified")
        strike = kwunion.get('strike', False)
        if strike is False:
            self.help(['params'])
            raise ValueError("'strike' parameter must be specified")
        if not isinstance(spot, (float, int)):
            self.help(['params_examples'])
            raise TypeError('spot must be a number (float or int)')
        elif not isinstance(strike, (float, int)):
            self.help(['params_examples'])
            raise TypeError('strike must be a number (float or int)')
        self.spot = spot
        self.strike = strike
        ############################## time parameters specification ##############################
        # any two of (T, dt, periods) determine the third; when all three are
        # passed, dt is recomputed as T / periods
        T = kwunion.get('T', None)
        dt = kwunion.get('dt', None)
        periods = kwunion.get('periods', None)
        if isinstance(periods, float):
            periods = int(periods)
        dtfreq = kwunion.get('dtfreq', None)
        noPeriods = [T is not None, dt is not None, periods is None]
        nodt = [T is not None, periods is not None, dt is None]
        noT = [dt is not None, periods is not None, T is None]
        allPassed = [dt is not None, periods is not None, T is not None]
        nonePassed = [dt is None, periods is None, T is None]
        if all(noPeriods):
            periods = int(T / dt)
        elif all(nodt):
            dt = T / periods
        elif all(noT):
            T = dt * periods
        elif all(allPassed):
            dt = T / periods
        elif all(nonePassed):
            self.help(['params_examples', 'time'])
        else:
            self.help(['params_examples', 'time'])
        self.T = T
        self.dt = dt
        self.periods = int(periods)
        self.dtfreq = dtfreq
        # dtfreq and header
        # build the spreadsheet header row: plain period numbers, or dt
        # fractions such as '3/365', '2/52', '1/12' when rendering in dt units
        dtfreqBool = [dtfreq == 'd', dtfreq == 'w', dtfreq == 'm']
        if any(dtfreqBool):
            self.headerformat = kwunion.get('headerformat', 'dt')
        else:
            self.headerformat = kwunion.get('headerformat', 'periods')
        treeheader = ['Periods ->'] + np.arange(self.periods + 1).tolist()
        headerformat_is_dt = self.headerformat == 'dt'
        nodtfreq = self.dtfreq is None
        dt_is_d = self.dt == 1/365
        dt_is_w = self.dt == 1/52
        dt_is_m = self.dt == 1/12
        if headerformat_is_dt and nodtfreq and not any([dt_is_d, dt_is_w, dt_is_m]):
            treeheader = ['dt ->'] + np.arange(0,
                self.T + self.dt, self.dt).tolist()
        elif headerformat_is_dt and nodtfreq and dt_is_d:
            dtstep = int(self.dt * 365)
            treeheader = ['dt ->'] + np.char.add((np.arange(self.periods + 1) * dtstep).astype(str),
                np.array(['/365'] * (self.periods + 1))).tolist()
        elif headerformat_is_dt and nodtfreq and dt_is_w:
            dtstep = int(self.dt * 52)
            treeheader = ['dt ->'] + np.char.add((np.arange(self.periods + 1) * dtstep).astype(str),
                np.array(['/52'] * (self.periods + 1))).tolist()
        elif headerformat_is_dt and nodtfreq and dt_is_m:
            dtstep = int(self.dt * 12)
            treeheader = ['dt ->'] + np.char.add((np.arange(self.periods + 1) * dtstep).astype(str),
                np.array(['/12'] * (self.periods + 1))).tolist()
        elif headerformat_is_dt and self.dtfreq == 'd':
            dtstep = int(self.dt * 365)
            treeheader = ['dt ->'] + np.char.add((np.arange(self.periods + 1)*dtstep).astype(str),
                np.array(['/365'] * (self.periods + 1))).tolist()
        elif headerformat_is_dt and self.dtfreq == 'w':
            dtstep = int(self.dt * 52)
            treeheader = ['dt ->'] + np.char.add((np.arange(self.periods + 1)*dtstep).astype(str),
                np.array(['/52'] * (self.periods + 1))).tolist()
        elif headerformat_is_dt and self.dtfreq == 'm':
            dtstep = int(self.dt * 12)
            treeheader = ['dt ->'] + np.char.add((np.arange(self.periods + 1)*dtstep).astype(str),
                np.array(['/12'] * (self.periods + 1))).tolist()
        self.treeheader = treeheader
        ############################## interest rate++ specification ##############################
        # per-period discount factors; continuous compounding when rcont else discrete
        r = kwunion.get('r', 0.0)
        rcont = kwunion.get('rcont', True)
        divyield = kwunion.get('divyield', 0)
        if rcont:
            discountRate = np.exp(-r * dt)
            discountDiv = np.exp(-divyield * dt)
            discountRateMinusDiv = np.exp((r - divyield) * dt)
        else:
            discountDiv = 1 / ((1 + divyield)**dt)
            discountRate = 1 / ((1 + r)**dt)
            discountRateMinusDiv = (1 + (r - divyield))**dt
        discdiv = kwunion.get('discdiv', None)
        nonrec = kwunion.get('nonrec', False)
        treetype = 'normal'
        # NOTE(review): the treetype flag lists below are evaluated BEFORE
        # discdiv is reset to None, so discdiv == 0 together with nonrec=True
        # still selects the 'nonrecombining' treetype — confirm this is intended.
        zeroDiscreteDividends = [discdiv == 0, discdiv == float(0)]
        discdivAndNonrec = [discdiv is not None, nonrec is True]
        discdivAndFsol = [ discdiv is not None, nonrec is False]
        if any(zeroDiscreteDividends):
            discdiv = None
        if all(discdivAndNonrec):
            treetype = 'nonrecombining'
        elif all(discdivAndFsol):
            treetype = 'fsolution'
        self.r = r
        self.rcont = rcont
        self.divyield = divyield
        self.discountRate = discountRate
        self.discountDiv = discountDiv
        self.discountRateMinusDiv = discountRateMinusDiv
        self.discdiv = discdiv
        self.nonrec = nonrec
        self.treetype = treetype
        ############################## up, down & vola specification ##############################
        # default up/down factor generator: u = exp(vola*sqrt(dt)), d = 1/u
        def udfunc_default(vola = None, T = None, dt = None, periods = None,
            r = None, divyield = None, discountRate = None,
            discountDiv = None, discountRateMinusDiv = None,
            spot = None, strike = None):
            import numpy as np
            u = np.exp(vola * np.sqrt(dt))
            d = 1 / u
            return u, d
        self.udfunc = kwunion.get('udfunc', udfunc_default)
        if not callable(self.udfunc):
            self.udfunc = udfunc_default
        vola = kwunion.get('vola', False)
        u = kwunion.get('u', False)
        d = kwunion.get('d', False)
        # mutually exclusive combinations of which of (vola, u, d) were passed;
        # whichever two are missing are derived from the one(s) supplied
        volaPassed = [vola is not False, u is False, d is False]
        uPassed = [u is not False, vola is False, d is False]
        dPassed = [d is not False, vola is False, u is False]
        udPassed = [vola is False, u is not False, d is not False]
        volauPassed = [d is False, vola is not False, u is not False]
        voladPassed = [u is False, vola is not False, d is not False]
        volaudPassed = [u is not False, d is not False, vola is not False]
        if all(volaPassed):
            u, d = self.udfunc(vola = vola, T = T, dt = dt, periods = periods,
                r = r, divyield = divyield, discountRate = discountRate,
                discountDiv = discountDiv, discountRateMinusDiv = discountRateMinusDiv,
                spot = spot, strike = strike)
        elif all(uPassed):
            d = 1 / u
            vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
        elif all(dPassed):
            u = 1 / d
            vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
        elif all(udPassed):
            vola = (np.log(u) -
                np.log(d)) / (2 * np.sqrt(dt))
        elif all(volauPassed):
            d = u / (np.exp(2 * vola *
                np.sqrt(dt)))
        elif all(voladPassed):
            u = d * (np.exp(2 * vola *
                np.sqrt(dt)))
        elif all(volaudPassed):
            # all three supplied: vola is overwritten for consistency with u/d
            vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
            if self.udfunc == udfunc_default:
                print(f"Since 'u', 'd', and 'vola' were passed explicitly"
                    f"\n-> generated new vola: {round(vola * 100, int(kwunion.get('rounding', 2)))}%"
                    f"\nfrom formula: vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))\n")
            else:
                print(f"Since 'u', 'd', and 'vola' were passed explicitly"
                    f"\n-> generated new vola: {round(vola * 100, int(kwunion.get('rounding', 2)))}%"
                    f"\nfrom formula: {self.udfunc}\n")
        else:
            self.help(['params', 'params_examples'])
            raise KeyError("Neither 'vola', 'u', or 'd' were found in passed parameters. \n"
                "At least one of 'vola', 'u', or 'd' must be passed\n"
                "See how to pass parameters above")
        self.vola = vola
        self.u = u
        self.d = d
        self.collapsed = kwunion.get('collapsed', False)
        ######################### risk-neutral probability specification ##########################
        # risk-neutral up-move probability used in the backward induction
        self.q = (discountRateMinusDiv - d) / (u - d)
        ################################## class wide functions ###################################
        # split a lower-triangular array into one python list per tree level
        def makenl(arr):
            ind = np.arange(1, len(arr)).cumsum()
            nl = arr[np.tril_indices_from(arr)]
            nl = np.split(nl, ind)
            nl = list(map(lambda x: x.tolist(), nl))
            return nl
        self.makenl = makenl
        # per-level (up-count, down-count) index pairs for walking the lattice
        def updownzip(periods):
            columnsnr = int(periods + 1)
            ind = np.arange(1, columnsnr).cumsum()
            # up indices
            upind = np.ones((columnsnr, columnsnr))
            upind[np.triu_indices_from(upind, 0)] = 0
            upind = upind.cumsum(0)
            upind = upind[np.tril_indices_from(upind)].astype(int)
            upind = np.split(upind, ind)
            # down indices
            downind = np.arange(columnsnr) * np.ones((columnsnr, columnsnr))
            downind[np.triu_indices_from(downind, 1)] = 0
            downind = downind[np.tril_indices_from(downind)].astype(int)
            downind = np.split(downind, ind)
            updownzipped = list(zip(upind, downind))
            return updownzipped
        self.updownzip = updownzip
        ################################### tree specification ####################################
        # rowPad: spreadsheet rows per node (spot / [intrinsic] / (premium))
        self.showIntrinsic = kwunion.get('showIntrinsic', True)
        if self.showIntrinsic is True:
            self.rowPad = 3
        elif self.showIntrinsic is False:
            self.rowPad = 2
        self.rounding = int(kwunion.get('rounding', 2))
        ################################ which trees to calculate #################################
        # 'ec'/'ep'/'ac'/'ap' = european/american call/put
        self.maketrees = kwunion.get('maketrees', ['ec', 'ep', 'ac', 'ap'])
        self.makedfs = kwunion.get('makedfs', True)
        self.trees = dict()
        ########################### run proper tree construction method ###########################
        self.dfcalled = kwunion.get('dfcalled', False)
        self.called = kwunion.get('called', False)
        self.portfolios = kwunion.get('portfolios', False)
        if kwunion.get('test', False) is True:
            pass
        else:
            self.calculate()
        if kwunion.get('write', False) is True:
            self.write()
###########################################################################################
def spotsUpDownInd(self, spot, periods, archive = True, spotname = 'spotarr', ftree = False):
import numpy as np
colnum = int(periods + 1)
u = self.u
d = self.d
if ftree:
volaF = (self.spot / spot) * self.vola
u, d = self.udfunc(vola = volaF, T = self.T, dt = self.dt, periods = self.periods,
r = self.r, divyield = self.divyield, discountRate = self.discountRate,
discountDiv = self.discountDiv, discountRateMinusDiv = self.discountRateMinusDiv,
spot = self.spot, strike = self.strike)
self.volaF = volaF
up = np.ones((colnum, colnum))
up[np.triu_indices_from(up, 0)] = 0
up = up.cumsum(0)
ua = u**up
ua[np.triu_indices_from(ua, 1)] = 0
do = np.arange(colnum) * np.ones_like(up)
do[np.triu_indices_from(do, 1)] = 0
daa = d**do
daa[np.triu_indices_from(daa, 1)] = 0
updownArr = ua * daa
updoind = [up.astype(int), do.astype(int)]
spotarr = (updownArr * spot).round(self.rounding)
if archive is True:
self.updoind = updoind
setattr(self, spotname, spotarr)
return spotarr, updoind
    def normaltrees(self, optType, spots, manualOpt = None,
        manualDeltas = None, manualBonds = None, manualIntr = None):
        """
        Price one option type on a recombining spot lattice.

        optType is a two-letter code: first letter 'e'/'a' (european /
        american), second letter 'c'/'p' (call / put), e.g. 'ec', 'ap'.
        spots is the lower-triangular spot array from spotsUpDownInd.
        The manual* arguments override the terminal premiums, the last-period
        deltas / bonds, or the whole intrinsic array — presumably for the
        discrete-dividend tree variants; confirm against callers.

        Returns (intrinsic, options, deltas, bonds): lower-triangular arrays
        of the same shape as spots, each rounded to self.rounding decimals.
        """
        import numpy as np
        # intrinsic values: exercise payoff at every node (call by default)
        intrinsic = np.maximum(spots - self.strike, np.zeros_like(spots))
        if optType[1] == 'p':
            intrinsic = np.maximum(self.strike - spots, np.zeros_like(spots))
        intrinsic[np.triu_indices_from(intrinsic, 1)] = 0
        if manualIntr is not None:
            intrinsic = manualIntr
        # premiums: seed the last period with the terminal payoffs
        options = np.zeros_like(spots)
        options[-1] = intrinsic[-1]
        if manualOpt is not None:
            options[-1] = manualOpt
        # check against based on type: american premiums are floored at the
        # early-exercise value, european ones at zero
        ind = np.arange(1, len(spots)-1).cumsum()
        checkagainst = np.zeros_like(spots[:-1, :-1])
        checkagainst = np.split(checkagainst[np.tril_indices_from(checkagainst)], ind)
        checkagainst = checkagainst[::-1]
        if optType[0] == 'a':
            checkagainst = intrinsic[:-1, :-1].copy()
            checkagainst = np.split(checkagainst[np.tril_indices_from(checkagainst)], ind)
            checkagainst = checkagainst[::-1]
        # backward induction: discounted risk-neutral expectation of the two
        # successor nodes, walking the lattice from the last period to the root
        for col in enumerate(self.updownzip(len(spots)-1)[::-1][1:]):
            up = (col[1][0].astype(int) + col[1][1].astype(int) + 1, col[1][1].astype(int))
            down = (col[1][0].astype(int) + col[1][1].astype(int) + 1, col[1][1].astype(int) + 1)
            optnew = np.maximum(self.discountRate * (self.q * options[up] + (1 - self.q) * options[down]),
                checkagainst[col[0]])
            options[(up[0] - 1, up[1])] = optnew
        # portfolios: per-node replication (option = spot*delta + bond)
        optu = options[np.tril_indices_from(options, -1)]
        optd = options[1:, 1:][np.tril_indices_from(options[1:, 1:])]
        spotu = spots[np.tril_indices_from(spots, -1)]
        spotd = spots[1:, 1:][np.tril_indices_from(spots[1:, 1:])]
        d = (self.discountDiv * ((optu - optd) / (spotu - spotd)))
        deltas = np.zeros_like(spots)
        deltas[np.tril_indices_from(spots[:-1, :-1])] = d
        if manualDeltas is not None:
            deltas[-1] = manualDeltas
        b = (self.discountRate * ((self.u * optd - self.d * optu) / (self.u - self.d)))
        bonds = np.zeros_like(spots)
        bonds[np.tril_indices_from(spots[:-1, :-1])] = b
        if manualBonds is not None:
            bonds[-1] = manualBonds
        intrinsic = intrinsic.round(self.rounding)
        options = options.round(self.rounding)
        deltas = deltas.round(self.rounding)
        bonds = bonds.round(self.rounding)
        return intrinsic, options, deltas, bonds
    def getOptionsNormal(self, optType):
        """
        Build the full tree object for one option type on the recombining
        lattice and attach it to self.

        Computes spots, intrinsics, premiums, deltas and bonds via
        ``normaltrees``, wraps them in a tree object (with a pandas DataFrame
        rendering when self.makedfs is True, a lighter object otherwise),
        and stores it as e.g. self.ecTree / self.ecOptionPrice /
        self.ecIntrinsics plus an entry in self.trees. Returns the tree object.
        """
        import numpy as np
        # NOTE(review): getattr's default argument is evaluated eagerly, so
        # spotsUpDownInd runs here (twice) even when self.spotarr/self.updoind
        # already exist — confirm whether lazy caching was intended.
        spotarr = getattr(self, 'spotarr', self.spotsUpDownInd(self.spot, self.periods)[0])
        updoind = getattr(self, 'updoind', self.spotsUpDownInd(self.spot, self.periods)[1])
        intrinsic, options, deltas, bonds = self.normaltrees(optType, spotarr)
        # flat tree indices for array: map each node to its spreadsheet cell
        upflat = updoind[0][np.tril_indices_from(updoind[0])]
        downflat = updoind[1][np.tril_indices_from(updoind[1])]
        treecols = upflat + downflat
        treerows = (self.periods * self.rowPad) - (upflat * self.rowPad) + (downflat * self.rowPad)
        rows = int(2 * (self.periods * self.rowPad) + self.rowPad)
        if self.collapsed is True:
            # collapsed layout: nodes stacked top-down instead of diamond-shaped
            treerows = downflat * self.rowPad
            rows = int((self.periods + 1) * self.rowPad)
        # tree construction
        if self.makedfs is True:
            # tree variant that carries a pandas DataFrame rendering
            class treeWithDF:
                def __init__(self, spots, intrinsic, options, deltas, bonds, ups, downs, colIndFlat, rowIndFlat,
                    header, mainobject, rows, optType):
                    import pandas as pd
                    self.optType = optType
                    # nested list trees
                    self.spots = mainobject.makenl(spots)
                    self.intrinsic = mainobject.makenl(intrinsic)
                    self.options = mainobject.makenl(options)
                    self.deltas = mainobject.makenl(deltas)
                    self.bonds = mainobject.makenl(bonds)
                    self.ups = mainobject.makenl(ups)
                    self.downs = mainobject.makenl(downs)
                    # flat trees
                    self.spotsflat = spots[np.tril_indices_from(spots)]
                    self.intrinsicflat = intrinsic[np.tril_indices_from(intrinsic)]
                    self.optionsflat = options[np.tril_indices_from(options)]
                    self.deltasflat = deltas[np.tril_indices_from(deltas)]
                    self.bondsflat = bonds[np.tril_indices_from(bonds)]
                    self.upflat = ups[np.tril_indices_from(ups)]
                    self.downflat = downs[np.tril_indices_from(downs)]
                    self.mainobject = mainobject
                    if mainobject.portfolios:
                        # per-node replication strings: "premium = spot*delta + bond"
                        portstring1 = np.char.add(self.optionsflat.astype(str), ' = ')
                        portstring2 = np.char.add(portstring1, self.spotsflat.astype(str))
                        portstring3 = np.char.add(portstring2, '*')
                        portstring4 = np.char.add(portstring3, self.deltasflat.astype(str))
                        portstring5 = np.char.add(portstring4, ' + ')
                        portstring6 = np.char.add(portstring5, self.bondsflat.astype(str))
                        self.portsflat = portstring6
                    # dataframe array
                    NoneType = None
                    self.rows = rows
                    dfarr = np.full((rows, mainobject.periods + 1), None)
                    dfarr[rowIndFlat, colIndFlat] = self.spotsflat.round(mainobject.rounding)
                    if mainobject.showIntrinsic is True:
                        intrinsicString = np.char.add(np.array(['['] * len(self.intrinsicflat)),
                            self.intrinsicflat.round(mainobject.rounding).astype(str))
                        intrinsicString2 = np.char.add(intrinsicString, np.array([']'] * len(self.intrinsicflat)))
                        dfarr[rowIndFlat + 1, colIndFlat] = intrinsicString2
                        optionsString = np.char.add(np.array(['('] * len(self.optionsflat)),
                            self.optionsflat.round(mainobject.rounding).astype(str))
                        optionsString2 = np.char.add(optionsString, np.array([')'] * len(self.optionsflat)))
                        dfarr[rowIndFlat + 2, colIndFlat] = optionsString2
                        dfarr[np.where(dfarr == NoneType)] = ''
                        fc = np.array(['Spot', '[Intrinsic]', '(Premium)'] + [''] * (rows - 3))
                        dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
                    else:
                        optionsString = np.char.add(np.array(['('] * len(self.optionsflat)),
                            self.optionsflat.round(mainobject.rounding).astype(str))
                        optionsString2 = np.char.add(optionsString, np.array([')'] * len(self.optionsflat)))
                        dfarr[rowIndFlat + 1, colIndFlat] = optionsString2
                        dfarr[np.where(dfarr == NoneType)] = ''
                        fc = np.array(['Spot', '(Premium)'] + [''] * (rows - 2))
                        dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
                    self.colIndFlat = colIndFlat
                    self.rowIndFlat = rowIndFlat
                    self.treeheader = header
                    self.df = pd.DataFrame(dfarr, index = [''] * len(dfarr), columns = header)
                def portfoliosDF(self):
                    # DataFrame showing replication portfolios instead of premiums
                    import pandas as pd
                    import numpy as np
                    NoneType = None
                    def getports():
                        portstring1 = np.char.add(self.optionsflat.astype(str), ' = ')
                        portstring2 = np.char.add(portstring1, self.spotsflat.astype(str))
                        portstring3 = np.char.add(portstring2, '*')
                        portstring4 = np.char.add(portstring3, self.deltasflat.astype(str))
                        portstring5 = np.char.add(portstring4, ' + ')
                        portstring6 = np.char.add(portstring5, self.bondsflat.astype(str))
                        return portstring6
                    # NOTE(review): eager default — getports() runs even when
                    # self.portsflat already exists
                    ports = getattr(self, 'portsflat', getports())
                    dfarr = np.full((self.rows, self.mainobject.periods + 1), None)
                    dfarr[self.rowIndFlat, self.colIndFlat] = self.spotsflat
                    if self.mainobject.showIntrinsic is True:
                        intrinsicString = np.char.add(np.array(['['] * len(self.intrinsicflat)),
                            self.intrinsicflat.astype(str))
                        intrinsicString2 = np.char.add(intrinsicString, np.array([']'] * len(self.intrinsicflat)))
                        dfarr[self.rowIndFlat + 1, self.colIndFlat] = intrinsicString2
                        dfarr[self.rowIndFlat + 2, self.colIndFlat] = ports
                        dfarr[np.where(dfarr == NoneType)] = ''
                        fc = np.array(['Spot', '[Intrinsic]', '(Opt = S*∆ + B)'] + [''] * (self.rows - 3))
                        dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
                        portdf = pd.DataFrame(dfarr, index = [''] * len(dfarr), columns = self.treeheader)
                    else:
                        dfarr[self.rowIndFlat + 1, self.colIndFlat] = ports
                        dfarr[np.where(dfarr == NoneType)] = ''
                        # NOTE(review): 'rows' here resolves to the enclosing
                        # getOptionsNormal local via closure (self.rows holds the
                        # same value) — verify this indirection is intentional.
                        fc = np.array(['Spot', '(Opt = S*∆ + B)'] + [''] * (rows - 2))
                        dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
                        portdf = pd.DataFrame(dfarr, index = [''] * len(dfarr), columns = self.treeheader)
                    return portdf
                def getnode(self, up, down):
                    # fetch one node's values by (up-moves, down-moves)
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
                def __call__(self, up, down):
                    # calling the tree is shorthand for getnode
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
                def __repr__(self):
                    return self.df.__repr__()
            mytree = treeWithDF(spotarr, intrinsic, options, deltas, bonds,
                updoind[0], updoind[1], treecols, treerows, self.treeheader, self, rows, optType)
        else:
            # lightweight tree variant without any DataFrame rendering
            class treeWithoutDF:
                def __init__(self, spots, intrinsic, options, deltas, bonds,
                    ups, downs, colIndFlat, rowIndFlat, header, mainobject, optType):
                    self.optType = optType
                    # nested list trees
                    self.spots = mainobject.makenl(spots)
                    self.intrinsic = mainobject.makenl(intrinsic)
                    self.options = mainobject.makenl(options)
                    self.deltas = mainobject.makenl(deltas)
                    self.bonds = mainobject.makenl(bonds)
                    self.ups = mainobject.makenl(ups)
                    self.downs = mainobject.makenl(downs)
                    # flat trees
                    self.spotsflat = spots[np.tril_indices_from(spots)]
                    self.intrinsicflat = intrinsic[np.tril_indices_from(intrinsic)]
                    self.optionsflat = options[np.tril_indices_from(options)]
                    self.deltasflat = deltas[np.tril_indices_from(deltas)]
                    self.bondsflat = bonds[np.tril_indices_from(bonds)]
                    self.upflat = ups[np.tril_indices_from(ups)]
                    self.downflat = downs[np.tril_indices_from(downs)]
                    if mainobject.portfolios:
                        # per-node replication strings: "premium = spot*delta + bond"
                        portstring1 = np.char.add(self.optionsflat.astype(str), ' = ')
                        portstring2 = np.char.add(portstring1, self.spotsflat.astype(str))
                        portstring3 = np.char.add(portstring2, '*')
                        portstring4 = np.char.add(portstring3, self.deltasflat.astype(str))
                        portstring5 = np.char.add(portstring4, ' + ')
                        portstring6 = np.char.add(portstring5, self.bondsflat.astype(str))
                        self.portsflat = portstring6
                    self.colIndFlat = colIndFlat
                    self.rowIndFlat = rowIndFlat
                    self.treeheader = header
                def getnode(self, up, down):
                    # fetch one node's values by (up-moves, down-moves)
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
                def __call__(self, up, down):
                    # calling the tree is shorthand for getnode
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
            mytree = treeWithoutDF(spotarr, intrinsic, options, deltas,
                bonds, updoind[0], updoind[1], treecols, treerows, self.treeheader, self, optType)
        # setting tree object as attribute and return
        setattr(self, optType+'Tree', mytree)
        setattr(self, optType+'OptionPrice', mytree.optionsflat[0])
        setattr(self, optType+'Intrinsics', intrinsic)
        self.trees.update({optType + 'Tree': mytree})
        if optType == 'ec':
            # presumably Black–Scholes reference pricing for european options;
            # methods defined outside this view — confirm
            self.BScall()
        elif optType == 'ep':
            self.BSput()
        return mytree
def getOptionsFsol(self, optType):
import numpy as np
# indices, etc.
dt_all = np.arange(0, self.T + self.dt, self.dt)
divdt = np.array(self.discdiv)[:, 0]
divs = np.array(self.discdiv)[:, 1]
divind = np.abs(np.subtract.outer(dt_all, divdt)).argmin(0)
# present value of all dividends
def getpvdiv(binoObject, divs, divind):
pvdiv = (divs * (binoObject.discountRate**divind)).sum()
binoObject.pvdiv = pvdiv
return pvdiv
pvdiv = getattr(self, 'pvdiv', getpvdiv(self, divs, divind))
F0 = (self.spot - pvdiv).round(self.rounding)
# F tree
Ftree = getattr(self, 'Ftree', self.spotsUpDownInd(F0, self.periods, True, 'Ftree', ftree = True)[0])
FtreeShaved = Ftree[:divind.max() + 1, :divind.max() + 1]
updoind = getattr(self, 'updoind', self.spotsUpDownInd(F0, self.periods, True, 'Ftree', ftree = True)[1])
# S tree
divpowpv = np.linspace(divind, divind - len(Ftree) + 1, len(Ftree)).astype(int).T
divpowpv[divpowpv < 0] = 0
antidivind = np.where(divpowpv == 0)
divpowpv = self.discountRate**divpowpv
divpowpv[antidivind] = 0
divpowpv = (divpowpv.T[:] * divs).T
divpowpv = divpowpv.sum(0)
divpowpv[divind] += divs
spotarr = (Ftree.T + divpowpv).T.round(self.rounding)
spotarr[np.triu_indices_from(spotarr, 1)] = 0
self.spotarr = spotarr
# options, intrinsics, etc.
intrinsic, options, deltas, bonds = self.normaltrees(optType, spotarr)
# flat tree indices for arrays - spotarr and FtreeShaved
upflat = updoind[0][np.tril_indices_from(updoind[0])]
downflat = updoind[1][np.tril_indices_from(updoind[1])]
treecols = upflat + downflat
treerows = (self.periods * self.rowPad) - (upflat * self.rowPad) + (downflat * self.rowPad)
upindF = updoind[0][:divind.max() + 1, :divind.max() + 1]
doindF = updoind[1][:divind.max() + 1, :divind.max() + 1]
upflatF = upindF[np.tril_indices_from(upindF)]
downflatF = doindF[np.tril_indices_from(upindF)]
treecolsF = upflatF + downflatF
treerowsF = treerows[:len(treecolsF)]
treerowsS = treerows.copy()
treerowsS[:len(treerowsF)] -= 1
# rows ++
rows = int(2 * (self.periods * self.rowPad) + self.rowPad)
if self.collapsed is True:
rows = int((self.periods + 1) * self.rowPad)
treerowsF = (downflatF * (self.rowPad + 1)) + 1
treerows = np.hstack((treerowsF, downflat[len(treerowsF):] * self.rowPad))
treerowsS = np.hstack((treerowsF - 1, downflat[len(treerowsF):] * self.rowPad))
# check if div is in last period
if dt_all[-1] in divdt:
treerows += 1
treerowsS += 1
rows += 1
# tree construction
if self.makedfs is True:
class FtreeWithDF:
def __init__(self, spots, intrinsic, options, deltas, bonds, ups, downs, colIndFlat, rowIndFlat,
header, mainobject, rows, Ftree, colIndFlatF, rowIndFlatF, rowIndFlatS):
import pandas as pd
# nested list trees
self.spots = mainobject.makenl(spots)
self.Ftree = mainobject.makenl(Ftree)
self.intrinsic = mainobject.makenl(intrinsic)
self.options = mainobject.makenl(options)
self.deltas = mainobject.makenl(deltas)
self.bonds = mainobject.makenl(bonds)
self.ups = mainobject.makenl(ups)
self.downs = mainobject.makenl(downs)
# flat trees
self.spotsflat = spots[np.tril_indices_from(spots)]
self.Ftreeflat = Ftree[np.tril_indices_from(Ftree)]
self.intrinsicflat = intrinsic[np.tril_indices_from(intrinsic)]
self.optionsflat = options[np.tril_indices_from(options)]
self.deltasflat = deltas[np.tril_indices_from(deltas)]
self.bondsflat = bonds[np.tril_indices_from(bonds)]
self.upflat = ups[np.tril_indices_from(ups)]
self.downflat = downs[np.tril_indices_from(downs)]
self.mainobject = mainobject
if mainobject.portfolios:
portstring1 = np.char.add(self.optionsflat.astype(str), ' = ')
portstring2 = np.char.add(portstring1, self.spotsflat.astype(str))
portstring3 = np.char.add(portstring2, '*')
portstring4 = np.char.add(portstring3, self.deltasflat.astype(str))
portstring5 = np.char.add(portstring4, ' + ')
portstring6 = np.char.add(portstring5, self.bondsflat.astype(str))
self.portsflat = portstring6
# dataframe array
NoneType = None
self.rows = rows
dfarr = np.full((rows, mainobject.periods + 1), None)
dfarr[rowIndFlatS, colIndFlat] = self.spotsflat.round(mainobject.rounding)
FspotString = np.char.add(np.array(['{'] * len(self.Ftreeflat)),
self.Ftreeflat.round(mainobject.rounding).astype(str))
FspotString2 = np.char.add(FspotString, np.array(['}'] * len(self.Ftreeflat)))
dfarr[rowIndFlatF, colIndFlatF] = FspotString2
if mainobject.showIntrinsic is True:
intrinsicString = np.char.add(np.array(['['] * len(self.intrinsicflat)),
self.intrinsicflat.round(mainobject.rounding).astype(str))
intrinsicString2 = np.char.add(intrinsicString, np.array([']'] * len(self.intrinsicflat)))
dfarr[rowIndFlat + 1, colIndFlat] = intrinsicString2
optionsString = np.char.add(np.array(['('] * len(self.optionsflat)),
self.optionsflat.round(mainobject.rounding).astype(str))
optionsString2 = np.char.add(optionsString, np.array([')'] * len(self.optionsflat)))
dfarr[rowIndFlat + 2, colIndFlat] = optionsString2
dfarr[np.where(dfarr == NoneType)] = ''
fc = np.array(['Spot', '{F-Spot}', '[Intrinsic]', '(Premium)'] + [''] * (rows - 4))
dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
else:
optionsString = np.char.add(np.array(['('] * len(self.optionsflat)),
self.optionsflat.round(mainobject.rounding).astype(str))
optionsString2 = np.char.add(optionsString, np.array([')'] * len(self.optionsflat)))
dfarr[rowIndFlat + 1, colIndFlat] = optionsString2
dfarr[np.where(dfarr == NoneType)] = ''
fc = np.array(['Spot', '{F-Spot}', '(Premium)'] + [''] * (rows - 3))
dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
self.colIndFlat = colIndFlat
self.colIndFlatF = colIndFlatF
self.rowIndFlat = rowIndFlat
self.rowIndFlatS = rowIndFlatS
self.rowIndFlatF = rowIndFlatF
self.treeheader = header
self.df = pd.DataFrame(dfarr, index = [''] * len(dfarr), columns = header)
def portfoliosDF(self):
import pandas as pd
import numpy as np
NoneType = None
def getports():
portstring1 = np.char.add(self.optionsflat.astype(str), ' = ')
portstring2 = np.char.add(portstring1, self.spotsflat.astype(str))
portstring3 = np.char.add(portstring2, '*')
portstring4 = np.char.add(portstring3, self.deltasflat.astype(str))
portstring5 = np.char.add(portstring4, ' + ')
portstring6 = np.char.add(portstring5, self.bondsflat.astype(str))
return portstring6
ports = getattr(self, 'portsflat', getports())
dfarr = np.full((self.rows, self.mainobject.periods + 1), None)
dfarr[self.rowIndFlatS, self.colIndFlat] = self.spotsflat
FspotString = np.char.add(np.array(['{'] * len(self.Ftreeflat)),
self.Ftreeflat.round(self.mainobject.rounding).astype(str))
FspotString2 = np.char.add(FspotString, np.array(['}'] * len(self.Ftreeflat)))
dfarr[self.rowIndFlatF, self.colIndFlatF] = FspotString2
if self.mainobject.showIntrinsic is True:
intrinsicString = np.char.add(np.array(['['] * len(self.intrinsicflat)),
self.intrinsicflat.astype(str))
intrinsicString2 = np.char.add(intrinsicString, np.array([']'] * len(self.intrinsicflat)))
dfarr[self.rowIndFlat + 1, self.colIndFlat] = intrinsicString2
dfarr[self.rowIndFlat + 2, self.colIndFlat] = ports
dfarr[np.where(dfarr == NoneType)] = ''
fc = np.array(['Spot', '{F-Spot}', '[Intrinsic]', '(Opt = S*∆ + B)'] + [''] * (self.rows - 4))
dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
portdf = pd.DataFrame(dfarr, index = [''] * len(dfarr), columns = self.treeheader)
else:
dfarr[self.rowIndFlat + 1, self.colIndFlat] = ports
dfarr[np.where(dfarr == NoneType)] = ''
fc = np.array(['Spot', '{F-Spot}', '(Opt = S*∆ + B)'] + [''] * (self.rows - 3))
dfarr = np.hstack((fc.reshape(fc.shape[0], 1), dfarr))
portdf = pd.DataFrame(dfarr, index = [''] * len(dfarr), columns = self.treeheader)
return portdf
                def getnode(self, up, down):
                    """Return one tree node, including the F-tree value when available.

                    Nodes are indexed ``[up + down][down]`` (period, number of
                    down-moves). The 'F-Spot' entry is only included while the
                    node lies within the (shorter) forward tree, i.e. while
                    ``up + down`` does not exceed the F-tree's last column.
                    """
                    fdict = dict()
                    if up + down <= self.colIndFlatF[-1]:
                        fdict = {'F-Spot': self.Ftree[up + down][down]}
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, **fdict, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
                def __call__(self, up, down):
                    """Calling the tree is equivalent to :meth:`getnode`.

                    NOTE(review): duplicates ``getnode``; could delegate.
                    """
                    fdict = dict()
                    if up + down <= self.colIndFlatF[-1]:
                        fdict = {'F-Spot': self.Ftree[up + down][down]}
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, **fdict, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
                def __repr__(self):
                    # Delegate the printable representation to the tree's DataFrame.
                    return self.df.__repr__()
mytree = FtreeWithDF(spotarr, intrinsic, options, deltas, bonds, updoind[0], updoind[1],
treecols, treerows, self.treeheader, self, rows,
FtreeShaved, treecolsF, treerowsF, treerowsS)
else:
class FtreeWithoutDF:
def __init__(self, spots, intrinsic, options, deltas, bonds, ups, downs, colIndFlat, rowIndFlat,
header, mainobject, Ftree, colIndFlatF, rowIndFlatF, rowIndFlatS):
# nested list trees
self.spots = mainobject.makenl(spots)
self.Ftree = mainobject.makenl(Ftree)
self.intrinsic = mainobject.makenl(intrinsic)
self.options = mainobject.makenl(options)
self.deltas = mainobject.makenl(deltas)
self.bonds = mainobject.makenl(bonds)
self.ups = mainobject.makenl(ups)
self.downs = mainobject.makenl(downs)
# flat trees
self.spotsflat = spots[np.tril_indices_from(spots)]
self.Ftreeflat = Ftree[np.tril_indices_from(Ftree)]
self.intrinsicflat = intrinsic[np.tril_indices_from(intrinsic)]
self.optionsflat = options[np.tril_indices_from(options)]
self.deltasflat = deltas[np.tril_indices_from(deltas)]
self.bondsflat = bonds[np.tril_indices_from(bonds)]
self.upflat = ups[np.tril_indices_from(ups)]
self.downflat = downs[np.tril_indices_from(downs)]
self.colIndFlat = colIndFlat
self.colIndFlatF = colIndFlatF
self.rowIndFlat = rowIndFlat
self.rowIndFlatS = rowIndFlatS
self.rowIndFlatF = rowIndFlatF
self.treeheader = header
if mainobject.portfolios:
portstring1 = np.char.add(self.optionsflat.astype(str), ' = ')
portstring2 = np.char.add(portstring1, self.spotsflat.astype(str))
portstring3 = np.char.add(portstring2, '*')
portstring4 = np.char.add(portstring3, self.deltasflat.astype(str))
portstring5 = np.char.add(portstring4, ' + ')
portstring6 = np.char.add(portstring5, self.bondsflat.astype(str))
self.portsflat = portstring6
                def getnode(self, up, down):
                    """Return one tree node, including the F-tree value when available.

                    Nodes are indexed ``[up + down][down]`` (period, number of
                    down-moves); 'F-Spot' is only present while the node lies
                    within the shorter forward tree.
                    """
                    fdict = dict()
                    if up + down <= self.colIndFlatF[-1]:
                        fdict = {'F-Spot': self.Ftree[up + down][down]}
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, **fdict, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
                def __call__(self, up, down):
                    """Calling the tree is equivalent to :meth:`getnode`.

                    NOTE(review): duplicates ``getnode``; could delegate.
                    """
                    fdict = dict()
                    if up + down <= self.colIndFlatF[-1]:
                        fdict = {'F-Spot': self.Ftree[up + down][down]}
                    spot = self.spots[up + down][down]
                    intrinsic = self.intrinsic[up + down][down]
                    opt = self.options[up + down][down]
                    delta = self.deltas[up + down][down]
                    bond = self.bonds[up + down][down]
                    return dict(Spot = spot, **fdict, Intrinsic = intrinsic, Premium = opt, Delta = delta, Bond = bond)
mytree = FtreeWithoutDF(spotarr, intrinsic, options, deltas, bonds, updoind[0], updoind[1],
treecols, treerows, self.treeheader, self, FtreeShaved, treecolsF, treerowsF,
treerowsS)
# setting tree object as attribute and return
setattr(self, optType+'Tree', mytree)
setattr(self, optType+'OptionPrice', mytree.optionsflat[0])
setattr(self, optType + 'Intrinsics', intrinsic)
self.trees.update({optType + 'Tree': mytree})
if optType == 'ec':
self.BScall()
elif optType == 'ep':
self.BSput()
return mytree
def getOptionsNonrec(self, optType):
import numpy as np
import itertools
def updoNonrec(ups = 0, downs = 0, periods = 1):
upArange = np.arange(ups, ups + periods + 1)
up = np.linspace(upArange, upArange - len(upArange) + 1, len(upArange)).astype(int).T
up[np.triu_indices_from(up, 1)] = 0
do = np.arange(downs, downs + periods + 1) * np.ones_like(up)
do[np.triu_indices_from(do, 1)] = 0
return np.array([up, do])
self.updoNonrec = updoNonrec
# indices, etc.
dt_all = np.arange(0, self.T + self.dt, self.dt)
divdt = np.array(self.discdiv)[:, 0]
divs = np.array(self.discdiv)[:, 1]
divind = np.abs(np.subtract.outer(dt_all, divdt)).argmin(0)
startindPadded = np.hstack(([0], divind, [self.periods]))
startperiods = startindPadded[1:] - startindPadded[:-1]
startind = startindPadded[:-1]
ind = np.arange(1, self.periods + 1).cumsum()
def getpvdiv(binoObject, divs, divind):
pvdiv = (divs * (binoObject.discountRate**divind)).sum()
binoObject.pvdiv = pvdiv
return pvdiv
pvdiv = getattr(self, 'pvdiv', getpvdiv(self, divs, divind))
# calculating all spot trees
def makeSpotarrUpindDoind(mainobj, startperiods, divs):
spotarr = [[self.spotsUpDownInd(self.spot, startperiods[0], False)[0].round(self.rounding)]]
upind = [[np.array(self.spotsUpDownInd(self.spot, startperiods[0], False)[1][0])]]
doind = [[np.array(self.spotsUpDownInd(self.spot, startperiods[0], False)[1][1])]]
def sortNLarrays(nlarrs, flip = True):
arr = np.array(list(itertools.chain(*nlarrs)))
if flip is True:
arraySorted = np.sort(arr, 0)[::-1]
else:
arraySorted = np.sort(arr, 0)
arraySortedSplit = np.split(arraySorted, arraySorted.shape[0])
sortedlist = list(map(lambda x: np.squeeze(x), arraySortedSplit))
return sortedlist
for indper in enumerate(startperiods[1:]):
periodind = indper[0]
periodsLoop = indper[1]
previousTrees = spotarr[-1] # list of arrays/trees
spotexdiv = list(map(lambda x: x[-1] - divs[periodind], previousTrees)) # list of arrays
ups = list(map(lambda x: x[-1], upind[-1]))
downs = list(map(lambda x: x[-1], doind[-1]))
tempS = []
tempU = []
tempD = []
for i in enumerate(spotexdiv):
spotsLoop = list(map(lambda x:
np.array(self.spotsUpDownInd(x, periodsLoop, False)[0]).round(self.rounding),
i[1]))
upLoop = list(map(lambda u, d:
self.updoNonrec(u, d, periodsLoop)[0], ups[i[0]],
downs[i[0]]))
doLoop = list(map(lambda u, d:
self.updoNonrec(u, d, periodsLoop)[1], ups[i[0]],
downs[i[0]]))
tempS.append(spotsLoop)
tempU.append(upLoop)
tempD.append(doLoop)
spotarr.append(sortNLarrays(tempS))
upind.append(sortNLarrays(tempU))
doind.append(sortNLarrays(tempD, flip = False))
return spotarr, upind, doind
spotarr, upind, doind = makeSpotarrUpindDoind(self, startperiods, divs)
self.spotarr = spotarr
# calculating intrinsics
def addbackDiv(sa, d):
spots = sa[np.tril_indices_from(sa)]
spots[0] +=d
mal = np.zeros_like(sa)
mal[np.tril_indices_from(mal)] = spots
return mal
def intrinsicsCalc(s):
intrinsic = np.maximum(s - self.strike, np.zeros_like(s)).round(self.rounding)
if optType[1] == 'p':
intrinsic = np.maximum(self.strike - s, np.zeros_like(s)).round(self.rounding)
intrinsic[np.triu_indices_from(intrinsic, 1)] = 0
return intrinsic
def manualIntrinsics(spotarr, divs):
spotspre = []
dd = np.hsplit(divs, len(divs))
dd.insert(0, [0])
for sl, div in zip(spotarr, dd):
spotspre.append(list(map(addbackDiv, sl, [div] * len(sl))))
intrinsics = []
for spots in spotspre:
intrinsics.append(list(map(intrinsicsCalc, spots)))
return intrinsics
intrinsic = manualIntrinsics(spotarr, divs)
# options, etc
def makeOptionsDeltasEtc(spotarr, mainobj, optType, intrinsics):
options, deltas, bonds = [], [], []
manualOpt = None
for arrs, intr in zip(spotarr[::-1], intrinsics[::-1]):
if len(options) != 0:
manualOpt = np.array(options[-1])[:, 0, 0]
manualOpt = np.split(manualOpt, len(arrs))
iodb_collection = list(map(lambda arr, manO, manI:
mainobj.normaltrees(optType, arr, manualOpt = manO, manualIntr = manI),
arrs, manualOpt, intr))
else:
iodb_collection = list(map(lambda arr, manI:
mainobj.normaltrees(optType, arr, manualIntr = manI),
arrs, intr))
opts = list(map(lambda coll: coll[1].round(self.rounding), iodb_collection))
delts = list(map(lambda coll: coll[2].round(self.rounding), iodb_collection))
bnds = list(map(lambda coll: coll[3].round(self.rounding), iodb_collection))
# intrinsics.append(intr)
options.append(opts)
deltas.append(delts)
bonds.append(bnds)
options, deltas, bonds = options[::-1], deltas[::-1], bonds[::-1]
return options, deltas, bonds
options, deltas, bonds = makeOptionsDeltasEtc(spotarr, self, optType, intrinsic)
# rows
rows = int(2 * (self.periods * self.rowPad) + self.rowPad)
if self.collapsed is True:
rows = int((self.periods + 1) * self.rowPad)
# row indices
def makeTreerows(upind, doind, rowpad, periods):
treerows = []
for u1, d1 in zip(upind, doind):
rowsTemp = []
for u, d in zip(u1, d1):
treerowsTemp = (periods * rowpad) - (u * rowpad) + (d * rowpad)
treerowsTemp[np.triu_indices_from(treerowsTemp, 1)] = 0
rowsTemp.append(treerowsTemp)
treerows.append(rowsTemp)
return treerows
treerows = makeTreerows(upind, doind, self.rowPad, self.periods)
# column indices
def makeTreesInNode(startind, startindPadded, periods):
def treeones(totPeriods, start, end, startAdj):
treelist = []
for i, a in zip(range(start + 1), startAdj):
mal = np.zeros((int(totPeriods + 1), int(totPeriods + 1))).astype(int)
mal[start:end + 1, i:end + 1 - start + i][
np.tril_indices_from(mal[start:end + 1, i:end + 1 - start + i])] = 1
treelist.append(mal * a)
treearr = np.array(treelist).sum(0)
return treearr
mal = np.zeros((int(periods + 1), int(periods + 1))).astype(int)
mal[startind[0]:startindPadded[1:][0] + 1,
:startindPadded[1:][0] + 1 - startind[0]][np.tril_indices_from(
mal[startind[0]:startindPadded[1:][0] + 1, :startindPadded[1:][0] + 1 - startind[0]])] = 1
treesInNode = [mal]
for s, e in zip(startind[1:], startindPadded[2:]):
treesInNode.append(treeones(periods, s, e, treesInNode[-1][s, :s + 1]))
return treesInNode
treesInNode = makeTreesInNode(startind, startindPadded, self.periods)
def makeTreecols(treesInNode, startind, startperiods, startindPadded):
treecols = [[(treesInNode[0] * np.arange(len(treesInNode[0])).reshape(len(treesInNode[0]), 1))
[:startind[1] + 1, :startind[1] + 1]]]
for treeset in zip(treesInNode[1:], startperiods[1:], startind[1:], startindPadded[2:]):
startcol = np.array(treecols[-1])[:, -1].flatten()
start = np.min(startcol)
points = np.max(treeset[0][treeset[2]:treeset[3] + 1][0])
colcluster = [np.linspace([start] * treeset[0][treeset[2]:treeset[3] + 1][0].shape[0],
np.array([start] * treeset[0][treeset[2]:treeset[3] + 1][0].shape[0]) +
treeset[0][treeset[2]:treeset[3] + 1][0].max() - 1,
points).astype(int).T.tolist()]
for row in treeset[0][treeset[2]:treeset[3] + 1][1:]:
start = np.max(np.array(colcluster[-1])) + 1
lin = np.linspace([start] * row.shape[0], np.array([start] * row.shape[0]) + row.max() - 1,
row.max()).astype(int)
colcluster.append(lin.T.tolist())
trilind = np.tril_indices(treeset[1] + 1)
trilrows = trilind[0]
trilcols = trilind[1]
tempcol = []
for i in range(treeset[2] + 1):
for _ in range(treeset[0][treeset[2]][i]):
columnspop = list(map(lambda x: x.pop(0), np.array(colcluster, dtype=object)[(trilrows, trilcols + i)]))
tempmal = np.zeros((treeset[1] + 1, treeset[1] + 1)).astype(int)
tempmal[trilrows, trilcols] = columnspop
tempcol.append(tempmal)
treecols.append(tempcol)
return treecols
treecols = makeTreecols(treesInNode, startind, startperiods, startindPadded)
# enumerated prediv spots and indices
def lastnodes(Nlistarrays):
retlist = []
for arrlist in itertools.chain(*Nlistarrays[:-1]):
retlist.append(arrlist[-1])
return retlist
spotsPrediv = lastnodes(spotarr)
def lastnodesRows(Nlistarrays):
retlist = []
for arrlist in itertools.chain(*Nlistarrays[:-1]):
retlist.append(arrlist[-1] - 1)
return retlist
rowsPreddiv = lastnodesRows(treerows)
colsPrediv = lastnodes(treecols)
# cleanup in indices and value arrays, enumerated
def nlCleanup(Nlistarrays):
retlist = []
toloop = Nlistarrays[:-1]
for tree in itertools.chain(*toloop):
retlist.append(tree[:-1, :-1][np.tril_indices_from(tree[:-1, :-1])])
for tree in Nlistarrays[-1]:
retlist.append(tree[np.tril_indices_from(tree)])
return retlist
spotsToWrite = nlCleanup(spotarr)
optsToWrite = nlCleanup(options)
intrToWrite = nlCleanup(intrinsic)
deltasToWrite = nlCleanup(deltas)
bondsToWrite = nlCleanup(bonds)
rowsToWrite = nlCleanup(treerows)
colsToWrite = nlCleanup(treecols)
# header merge ind
def mergeheaderind(treecols):
treecolsNew = []
for l in treecols:
treecolsNew.append(list(map(lambda x: x[:-1, :-1], l)))
treecolsNew[-1] = treecols[-1]
# list with start/end merge for header
mergelist = []
for l in treecolsNew:
stacked = np.array(l)
for i in range(stacked.shape[1]):
stackedperiod = stacked[:, i, :]
periods = stackedperiod[:, :i + 1]
lowest = periods.flatten().min()
highest = periods.flatten().max()
mergelist.append([lowest, highest])
return mergelist
headermergelist = mergeheaderind(treecols)
# tree construction
class treeWithoutDF:
def __init__(self, spots, intrinsics, options, deltas, bonds, colIndices, rowIndices,
predivspots, predivrows, predivcols, headermerge, portfolios, ups, downs):
# enumerated flat trees
self.spots = spots
self.intrinsics = intrinsics
self.options = options
self.deltas = deltas
self.bonds = bonds
self.colIndFlat = colIndices
self.rowIndFlat = rowIndices
self.predivspots = predivspots
self.predivrows = predivrows
self.predivcols = predivcols
self.headermerge = headermerge
self.upind = ups
self.doind = downs
if portfolios:
ports = []
def makeportstring(spotarr, optarr, deltasarr, bondsarr):
portstring1 = np.char.add(optarr.astype(str), ' = ')
portstring2 = np.char.add(portstring1, spotarr.astype(str))
portstring3 = np.char.add(portstring2, '*')
portstring4 = np.char.add(portstring3, deltasarr.astype(str))
portstring5 = np.char.add(portstring4, ' + ')
portstring6 = np.char.add(portstring5, bondsarr.astype(str))
return portstring6
for s, o, d, b in zip(spots, options, deltas, bonds):
ports.append(makeportstring(s, o, d, b))
self.portsflat = ports
mytree = treeWithoutDF(spotsToWrite, intrToWrite, optsToWrite, deltasToWrite, bondsToWrite, colsToWrite,
rowsToWrite, spotsPrediv, rowsPreddiv, colsPrediv, headermergelist, self.portfolios,
upind, doind)
# setting tree object as attribute and return
setattr(self, optType + 'Tree', mytree)
setattr(self, optType + 'OptionPrice', mytree.options[0][0])
setattr(self, optType + 'Intrinsics', intrToWrite)
self.trees.update({optType + 'Tree': mytree})
if optType == 'ec':
self.BScall()
elif optType == 'ep':
self.BSput()
return mytree
def makeTreeEC(self):
treelist = self.maketrees
treelist.append('ec')
self.maketrees = list(set(treelist))
self.calculate()
return self.trees['ecTree']
def makeTreeEP(self):
treelist = self.maketrees
treelist.append('ep')
self.maketrees = list(set(treelist))
self.calculate()
return self.trees['epTree']
def makeTreeAC(self):
treelist = self.maketrees
treelist.append('ac')
self.maketrees = list(set(treelist))
self.calculate()
return self.trees['acTree']
def makeTreeAP(self):
treelist = self.maketrees
treelist.append('ap')
self.maketrees = list(set(treelist))
self.calculate()
return self.trees['apTree']
def BScall(self, strike = None):
from scipy.stats import norm
import numpy as np
if self.discdiv is not None:
S = self.spot - self.pvdiv
else:
S = self.spot
if strike is None:
strikeBS = self.strike
else:
strikeBS = strike
d1 = (np.log(S/strikeBS) + ((self.r - self.divyield) + (0.5*(self.vola**2))) * self.T) \
/(self.vola * np.sqrt(self.T))
d2 = d1 - (self.vola * np.sqrt(self.T))
BSpremium = S * np.exp(-self.divyield * self.T) * norm.cdf(d1) - \
strikeBS * np.exp(-self.r * self.T) * norm.cdf(d2)
self.ecOptionPriceBSdelta = np.exp(-self.divyield * self.T) * norm.cdf(d1)
self.ecOptionPriceBS = BSpremium
return BSpremium
def BSput(self, strike = None):
from scipy.stats import norm
import numpy as np
if self.discdiv is not None:
S = self.spot - self.pvdiv
else:
S = self.spot
if strike is None:
strikeBS = self.strike
else:
strikeBS = strike
d1 = (np.log(S / strikeBS) + ((self.r - self.divyield) + (0.5 * (self.vola**2))) * self.T) \
/ (self.vola * np.sqrt(self.T))
d2 = d1 - (self.vola * np.sqrt(self.T))
BSpremium = strikeBS * np.exp(-self.r * self.T) * norm.cdf(-d2) - \
S * np.exp(-self.divyield * self.T) * norm.cdf(-d1)
self.epOptionPriceBSdelta = - np.exp(-self.divyield * self.T) * norm.cdf(-d1)
self.epOptionPriceBS = BSpremium
return BSpremium
def ecPlotDeltas(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
ecOptionDelta = []
ecOptionBSDelta = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1/(self.d**self.periods)
if rstart <= (divs_sum*d_denominator):
rstart = int((divs_sum*d_denominator)) + 1
for s in range(rstart, rstop):
dummydict = self(['ecOptionPriceBSdelta', 'ecTree'], spot = s, maketrees = ['ec'], rounding = 16, **kwargs)
spotList.append(s)
ecOptionDelta.append(dummydict['ecTree'].deltas[0][0])
ecOptionBSDelta.append(dummydict['ecOptionPriceBSdelta'])
plt.figure(figsize = (8, 6))
plt.plot(spotList, ecOptionDelta, label = 'Binomial tree delta')
plt.plot(spotList, ecOptionBSDelta, label = 'Black-Scholes delta')
plt.title('Deltas of European Call')
plt.xlabel('Current spot')
plt.ylabel('Delta')
plt.legend()
plt.grid()
plt.show()
def epPlotDeltas(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
epOptionDelta = []
epOptionBSDelta = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
dummydict = self(['epOptionPriceBSdelta', 'epTree'], spot = s, maketrees = ['ep'], rounding = 16, **kwargs)
spotList.append(s)
epOptionDelta.append(dummydict['epTree'].deltas[0][0])
epOptionBSDelta.append(dummydict['epOptionPriceBSdelta'])
plt.figure(figsize = (8, 6))
plt.plot(spotList, epOptionDelta, label = 'Binomial tree delta')
plt.plot(spotList, epOptionBSDelta, label = 'Black-Scholes delta')
plt.title('Deltas of European Put')
plt.xlabel('Current spot')
plt.ylabel('Delta')
plt.legend()
plt.grid()
plt.show()
def acPlotDeltas(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
acOptionDelta = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
spotList.append(s)
acOptionDelta.append(self('acTree', spot = s, maketrees = ['ac'], rounding = 16, **kwargs).deltas[0][0])
plt.figure(figsize = (8, 6))
plt.plot(spotList, acOptionDelta, label = 'Binomial tree delta')
plt.title('Deltas of American Call')
plt.xlabel('Current spot')
plt.ylabel('Delta')
plt.legend()
plt.grid()
plt.show()
def apPlotDeltas(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
apOptionDelta = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
spotList.append(s)
apOptionDelta.append(self('apTree', spot = s, maketrees = ['ap'], rounding = 16, **kwargs).deltas[0][0])
plt.figure(figsize = (8, 6))
plt.plot(spotList, apOptionDelta, label = 'Binomial tree delta')
plt.title('Deltas of American Put')
plt.xlabel('Current spot')
plt.ylabel('Delta')
plt.legend()
plt.grid()
plt.show()
def ecPlotPrice(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
ecOptionPriceList = []
ecOptionPriceBSList = []
ecIntrinsicsList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
dummydict = self(['ecOptionPrice', 'ecOptionPriceBS', 'ecIntrinsics'], **kwargs,
spot = s, maketrees = ['ec'], rounding = 16)
spotList.append(s)
ecOptionPriceList.append(dummydict['ecOptionPrice'])
ecOptionPriceBSList.append(dummydict['ecOptionPriceBS'])
ecIntrinsicsList.append(dummydict['ecIntrinsics'][0][0])
plt.figure(figsize = (8, 6))
plt.plot(spotList, ecOptionPriceList, label = 'Binomial tree price')
plt.plot(spotList, ecOptionPriceBSList, label = 'Black-Scholes price')
plt.plot(spotList, ecIntrinsicsList, label = 'Intrinsic value')
plt.title('Price of European Call')
plt.xlabel('Current spot')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def epPlotPrice(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
epOptionPriceList = []
epOptionPriceBSList = []
epIntrinsicsList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
dummydict = self(['epOptionPrice', 'epOptionPriceBS', 'epIntrinsics'], **kwargs,
spot = s, maketrees = ['ep'], rounding = 16)
spotList.append(s)
epOptionPriceList.append(dummydict['epOptionPrice'])
epOptionPriceBSList.append(dummydict['epOptionPriceBS'])
epIntrinsicsList.append(dummydict['epIntrinsics'][0][0])
plt.figure(figsize = (8, 6))
plt.plot(spotList, epOptionPriceList, label = 'Binomial tree price')
plt.plot(spotList, epOptionPriceBSList, label = 'Black-Scholes price')
plt.plot(spotList, epIntrinsicsList, label = 'Intrinsic value')
plt.title('Price of European Put')
plt.xlabel('Current spot')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def acPlotPrice(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
acOptionPriceList = []
acIntrinsicsList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
dummydict = self(['acOptionPrice', 'acIntrinsics'], **kwargs,
spot = s, maketrees = ['ac'], rounding = 16)
spotList.append(s)
acOptionPriceList.append(dummydict['acOptionPrice'])
acIntrinsicsList.append(dummydict['acIntrinsics'][0][0])
plt.figure(figsize = (8, 6))
plt.plot(spotList, acOptionPriceList, label = 'Binomial tree price')
plt.plot(spotList, acIntrinsicsList, label = 'Intrinsic value')
plt.title('Price of American Call')
plt.xlabel('Current spot')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def apPlotPrice(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
spotList = []
apOptionPriceList = []
apIntrinsicsList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 2*self.spot
if rangestart is not None:
if int(rangestart) <= 0:
rstart = 1
else:
rstart = int(rangestart)
else:
rstart = 1
if self.treetype != 'normal':
import numpy as np
divs_sum = np.array(self.discdiv)[:, 1].sum()
d_denominator = 1 / (self.d**self.periods)
if rstart <= (divs_sum * d_denominator):
rstart = int((divs_sum * d_denominator)) + 1
for s in range(rstart, rstop):
dummydict = self(['apOptionPrice', 'apIntrinsics'], **kwargs,
spot = s, maketrees = ['ap'], rounding = 16)
spotList.append(s)
apOptionPriceList.append(dummydict['apOptionPrice'])
apIntrinsicsList.append(dummydict['apIntrinsics'][0][0])
plt.figure(figsize = (8, 6))
plt.plot(spotList, apOptionPriceList, label = 'Binomial tree price')
plt.plot(spotList, apIntrinsicsList, label = 'Intrinsic value')
plt.title('Price of American Put')
plt.xlabel('Current spot')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def ecPlotPeriods(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
periodsList = []
ecOptionPriceList = []
ecOptionPriceBSList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 151
if self.treetype == 'nonrecombining':
lowerrst = len(self.discdiv) + 3
else:
lowerrst = 1
if rangestart is not None:
if int(rangestart) < lowerrst:
rstart = lowerrst
else:
rstart = int(rangestart)
else:
rstart = lowerrst
for p in range(rstart, rstop):
dummydict = self(['ecOptionPrice', 'ecOptionPriceBS'], periods = p, maketrees = ['ec'], rounding = 16, **kwargs)
periodsList.append(p)
ecOptionPriceList.append(dummydict['ecOptionPrice'])
ecOptionPriceBSList.append(dummydict['ecOptionPriceBS'])
plt.figure(figsize = (8, 6))
plt.plot(periodsList, ecOptionPriceList, label = 'Binomial tree price')
plt.plot(periodsList, ecOptionPriceBSList, label = 'Black-Scholes price')
plt.title('Price of European Call')
plt.xlabel('Periods')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def epPlotPeriods(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
periodsList = []
epOptionPriceList = []
epOptionPriceBSList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 151
if self.treetype == 'nonrecombining':
lowerrst = len(self.discdiv) + 3
else:
lowerrst = 1
if rangestart is not None:
if int(rangestart) < lowerrst:
rstart = lowerrst
else:
rstart = int(rangestart)
else:
rstart = lowerrst
for p in range(rstart, rstop):
dummydict = self(['epOptionPrice', 'epOptionPriceBS'], periods = p, maketrees = ['ep'], rounding = 16, **kwargs)
periodsList.append(p)
epOptionPriceList.append(dummydict['epOptionPrice'])
epOptionPriceBSList.append(dummydict['epOptionPriceBS'])
plt.figure(figsize = (8, 6))
plt.plot(periodsList, epOptionPriceList, label = 'Binomial tree price')
plt.plot(periodsList, epOptionPriceBSList, label = 'Black-Scholes price')
plt.title('Price of European Put')
plt.xlabel('Periods')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def acPlotPeriods(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
periodsList = []
acOptionPriceList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 151
if self.treetype == 'nonrecombining':
lowerrst = len(self.discdiv) + 3
else:
lowerrst = 1
if rangestart is not None:
if int(rangestart) < lowerrst:
rstart = lowerrst
else:
rstart = int(rangestart)
else:
rstart = lowerrst
for p in range(rstart, rstop):
acOptionPriceList.append(self('acOptionPrice', periods = p, maketrees = ['ac'], rounding = 16, **kwargs))
periodsList.append(p)
plt.figure(figsize = (8, 6))
plt.plot(periodsList, acOptionPriceList, label = 'Binomial tree price')
plt.title('Price of American Call')
plt.xlabel('Periods')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
def apPlotPeriods(self, rangestart = None, rangestop = None, **kwargs):
import matplotlib.pyplot as plt
periodsList = []
apOptionPriceList = []
if rangestop is not None:
rstop = int(rangestop)
else:
rstop = 151
if self.treetype == 'nonrecombining':
lowerrst = len(self.discdiv) + 3
else:
lowerrst = 1
if rangestart is not None:
if int(rangestart) < lowerrst:
rstart = lowerrst
else:
rstart = int(rangestart)
else:
rstart = lowerrst
for p in range(rstart, rstop):
apOptionPriceList.append(self('apOptionPrice', periods = p, maketrees = ['ap'], rounding = 16, **kwargs))
periodsList.append(p)
plt.figure(figsize = (8, 6))
plt.plot(periodsList, apOptionPriceList, label = 'Binomial tree price')
plt.title('Price of American Put')
plt.xlabel('Periods')
plt.ylabel('Price')
plt.legend()
plt.grid()
plt.show()
    def plotSpots(self, **kwargs):
        """Scatter-plot every spot node of the binomial tree against time,
        with a histogram of the last-period spot distribution alongside.

        The layout is a 4x4 GridSpec: columns 0-2 hold the scatter of
        (time, spot) pairs, column 3 the histogram. Keyword arguments are
        forwarded to ``self(...)`` when fetching the spot array and the
        period length ``dt``.
        """
        import seaborn as sns
        import matplotlib.pyplot as plt
        from matplotlib.gridspec import GridSpec
        # setting plt style to seaborn
        plt.style.use('seaborn')
        if self.treetype == 'normal':
            spotarrRet = self(['spotarr', 'dt'], **kwargs)
            new_dt = spotarrRet['dt']
            spots = self.makenl(spotarrRet['spotarr'])
            spotsplot = []
            timeplot = []
            # Flatten the triangular tree: every node in period i is
            # plotted at time i*dt.
            for spotlistEnum in enumerate(spots):
                time = len(spotlistEnum[1]) * [new_dt * spotlistEnum[0]]
                timeplot.extend(time)
                spotsplot.extend(spotlistEnum[1])
            fig = plt.figure(figsize = (8, 6))
            gs = GridSpec(4, 4)
            ax_scatter = fig.add_subplot(gs[0:4, 0:3])
            ax_hist = fig.add_subplot(gs[0:4, 3])
            sns.scatterplot(x = timeplot, y = spotsplot, ax = ax_scatter)
            sns.histplot(y = spots[-1], kde = True, ax = ax_hist)
            ax_scatter.set_ylabel('Spot')
            ax_scatter.set_xlabel('Time')
            ax_scatter.set_title('Binomial spot tree')
            ax_hist.set_title('Last period dist.')
            plt.show()
        elif self.treetype == 'fsolution':
            # F-solution tree: plot both the spot tree and the forward
            # ("F") tree on the same axes.
            spotarrRet = self(['spotarr', 'Ftree', 'dt'], **kwargs)
            spots = self.makenl(spotarrRet['spotarr'])
            Fspots = self.makenl(spotarrRet['Ftree'])
            new_dt = spotarrRet['dt']
            spotsplot = []
            Fspotsplot = []
            timeplot = []
            for spotlistEnum in enumerate(zip(spots, Fspots)):
                time = len(spotlistEnum[1][0]) * [new_dt * spotlistEnum[0]]
                timeplot.extend(time)
                spotsplot.extend(spotlistEnum[1][0])
                Fspotsplot.extend(spotlistEnum[1][1])
            fig = plt.figure(figsize = (8, 6))
            gs = GridSpec(4, 4)
            ax_scatter = fig.add_subplot(gs[0:4, 0:3])
            ax_hist = fig.add_subplot(gs[0:4, 3])
            sns.scatterplot(x = timeplot, y = spotsplot, ax = ax_scatter, label = 'Spots')
            sns.scatterplot(x = timeplot, y = Fspotsplot, ax = ax_scatter, label = 'F-Spots')
            sns.histplot(y = spots[-1], kde = True, ax = ax_hist)
            ax_scatter.set_ylabel('Spot')
            ax_scatter.set_xlabel('Time')
            ax_scatter.set_title('Binomial spot tree')
            ax_hist.set_title('Last period dist.')
            ax_scatter.legend()
            plt.show()
        elif self.treetype == 'nonrecombining':
            import numpy as np
            fig = plt.figure(figsize = (8, 6))
            gs = GridSpec(4, 4)
            ax_scatter = fig.add_subplot(gs[0:4, 0:3])
            ax_hist = fig.add_subplot(gs[0:4, 3])
            # Each dividend date spawns a new family of subtrees; `start`
            # carries the period offset where the current subtree begins.
            start = 0
            last = None
            spotarrRet = self(['spotarr', 'dt'], **kwargs)
            new_dt = spotarrRet['dt']
            for spotarrList in spotarrRet['spotarr']:
                for spotarr in spotarrList:
                    spots = self.makenl(spotarr)
                    spotsplot = []
                    timeplot = []
                    for spotlistEnum in enumerate(spots, start = start):
                        time = len(spotlistEnum[1]) * [new_dt * spotlistEnum[0]]
                        timeplot.extend(time)
                        spotsplot.extend(spotlistEnum[1])
                        last = spotlistEnum[0]
                    sns.scatterplot(x = timeplot, y = spotsplot, ax = ax_scatter)
                # change start val
                start = last
            # Last column of the final subtree array = terminal spots.
            spothist = np.array(self.spotarr[-1])[:, -1].flatten()
            sns.histplot(y = spothist, kde = True, ax = ax_hist)
            ax_scatter.set_ylabel('Spot')
            ax_scatter.set_xlabel('Time')
            ax_scatter.set_title('Binomial spot tree')
            ax_hist.set_title('Last period dist.')
            plt.show()
        # resetting plt style
        plt.style.use('default')
def stringTree(self):
anytree = self.trees[list(self.trees.keys())[0]]
stringvals = dict()
if self.treetype == 'normal':
ups = anytree.upflat
downs = anytree.downflat
rows = anytree.rowIndFlat
cols = anytree.colIndFlat
stringvals.setdefault('ups', ups)
stringvals.setdefault('downs', downs)
stringvals.setdefault('rows', rows)
stringvals.setdefault('cols', cols)
elif self.treetype == 'fsolution':
ups = anytree.upflat
downs = anytree.downflat
rows = anytree.rowIndFlat
cols = anytree.colIndFlat
rowsS = anytree.rowIndFlatS
rowsF = anytree.rowIndFlatF
colsF = anytree.colIndFlatF
upsF = ups[:len(rowsF)]
downsF = downs[:len(rowsF)]
stringvals.setdefault('ups', ups)
stringvals.setdefault('downs', downs)
stringvals.setdefault('rows', rows)
stringvals.setdefault('rowsS', rowsS)
stringvals.setdefault('rowsF', rowsF)
stringvals.setdefault('cols', cols)
stringvals.setdefault('colsF', colsF)
stringvals.setdefault('upsF', upsF)
stringvals.setdefault('downsF', downsF)
elif self.treetype == 'nonrecombining':
import numpy as np
import itertools
upsALL = anytree.upind
downsALL = anytree.doind
upsPrediv = []
downsPrediv = []
for ul, dl in zip(upsALL[:-1], downsALL[:-1]):
for ull, dll, in zip(ul, dl):
upsPrediv.append(ull[-1])
downsPrediv.append(dll[-1])
rowsPrediv = anytree.predivrows
colsPrediv = anytree.predivcols
ups = []
downs = []
for ul, dl in zip(upsALL[:-1], downsALL[:-1]):
for ull, dll, in zip(ul, dl):
ups.append(ull[:-1, :-1][np.tril_indices_from(ull[:-1, :-1])])
downs.append(dll[:-1, :-1][np.tril_indices_from(dll[:-1, :-1])])
for ul, dl in zip(upsALL[-1], downsALL[-1]):
ups.append(ul[np.tril_indices_from(ul)])
downs.append(dl[np.tril_indices_from(dl)])
rows = anytree.rowIndFlat
cols = anytree.colIndFlat
stringvals.setdefault('ups', ups)
stringvals.setdefault('downs', downs)
stringvals.setdefault('rows', rows)
stringvals.setdefault('cols', cols)
stringvals.setdefault('upsPrediv', upsPrediv)
stringvals.setdefault('downsPrediv', downsPrediv)
stringvals.setdefault('rowsPrediv', rowsPrediv)
stringvals.setdefault('colsPrediv', colsPrediv)
return stringvals
def removeDiv(self):
self.discdiv = None
self.treetype = 'normal'
self.calculate()
def addDiv(self, discdiv, nonrec = False):
zeroDiscreteDividends = [discdiv == 0, discdiv == float(0)]
if all(zeroDiscreteDividends):
pass
else:
self.discdiv = discdiv
if nonrec:
self.treetype = 'nonrecombining'
else:
self.treetype = 'fsolution'
self.calculate()
def calculate(self):
if self.treetype == 'normal':
for i in self.maketrees:
self.getOptionsNormal(i)
elif self.treetype == 'fsolution':
for i in self.maketrees:
self.getOptionsFsol(i)
elif self.treetype == 'nonrecombining':
for i in self.maketrees:
self.getOptionsNonrec(i)
if 'ec' in self.maketrees:
self.BScall()
if 'ep' in self.maketrees:
self.BSput()
def write(self, fname_override = None, width = 2100, height = 1200):
import xlsxwriter
import numpy as np
import sys
import os
################## make directory/folders if they don't exist ##################
if fname_override is None:
filepath = self.dirfile
else:
filepath = os.path.join(self.foldir, str(fname_override) + '.xlsx')
if not os.path.exists(self.foldir):
os.makedirs(self.foldir)
################## universal variables ##################
zoompct = 140
interimColWidth = 2.18579234972678
defaultRowHeight = 16
colWidth_65pixels = 10.15625
portWidthExplanationCells_75pixels = 11.71875
ovParamRightColWidth_110pixels = 17.5
portColWidth_135pixels = 21.8359375
try:
anytree = self.trees[list(self.trees.keys())[0]]
except (AttributeError, IndexError):
self.makeTreeEC()
anytree = self.trees[list(self.trees.keys())[0]]
if self.treetype == 'fsolution':
startingRow = anytree.rowIndFlat[0] + 1
elif self.treetype == 'nonrecombining':
startingRow = anytree.rowIndFlat[0][0] + 2
else:
startingRow = anytree.rowIndFlat[0] + 2
if self.rcont:
rcontWrite = 'True'
else:
rcontWrite = 'False'
timeheader = self.treeheader
paramsLeft = ['S',
'K',
'𝜎',
'u',
'd',
'r',
'T',
'dt',
'periods',
'∂',
'Discrete div.',
'Continous r']
paramsRightDef = ['Underlying spot now',
'Strike',
'Volatility',
'Up movement',
'Down movement',
'p.a. Risk-free rate',
'Years to maturity',
'Period length',
'Periods',
'Dividend yield',
'Discrete dividends',
'False -> discrete r']
greenRow = len(paramsLeft) + 4
if self.discdiv is None:
exceldiscdiv = 'None'
else:
exceldiscdiv = 'True'
paramsRight1 = [self.spot,
self.strike,
self.vola,
self.u,
self.d,
self.r]
if self.dtfreq is not None and self.headerformat == 'dt':
paramT = timeheader[-1]
paramdt = timeheader[2]
else:
paramT = self.T
paramdt = self.dt
paramsRight2 = [paramT,
paramdt]
paramsRight3 = [self.periods,
self.divyield,
exceldiscdiv,
rcontWrite]
workbook = xlsxwriter.Workbook(filepath, {'strings_to_numbers': True})
################## format objects ##################
formatstring = '#,##0.' + '0'*self.rounding
timeHeaderFormat = workbook.add_format({'bold': True,
'align': 'center',
'valign': 'vcenter',
'bottom': 2,
'font_size': 12})
paramHeaderFormat = workbook.add_format({'bold': True,
'align': 'center',
'valign': 'vcenter',
'fg_color': '#F3B084',
'border': 2,
'font_size': 12})
paramLeftFormat = workbook.add_format({'align': 'right',
'valign': 'vcenter',
'fg_color': '#F3B084',
'font_size': 12,
'left': 2,
'bottom': 1})
paramRightFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#F3B084',
'num_format': formatstring,
'font_size': 12,
'right': 2,
'bottom': 1})
paramRightFormat2 = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#F3B084',
'font_size': 12,
'right': 2,
'bottom': 1})
interimParamBottomFormat = workbook.add_format({'top':
2})
startrowLeftFormat = workbook.add_format({'align': 'right',
'valign': 'vcenter',
'fg_color': '#E2EFDA',
'top': 1,
'left': 1,
'bottom': 1,
'font_size': 12})
startRowRightFormat = workbook.add_format({'align': 'left',
'valign': 'vcenter',
'fg_color': '#E2EFDA',
'top': 1,
'right': 1,
'bottom': 1,
'font_size': 12})
spotFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#FFFF00',
'num_format': formatstring,
'left': 1,
'top': 1,
'right': 1,
'bottom': 4,
'font_size': 12})
spotPredivFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#CC5555',
'num_format': formatstring,
'left': 1,
'top': 1,
'right': 1,
'bottom': 4,
'font_size': 12})
intrinsicFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#00FF85',
'num_format': formatstring,
'left': 1,
'top': 4,
'right': 1,
'bottom': 4,
'font_size': 12})
optionFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#BDD7EE',
'num_format': formatstring,
'left': 1,
'bottom': 1,
'right': 1,
'top': 4,
'font_size': 12})
FspotFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#B0FFFE',
'num_format': formatstring,
'left': 1,
'top': 4,
'right': 1,
'bottom': 4,
'font_size': 12})
discHeaderLFormat = workbook.add_format({'bold': True,
'align': 'center',
'valign': 'vcenter',
'fg_color': '#D6B4FF',
'left': 2,
'top': 2,
'bottom': 2,
'font_size': 12})
discHeaderRFormat = workbook.add_format({'bold': True,
'align': 'center',
'valign': 'vcenter',
'fg_color': '#D6B4FF',
'right': 2,
'top': 2,
'bottom': 2,
'font_size': 12})
discLFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#D6B4FF',
'left': 2,
'top': 1,
'bottom': 1,
'font_size': 12})
discRFormat = workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#D6B4FF',
'num_format': formatstring,
'right': 2,
'top': 1,
'bottom': 1,
'font_size': 12})
supscript = workbook.add_format({'font_script':
1})
subscript = workbook.add_format({'font_script':
2})
formulasFormat = workbook.add_format({'bold': True,
'underline': True,
'font_size': 12,
'align': 'center',
'valign': 'vcenter'})
with workbook as workbook:
workbook.set_size(width, height)
# writing overview page
def overviewPage(tree = None):
ov = workbook.add_worksheet('Def. and overview')
ov.set_column(0, 0, interimColWidth)
ov.set_column(3, 3, interimColWidth)
ov.set_zoom(zoompct)
ov.set_default_row(defaultRowHeight)
# writing timeheader and setting column widths
if self.treetype == 'nonrecombining':
ov.set_column(4, tree.headermerge[-1][-1] + 5, colWidth_65pixels)
ov.freeze_panes(1, 0)
ov.write(0, 4, timeheader[0], timeHeaderFormat)
for headerMergeIndices, headerWrite in zip(tree.headermerge, timeheader[1:]):
if headerMergeIndices[0] != headerMergeIndices[1]:
ov.merge_range(0, headerMergeIndices[0] + 5, 0, headerMergeIndices[1] + 5,
headerWrite, timeHeaderFormat)
elif headerMergeIndices[0] == headerMergeIndices[1]:
ov.write(0, headerMergeIndices[0] + 5,
headerWrite, timeHeaderFormat)
else:
ov.set_column(4, (4 + len(timeheader) - 1), colWidth_65pixels)
ov.freeze_panes(1, 0)
ov.write_row(0, 4, timeheader, timeHeaderFormat)
ov.merge_range(2, 1, 2, 2, 'Parameters', paramHeaderFormat)
ov.write_column(3, 1, paramsLeft, paramLeftFormat)
ov.write_row(greenRow - 1, 1, ["", ""], interimParamBottomFormat)
ov.write(greenRow, 1, 'S start row:', startrowLeftFormat)
ov.write(1, 4, 'Spot', spotFormat)
ov.write(greenRow, 2, startingRow, startRowRightFormat)
# Description cells of tree
if self.treetype == 'normal':
if self.showIntrinsic:
ov.write(2, 4, 'Intrinsic', intrinsicFormat)
ov.write(3, 4, 'Premium', optionFormat)
else:
ov.write(2, 4, 'Premium', optionFormat)
elif self.treetype == 'fsolution':
dt_all = np.arange(0, self.T + self.dt, self.dt)
divdt = np.array(self.discdiv)[:, 0]
divind = np.abs(np.subtract.outer(dt_all, divdt)).argmin(0)
divtimes = np.array(self.treeheader)[divind + 1]
divs = np.array(self.discdiv)[:, 1]
ov.write(18, 1, 't', discHeaderLFormat)
ov.write(18, 2, 'Dividends', discHeaderRFormat)
ov.write_column(19, 1, divtimes, discLFormat)
ov.write_column(19, 2, divs, discRFormat)
if self.showIntrinsic:
ov.write(2, 4, 'F-Spot', FspotFormat)
ov.write(3, 4, 'Intrinsic', intrinsicFormat)
ov.write(4, 4, 'Premium', optionFormat)
else:
ov.write(2, 4, 'F-Spot', FspotFormat)
ov.write(3, 4, 'Premium', optionFormat)
elif self.treetype == 'nonrecombining':
ov.write(2, 4, 'Spot', spotFormat)
ov.write(1, 4, 'Pre-Div Spot', spotPredivFormat)
dt_all = np.arange(0, self.T + self.dt, self.dt)
divdt = np.array(self.discdiv)[:, 0]
divind = np.abs(np.subtract.outer(dt_all, divdt)).argmin(0)
divs = np.array(self.discdiv)[:, 1]
divtimes = np.array(self.treeheader)[divind + 1]
ov.write(18, 1, 't', discHeaderLFormat)
ov.write(18, 2, 'Dividends', discHeaderRFormat)
ov.write_column(19, 1, divtimes, discLFormat)
ov.write_column(19, 2, divs, discRFormat)
if self.showIntrinsic:
ov.write(3, 4, 'Intrinsic', intrinsicFormat)
ov.write(4, 4, 'Premium', optionFormat)
else:
ov.write(3, 4, 'Premium', optionFormat)
ov.set_column(1, 1, colWidth_65pixels)
ov.set_column(2, 2, ovParamRightColWidth_110pixels)
ov.write_column(3, 2, paramsRightDef, paramRightFormat)
# formula images
formulaStart = startingRow + 6
ov.merge_range(formulaStart, 4, formulaStart, 5, 'Formulas used:', formulasFormat)
if self.udfunc.__name__ == 'udfunc_default':
ov.insert_image(formulaStart+1, 4, 'src/binotree/images/uFormula.png',
{'x_scale': 0.4, 'y_scale': 0.4, 'y_offset': 5})
ov.insert_image(formulaStart+3, 4, 'src/binotree/images/dFormula.png',
{'x_scale': 0.4, 'y_scale': 0.4, 'y_offset': 5})
if self.rcont:
ov.insert_image(formulaStart+7, 4, 'src/binotree/images/qFormula.png',
{'x_scale': 0.37, 'y_scale': 0.42})
else:
ov.insert_image(formulaStart+7, 4, 'src/binotree/images/qFormulaNoncont.png',
{'x_scale': 0.35, 'y_scale': 0.4})
ov.insert_image(formulaStart+12, 4, 'src/binotree/images/riskNeutralPricing.png',
{'x_scale': 0.33, 'y_scale': 0.33, 'y_offset': -7})
ov.insert_image(formulaStart+15, 4, 'src/binotree/images/replicatingPricing.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': 7})
if self.rcont:
ov.insert_image(formulaStart+17, 4, 'src/binotree/images/replicatingDelta.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': 5})
ov.insert_image(formulaStart+21, 4, 'src/binotree/images/replicatingBond.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': -8})
else:
ov.insert_image(formulaStart+17, 4, 'src/binotree/images/replicatingDeltaNoncont.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': 4})
ov.insert_image(formulaStart+21, 4, 'src/binotree/images/replicatingBondNoncont.png',
{'x_scale': 0.34, 'y_scale': 0.34, 'y_offset': -8})
ov.insert_image(formulaStart+24, 4, 'src/binotree/images/replicatingExplanation.png',
{'x_scale': 0.35, 'y_scale': 0.35})
else:
if self.rcont:
ov.insert_image(formulaStart+1, 4, 'src/binotree/images/qFormula.png',
{'x_scale': 0.37, 'y_scale': 0.42})
else:
ov.insert_image(formulaStart+1, 4, 'src/binotree/images/qFormulaNoncont.png',
{'x_scale': 0.35, 'y_scale': 0.4})
ov.insert_image(formulaStart+6, 4, 'src/binotree/images/riskNeutralPricing.png',
{'x_scale': 0.33, 'y_scale': 0.33, 'y_offset': -7})
ov.insert_image(formulaStart+9, 4, 'src/binotree/images/replicatingPricing.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': 7})
if self.rcont:
ov.insert_image(formulaStart+11, 4, 'src/binotree/images/replicatingDelta.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': 5})
ov.insert_image(formulaStart+15, 4, 'src/binotree/images/replicatingBond.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': -8})
else:
ov.insert_image(formulaStart+11, 4, 'src/binotree/images/replicatingDeltaNoncont.png',
{'x_scale': 0.35, 'y_scale': 0.35, 'y_offset': 4})
ov.insert_image(formulaStart+15, 4, 'src/binotree/images/replicatingBondNoncont.png',
{'x_scale': 0.34, 'y_scale': 0.34, 'y_offset': -8})
ov.insert_image(formulaStart+18, 4, 'src/binotree/images/replicatingExplanation.png',
{'x_scale': 0.35, 'y_scale': 0.35})
return ov
ov = overviewPage(anytree)
def overviewcells(stringvals, ov):
if self.treetype == 'normal':
if self.showIntrinsic:
for u, d, r, c in zip(stringvals['ups'],
stringvals['downs'],
stringvals['rows'],
stringvals['cols']):
r += 1
c += 5
spotseq = ['S∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
optseq = ['C', subscript, f'{u}u_{d}d']
intseq = ['Intrinsic', subscript, f'{u}u_{d}d']
ov.write_rich_string(r, c, *spotseq, spotFormat)
ov.write_rich_string(r + 1, c, *intseq, intrinsicFormat)
ov.write_rich_string(r + 2, c, *optseq, optionFormat)
else:
for u, d, r, c in zip(stringvals['ups'],
stringvals['downs'],
stringvals['rows'],
stringvals['cols']):
r += 1
c += 5
spotseq = ['S∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
optseq = ['C', subscript, f'{u}u_{d}d']
ov.write_rich_string(r, c, *spotseq, spotFormat)
ov.write_rich_string(r + 1, c, *optseq, optionFormat)
elif self.treetype == 'fsolution':
for u, d, r, c in zip(stringvals['upsF'],
stringvals['downsF'],
stringvals['rowsF'],
stringvals['colsF']):
r += 1
c += 5
Fseq = ['F∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
ov.write_rich_string(r, c, *Fseq, FspotFormat)
if self.showIntrinsic:
for u, d, r, c, rS in zip(stringvals['ups'],
stringvals['downs'],
stringvals['rows'],
stringvals['cols'],
stringvals['rowsS']):
r += 1
c += 5
spotseq = ['S∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
intseq = ['Intrinsic', subscript, f'{u}u_{d}d']
optseq = ['C', subscript, f'{u}u_{d}d']
ov.write_rich_string(rS+1, c, *spotseq, spotFormat)
ov.write_rich_string(r + 1, c, *intseq, intrinsicFormat)
ov.write_rich_string(r + 2, c, *optseq, optionFormat)
else:
for u, d, r, c, rS in zip(stringvals['ups'],
stringvals['downs'],
stringvals['rows'],
stringvals['cols'],
stringvals['rowsS']):
r += 1
c += 5
spotseq = ['S∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
optseq = ['C', subscript, f'{u}u_{d}d']
ov.write_rich_string(rS+1, c, *spotseq, spotFormat)
ov.write_rich_string(r + 1, c, *optseq, optionFormat)
elif self.treetype == 'nonrecombining':
import itertools
colours = ['#000000',
'#0000FF',
'#FF0000',
'#00E100',
'#FF40FF',
'#FF8C00',
'#535093',
'#00B2FF',
'#885B1F']
spotformats, spotPredivformats, intrinsicformats, optionformats = [], [], [], []
for col in colours:
spotformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#FFFF00',
'border_color': col,
'num_format': formatstring,
'left': 2,
'top': 2,
'right': 2,
'bottom': 4,
'font_size': 12}))
spotPredivformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#CC5555',
'border_color': col,
'num_format': formatstring,
'left': 2,
'top': 2,
'right': 2,
'bottom': 4,
'font_size': 12}))
intrinsicformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#00FF85',
'border_color': col,
'num_format': formatstring,
'left': 2,
'top': 4,
'right': 2,
'bottom': 4,
'font_size': 12}))
optionformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#BDD7EE',
'border_color': col,
'num_format': formatstring,
'left': 2,
'bottom': 2,
'right': 2,
'top': 4,
'font_size': 12}))
spotIter = itertools.cycle(spotformats)
spotPredivIter = itertools.cycle(spotPredivformats)
intrinsicIter = itertools.cycle(intrinsicformats)
optionIter = itertools.cycle(optionformats)
if self.showIntrinsic:
for uarr, darr, rarr, carr in zip(stringvals['ups'],
stringvals['downs'],
stringvals['rows'],
stringvals['cols']):
spotformat = next(spotIter)
optionformat = next(optionIter)
intrinsicformat = next(intrinsicIter)
for u, d, r, c, in zip(uarr, darr, rarr, carr):
r += 1
c += 5
spotseq = ['S∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
optseq = ['C', subscript, f'{u}u_{d}d']
intseq = ['Intrinsic', subscript, f'{u}u_{d}d']
ov.write_rich_string(r, c, *spotseq, spotformat)
ov.write_rich_string(r + 1, c, *intseq, intrinsicformat)
ov.write_rich_string(r + 2, c, *optseq, optionformat)
for uarr, darr, rarr, carr in zip(stringvals['upsPrediv'],
stringvals['downsPrediv'],
stringvals['rowsPrediv'],
stringvals['colsPrediv']):
spotpredivsformat = next(spotPredivIter)
for u, d, r, c, in zip(uarr, darr, rarr, carr):
r += 1
c += 5
predivseq = ['S', subscript, 'pre-div', '∗u', supscript, str(u), '∗d', supscript, str(d)]
ov.write_rich_string(r, c, *predivseq, spotpredivsformat)
else:
for uarr, darr, rarr, carr in zip(stringvals['ups'],
stringvals['downs'],
stringvals['rows'],
stringvals['cols']):
spotformat = next(spotIter)
optionformat = next(optionIter)
for u, d, r, c, in zip(uarr, darr, rarr, carr):
r += 1
c += 5
spotseq = ['S∗', 'u', supscript, str(u), '∗d', supscript, str(d)]
optseq = ['C', subscript, f'{u}u_{d}d']
ov.write_rich_string(r, c, *spotseq, spotformat)
ov.write_rich_string(r + 1, c, *optseq, optionformat)
for uarr, darr, rarr, carr in zip(stringvals['upsPrediv'],
stringvals['downsPrediv'],
stringvals['rowsPrediv'],
stringvals['colsPrediv']):
spotpredivsformat = next(spotPredivIter)
for u, d, r, c, in zip(uarr, darr, rarr, carr):
r += 1
c += 5
predivseq = ['S', subscript, 'pre-div', '∗u', supscript, str(u), '∗d', supscript, str(d)]
ov.write_rich_string(r, c, *predivseq, spotpredivsformat)
stringvals = self.stringTree()
overviewcells(stringvals, ov)
def sheetsfundamentalLayout(sheet, port = False):
sheet.set_column(0, 0, interimColWidth)
sheet.set_column(3, 3, interimColWidth)
sheet.set_zoom(zoompct)
sheet.set_default_row(defaultRowHeight)
if port:
sheet.set_column(4, 4, portWidthExplanationCells_75pixels)
sheet.set_column(5, (4 + len(timeheader) - 1), portColWidth_135pixels)
else:
sheet.set_column(4, (4 + len(timeheader) - 1), colWidth_65pixels)
sheet.freeze_panes(1, 0)
sheet.write_row(0, 4, timeheader, timeHeaderFormat)
sheet.merge_range(2, 1, 2, 2, 'Parameters', paramHeaderFormat)
sheet.write_column(3, 1, paramsLeft, paramLeftFormat)
sheet.write_row(greenRow - 1, 1, ["", ""], interimParamBottomFormat)
sheet.write(greenRow, 1, 'S start row:', startrowLeftFormat)
sheet.write(1, 4, 'Spot', spotFormat)
if self.showIntrinsic is False:
if port:
sheet.write_rich_string(2, 4, 'V = S', subscript, 'u_d', '∗∆ + B', optionFormat)
else:
sheet.write(2, 4, 'Premium', optionFormat)
elif self.showIntrinsic is True:
sheet.write(2, 4, 'Intrinsic', intrinsicFormat)
if port:
sheet.write_rich_string(3, 4, 'V = S', subscript, 'u_d', '∗∆ + B', optionFormat)
else:
sheet.write(3, 4, 'Premium', optionFormat)
sheet.set_column(1, 1, colWidth_65pixels)
sheet.set_column(2, 2, colWidth_65pixels)
sheet.write_column(3, 2, paramsRight1, paramRightFormat)
sheet.write_column(9, 2, paramsRight2, paramRightFormat2)
sheet.write_column(11, 2, paramsRight3, paramRightFormat2)
sheet.write(greenRow, 2, startingRow, startRowRightFormat)
def writecells(tree, sheet, ports = False):
if ports:
options = tree.portsflat
else:
options = tree.optionsflat
spots = tree.spotsflat
colind = tree.colIndFlat + 5
rowind = tree.rowIndFlat + 1
for collection in list(zip(spots, rowind, colind)):
spot = collection[0]
row = collection[1]
column = collection[2]
sheet.write(row, column, spot, spotFormat)
if self.showIntrinsic is False:
for collection in list(zip(options, rowind, colind)):
option = collection[0]
row = collection[1]
column = collection[2]
sheet.write(row + 1, column, option, optionFormat)
elif self.showIntrinsic is True:
intrinsics = tree.intrinsicflat
for collection in list(zip(options, intrinsics, rowind, colind)):
option = collection[0]
intrinsic = collection[1]
row = collection[2]
column = collection[3]
sheet.write(row + 1, column, intrinsic, intrinsicFormat)
sheet.write(row + 2, column, option, optionFormat)
def sheetsfundamentalLayoutF(sheet, port = False):
sheet.set_column(0, 0, interimColWidth)
sheet.set_column(3, 3, interimColWidth)
sheet.set_zoom(zoompct)
sheet.set_default_row(defaultRowHeight)
if port:
sheet.set_column(4, 4, portWidthExplanationCells_75pixels)
sheet.set_column(5, (4 + len(timeheader) - 1), portColWidth_135pixels)
else:
sheet.set_column(4, (4 + len(timeheader) - 1), colWidth_65pixels)
sheet.freeze_panes(1, 0)
sheet.write_row(0, 4, timeheader, timeHeaderFormat)
sheet.merge_range(2, 1, 2, 2, 'Parameters', paramHeaderFormat)
sheet.write_column(3, 1, paramsLeft, paramLeftFormat)
sheet.write_row(greenRow - 1, 1, ["", ""], interimParamBottomFormat)
sheet.write(greenRow, 1, 'S start row:', startrowLeftFormat)
sheet.write(1, 4, 'Spot', spotFormat)
dt_all = np.arange(0, self.T + self.dt, self.dt)
divdt = np.array(self.discdiv)[:, 0]
divind = np.abs(np.subtract.outer(dt_all, divdt)).argmin(0)
divs = np.array(self.discdiv)[:, 1]
divtimes = np.array(self.treeheader)[divind + 1]
sheet.write(18, 1, 't', discHeaderLFormat)
sheet.write(18, 2, 'Dividends', discHeaderRFormat)
sheet.write_column(19, 1, divtimes, discLFormat)
sheet.write_column(19, 2, divs, discRFormat)
if self.showIntrinsic is False:
sheet.write(2, 4, 'F-Spot', FspotFormat)
if port:
sheet.write_rich_string(3, 4, 'V = S', subscript, 'u_d', '∗∆ + B', optionFormat)
else:
sheet.write(3, 4, 'Premium', optionFormat)
elif self.showIntrinsic is True:
sheet.write(2, 4, 'F-Spot', FspotFormat)
sheet.write(3, 4, 'Intrinsic', intrinsicFormat)
if port:
sheet.write_rich_string(4, 4, 'V = S', subscript, 'u_d', '∗∆ + B', optionFormat)
else:
sheet.write(4, 4, 'Premium', optionFormat)
sheet.set_column(1, 1, colWidth_65pixels)
sheet.set_column(2, 2, colWidth_65pixels)
sheet.write_column(3, 2, paramsRight1, paramRightFormat)
sheet.write_column(9, 2, paramsRight2, paramRightFormat2)
sheet.write_column(11, 2, paramsRight3, paramRightFormat2)
sheet.write(greenRow, 2, startingRow, startRowRightFormat)
def writecellsF(tree, sheet, ports = False):
spots = tree.spotsflat
Ftree = tree.Ftreeflat
if ports:
options = tree.portsflat
else:
options = tree.optionsflat
colind = tree.colIndFlat + 5
colindF = tree.colIndFlatF + 5
rowind = tree.rowIndFlat + 1
rowindS = tree.rowIndFlatS + 1
rowindF = tree.rowIndFlatF + 1
if self.showIntrinsic is False:
for collection in list(zip(spots, options, colind, rowind, rowindS)):
spot, option, col, row, rowS = collection
sheet.write(rowS, col, spot, spotFormat)
sheet.write(row+1, col, option, optionFormat)
for collectionF in list(zip(Ftree, colindF, rowindF)):
Fspot, col, row = collectionF
sheet.write(row, col, Fspot, FspotFormat)
elif self.showIntrinsic is True:
intrinsics = tree.intrinsicflat
for collection in list(zip(spots, intrinsics, options, colind, rowind, rowindS)):
spot, intrinsic, option, col, row, rowS = collection
sheet.write(rowS, col, spot, spotFormat)
sheet.write(row+1, col, intrinsic, intrinsicFormat)
sheet.write(row+2, col, option, optionFormat)
for collectionF in list(zip(Ftree, colindF, rowindF)):
Fspot, col, row = collectionF
sheet.write(row, col, Fspot, FspotFormat)
def sheetsfundamentalLayoutNonrec(sheet, tree, port = False):
sheet.set_column(0, 0, interimColWidth)
sheet.set_column(3, 3, interimColWidth)
sheet.set_zoom(zoompct)
sheet.set_default_row(defaultRowHeight)
if port:
sheet.set_column(4, 4, portWidthExplanationCells_75pixels)
sheet.set_column(5, tree.headermerge[-1][-1]+5, portColWidth_135pixels)
else:
sheet.set_column(4, tree.headermerge[-1][-1]+5, colWidth_65pixels)
sheet.freeze_panes(1, 0)
sheet.write(0, 4, timeheader[0], timeHeaderFormat)
for i, h in zip(tree.headermerge, timeheader[1:]):
if i[0] != i[1]:
sheet.merge_range(0, i[0]+5, 0, i[1]+5, h, timeHeaderFormat)
elif i[0] == i[1]:
sheet.write(0, i[0]+5, h, timeHeaderFormat)
sheet.merge_range(2, 1, 2, 2, 'Parameters', paramHeaderFormat)
sheet.write_column(3, 1, paramsLeft, paramLeftFormat)
sheet.write_row(greenRow - 1, 1, ["", ""], interimParamBottomFormat)
sheet.write(greenRow, 1, 'S start row:', startrowLeftFormat)
sheet.write(1, 4, 'Pre-Div Spot', spotPredivFormat)
sheet.write(2, 4, 'Spot', spotFormat)
dt_all = np.arange(0, self.T + self.dt, self.dt)
divdt = np.array(self.discdiv)[:, 0]
divind = np.abs(np.subtract.outer(dt_all, divdt)).argmin(0)
divs = np.array(self.discdiv)[:, 1]
divtimes = np.array(self.treeheader)[divind + 1]
sheet.write(18, 1, 't', discHeaderLFormat)
sheet.write(18, 2, 'Dividends', discHeaderRFormat)
sheet.write_column(19, 1, divtimes, discLFormat)
sheet.write_column(19, 2, divs, discRFormat)
# explanatory cells
if self.showIntrinsic is False:
sheet.write(3, 4, 'Premium', optionFormat)
if port:
sheet.write_rich_string(3, 4, 'V = S', subscript, 'u_d', '∗∆ + B', optionFormat)
else:
sheet.write(3, 4, 'Premium', optionFormat)
elif self.showIntrinsic is True:
sheet.write(3, 4, 'Intrinsic', intrinsicFormat)
if port:
sheet.write_rich_string(4, 4, 'V = S', subscript, 'u_d', '∗∆ + B', optionFormat)
else:
sheet.write(4, 4, 'Premium', optionFormat)
sheet.set_column(1, 1, colWidth_65pixels)
sheet.set_column(2, 2, colWidth_65pixels)
sheet.write_column(3, 2, paramsRight1, paramRightFormat)
sheet.write_column(9, 2, paramsRight2, paramRightFormat2)
sheet.write_column(11, 2, paramsRight3, paramRightFormat2)
sheet.write(greenRow, 2, startingRow, startRowRightFormat)
def writecellsNonrec(tree, sheet, ports = False):
import itertools
colours = ['#000000',
'#0000FF',
'#FF0000',
'#00E100',
'#FF40FF',
'#FF8C00',
'#535093',
'#00B2FF',
'#885B1F']
spotformats, spotPredivformats, intrinsicformats, optionformats = [], [], [], []
for col in colours:
spotformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#FFFF00',
'border_color': col,
'num_format': formatstring,
'left': 2,
'top': 2,
'right': 2,
'bottom': 4,
'font_size': 12}))
spotPredivformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#CC5555',
'border_color': col,
'num_format': formatstring,
'left': 2,
'top': 2,
'right': 2,
'bottom': 4,
'font_size': 12}))
intrinsicformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#00FF85',
'border_color': col,
'num_format': formatstring,
'left': 2,
'top': 4,
'right': 2,
'bottom': 4,
'font_size': 12}))
optionformats.append(workbook.add_format({'align': 'center',
'valign': 'vcenter',
'fg_color': '#BDD7EE',
'border_color': col,
'num_format': formatstring,
'left': 2,
'bottom': 2,
'right': 2,
'top': 4,
'font_size': 12}))
spotIter = itertools.cycle(spotformats)
spotPredivIter = itertools.cycle(spotPredivformats)
intrinsicIter = itertools.cycle(intrinsicformats)
optionIter = itertools.cycle(optionformats)
spots = tree.spots
predivspots = tree.predivspots
if ports:
options = tree.portsflat
else:
options = tree.options
rowind = tree.rowIndFlat
colind = tree.colIndFlat
rowindPrediv = tree.predivrows
colindPrediv = tree.predivcols
# write spots
if self.showIntrinsic is False:
for spot, option, row, column in zip(spots, options, rowind, colind):
spotformat = next(spotIter)
optionformat = next(optionIter)
for s, o, r, c in zip(spot, option, row, column):
sheet.write(r + 1, c + 5, s, spotformat)
sheet.write(r + 2, c + 5, o, optionformat)
for prespot, prow, pcol in zip(predivspots, rowindPrediv, colindPrediv):
spotpredivsformat = next(spotPredivIter)
for ps, pr, pc in zip(prespot, prow, pcol):
sheet.write(pr + 1, pc + 5, ps, spotpredivsformat)
elif self.showIntrinsic is True:
intrinsics = tree.intrinsics
for spot, option, intrinsic, row, column in zip(spots, options, intrinsics, rowind, colind):
spotformat = next(spotIter)
intrinsicformat = next(intrinsicIter)
optionformat = next(optionIter)
for s, o, i, r, c in zip(spot, option, intrinsic, row, column):
sheet.write(r + 1, c + 5, s, spotformat)
sheet.write(r + 2, c + 5, i, intrinsicformat)
sheet.write(r + 3, c + 5, o, optionformat)
for prespot, prow, pcol in zip(predivspots, rowindPrediv, colindPrediv):
spotpredivsformat = next(spotPredivIter)
for ps, pr, pc in zip(prespot, prow, pcol):
sheet.write(pr + 1, pc + 5, ps, spotpredivsformat)
if self.treetype == 'normal':
for treeName, treeObj in self.trees.items():
sheet = workbook.add_worksheet(treeName)
sheetsfundamentalLayout(sheet)
writecells(treeObj, sheet)
if self.portfolios:
sheetport = workbook.add_worksheet(treeName + 'Portfolios')
sheetsfundamentalLayout(sheetport, True)
writecells(treeObj, sheetport, True)
elif self.treetype == 'fsolution':
for treeName, treeObj in self.trees.items():
sheet = workbook.add_worksheet(treeName)
sheetsfundamentalLayoutF(sheet)
writecellsF(treeObj, sheet)
if self.portfolios:
sheetport = workbook.add_worksheet(treeName + 'Portfolios')
sheetsfundamentalLayoutF(sheetport, True)
writecellsF(treeObj, sheetport, True)
elif self.treetype == 'nonrecombining':
for treeName, treeObj in self.trees.items():
sheet = workbook.add_worksheet(treeName)
sheetsfundamentalLayoutNonrec(sheet, treeObj)
writecellsNonrec(treeObj, sheet)
if self.portfolios:
sheetport = workbook.add_worksheet(treeName + 'Portfolios')
sheetsfundamentalLayoutNonrec(sheetport, treeObj, True)
writecellsNonrec(treeObj, sheetport, True)
print(f'File was made at: {filepath}')
def __call__(self, toreturn, **kwargs):
"""
Purpose:
-> override parameters
-> choose parameters to return
Time call:
new T -> keep dt -> new periods
-> keep dtfreq
new dt -> keep T -> new periods
-> discard dtfrq
new periods -> keep T -> new dt
-> discard dtfrq
2 or 3 new -> act as __init__
-> if for example 'T' is passed in call (kwargs)
-> fetch dt from self.kwunion and pass those two
Make Dataframes:
-> if Dataframes are to be made from __call__, you must parse 'dfs' in toreturn parameter
"""
toreturnParams = ['direc',
'dirfile',
'fname',
'spot',
'spotarr',
'strike',
'vola',
'r',
'discountRate',
'discountDiv',
'discountRateMinusDiv',
'u',
'd',
'q',
'T',
'dt',
'periods',
'dtfreq',
'headerformat',
'treeheader',
'divyield',
'discdiv',
'treetype',
'trees',
'ecTree',
'epTree',
'acTree',
'apTree',
'ecOptionPrice',
'epOptionPrice',
'acOptionPrice',
'apOptionPrice',
'ecOptionPriceBS',
'epOptionPriceBS',
'dfs',
'showIntrinsic',
'nonrec',
'makedfs',
'rcont',
'collapsed',
'makedfs',
'udfunc',
'kwunion',
'rounding',
'updoind',
'maketrees']
if toreturn is None and kwargs is None:
self.help(['callable'])
print('\n\n\n')
print("Possible parameters for 'toreturn' input (either a list of strings or standalone string):")
for i in toreturnParams:
print(i)
print("\nPossible parameters for 'toreturn' input (either in a list or alone) ↑")
return None
else:
T = kwargs.get('T', False)
dt = kwargs.get('dt', False)
periods = kwargs.get('periods', False)
dtfreq = kwargs.get('dtfreq', None)
if toreturn == 'dfs' or 'dfs' in toreturn:
dfcalled = True
makedfs = True
else:
dfcalled = False
makedfs = False
newT = [T is not False]
newdt = [dt is not False]
newPeriods = [periods is not False]
dummyobject = None
if any(newT) and not any([dt, periods]):
dt = self.dt
periods = None
dtfreq = self.dtfreq
timeparams = dict(T = T, dt = dt, periods = periods, dtfreq = dtfreq)
newkwargs = {**kwargs, **timeparams}
dummyobject = tree(self.kwunion, **newkwargs, portfolios = False, write = False,
makedfs = makedfs, called = True, dfcalled = dfcalled)
elif any(newdt) and not any([T, periods]):
T = self.T
periods = None
timeparams = dict(T = T, dt = dt, periods = periods, dtfreq = dtfreq)
newkwargs = {**kwargs, **timeparams}
dummyobject = tree(self.kwunion, **newkwargs, portfolios = False, write = False,
makedfs = makedfs, called = True, dfcalled = dfcalled)
elif any(newPeriods) and not any([T, dt]):
T = self.T
dt = None
timeparams = dict(T = T, dt = dt, periods = periods, dtfreq = dtfreq)
newkwargs = {**kwargs, **timeparams}
dummyobject = tree(self.kwunion, **newkwargs, portfolios = False, write = False,
makedfs = makedfs, called = True, dfcalled = dfcalled)
elif not any([T, dt, periods]):
dummyobject = tree(self.kwunion, **kwargs, portfolios = False, write = False,
makedfs = makedfs, called = True, dfcalled = dfcalled)
if isinstance(toreturn, (list, tuple)):
stuff = dict()
if 'dfs' in toreturn:
toreturn.remove('dfs')
for name, df in dummyobject.trees.items():
stuff.setdefault(name, df)
for a in toreturn:
stuff.setdefault(a, getattr(dummyobject, a))
else:
for a in toreturn:
stuff.setdefault(a, getattr(dummyobject, a))
else:
stuff = dict()
if toreturn == 'dfs':
for name, df in dummyobject.trees.items():
stuff.setdefault(name, df)
else:
stuff = getattr(dummyobject, toreturn)
return stuff
    @staticmethod
    def help(tohelp = None):
        """Print usage documentation to stdout.

        tohelp: optional iterable of topic keys -- any of 'params',
        'params_examples', 'updown', 'time', 'udfunc', 'noinput',
        'callable'.  When None, every section is printed followed by a
        separator line.  An unknown topic key raises KeyError.
        """
        # each section below is a pre-formatted text block printed verbatim
        parameters = """
        ################################ Possible parameters #################################
        direc: Default: None -> string
        folname: Default: None -> string
        fname: Default: 'binotree' -> string
        spot: Default: None -> numerical
        strike: Default: None -> numerical
        T: Default: None -> numerical
        dt: Default: None -> numerical
        periods: Default: None -> integer
        dtfreq: Default: None -> string
        r: Default: 0.00 -> numerical
        rcont: Default: True -> boolean
        divyield: Default: 0.00 -> numerical
        vola: Default: None -> numerical
        u: Default: None -> numerical
        d: Default: None -> numerical
        udfunc: Default: udfunc -> callable function
        discdiv: Default: None -> dict or paired tuple/list
        nonrec: Default: False -> boolean
        collapsed: Default: False -> boolean
        write: Default: False -> boolean
        maketrees: Default: None -> list
        headerformat: Default: None -> string
        rounding: Default: 2 -> integer
        makedfs: Default: True -> boolean
        portfolios: Default: False -> boolean
        """
        parametersExamples = """
        ############################## Parameter specification ###############################
        direc: Default: None
            -> chosen directory
        folname: Default: None
            -> optional folder for output in chosen directory
        fname: Default: 'binotree'
            -> filename (not including filetype suffix)
        spot:
            -> Numerical value for current spot, e.g. 100
        strike:
            -> Numerical value for strike on options, e.g. 95
        T:
            -> Time to maturity in terms of years, e.g.
                -> 30 days: 30/365
                -> 3 weeks: 3/52
                -> 3 months: 3/12
                -> 2 years: 2
        dt:
            -> Length of each period in terms of years, e.g.
                -> 1 day: 1/365
                -> 1 week: 1/52
                -> 1 month: 1/12
                -> 1 year: 1
        periods:
            -> Number (integer) of periods in binomial tree, e.g. 3
        dtfreq:
            -> Needed for formatting of header if wanted
            -> one of 'd', 'w', 'm'
        r:
            -> Yearly risk-free interest rate in decimal percentage, e.g.
               For 4% you would parse -> 0.04
        rcont:
            -> Continous or discrete compunding interest rate and dividend yield, e.g.
               True -> Continous
               False -> Discrete
        divyield:
            -> Yearly dividend yield in decimal percentage, e.g. for 4% you would parse -> 0.04
        vola:
            -> Volatility in terms of yearly standard deviation (sigma), e.g. 0.20
        u:
            -> Up factor for each node movement up, e.g. 1.10
        d:
            -> Down factor for each node movement down, e.g. 0.90
        udfunc:
            IMPORTANT:
                -> The function must include **kwargs argument
                -> All parameters must/should be parsed as keywords
            Can take any of these parameters:
                -> vola, T, dt, periods, r, divyield, discountRate, discountDiv, discountRateMinusDiv, spot, strike
            Must return the two parameters:
                -> u, d
        discdiv:
            -> paired tuple/lists, e.g.
                -> [(1/12, 2), (4/12, 5)]
                -> [[1/12, 2], [4/12, 5]]
        nonrec:
            -> determines if tree is non-recombining, if False(default) -> F-solution
        collapsed:
            -> if True -> no empty cells between nodes
        write:
            -> if True -> write excel output directly from construction
        maketrees:
            -> list of options to calculate e.g. -> maketrees = ['ec', 'ep', 'ac', 'ap']
        headerformat:
            -> Determines if header is formated in terms of periods or 'actual' time, e.g.
                -> string -> 'periods'
               or
                -> string -> 'dt'
        rounding:
            -> integer specifying rounding for decimals, e.g. 2
        makedfs:
            -> True or False -> determining if dataframes are to be constructed (speeds up code when False)
        portfolios:
            -> True or False -> determining if replicating are to be written in excel output
        """
        updownpriohelp = """
        ############################ up, down, volatility specification ############################
        vola -> u, d = udfunc(vola, dt)
        u -> d = 1/u
          -> vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
        d -> u = 1/d
          -> vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
        u, d -> vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
        vola, u -> d = u / (np.exp(2 * vola * np.sqrt(dt)))
        vola, d -> u = d * (np.exp(2 * vola * np.sqrt(dt)))
        # u and d dominates if all 3 are passed
        all 3 -> vola = (np.log(u) - np.log(d)) / (2 * np.sqrt(dt))
        """
        timehelp = """
        ################################### time specification ###################################
        T, dt -> periods = T/dt
        T, periods -> dt = T/periods
        dt, periods -> T = dt * periods
        T, dt, periods -> dt = T/periods
        """
        udfunchelp = """
        udfunc:
            IMPORTANT:
                -> The function must include **kwargs argument
                -> All parameters must/should be parsed as keywords
            Can take any of these parameters:
                -> vola, T, dt, periods, r, divyield, discountRate, discountDiv, discountRateMinusDiv, spot, strike
            Must return the two parameters:
                -> u, d
        """
        nonepassed = """
        Parameters must be passed as a dictionary and/or keywords
        (keywords override passed params dictionary)
        """
        callablehelp = """
        Purpose:
            -> override parameters
            -> choose parameters to return
        Time call:
            new T -> keep dt -> new periods
                -> keep dtfreq
            new dt -> keep T -> new periods
                -> discard dtfrq
            new periods -> keep T -> new dt
                -> discard dtfrq
            2 or 3 new -> act as __init__
            -> if for example 'T' is passed in call (kwargs)
                -> fetch dt from self.kwunion and pass those two
        Make Dataframes:
            -> if Dataframes are to be made from __call__, you must parse 'dfs' in toreturn parameter
        """
        # topic key -> section text; keys are the valid ``tohelp`` entries
        helpstatement = {'params': parameters,
                         'params_examples': parametersExamples,
                         'updown': updownpriohelp,
                         'time': timehelp,
                         'udfunc': udfunchelp,
                         'noinput': nonepassed,
                         'callable': callablehelp}
        if tohelp is None:
            # no topics requested: print everything plus a closing separator
            for i in helpstatement.values():
                print(i)
            print('\n\n§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§'
                  '§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§§')
        else:
            # NOTE(review): a bare string here iterates per character and raises
            # KeyError -- callers are expected to pass a list, e.g. ['callable']
            for i in tohelp:
                print(helpstatement[i])
| [
"matplotlib.pyplot.title",
"numpy.maximum",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.style.use",
"numpy.arange",
"numpy.exp",
"pathlib.Path",
"itertools.cycle",
"os.path.join",
"numpy.full",
"numpy.zeros_like",
"os.path.exists",
"numpy.tril_indices_from",
"scipy.stats... | [((15056, 15081), 'numpy.ones', 'np.ones', (['(colnum, colnum)'], {}), '((colnum, colnum))\n', (15063, 15081), True, 'import numpy as np\n'), ((16210, 16230), 'numpy.zeros_like', 'np.zeros_like', (['spots'], {}), '(spots)\n', (16223, 16230), True, 'import numpy as np\n'), ((16450, 16480), 'numpy.zeros_like', 'np.zeros_like', (['spots[:-1, :-1]'], {}), '(spots[:-1, :-1])\n', (16463, 16480), True, 'import numpy as np\n'), ((17664, 17684), 'numpy.zeros_like', 'np.zeros_like', (['spots'], {}), '(spots)\n', (17677, 17684), True, 'import numpy as np\n'), ((17924, 17944), 'numpy.zeros_like', 'np.zeros_like', (['spots'], {}), '(spots)\n', (17937, 17944), True, 'import numpy as np\n'), ((30526, 30565), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (30535, 30565), True, 'import numpy as np\n'), ((31541, 31564), 'numpy.where', 'np.where', (['(divpowpv == 0)'], {}), '(divpowpv == 0)\n', (31549, 31564), True, 'import numpy as np\n'), ((47369, 47408), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (47378, 47408), True, 'import numpy as np\n'), ((47591, 47631), 'numpy.hstack', 'np.hstack', (['([0], divind, [self.periods])'], {}), '(([0], divind, [self.periods]))\n', (47600, 47631), True, 'import numpy as np\n'), ((65216, 65242), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (65226, 65242), True, 'import matplotlib.pyplot as plt\n'), ((65253, 65315), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'ecOptionDelta'], {'label': '"""Binomial tree delta"""'}), "(spotList, ecOptionDelta, label='Binomial tree delta')\n", (65261, 65315), True, 'import matplotlib.pyplot as plt\n'), ((65326, 65390), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'ecOptionBSDelta'], {'label': '"""Black-Scholes delta"""'}), "(spotList, ecOptionBSDelta, label='Black-Scholes delta')\n", (65334, 65390), True, 
'import matplotlib.pyplot as plt\n'), ((65401, 65437), 'matplotlib.pyplot.title', 'plt.title', (['"""Deltas of European Call"""'], {}), "('Deltas of European Call')\n", (65410, 65437), True, 'import matplotlib.pyplot as plt\n'), ((65446, 65472), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (65456, 65472), True, 'import matplotlib.pyplot as plt\n'), ((65481, 65500), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Delta"""'], {}), "('Delta')\n", (65491, 65500), True, 'import matplotlib.pyplot as plt\n'), ((65509, 65521), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (65519, 65521), True, 'import matplotlib.pyplot as plt\n'), ((65530, 65540), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (65538, 65540), True, 'import matplotlib.pyplot as plt\n'), ((65549, 65559), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (65557, 65559), True, 'import matplotlib.pyplot as plt\n'), ((66705, 66731), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (66715, 66731), True, 'import matplotlib.pyplot as plt\n'), ((66742, 66804), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'epOptionDelta'], {'label': '"""Binomial tree delta"""'}), "(spotList, epOptionDelta, label='Binomial tree delta')\n", (66750, 66804), True, 'import matplotlib.pyplot as plt\n'), ((66815, 66879), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'epOptionBSDelta'], {'label': '"""Black-Scholes delta"""'}), "(spotList, epOptionBSDelta, label='Black-Scholes delta')\n", (66823, 66879), True, 'import matplotlib.pyplot as plt\n'), ((66890, 66925), 'matplotlib.pyplot.title', 'plt.title', (['"""Deltas of European Put"""'], {}), "('Deltas of European Put')\n", (66899, 66925), True, 'import matplotlib.pyplot as plt\n'), ((66934, 66960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (66944, 66960), True, 'import matplotlib.pyplot as plt\n'), ((66969, 
66988), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Delta"""'], {}), "('Delta')\n", (66979, 66988), True, 'import matplotlib.pyplot as plt\n'), ((66997, 67009), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (67007, 67009), True, 'import matplotlib.pyplot as plt\n'), ((67018, 67028), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (67026, 67028), True, 'import matplotlib.pyplot as plt\n'), ((67037, 67047), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (67045, 67047), True, 'import matplotlib.pyplot as plt\n'), ((68024, 68050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (68034, 68050), True, 'import matplotlib.pyplot as plt\n'), ((68061, 68123), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'acOptionDelta'], {'label': '"""Binomial tree delta"""'}), "(spotList, acOptionDelta, label='Binomial tree delta')\n", (68069, 68123), True, 'import matplotlib.pyplot as plt\n'), ((68134, 68170), 'matplotlib.pyplot.title', 'plt.title', (['"""Deltas of American Call"""'], {}), "('Deltas of American Call')\n", (68143, 68170), True, 'import matplotlib.pyplot as plt\n'), ((68179, 68205), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (68189, 68205), True, 'import matplotlib.pyplot as plt\n'), ((68214, 68233), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Delta"""'], {}), "('Delta')\n", (68224, 68233), True, 'import matplotlib.pyplot as plt\n'), ((68242, 68254), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (68252, 68254), True, 'import matplotlib.pyplot as plt\n'), ((68263, 68273), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (68271, 68273), True, 'import matplotlib.pyplot as plt\n'), ((68282, 68292), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (68290, 68292), True, 'import matplotlib.pyplot as plt\n'), ((69269, 69295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 
6))\n', (69279, 69295), True, 'import matplotlib.pyplot as plt\n'), ((69306, 69368), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'apOptionDelta'], {'label': '"""Binomial tree delta"""'}), "(spotList, apOptionDelta, label='Binomial tree delta')\n", (69314, 69368), True, 'import matplotlib.pyplot as plt\n'), ((69379, 69414), 'matplotlib.pyplot.title', 'plt.title', (['"""Deltas of American Put"""'], {}), "('Deltas of American Put')\n", (69388, 69414), True, 'import matplotlib.pyplot as plt\n'), ((69423, 69449), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (69433, 69449), True, 'import matplotlib.pyplot as plt\n'), ((69458, 69477), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Delta"""'], {}), "('Delta')\n", (69468, 69477), True, 'import matplotlib.pyplot as plt\n'), ((69486, 69498), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (69496, 69498), True, 'import matplotlib.pyplot as plt\n'), ((69507, 69517), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (69515, 69517), True, 'import matplotlib.pyplot as plt\n'), ((69526, 69536), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (69534, 69536), True, 'import matplotlib.pyplot as plt\n'), ((70832, 70858), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (70842, 70858), True, 'import matplotlib.pyplot as plt\n'), ((70869, 70935), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'ecOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(spotList, ecOptionPriceList, label='Binomial tree price')\n", (70877, 70935), True, 'import matplotlib.pyplot as plt\n'), ((70946, 71014), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'ecOptionPriceBSList'], {'label': '"""Black-Scholes price"""'}), "(spotList, ecOptionPriceBSList, label='Black-Scholes price')\n", (70954, 71014), True, 'import matplotlib.pyplot as plt\n'), ((71025, 71086), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 
'ecIntrinsicsList'], {'label': '"""Intrinsic value"""'}), "(spotList, ecIntrinsicsList, label='Intrinsic value')\n", (71033, 71086), True, 'import matplotlib.pyplot as plt\n'), ((71097, 71132), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of European Call"""'], {}), "('Price of European Call')\n", (71106, 71132), True, 'import matplotlib.pyplot as plt\n'), ((71141, 71167), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (71151, 71167), True, 'import matplotlib.pyplot as plt\n'), ((71176, 71195), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (71186, 71195), True, 'import matplotlib.pyplot as plt\n'), ((71204, 71216), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (71214, 71216), True, 'import matplotlib.pyplot as plt\n'), ((71225, 71235), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (71233, 71235), True, 'import matplotlib.pyplot as plt\n'), ((71244, 71254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (71252, 71254), True, 'import matplotlib.pyplot as plt\n'), ((72550, 72576), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (72560, 72576), True, 'import matplotlib.pyplot as plt\n'), ((72587, 72653), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'epOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(spotList, epOptionPriceList, label='Binomial tree price')\n", (72595, 72653), True, 'import matplotlib.pyplot as plt\n'), ((72664, 72732), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'epOptionPriceBSList'], {'label': '"""Black-Scholes price"""'}), "(spotList, epOptionPriceBSList, label='Black-Scholes price')\n", (72672, 72732), True, 'import matplotlib.pyplot as plt\n'), ((72743, 72804), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'epIntrinsicsList'], {'label': '"""Intrinsic value"""'}), "(spotList, epIntrinsicsList, label='Intrinsic value')\n", (72751, 72804), True, 'import 
matplotlib.pyplot as plt\n'), ((72815, 72849), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of European Put"""'], {}), "('Price of European Put')\n", (72824, 72849), True, 'import matplotlib.pyplot as plt\n'), ((72858, 72884), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (72868, 72884), True, 'import matplotlib.pyplot as plt\n'), ((72893, 72912), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (72903, 72912), True, 'import matplotlib.pyplot as plt\n'), ((72921, 72933), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (72931, 72933), True, 'import matplotlib.pyplot as plt\n'), ((72942, 72952), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (72950, 72952), True, 'import matplotlib.pyplot as plt\n'), ((72961, 72971), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (72969, 72971), True, 'import matplotlib.pyplot as plt\n'), ((74146, 74172), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (74156, 74172), True, 'import matplotlib.pyplot as plt\n'), ((74183, 74249), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'acOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(spotList, acOptionPriceList, label='Binomial tree price')\n", (74191, 74249), True, 'import matplotlib.pyplot as plt\n'), ((74260, 74321), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'acIntrinsicsList'], {'label': '"""Intrinsic value"""'}), "(spotList, acIntrinsicsList, label='Intrinsic value')\n", (74268, 74321), True, 'import matplotlib.pyplot as plt\n'), ((74332, 74367), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of American Call"""'], {}), "('Price of American Call')\n", (74341, 74367), True, 'import matplotlib.pyplot as plt\n'), ((74376, 74402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (74386, 74402), True, 'import matplotlib.pyplot as plt\n'), ((74411, 74430), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (74421, 74430), True, 'import matplotlib.pyplot as plt\n'), ((74439, 74451), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (74449, 74451), True, 'import matplotlib.pyplot as plt\n'), ((74460, 74470), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (74468, 74470), True, 'import matplotlib.pyplot as plt\n'), ((74479, 74489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (74487, 74489), True, 'import matplotlib.pyplot as plt\n'), ((75664, 75690), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (75674, 75690), True, 'import matplotlib.pyplot as plt\n'), ((75701, 75767), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'apOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(spotList, apOptionPriceList, label='Binomial tree price')\n", (75709, 75767), True, 'import matplotlib.pyplot as plt\n'), ((75778, 75839), 'matplotlib.pyplot.plot', 'plt.plot', (['spotList', 'apIntrinsicsList'], {'label': '"""Intrinsic value"""'}), "(spotList, apIntrinsicsList, label='Intrinsic value')\n", (75786, 75839), True, 'import matplotlib.pyplot as plt\n'), ((75850, 75884), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of American Put"""'], {}), "('Price of American Put')\n", (75859, 75884), True, 'import matplotlib.pyplot as plt\n'), ((75893, 75919), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Current spot"""'], {}), "('Current spot')\n", (75903, 75919), True, 'import matplotlib.pyplot as plt\n'), ((75928, 75947), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (75938, 75947), True, 'import matplotlib.pyplot as plt\n'), ((75956, 75968), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (75966, 75968), True, 'import matplotlib.pyplot as plt\n'), ((75977, 75987), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (75985, 75987), True, 'import matplotlib.pyplot as plt\n'), 
((75996, 76006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (76004, 76006), True, 'import matplotlib.pyplot as plt\n'), ((77013, 77039), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (77023, 77039), True, 'import matplotlib.pyplot as plt\n'), ((77050, 77119), 'matplotlib.pyplot.plot', 'plt.plot', (['periodsList', 'ecOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(periodsList, ecOptionPriceList, label='Binomial tree price')\n", (77058, 77119), True, 'import matplotlib.pyplot as plt\n'), ((77130, 77201), 'matplotlib.pyplot.plot', 'plt.plot', (['periodsList', 'ecOptionPriceBSList'], {'label': '"""Black-Scholes price"""'}), "(periodsList, ecOptionPriceBSList, label='Black-Scholes price')\n", (77138, 77201), True, 'import matplotlib.pyplot as plt\n'), ((77212, 77247), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of European Call"""'], {}), "('Price of European Call')\n", (77221, 77247), True, 'import matplotlib.pyplot as plt\n'), ((77256, 77277), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Periods"""'], {}), "('Periods')\n", (77266, 77277), True, 'import matplotlib.pyplot as plt\n'), ((77286, 77305), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (77296, 77305), True, 'import matplotlib.pyplot as plt\n'), ((77314, 77326), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (77324, 77326), True, 'import matplotlib.pyplot as plt\n'), ((77335, 77345), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (77343, 77345), True, 'import matplotlib.pyplot as plt\n'), ((77354, 77364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (77362, 77364), True, 'import matplotlib.pyplot as plt\n'), ((78371, 78397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (78381, 78397), True, 'import matplotlib.pyplot as plt\n'), ((78408, 78477), 'matplotlib.pyplot.plot', 'plt.plot', (['periodsList', 
'epOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(periodsList, epOptionPriceList, label='Binomial tree price')\n", (78416, 78477), True, 'import matplotlib.pyplot as plt\n'), ((78488, 78559), 'matplotlib.pyplot.plot', 'plt.plot', (['periodsList', 'epOptionPriceBSList'], {'label': '"""Black-Scholes price"""'}), "(periodsList, epOptionPriceBSList, label='Black-Scholes price')\n", (78496, 78559), True, 'import matplotlib.pyplot as plt\n'), ((78570, 78604), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of European Put"""'], {}), "('Price of European Put')\n", (78579, 78604), True, 'import matplotlib.pyplot as plt\n'), ((78613, 78634), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Periods"""'], {}), "('Periods')\n", (78623, 78634), True, 'import matplotlib.pyplot as plt\n'), ((78643, 78662), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (78653, 78662), True, 'import matplotlib.pyplot as plt\n'), ((78671, 78683), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (78681, 78683), True, 'import matplotlib.pyplot as plt\n'), ((78692, 78702), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (78700, 78702), True, 'import matplotlib.pyplot as plt\n'), ((78711, 78721), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (78719, 78721), True, 'import matplotlib.pyplot as plt\n'), ((79554, 79580), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (79564, 79580), True, 'import matplotlib.pyplot as plt\n'), ((79591, 79660), 'matplotlib.pyplot.plot', 'plt.plot', (['periodsList', 'acOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(periodsList, acOptionPriceList, label='Binomial tree price')\n", (79599, 79660), True, 'import matplotlib.pyplot as plt\n'), ((79671, 79706), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of American Call"""'], {}), "('Price of American Call')\n", (79680, 79706), True, 'import matplotlib.pyplot as plt\n'), ((79715, 
79736), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Periods"""'], {}), "('Periods')\n", (79725, 79736), True, 'import matplotlib.pyplot as plt\n'), ((79745, 79764), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (79755, 79764), True, 'import matplotlib.pyplot as plt\n'), ((79773, 79785), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (79783, 79785), True, 'import matplotlib.pyplot as plt\n'), ((79794, 79804), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (79802, 79804), True, 'import matplotlib.pyplot as plt\n'), ((79813, 79823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (79821, 79823), True, 'import matplotlib.pyplot as plt\n'), ((80656, 80682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (80666, 80682), True, 'import matplotlib.pyplot as plt\n'), ((80693, 80762), 'matplotlib.pyplot.plot', 'plt.plot', (['periodsList', 'apOptionPriceList'], {'label': '"""Binomial tree price"""'}), "(periodsList, apOptionPriceList, label='Binomial tree price')\n", (80701, 80762), True, 'import matplotlib.pyplot as plt\n'), ((80773, 80807), 'matplotlib.pyplot.title', 'plt.title', (['"""Price of American Put"""'], {}), "('Price of American Put')\n", (80782, 80807), True, 'import matplotlib.pyplot as plt\n'), ((80816, 80837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Periods"""'], {}), "('Periods')\n", (80826, 80837), True, 'import matplotlib.pyplot as plt\n'), ((80846, 80865), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (80856, 80865), True, 'import matplotlib.pyplot as plt\n'), ((80874, 80886), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (80884, 80886), True, 'import matplotlib.pyplot as plt\n'), ((80895, 80905), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (80903, 80905), True, 'import matplotlib.pyplot as plt\n'), ((80914, 80924), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (80922, 
80924), True, 'import matplotlib.pyplot as plt\n'), ((81129, 81153), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (81142, 81153), True, 'import matplotlib.pyplot as plt\n'), ((84932, 84956), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (84945, 84956), True, 'import matplotlib.pyplot as plt\n'), ((92001, 92060), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['filepath', "{'strings_to_numbers': True}"], {}), "(filepath, {'strings_to_numbers': True})\n", (92020, 92060), False, 'import xlsxwriter\n'), ((7464, 7479), 'numpy.exp', 'np.exp', (['(-r * dt)'], {}), '(-r * dt)\n', (7470, 7479), True, 'import numpy as np\n'), ((7506, 7528), 'numpy.exp', 'np.exp', (['(-divyield * dt)'], {}), '(-divyield * dt)\n', (7512, 7528), True, 'import numpy as np\n'), ((7564, 7591), 'numpy.exp', 'np.exp', (['((r - divyield) * dt)'], {}), '((r - divyield) * dt)\n', (7570, 7591), True, 'import numpy as np\n'), ((12256, 12273), 'numpy.split', 'np.split', (['nl', 'ind'], {}), '(nl, ind)\n', (12264, 12273), True, 'import numpy as np\n'), ((12549, 12580), 'numpy.ones', 'np.ones', (['(columnsnr, columnsnr)'], {}), '((columnsnr, columnsnr))\n', (12556, 12580), True, 'import numpy as np\n'), ((12758, 12778), 'numpy.split', 'np.split', (['upind', 'ind'], {}), '(upind, ind)\n', (12766, 12778), True, 'import numpy as np\n'), ((13037, 13059), 'numpy.split', 'np.split', (['downind', 'ind'], {}), '(downind, ind)\n', (13045, 13059), True, 'import numpy as np\n'), ((15093, 15120), 'numpy.triu_indices_from', 'np.triu_indices_from', (['up', '(0)'], {}), '(up, 0)\n', (15113, 15120), True, 'import numpy as np\n'), ((15182, 15209), 'numpy.triu_indices_from', 'np.triu_indices_from', (['ua', '(1)'], {}), '(ua, 1)\n', (15202, 15209), True, 'import numpy as np\n'), ((15229, 15246), 'numpy.arange', 'np.arange', (['colnum'], {}), '(colnum)\n', (15238, 15246), True, 'import numpy as np\n'), ((15249, 15265), 'numpy.ones_like', 
'np.ones_like', (['up'], {}), '(up)\n', (15261, 15265), True, 'import numpy as np\n'), ((15277, 15304), 'numpy.triu_indices_from', 'np.triu_indices_from', (['do', '(1)'], {}), '(do, 1)\n', (15297, 15304), True, 'import numpy as np\n'), ((15342, 15370), 'numpy.triu_indices_from', 'np.triu_indices_from', (['daa', '(1)'], {}), '(daa, 1)\n', (15362, 15370), True, 'import numpy as np\n'), ((15908, 15928), 'numpy.zeros_like', 'np.zeros_like', (['spots'], {}), '(spots)\n', (15921, 15928), True, 'import numpy as np\n'), ((17117, 17228), 'numpy.maximum', 'np.maximum', (['(self.discountRate * (self.q * options[up] + (1 - self.q) * options[down]))', 'checkagainst[col[0]]'], {}), '(self.discountRate * (self.q * options[up] + (1 - self.q) *\n options[down]), checkagainst[col[0]])\n', (17127, 17228), True, 'import numpy as np\n'), ((17351, 17384), 'numpy.tril_indices_from', 'np.tril_indices_from', (['options', '(-1)'], {}), '(options, -1)\n', (17371, 17384), True, 'import numpy as np\n'), ((17417, 17454), 'numpy.tril_indices_from', 'np.tril_indices_from', (['options[1:, 1:]'], {}), '(options[1:, 1:])\n', (17437, 17454), True, 'import numpy as np\n'), ((17479, 17510), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots', '(-1)'], {}), '(spots, -1)\n', (17499, 17510), True, 'import numpy as np\n'), ((17542, 17577), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots[1:, 1:]'], {}), '(spots[1:, 1:])\n', (17562, 17577), True, 'import numpy as np\n'), ((17700, 17737), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots[:-1, :-1]'], {}), '(spots[:-1, :-1])\n', (17720, 17737), True, 'import numpy as np\n'), ((17959, 17996), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots[:-1, :-1]'], {}), '(spots[:-1, :-1])\n', (17979, 17996), True, 'import numpy as np\n'), ((18715, 18747), 'numpy.tril_indices_from', 'np.tril_indices_from', (['updoind[0]'], {}), '(updoind[0])\n', (18735, 18747), True, 'import numpy as np\n'), ((18779, 18811), 
'numpy.tril_indices_from', 'np.tril_indices_from', (['updoind[1]'], {}), '(updoind[1])\n', (18799, 18811), True, 'import numpy as np\n'), ((30582, 30604), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (30590, 30604), True, 'import numpy as np\n'), ((30626, 30648), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (30634, 30648), True, 'import numpy as np\n'), ((31837, 31869), 'numpy.triu_indices_from', 'np.triu_indices_from', (['spotarr', '(1)'], {}), '(spotarr, 1)\n', (31857, 31869), True, 'import numpy as np\n'), ((32116, 32148), 'numpy.tril_indices_from', 'np.tril_indices_from', (['updoind[0]'], {}), '(updoind[0])\n', (32136, 32148), True, 'import numpy as np\n'), ((32180, 32212), 'numpy.tril_indices_from', 'np.tril_indices_from', (['updoind[1]'], {}), '(updoind[1])\n', (32200, 32212), True, 'import numpy as np\n'), ((32509, 32537), 'numpy.tril_indices_from', 'np.tril_indices_from', (['upindF'], {}), '(upindF)\n', (32529, 32537), True, 'import numpy as np\n'), ((32566, 32594), 'numpy.tril_indices_from', 'np.tril_indices_from', (['upindF'], {}), '(upindF)\n', (32586, 32594), True, 'import numpy as np\n'), ((46947, 46980), 'numpy.arange', 'np.arange', (['ups', '(ups + periods + 1)'], {}), '(ups, ups + periods + 1)\n', (46956, 46980), True, 'import numpy as np\n'), ((47270, 47288), 'numpy.array', 'np.array', (['[up, do]'], {}), '([up, do])\n', (47278, 47288), True, 'import numpy as np\n'), ((47425, 47447), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (47433, 47447), True, 'import numpy as np\n'), ((47469, 47491), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (47477, 47491), True, 'import numpy as np\n'), ((50675, 50692), 'numpy.zeros_like', 'np.zeros_like', (['sa'], {}), '(sa)\n', (50688, 50692), True, 'import numpy as np\n'), ((57321, 57355), 'itertools.chain', 'itertools.chain', (['*Nlistarrays[:-1]'], {}), '(*Nlistarrays[:-1])\n', (57336, 57355), False, 'import 
itertools\n'), ((57563, 57597), 'itertools.chain', 'itertools.chain', (['*Nlistarrays[:-1]'], {}), '(*Nlistarrays[:-1])\n', (57578, 57597), False, 'import itertools\n'), ((57944, 57968), 'itertools.chain', 'itertools.chain', (['*toloop'], {}), '(*toloop)\n', (57959, 57968), False, 'import itertools\n'), ((63139, 63170), 'numpy.exp', 'np.exp', (['(-self.divyield * self.T)'], {}), '(-self.divyield * self.T)\n', (63145, 63170), True, 'import numpy as np\n'), ((63173, 63185), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (63181, 63185), False, 'from scipy.stats import norm\n'), ((63996, 64009), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (64004, 64009), False, 'from scipy.stats import norm\n'), ((81630, 81656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (81640, 81656), True, 'import matplotlib.pyplot as plt\n'), ((81676, 81690), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (81684, 81690), False, 'from matplotlib.gridspec import GridSpec\n'), ((81810, 81865), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'timeplot', 'y': 'spotsplot', 'ax': 'ax_scatter'}), '(x=timeplot, y=spotsplot, ax=ax_scatter)\n', (81825, 81865), True, 'import seaborn as sns\n'), ((81884, 81931), 'seaborn.histplot', 'sns.histplot', ([], {'y': 'spots[-1]', 'kde': '(True)', 'ax': 'ax_hist'}), '(y=spots[-1], kde=True, ax=ax_hist)\n', (81896, 81931), True, 'import seaborn as sns\n'), ((82142, 82152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (82150, 82152), True, 'import matplotlib.pyplot as plt\n'), ((89286, 89313), 'os.path.exists', 'os.path.exists', (['self.foldir'], {}), '(self.foldir)\n', (89300, 89313), False, 'import os\n'), ((89327, 89351), 'os.makedirs', 'os.makedirs', (['self.foldir'], {}), '(self.foldir)\n', (89338, 89351), False, 'import os\n'), ((12212, 12237), 'numpy.tril_indices_from', 'np.tril_indices_from', (['arr'], {}), '(arr)\n', (12232, 12237), 
True, 'import numpy as np\n'), ((12599, 12629), 'numpy.triu_indices_from', 'np.triu_indices_from', (['upind', '(0)'], {}), '(upind, 0)\n', (12619, 12629), True, 'import numpy as np\n'), ((12829, 12849), 'numpy.arange', 'np.arange', (['columnsnr'], {}), '(columnsnr)\n', (12838, 12849), True, 'import numpy as np\n'), ((12852, 12883), 'numpy.ones', 'np.ones', (['(columnsnr, columnsnr)'], {}), '((columnsnr, columnsnr))\n', (12859, 12883), True, 'import numpy as np\n'), ((12904, 12936), 'numpy.triu_indices_from', 'np.triu_indices_from', (['downind', '(1)'], {}), '(downind, 1)\n', (12924, 12936), True, 'import numpy as np\n'), ((16017, 16037), 'numpy.zeros_like', 'np.zeros_like', (['spots'], {}), '(spots)\n', (16030, 16037), True, 'import numpy as np\n'), ((16061, 16095), 'numpy.triu_indices_from', 'np.triu_indices_from', (['intrinsic', '(1)'], {}), '(intrinsic, 1)\n', (16081, 16095), True, 'import numpy as np\n'), ((16526, 16560), 'numpy.tril_indices_from', 'np.tril_indices_from', (['checkagainst'], {}), '(checkagainst)\n', (16546, 16560), True, 'import numpy as np\n'), ((47094, 47121), 'numpy.triu_indices_from', 'np.triu_indices_from', (['up', '(1)'], {}), '(up, 1)\n', (47114, 47121), True, 'import numpy as np\n'), ((47145, 47182), 'numpy.arange', 'np.arange', (['downs', '(downs + periods + 1)'], {}), '(downs, downs + periods + 1)\n', (47154, 47182), True, 'import numpy as np\n'), ((47185, 47201), 'numpy.ones_like', 'np.ones_like', (['up'], {}), '(up)\n', (47197, 47201), True, 'import numpy as np\n'), ((47217, 47244), 'numpy.triu_indices_from', 'np.triu_indices_from', (['do', '(1)'], {}), '(do, 1)\n', (47237, 47244), True, 'import numpy as np\n'), ((47749, 47779), 'numpy.arange', 'np.arange', (['(1)', '(self.periods + 1)'], {}), '(1, self.periods + 1)\n', (47758, 47779), True, 'import numpy as np\n'), ((48749, 48792), 'numpy.split', 'np.split', (['arraySorted', 'arraySorted.shape[0]'], {}), '(arraySorted, arraySorted.shape[0])\n', (48757, 48792), True, 'import numpy as 
np\n'), ((50606, 50630), 'numpy.tril_indices_from', 'np.tril_indices_from', (['sa'], {}), '(sa)\n', (50626, 50630), True, 'import numpy as np\n'), ((50709, 50734), 'numpy.tril_indices_from', 'np.tril_indices_from', (['mal'], {}), '(mal)\n', (50729, 50734), True, 'import numpy as np\n'), ((54717, 54828), 'numpy.tril_indices_from', 'np.tril_indices_from', (['mal[startind[0]:startindPadded[1:][0] + 1, :startindPadded[1:][0] + 1 -\n startind[0]]'], {}), '(mal[startind[0]:startindPadded[1:][0] + 1, :\n startindPadded[1:][0] + 1 - startind[0]])\n', (54737, 54828), True, 'import numpy as np\n'), ((55583, 55599), 'numpy.min', 'np.min', (['startcol'], {}), '(startcol)\n', (55589, 55599), True, 'import numpy as np\n'), ((55625, 55673), 'numpy.max', 'np.max', (['treeset[0][treeset[2]:treeset[3] + 1][0]'], {}), '(treeset[0][treeset[2]:treeset[3] + 1][0])\n', (55631, 55673), True, 'import numpy as np\n'), ((56450, 56481), 'numpy.tril_indices', 'np.tril_indices', (['(treeset[1] + 1)'], {}), '(treeset[1] + 1)\n', (56465, 56481), True, 'import numpy as np\n'), ((58862, 58873), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (58870, 58873), True, 'import numpy as np\n'), ((62780, 62800), 'numpy.log', 'np.log', (['(S / strikeBS)'], {}), '(S / strikeBS)\n', (62786, 62800), True, 'import numpy as np\n'), ((62890, 62905), 'numpy.sqrt', 'np.sqrt', (['self.T'], {}), '(self.T)\n', (62897, 62905), True, 'import numpy as np\n'), ((62938, 62953), 'numpy.sqrt', 'np.sqrt', (['self.T'], {}), '(self.T)\n', (62945, 62953), True, 'import numpy as np\n'), ((63014, 63026), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (63022, 63026), False, 'from scipy.stats import norm\n'), ((63089, 63101), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (63097, 63101), False, 'from scipy.stats import norm\n'), ((63594, 63614), 'numpy.log', 'np.log', (['(S / strikeBS)'], {}), '(S / strikeBS)\n', (63600, 63614), True, 'import numpy as np\n'), ((63709, 63724), 'numpy.sqrt', 'np.sqrt', 
(['self.T'], {}), '(self.T)\n', (63716, 63724), True, 'import numpy as np\n'), ((63757, 63772), 'numpy.sqrt', 'np.sqrt', (['self.T'], {}), '(self.T)\n', (63764, 63772), True, 'import numpy as np\n'), ((63833, 63846), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (63841, 63846), False, 'from scipy.stats import norm\n'), ((63909, 63922), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (63917, 63922), False, 'from scipy.stats import norm\n'), ((63962, 63993), 'numpy.exp', 'np.exp', (['(-self.divyield * self.T)'], {}), '(-self.divyield * self.T)\n', (63968, 63993), True, 'import numpy as np\n'), ((82797, 82823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (82807, 82823), True, 'import matplotlib.pyplot as plt\n'), ((82843, 82857), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (82851, 82857), False, 'from matplotlib.gridspec import GridSpec\n'), ((82977, 83047), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'timeplot', 'y': 'spotsplot', 'ax': 'ax_scatter', 'label': '"""Spots"""'}), "(x=timeplot, y=spotsplot, ax=ax_scatter, label='Spots')\n", (82992, 83047), True, 'import seaborn as sns\n'), ((83068, 83141), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'timeplot', 'y': 'Fspotsplot', 'ax': 'ax_scatter', 'label': '"""F-Spots"""'}), "(x=timeplot, y=Fspotsplot, ax=ax_scatter, label='F-Spots')\n", (83083, 83141), True, 'import seaborn as sns\n'), ((83162, 83209), 'seaborn.histplot', 'sns.histplot', ([], {'y': 'spots[-1]', 'kde': '(True)', 'ax': 'ax_hist'}), '(y=spots[-1], kde=True, ax=ax_hist)\n', (83174, 83209), True, 'import seaborn as sns\n'), ((83452, 83462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (83460, 83462), True, 'import matplotlib.pyplot as plt\n'), ((125939, 125978), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (125948, 125978), True, 'import numpy as 
np\n'), ((130821, 130860), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (130830, 130860), True, 'import numpy as np\n'), ((136332, 136360), 'itertools.cycle', 'itertools.cycle', (['spotformats'], {}), '(spotformats)\n', (136347, 136360), False, 'import itertools\n'), ((136394, 136428), 'itertools.cycle', 'itertools.cycle', (['spotPredivformats'], {}), '(spotPredivformats)\n', (136409, 136428), False, 'import itertools\n'), ((136461, 136494), 'itertools.cycle', 'itertools.cycle', (['intrinsicformats'], {}), '(intrinsicformats)\n', (136476, 136494), False, 'import itertools\n'), ((136524, 136554), 'itertools.cycle', 'itertools.cycle', (['optionformats'], {}), '(optionformats)\n', (136539, 136554), False, 'import itertools\n'), ((1288, 1299), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1297, 1299), False, 'import os\n'), ((1366, 1401), 'os.path.join', 'os.path.join', (['curwd', '"""OutputFolder"""'], {}), "(curwd, 'OutputFolder')\n", (1378, 1401), False, 'import os\n'), ((4925, 4952), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (4934, 4952), True, 'import numpy as np\n'), ((9054, 9065), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (9061, 9065), True, 'import numpy as np\n'), ((12470, 12493), 'numpy.arange', 'np.arange', (['(1)', 'columnsnr'], {}), '(1, columnsnr)\n', (12479, 12493), True, 'import numpy as np\n'), ((16745, 16779), 'numpy.tril_indices_from', 'np.tril_indices_from', (['checkagainst'], {}), '(checkagainst)\n', (16765, 16779), True, 'import numpy as np\n'), ((21308, 21353), 'numpy.full', 'np.full', (['(rows, mainobject.periods + 1)', 'None'], {}), '((rows, mainobject.periods + 1), None)\n', (21315, 21353), True, 'import numpy as np\n'), ((24246, 24301), 'numpy.full', 'np.full', (['(self.rows, self.mainobject.periods + 1)', 'None'], {}), '((self.rows, self.mainobject.periods + 1), None)\n', (24253, 24301), True, 'import numpy as np\n'), ((30679, 
30711), 'numpy.subtract.outer', 'np.subtract.outer', (['dt_all', 'divdt'], {}), '(dt_all, divdt)\n', (30696, 30711), True, 'import numpy as np\n'), ((35602, 35647), 'numpy.full', 'np.full', (['(rows, mainobject.periods + 1)', 'None'], {}), '((rows, mainobject.periods + 1), None)\n', (35609, 35647), True, 'import numpy as np\n'), ((39072, 39127), 'numpy.full', 'np.full', (['(self.rows, self.mainobject.periods + 1)', 'None'], {}), '((self.rows, self.mainobject.periods + 1), None)\n', (39079, 39127), True, 'import numpy as np\n'), ((47522, 47554), 'numpy.subtract.outer', 'np.subtract.outer', (['dt_all', 'divdt'], {}), '(dt_all, divdt)\n', (47539, 47554), True, 'import numpy as np\n'), ((48698, 48713), 'numpy.sort', 'np.sort', (['arr', '(0)'], {}), '(arr, 0)\n', (48705, 48713), True, 'import numpy as np\n'), ((51045, 51079), 'numpy.triu_indices_from', 'np.triu_indices_from', (['intrinsic', '(1)'], {}), '(intrinsic, 1)\n', (51065, 51079), True, 'import numpy as np\n'), ((62980, 63011), 'numpy.exp', 'np.exp', (['(-self.divyield * self.T)'], {}), '(-self.divyield * self.T)\n', (62986, 63011), True, 'import numpy as np\n'), ((63062, 63086), 'numpy.exp', 'np.exp', (['(-self.r * self.T)'], {}), '(-self.r * self.T)\n', (63068, 63086), True, 'import numpy as np\n'), ((63806, 63830), 'numpy.exp', 'np.exp', (['(-self.r * self.T)'], {}), '(-self.r * self.T)\n', (63812, 63830), True, 'import numpy as np\n'), ((63875, 63906), 'numpy.exp', 'np.exp', (['(-self.divyield * self.T)'], {}), '(-self.divyield * self.T)\n', (63881, 63906), True, 'import numpy as np\n'), ((83562, 83588), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (83572, 83588), True, 'import matplotlib.pyplot as plt\n'), ((83608, 83622), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(4)', '(4)'], {}), '(4, 4)\n', (83616, 83622), False, 'from matplotlib.gridspec import GridSpec\n'), ((84625, 84671), 'seaborn.histplot', 'sns.histplot', ([], {'y': 'spothist', 'kde': '(True)', 
'ax': 'ax_hist'}), '(y=spothist, kde=True, ax=ax_hist)\n', (84637, 84671), True, 'import seaborn as sns\n'), ((84882, 84892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (84890, 84892), True, 'import matplotlib.pyplot as plt\n'), ((126003, 126025), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (126011, 126025), True, 'import numpy as np\n'), ((126132, 126154), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (126140, 126154), True, 'import numpy as np\n'), ((126188, 126213), 'numpy.array', 'np.array', (['self.treeheader'], {}), '(self.treeheader)\n', (126196, 126213), True, 'import numpy as np\n'), ((130885, 130907), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (130893, 130907), True, 'import numpy as np\n'), ((131014, 131036), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (131022, 131036), True, 'import numpy as np\n'), ((131070, 131095), 'numpy.array', 'np.array', (['self.treeheader'], {}), '(self.treeheader)\n', (131078, 131095), True, 'import numpy as np\n'), ((1513, 1533), 'pathlib.Path', 'pathlib.Path', (['foldir'], {}), '(foldir)\n', (1525, 1533), False, 'import pathlib\n'), ((5288, 5327), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (5297, 5327), True, 'import numpy as np\n'), ((10270, 10279), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (10276, 10279), True, 'import numpy as np\n'), ((10282, 10291), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (10288, 10291), True, 'import numpy as np\n'), ((10300, 10311), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (10307, 10311), True, 'import numpy as np\n'), ((12697, 12724), 'numpy.tril_indices_from', 'np.tril_indices_from', (['upind'], {}), '(upind)\n', (12717, 12724), True, 'import numpy as np\n'), ((12972, 13001), 'numpy.tril_indices_from', 'np.tril_indices_from', (['downind'], {}), '(downind)\n', (12992, 13001), True, 'import numpy as 
np\n'), ((20044, 20071), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots'], {}), '(spots)\n', (20064, 20071), True, 'import numpy as np\n'), ((20124, 20155), 'numpy.tril_indices_from', 'np.tril_indices_from', (['intrinsic'], {}), '(intrinsic)\n', (20144, 20155), True, 'import numpy as np\n'), ((20204, 20233), 'numpy.tril_indices_from', 'np.tril_indices_from', (['options'], {}), '(options)\n', (20224, 20233), True, 'import numpy as np\n'), ((20280, 20308), 'numpy.tril_indices_from', 'np.tril_indices_from', (['deltas'], {}), '(deltas)\n', (20300, 20308), True, 'import numpy as np\n'), ((20353, 20380), 'numpy.tril_indices_from', 'np.tril_indices_from', (['bonds'], {}), '(bonds)\n', (20373, 20380), True, 'import numpy as np\n'), ((20420, 20445), 'numpy.tril_indices_from', 'np.tril_indices_from', (['ups'], {}), '(ups)\n', (20440, 20445), True, 'import numpy as np\n'), ((20489, 20516), 'numpy.tril_indices_from', 'np.tril_indices_from', (['downs'], {}), '(downs)\n', (20509, 20516), True, 'import numpy as np\n'), ((20831, 20860), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (20842, 20860), True, 'import numpy as np\n'), ((20991, 21022), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + """'], {}), "(portstring4, ' + ')\n", (21002, 21022), True, 'import numpy as np\n'), ((22389, 22455), 'numpy.array', 'np.array', (["(['Spot', '[Intrinsic]', '(Premium)'] + [''] * (rows - 3))"], {}), "(['Spot', '[Intrinsic]', '(Premium)'] + [''] * (rows - 3))\n", (22397, 22455), True, 'import numpy as np\n'), ((23041, 23092), 'numpy.array', 'np.array', (["(['Spot', '(Premium)'] + [''] * (rows - 2))"], {}), "(['Spot', '(Premium)'] + [''] * (rows - 2))\n", (23049, 23092), True, 'import numpy as np\n'), ((23823, 23852), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (23834, 23852), True, 'import numpy as np\n'), ((23983, 24014), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + 
"""'], {}), "(portstring4, ' + ')\n", (23994, 24014), True, 'import numpy as np\n'), ((24998, 25075), 'numpy.array', 'np.array', (["(['Spot', '[Intrinsic]', '(Opt = S*∆ + B)'] + [''] * (self.rows - 3))"], {}), "(['Spot', '[Intrinsic]', '(Opt = S*∆ + B)'] + [''] * (self.rows - 3))\n", (25006, 25075), True, 'import numpy as np\n'), ((25459, 25516), 'numpy.array', 'np.array', (["(['Spot', '(Opt = S*∆ + B)'] + [''] * (rows - 2))"], {}), "(['Spot', '(Opt = S*∆ + B)'] + [''] * (rows - 2))\n", (25467, 25516), True, 'import numpy as np\n'), ((27703, 27730), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots'], {}), '(spots)\n', (27723, 27730), True, 'import numpy as np\n'), ((27783, 27814), 'numpy.tril_indices_from', 'np.tril_indices_from', (['intrinsic'], {}), '(intrinsic)\n', (27803, 27814), True, 'import numpy as np\n'), ((27863, 27892), 'numpy.tril_indices_from', 'np.tril_indices_from', (['options'], {}), '(options)\n', (27883, 27892), True, 'import numpy as np\n'), ((27939, 27967), 'numpy.tril_indices_from', 'np.tril_indices_from', (['deltas'], {}), '(deltas)\n', (27959, 27967), True, 'import numpy as np\n'), ((28012, 28039), 'numpy.tril_indices_from', 'np.tril_indices_from', (['bonds'], {}), '(bonds)\n', (28032, 28039), True, 'import numpy as np\n'), ((28079, 28104), 'numpy.tril_indices_from', 'np.tril_indices_from', (['ups'], {}), '(ups)\n', (28099, 28104), True, 'import numpy as np\n'), ((28148, 28175), 'numpy.tril_indices_from', 'np.tril_indices_from', (['downs'], {}), '(downs)\n', (28168, 28175), True, 'import numpy as np\n'), ((28440, 28469), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (28451, 28469), True, 'import numpy as np\n'), ((28600, 28631), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + """'], {}), "(portstring4, ' + ')\n", (28611, 28631), True, 'import numpy as np\n'), ((34266, 34293), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots'], {}), '(spots)\n', (34286, 34293), True, 
'import numpy as np\n'), ((34338, 34365), 'numpy.tril_indices_from', 'np.tril_indices_from', (['Ftree'], {}), '(Ftree)\n', (34358, 34365), True, 'import numpy as np\n'), ((34418, 34449), 'numpy.tril_indices_from', 'np.tril_indices_from', (['intrinsic'], {}), '(intrinsic)\n', (34438, 34449), True, 'import numpy as np\n'), ((34498, 34527), 'numpy.tril_indices_from', 'np.tril_indices_from', (['options'], {}), '(options)\n', (34518, 34527), True, 'import numpy as np\n'), ((34574, 34602), 'numpy.tril_indices_from', 'np.tril_indices_from', (['deltas'], {}), '(deltas)\n', (34594, 34602), True, 'import numpy as np\n'), ((34647, 34674), 'numpy.tril_indices_from', 'np.tril_indices_from', (['bonds'], {}), '(bonds)\n', (34667, 34674), True, 'import numpy as np\n'), ((34714, 34739), 'numpy.tril_indices_from', 'np.tril_indices_from', (['ups'], {}), '(ups)\n', (34734, 34739), True, 'import numpy as np\n'), ((34783, 34810), 'numpy.tril_indices_from', 'np.tril_indices_from', (['downs'], {}), '(downs)\n', (34803, 34810), True, 'import numpy as np\n'), ((35125, 35154), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (35136, 35154), True, 'import numpy as np\n'), ((35285, 35316), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + """'], {}), "(portstring4, ' + ')\n", (35296, 35316), True, 'import numpy as np\n'), ((37038, 37116), 'numpy.array', 'np.array', (["(['Spot', '{F-Spot}', '[Intrinsic]', '(Premium)'] + [''] * (rows - 4))"], {}), "(['Spot', '{F-Spot}', '[Intrinsic]', '(Premium)'] + [''] * (rows - 4))\n", (37046, 37116), True, 'import numpy as np\n'), ((37702, 37765), 'numpy.array', 'np.array', (["(['Spot', '{F-Spot}', '(Premium)'] + [''] * (rows - 3))"], {}), "(['Spot', '{F-Spot}', '(Premium)'] + [''] * (rows - 3))\n", (37710, 37765), True, 'import numpy as np\n'), ((38649, 38678), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (38660, 38678), True, 'import numpy as np\n'), ((38809, 
38840), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + """'], {}), "(portstring4, ' + ')\n", (38820, 38840), True, 'import numpy as np\n'), ((40194, 40288), 'numpy.array', 'np.array', (["(['Spot', '{F-Spot}', '[Intrinsic]', '(Opt = S*∆ + B)'] + [''] * (self.rows -\n 4))"], {}), "(['Spot', '{F-Spot}', '[Intrinsic]', '(Opt = S*∆ + B)'] + [''] * (\n self.rows - 4))\n", (40202, 40288), True, 'import numpy as np\n'), ((40667, 40741), 'numpy.array', 'np.array', (["(['Spot', '{F-Spot}', '(Opt = S*∆ + B)'] + [''] * (self.rows - 3))"], {}), "(['Spot', '{F-Spot}', '(Opt = S*∆ + B)'] + [''] * (self.rows - 3))\n", (40675, 40741), True, 'import numpy as np\n'), ((43403, 43430), 'numpy.tril_indices_from', 'np.tril_indices_from', (['spots'], {}), '(spots)\n', (43423, 43430), True, 'import numpy as np\n'), ((43475, 43502), 'numpy.tril_indices_from', 'np.tril_indices_from', (['Ftree'], {}), '(Ftree)\n', (43495, 43502), True, 'import numpy as np\n'), ((43555, 43586), 'numpy.tril_indices_from', 'np.tril_indices_from', (['intrinsic'], {}), '(intrinsic)\n', (43575, 43586), True, 'import numpy as np\n'), ((43635, 43664), 'numpy.tril_indices_from', 'np.tril_indices_from', (['options'], {}), '(options)\n', (43655, 43664), True, 'import numpy as np\n'), ((43711, 43739), 'numpy.tril_indices_from', 'np.tril_indices_from', (['deltas'], {}), '(deltas)\n', (43731, 43739), True, 'import numpy as np\n'), ((43784, 43811), 'numpy.tril_indices_from', 'np.tril_indices_from', (['bonds'], {}), '(bonds)\n', (43804, 43811), True, 'import numpy as np\n'), ((43851, 43876), 'numpy.tril_indices_from', 'np.tril_indices_from', (['ups'], {}), '(ups)\n', (43871, 43876), True, 'import numpy as np\n'), ((43920, 43947), 'numpy.tril_indices_from', 'np.tril_indices_from', (['downs'], {}), '(downs)\n', (43940, 43947), True, 'import numpy as np\n'), ((44509, 44538), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (44520, 44538), True, 'import numpy as np\n'), 
((44669, 44700), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + """'], {}), "(portstring4, ' + ')\n", (44680, 44700), True, 'import numpy as np\n'), ((48526, 48550), 'itertools.chain', 'itertools.chain', (['*nlarrs'], {}), '(*nlarrs)\n', (48541, 48550), False, 'import itertools\n'), ((48620, 48635), 'numpy.sort', 'np.sort', (['arr', '(0)'], {}), '(arr, 0)\n', (48627, 48635), True, 'import numpy as np\n'), ((50850, 50866), 'numpy.zeros_like', 'np.zeros_like', (['s'], {}), '(s)\n', (50863, 50866), True, 'import numpy as np\n'), ((51909, 51930), 'numpy.array', 'np.array', (['options[-1]'], {}), '(options[-1])\n', (51917, 51930), True, 'import numpy as np\n'), ((53672, 53709), 'numpy.triu_indices_from', 'np.triu_indices_from', (['treerowsTemp', '(1)'], {}), '(treerowsTemp, 1)\n', (53692, 53709), True, 'import numpy as np\n'), ((54331, 54394), 'numpy.tril_indices_from', 'np.tril_indices_from', (['mal[start:end + 1, i:end + 1 - start + i]'], {}), '(mal[start:end + 1, i:end + 1 - start + i])\n', (54351, 54394), True, 'import numpy as np\n'), ((54472, 54490), 'numpy.array', 'np.array', (['treelist'], {}), '(treelist)\n', (54480, 54490), True, 'import numpy as np\n'), ((58016, 58052), 'numpy.tril_indices_from', 'np.tril_indices_from', (['tree[:-1, :-1]'], {}), '(tree[:-1, :-1])\n', (58036, 58052), True, 'import numpy as np\n'), ((58133, 58159), 'numpy.tril_indices_from', 'np.tril_indices_from', (['tree'], {}), '(tree)\n', (58153, 58159), True, 'import numpy as np\n'), ((60451, 60480), 'numpy.char.add', 'np.char.add', (['portstring2', '"""*"""'], {}), "(portstring2, '*')\n", (60462, 60480), True, 'import numpy as np\n'), ((60605, 60636), 'numpy.char.add', 'np.char.add', (['portstring4', '""" + """'], {}), "(portstring4, ' + ')\n", (60616, 60636), True, 'import numpy as np\n'), ((64680, 64702), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (64688, 64702), True, 'import numpy as np\n'), ((66163, 66185), 'numpy.array', 'np.array', 
(['self.discdiv'], {}), '(self.discdiv)\n', (66171, 66185), True, 'import numpy as np\n'), ((67622, 67644), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (67630, 67644), True, 'import numpy as np\n'), ((68867, 68889), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (68875, 68889), True, 'import numpy as np\n'), ((70177, 70199), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (70185, 70199), True, 'import numpy as np\n'), ((71895, 71917), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (71903, 71917), True, 'import numpy as np\n'), ((73579, 73601), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (73587, 73601), True, 'import numpy as np\n'), ((75097, 75119), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (75105, 75119), True, 'import numpy as np\n'), ((102884, 102923), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (102893, 102923), True, 'import numpy as np\n'), ((10382, 10391), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (10388, 10391), True, 'import numpy as np\n'), ((10394, 10403), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (10400, 10403), True, 'import numpy as np\n'), ((10412, 10423), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (10419, 10423), True, 'import numpy as np\n'), ((22325, 22352), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (22333, 22352), True, 'import numpy as np\n'), ((22977, 23004), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (22985, 23004), True, 'import numpy as np\n'), ((24934, 24961), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (24942, 24961), True, 'import numpy as np\n'), ((25395, 25422), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (25403, 25422), True, 'import numpy as 
np\n'), ((36974, 37001), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (36982, 37001), True, 'import numpy as np\n'), ((37638, 37665), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (37646, 37665), True, 'import numpy as np\n'), ((40130, 40157), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (40138, 40157), True, 'import numpy as np\n'), ((40603, 40630), 'numpy.where', 'np.where', (['(dfarr == NoneType)'], {}), '(dfarr == NoneType)\n', (40611, 40630), True, 'import numpy as np\n'), ((48841, 48854), 'numpy.squeeze', 'np.squeeze', (['x'], {}), '(x)\n', (48851, 48854), True, 'import numpy as np\n'), ((50980, 50996), 'numpy.zeros_like', 'np.zeros_like', (['s'], {}), '(s)\n', (50993, 50996), True, 'import numpy as np\n'), ((55519, 55541), 'numpy.array', 'np.array', (['treecols[-1]'], {}), '(treecols[-1])\n', (55527, 55541), True, 'import numpy as np\n'), ((56166, 56190), 'numpy.array', 'np.array', (['colcluster[-1]'], {}), '(colcluster[-1])\n', (56174, 56190), True, 'import numpy as np\n'), ((84418, 84473), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': 'timeplot', 'y': 'spotsplot', 'ax': 'ax_scatter'}), '(x=timeplot, y=spotsplot, ax=ax_scatter)\n', (84433, 84473), True, 'import seaborn as sns\n'), ((102952, 102974), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (102960, 102974), True, 'import numpy as np\n'), ((103092, 103117), 'numpy.array', 'np.array', (['self.treeheader'], {}), '(self.treeheader)\n', (103100, 103117), True, 'import numpy as np\n'), ((103157, 103179), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (103165, 103179), True, 'import numpy as np\n'), ((104045, 104084), 'numpy.arange', 'np.arange', (['(0)', '(self.T + self.dt)', 'self.dt'], {}), '(0, self.T + self.dt, self.dt)\n', (104054, 104084), True, 'import numpy as np\n'), ((117324, 117352), 'itertools.cycle', 'itertools.cycle', 
(['spotformats'], {}), '(spotformats)\n', (117339, 117352), False, 'import itertools\n'), ((117390, 117424), 'itertools.cycle', 'itertools.cycle', (['spotPredivformats'], {}), '(spotPredivformats)\n', (117405, 117424), False, 'import itertools\n'), ((117461, 117494), 'itertools.cycle', 'itertools.cycle', (['intrinsicformats'], {}), '(intrinsicformats)\n', (117476, 117494), False, 'import itertools\n'), ((117528, 117558), 'itertools.cycle', 'itertools.cycle', (['optionformats'], {}), '(optionformats)\n', (117543, 117558), False, 'import itertools\n'), ((126064, 126096), 'numpy.subtract.outer', 'np.subtract.outer', (['dt_all', 'divdt'], {}), '(dt_all, divdt)\n', (126081, 126096), True, 'import numpy as np\n'), ((130946, 130978), 'numpy.subtract.outer', 'np.subtract.outer', (['dt_all', 'divdt'], {}), '(dt_all, divdt)\n', (130963, 130978), True, 'import numpy as np\n'), ((5632, 5671), 'numpy.array', 'np.array', (["(['/365'] * (self.periods + 1))"], {}), "(['/365'] * (self.periods + 1))\n", (5640, 5671), True, 'import numpy as np\n'), ((10473, 10482), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (10479, 10482), True, 'import numpy as np\n'), ((10505, 10514), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (10511, 10514), True, 'import numpy as np\n'), ((10523, 10534), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (10530, 10534), True, 'import numpy as np\n'), ((56861, 56903), 'numpy.zeros', 'np.zeros', (['(treeset[1] + 1, treeset[1] + 1)'], {}), '((treeset[1] + 1, treeset[1] + 1))\n', (56869, 56903), True, 'import numpy as np\n'), ((84569, 84595), 'numpy.array', 'np.array', (['self.spotarr[-1]'], {}), '(self.spotarr[-1])\n', (84577, 84595), True, 'import numpy as np\n'), ((87210, 87234), 'numpy.tril_indices_from', 'np.tril_indices_from', (['ul'], {}), '(ul)\n', (87230, 87234), True, 'import numpy as np\n'), ((87269, 87293), 'numpy.tril_indices_from', 'np.tril_indices_from', (['dl'], {}), '(dl)\n', (87289, 87293), True, 'import numpy as np\n'), ((104113, 104135), 
'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (104121, 104135), True, 'import numpy as np\n'), ((104250, 104272), 'numpy.array', 'np.array', (['self.discdiv'], {}), '(self.discdiv)\n', (104258, 104272), True, 'import numpy as np\n'), ((104310, 104335), 'numpy.array', 'np.array', (['self.treeheader'], {}), '(self.treeheader)\n', (104318, 104335), True, 'import numpy as np\n'), ((5929, 5967), 'numpy.array', 'np.array', (["(['/52'] * (self.periods + 1))"], {}), "(['/52'] * (self.periods + 1))\n", (5937, 5967), True, 'import numpy as np\n'), ((56764, 56798), 'numpy.array', 'np.array', (['colcluster'], {'dtype': 'object'}), '(colcluster, dtype=object)\n', (56772, 56798), True, 'import numpy as np\n'), ((86999, 87034), 'numpy.tril_indices_from', 'np.tril_indices_from', (['ull[:-1, :-1]'], {}), '(ull[:-1, :-1])\n', (87019, 87034), True, 'import numpy as np\n'), ((87084, 87119), 'numpy.tril_indices_from', 'np.tril_indices_from', (['dll[:-1, :-1]'], {}), '(dll[:-1, :-1])\n', (87104, 87119), True, 'import numpy as np\n'), ((103017, 103049), 'numpy.subtract.outer', 'np.subtract.outer', (['dt_all', 'divdt'], {}), '(dt_all, divdt)\n', (103034, 103049), True, 'import numpy as np\n'), ((6225, 6263), 'numpy.array', 'np.array', (["(['/12'] * (self.periods + 1))"], {}), "(['/12'] * (self.periods + 1))\n", (6233, 6263), True, 'import numpy as np\n'), ((10634, 10645), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (10641, 10645), True, 'import numpy as np\n'), ((56258, 56290), 'numpy.array', 'np.array', (['([start] * row.shape[0])'], {}), '([start] * row.shape[0])\n', (56266, 56290), True, 'import numpy as np\n'), ((104178, 104210), 'numpy.subtract.outer', 'np.subtract.outer', (['dt_all', 'divdt'], {}), '(dt_all, divdt)\n', (104195, 104210), True, 'import numpy as np\n'), ((5532, 5559), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (5541, 5559), True, 'import numpy as np\n'), ((6519, 6558), 'numpy.array', 'np.array', 
(["(['/365'] * (self.periods + 1))"], {}), "(['/365'] * (self.periods + 1))\n", (6527, 6558), True, 'import numpy as np\n'), ((10746, 10757), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (10753, 10757), True, 'import numpy as np\n'), ((10812, 10821), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (10818, 10821), True, 'import numpy as np\n'), ((10824, 10833), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (10830, 10833), True, 'import numpy as np\n'), ((10842, 10853), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (10849, 10853), True, 'import numpy as np\n'), ((5829, 5856), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (5838, 5856), True, 'import numpy as np\n'), ((6813, 6851), 'numpy.array', 'np.array', (["(['/52'] * (self.periods + 1))"], {}), "(['/52'] * (self.periods + 1))\n", (6821, 6851), True, 'import numpy as np\n'), ((6125, 6152), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (6134, 6152), True, 'import numpy as np\n'), ((7106, 7144), 'numpy.array', 'np.array', (["(['/12'] * (self.periods + 1))"], {}), "(['/12'] * (self.periods + 1))\n", (7114, 7144), True, 'import numpy as np\n'), ((55820, 55889), 'numpy.array', 'np.array', (['([start] * treeset[0][treeset[2]:treeset[3] + 1][0].shape[0])'], {}), '([start] * treeset[0][treeset[2]:treeset[3] + 1][0].shape[0])\n', (55828, 55889), True, 'import numpy as np\n'), ((6421, 6448), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (6430, 6448), True, 'import numpy as np\n'), ((6715, 6742), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (6724, 6742), True, 'import numpy as np\n'), ((7008, 7035), 'numpy.arange', 'np.arange', (['(self.periods + 1)'], {}), '(self.periods + 1)\n', (7017, 7035), True, 'import numpy as np\n')] |
"""Lite graph objects used by pecanpy."""
import numpy as np
from numba import boolean, jit
class SparseGraph:
    """Sparse Graph object that stores graph as adjacency list.
    Note:
        By default the ``SparseGraph`` object converts the data to Compact
        Sparse Row (csr) format after reading data from an edge list file
        (``.edg``). This format enables more cache optimized computation.
    Examples:
        Read ``.edg`` file and create ``SparseGraph`` object using ``.read_edg``
        method.
        >>> from pecanpy.graph import SparseGraph
        >>>
        >>> g = SparseGraph() # initialize SparseGraph object
        >>> g.read_edg(path_to_edg_file, weighted=True, directed=False) # read graph from edgelist
        >>>
        >>> dense_mat = g.to_dense() # convert to dense adjacency matrix
        >>>
    """
    def __init__(self):
        """Initialize SparseGraph object."""
        # Before ``to_csr``: list of {neighbor_index: weight} dicts, one per node.
        # After ``to_csr``: flat float64 array of edge weights (csr "data").
        self.data = []
        self.indptr = None  # csr row pointers, populated by ``to_csr``
        self.indices = None  # csr column indices, populated by ``to_csr``
        self.IDlst = []  # node IDs ordered by their assigned integer index
        self.IDmap = {} # id -> index
    def read_edg(self, edg_fp, weighted, directed, csr=True):
        """Read an edgelist file and create sparse graph.
        Note:
            Implicitly discard zero weighted edges; if the same edge is defined
            multiple times with different edge weights, then the last specified
            weight will be used (warning for such behavior will be printed)
        Args:
            edg_fp (str): path to edgelist file, where the file is tab
                seperated and contains 2 or 3 columns depending on whether
                the input graph is weighted, where the the first column
                contains the source nodes and the second column contains the
                destination nodes that interact with the corresponding source
                nodes.
            weighted (bool): whether the graph is weighted. If unweighted,
                only two columns are expected in the edgelist file, and the
                edge weights are implicitely set to 1 for all interactions. If
                weighted, a third column encoding the weight of the interaction
                in numeric value is expected.
            directed (bool): whether the graph is directed, if undirected, the
                edge connecting from destination node to source node is created
                with same edge weight from source node to destination node.
            csr (bool): whether or not to convert to compact sparse row format
                after finished reading the whole edge list for a more compact
                storage and more optimized cache utilization.
        """
        current_node = 0
        with open(edg_fp, "r") as f:
            for line in f:
                if weighted:
                    id1, id2, weight = line.split("\t")
                    weight = float(weight)
                    if weight == 0:
                        # zero-weight edges are treated as non-edges
                        continue
                else:
                    # unweighted: ignore any extra columns beyond the first two
                    terms = line.split("\t")
                    id1, id2 = terms[0], terms[1]
                    weight = float(1)
                id1 = id1.strip()
                id2 = id2.strip()
                # check if ID exist, add to IDmap if not
                for id_ in id1, id2:
                    if id_ not in self.IDmap:
                        self.IDmap[id_] = current_node
                        self.data.append({})
                        current_node += 1
                idx1, idx2 = self.IDmap[id1], self.IDmap[id2]
                # check if edge exists
                if idx2 in self.data[idx1]:
                    if self.data[idx1][idx2] != weight:
                        print(f"WARNING: edge from {id1} to {id2} exists, with "
                              f"value of {self.data[idx1][idx2]:.2f}. "
                              f"Now overwrite to {weight:.2f}.")
                # update edge weight
                self.data[idx1][idx2] = weight
                if not directed:
                    self.data[idx2][idx1] = weight
        # IDlst is ordered by the node indices assigned during the scan
        self.IDlst = sorted(self.IDmap, key=self.IDmap.get)
        if csr:
            self.to_csr()
    def from_mat(self, adj_mat, ids):
        """Construct graph using adjacency matrix and node ids.
        Args:
            adj_mat(:obj:`numpy.ndarray`): 2D numpy array of adjacency matrix
            ids(:obj:`list` of str): node ID list
        """
        data = [] # construct edge list
        for row in adj_mat:
            data.append({})
            for j, weight in enumerate(row):
                if weight != 0:
                    data[-1][j] = weight
        # save edgelist and id data and convert to csr format
        self.data, self.IDlst = data, ids
        self.IDmap = {j: i for i, j in enumerate(ids)}
        self.to_csr()
    def get_has_nbrs(self):
        """Wrap ``has_nbrs``."""
        # bind indptr locally so the jit-compiled closure does not capture self
        indptr = self.indptr
        @jit(nopython=True, nogil=True)
        def has_nbrs(idx):
            # a node has neighbors iff its csr row is non-empty
            return indptr[idx] != indptr[idx + 1]
        return has_nbrs
    def get_average_weights(self):
        """Compute average edge weights."""
        # NOTE: requires csr format, i.e. ``to_csr`` must have been called
        data = self.data
        indptr = self.indptr
        num_nodes = len(self.IDlst)
        average_weight_ary = np.zeros(num_nodes, dtype=np.float64)
        for idx in range(num_nodes):
            average_weight_ary[idx] = data[indptr[idx]: indptr[idx + 1]].mean()
        return average_weight_ary
    @staticmethod
    @jit(nopython=True, nogil=True)
    def get_normalized_probs(data, indices, indptr, p, q, cur_idx, prev_idx, average_weight_ary):
        """Calculate node2vec transition probabilities.
        Calculate 2nd order transition probabilities by first finidng the
        neighbors of the current state that are not reachable from the previous
        state, and devide the according edge weights by the in-out parameter
        ``q``. Then devide the edge weight from previous state by the return
        parameter ``p``. Finally, the transition probabilities are computed by
        normalizing the biased edge weights.
        Note:
            If ``prev_idx`` present, calculate 2nd order biased transition,
            otherwise calculate 1st order transition.
        """
        def get_nbrs_idx(idx):
            return indices[indptr[idx]: indptr[idx + 1]]
        def get_nbrs_weight(idx):
            return data[indptr[idx]: indptr[idx + 1]].copy()
        nbrs_idx = get_nbrs_idx(cur_idx)
        unnormalized_probs = get_nbrs_weight(cur_idx)
        if prev_idx is not None: # 2nd order biased walk
            prev_ptr = np.where(nbrs_idx == prev_idx)[0] # find previous state index
            src_nbrs_idx = get_nbrs_idx(prev_idx) # neighbors of previous state
            non_com_nbr = isnotin(nbrs_idx, src_nbrs_idx) # neighbors of current but not previous
            non_com_nbr[prev_ptr] = False # exclude previous state from out biases
            unnormalized_probs[non_com_nbr] /= q # apply out biases
            unnormalized_probs[prev_ptr] /= p # apply the return bias
        normalized_probs = unnormalized_probs / unnormalized_probs.sum()
        return normalized_probs
    @staticmethod
    @jit(nopython=True, nogil=True)
    def get_extended_normalized_probs(data, indices, indptr, p, q, cur_idx, prev_idx, average_weight_ary):
        """Calculate node2vec+ transition probabilities."""
        def get_nbrs_idx(idx):
            return indices[indptr[idx]: indptr[idx + 1]]
        def get_nbrs_weight(idx):
            return data[indptr[idx]: indptr[idx + 1]].copy()
        nbrs_idx = get_nbrs_idx(cur_idx)
        unnormalized_probs = get_nbrs_weight(cur_idx)
        if prev_idx is not None: # 2nd order biased walk
            prev_ptr = np.where(nbrs_idx == prev_idx)[0] # find previous state index
            src_nbrs_idx = get_nbrs_idx(prev_idx) # neighbors of previous state
            out_ind, t = isnotin_extended(nbrs_idx, src_nbrs_idx,
                                          get_nbrs_weight(prev_idx),
                                          average_weight_ary) # determine out edges
            out_ind[prev_ptr] = False # exclude previous state from out biases
            # compute out biases
            alpha = (1 / q + (1 - 1 / q) * t[out_ind])
            # surpress noisy edges
            alpha[unnormalized_probs[out_ind] < average_weight_ary[cur_idx]] = np.minimum(1, 1 / q)
            unnormalized_probs[out_ind] *= alpha # apply out biases
            unnormalized_probs[prev_ptr] /= p # apply the return bias
        normalized_probs = unnormalized_probs / unnormalized_probs.sum()
        return normalized_probs
    def to_csr(self):
        """Construct compressed sparse row matrix."""
        indptr = np.zeros(len(self.IDlst) + 1, dtype=np.uint32)
        for i, row_data in enumerate(self.data):
            indptr[i + 1] = indptr[i] + len(row_data)
        # last element of indptr indicates the total number of nonzero entries
        indices = np.zeros(indptr[-1], dtype=np.uint32)
        data = np.zeros(indptr[-1], dtype=np.float64)
        # iterate in reverse and pop so the adjacency-list memory is released
        # as the csr arrays are filled
        for i in reversed(range(len(self.data))):
            start = indptr[i]
            end = indptr[i + 1]
            tmp = self.data.pop()
            sorted_keys = sorted(tmp)
            indices[start:end] = np.fromiter(sorted_keys, dtype=np.uint32)
            data[start:end] = np.fromiter(map(tmp.get, sorted_keys), dtype=np.float64)
        self.indptr = indptr
        self.data = data
        self.indices = indices
    def to_dense(self):
        """Construct dense adjacency matrix.
        Note:
            This method does not return DenseGraph object, but instead return
            dense adjacency matrix as ``numpy.ndarray``, the index is the same
            as that of IDlst.
        Return:
            numpy.ndarray: full adjacency matrix indexed by IDmap as 2d numpy
            array.
        """
        n_nodes = len(self.IDlst)
        mat = np.zeros((n_nodes, n_nodes))
        for src_node, src_nbrs in enumerate(self.data):
            for dst_node in src_nbrs:
                mat[src_node, dst_node] = src_nbrs[dst_node]
        return mat
class DenseGraph:
    """Dense Graph object that stores graph as array.
    Examples:
        Read ``.npz`` files and create ``DenseGraph`` object using ``.read_npz``
        method.
        >>> from pecanpy.graph import DenseGraph
        >>> g = DenseGraph() # initialize DenseGraph object
        >>> g.read_npz(paht_to_npz_file, weighted=True, directed=False) # read graph from npz
    Read ``.edg`` files and create ``DenseGraph`` object using ``.read_edg``
    method.
        >>> from pecanpy.graph import DenseGraph
        >>> g = DenseGraph() # initialize DenseGraph object
        >>> g.read_edg(path_to_edg_file, weighted=True, directed=False) # read graph from edgelist
        >>>
        >>> g.save(npz_outpath) # save the network as npz file, which could be loaded faster if network is dense
        >>>
    """
    def __init__(self):
        """Initialize DenseGraph object."""
        self.data = None  # dense adjacency matrix (2d numpy array)
        self.nonzero = None  # boolean mask of existing edges (data != 0)
        self.IDlst = []  # node IDs ordered by their matrix index
        self.IDmap = {} # id -> index
    def read_npz(self, npz_fp, weighted, directed):
        """Read ``.npz`` file and create dense graph.
        Args:
            npz_fp (str): path to ``.npz`` file.
            weighted (bool): whether the graph is weighted, if unweighted,
                all none zero weights will be converted to 1.
            directed (bool): not used, for compatibility with ``SparseGraph``.
        """
        raw = np.load(npz_fp)
        self.data = raw["data"]
        self.nonzero = self.data != 0
        if not weighted: # convert edge weights to binary
            self.data = self.nonzero * 1
        self.IDlst = list(raw["IDs"])
        self.IDmap = {j: i for i, j in enumerate(self.IDlst)}
    def read_edg(self, edg_fp, weighted, directed):
        """Read an edgelist file and construct dense graph."""
        # delegate parsing to SparseGraph, then densify (csr=False keeps the
        # adjacency-list form needed by ``to_dense``)
        sparse_graph = SparseGraph()
        sparse_graph.read_edg(edg_fp, weighted, directed, csr=False)
        self.IDlst = sparse_graph.IDlst
        self.IDmap = sparse_graph.IDmap
        self.data = sparse_graph.to_dense()
        self.nonzero = self.data != 0
    def from_mat(self, adj_mat, ids):
        """Construct graph using adjacency matrix and node ids.
        Args:
            adj_mat(:obj:`numpy.ndarray`): 2D numpy array of adjacency matrix
            ids(:obj:`list` of str): node ID list
        """
        self.data = adj_mat
        self.nonzero = adj_mat != 0
        self.IDlst = ids
        self.IDmap = {j: i for i, j in enumerate(self.IDlst)}
    def save(self, fp):
        """Save as ``.npz`` file."""
        np.savez(fp, data=self.data, IDs=self.IDlst)
    def get_average_weights(self):
        """Compute average edge weights."""
        # per-node total weight divided by the number of neighbors
        deg_ary = self.data.sum(axis=1)
        n_nbrs_ary = self.nonzero.sum(axis=1)
        return deg_ary / n_nbrs_ary
    def get_has_nbrs(self):
        """Wrap ``has_nbrs``."""
        # bind nonzero locally so the jit-compiled closure does not capture self
        nonzero = self.nonzero
        @jit(nopython=True, nogil=True)
        def has_nbrs(idx):
            for j in range(nonzero.shape[1]):
                if nonzero[idx, j]:
                    return True
            return False
        return has_nbrs
    @staticmethod
    @jit(nopython=True, nogil=True)
    def get_normalized_probs(data, nonzero, p, q, cur_idx, prev_idx, average_weight_ary):
        """Calculate node2vec transition probabilities.
        Calculate 2nd order transition probabilities by first finidng the
        neighbors of the current state that are not reachable from the previous
        state, and devide the according edge weights by the in-out parameter
        ``q``. Then devide the edge weight from previous state by the return
        parameter ``p``. Finally, the transition probabilities are computed by
        normalizing the biased edge weights.
        Note:
            If ``prev_idx`` present, calculate 2nd order biased transition,
            otherwise calculate 1st order transition.
        """
        nbrs_ind = nonzero[cur_idx]
        unnormalized_probs = data[cur_idx].copy()
        if prev_idx is not None: # 2nd order biased walks
            non_com_nbr = np.logical_and(nbrs_ind, ~nonzero[prev_idx]) # nbrs of cur but not prev
            non_com_nbr[prev_idx] = False # exclude previous state from out biases
            unnormalized_probs[non_com_nbr] /= q # apply out biases
            unnormalized_probs[prev_idx] /= p # apply the return bias
        unnormalized_probs = unnormalized_probs[nbrs_ind]
        normalized_probs = unnormalized_probs / unnormalized_probs.sum()
        return normalized_probs
    @staticmethod
    @jit(nopython=True, nogil=True)
    def get_extended_normalized_probs(data, nonzero, p, q, cur_idx, prev_idx, average_weight_ary):
        """Calculate node2vec+ transition probabilities."""
        cur_nbrs_ind = nonzero[cur_idx]
        unnormalized_probs = data[cur_idx].copy()
        if prev_idx is not None: # 2nd order biased walks
            prev_nbrs_weight = data[prev_idx].copy()
            # out edges: neighbors of current whose edge to the previous state
            # is absent or weaker than that node's average edge weight
            inout_ind = cur_nbrs_ind & (prev_nbrs_weight < average_weight_ary)
            inout_ind[prev_idx] = False # exclude previous state from out biases
            # print("CURRENT: ", cur_idx)
            # print("INOUT: ", np.where(inout_ind)[0])
            # print("NUM INOUT: ", inout_ind.sum(), "\n")
            t = prev_nbrs_weight[inout_ind] / average_weight_ary[inout_ind]
            # b = 1; t = b * t / (1 - (b - 1) * t) # optional nonlinear parameterization
            # compute out biases
            alpha = 1 / q + (1 - 1 / q) * t
            # suppress noisy edges
            alpha[unnormalized_probs[inout_ind] < average_weight_ary[cur_idx]] = np.minimum(1, 1 / q)
            unnormalized_probs[inout_ind] *= alpha # apply out biases
            unnormalized_probs[prev_idx] /= p # apply the return bias
        unnormalized_probs = unnormalized_probs[cur_nbrs_ind]
        normalized_probs = unnormalized_probs / unnormalized_probs.sum()
        return normalized_probs
@jit(nopython=True, nogil=True)
def isnotin(ptr_ary1, ptr_ary2):
    """Flag neighbors of the current state that node2vec treats as out edges.

    Both arguments are ascendingly sorted arrays of neighbor pointers:
    ``ptr_ary1`` for the current state and ``ptr_ary2`` for the previous
    state. An entry of ``ptr_ary1`` is an "out edge" when it does not also
    appear in ``ptr_ary2``, i.e. when the neighbor is not shared with the
    previous state. Because both arrays are sorted, a single forward merge
    pass over the two arrays suffices instead of a nested membership scan.

    Note:
        The pointer to the previous state itself is NOT removed here; the
        caller (``get_normalized_probs``) masks it out afterwards.

    Args:
        ptr_ary1 (:obj:`numpy.ndarray` of :obj:`uint32`): sorted neighbor
            pointers of the current state
        ptr_ary2 (:obj:`numpy.ndarray` of :obj:`uint32`): sorted neighbor
            pointers of the previous state

    Returns:
        Boolean indicator array aligned with ``ptr_ary1``; True marks an
        out edge.
    """
    out_ind = np.ones(ptr_ary1.size, dtype=boolean)
    j = 0
    n2 = ptr_ary2.size
    for i in range(ptr_ary1.size):
        val = ptr_ary1[i]
        # advance the second cursor past every pointer smaller than val
        while j < n2 and ptr_ary2[j] < val:
            j += 1
        if j == n2:
            # ptr_ary2 exhausted: the remaining entries are all non-common
            break
        if ptr_ary2[j] == val:
            # shared neighbor -> not an out edge; consume the match
            out_ind[i] = False
            j += 1
    return out_ind
@jit(nopython=True, nogil=True)
def isnotin_extended(ptr_ary1, ptr_ary2, wts_ary2, avg_wts):
    """Flag node2vec+ out edges and their interpolation parameters.

    Like ``isnotin``, this merges two ascendingly sorted neighbor-pointer
    arrays in one forward pass. Node2vec+ additionally keeps a shared
    neighbor as an out edge when its edge back to the previous state is
    "loose", i.e. its weight falls below that neighbor's average edge
    weight; for such edges the ratio weight / average is recorded in ``t``
    so the caller can interpolate the out bias.

    Args:
        ptr_ary1 (:obj:`numpy.ndarray` of :obj:`uint32`): sorted neighbor
            pointers of the current state
        ptr_ary2 (:obj:`numpy.ndarray` of :obj:`uint32`): sorted neighbor
            pointers of the previous state
        wts_ary2 (:obj:`numpy.ndarray` of :obj:`float64`): edge weights of
            the previous state, aligned with ``ptr_ary2``
        avg_wts (:obj:`numpy.ndarray` of :obj:`float64`): average edge
            weight of every node, indexed by node

    Returns:
        Tuple of the boolean out-edge indicator (aligned with ``ptr_ary1``)
        and the float64 array ``t`` of loose-edge weight ratios (zero for
        entries that are not loose shared neighbors).
    """
    out_ind = np.ones(ptr_ary1.size, dtype=boolean)
    t = np.zeros(ptr_ary1.size, dtype=np.float64)
    j = 0
    n2 = ptr_ary2.size
    for i in range(ptr_ary1.size):
        val = ptr_ary1[i]
        # advance the second cursor past every pointer smaller than val
        while j < n2 and ptr_ary2[j] < val:
            j += 1
        if j == n2:
            # ptr_ary2 exhausted: the remaining entries are all out edges
            break
        if ptr_ary2[j] == val:
            if wts_ary2[j] >= avg_wts[val]:
                # tight shared neighbor -> in edge
                out_ind[i] = False
            else:
                # loose shared neighbor -> stays an out edge with ratio t
                t[i] = wts_ary2[j] / avg_wts[val]
            j += 1
    return out_ind, t
| [
"numpy.load",
"numpy.minimum",
"numpy.logical_and",
"numpy.zeros",
"numpy.ones",
"numpy.where",
"numba.jit",
"numpy.fromiter",
"numpy.savez"
] | [((16215, 16245), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (16218, 16245), False, 'from numba import boolean, jit\n'), ((19246, 19276), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (19249, 19276), False, 'from numba import boolean, jit\n'), ((5463, 5493), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (5466, 5493), False, 'from numba import boolean, jit\n'), ((7196, 7226), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (7199, 7226), False, 'from numba import boolean, jit\n'), ((13383, 13413), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (13386, 13413), False, 'from numba import boolean, jit\n'), ((14806, 14836), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (14809, 14836), False, 'from numba import boolean, jit\n'), ((18444, 18481), 'numpy.ones', 'np.ones', (['ptr_ary1.size'], {'dtype': 'boolean'}), '(ptr_ary1.size, dtype=boolean)\n', (18451, 18481), True, 'import numpy as np\n'), ((20464, 20501), 'numpy.ones', 'np.ones', (['ptr_ary1.size'], {'dtype': 'boolean'}), '(ptr_ary1.size, dtype=boolean)\n', (20471, 20501), True, 'import numpy as np\n'), ((20510, 20551), 'numpy.zeros', 'np.zeros', (['ptr_ary1.size'], {'dtype': 'np.float64'}), '(ptr_ary1.size, dtype=np.float64)\n', (20518, 20551), True, 'import numpy as np\n'), ((4916, 4946), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (4919, 4946), False, 'from numba import boolean, jit\n'), ((5249, 5286), 'numpy.zeros', 'np.zeros', (['num_nodes'], {'dtype': 'np.float64'}), '(num_nodes, dtype=np.float64)\n', (5257, 5286), True, 'import numpy as np\n'), ((9014, 9051), 'numpy.zeros', 'np.zeros', (['indptr[-1]'], {'dtype': 
'np.uint32'}), '(indptr[-1], dtype=np.uint32)\n', (9022, 9051), True, 'import numpy as np\n'), ((9067, 9105), 'numpy.zeros', 'np.zeros', (['indptr[-1]'], {'dtype': 'np.float64'}), '(indptr[-1], dtype=np.float64)\n', (9075, 9105), True, 'import numpy as np\n'), ((9988, 10016), 'numpy.zeros', 'np.zeros', (['(n_nodes, n_nodes)'], {}), '((n_nodes, n_nodes))\n', (9996, 10016), True, 'import numpy as np\n'), ((11636, 11651), 'numpy.load', 'np.load', (['npz_fp'], {}), '(npz_fp)\n', (11643, 11651), True, 'import numpy as np\n'), ((12787, 12831), 'numpy.savez', 'np.savez', (['fp'], {'data': 'self.data', 'IDs': 'self.IDlst'}), '(fp, data=self.data, IDs=self.IDlst)\n', (12795, 12831), True, 'import numpy as np\n'), ((13137, 13167), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': '(True)'}), '(nopython=True, nogil=True)\n', (13140, 13167), False, 'from numba import boolean, jit\n'), ((8404, 8424), 'numpy.minimum', 'np.minimum', (['(1)', '(1 / q)'], {}), '(1, 1 / q)\n', (8414, 8424), True, 'import numpy as np\n'), ((9326, 9367), 'numpy.fromiter', 'np.fromiter', (['sorted_keys'], {'dtype': 'np.uint32'}), '(sorted_keys, dtype=np.uint32)\n', (9337, 9367), True, 'import numpy as np\n'), ((14319, 14363), 'numpy.logical_and', 'np.logical_and', (['nbrs_ind', '(~nonzero[prev_idx])'], {}), '(nbrs_ind, ~nonzero[prev_idx])\n', (14333, 14363), True, 'import numpy as np\n'), ((15879, 15899), 'numpy.minimum', 'np.minimum', (['(1)', '(1 / q)'], {}), '(1, 1 / q)\n', (15889, 15899), True, 'import numpy as np\n'), ((6597, 6627), 'numpy.where', 'np.where', (['(nbrs_idx == prev_idx)'], {}), '(nbrs_idx == prev_idx)\n', (6605, 6627), True, 'import numpy as np\n'), ((7756, 7786), 'numpy.where', 'np.where', (['(nbrs_idx == prev_idx)'], {}), '(nbrs_idx == prev_idx)\n', (7764, 7786), True, 'import numpy as np\n')] |
import os
import pickle
import numpy as np
path = './data/cloning/experts/'
# Walk the expert-rollout directory and summarize each pickled trajectory:
# action/observation shapes plus their min/max value ranges.
for fname in os.listdir(path):
    if fname[-2:] != '.p':
        continue
    with open(path + fname, 'rb') as fh:
        data = pickle.load(fh)
    print(fname)
    print('action size: {}'.format(data['actions'][0].shape))
    print('min action: {}'.format(np.min(data['actions'])))
    print('max action: {}'.format(np.max(data['actions'])))
    print('observation size: {}'.format(data['observations'][0].shape))
    print('min observation: {}'.format(np.min(data['observations'])))
    print('max observation: {}'.format(np.max(data['observations'])))
    print("_____________________________")
    print("")
print("") | [
"numpy.min",
"numpy.max",
"os.listdir"
] | [((91, 107), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (101, 107), False, 'import os\n'), ((324, 347), 'numpy.min', 'np.min', (["data['actions']"], {}), "(data['actions'])\n", (330, 347), True, 'import numpy as np\n'), ((385, 408), 'numpy.max', 'np.max', (["data['actions']"], {}), "(data['actions'])\n", (391, 408), True, 'import numpy as np\n'), ((524, 552), 'numpy.min', 'np.min', (["data['observations']"], {}), "(data['observations'])\n", (530, 552), True, 'import numpy as np\n'), ((595, 623), 'numpy.max', 'np.max', (["data['observations']"], {}), "(data['observations'])\n", (601, 623), True, 'import numpy as np\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
class GAN:
    def __init__(self, param):
        """Configure the GAN: dataset, hyperparameters, optimizers, networks.

        Args:
            param (dict): configuration dictionary. Recognized keys:
                "dataset" ("MNIST" or "CIFAR10"), "batch_size", "noise_dim",
                "total_epoch", "critic_step", "visualize", "output", and the
                per-network Adam settings "learning_rate_d/g", "beta_1_d/g",
                "beta_2_d/g", "epsilon_d/g", "amsgrad_d/g".

        Raises:
            ValueError: if "dataset" is neither "MNIST" nor "CIFAR10".
        """
        # load data
        self.dataset = param.get("dataset", -1)
        # choose CNN setup for the dataset
        if self.dataset == "MNIST":
            (self.x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
            self.init_dim = 7  # initial spatial size of the generator feature map
            self.strides = (1, 2, 2)
            self.data_shape = self.x_train.shape + (1,)  # append channel axis (grayscale)
        elif self.dataset == "CIFAR10":
            (self.x_train, _), (_, _) = tf.keras.datasets.cifar10.load_data()
            self.init_dim = 4
            self.strides = (2, 2, 2)
            self.data_shape = self.x_train.shape
        else:
            raise ValueError('Dataset not supported.')
        # get basic inputs
        self.batch_size = param.get("batch_size", 128)
        self.noise_dim = param.get("noise_dim", 128)
        self.total_epoch = param.get("total_epoch", 100)
        self.critic_step = param.get("critic_step", 1)
        self.visualize = param.get("visualize", True)
        self.out_path = param.get("output", os.getcwd())
        # storage for the objectives
        # ceil(n_samples / batch_size): the boolean term adds 1 for a partial batch
        self.batch_num = int(self.data_shape[0] / self.batch_size) + (self.data_shape[0] % self.batch_size != 0)
        self.d_obj = np.zeros([self.batch_num, self.total_epoch, self.critic_step])
        self.g_obj = np.zeros([self.batch_num, self.total_epoch])
        # normalize dataset
        self.x_train = self.x_train.reshape(self.data_shape).astype('float32')
        self.x_train = (self.x_train - 127.5) / 127.5 # Normalize RGB to [-1, 1]
        self.x_train = \
            tf.data.Dataset.from_tensor_slices(self.x_train).shuffle(self.data_shape[0]).batch(self.batch_size)
        # setup optimizers
        self.D_optimizer = tf.keras.optimizers.Adam(learning_rate=param.get("learning_rate_d", 1e-4),
                                                 beta_1=param.get("beta_1_d", 0.5),
                                                 beta_2=param.get("beta_2_d", 0.999),
                                                 epsilon=param.get("epsilon_d", 1e-7),
                                                 amsgrad=param.get("amsgrad_d", False))
        self.G_optimizer = tf.keras.optimizers.Adam(learning_rate=param.get("learning_rate_g", 5e-5),
                                                 beta_1=param.get("beta_1_g", 0.2),
                                                 beta_2=param.get("beta_2_g", 0.999),
                                                 epsilon=param.get("epsilon_g", 1e-7),
                                                 amsgrad=param.get("amsgrad_g", False))
        # setup models
        self.G = self.set_generator()
        self.D = self.set_discriminator()
def set_generator(self):
g = tf.keras.Sequential()
g.add(layers.Dense(self.init_dim * self.init_dim * 256, use_bias=False, input_shape=(self.noise_dim,)))
g.add(layers.BatchNormalization())
g.add(layers.LeakyReLU())
g.add(layers.Reshape((self.init_dim, self.init_dim, 256)))
g.add(layers.Conv2DTranspose(128, 5, strides=self.strides[0], padding='same', use_bias=False))
g.add(layers.BatchNormalization())
g.add(layers.LeakyReLU())
g.add(layers.Conv2DTranspose(64, 5, strides=self.strides[1], padding='same', use_bias=False))
g.add(layers.BatchNormalization())
g.add(layers.LeakyReLU())
g.add(layers.Conv2DTranspose(32, 5, strides=self.strides[2], padding='same', use_bias=False))
g.add(layers.BatchNormalization())
g.add(layers.LeakyReLU())
g.add(layers.Conv2DTranspose(self.data_shape[3], 5, padding='same', use_bias=False, activation='tanh'))
return g
def set_discriminator(self):
d = tf.keras.Sequential()
d.add(layers.Conv2D(32, kernel_size=5, strides=2, padding='same',
input_shape=self.data_shape[1:],
kernel_initializer="glorot_uniform"))
d.add(layers.LeakyReLU())
d.add(layers.Conv2D(64, kernel_size=5, strides=2, padding='same'))
d.add(layers.BatchNormalization())
d.add(layers.LeakyReLU())
d.add(layers.Conv2D(128, kernel_size=5, strides=2, padding='same'))
d.add(layers.BatchNormalization())
d.add(layers.LeakyReLU())
d.add(layers.Dropout(0.5))
d.add(layers.Conv2D(256, kernel_size=5, strides=2, padding='same'))
d.add(layers.BatchNormalization())
d.add(layers.LeakyReLU())
d.add(layers.Flatten())
d.add(layers.Dense(2, activation='softmax'))
d.add(layers.Lambda(lambda x: x[:, 0]))
return d
@tf.function
def train_discriminator(self, x_batch):
with tf.GradientTape() as D_tape:
x_gen = self.G(tf.random.uniform([x_batch.shape[0], self.noise_dim]), training=True)
y_real = self.D(x_batch, training=True)
y_gen = self.D(x_gen, training=True)
# compute the objective
loss_real = tf.math.log(tf.clip_by_value(y_real, 1e-10, 1.0))
loss_gen = tf.math.log(tf.clip_by_value(tf.math.add(1.0, tf.math.negative(y_gen)), 1e-10, 1.0))
d_obj = -tf.math.reduce_mean(loss_real) - tf.math.reduce_mean(loss_gen)
# update the discriminator
d_grad = D_tape.gradient(d_obj, self.D.trainable_variables)
self.D_optimizer.apply_gradients(zip(d_grad, self.D.trainable_variables))
return d_obj
@tf.function
def train_generator(self, x_batch_size):
with tf.GradientTape() as G_tape:
x_gen = self.G(tf.random.uniform([x_batch_size, self.noise_dim]), training=True)
y_gen = self.D(x_gen, training=True)
# compute the objective
g_obj = -tf.math.reduce_mean(tf.math.log(tf.clip_by_value(y_gen, 1e-10, 1.0)))
# update the generator
g_grad = G_tape.gradient(g_obj, self.G.trainable_variables)
self.G_optimizer.apply_gradients(zip(g_grad, self.G.trainable_variables))
return g_obj
def train(self):
vis_seed = None
if self.visualize:
# Seed for checking training progress
vis_seed = tf.random.uniform([16, self.noise_dim])
# Record current time and start training
print("Training...")
ts_start = tf.timestamp()
for t in range(self.total_epoch):
batch_id = 0
for b in self.x_train:
for k in range(self.critic_step):
self.d_obj[batch_id, t, k] = self.train_discriminator(b)
self.g_obj[batch_id, t] = self.train_generator(b.shape[0])
batch_id += 1
# Print time
print("Time used for epoch {} are {:0.2f} seconds.".format(t + 1, tf.timestamp() - ts_start))
# Check current generator
if self.visualize:
vis_gen = self.G(vis_seed, training=False)
fig = plt.figure(figsize=(4, 4))
plt.suptitle('Epoch: {:03d}'.format(t + 1))
for i in range(vis_gen.shape[0]):
plt.subplot(4, 4, i + 1)
if self.data_shape[3] == 1:
plt.imshow(vis_gen[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
else:
plt.imshow((vis_gen[i, :, :] + 1) / 2)
plt.axis('off')
plt.savefig(os.path.join(self.out_path, "GAN_{}_Epoch_{:03d}.png".format(self.dataset, t + 1)))
plt.clf()
plt.close(fig)
print("Done! {:0.2f} seconds have passed.".format(tf.timestamp() - ts_start))
| [
"tensorflow.math.negative",
"tensorflow.keras.layers.Reshape",
"tensorflow.clip_by_value",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.clf",
"tensorflow.keras.layers.LeakyReLU",
"matplotlib.pyplot.figure",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.l... | [((1347, 1409), 'numpy.zeros', 'np.zeros', (['[self.batch_num, self.total_epoch, self.critic_step]'], {}), '([self.batch_num, self.total_epoch, self.critic_step])\n', (1355, 1409), True, 'import numpy as np\n'), ((1431, 1475), 'numpy.zeros', 'np.zeros', (['[self.batch_num, self.total_epoch]'], {}), '([self.batch_num, self.total_epoch])\n', (1439, 1475), True, 'import numpy as np\n'), ((2895, 2916), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (2914, 2916), True, 'import tensorflow as tf\n'), ((3891, 3912), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (3910, 3912), True, 'import tensorflow as tf\n'), ((6470, 6484), 'tensorflow.timestamp', 'tf.timestamp', ([], {}), '()\n', (6482, 6484), True, 'import tensorflow as tf\n'), ((354, 389), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (387, 389), True, 'import tensorflow as tf\n'), ((1162, 1173), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1171, 1173), False, 'import os\n'), ((2931, 3031), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(self.init_dim * self.init_dim * 256)'], {'use_bias': '(False)', 'input_shape': '(self.noise_dim,)'}), '(self.init_dim * self.init_dim * 256, use_bias=False,\n input_shape=(self.noise_dim,))\n', (2943, 3031), False, 'from tensorflow.keras import layers\n'), ((3043, 3070), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3068, 3070), False, 'from tensorflow.keras import layers\n'), ((3086, 3104), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (3102, 3104), False, 'from tensorflow.keras import layers\n'), ((3120, 3171), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(self.init_dim, self.init_dim, 256)'], {}), '((self.init_dim, self.init_dim, 256))\n', (3134, 3171), False, 'from tensorflow.keras import layers\n'), ((3188, 3279), 'tensorflow.keras.layers.Conv2DTranspose', 
'layers.Conv2DTranspose', (['(128)', '(5)'], {'strides': 'self.strides[0]', 'padding': '"""same"""', 'use_bias': '(False)'}), "(128, 5, strides=self.strides[0], padding='same',\n use_bias=False)\n", (3210, 3279), False, 'from tensorflow.keras import layers\n'), ((3291, 3318), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3316, 3318), False, 'from tensorflow.keras import layers\n'), ((3334, 3352), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (3350, 3352), False, 'from tensorflow.keras import layers\n'), ((3369, 3459), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(64)', '(5)'], {'strides': 'self.strides[1]', 'padding': '"""same"""', 'use_bias': '(False)'}), "(64, 5, strides=self.strides[1], padding='same',\n use_bias=False)\n", (3391, 3459), False, 'from tensorflow.keras import layers\n'), ((3471, 3498), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3496, 3498), False, 'from tensorflow.keras import layers\n'), ((3514, 3532), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (3530, 3532), False, 'from tensorflow.keras import layers\n'), ((3549, 3639), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(32)', '(5)'], {'strides': 'self.strides[2]', 'padding': '"""same"""', 'use_bias': '(False)'}), "(32, 5, strides=self.strides[2], padding='same',\n use_bias=False)\n", (3571, 3639), False, 'from tensorflow.keras import layers\n'), ((3651, 3678), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3676, 3678), False, 'from tensorflow.keras import layers\n'), ((3694, 3712), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (3710, 3712), False, 'from tensorflow.keras import layers\n'), ((3729, 3830), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['self.data_shape[3]', '(5)'], {'padding': 
'"""same"""', 'use_bias': '(False)', 'activation': '"""tanh"""'}), "(self.data_shape[3], 5, padding='same', use_bias=\n False, activation='tanh')\n", (3751, 3830), False, 'from tensorflow.keras import layers\n'), ((3927, 4061), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)'], {'kernel_size': '(5)', 'strides': '(2)', 'padding': '"""same"""', 'input_shape': 'self.data_shape[1:]', 'kernel_initializer': '"""glorot_uniform"""'}), "(32, kernel_size=5, strides=2, padding='same', input_shape=\n self.data_shape[1:], kernel_initializer='glorot_uniform')\n", (3940, 4061), False, 'from tensorflow.keras import layers\n'), ((4128, 4146), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (4144, 4146), False, 'from tensorflow.keras import layers\n'), ((4163, 4222), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)'], {'kernel_size': '(5)', 'strides': '(2)', 'padding': '"""same"""'}), "(64, kernel_size=5, strides=2, padding='same')\n", (4176, 4222), False, 'from tensorflow.keras import layers\n'), ((4238, 4265), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4263, 4265), False, 'from tensorflow.keras import layers\n'), ((4281, 4299), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (4297, 4299), False, 'from tensorflow.keras import layers\n'), ((4316, 4376), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)'], {'kernel_size': '(5)', 'strides': '(2)', 'padding': '"""same"""'}), "(128, kernel_size=5, strides=2, padding='same')\n", (4329, 4376), False, 'from tensorflow.keras import layers\n'), ((4392, 4419), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4417, 4419), False, 'from tensorflow.keras import layers\n'), ((4435, 4453), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (4451, 4453), False, 'from tensorflow.keras import layers\n'), ((4469, 4488), 
'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (4483, 4488), False, 'from tensorflow.keras import layers\n'), ((4505, 4565), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(256)'], {'kernel_size': '(5)', 'strides': '(2)', 'padding': '"""same"""'}), "(256, kernel_size=5, strides=2, padding='same')\n", (4518, 4565), False, 'from tensorflow.keras import layers\n'), ((4581, 4608), 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4606, 4608), False, 'from tensorflow.keras import layers\n'), ((4624, 4642), 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), '()\n', (4640, 4642), False, 'from tensorflow.keras import layers\n'), ((4659, 4675), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4673, 4675), False, 'from tensorflow.keras import layers\n'), ((4691, 4728), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (4703, 4728), False, 'from tensorflow.keras import layers\n'), ((4744, 4776), 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['(lambda x: x[:, 0])'], {}), '(lambda x: x[:, 0])\n', (4757, 4776), False, 'from tensorflow.keras import layers\n'), ((4871, 4888), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4886, 4888), True, 'import tensorflow as tf\n'), ((5684, 5701), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5699, 5701), True, 'import tensorflow as tf\n'), ((6332, 6371), 'tensorflow.random.uniform', 'tf.random.uniform', (['[16, self.noise_dim]'], {}), '([16, self.noise_dim])\n', (6349, 6371), True, 'import tensorflow as tf\n'), ((593, 630), 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ([], {}), '()\n', (628, 630), True, 'import tensorflow as tf\n'), ((4927, 4980), 'tensorflow.random.uniform', 'tf.random.uniform', (['[x_batch.shape[0], self.noise_dim]'], {}), '([x_batch.shape[0], 
self.noise_dim])\n', (4944, 4980), True, 'import tensorflow as tf\n'), ((5171, 5207), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y_real', '(1e-10)', '(1.0)'], {}), '(y_real, 1e-10, 1.0)\n', (5187, 5207), True, 'import tensorflow as tf\n'), ((5371, 5400), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['loss_gen'], {}), '(loss_gen)\n', (5390, 5400), True, 'import tensorflow as tf\n'), ((5740, 5789), 'tensorflow.random.uniform', 'tf.random.uniform', (['[x_batch_size, self.noise_dim]'], {}), '([x_batch_size, self.noise_dim])\n', (5757, 5789), True, 'import tensorflow as tf\n'), ((7102, 7128), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (7112, 7128), True, 'import matplotlib.pyplot as plt\n'), ((7670, 7679), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7677, 7679), True, 'import matplotlib.pyplot as plt\n'), ((7696, 7710), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7705, 7710), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5368), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['loss_real'], {}), '(loss_real)\n', (5357, 5368), True, 'import tensorflow as tf\n'), ((7259, 7283), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', '(i + 1)'], {}), '(4, 4, i + 1)\n', (7270, 7283), True, 'import matplotlib.pyplot as plt\n'), ((7526, 7541), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7534, 7541), True, 'import matplotlib.pyplot as plt\n'), ((7769, 7783), 'tensorflow.timestamp', 'tf.timestamp', ([], {}), '()\n', (7781, 7783), True, 'import tensorflow as tf\n'), ((1703, 1751), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['self.x_train'], {}), '(self.x_train)\n', (1737, 1751), True, 'import tensorflow as tf\n'), ((5278, 5301), 'tensorflow.math.negative', 'tf.math.negative', (['y_gen'], {}), '(y_gen)\n', (5294, 5301), True, 'import tensorflow as tf\n'), ((5945, 5980), 
'tensorflow.clip_by_value', 'tf.clip_by_value', (['y_gen', '(1e-10)', '(1.0)'], {}), '(y_gen, 1e-10, 1.0)\n', (5961, 5980), True, 'import tensorflow as tf\n'), ((6923, 6937), 'tensorflow.timestamp', 'tf.timestamp', ([], {}), '()\n', (6935, 6937), True, 'import tensorflow as tf\n'), ((7356, 7416), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(vis_gen[i, :, :, 0] * 127.5 + 127.5)'], {'cmap': '"""gray"""'}), "(vis_gen[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n", (7366, 7416), True, 'import matplotlib.pyplot as plt\n'), ((7467, 7505), 'matplotlib.pyplot.imshow', 'plt.imshow', (['((vis_gen[i, :, :] + 1) / 2)'], {}), '((vis_gen[i, :, :] + 1) / 2)\n', (7477, 7505), True, 'import matplotlib.pyplot as plt\n')] |
"""
Anova
=====
Statistical tools for time-series analysis.
* One-way: Find time intervals where signals recorded under single conditions differ from the baseline.
* Two-way: Find interactions between varying conditions time intervals of the recorded signal.
* Repeated-measures: Find time intervals where the signal was systematically changing on a group level.
"""
import numpy as np
from scipy.stats import f
import matplotlib.pyplot as plt
from tabulate import tabulate
import pandas as pd
import seaborn as sns
def one_way(groups):
"""Run one way analysis of variance on n groups of equal length.
* Identify which groups significanlty deviate from the grand mean.
* Prints a table with a spss-style output.
Parameters
----------
group: list or ndarray
| If list then each index represents a group,
| If ndarray then each column represents a group.
Returns
-------
F: double
F-value, ratio between effect and error sum of squares.
p: double
Probability of obtaining F-value by chance.
df_effect: int
degrees of freedom for the effect (n groups -1).
df_error: int
degrees of freedom for the error (n groups * (n samples - 1)).
"""
groups = np.array(groups).T
n_samples = groups.shape[0]
n_groups = groups.shape[1]
#total_sumsq = np.sum([(x- groups.ravel().mean())**2 for x in groups.ravel()])
within_group_sumsq = np.sum([[(x - group.mean())**2] for group in groups.T for x in group])
between_group_sumsq = np.sum([ n_samples * ((group.mean()- groups.mean())**2) for group in groups.T])
df_within = n_groups * (n_samples-1)
df_between = n_groups-1
F = (between_group_sumsq / df_between) / (within_group_sumsq / df_within )
p = 1 - f.cdf(F, df_between,df_within)
sns.boxplot(pd.DataFrame(groups))
print(tabulate([[F, p, between_group_sumsq, df_between, within_group_sumsq, df_within]],
['F-value','p-value','effect sss','effect df','error sss', 'error df'], tablefmt="grid"))
return F, p, df_between, df_within
def plot_F_probability(dfn, dfd, F):
x = np.linspace(0, F + 1, 1001)[1:]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, 1-f.cdf(x, dfn, dfd), '--', label=r'$df_1=%i,\ df_2=%i$' % (dfn, dfd))
ax.set_ylabel(r'$ 1 - cdf(df_1,df_2)$')
ax.set_xlabel('$x (F = %f)$' %F)
ax.set_title('F-distribution')
print(1-f.cdf(F, dfn, dfd))
plt.legend()
plt.show()
tmp = np.array([[[4,6,8], [6,6,9], [8,9,13]],
[[4,8,9], [7,10,13], [12,14,16]]]).swapaxes(0,2).swapaxes(1,2)
def two_way(data, f1_name, f2_name):
"""Run two way analysis of variance in a factor by factor design.
* Identify main effects for each factor.
* Identify interaction between factors.
* Print a table with a spss-style output.
Parameters
----------
data: ndarray
| Each row represents a 1st factor level.
| Each column respresents a 2nd factor level.
| Each layer (depth dimension) is an observation.
"""
#Sums of squares
factor_1_effect, factor_2_effect, within_error = factor_sumofsq(data)
total_sumofsq = np.sum((data.ravel() - data.mean())**2)
interaction_sumofsq = total_sumofsq - factor_1_effect - factor_2_effect - within_error
#degrees of freedom
factor_1_df, factor_2_df = data.shape[1]-1, data.shape[2]-1
error_df = (data.shape[0]-1) * (data.shape[1] * data.shape[2])
interaction_df = factor_1_df * factor_2_df
#total_df = factor_1_df + factor_2_df + error_df + interaction_df
#Mean squares
within_mean_ssq = within_error / error_df
f1_mean_ssq, f2_mean_ssq = factor_1_effect / factor_1_df, factor_2_effect / factor_2_df
interaction_ssq = interaction_sumofsq / interaction_df
#F values
F1, F2 = f1_mean_ssq / within_mean_ssq, f2_mean_ssq / within_mean_ssq
F_interaction = interaction_ssq / within_mean_ssq
#P values
p_F1 = 1 - f.cdf(F1, factor_1_df, error_df)
p_F2 = 1 - f.cdf(F2, factor_2_df, error_df)
p_interaction = 1 - f.cdf(F_interaction, interaction_df, error_df)
print (tabulate([[f1_name, f1_mean_ssq, factor_1_df, F1, p_F1],
[f2_name, f2_mean_ssq,factor_2_df, F2, p_F2],
['Interaction', interaction_ssq, interaction_df, F_interaction, p_interaction]],
['Source','Mean square','df','F-values', 'p-values'], tablefmt='grid'))
#return [F1, p_F1, F2, p_F2, F_interaction, p_interaction]
def factor_sumofsq(data):
f1_effect_sumofsq = 0
f2_effect_sumofsq = 0
error_sumofsq = 0
#iterate over levels of the 1st factor
for factor1_level in data.swapaxes(0,1):
f1_effect_sumofsq = f1_effect_sumofsq + ((factor1_level.mean() - data.mean())**2) * len(factor1_level.ravel())
error_sumofsq = error_sumofsq + np.sum([[(x - other_factor.mean())**2] for other_factor in factor1_level.T for x in other_factor])
#iterate over levels of the 2nd factor
for factor2_level in data.swapaxes(1,2).swapaxes(0,1):
f2_effect_sumofsq = f2_effect_sumofsq + ((factor2_level.mean() - data.mean())**2) * len(factor2_level.ravel())
return f1_effect_sumofsq,f2_effect_sumofsq, error_sumofsq
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.array",
"tabulate.tabulate",
"numpy.linspace",
"scipy.stats.f.cdf"
] | [((2223, 2235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2233, 2235), True, 'import matplotlib.pyplot as plt\n'), ((2507, 2519), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2517, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2532, 2534), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1294), 'numpy.array', 'np.array', (['groups'], {}), '(groups)\n', (1286, 1294), True, 'import numpy as np\n'), ((1814, 1845), 'scipy.stats.f.cdf', 'f.cdf', (['F', 'df_between', 'df_within'], {}), '(F, df_between, df_within)\n', (1819, 1845), False, 'from scipy.stats import f\n'), ((1863, 1883), 'pandas.DataFrame', 'pd.DataFrame', (['groups'], {}), '(groups)\n', (1875, 1883), True, 'import pandas as pd\n'), ((1896, 2079), 'tabulate.tabulate', 'tabulate', (['[[F, p, between_group_sumsq, df_between, within_group_sumsq, df_within]]', "['F-value', 'p-value', 'effect sss', 'effect df', 'error sss', 'error df']"], {'tablefmt': '"""grid"""'}), "([[F, p, between_group_sumsq, df_between, within_group_sumsq,\n df_within]], ['F-value', 'p-value', 'effect sss', 'effect df',\n 'error sss', 'error df'], tablefmt='grid')\n", (1904, 2079), False, 'from tabulate import tabulate\n'), ((2179, 2206), 'numpy.linspace', 'np.linspace', (['(0)', '(F + 1)', '(1001)'], {}), '(0, F + 1, 1001)\n', (2190, 2206), True, 'import numpy as np\n'), ((4048, 4080), 'scipy.stats.f.cdf', 'f.cdf', (['F1', 'factor_1_df', 'error_df'], {}), '(F1, factor_1_df, error_df)\n', (4053, 4080), False, 'from scipy.stats import f\n'), ((4097, 4129), 'scipy.stats.f.cdf', 'f.cdf', (['F2', 'factor_2_df', 'error_df'], {}), '(F2, factor_2_df, error_df)\n', (4102, 4129), False, 'from scipy.stats import f\n'), ((4155, 4201), 'scipy.stats.f.cdf', 'f.cdf', (['F_interaction', 'interaction_df', 'error_df'], {}), '(F_interaction, interaction_df, error_df)\n', (4160, 4201), False, 'from scipy.stats import f\n'), ((4214, 4484), 
'tabulate.tabulate', 'tabulate', (["[[f1_name, f1_mean_ssq, factor_1_df, F1, p_F1], [f2_name, f2_mean_ssq,\n factor_2_df, F2, p_F2], ['Interaction', interaction_ssq, interaction_df,\n F_interaction, p_interaction]]", "['Source', 'Mean square', 'df', 'F-values', 'p-values']"], {'tablefmt': '"""grid"""'}), "([[f1_name, f1_mean_ssq, factor_1_df, F1, p_F1], [f2_name,\n f2_mean_ssq, factor_2_df, F2, p_F2], ['Interaction', interaction_ssq,\n interaction_df, F_interaction, p_interaction]], ['Source',\n 'Mean square', 'df', 'F-values', 'p-values'], tablefmt='grid')\n", (4222, 4484), False, 'from tabulate import tabulate\n'), ((2284, 2302), 'scipy.stats.f.cdf', 'f.cdf', (['x', 'dfn', 'dfd'], {}), '(x, dfn, dfd)\n', (2289, 2302), False, 'from scipy.stats import f\n'), ((2482, 2500), 'scipy.stats.f.cdf', 'f.cdf', (['F', 'dfn', 'dfd'], {}), '(F, dfn, dfd)\n', (2487, 2500), False, 'from scipy.stats import f\n'), ((2542, 2632), 'numpy.array', 'np.array', (['[[[4, 6, 8], [6, 6, 9], [8, 9, 13]], [[4, 8, 9], [7, 10, 13], [12, 14, 16]]]'], {}), '([[[4, 6, 8], [6, 6, 9], [8, 9, 13]], [[4, 8, 9], [7, 10, 13], [12,\n 14, 16]]])\n', (2550, 2632), True, 'import numpy as np\n')] |
# dataloader for 7-Scenes / when testing F-Net and MaGNet
import os
import random
import glob
import numpy as np
import torch
import torch.utils.data.distributed
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
# read camera pose
def _read_ExtM_from_txt(fpath_txt):
ExtM = np.eye(4)
with open(fpath_txt, 'r') as f:
content = f.readlines()
content = [x.strip() for x in content]
for ir, row in enumerate(ExtM):
row_content = content[ir].split()
row = np.asarray([float(x) for x in row_content])
ExtM[ir, :] = row
ExtM = np.linalg.inv(ExtM)
return ExtM
class SevenScenesLoader(object):
def __init__(self, args, mode):
self.t_samples = SevenScenesLoadPreprocess(args, mode)
self.data = DataLoader(self.t_samples, 1, shuffle=False, num_workers=1)
class SevenScenesLoadPreprocess(Dataset):
def __init__(self, args, mode):
self.args = args
# Test set by Long et al. (CVPR 21)
with open("./data_split/sevenscenes_long_test.txt", 'r') as f:
self.filenames = f.readlines()
self.mode = mode
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.dataset_path = args.dataset_path
# local window
self.window_radius = args.MAGNET_window_radius
self.n_views = args.MAGNET_num_source_views
self.frame_interval = self.window_radius // (self.n_views // 2)
self.img_idx_center = self.n_views // 2
# window_idx_list
self.window_idx_list = list(range(-self.n_views // 2, (self.n_views // 2) + 1))
self.window_idx_list = [i * self.frame_interval for i in self.window_idx_list]
# image resolution
self.img_H = args.input_height
self.img_W = args.input_width
self.dpv_H = args.dpv_height
self.dpv_W = args.dpv_width
# ray array
self.ray_array = self.get_ray_array()
self.cam_intrins = self.get_cam_intrinsics()
def __len__(self):
return len(self.filenames)
# ray array used to back-project depth-map into camera-centered coordinates
def get_ray_array(self):
ray_array = np.ones((self.dpv_H, self.dpv_W, 3))
x_range = np.arange(self.dpv_W)
y_range = np.arange(self.dpv_H)
x_range = np.concatenate([x_range.reshape(1, self.dpv_W)] * self.dpv_H, axis=0)
y_range = np.concatenate([y_range.reshape(self.dpv_H, 1)] * self.dpv_W, axis=1)
ray_array[:, :, 0] = x_range + 0.5
ray_array[:, :, 1] = y_range + 0.5
return ray_array
# get camera intrinscs
def get_cam_intrinsics(self):
IntM_ = np.eye(3)
raw_W, raw_H = self.img_W, self.img_H
# use the parameters in :
# https://www.microsoft.com/en-us/research/project/rgb-d-dataset-7-scenes/
IntM_[0, 0] = 585.
IntM_[1, 1] = 585.
IntM_[0, 2] = 320.
IntM_[1, 2] = 240.
# updated intrinsic matrix
IntM = np.zeros((3, 3))
IntM[2, 2] = 1.
IntM[0, 0] = IntM_[0, 0] * (self.dpv_W / raw_W)
IntM[1, 1] = IntM_[1, 1] * (self.dpv_H / raw_H)
IntM[0, 2] = IntM_[0, 2] * (self.dpv_W / raw_W)
IntM[1, 2] = IntM_[1, 2] * (self.dpv_H / raw_H)
# pixel to ray array
pixel_to_ray_array = np.copy(self.ray_array)
pixel_to_ray_array[:, :, 0] = ((pixel_to_ray_array[:, :, 0] * (raw_W / self.dpv_W))
- IntM_[0, 2]) / IntM_[0, 0]
pixel_to_ray_array[:, :, 1] = ((pixel_to_ray_array[:, :, 1] * (raw_H / self.dpv_H))
- IntM_[1, 2]) / IntM_[1, 1]
pixel_to_ray_array_2D = np.reshape(np.transpose(pixel_to_ray_array, axes=[2, 0, 1]), [3, -1]) # (3, H*W)
pixel_to_ray_array_2D = torch.from_numpy(pixel_to_ray_array_2D.astype(np.float32))
cam_intrinsics = {
'unit_ray_array_2D': pixel_to_ray_array_2D,
'intM': torch.from_numpy(IntM.astype(np.float32)),
}
return cam_intrinsics
def __getitem__(self, idx):
scene_name, seq_id, img_idx = self.filenames[idx].split(' ')
seq_id = int(seq_id)
img_idx = int(img_idx)
scene_dir = self.dataset_path + '/{}/seq-%02d/'.format(scene_name) % seq_id
# identify the neighbor views
img_idx_list = []
for i in self.window_idx_list:
if os.path.exists(scene_dir + '/frame-%06d.color.png' % (img_idx + i)):
img_idx_list.append(img_idx + i)
else:
img_idx_list.append(img_idx - i - np.sign(i) * int(self.frame_interval * 0.5))
# data array
data_array = []
for i in range(self.n_views + 1):
cur_idx = img_idx_list[i]
img_path = scene_dir + '/frame-%06d.color.png' % cur_idx
dmap_path = scene_dir + '/frame-%06d.depth.png' % cur_idx
pose_path = scene_dir + '/frame-%06d.pose.txt' % cur_idx
# read img
img = Image.open(img_path).convert("RGB").resize(size=(self.img_W, self.img_H), resample=Image.BILINEAR)
img = np.array(img).astype(np.float32) / 255.0 # (H, W, 3)
img = torch.from_numpy(img).permute(2, 0, 1) # (3, H, W)
img = self.normalize(img)
# read dmap (only for the ref img)
if i == self.img_idx_center:
gt_dmap = Image.open(dmap_path).resize(size=(self.img_W, self.img_H), resample=Image.NEAREST)
gt_dmap = np.array(gt_dmap)[:, :, np.newaxis]
gt_dmap[gt_dmap == 65535] = 0
gt_dmap = gt_dmap.astype(np.float32) / 1000.0
gt_dmap = torch.from_numpy(gt_dmap).permute(2, 0, 1) # (1, H, W)
else:
gt_dmap = 0.0
# read pose
extM = _read_ExtM_from_txt(pose_path)
data_dict = {
'img': img,
'gt_dmap': gt_dmap,
'extM': extM,
'scene_name': '%s_seq-%02d' % (scene_name, seq_id),
'img_idx': cur_idx
}
data_array.append(data_dict)
return data_array, self.cam_intrins
| [
"torch.utils.data.DataLoader",
"numpy.copy",
"numpy.zeros",
"numpy.ones",
"numpy.transpose",
"os.path.exists",
"PIL.Image.open",
"numpy.linalg.inv",
"numpy.arange",
"numpy.array",
"numpy.sign",
"numpy.eye",
"torchvision.transforms.Normalize",
"torch.from_numpy"
] | [((338, 347), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (344, 347), True, 'import numpy as np\n'), ((633, 652), 'numpy.linalg.inv', 'np.linalg.inv', (['ExtM'], {}), '(ExtM)\n', (646, 652), True, 'import numpy as np\n'), ((823, 882), 'torch.utils.data.DataLoader', 'DataLoader', (['self.t_samples', '(1)'], {'shuffle': '(False)', 'num_workers': '(1)'}), '(self.t_samples, 1, shuffle=False, num_workers=1)\n', (833, 882), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1198, 1273), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1218, 1273), False, 'from torchvision import transforms\n'), ((2260, 2296), 'numpy.ones', 'np.ones', (['(self.dpv_H, self.dpv_W, 3)'], {}), '((self.dpv_H, self.dpv_W, 3))\n', (2267, 2296), True, 'import numpy as np\n'), ((2315, 2336), 'numpy.arange', 'np.arange', (['self.dpv_W'], {}), '(self.dpv_W)\n', (2324, 2336), True, 'import numpy as np\n'), ((2355, 2376), 'numpy.arange', 'np.arange', (['self.dpv_H'], {}), '(self.dpv_H)\n', (2364, 2376), True, 'import numpy as np\n'), ((2742, 2751), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2748, 2751), True, 'import numpy as np\n'), ((3075, 3091), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3083, 3091), True, 'import numpy as np\n'), ((3399, 3422), 'numpy.copy', 'np.copy', (['self.ray_array'], {}), '(self.ray_array)\n', (3406, 3422), True, 'import numpy as np\n'), ((3787, 3835), 'numpy.transpose', 'np.transpose', (['pixel_to_ray_array'], {'axes': '[2, 0, 1]'}), '(pixel_to_ray_array, axes=[2, 0, 1])\n', (3799, 3835), True, 'import numpy as np\n'), ((4503, 4570), 'os.path.exists', 'os.path.exists', (["(scene_dir + '/frame-%06d.color.png' % (img_idx + i))"], {}), "(scene_dir + '/frame-%06d.color.png' % (img_idx + i))\n", (4517, 4570), False, 'import os\n'), ((5303, 5324), 'torch.from_numpy', 'torch.from_numpy', 
(['img'], {}), '(img)\n', (5319, 5324), False, 'import torch\n'), ((5624, 5641), 'numpy.array', 'np.array', (['gt_dmap'], {}), '(gt_dmap)\n', (5632, 5641), True, 'import numpy as np\n'), ((5227, 5240), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5235, 5240), True, 'import numpy as np\n'), ((5514, 5535), 'PIL.Image.open', 'Image.open', (['dmap_path'], {}), '(dmap_path)\n', (5524, 5535), False, 'from PIL import Image\n'), ((5794, 5819), 'torch.from_numpy', 'torch.from_numpy', (['gt_dmap'], {}), '(gt_dmap)\n', (5810, 5819), False, 'import torch\n'), ((4689, 4699), 'numpy.sign', 'np.sign', (['i'], {}), '(i)\n', (4696, 4699), True, 'import numpy as np\n'), ((5110, 5130), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (5120, 5130), False, 'from PIL import Image\n')] |
# import pytest
import itertools
import operator
import numpy as np
from kronecker import mKPGM as model
def test_different_probabilities_avg_edges():
b = 2
k = 5
l = 2
theta = [[0.7, 0.4], [0.4, 0.5]]
vertices = range(operator.pow(b, k))
n = 100
possible_edges = list(itertools.product(vertices, repeat=2))
counts = {edge: 0 for edge in possible_edges}
for i in range(n):
g = model.mKPGM(theta, k, b, l)
for e in possible_edges:
if e in g.edges:
counts[e] += 1
S = sum(theta[0]) + sum(theta[1])
S_2 = np.square(theta[0][0]) + np.square(theta[0][1]) \
+ np.square(theta[1][0]) + np.square(theta[1][1])
exp_num_edges = np.power(S, k)
var_num_edges = np.power(S, k-1) * (np.power(S, k-l) - 1) \
* float(S-S_2)/(S-1) + (np.power(S, k-l) - np.power(S_2, l)) \
* np.power(S, 2*(k-l))
avg_edges = float(sum(counts.values()))/n
assert avg_edges >= exp_num_edges - var_num_edges
assert avg_edges <= exp_num_edges + var_num_edges
| [
"numpy.power",
"numpy.square",
"kronecker.mKPGM.mKPGM",
"operator.pow",
"itertools.product"
] | [((724, 738), 'numpy.power', 'np.power', (['S', 'k'], {}), '(S, k)\n', (732, 738), True, 'import numpy as np\n'), ((241, 259), 'operator.pow', 'operator.pow', (['b', 'k'], {}), '(b, k)\n', (253, 259), False, 'import operator\n'), ((300, 337), 'itertools.product', 'itertools.product', (['vertices'], {'repeat': '(2)'}), '(vertices, repeat=2)\n', (317, 337), False, 'import itertools\n'), ((425, 452), 'kronecker.mKPGM.mKPGM', 'model.mKPGM', (['theta', 'k', 'b', 'l'], {}), '(theta, k, b, l)\n', (436, 452), True, 'from kronecker import mKPGM as model\n'), ((680, 702), 'numpy.square', 'np.square', (['theta[1][1]'], {}), '(theta[1][1])\n', (689, 702), True, 'import numpy as np\n'), ((655, 677), 'numpy.square', 'np.square', (['theta[1][0]'], {}), '(theta[1][0])\n', (664, 677), True, 'import numpy as np\n'), ((900, 924), 'numpy.power', 'np.power', (['S', '(2 * (k - l))'], {}), '(S, 2 * (k - l))\n', (908, 924), True, 'import numpy as np\n'), ((595, 617), 'numpy.square', 'np.square', (['theta[0][0]'], {}), '(theta[0][0])\n', (604, 617), True, 'import numpy as np\n'), ((620, 642), 'numpy.square', 'np.square', (['theta[0][1]'], {}), '(theta[0][1])\n', (629, 642), True, 'import numpy as np\n'), ((843, 861), 'numpy.power', 'np.power', (['S', '(k - l)'], {}), '(S, k - l)\n', (851, 861), True, 'import numpy as np\n'), ((862, 878), 'numpy.power', 'np.power', (['S_2', 'l'], {}), '(S_2, l)\n', (870, 878), True, 'import numpy as np\n'), ((759, 777), 'numpy.power', 'np.power', (['S', '(k - 1)'], {}), '(S, k - 1)\n', (767, 777), True, 'import numpy as np\n'), ((779, 797), 'numpy.power', 'np.power', (['S', '(k - l)'], {}), '(S, k - l)\n', (787, 797), True, 'import numpy as np\n')] |
import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(r=2, description=""):
vx = chainer.Variable(np.random.rand(2, 4, 6 * r, 8 * r).astype(np.float32))
vy = chainer.functions.space2depth(vx, r)
graph = ChainerConverter().convert([vx], [vy])
x = graph.inputs[0]
y = graph.outputs[0]
generate_kernel_test_case(
description=f"[chainer] F.Depth2Space {description}",
graph=graph,
backend=["webgpu", "webgl", "webassembly"],
inputs={x: vx.data},
expected={y: vy.data},
)
def test():
template()
def test_r_3():
template(r=3)
def test_with_placeholder():
vx = chainer.Variable(np.random.rand(2, 5, 4, 8).astype(np.float32))
vy = chainer.functions.space2depth(vx, r=2)
N = Placeholder(label="N")
C = Placeholder(label="C")
px = PlaceholderVariable([N, C, 4, 8])
py = chainer.functions.space2depth(px, r=2)
graph = ChainerConverter().convert([px], [py])
N.value = 2
C.value = 5
generate_kernel_test_case(
description=f"[chainer] F.space2depth with placeholder",
graph=graph,
backend=["webgpu", "webassembly"],
inputs={graph.inputs[0]: vx.data},
expected={graph.outputs[0]: vy.data},
)
| [
"test.util.generate_kernel_test_case",
"webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable",
"chainer.functions.space2depth",
"numpy.random.rand",
"webdnn.frontend.chainer.converter.ChainerConverter",
"webdnn.graph.placeholder.Placeholder"
] | [((429, 465), 'chainer.functions.space2depth', 'chainer.functions.space2depth', (['vx', 'r'], {}), '(vx, r)\n', (458, 465), False, 'import chainer\n'), ((573, 768), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] F.Depth2Space {description}"""', 'graph': 'graph', 'backend': "['webgpu', 'webgl', 'webassembly']", 'inputs': '{x: vx.data}', 'expected': '{y: vy.data}'}), "(description=\n f'[chainer] F.Depth2Space {description}', graph=graph, backend=[\n 'webgpu', 'webgl', 'webassembly'], inputs={x: vx.data}, expected={y: vy\n .data})\n", (598, 768), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((979, 1017), 'chainer.functions.space2depth', 'chainer.functions.space2depth', (['vx'], {'r': '(2)'}), '(vx, r=2)\n', (1008, 1017), False, 'import chainer\n'), ((1027, 1049), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""N"""'}), "(label='N')\n", (1038, 1049), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1058, 1080), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""C"""'}), "(label='C')\n", (1069, 1080), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1090, 1123), 'webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable', 'PlaceholderVariable', (['[N, C, 4, 8]'], {}), '([N, C, 4, 8])\n', (1109, 1123), False, 'from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable\n'), ((1133, 1171), 'chainer.functions.space2depth', 'chainer.functions.space2depth', (['px'], {'r': '(2)'}), '(px, r=2)\n', (1162, 1171), False, 'import chainer\n'), ((1261, 1479), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] F.space2depth with placeholder"""', 'graph': 'graph', 'backend': "['webgpu', 'webassembly']", 'inputs': '{graph.inputs[0]: vx.data}', 'expected': '{graph.outputs[0]: vy.data}'}), "(description=\n f'[chainer] F.space2depth with 
placeholder', graph=graph, backend=[\n 'webgpu', 'webassembly'], inputs={graph.inputs[0]: vx.data}, expected={\n graph.outputs[0]: vy.data})\n", (1286, 1479), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((479, 497), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (495, 497), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((1185, 1203), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (1201, 1203), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((365, 399), 'numpy.random.rand', 'np.random.rand', (['(2)', '(4)', '(6 * r)', '(8 * r)'], {}), '(2, 4, 6 * r, 8 * r)\n', (379, 399), True, 'import numpy as np\n'), ((923, 949), 'numpy.random.rand', 'np.random.rand', (['(2)', '(5)', '(4)', '(8)'], {}), '(2, 5, 4, 8)\n', (937, 949), True, 'import numpy as np\n')] |
import numpy as np
import os
import json
import random
import torch
from builtins import range
from config import opt
import sys
################################## For config ##################################
def read_kwargs(kwargs):
if 'path_key' not in kwargs:
print('Error: no path key')
sys.exit()
else:
dict_path = '../config/%s_dict.json' % kwargs['path_key']
with open(dict_path, 'r') as f:
data_info_dict = json.load(f)
kwargs['path_img'] = data_info_dict["path_img"]
if 'kidx' not in kwargs:
kwargs['kidx'] = list(range(len(data_info_dict['Train'])))
if 'model' not in kwargs:
print('Error: no model')
sys.exit()
if 'net_idx' not in kwargs:
print('Error: no net idx')
sys.exit()
# model
# optim set
if 'optim' in kwargs and kwargs['optim'] == 'SGD':
if 'wd' not in kwargs:
kwargs['wd'] = 0.00001
if 'lr' not in kwargs:
kwargs['lr'] = 0.01
# cycle learning
if 'cycle_r' in kwargs and int(kwargs['cycle_r']) > 0:
if 'Tmax' not in kwargs:
kwargs['Tmax'] = 20
kwargs['cos_lr'] = True
kwargs['epoch'] = int(kwargs['cycle_r']) * 2 * kwargs['Tmax']
kwargs['gap_epoch'] = kwargs['epoch'] + 1
# loss
return kwargs, data_info_dict
def update_kwargs(init_model_path, kwargs):
save_dict = torch.load(init_model_path, map_location=torch.device('cpu'))
config_dict = save_dict['config_dict']
del save_dict
config_dict.pop('gpu_idx')
config_dict['mode'] = 'test'
if 'val_bs' in kwargs:
config_dict['val_bs'] = val_bs
return config_dict
################################## For path ##################################
def make_path_folder(path):
path_split = path.split('/')
path_length = len(path_split)
for i in range(2, path_length):
tmp_path = '/'.join(path_split[:i]) + '/'
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
return
################################## For class-balance in mini-batch ##################################
def get_class_list(data_list, class_dict):
class_list = []
if opt.label_length == 2:
for data in data_list:
class_list.append(class_dict[data])
else:
# to do
pass
return class_list
def batch_class_balance(data_list, class_list):
class_list = np.array(class_list)
neg_idx_list = np.where(class_list == 0)[0].tolist()
pos_idx_list = np.where(class_list == 1)[0].tolist()
random.shuffle(pos_idx_list)
bc_train_idx_list = []
while True:
pos_num = random.randint(int(opt.train_bs / 3), int(opt.train_bs / 3 * 2))
neg_num = opt.train_bs - pos_num
tmp_batch = pos_idx_list[:pos_num] + random.sample(neg_idx_list, neg_num)
if len(pos_idx_list) == 0:
break
elif pos_num > len(pos_idx_list):
break
else:
pos_idx_list = pos_idx_list[pos_num:]
random.shuffle(tmp_batch)
bc_train_idx_list += tmp_batch
bc_train_list = np.array(data_list)[bc_train_idx_list].tolist()
return bc_train_list
################################## For batch preprocess ##################################
def num_collate(data):
case_list = []
img_list = []
annot_list = []
batch_size = len(data)
for tmp_data in data:
tmp_case, tmp_img, tmp_annot = tmp_data
case_list.append(tmp_case)
img_list.append(tmp_img)
annot_list.append(tmp_annot)
max_num_annots = max(annot.shape[0] for annot in annot_list)
if max_num_annots > 0:
annot_padded = np.ones((batch_size, max_num_annots, 5)) * -1
for idx, annot in enumerate(annot_list):
if annot.shape[0] > 0:
annot_padded[idx, :annot.shape[0], :] = annot
else:
annot_padded = np.ones((batch_size, 1, 5)) * -1
img_batch = torch.tensor(np.array(img_list))
annot_batch = torch.tensor(annot_padded)
return case_list, img_batch, annot_batch
################################## For Metric ##################################
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
| [
"os.mkdir",
"json.load",
"numpy.sum",
"numpy.maximum",
"random.sample",
"random.shuffle",
"os.path.exists",
"numpy.ones",
"numpy.where",
"numpy.array",
"torch.device",
"sys.exit",
"torch.tensor",
"builtins.range",
"numpy.concatenate"
] | [((1893, 1914), 'builtins.range', 'range', (['(2)', 'path_length'], {}), '(2, path_length)\n', (1898, 1914), False, 'from builtins import range\n'), ((2455, 2475), 'numpy.array', 'np.array', (['class_list'], {}), '(class_list)\n', (2463, 2475), True, 'import numpy as np\n'), ((2595, 2623), 'random.shuffle', 'random.shuffle', (['pos_idx_list'], {}), '(pos_idx_list)\n', (2609, 2623), False, 'import random\n'), ((4045, 4071), 'torch.tensor', 'torch.tensor', (['annot_padded'], {}), '(annot_padded)\n', (4057, 4071), False, 'import torch\n'), ((4661, 4699), 'numpy.concatenate', 'np.concatenate', (['([0.0], recall, [1.0])'], {}), '(([0.0], recall, [1.0]))\n', (4675, 4699), True, 'import numpy as np\n'), ((4709, 4750), 'numpy.concatenate', 'np.concatenate', (['([0.0], precision, [0.0])'], {}), '(([0.0], precision, [0.0]))\n', (4723, 4750), True, 'import numpy as np\n'), ((4800, 4827), 'builtins.range', 'range', (['(mpre.size - 1)', '(0)', '(-1)'], {}), '(mpre.size - 1, 0, -1)\n', (4805, 4827), False, 'from builtins import range\n'), ((5073, 5118), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (5079, 5118), True, 'import numpy as np\n'), ((314, 324), 'sys.exit', 'sys.exit', ([], {}), '()\n', (322, 324), False, 'import sys\n'), ((708, 718), 'sys.exit', 'sys.exit', ([], {}), '()\n', (716, 718), False, 'import sys\n'), ((795, 805), 'sys.exit', 'sys.exit', ([], {}), '()\n', (803, 805), False, 'import sys\n'), ((3062, 3087), 'random.shuffle', 'random.shuffle', (['tmp_batch'], {}), '(tmp_batch)\n', (3076, 3087), False, 'import random\n'), ((4007, 4025), 'numpy.array', 'np.array', (['img_list'], {}), '(img_list)\n', (4015, 4025), True, 'import numpy as np\n'), ((4851, 4883), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (4861, 4883), True, 'import numpy as np\n'), ((4991, 5022), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != 
mrec[:-1])\n', (4999, 5022), True, 'import numpy as np\n'), ((470, 482), 'json.load', 'json.load', (['f'], {}), '(f)\n', (479, 482), False, 'import json\n'), ((1466, 1485), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1478, 1485), False, 'import torch\n'), ((1981, 2005), 'os.path.exists', 'os.path.exists', (['tmp_path'], {}), '(tmp_path)\n', (1995, 2005), False, 'import os\n'), ((2019, 2037), 'os.mkdir', 'os.mkdir', (['tmp_path'], {}), '(tmp_path)\n', (2027, 2037), False, 'import os\n'), ((2838, 2874), 'random.sample', 'random.sample', (['neg_idx_list', 'neg_num'], {}), '(neg_idx_list, neg_num)\n', (2851, 2874), False, 'import random\n'), ((3719, 3759), 'numpy.ones', 'np.ones', (['(batch_size, max_num_annots, 5)'], {}), '((batch_size, max_num_annots, 5))\n', (3726, 3759), True, 'import numpy as np\n'), ((3944, 3971), 'numpy.ones', 'np.ones', (['(batch_size, 1, 5)'], {}), '((batch_size, 1, 5))\n', (3951, 3971), True, 'import numpy as np\n'), ((2495, 2520), 'numpy.where', 'np.where', (['(class_list == 0)'], {}), '(class_list == 0)\n', (2503, 2520), True, 'import numpy as np\n'), ((2552, 2577), 'numpy.where', 'np.where', (['(class_list == 1)'], {}), '(class_list == 1)\n', (2560, 2577), True, 'import numpy as np\n'), ((3148, 3167), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (3156, 3167), True, 'import numpy as np\n')] |
# Cuda Kernel Original work by <NAME> (@Oh233)
# https://github.com/msracver/FCIS
# Modified by <NAME> (@knorth55)
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
if cuda.available:
import cupy as cp
def _roi_pooling_slice(size, stride, max_size, roi_offset):
start = int(np.floor(size * stride))
end = int(np.ceil((size + 1) * stride))
start = min(max(start + roi_offset, 0), max_size)
end = min(max(end + roi_offset, 0), max_size)
return slice(start, end), end - start
class PSROIPooling2D(function.Function):
def __init__(self, outh, outw, spatial_scale, group_size, output_dim):
self.outh, self.outw = outh, outw
self.spatial_scale = spatial_scale
self.group_size = group_size
self.output_dim = output_dim
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, roi_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 5,
)
def forward_cpu(self, inputs):
self.retain_inputs((1,))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = np.empty(
(n_rois, self.output_dim, self.outh, self.outw), dtype=np.float32)
for i_roi in six.moves.range(n_rois):
idx, xmin, ymin, xmax, ymax = bottom_rois[i_roi]
idx = int(idx)
xmin = round(xmin * self.spatial_scale)
xmax = round(xmax * self.spatial_scale)
ymin = round(ymin * self.spatial_scale)
ymax = round(ymax * self.spatial_scale)
roi_width = max(xmax - xmin, 0.1)
roi_height = max(ymax - ymin, 0.1)
strideh = 1. * roi_height / self.outh
stridew = 1. * roi_width / self.outw
strided = 1. * channels / self.output_dim
grouph = int(round(self.outh / self.group_size))
groupw = int(round(self.outw / self.group_size))
for outh in six.moves.range(self.outh):
sliceh, lenh = _roi_pooling_slice(
outh, strideh, height, int(ymin))
if sliceh.stop <= sliceh.start:
continue
for outw in six.moves.range(self.outw):
slicew, lenw = _roi_pooling_slice(
outw, stridew, width, int(xmin))
if slicew.stop <= slicew.start:
continue
for outd in six.moves.range(self.output_dim):
sliced, lend = _roi_pooling_slice(
outd, strided, channels, 0)
roi_data = bottom_data[idx, sliced, sliceh, slicew]\
.reshape(lend, -1)
d = (outh // grouph) * self.group_size \
+ (outw // groupw)
top_data[i_roi, outd, outh, outw] = np.average(
roi_data[d])
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1,))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois = inputs
channels, height, width = bottom_data.shape[1:]
n_rois = bottom_rois.shape[0]
top_data = cp.empty(
(n_rois, self.output_dim, self.outh, self.outw), dtype=np.float32)
cuda.cupy.ElementwiseKernel(
'''
raw float32 bottom_data, float32 spatial_scale, int32 channels,
int32 height, int32 width, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 output_dim, raw float32 bottom_rois
''',
'float32 top_data',
'''
// pos in output filter
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int ctop = (i / pooled_width / pooled_height) % output_dim;
int n = i / pooled_width / pooled_height / output_dim;
int roi_batch_ind = bottom_rois[n * 5 + 0];
float roi_start_w = static_cast<float>(
round(bottom_rois[n * 5 + 1])) * spatial_scale;
float roi_start_h = static_cast<float>(
round(bottom_rois[n * 5 + 2])) * spatial_scale;
float roi_end_w = static_cast<float>(
round(bottom_rois[n * 5 + 3])) * spatial_scale;
float roi_end_h = static_cast<float>(
round(bottom_rois[n * 5 + 4])) * spatial_scale;
// Force too small ROIs to be 1x1
float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
float roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
float bin_size_h = roi_height / static_cast<float>(pooled_height);
float bin_size_w = roi_width / static_cast<float>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<float>(ph)
* bin_size_h + roi_start_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw)
* bin_size_w + roi_start_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)
* bin_size_h + roi_start_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)
* bin_size_w + roi_start_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int gw = floor(
static_cast<float>(pw) * group_size / pooled_width);
int gh = floor(
static_cast<float>(ph) * group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int data_offset = (roi_batch_ind * channels + c) * height * width;
float out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
out_sum += bottom_data[data_offset + bottom_index];
}
}
float bin_area = (hend - hstart) * (wend - wstart);
top_data = is_empty? (float) 0. : out_sum / bin_area;
''', 'psroi_pooling_2d_fwd'
)(bottom_data, self.spatial_scale, channels, height, width,
self.outh, self.outw, self.group_size, self.output_dim,
bottom_rois, top_data)
return top_data,
def backward_cpu(self, inputs, gy):
bottom_rois = inputs[1]
channels, height, width = self._bottom_data_shape[1:]
n_rois = bottom_rois.shape[0]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
for i_roi in six.moves.range(n_rois):
idx, xmin, ymin, xmax, ymax = bottom_rois[i_roi]
idx = int(idx)
xmin = round(xmin * self.spatial_scale)
xmax = round(xmax * self.spatial_scale)
ymin = round(ymin * self.spatial_scale)
ymax = round(ymax * self.spatial_scale)
roi_width = max(xmax - xmin, 0.1)
roi_height = max(ymax - ymin, 0.1)
strideh = 1. * roi_height / self.outh
stridew = 1. * roi_width / self.outw
strided = 1. * channels / self.output_dim
grouph = int(round(self.outh / self.group_size))
groupw = int(round(self.outw / self.group_size))
for outh in six.moves.range(self.outh):
sliceh, lenh = _roi_pooling_slice(
outh, strideh, height, int(ymin))
if sliceh.stop <= sliceh.start:
continue
for outw in six.moves.range(self.outw):
slicew, lenw = _roi_pooling_slice(
outw, stridew, width, int(xmin))
if slicew.stop <= slicew.start:
continue
for outd in six.moves.range(self.output_dim):
diff_val = gy[0][i_roi, outd, outh, outw] / lenh / lenw
startd = int(np.floor(outd * strided))
startd = min(max(startd, 0), channels)
d = (outh // grouph) * self.group_size \
+ (outw // groupw) + startd
bottom_diff[idx, d, sliceh, slicew] += diff_val
return bottom_diff, None
def backward_gpu(self, inputs, gy):
bottom_rois = inputs[1]
channels, height, width = self._bottom_data_shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
cuda.cupy.ElementwiseKernel(
'''
raw float32 bottom_diff, int32 num_rois,
float32 spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_height, int32 pooled_width, int32 group_size,
int32 output_dim, raw float32 bottom_rois
''',
'float32 top_diff',
'''
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int ctop = (i / pooled_width / pooled_height) % output_dim;
int n = i / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
int roi_batch_ind = bottom_rois[n * 5];
float roi_start_w = static_cast<float>(
round(bottom_rois[n * 5 + 1])) * spatial_scale;
float roi_start_h = static_cast<float>(
round(bottom_rois[n * 5 + 2])) * spatial_scale;
float roi_end_w = static_cast<float>(
round(bottom_rois[n * 5 + 3])) * spatial_scale;
float roi_end_h = static_cast<float>(
round(bottom_rois[n * 5 + 4])) * spatial_scale;
// Force too small ROIs to be 1x1
float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
float roi_height = max(roi_end_h - roi_start_h, 0.1);
// Compute w and h at bottom
float bin_size_w = roi_width / static_cast<float>(pooled_width);
float bin_size_h = roi_height / static_cast<float>(pooled_height);
int wstart = floor(
static_cast<float>(pw) * bin_size_w + roi_start_w);
int hstart = floor(
static_cast<float>(ph) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<float>(pw + 1.0) * bin_size_w + roi_start_w);
int hend = ceil(
static_cast<float>(ph + 1.0) * bin_size_h + roi_start_h);
// Add roi offsets and clip to input boundaries
wstart = min(max(wstart, 0), width);
hstart = min(max(hstart, 0), height);
wend = min(max(wend, 0), width);
hend = min(max(hend, 0), height);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int gw = floor(
static_cast<float>(pw) * group_size / pooled_width);
int gh = floor(
static_cast<float>(ph) * group_size / pooled_height);
gw = min(max(gw, 0), group_size - 1);
gh = min(max(gh, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset = (roi_batch_ind * channels + c);
bottom_diff_offset = bottom_diff_offset * height * width;
float bin_area = (hend - hstart) * (wend - wstart);
float diff_val = is_empty ? (float) 0. : top_diff / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
atomicAdd(
&bottom_diff[bottom_diff_offset + bottom_index], diff_val);
}
}
''', 'psroi_pooling_2d_bwd'
)(bottom_diff, bottom_rois.shape[0], self.spatial_scale,
channels, height, width, self.outh, self.outw, self.group_size,
self.output_dim, bottom_rois, gy[0])
return bottom_diff, None
def psroi_pooling_2d(
    x, rois, outh, outw, spatial_scale,
    group_size, output_dim
):
    """Functional interface to position-sensitive ROI pooling.

    Builds a ``PSROIPooling2D`` function object configured with the given
    output size, spatial scale and grouping, then applies it to the
    feature map ``x`` and the regions of interest ``rois``.
    """
    pooling = PSROIPooling2D(outh, outw, spatial_scale,
                             group_size, output_dim)
    return pooling(x, rois)
| [
"chainer.utils.type_check.expect",
"numpy.average",
"numpy.ceil",
"six.moves.range",
"cupy.empty",
"numpy.empty",
"numpy.floor",
"numpy.zeros",
"chainer.backends.cuda.cupy.zeros",
"chainer.backends.cuda.cupy.ElementwiseKernel"
] | [((368, 391), 'numpy.floor', 'np.floor', (['(size * stride)'], {}), '(size * stride)\n', (376, 391), True, 'import numpy as np\n'), ((407, 435), 'numpy.ceil', 'np.ceil', (['((size + 1) * stride)'], {}), '((size + 1) * stride)\n', (414, 435), True, 'import numpy as np\n'), ((1001, 1143), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x_type.dtype == np.float32)', '(x_type.ndim == 4)', '(roi_type.dtype == np.float32)', '(roi_type.ndim == 2)', '(roi_type.shape[1] == 5)'], {}), '(x_type.dtype == np.float32, x_type.ndim == 4, roi_type.\n dtype == np.float32, roi_type.ndim == 2, roi_type.shape[1] == 5)\n', (1018, 1143), False, 'from chainer.utils import type_check\n'), ((1485, 1560), 'numpy.empty', 'np.empty', (['(n_rois, self.output_dim, self.outh, self.outw)'], {'dtype': 'np.float32'}), '((n_rois, self.output_dim, self.outh, self.outw), dtype=np.float32)\n', (1493, 1560), True, 'import numpy as np\n'), ((1596, 1619), 'six.moves.range', 'six.moves.range', (['n_rois'], {}), '(n_rois)\n', (1611, 1619), False, 'import six\n'), ((3604, 3679), 'cupy.empty', 'cp.empty', (['(n_rois, self.output_dim, self.outh, self.outw)'], {'dtype': 'np.float32'}), '((n_rois, self.output_dim, self.outh, self.outw), dtype=np.float32)\n', (3612, 3679), True, 'import cupy as cp\n'), ((7439, 7484), 'numpy.zeros', 'np.zeros', (['self._bottom_data_shape', 'np.float32'], {}), '(self._bottom_data_shape, np.float32)\n', (7447, 7484), True, 'import numpy as np\n'), ((7507, 7530), 'six.moves.range', 'six.moves.range', (['n_rois'], {}), '(n_rois)\n', (7522, 7530), False, 'import six\n'), ((9341, 9393), 'chainer.backends.cuda.cupy.zeros', 'cuda.cupy.zeros', (['self._bottom_data_shape', 'np.float32'], {}), '(self._bottom_data_shape, np.float32)\n', (9356, 9393), False, 'from chainer.backends import cuda\n'), ((2311, 2337), 'six.moves.range', 'six.moves.range', (['self.outh'], {}), '(self.outh)\n', (2326, 2337), False, 'import six\n'), ((3701, 7033), 
'chainer.backends.cuda.cupy.ElementwiseKernel', 'cuda.cupy.ElementwiseKernel', (['"""\n raw float32 bottom_data, float32 spatial_scale, int32 channels,\n int32 height, int32 width, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 output_dim, raw float32 bottom_rois\n """', '"""float32 top_data"""', '"""\n // pos in output filter\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % output_dim;\n int n = i / pooled_width / pooled_height / output_dim;\n\n int roi_batch_ind = bottom_rois[n * 5 + 0];\n float roi_start_w = static_cast<float>(\n round(bottom_rois[n * 5 + 1])) * spatial_scale;\n float roi_start_h = static_cast<float>(\n round(bottom_rois[n * 5 + 2])) * spatial_scale;\n float roi_end_w = static_cast<float>(\n round(bottom_rois[n * 5 + 3])) * spatial_scale;\n float roi_end_h = static_cast<float>(\n round(bottom_rois[n * 5 + 4])) * spatial_scale;\n\n // Force too small ROIs to be 1x1\n float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n float roi_height = max(roi_end_h - roi_start_h, 0.1);\n\n // Compute w and h at bottom\n float bin_size_h = roi_height / static_cast<float>(pooled_height);\n float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n int hstart = static_cast<int>(floor(static_cast<float>(ph)\n * bin_size_h + roi_start_h));\n int wstart = static_cast<int>(floor(static_cast<float>(pw)\n * bin_size_w + roi_start_w));\n int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)\n * bin_size_h + roi_start_h));\n int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)\n * bin_size_w + roi_start_w));\n\n // Add roi offsets and clip to input boundaries\n hstart = min(max(hstart, 0), height);\n hend = min(max(hend, 0), height);\n wstart = min(max(wstart, 0), width);\n wend = min(max(wend, 0), width);\n bool is_empty = (hend <= hstart) || (wend <= wstart);\n\n // Compute c at bottom\n int gw = floor(\n static_cast<float>(pw) * 
group_size / pooled_width);\n int gh = floor(\n static_cast<float>(ph) * group_size / pooled_height);\n gw = min(max(gw, 0), group_size - 1);\n gh = min(max(gh, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int data_offset = (roi_batch_ind * channels + c) * height * width;\n float out_sum = 0;\n for (int h = hstart; h < hend; ++h){\n for (int w = wstart; w < wend; ++w){\n int bottom_index = h * width + w;\n out_sum += bottom_data[data_offset + bottom_index];\n }\n }\n\n float bin_area = (hend - hstart) * (wend - wstart);\n top_data = is_empty? (float) 0. : out_sum / bin_area;\n """', '"""psroi_pooling_2d_fwd"""'], {}), '(\n """\n raw float32 bottom_data, float32 spatial_scale, int32 channels,\n int32 height, int32 width, int32 pooled_height, int32 pooled_width,\n int32 group_size, int32 output_dim, raw float32 bottom_rois\n """\n , \'float32 top_data\',\n """\n // pos in output filter\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % output_dim;\n int n = i / pooled_width / pooled_height / output_dim;\n\n int roi_batch_ind = bottom_rois[n * 5 + 0];\n float roi_start_w = static_cast<float>(\n round(bottom_rois[n * 5 + 1])) * spatial_scale;\n float roi_start_h = static_cast<float>(\n round(bottom_rois[n * 5 + 2])) * spatial_scale;\n float roi_end_w = static_cast<float>(\n round(bottom_rois[n * 5 + 3])) * spatial_scale;\n float roi_end_h = static_cast<float>(\n round(bottom_rois[n * 5 + 4])) * spatial_scale;\n\n // Force too small ROIs to be 1x1\n float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n float roi_height = max(roi_end_h - roi_start_h, 0.1);\n\n // Compute w and h at bottom\n float bin_size_h = roi_height / static_cast<float>(pooled_height);\n float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n int hstart = static_cast<int>(floor(static_cast<float>(ph)\n * bin_size_h + roi_start_h));\n int wstart = 
static_cast<int>(floor(static_cast<float>(pw)\n * bin_size_w + roi_start_w));\n int hend = static_cast<int>(ceil(static_cast<float>(ph + 1)\n * bin_size_h + roi_start_h));\n int wend = static_cast<int>(ceil(static_cast<float>(pw + 1)\n * bin_size_w + roi_start_w));\n\n // Add roi offsets and clip to input boundaries\n hstart = min(max(hstart, 0), height);\n hend = min(max(hend, 0), height);\n wstart = min(max(wstart, 0), width);\n wend = min(max(wend, 0), width);\n bool is_empty = (hend <= hstart) || (wend <= wstart);\n\n // Compute c at bottom\n int gw = floor(\n static_cast<float>(pw) * group_size / pooled_width);\n int gh = floor(\n static_cast<float>(ph) * group_size / pooled_height);\n gw = min(max(gw, 0), group_size - 1);\n gh = min(max(gh, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int data_offset = (roi_batch_ind * channels + c) * height * width;\n float out_sum = 0;\n for (int h = hstart; h < hend; ++h){\n for (int w = wstart; w < wend; ++w){\n int bottom_index = h * width + w;\n out_sum += bottom_data[data_offset + bottom_index];\n }\n }\n\n float bin_area = (hend - hstart) * (wend - wstart);\n top_data = is_empty? (float) 0. 
: out_sum / bin_area;\n """\n , \'psroi_pooling_2d_fwd\')\n', (3728, 7033), False, 'from chainer.backends import cuda\n'), ((8222, 8248), 'six.moves.range', 'six.moves.range', (['self.outh'], {}), '(self.outh)\n', (8237, 8248), False, 'import six\n'), ((9402, 12667), 'chainer.backends.cuda.cupy.ElementwiseKernel', 'cuda.cupy.ElementwiseKernel', (['"""\n raw float32 bottom_diff, int32 num_rois,\n float32 spatial_scale, int32 channels, int32 height, int32 width,\n int32 pooled_height, int32 pooled_width, int32 group_size,\n int32 output_dim, raw float32 bottom_rois\n """', '"""float32 top_diff"""', '"""\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % output_dim;\n int n = i / pooled_width / pooled_height / output_dim;\n\n // [start, end) interval for spatial sampling\n int roi_batch_ind = bottom_rois[n * 5];\n float roi_start_w = static_cast<float>(\n round(bottom_rois[n * 5 + 1])) * spatial_scale;\n float roi_start_h = static_cast<float>(\n round(bottom_rois[n * 5 + 2])) * spatial_scale;\n float roi_end_w = static_cast<float>(\n round(bottom_rois[n * 5 + 3])) * spatial_scale;\n float roi_end_h = static_cast<float>(\n round(bottom_rois[n * 5 + 4])) * spatial_scale;\n\n // Force too small ROIs to be 1x1\n float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n float roi_height = max(roi_end_h - roi_start_h, 0.1);\n\n // Compute w and h at bottom\n float bin_size_w = roi_width / static_cast<float>(pooled_width);\n float bin_size_h = roi_height / static_cast<float>(pooled_height);\n\n int wstart = floor(\n static_cast<float>(pw) * bin_size_w + roi_start_w);\n int hstart = floor(\n static_cast<float>(ph) * bin_size_h + roi_start_h);\n int wend = ceil(\n static_cast<float>(pw + 1.0) * bin_size_w + roi_start_w);\n int hend = ceil(\n static_cast<float>(ph + 1.0) * bin_size_h + roi_start_h);\n\n // Add roi offsets and clip to input boundaries\n wstart = min(max(wstart, 0), width);\n 
hstart = min(max(hstart, 0), height);\n wend = min(max(wend, 0), width);\n hend = min(max(hend, 0), height);\n bool is_empty = (hend <= hstart) || (wend <= wstart);\n\n // Compute c at bottom\n int gw = floor(\n static_cast<float>(pw) * group_size / pooled_width);\n int gh = floor(\n static_cast<float>(ph) * group_size / pooled_height);\n gw = min(max(gw, 0), group_size - 1);\n gh = min(max(gh, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_diff_offset = (roi_batch_ind * channels + c);\n bottom_diff_offset = bottom_diff_offset * height * width;\n float bin_area = (hend - hstart) * (wend - wstart);\n float diff_val = is_empty ? (float) 0. : top_diff / bin_area;\n for (int h = hstart; h < hend; ++h){\n for (int w = wstart; w < wend; ++w){\n int bottom_index = h * width + w;\n atomicAdd(\n &bottom_diff[bottom_diff_offset + bottom_index], diff_val);\n }\n }\n """', '"""psroi_pooling_2d_bwd"""'], {}), '(\n """\n raw float32 bottom_diff, int32 num_rois,\n float32 spatial_scale, int32 channels, int32 height, int32 width,\n int32 pooled_height, int32 pooled_width, int32 group_size,\n int32 output_dim, raw float32 bottom_rois\n """\n , \'float32 top_diff\',\n """\n int pw = i % pooled_width;\n int ph = (i / pooled_width) % pooled_height;\n int ctop = (i / pooled_width / pooled_height) % output_dim;\n int n = i / pooled_width / pooled_height / output_dim;\n\n // [start, end) interval for spatial sampling\n int roi_batch_ind = bottom_rois[n * 5];\n float roi_start_w = static_cast<float>(\n round(bottom_rois[n * 5 + 1])) * spatial_scale;\n float roi_start_h = static_cast<float>(\n round(bottom_rois[n * 5 + 2])) * spatial_scale;\n float roi_end_w = static_cast<float>(\n round(bottom_rois[n * 5 + 3])) * spatial_scale;\n float roi_end_h = static_cast<float>(\n round(bottom_rois[n * 5 + 4])) * spatial_scale;\n\n // Force too small ROIs to be 1x1\n float roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0\n float roi_height = 
max(roi_end_h - roi_start_h, 0.1);\n\n // Compute w and h at bottom\n float bin_size_w = roi_width / static_cast<float>(pooled_width);\n float bin_size_h = roi_height / static_cast<float>(pooled_height);\n\n int wstart = floor(\n static_cast<float>(pw) * bin_size_w + roi_start_w);\n int hstart = floor(\n static_cast<float>(ph) * bin_size_h + roi_start_h);\n int wend = ceil(\n static_cast<float>(pw + 1.0) * bin_size_w + roi_start_w);\n int hend = ceil(\n static_cast<float>(ph + 1.0) * bin_size_h + roi_start_h);\n\n // Add roi offsets and clip to input boundaries\n wstart = min(max(wstart, 0), width);\n hstart = min(max(hstart, 0), height);\n wend = min(max(wend, 0), width);\n hend = min(max(hend, 0), height);\n bool is_empty = (hend <= hstart) || (wend <= wstart);\n\n // Compute c at bottom\n int gw = floor(\n static_cast<float>(pw) * group_size / pooled_width);\n int gh = floor(\n static_cast<float>(ph) * group_size / pooled_height);\n gw = min(max(gw, 0), group_size - 1);\n gh = min(max(gh, 0), group_size - 1);\n int c = (ctop * group_size + gh) * group_size + gw;\n\n int bottom_diff_offset = (roi_batch_ind * channels + c);\n bottom_diff_offset = bottom_diff_offset * height * width;\n float bin_area = (hend - hstart) * (wend - wstart);\n float diff_val = is_empty ? (float) 0. 
: top_diff / bin_area;\n for (int h = hstart; h < hend; ++h){\n for (int w = wstart; w < wend; ++w){\n int bottom_index = h * width + w;\n atomicAdd(\n &bottom_diff[bottom_diff_offset + bottom_index], diff_val);\n }\n }\n """\n , \'psroi_pooling_2d_bwd\')\n', (9429, 12667), False, 'from chainer.backends import cuda\n'), ((2549, 2575), 'six.moves.range', 'six.moves.range', (['self.outw'], {}), '(self.outw)\n', (2564, 2575), False, 'import six\n'), ((8460, 8486), 'six.moves.range', 'six.moves.range', (['self.outw'], {}), '(self.outw)\n', (8475, 8486), False, 'import six\n'), ((2806, 2838), 'six.moves.range', 'six.moves.range', (['self.output_dim'], {}), '(self.output_dim)\n', (2821, 2838), False, 'import six\n'), ((8717, 8749), 'six.moves.range', 'six.moves.range', (['self.output_dim'], {}), '(self.output_dim)\n', (8732, 8749), False, 'import six\n'), ((3251, 3274), 'numpy.average', 'np.average', (['roi_data[d]'], {}), '(roi_data[d])\n', (3261, 3274), True, 'import numpy as np\n'), ((8868, 8892), 'numpy.floor', 'np.floor', (['(outd * strided)'], {}), '(outd * strided)\n', (8876, 8892), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
General description is in the docstring of a function below, and in click decorators.
Motivation, FAQ, and detailed description of _some_ features is here.
FAQ
---
Q: What happens to block numbers from old style trajectories?
A: they are put in the data dict under "block" key, the same way HDF5 trajectories in polychrom do it
Q: What happens to loop extrusion positions?
A: SMC12345.dat are automatically swept in under the key "lef_positions" and would be returned in a dict returned by polychrom.hdf5_format.load_URI
Q: What is the best way to save space?
A: Rounding to 1 digit (0.05 max error) would save 30-40%. Picking every second/5th/etc. file would save it by 2x/5x on top of that
Q: How to find how much do folders occupy?
A: `du -sch *` ; alternatively `du -sc * | sort -n` if you want to sort the output by size.
`find . | wc -l` to find how many files
Default Behavior
----------------
Defaults are fairly conservative, and would use little rounding (to 2 digits, 0.005 maximum error),
would demand the trajectory to be consecutive, and would not do in-place conversions.
All examples below convert each sub-folder in a given folder, which is probably the most common usecase.
For very critical data, it is recommended to not convert in place. Script below does this,
and converts each trajectory to a new-style, placed in a "../converted" folder with the same
name. It rounds to 2 digits (max error 0.005) by default, which is very conservative.
It is recommended to round to 1 digit unless you specifically need bond lengths or angles
to a high precision. Contactmaps are not affected by 1-digit rounding.
set - e
for i in *; do traj_convert.py --empty-policy raise --verbose "$i" "../converted/$i" ; done
For less critical data, in-place conversion is acceptable. Example below converts every trajectory in-place,
and rounds to 1 digit, and also skips every second file. This gives ~4x space savings.
It sets empty-policy to "ignore" because conversion is in place. You will be notified of all the cases
of empty folders because of the --verbose flag. It will use a temporary folder to copy files to, and then would
replace the original with the temporary folder. It also allows for missing blocks (e.g. block1.dat block2.dat block4.dat).
for i in *; do traj_convert.py --empty-policy ignore --verbose --round-to 1 --skip-files 2 --allow-nonconsecutive --replace "$i" `mktemp -d` ; done
Input can be new-style trajectory as well. You would use that for thinning or rounding the data. For example,
the script below would round data to 0.05, and take every 5th file (10x space reduction). It also shows an example of iterating
through all sub-subdirectories, (not sub-directories), which is also a common data layout.
for i in */*; do traj_convert.py --empty-policy ignore --verbose --input-style new --round-to 1 --skip-files 5 --allow-nonconsecutive --replace "$i" `mktemp -d` ; done
"""
import os
import sys
import shutil
import click
import pickle
import glob
import re
import pandas as pd
import numpy as np
from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI
from polychrom.polymerutils import load
def _find_matches(pat, filenames):
    """
    Match regex *pat* against each filename and return the matches.

    Parameters
    ----------
    pat : str
        Regular expression with exactly one capturing group that captures
        the integer number of the file (e.g. ``"block([0-9]+).dat"``).
    filenames : iterable of str
        Candidate filenames (full paths are fine; ``re.search`` is used).

    Returns
    -------
    dict
        ``{filename: int(captured group)}`` for every filename that matched.

    Raises
    ------
    ValueError
        If *pat* matched a filename but does not contain exactly one
        capturing group.
    """
    result = {}
    for filename in filenames:
        match = re.search(pat, filename)
        if match is None:
            continue
        # Enforce exactly one capturing group: it must carry the file number.
        # (The original code also had an `assert` here, which was unreachable
        # dead code after this raise and has been removed.)
        if len(match.groups()) != 1:
            raise ValueError(
                "You should have one group in regex denoting the number of the file"
            )
        result[filename] = int(match.groups()[0])
    return result
# CLI definition: every option below becomes an entry of **kwargs inside
# trajcopy() (click lowercases option names and turns dashes into underscores,
# e.g. --HDF5-blocks-per-file -> kwargs["hdf5_blocks_per_file"]).
@click.command()
@click.option(
    "--input-style",
    default="old",
    show_default=True,
    help="old (block*.dat) or new (HDF5) style for input files",
)
@click.option(
    "--empty-policy",
    default="copy-limit",
    show_default=True,
    help="empty trajectories: 'copy', 'copy-limit' (enforce file limit), 'raise', 'ignore'",
)
@click.option(
    "--dry-run", is_flag=True, help="do not perform any file operations",
)
@click.option(
    "--block-pattern",
    default="block([0-9]+).dat",
    show_default=True,
    help="regex to match a block number in blockX.dat",
)
@click.option(
    "--extra-pattern",
    multiple=True,
    default=["SMC([0-9]+).dat"],
    show_default=True,
    help="regex pattern to match 'extra' files with file number in parentheses. This argument can be repeated",
)
@click.option(
    "--extra-pattern-name",
    multiple=True,
    default=["lef_positions"],
    show_default=True,
    help="key under which to store (can be repeated)",
)
@click.option(
    "--extra-loader",
    multiple=True,
    default=["pickle.load(open(filename,'rb'))"],
    show_default=True,
    help="python expression f(filename) that loads data (can be repeated)",
)
@click.option(
    "--extra-require/--extra-not-require",
    multiple=True,
    default=[False],
    show_default=True,
    help="Require or not that extra files are present (can be repeated)",
)
@click.option(
    "--overwrite/--not-overwrite",
    default=False,
    show_default=True,
    help="raise error if files exist in destination",
)
@click.option(
    "--allow-nonconsecutive",
    is_flag=True,
    help="allow blocks to be non-consecutive (1,2,3...)",
)
@click.option("--verbose", is_flag=True)
@click.option(
    "--round-to", default=2, show_default=True, help="round to this number of digits"
)
@click.option(
    "--skip-files", default=1, show_default=True, help="save only every Nth file"
)
@click.option(
    "--HDF5-blocks-per-file",
    default=100,
    show_default=True,
    help="blocks per file for HDF5 reporter",
)
@click.option(
    "--max-unmatched-files",
    default=20,
    show_default=True,
    help="maximum number of extra files found",
)
@click.option(
    "--replace",
    is_flag=True,
    help="use out_dir as temp dir, and replace in_dir with out_dir",
)
@click.option("--force-delete", is_flag=True, help="delete subdirectories")
@click.argument("IN_DIR")
@click.argument("OUT_DIR")
def trajcopy(
    block_pattern,
    extra_pattern,
    extra_pattern_name,
    extra_loader,
    extra_require,
    in_dir,
    out_dir,
    **kwargs,
):
    """
    A function that copies a HDF5 trajectory with several possible features.
    -- It can convert from old-style to new-style trajectories
    -- It by default rounds it to 2 decimal digits (which has space savings)
    -- It can "thin" the trajectory by skipping every Nth file (--skip_files)
    -- It can integrate information from "extra" files
    (by default it assumes that there is a file named "SMC<X>.dat" for each "block<x>.dat",
    and that this file is a pickle. This is saved to "lef_positions" key, and this is optional).
    If you have several files like that, you can repeat "--extra-pattern" and other 3 arguments
    several times.
    An example command to replace each subfolder in a folder, and take every second file (4x space saving):
    for i in *; do traj_convert.py --round-to 1 --skip-files 2 --allow-nonconsecutive --replace $i `mktemp -d` ; done
    """
    # managing input/output directories
    in_dir = os.path.abspath(in_dir)
    if os.path.isfile(in_dir):
        raise IOError("input directory is a file")
    if not os.path.exists(in_dir):
        raise IOError("input directory doesn't exist")
    out_dir = os.path.abspath(out_dir)
    if out_dir == in_dir:
        raise ValueError("Copying to same directory not supported - use replace=True")
    if not kwargs["dry_run"]:
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
    # getting files/URIs corresponding to blocks
    # (blocks maps filename/URI -> block number for "old", and the reverse
    # mapping of list_URIs for "new": block number -> URI)
    all_files = glob.glob(os.path.join(in_dir, "*"))
    if kwargs["input_style"] == "old":
        blocks = _find_matches(block_pattern, all_files)
    elif kwargs["input_style"] == "new":
        blocks = {
            i: j
            for j, i in list_URIs(in_dir, empty_error=True, return_dict=True).items()
        }
    else:
        raise ValueError("input-style should be 'old' or 'new'")
    # managing cases when the folder is empty
    policy = kwargs["empty_policy"]
    if len(blocks) == 0:
        # NOTE(review): the `and kwargs["verbose"]` below makes the
        # move/exit behavior for empty dirs depend on the --verbose flag,
        # which looks unintended — with verbose off and policy "copy" the
        # directory is neither moved nor skipped. Confirm against callers.
        if (policy in ["copy", "copy-limit"]) and kwargs["verbose"]:
            if kwargs["replace"]:
                if kwargs["verbose"]:
                    print("no files found; not moving", in_dir)
            else:
                if not kwargs["dry_run"]:
                    shutil.move(in_dir, out_dir)
                if kwargs["verbose"]:
                    print("no files found; simply moving", in_dir)
            exit()
        if policy == "copy":
            kwargs["max_unmatched_files"] = 1e9
        elif policy == "raise":
            print(in_dir)
            raise IOError("Emtpy directory encountered")
        elif policy == "ignore":
            if kwargs["verbose"]:
                print("skipping", in_dir)
            exit()
        elif policy == "copy-limit":
            pass
        else:
            raise ValueError(f"wrong empty policy: {policy}")
    # converting blocks to pd.Series (index = block number, values = paths/URIs)
    if len(blocks) > 0:
        if kwargs["verbose"]:
            print(f"moving {len(blocks)} blocks in {in_dir}")
        blocks = pd.Series(data=list(blocks.keys()), index=list(blocks.values()))
        blocks.name = "blocks"
    all_series = [blocks]
    # making sure the 4 arguments for extra files are repeated the same number of time
    assert len(extra_pattern) == len(extra_pattern_name)
    assert len(extra_loader) == len(extra_pattern_name)
    assert len(extra_loader) == len(extra_require)
    # matching patterns for extra files, populating the dataframe
    for val_pat, val_name, require in zip(
        extra_pattern, extra_pattern_name, extra_require
    ):
        datas = _find_matches(val_pat, all_files)
        if require:
            if len(datas) != len(blocks):
                raise ValueError(
                    f"files missing for {val_name}: need {len(blocks)} found {len(datas)}"
                )
        if len(datas) > 0:
            datas = pd.Series(data=list(datas.keys()), index=list(datas.values()))
            datas.name = val_name
            all_series.append(datas)
    # rows = block numbers, columns = "blocks" + one column per extra pattern
    df = pd.DataFrame(all_series).T
    # verifying that index is consecutive
    if not kwargs["allow_nonconsecutive"]:
        assert (np.diff(df.index.values) == 1).all()
    # managing files that are not blocks; raising an error if there are too many of them
    vals = set(df.values.reshape((-1,)))
    other = [i for i in all_files if i not in vals]
    if len(other) > kwargs["max_unmatched_files"]:
        print("example unmatched files found")
        print(other[:: len(other) // 20 + 1])
        print("Verify that none of these should be converted using extra_pattern")
        print("If not, increase max_unmatched_files")
        raise ValueError(
            "Limit exceeded: {0} files did not match anything".format(len(other))
        )
    # creating the reporter
    if (len(blocks) > 0) and (not kwargs["dry_run"]):
        rep = HDF5Reporter(
            folder=out_dir,
            max_data_length=kwargs["hdf5_blocks_per_file"],
            h5py_dset_opts=None,
            overwrite=kwargs["overwrite"],
        )
    # copying the "other" (unmatched) files verbatim into out_dir
    if not kwargs["dry_run"]:
        for i in other:
            dest = os.path.join(out_dir, os.path.split(i)[-1])
            if not kwargs["overwrite"]:
                if os.path.exists(dest):
                    raise IOError(f"File exists: {dest}")
            shutil.copy(i, dest)
    if (len(blocks) > 0) and (not kwargs["dry_run"]):
        # main loop - skip_files is applied here
        for i, subdf in df.iloc[:: kwargs["skip_files"]].iterrows():
            cur = {}
            data = subdf["blocks"]
            if kwargs["input_style"] == "old":
                data = load(data)
                data = np.round(np.asarray(data, dtype=np.float32), kwargs["round_to"])
                cur["pos"] = data
                cur["block"] = i
            elif kwargs["input_style"] == "new":
                cur = load_URI(data)
                cur["pos"] = np.round(
                    np.asarray(cur["pos"], dtype=np.float32), kwargs["round_to"]
                )
            # adding "extra" data in the dict to save
            # NOTE(review): eval() on a user-supplied --extra-loader
            # expression executes arbitrary code; acceptable for a personal
            # CLI tool, but never expose this to untrusted input.
            for name, ldr in zip(extra_pattern_name, extra_loader):
                if name not in subdf:
                    continue
                filename = subdf[name]
                if filename is not None:
                    cur[name] = eval(ldr)
            rep.report("data", cur)
        rep.dump_data()
    # replacing the original trajectory if requested
    if kwargs["replace"] and (not kwargs["dry_run"]):
        files = [os.path.join(in_dir, i) for i in os.listdir(in_dir)]
        if not kwargs["force_delete"]:
            for f in files:
                if not os.path.isfile(f):
                    raise IOError(f"I won't delete a sub-directory {f}")
                    # NOTE(review): this exit() is unreachable (after raise)
                    exit()
                os.remove(f)
        # NOTE(review): with --force-delete nothing is removed above, so
        # os.rmdir on a non-empty directory would raise — confirm intent.
        os.rmdir(in_dir)
        shutil.move(out_dir, in_dir)


if __name__ == "__main__":
    trajcopy()
| [
"os.mkdir",
"os.remove",
"click.option",
"os.path.isfile",
"os.path.join",
"shutil.copy",
"pandas.DataFrame",
"os.path.abspath",
"polychrom.hdf5_format.HDF5Reporter",
"os.path.exists",
"polychrom.hdf5_format.load_URI",
"click.command",
"re.search",
"polychrom.hdf5_format.list_URIs",
"num... | [((3800, 3815), 'click.command', 'click.command', ([], {}), '()\n', (3813, 3815), False, 'import click\n'), ((3817, 3946), 'click.option', 'click.option', (['"""--input-style"""'], {'default': '"""old"""', 'show_default': '(True)', 'help': '"""old (block*.dat) or new (HDF5) style for input files"""'}), "('--input-style', default='old', show_default=True, help=\n 'old (block*.dat) or new (HDF5) style for input files')\n", (3829, 3946), False, 'import click\n'), ((3962, 4136), 'click.option', 'click.option', (['"""--empty-policy"""'], {'default': '"""copy-limit"""', 'show_default': '(True)', 'help': '"""empty trajectories: \'copy\', \'copy-limit\' (enforce file limit), \'raise\', \'ignore\'"""'}), '(\'--empty-policy\', default=\'copy-limit\', show_default=True,\n help=\n "empty trajectories: \'copy\', \'copy-limit\' (enforce file limit), \'raise\', \'ignore\'"\n )\n', (3974, 4136), False, 'import click\n'), ((4143, 4230), 'click.option', 'click.option', (['"""--dry-run"""'], {'is_flag': '(True)', 'help': '"""do not perform any file operations"""'}), "('--dry-run', is_flag=True, help=\n 'do not perform any file operations')\n", (4155, 4230), False, 'import click\n'), ((4234, 4370), 'click.option', 'click.option', (['"""--block-pattern"""'], {'default': '"""block([0-9]+).dat"""', 'show_default': '(True)', 'help': '"""regex to match a block number in blockX.dat"""'}), "('--block-pattern', default='block([0-9]+).dat', show_default=\n True, help='regex to match a block number in blockX.dat')\n", (4246, 4370), False, 'import click\n'), ((4386, 4602), 'click.option', 'click.option', (['"""--extra-pattern"""'], {'multiple': '(True)', 'default': "['SMC([0-9]+).dat']", 'show_default': '(True)', 'help': '"""regex pattern to match \'extra\' files with file number in parentheses. 
This argument can be repeated"""'}), '(\'--extra-pattern\', multiple=True, default=[\'SMC([0-9]+).dat\'],\n show_default=True, help=\n "regex pattern to match \'extra\' files with file number in parentheses. This argument can be repeated"\n )\n', (4398, 4602), False, 'import click\n'), ((4613, 4771), 'click.option', 'click.option', (['"""--extra-pattern-name"""'], {'multiple': '(True)', 'default': "['lef_positions']", 'show_default': '(True)', 'help': '"""key under which to store (can be repeated)"""'}), "('--extra-pattern-name', multiple=True, default=[\n 'lef_positions'], show_default=True, help=\n 'key under which to store (can be repeated)')\n", (4625, 4771), False, 'import click\n'), ((4786, 4978), 'click.option', 'click.option', (['"""--extra-loader"""'], {'multiple': '(True)', 'default': '["pickle.load(open(filename,\'rb\'))"]', 'show_default': '(True)', 'help': '"""python expression f(filename) that loads data (can be repeated)"""'}), '(\'--extra-loader\', multiple=True, default=[\n "pickle.load(open(filename,\'rb\'))"], show_default=True, help=\n \'python expression f(filename) that loads data (can be repeated)\')\n', (4798, 4978), False, 'import click\n'), ((4993, 5175), 'click.option', 'click.option', (['"""--extra-require/--extra-not-require"""'], {'multiple': '(True)', 'default': '[False]', 'show_default': '(True)', 'help': '"""Require or not that extra files are present (can be repeated)"""'}), "('--extra-require/--extra-not-require', multiple=True, default=\n [False], show_default=True, help=\n 'Require or not that extra files are present (can be repeated)')\n", (5005, 5175), False, 'import click\n'), ((5190, 5322), 'click.option', 'click.option', (['"""--overwrite/--not-overwrite"""'], {'default': '(False)', 'show_default': '(True)', 'help': '"""raise error if files exist in destination"""'}), "('--overwrite/--not-overwrite', default=False, show_default=\n True, help='raise error if files exist in destination')\n", (5202, 5322), False, 'import 
click\n'), ((5338, 5449), 'click.option', 'click.option', (['"""--allow-nonconsecutive"""'], {'is_flag': '(True)', 'help': '"""allow blocks to be non-consecutive (1,2,3...)"""'}), "('--allow-nonconsecutive', is_flag=True, help=\n 'allow blocks to be non-consecutive (1,2,3...)')\n", (5350, 5449), False, 'import click\n'), ((5461, 5500), 'click.option', 'click.option', (['"""--verbose"""'], {'is_flag': '(True)'}), "('--verbose', is_flag=True)\n", (5473, 5500), False, 'import click\n'), ((5502, 5602), 'click.option', 'click.option', (['"""--round-to"""'], {'default': '(2)', 'show_default': '(True)', 'help': '"""round to this number of digits"""'}), "('--round-to', default=2, show_default=True, help=\n 'round to this number of digits')\n", (5514, 5602), False, 'import click\n'), ((5605, 5701), 'click.option', 'click.option', (['"""--skip-files"""'], {'default': '(1)', 'show_default': '(True)', 'help': '"""save only every Nth file"""'}), "('--skip-files', default=1, show_default=True, help=\n 'save only every Nth file')\n", (5617, 5701), False, 'import click\n'), ((5704, 5821), 'click.option', 'click.option', (['"""--HDF5-blocks-per-file"""'], {'default': '(100)', 'show_default': '(True)', 'help': '"""blocks per file for HDF5 reporter"""'}), "('--HDF5-blocks-per-file', default=100, show_default=True, help\n ='blocks per file for HDF5 reporter')\n", (5716, 5821), False, 'import click\n'), ((5837, 5954), 'click.option', 'click.option', (['"""--max-unmatched-files"""'], {'default': '(20)', 'show_default': '(True)', 'help': '"""maximum number of extra files found"""'}), "('--max-unmatched-files', default=20, show_default=True, help=\n 'maximum number of extra files found')\n", (5849, 5954), False, 'import click\n'), ((5970, 6079), 'click.option', 'click.option', (['"""--replace"""'], {'is_flag': '(True)', 'help': '"""use out_dir as temp dir, and replace in_dir with out_dir"""'}), "('--replace', is_flag=True, help=\n 'use out_dir as temp dir, and replace in_dir with 
out_dir')\n", (5982, 6079), False, 'import click\n'), ((6091, 6165), 'click.option', 'click.option', (['"""--force-delete"""'], {'is_flag': '(True)', 'help': '"""delete subdirectories"""'}), "('--force-delete', is_flag=True, help='delete subdirectories')\n", (6103, 6165), False, 'import click\n'), ((6167, 6191), 'click.argument', 'click.argument', (['"""IN_DIR"""'], {}), "('IN_DIR')\n", (6181, 6191), False, 'import click\n'), ((6193, 6218), 'click.argument', 'click.argument', (['"""OUT_DIR"""'], {}), "('OUT_DIR')\n", (6207, 6218), False, 'import click\n'), ((7369, 7392), 'os.path.abspath', 'os.path.abspath', (['in_dir'], {}), '(in_dir)\n', (7384, 7392), False, 'import os\n'), ((7400, 7422), 'os.path.isfile', 'os.path.isfile', (['in_dir'], {}), '(in_dir)\n', (7414, 7422), False, 'import os\n'), ((7579, 7603), 'os.path.abspath', 'os.path.abspath', (['out_dir'], {}), '(out_dir)\n', (7594, 7603), False, 'import os\n'), ((3440, 3464), 're.search', 're.search', (['pat', 'filename'], {}), '(pat, filename)\n', (3449, 3464), False, 'import re\n'), ((7486, 7508), 'os.path.exists', 'os.path.exists', (['in_dir'], {}), '(in_dir)\n', (7500, 7508), False, 'import os\n'), ((7893, 7918), 'os.path.join', 'os.path.join', (['in_dir', '"""*"""'], {}), "(in_dir, '*')\n", (7905, 7918), False, 'import os\n'), ((11361, 11493), 'polychrom.hdf5_format.HDF5Reporter', 'HDF5Reporter', ([], {'folder': 'out_dir', 'max_data_length': "kwargs['hdf5_blocks_per_file']", 'h5py_dset_opts': 'None', 'overwrite': "kwargs['overwrite']"}), "(folder=out_dir, max_data_length=kwargs['hdf5_blocks_per_file'],\n h5py_dset_opts=None, overwrite=kwargs['overwrite'])\n", (11373, 11493), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n'), ((13384, 13412), 'shutil.move', 'shutil.move', (['out_dir', 'in_dir'], {}), '(out_dir, in_dir)\n', (13395, 13412), False, 'import shutil\n'), ((7762, 7785), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (7776, 7785), False, 'import 
os\n'), ((7799, 7816), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (7807, 7816), False, 'import os\n'), ((10504, 10528), 'pandas.DataFrame', 'pd.DataFrame', (['all_series'], {}), '(all_series)\n', (10516, 10528), True, 'import pandas as pd\n'), ((11850, 11870), 'shutil.copy', 'shutil.copy', (['i', 'dest'], {}), '(i, dest)\n', (11861, 11870), False, 'import shutil\n'), ((13056, 13079), 'os.path.join', 'os.path.join', (['in_dir', 'i'], {}), '(in_dir, i)\n', (13068, 13079), False, 'import os\n'), ((13359, 13375), 'os.rmdir', 'os.rmdir', (['in_dir'], {}), '(in_dir)\n', (13367, 13375), False, 'import os\n'), ((11758, 11778), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (11772, 11778), False, 'import os\n'), ((12169, 12179), 'polychrom.polymerutils.load', 'load', (['data'], {}), '(data)\n', (12173, 12179), False, 'from polychrom.polymerutils import load\n'), ((13089, 13107), 'os.listdir', 'os.listdir', (['in_dir'], {}), '(in_dir)\n', (13099, 13107), False, 'import os\n'), ((13334, 13346), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (13343, 13346), False, 'import os\n'), ((8657, 8685), 'shutil.move', 'shutil.move', (['in_dir', 'out_dir'], {}), '(in_dir, out_dir)\n', (8668, 8685), False, 'import shutil\n'), ((11677, 11693), 'os.path.split', 'os.path.split', (['i'], {}), '(i)\n', (11690, 11693), False, 'import os\n'), ((12212, 12246), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (12222, 12246), True, 'import numpy as np\n'), ((12406, 12420), 'polychrom.hdf5_format.load_URI', 'load_URI', (['data'], {}), '(data)\n', (12414, 12420), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n'), ((13199, 13216), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (13213, 13216), False, 'import os\n'), ((10645, 10669), 'numpy.diff', 'np.diff', (['df.index.values'], {}), '(df.index.values)\n', (10652, 10669), True, 'import numpy as np\n'), ((12480, 12520), 
'numpy.asarray', 'np.asarray', (["cur['pos']"], {'dtype': 'np.float32'}), "(cur['pos'], dtype=np.float32)\n", (12490, 12520), True, 'import numpy as np\n'), ((8117, 8170), 'polychrom.hdf5_format.list_URIs', 'list_URIs', (['in_dir'], {'empty_error': '(True)', 'return_dict': '(True)'}), '(in_dir, empty_error=True, return_dict=True)\n', (8126, 8170), False, 'from polychrom.hdf5_format import HDF5Reporter, list_URIs, load_URI\n')] |
# -*- coding: utf-8 -*-
# !@time: 2021/6/19 10:19 下午
# !@author: superMC @email: <EMAIL>
# !@fileName: TextRNN_selfAtt.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import matmul
class Config(object):
    """Paths and hyper-parameters for the TextRNN + self-attention classifier.

    Args:
        dataset: root directory holding ``data/``, ``saved_dict/`` and ``log/``.
        embedding: filename of a pretrained-embedding ``.npz`` under ``data/``
            (expects an ``"embeddings"`` key), or the string ``'random'`` to
            train embeddings from scratch.
    """

    def __init__(self, dataset, embedding):
        # NOTE(review): this file adds self-attention, yet model_name is plain
        # 'TextRNN', so save_path/log_path collide with the vanilla TextRNN run
        # -- confirm this is intended.
        self.model_name = 'TextRNN'
        self.train_path = dataset + '/data/train.txt'                                # training set
        self.dev_path = dataset + '/data/dev.txt'                                    # validation set
        self.test_path = dataset + '/data/test.txt'                                  # test set
        self.class_list = [x.strip() for x in open(
            dataset + '/data/class.txt', encoding='utf-8').readlines()]              # class-name list
        self.vocab_path = dataset + '/data/vocab.pkl'                                # vocabulary pickle
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'        # model checkpoint
        self.log_path = dataset + '/log/' + self.model_name
        # Pretrained embedding matrix (float32 tensor), or None for random init.
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32')) \
            if embedding != 'random' else None  # pretrained word vectors
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')   # compute device
        self.dropout = 0.5                                              # dropout probability
        self.require_improvement = 1000                                 # early-stop if no dev improvement within this many batches
        self.num_classes = len(self.class_list)                         # number of target classes
        self.n_vocab = 0                                                # vocab size, filled in at runtime
        self.num_epochs = 20                                            # number of training epochs
        self.batch_size = 64                                            # mini-batch size
        self.pad_size = 32                                              # every sentence padded/truncated to this length
        self.learning_rate = 1e-3                                       # optimizer learning rate
        self.embed = self.embedding_pretrained.size(1) \
            if self.embedding_pretrained is not None else 300           # embedding dim; follows pretrained vectors when present
        self.hidden_size = 128                                          # LSTM hidden size
        self.num_layers = 2                                             # number of stacked LSTM layers
        # Self-attention head geometry (per-head key/value dims, head count).
        self.d_k = 64
        self.d_v = 64
        self.n_head = 1
        self.attn_dropout = 0.1
class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(q @ k^T / temperature) @ v.

    Expects q, k, v shaped (batch, n_head, seq_len, depth); returns the
    attended values together with the (post-dropout) attention weights.
    """

    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        # inplace must stay False here: the softmax output is also returned
        # to the caller as the attention map.
        self.dropout = nn.Dropout(attn_dropout, inplace=False)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, q, k, v, mask=None):
        # Scale the queries first, then take dot products against the keys.
        scores = matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            # Masked positions get -1e9 so softmax drives them to ~0.
            scores = scores.masked_fill(mask == 0, -1e9)
        weights = self.dropout(self.softmax(scores))
        return matmul(weights, v), weights
class MultiHeadAttention(nn.Module):
    """Multi-head attention with residual connection and post-LayerNorm.

    Projects q/k/v into n_head heads of width d_k/d_v, runs scaled
    dot-product attention per head, merges heads, then applies a final
    linear layer, dropout, residual add and layer norm.
    """

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.d_v = d_v
        self.d_k = d_k
        self.n_head = n_head
        # Head projections are bias-free; each maps d_model -> n_head * depth.
        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
        self.dropout = nn.Dropout(dropout, inplace=True)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    def forward(self, q, k, v, mask=None):
        n_head, d_k, d_v = self.n_head, self.d_k, self.d_v
        batch = q.size(0)
        residual = q
        # Project, split the model dim into (head, depth), then move the head
        # axis next to batch: (b, len, d_model) -> (b, n_head, len, depth).
        # A direct reshape to (b, n_head, len, depth) would scramble features,
        # hence view + transpose.
        q = self.w_qs(q).view(batch, -1, n_head, d_k).transpose(1, 2)
        k = self.w_ks(k).view(batch, -1, n_head, d_k).transpose(1, 2)
        v = self.w_vs(v).view(batch, -1, n_head, d_v).transpose(1, 2)
        if mask is not None:
            mask = mask.unsqueeze(1)  # broadcast the mask over the head axis
        out, attn = self.attention(q, k, v, mask)
        # Merge heads back: (b, n_head, len_q, d_v) -> (b, len_q, n_head * d_v).
        out = out.transpose(1, 2).contiguous().view(batch, out.size(2), -1)
        out = self.dropout(self.fc(out))
        out = self.layer_norm(out + residual)
        return out, attn
class Model(nn.Module):
    """Text classifier: embedding -> BiLSTM -> multi-head self-attention
    -> average pooling over time -> linear classifier head."""

    def __init__(self, config):
        super().__init__()
        if config.embedding_pretrained is not None:
            # Fine-tune the pretrained embedding table (freeze=False).
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.self_attention = MultiHeadAttention(config.n_head, config.hidden_size, config.d_k, config.d_v,
                                                 config.attn_dropout)
        self.pool = nn.AdaptiveAvgPool1d(1)
        # Project the bidirectional LSTM output (2 * hidden) down to hidden
        # so the attention block's d_model matches.
        self.fc1 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.fc2 = nn.Linear(config.hidden_size, config.num_classes)

    def forward(self, x):
        tokens, _ = x  # input arrives as a pair; only the first element (token ids) is used
        embedded = self.embedding(tokens)    # (batch, seq_len, embed)
        rnn_out, _ = self.lstm(embedded)     # (batch, seq_len, 2 * hidden)
        projected = self.fc1(rnn_out)        # (batch, seq_len, hidden)
        attended, _ = self.self_attention(projected, projected, projected)
        # Average over the time axis: (batch, hidden, seq_len) -> (batch, hidden).
        pooled = self.pool(attended.transpose(1, 2).contiguous()).squeeze(-1)
        return self.fc2(pooled)              # class logits
| [
"torch.nn.Dropout",
"numpy.load",
"torch.nn.Embedding",
"torch.nn.Embedding.from_pretrained",
"torch.nn.LayerNorm",
"torch.nn.LSTM",
"torch.nn.Softmax",
"torch.cuda.is_available",
"torch.nn.Linear",
"torch.matmul",
"torch.nn.AdaptiveAvgPool1d"
] | [((2083, 2122), 'torch.nn.Dropout', 'nn.Dropout', (['attn_dropout'], {'inplace': '(False)'}), '(attn_dropout, inplace=False)\n', (2093, 2122), True, 'import torch.nn as nn\n'), ((2146, 2164), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (2156, 2164), True, 'import torch.nn as nn\n'), ((2465, 2480), 'torch.matmul', 'matmul', (['attn', 'v'], {}), '(attn, v)\n', (2471, 2480), False, 'from torch import matmul\n'), ((2737, 2781), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {'bias': '(False)'}), '(d_model, n_head * d_k, bias=False)\n', (2746, 2781), True, 'import torch.nn as nn\n'), ((2802, 2846), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_k)'], {'bias': '(False)'}), '(d_model, n_head * d_k, bias=False)\n', (2811, 2846), True, 'import torch.nn as nn\n'), ((2867, 2911), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(n_head * d_v)'], {'bias': '(False)'}), '(d_model, n_head * d_v, bias=False)\n', (2876, 2911), True, 'import torch.nn as nn\n'), ((2930, 2974), 'torch.nn.Linear', 'nn.Linear', (['(n_head * d_v)', 'd_model'], {'bias': '(False)'}), '(n_head * d_v, d_model, bias=False)\n', (2939, 2974), True, 'import torch.nn as nn\n'), ((3075, 3108), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {'inplace': '(True)'}), '(dropout, inplace=True)\n', (3085, 3108), True, 'import torch.nn as nn\n'), ((3135, 3167), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {'eps': '(1e-06)'}), '(d_model, eps=1e-06)\n', (3147, 3167), True, 'import torch.nn as nn\n'), ((4410, 4537), 'torch.nn.LSTM', 'nn.LSTM', (['config.embed', 'config.hidden_size', 'config.num_layers'], {'bidirectional': '(True)', 'batch_first': '(True)', 'dropout': 'config.dropout'}), '(config.embed, config.hidden_size, config.num_layers, bidirectional=\n True, batch_first=True, dropout=config.dropout)\n', (4417, 4537), True, 'import torch.nn as nn\n'), ((4759, 4782), 'torch.nn.AdaptiveAvgPool1d', 'nn.AdaptiveAvgPool1d', (['(1)'], {}), '(1)\n', (4779, 4782), 
True, 'import torch.nn as nn\n'), ((4802, 4855), 'torch.nn.Linear', 'nn.Linear', (['(config.hidden_size * 2)', 'config.hidden_size'], {}), '(config.hidden_size * 2, config.hidden_size)\n', (4811, 4855), True, 'import torch.nn as nn\n'), ((4875, 4924), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_classes'], {}), '(config.hidden_size, config.num_classes)\n', (4884, 4924), True, 'import torch.nn as nn\n'), ((4200, 4271), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['config.embedding_pretrained'], {'freeze': '(False)'}), '(config.embedding_pretrained, freeze=False)\n', (4228, 4271), True, 'import torch.nn as nn\n'), ((4315, 4389), 'torch.nn.Embedding', 'nn.Embedding', (['config.n_vocab', 'config.embed'], {'padding_idx': '(config.n_vocab - 1)'}), '(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)\n', (4327, 4389), True, 'import torch.nn as nn\n'), ((1109, 1134), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1132, 1134), False, 'import torch\n'), ((932, 971), 'numpy.load', 'np.load', (["(dataset + '/data/' + embedding)"], {}), "(dataset + '/data/' + embedding)\n", (939, 971), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.