from numpy import array, cos, identity, sin
from core.dynamics import SystemDynamics, RoboticDynamics
class DoubleInvertedPendulum(RoboticDynamics):
def __init__(self, m_1, m_2, l_1, l_2, g=9.81):
SystemDynamics.__init__(self, 4, 2)
RoboticDynamics.__init__(self, identity(2))
self.params = m_1, m_2, l_1, l_2, g
def D(self, q):
m_1, m_2, l_1, l_2, _ = self.params
_, theta_2 = q
D_11 = (m_1 + m_2) * (l_1 ** 2) + 2 * m_2 * l_1 * l_2 * cos(theta_2) + m_2 * (l_2 ** 2)
D_12 = m_2 * l_1 * l_2 * cos(theta_2) + m_2 * (l_2 ** 2)
D_21 = D_12
D_22 = m_2 * (l_2 ** 2)
return array([[D_11, D_12], [D_21, D_22]])
def C(self, q, q_dot):
_, m_2, l_1, l_2, _ = self.params
_, theta_2 = q
theta_1_dot, theta_2_dot = q_dot
C_11 = 0
C_12 = -m_2 * l_1 * l_2 * (2 * theta_1_dot + theta_2_dot) * sin(theta_2)
C_21 = -C_12 / 2
C_22 = -m_2 * l_1 * l_2 * theta_1_dot * sin(theta_2) / 2
return array([[C_11, C_12], [C_21, C_22]])
def U(self, q):
m_1, m_2, l_1, l_2, g = self.params
theta_1, theta_2 = q
return (m_1 + m_2) * g * l_1 * cos(theta_1) + m_2 * g * l_2 * cos(theta_1 + theta_2)
def G(self, q):
m_1, m_2, l_1, l_2, g = self.params
theta_1, theta_2 = q
G_1 = -(m_1 + m_2) * g * l_1 * sin(theta_1) - m_2 * g * l_2 * sin(theta_1 + theta_2)
G_2 = -m_2 * g * l_2 * sin(theta_1 + theta_2)
        return array([G_1, G_2])
"numpy.identity",
"numpy.sin",
"numpy.array",
"numpy.cos",
"core.dynamics.SystemDynamics.__init__"
] | [((210, 245), 'core.dynamics.SystemDynamics.__init__', 'SystemDynamics.__init__', (['self', '(4)', '(2)'], {}), '(self, 4, 2)\n', (233, 245), False, 'from core.dynamics import SystemDynamics, RoboticDynamics\n'), ((666, 701), 'numpy.array', 'array', (['[[D_11, D_12], [D_21, D_22]]'], {}), '([[D_11, D_12], [D_21, D_22]])\n', (671, 701), False, 'from numpy import array, cos, identity, sin\n'), ((1043, 1078), 'numpy.array', 'array', (['[[C_11, C_12], [C_21, C_22]]'], {}), '([[C_11, C_12], [C_21, C_22]])\n', (1048, 1078), False, 'from numpy import array, cos, identity, sin\n'), ((1530, 1547), 'numpy.array', 'array', (['[G_1, G_2]'], {}), '([G_1, G_2])\n', (1535, 1547), False, 'from numpy import array, cos, identity, sin\n'), ((285, 296), 'numpy.identity', 'identity', (['(2)'], {}), '(2)\n', (293, 296), False, 'from numpy import array, cos, identity, sin\n'), ((925, 937), 'numpy.sin', 'sin', (['theta_2'], {}), '(theta_2)\n', (928, 937), False, 'from numpy import array, cos, identity, sin\n'), ((1492, 1514), 'numpy.sin', 'sin', (['(theta_1 + theta_2)'], {}), '(theta_1 + theta_2)\n', (1495, 1514), False, 'from numpy import array, cos, identity, sin\n'), ((567, 579), 'numpy.cos', 'cos', (['theta_2'], {}), '(theta_2)\n', (570, 579), False, 'from numpy import array, cos, identity, sin\n'), ((1011, 1023), 'numpy.sin', 'sin', (['theta_2'], {}), '(theta_2)\n', (1014, 1023), False, 'from numpy import array, cos, identity, sin\n'), ((1216, 1228), 'numpy.cos', 'cos', (['theta_1'], {}), '(theta_1)\n', (1219, 1228), False, 'from numpy import array, cos, identity, sin\n'), ((1247, 1269), 'numpy.cos', 'cos', (['(theta_1 + theta_2)'], {}), '(theta_1 + theta_2)\n', (1250, 1269), False, 'from numpy import array, cos, identity, sin\n'), ((1407, 1419), 'numpy.sin', 'sin', (['theta_1'], {}), '(theta_1)\n', (1410, 1419), False, 'from numpy import array, cos, identity, sin\n'), ((1438, 1460), 'numpy.sin', 'sin', (['(theta_1 + theta_2)'], {}), '(theta_1 + theta_2)\n', (1441, 1460), False, 'from numpy import array, cos, identity, sin\n'), ((502, 514), 'numpy.cos', 'cos', (['theta_2'], {}), '(theta_2)\n', (505, 514), False, 'from numpy import array, cos, identity, sin\n')] |
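
# A minimal usage sketch (added for illustration; it assumes the
# core.dynamics.RoboticDynamics base class exposes D, C, U and G as the
# inertia, Coriolis, potential and gravity terms of the manipulator equation
# D(q) q'' + C(q, q') q' + G(q) = B u, with B = identity(2) here):
if __name__ == "__main__":
    pendulum = DoubleInvertedPendulum(m_1=1.0, m_2=1.0, l_1=0.5, l_2=0.5)
    q = array([0.1, -0.2])        # joint angles theta_1, theta_2
    q_dot = array([0.0, 0.3])     # joint velocities
    print("D(q) =\n", pendulum.D(q))
    print("C(q, q_dot) =\n", pendulum.C(q, q_dot))
    print("G(q) =\n", pendulum.G(q))
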
import argparse
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
import cooler
import numpy as np
import pandas as pd
from hicmatrix import HiCMatrix
from hicmatrix.lib import MatrixFileHandler
from schicexplorer._version import __version__
from schicexplorer.utilities import cell_name_list
def parse_arguments(args=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False,
description=''
)
parserRequired = parser.add_argument_group('Required arguments')
# define the arguments
parserRequired.add_argument('--matrix', '-m',
help='The single cell Hi-C interaction matrices to investigate for QC. Needs to be in scool format',
metavar='scool scHi-C matrix',
required=True)
parserRequired.add_argument('--outFileName', '-o',
help='File name of the normalized scool matrix.',
required=True)
parserRequired.add_argument('--threads', '-t',
help='Number of threads. Using the python multiprocessing module.',
required=False,
default=4,
type=int)
parserRequired.add_argument('--normalize', '-n',
                            help='Normalization mode: a) normalize all matrices to the lowest read count of the given matrices, b) normalize all to a given read coverage value, or c) scale by a multiplicative value',
choices=['smallest', 'read_count', 'multiplicative'],
default='smallest',
required=True)
parserOpt = parser.add_argument_group('Optional arguments')
parserOpt.add_argument('--setToZeroThreshold', '-z',
help='Values smaller as this threshold are set to 0.',
required=False,
default=1.0,
type=float)
parserOpt.add_argument('--value', '-v', default=1,
type=float,
help='This value is used to either be interpreted as the desired read_count or the multiplicative value. This depends on the value for --normalize')
parserOpt.add_argument('--maximumRegionToConsider',
help='To compute the normalization factor for the normalization mode \'smallest\' and \'read_count\', consider only this genomic distance around the diagonal.',
required=False,
default=30000000,
type=int)
parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
parserOpt.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
return parser
def compute_sum(pMatrixName, pMatricesList, pMaxDistance, pQueue):
sum_list = []
for i, matrix in enumerate(pMatricesList):
matrixFileHandler = MatrixFileHandler(pFileType='cool', pMatrixFile=pMatrixName + '::' + matrix, pLoadMatrixOnly=True)
_matrix, cut_intervals, nan_bins, \
distance_counts, correction_factors = matrixFileHandler.load()
instances = _matrix[0]
features = _matrix[1]
distances = np.absolute(instances - features)
mask = distances <= pMaxDistance
sum_of_matrix = _matrix[2][mask].sum()
sum_list.append(sum_of_matrix)
del _matrix
pQueue.put(sum_list)
def compute_normalize(pMatrixName, pMatricesList, pNormalizeMax, pSumOfAll, pThreshold, pMultiplicative, pQueue):
pixelList = []
for i, matrix in enumerate(pMatricesList):
matrixFileHandler = MatrixFileHandler(pFileType='cool', pMatrixFile=pMatrixName + '::' + matrix, pLoadMatrixOnly=True)
_matrix, cut_intervals, nan_bins, \
distance_counts, correction_factors = matrixFileHandler.load()
data = np.array(_matrix[2]).astype(np.float32)
instances = np.array(_matrix[0])
features = np.array(_matrix[1])
mask = np.isnan(data)
data[mask] = 0
mask = np.isinf(data)
data[mask] = 0
if pMultiplicative is None:
adjust_factor = pSumOfAll[i] / pNormalizeMax
else:
adjust_factor = pMultiplicative
if pMultiplicative is None:
data /= adjust_factor
else:
data *= adjust_factor
mask = np.isnan(data)
data[mask] = 0
mask = np.isinf(data)
data[mask] = 0
mask = data < pThreshold
data[mask] = 0
mask = data == 0
instances = instances[~mask]
features = features[~mask]
data = data[~mask]
pixels = pd.DataFrame({'bin1_id': instances, 'bin2_id': features, 'count': data})
pixelList.append(pixels)
pQueue.put(pixelList)
def main(args=None):
args = parse_arguments().parse_args(args)
matrices_name = args.matrix
matrices_list = cell_name_list(matrices_name)
threads = args.threads
# get bin size
cooler_obj = cooler.Cooler(matrices_name + '::' + matrices_list[0])
bin_size = cooler_obj.binsize
sum_list_threads = [None] * args.threads
process = [None] * args.threads
queue = [None] * args.threads
thread_done = [False] * args.threads
matricesPerThread = len(matrices_list) // threads
for i in range(args.threads):
if i < threads - 1:
matrices_name_list = matrices_list[i * matricesPerThread:(i + 1) * matricesPerThread]
else:
matrices_name_list = matrices_list[i * matricesPerThread:]
queue[i] = Queue()
process[i] = Process(target=compute_sum, kwargs=dict(
pMatrixName=matrices_name,
pMatricesList=matrices_name_list,
pMaxDistance=args.maximumRegionToConsider // bin_size,
pQueue=queue[i]
)
)
process[i].start()
all_data_collected = False
while not all_data_collected:
for i in range(threads):
if queue[i] is not None and not queue[i].empty():
sum_list_threads[i] = queue[i].get()
queue[i] = None
process[i].join()
process[i].terminate()
process[i] = None
thread_done[i] = True
all_data_collected = True
for thread in thread_done:
if not thread:
all_data_collected = False
time.sleep(1)
sum_of_all = [item for sublist in sum_list_threads for item in sublist]
sum_of_all = np.array(sum_of_all)
argmin = np.argmin(sum_of_all)
if args.normalize == 'smallest':
normalizeMax = sum_of_all[argmin]
multiplicative = None
elif args.normalize == 'read_count':
normalizeMax = args.value
multiplicative = None
else:
normalizeMax = None
multiplicative = args.value
matricesPerThread = len(matrices_list) // threads
pixelsListThreads = [None] * args.threads
process = [None] * args.threads
queue = [None] * args.threads
thread_done = [False] * args.threads
for i in range(args.threads):
if i < args.threads - 1:
matrices_name_list = matrices_list[i * matricesPerThread:(i + 1) * matricesPerThread]
sum_of_all_list = sum_of_all[i * matricesPerThread:(i + 1) * matricesPerThread]
else:
matrices_name_list = matrices_list[i * matricesPerThread:]
sum_of_all_list = sum_of_all[i * matricesPerThread:]
queue[i] = Queue()
process[i] = Process(target=compute_normalize, kwargs=dict(
pMatrixName=matrices_name,
pMatricesList=matrices_name_list,
pNormalizeMax=normalizeMax,
pSumOfAll=sum_of_all_list,
pThreshold=args.setToZeroThreshold,
pMultiplicative=multiplicative,
pQueue=queue[i]
)
)
process[i].start()
all_data_collected = False
while not all_data_collected:
for i in range(threads):
if queue[i] is not None and not queue[i].empty():
pixelsListThreads[i] = queue[i].get()
queue[i] = None
process[i].join()
process[i].terminate()
process[i] = None
thread_done[i] = True
all_data_collected = True
for thread in thread_done:
if not thread:
all_data_collected = False
time.sleep(1)
pixelsList = [item for sublist in pixelsListThreads for item in sublist]
cooler_obj_external = cooler.Cooler(matrices_name + '::' + matrices_list[0])
bins = cooler_obj_external.bins()[:]
matrixFileHandler = MatrixFileHandler(pFileType='scool')
matrixFileHandler.matrixFile.coolObjectsList = None
matrixFileHandler.matrixFile.bins = bins
matrixFileHandler.matrixFile.pixel_list = pixelsList
matrixFileHandler.matrixFile.name_list = matrices_list
matrixFileHandler.save(args.outFileName, pSymmetric=True, pApplyCorrection=False)
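
# Illustrative sketch (added; not part of schicexplorer): the 'smallest'
# normalization above divides each cell matrix by
# adjust_factor = own_read_count / smallest_read_count, so every cell ends up
# at roughly the smallest coverage. Toy numbers only.
if __name__ == "__main__":
    _read_counts = np.array([1200.0, 900.0, 1500.0])   # per-cell read counts
    _normalize_max = _read_counts.min()                 # 'smallest' target
    for _counts in _read_counts:
        _adjust_factor = _counts / _normalize_max
        print(_counts, "->", _counts / _adjust_factor)  # every cell -> 900.0
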
import numpy as np
import gym
class SmartDiscrete:
def __init__(self, ignore_keys=None, always_attack=0):
if ignore_keys is None:
ignore_keys = []
self.always_attack = always_attack
self.angle = 5
self.all_actions_dict = {
"[('attack', {}), ('back', 0), ('camera', [0, 0]), ('forward', 1), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack): 0,
"[('attack', {}), ('back', 0), ('camera', [0, {}]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack, self.angle): 1,
"[('attack', 1), ('back', 0), ('camera', [0, 0]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]": 2,
"[('attack', {}), ('back', 0), ('camera', [{}, 0]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack, self.angle): 3,
"[('attack', {}), ('back', 0), ('camera', [-{}, 0]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack, self.angle): 4,
"[('attack', {}), ('back', 0), ('camera', [0, -{}]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack, self.angle): 5,
"[('attack', {}), ('back', 0), ('camera', [0, 0]), ('forward', 1), ('jump', 1), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack): 6,
"[('attack', {}), ('back', 0), ('camera', [0, 0]), ('forward', 0), ('jump', 0), ('left', 1), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack): 7,
"[('attack', {}), ('back', 0), ('camera', [0, 0]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 1), ('sneak', 0), ('sprint', 0)]".format(always_attack): 8,
"[('attack', {}), ('back', 1), ('camera', [0, 0]), ('forward', 0), ('jump', 0), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack): 9,
"[('attack', 1), ('back', 0), ('camera', [0, 0]), ('forward', 1), ('jump', 1), ('left', 0), ('right', 0), ('sneak', 0), ('sprint', 0)]".format(always_attack): 10}
self.ignore_keys = ignore_keys
self.key_to_dict = {
0: {'attack': always_attack,'back': 0,'camera': [0, 0],'forward': 1,'jump': 0,'left': 0,'right': 0,'sneak': 0,'sprint': 0},
1: {'attack': always_attack, 'back': 0, 'camera': [0, self.angle], 'forward': 0, 'jump': 0, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
2: {'attack': 1, 'back': 0, 'camera': [0, 0], 'forward': 0, 'jump': 0, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
3: {'attack': always_attack, 'back': 0, 'camera': [self.angle, 0], 'forward': 0, 'jump': 0, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
4: {'attack': always_attack, 'back': 0, 'camera': [-self.angle, 0], 'forward': 0, 'jump': 0, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
5: {'attack': always_attack, 'back': 0, 'camera': [0, -self.angle], 'forward': 0, 'jump': 0, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
6: {'attack': always_attack, 'back': 0, 'camera': [0, 0], 'forward': 1, 'jump': 1, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
7: {'attack': always_attack, 'back': 0, 'camera': [0, 0], 'forward': 0, 'jump': 0, 'left': 1, 'right': 0, 'sneak': 0, 'sprint': 0},
8: {'attack': always_attack, 'back': 0, 'camera': [0, 0], 'forward': 0, 'jump': 0, 'left': 0, 'right': 1, 'sneak': 0, 'sprint': 0},
9: {'attack': always_attack, 'back': 1, 'camera': [0, 0], 'forward': 0, 'jump': 0, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
10: {'attack': 1, 'back': 0, 'camera': [0, 0], 'forward': 1, 'jump': 1, 'left': 0, 'right': 0, 'sneak': 0, 'sprint': 0},
}
@staticmethod
def discrete_camera(camera):
result = list(camera)
if abs(result[1]) >= abs(result[0]):
result[0] = 0
else:
result[1] = 0
def cut(value, max_value=1.2):
sign = -1 if value < 0 else 1
if abs(value) >= max_value:
return 5 * sign
else:
return 0
cutten = list(map(cut, result))
return cutten
def preprocess_action_dict(self, action_dict):
no_action_part = ["sneak", "sprint"]
action_part = ["attack"] if self.always_attack else []
moving_actions = ["forward", "back", "right", "left"]
if action_dict["camera"] != [0, 0]:
no_action_part.append("attack")
no_action_part.append("jump")
no_action_part += moving_actions
elif action_dict["jump"] == 1:
action_dict["forward"] = 1
no_action_part += filter(lambda x: x != "forward", moving_actions)
else:
for a in moving_actions:
if action_dict[a] == 1:
no_action_part += filter(lambda x: x != a, moving_actions)
no_action_part.append("attack")
no_action_part.append("jump")
break
if "attack" not in no_action_part:
action_dict["attack"] = 1
for a in no_action_part:
action_dict[a] = 0
for a in action_part:
action_dict[a] = 1
return action_dict
@staticmethod
def dict_to_sorted_str(dict_):
return str(sorted(dict_.items()))
def get_key_by_action_dict(self, action_dict):
for ignored_key in self.ignore_keys:
action_dict.pop(ignored_key, None)
str_dict = self.dict_to_sorted_str(action_dict)
return self.all_actions_dict[str_dict]
def get_action_dict_by_key(self, key):
return self.key_to_dict[key]
def get_actions_dim(self):
return len(self.key_to_dict)
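
# Small usage sketch (added for illustration): round-trip an action key through
# the wrapper and discretize a continuous camera reading.
def _demo_smart_discrete():
    discrete = SmartDiscrete()
    action = discrete.get_action_dict_by_key(6)            # forward + jump
    assert discrete.get_key_by_action_dict(dict(action)) == 6
    assert SmartDiscrete.discrete_camera([0.4, -2.3]) == [0, -5]
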
def get_dtype_dict(env):
action_shape = env.action_space.shape
action_shape = action_shape if len(action_shape) > 0 else 1
action_dtype = env.action_space.dtype
action_dtype = 'int32' if np.issubdtype(action_dtype, int) else action_dtype
action_dtype = 'float32' if np.issubdtype(action_dtype, float) else action_dtype
env_dict = {'action': {'shape': action_shape,
'dtype': action_dtype},
'reward': {'dtype': 'float32'},
'done': {'dtype': 'bool'},
'n_reward': {'dtype': 'float32'},
'n_done': {'dtype': 'bool'},
'actual_n': {'dtype': 'float32'},
'demo': {'dtype': 'float32'}
}
for prefix in ('', 'next_', 'n_'):
if isinstance(env.observation_space, gym.spaces.Dict):
for name, space in env.observation_space.spaces.items():
env_dict[prefix + name] = {'shape': space.shape,
'dtype': space.dtype}
else:
env_dict[prefix + 'state'] = {'shape': env.observation_space.shape,
'dtype': env.observation_space.dtype}
dtype_dict = {key: value['dtype'] for key, value in env_dict.items()}
dtype_dict.update(weights='float32', indexes='int32')
    return env_dict, dtype_dict
"numpy.issubdtype"
] | [((6138, 6170), 'numpy.issubdtype', 'np.issubdtype', (['action_dtype', 'int'], {}), '(action_dtype, int)\n', (6151, 6170), True, 'import numpy as np\n'), ((6221, 6255), 'numpy.issubdtype', 'np.issubdtype', (['action_dtype', 'float'], {}), '(action_dtype, float)\n', (6234, 6255), True, 'import numpy as np\n')] |
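
# Quick check of get_dtype_dict (added; requires gym installed, CartPole used
# only as an example env): for a Discrete action space the 'action' entry
# becomes shape 1 / int32, and the observation shape is copied into 'state',
# 'next_state' and 'n_state'.
if __name__ == "__main__":
    _env = gym.make("CartPole-v1")
    _env_dict, _dtype_dict = get_dtype_dict(_env)
    print(_env_dict["action"], _env_dict["state"]["shape"])
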
from keras.models import Sequential
from keras.layers import Dense, Activation
import numpy as np
import csv
import random
# create model with 4 inputs and 3 outputs
model = Sequential()
model.add(Dense(4, input_shape=(4,)))
model.add(Activation("sigmoid"))
model.add(Dense(8))
model.add(Activation("sigmoid"))
model.add(Dense(3))
model.add(Activation("sigmoid"))
# use loss function for multiple classes
model.compile(
optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"]
)
# read dataset
data = []
data_classes = ["Iris-setosa", "Iris-versicolor", "Iris-virginica"]
with open("iris.data", "r") as iris_data:
iris_reader = csv.reader(iris_data, delimiter=",")
for sample in iris_reader:
if len(sample) == 5:
data.append(sample)
random.shuffle(data)
half = int(len(data) * 0.5)
train_data = []
train_labels = []
for sample in data[:half]:
sample_data = sample[:4]
sample_label = [0, 0, 0]
sample_label[data_classes.index(sample[4])] = 1
train_data.append(sample_data)
train_labels.append(sample_label)
test_data = []
test_labels = []
for sample in data[half:]:
sample_data = sample[:4]
sample_label = [0, 0, 0]
sample_label[data_classes.index(sample[4])] = 1
test_data.append(sample_data)
test_labels.append(sample_label)
# train model
BATCH_SIZE = 12
for i in range(int(len(train_data) / BATCH_SIZE)):
    # step through the data in whole batches (the original stepped by 1,
    # revisiting the same samples many times)
    lower = i * BATCH_SIZE
    upper = min(lower + BATCH_SIZE, len(train_data))
    batch_size = upper - lower
    # the CSV fields are strings, so cast to float32 before feeding Keras
    batch_data = np.array(train_data[lower:upper], dtype=np.float32)
    batch_labels = np.array(train_labels[lower:upper], dtype=np.float32)
    model.fit(batch_data, batch_labels, epochs=1000, batch_size=batch_size)
print("Evaluating model...")
# evaluate model
BATCH_SIZE = 12
for i in range(int(len(test_data) / BATCH_SIZE)):
    lower = i * BATCH_SIZE
    upper = min(lower + BATCH_SIZE, len(test_data))
    batch_size = upper - lower
    batch_data = np.array(test_data[lower:upper], dtype=np.float32)
    batch_labels = np.array(test_labels[lower:upper], dtype=np.float32)
    loss, accuracy = model.evaluate(batch_data, batch_labels, batch_size=batch_size)
    print("loss: {0} - accuracy: {1}".format(loss, accuracy))
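
# An alternative training loop (added sketch, not the author's code): Keras can
# batch and shuffle internally, which avoids the manual index arithmetic above.
x_train = np.array(train_data, dtype=np.float32)
y_train = np.array(train_labels, dtype=np.float32)
x_test = np.array(test_data, dtype=np.float32)
y_test = np.array(test_labels, dtype=np.float32)
model.fit(x_train, y_train, epochs=100, batch_size=BATCH_SIZE, verbose=0)
print(model.evaluate(x_test, y_test, batch_size=BATCH_SIZE))
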
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.utils import assert_eq
import cudf as gd
import dask_cudf as dgd
def _make_random_frame(nelem, npartitions=2):
df = pd.DataFrame(
{
"x": np.random.randint(0, 5, size=nelem),
"y": np.random.normal(size=nelem) + 1,
}
)
gdf = gd.DataFrame.from_pandas(df)
dgf = dgd.from_cudf(gdf, npartitions=npartitions)
return df, dgf
_reducers = ["sum", "count", "mean", "var", "std", "min", "max"]
def _get_reduce_fn(name):
def wrapped(series):
fn = getattr(series, name)
return fn()
return wrapped
@pytest.mark.parametrize("reducer", _reducers)
def test_series_reduce(reducer):
reducer = _get_reduce_fn(reducer)
np.random.seed(0)
size = 10
df, gdf = _make_random_frame(size)
got = reducer(gdf.x)
exp = reducer(df.x)
assert_eq(got, exp)
# -*- coding: utf-8 -*-
"""
Practica 6 - Espacio Fasico
<NAME> y <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import simpson as simps  # recent SciPy removed the 'simps' alias
from numpy import trapz
'''
Function that computes the derivative as a limit of difference quotients
input:
    q: position vector
    dq0: initial value of the derivative
    d: granularity of the time parameter
output:
    dq: vector of derivatives
'''
def deriv(q, dq0, d):
    # forward differences, seeded with the supplied initial derivative
    dq = (q[1:len(q)] - q[0:(len(q) - 1)]) / d
    dq = np.insert(dq, 0, dq0)
    return dq
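
# Quick sanity check (added for illustration): the forward differences of
# sin(t) should approximate cos(t); the first entry is just the seed dq0.
_t = np.arange(0, 1, 1e-3)
_dq = deriv(np.sin(_t), dq0=1.0, d=1e-3)
assert np.allclose(_dq[1:], np.cos(_t[1:]), atol=1e-2)
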
'''
Function F - Hamilton-Jacobi equations for the nonlinear oscillator
input:
    q: position vector
output:
    ddq: vector of second derivatives
'''
def F(q):
    ddq = -2 * q * (q ** 2 - 1)
    return ddq
'''
Function that solves the dynamical equation ddq = F(q), obtaining the orbit q(t)
input:
    n: number of points of the orbit
    q0: initial position
    dq0: initial value of the derivative
    F: system function
    d: granularity of the time parameter
output:
    q: computed position vector
'''
def orb(n, q0, dq0, F, args=None, d=0.001):
    q = np.empty([n + 1])
    q[0] = q0
    q[1] = q0 + dq0 * d
    for i in np.arange(2, n + 1):
        # explicit second-order difference step for ddq = F(q)
        q[i] = 2 * q[i - 1] - q[i - 2] + d ** 2 * F(q[i - 2])
    return q
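
# Illustrative check (added): with p = dq/2 the energy
# E = p**2 + (q**2 - 1)**2 / 4 is conserved by the exact flow, so its drift
# along a computed orbit measures the integration error of the scheme above.
_d = 1e-3
_q = orb(20000, q0=0.0, dq0=1.0, F=F, d=_d)
_p = deriv(_q, dq0=1.0, d=_d) / 2
_E = _p ** 2 + (_q ** 2 - 1) ** 2 / 4
print("energy drift along the orbit:", _E.max() - _E.min())
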
'''
Function that computes the periods of a data vector
input:
    q: data vector
    d: granularity
    max: if True, waves are measured from peaks; if False, from valleys
output:
    pers: distances between consecutive peaks/valleys
    waves: indices of the peaks/valleys
'''
def periodos(q, d, max=True):
    epsilon = 5 * d
    dq = deriv(q, dq0=None, d=d)  # the first entry (NaN) is irrelevant here
    if max:
        waves = np.where((np.round(dq, int(-np.log10(epsilon))) == 0) & (q > 0))
    else:
        waves = np.where((np.round(dq, int(-np.log10(epsilon))) == 0) & (q < 0))
    diff_waves = np.diff(waves)
    waves = waves[0][1:][diff_waves[0] > 1]
    pers = diff_waves[diff_waves > 1] * d
    return pers, waves
#################################################################
# EXERCISE 1
# ORBIT COMPUTATION
#################################################################
# Let us see which delta in the interval [10**-4, 10**-3] gives the best
# results (a finer granularity for the computed points of the orbit)
q0 = 0.
dq0 = 1.
fig, ax = plt.subplots(figsize=(12,5))
plt.ylim(-1.5, 1.5)
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("t = n $\delta$", fontsize=12)
ax.set_ylabel("q(t)", fontsize=12)
iseq = np.array([3,3.1,3.5,3.8,4])
for i in iseq:
d = 10**(-i)
n = int(32/d)
t = np.arange(n+1)*d
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
plt.plot(t, q, 'ro', markersize=0.5/i,label='$\delta$ ='+str(np.around(d,4)),c=plt.get_cmap("winter")(i/np.max(iseq)))
ax.legend(loc=3, frameon=False, fontsize=12)
#plt.savefig('Time_granularity.png', dpi=250)
# We keep d = 10**-4 and compute q(t) and p(t)
q0 = 0.
dq0 = 1.
d = 10**(-4)
n = int(32/d)
t = np.arange(n+1)*d
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
#################################################################
# PHASE SPACE
#################################################################
'''
Function that draws one orbit of the phase space
input:
    q0: initial position
    dq0: initial value of the derivative
    F: system function
    col: colour index for the plot
    d: granularity of the time parameter
    n: number of points of the orbit
    marker: matplotlib format string
output:
    none (the orbit is plotted with plt.plot)
'''
def simplectica(q0,dq0,F,col=0,d = 10**(-4),n = int(16/d),marker='-'):
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
plt.plot(q, p, marker,c=plt.get_cmap("winter")(col), linewidth=0.5)
fig = plt.figure(figsize=(8,5))
fig.subplots_adjust(hspace=0.4, wspace=0.2)
# Draw the phase space for a total of 10*10 initial points
seq_q0 = np.linspace(0.,1.,num=10)
seq_dq0 = np.linspace(0.,2,num=10)
for i in range(len(seq_q0)):
for j in range(len(seq_dq0)):
q0 = seq_q0[i]
dq0 = seq_dq0[j]
ax = fig.add_subplot(1,1, 1)
col = (1+i+j*(len(seq_q0)))/(len(seq_q0)*len(seq_dq0))
#ax = fig.add_subplot(len(seq_q0), len(seq_dq0), 1+i+j*(len(seq_q0)))
simplectica(q0=q0,dq0=dq0,F=F,col=col,marker='ro',d= 10**(-4),n = int(16/d))
ax.set_xlabel("q(t)", fontsize=12)
ax.set_ylabel("p(t)", fontsize=12)
#fig.savefig('Simplectic.png', dpi=250)
plt.show()
#################################################################
# EXERCISE 2
# COMPUTING THE AREA OF THE PHASE SPACE
#################################################################
'''
Function that computes the area enclosed by an orbit
input:
    q0, dq0: initial values
    d: time granularity
    F: system function
    n: number of points
output:
    areaT, areaS: area estimates (trapezoidal rule and Simpson's rule)
'''
def area(q0,dq0,d,n, F):
q = orb(n,q0=q0,dq0=dq0,F=F,d=d)
dq = deriv(q,dq0=dq0,d=d)
p = dq/2
fig, ax = plt.subplots(figsize=(5,5))
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("q(t)", fontsize=12)
ax.set_ylabel("p(t)", fontsize=12)
plt.plot(q, p, '-')
plt.show()
    # We take the periods of the orbit, which define the waves
    T, W = periodos(q, d, max=False)
    # We keep the first piece and take half of the "closed curve"
    # to integrate more easily
    mitad = np.arange(W[0], W[0] + int((W[1] - W[0]) / 2), 1)  # np.int was removed from NumPy
    # Trapezoidal rule
    areaT = 2 * trapz(p[mitad], q[mitad])
    # Simpson's rule
    areaS = 2 * simps(p[mitad], q[mitad])
return areaT, areaS
# We take a pair (q(0), p(0)) and keep only one piece/wave of the orbit,
# without repetitions; for that we use the periods of the orbit
# Step 1: find the initial conditions that minimize the area.
# Since (0,0) is an unstable point, we take a nearby point
q0 = 0.
dq0 = 10**(-10)
d = 10**(-4)
n = int(60/d)
t = np.arange(n+1)*d
areaMinT, areaMinS = area(q0,dq0,d,n,F)
print("Area con regla de Simpson =", areaMinS)
print("Area con regla del trapezoide =", areaMinT)
# Step 2: find the initial conditions that maximize the area.
# We take a point on the boundary, (0,1)
q0 = 0.
dq0 = 2.
n = int(32/d)
t = np.arange(n+1)*d
areaMaxT, areaMaxS = area(q0,dq0,d,n,F)
print("Area con regla de Simpson =", areaMaxS)
print("Area con regla del trapezoide =", areaMaxT)
# The total area is the maximum area minus half of the minimum area
areaTotalT = areaMaxT - areaMinT / 2
print("Total area (trapezoidal rule) =", areaTotalT)
areaTotalS = areaMaxS - areaMinS / 2
print("Total area (Simpson's rule) =", areaTotalS)
####################################
# ERROR ESTIMATION
####################################
# Repeat the computation with a finer granularity (d = 10**-5) to estimate the error
# Step 1: find the initial conditions that minimize the area.
# Since (0,0) is an unstable point, we take a nearby point
q0 = 0.
dq0 = 10**(-10)
d = 10**(-5)
n = int(100/d)
t = np.arange(n+1)*d
areaMinT, areaMinS = area(q0,dq0,d,n,F)
print("Area con regla de Simpson =", areaMinS)
print("Area con regla del trapezoide =", areaMinT)
# Step 2: find the initial conditions that maximize the area.
# We take a point on the boundary, (0,1)
q0 = 0.
dq0 = 2.
n = int(32/d)
t = np.arange(n+1)*d
areaMaxT, areaMaxS = area(q0,dq0,d,n,F)
print("Area con regla de Simpson =", areaMaxS)
print("Area con regla del trapezoide =", areaMaxT)
# The total area is the maximum area minus half of the minimum area
areaTotalT_5 = areaMaxT - areaMinT / 2
print("Total area (trapezoidal rule) =", areaTotalT_5)
areaTotalS_5 = areaMaxS - areaMinS / 2
print("Total area (Simpson's rule) =", areaTotalS_5)
# To estimate the error, we subtract the two computed areas
error = max(abs(areaTotalT - areaTotalT_5), abs(areaTotalS - areaTotalS_5))
print("The area error is", error)
#####################
# Liouville's theorem
#####################
'''
Function that computes the area enclosed by a set of points
(shoelace formula)
input:
    x: list of first coordinates of the points
    y: list of second coordinates
output:
    value of the area
'''
def PolyArea(x, y):
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
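
# Worked check (added for illustration): the unit square, vertices listed
# counterclockwise, must give area 1.
print(PolyArea([0, 1, 1, 0], [0, 0, 1, 1]))  # 0.5 * |0 - 2| = 1.0
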
# Generate the boundary points of D0 ([0,1]x[0,1])
x_d0 = []
y_d0 = []
for i in np.arange(0,1,0.1):
x_d0.append(i)
y_d0.append(0)
for i in np.arange(0,1,0.1):
x_d0.append(1)
y_d0.append(i)
for i in np.arange(1,0,-0.1):
x_d0.append(i)
y_d0.append(1)
for i in np.arange(1,-0.1,-0.1):
x_d0.append(0)
y_d0.append(i)
print(PolyArea(x_d0,y_d0))
# Evolve each of the points over time.
# Store in (qt, pt) the value of each initial point at several times (0, 1000, 5000, 9000)
qt = []
pt = []
for i in range (0,len(x_d0)):
q = orb(10000,x_d0[i],2*y_d0[i],F=F, d=10**-4)
dq = deriv(q,y_d0[i]*2,10**-4)
p = dq/2
qt.append([q[0],q[1000],q[5000],q[9000]])
pt.append([p[0],p[1000],p[5000],p[9000]])
fig, ax = plt.subplots(figsize=(5,5))
plt.rcParams["legend.markerscale"] = 6
ax.set_xlabel("q(t)", fontsize=12)
ax.set_ylabel("p(t)", fontsize=12)
plt.plot(qt, pt, '-')
plt.show()
# Show that the areas are (almost) constant
for i in range(0,len(qt[0])):
print(PolyArea(np.array(qt)[:,i],np.array(pt)[:,i]))
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchpruner as pruner
import torchpruner.model_tools as model_tools
import torchslim
import torchslim.slim_solver as slim_solver
from torchslim.modules.rep_modules import (
merge_conv_bn,
merge_conv_compactor,
Compactor,
ModuleCompactor,
RepModule,
)
from collections import defaultdict, OrderedDict
import copy
def neg_index(index, size):
n_index = np.arange(0, size)
mask = np.ones(size) > 0
mask[index] = False
n_index = n_index[mask]
return list(n_index)
# just get the linear structure
# conv->bn->relu->conv
def get_linear_bn_names(graph):
modules = graph.modules
bn_names = []
for _, module in modules.items():
if isinstance(module.nn_object, nn.BatchNorm2d):
cut_dict = module.cut_analysis("weight", [0], 0)
terminal_dict = cut_dict["terminal"]
key_length = len(list(terminal_dict.keys()))
if key_length > 7:
continue
bn_names.append(module.name)
return bn_names
def get_linear_conv_names(graph):
modules = graph.modules
conv_names = []
for _, module in modules.items():
if isinstance(module.nn_object, (nn.Conv2d, nn.ConvTranspose2d)):
cut_dict = module.cut_analysis("weight", [0], 0)
terminal_dict = cut_dict["terminal"]
key_length = len(terminal_dict.keys())
if key_length > 7:
continue
conv_names.append(module.name)
return conv_names
# get all the bn names
def get_all_bn_names(graph):
modules = graph.modules
bn_names = []
for _, module in modules.items():
if isinstance(module.nn_object, nn.BatchNorm2d):
bn_names.append(module.name)
return bn_names
def get_all_conv_names(graph):
modules = graph.modules
conv_names = []
for _, module in modules.items():
if isinstance(module.nn_object, (nn.Conv2d, nn.ConvTranspose2d)):
conv_names.append(module.name)
return conv_names
strategy_mapping = {
"linear_bn": get_linear_bn_names,
"all_bn": get_all_bn_names,
"linear_conv": get_linear_conv_names,
"all_conv": get_all_conv_names,
}
def RepModule_convert_hook(name, origin_object):
return origin_object.convert()
def get_target_module_names(model, graph_inputs, strategy="linear"):
    # first step: convert each RepModule into a single conv
model = copy.deepcopy(model)
module_names = model_tools.get_names_by_class(model, RepModule)
model = model_tools.replace_object_by_names(
model, module_names, RepModule_convert_hook
)
# create the graph
graph = pruner.ONNXGraph(model)
graph.build_graph(graph_inputs)
return strategy_mapping[strategy](graph)
def module_compactor_replace_function(name, origin_object):
module_compactor = ModuleCompactor(origin_object).to(
origin_object.weight.data.device
)
return module_compactor
# Conv BN and Compactor
def merge_conv_bn_compactor_hook(names, object_groups):
conv, bn, compactor = object_groups
conv = merge_conv_bn(conv, bn)
return merge_conv_compactor(conv, compactor), nn.Identity(), nn.Identity()
# Conv N
def merge_conv_compactor_hook(names, object_groups):
conv, compactor = object_groups
return merge_conv_compactor(conv, compactor), nn.Identity()
def get_bn_channels(model, names):
return_dict = OrderedDict()
for name in names:
nn_object = pruner.model_tools.get_object(model, name)
return_dict[name] = nn_object.compactor.conv.out_channels
return return_dict
def deploy_convert(model, graph_inputs):
model = copy.deepcopy(model)
model = model.cpu()
# rep module
model = model_tools.replace_object_by_class(
model, RepModule, RepModule_convert_hook
)
current_graph = pruner.ONNXGraph(model)
current_graph.build_graph(graph_inputs)
# conv and compactor
name_groups = pruner.model_tools.get_name_groups_by_classes(
current_graph, [(nn.Conv2d, nn.ConvTranspose2d), Compactor]
)
model = model_tools.replace_object_by_name_groups(
model, name_groups, merge_conv_compactor_hook
)
# conv bn and compactor
name_groups = pruner.model_tools.get_name_groups_by_classes(
current_graph, [(nn.Conv2d, nn.ConvTranspose2d), nn.BatchNorm2d, Compactor]
)
model = model_tools.replace_object_by_name_groups(
model, name_groups, merge_conv_bn_compactor_hook
)
return model
def prune_model(graph, model, optimizer, prune_groups=1, group_size=8, min_channels=8):
module_names = []
module_lasso_value = []
for name, module in graph.modules.items():
nn_object = module.nn_object
if isinstance(nn_object, Compactor):
weight = nn_object.conv.weight.data.cpu().numpy()
lasso_value = np.sum(weight * weight, axis=(1, 2, 3))
module_names.append(name)
module_lasso_value.append(lasso_value)
for c_group in range(0, prune_groups):
min_module_name = None
min_lasso_value = 1e100
name_index = -1
min_index = None
for i in range(0, len(module_names)):
module_name, lasso_value = module_names[i], module_lasso_value[i]
remain_channels = len(lasso_value)
if remain_channels <= group_size or remain_channels <= min_channels:
continue
index = np.argsort(lasso_value)
index_group = index[:group_size]
lasso_sum_value = np.sum(lasso_value[index_group])
if lasso_sum_value < min_lasso_value:
min_lasso_value = lasso_sum_value
min_module_name = module_name
min_index = index_group
name_index = i
if min_module_name is None:
raise RuntimeError("The model can not be pruned to target flops")
print("Cutting layer is: " + min_module_name)
module_lasso_value[name_index] = module_lasso_value[name_index][
neg_index(min_index, len(module_lasso_value[name_index]))
]
analysis_result = graph.modules[min_module_name].cut_analysis(
"conv.weight", index=min_index, dim=0
)
model, _ = pruner.set_cut(model, analysis_result)
optimizer, _ = pruner.set_cut_optimizer(model, optimizer, analysis_result)
return model, optimizer
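
# Illustrative sketch (added, plain numpy): the channel-selection rule used in
# prune_model above. Every output filter gets a lasso score sum(w**2); the
# group of `group_size` filters with the smallest total score is pruned first.
def _toy_channel_selection(weight, group_size=2):
    # weight: array of shape (out_channels, in_channels, kh, kw)
    lasso_value = np.sum(weight * weight, axis=(1, 2, 3))
    index = np.argsort(lasso_value)
    return index[:group_size]  # indices of the weakest filters
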
def flops(model, graph_inputs):
model = deploy_convert(model, graph_inputs)
graph = pruner.ONNXGraph(model)
graph.build_graph(graph_inputs)
return graph.flops()
# the init hook
# insert the compactor into the model and get the init flops
def init_hook(self):
# prepare the sample input
if self.config["input_shapes"] is None:
input_shapes = self.infer_input_shapes()
else:
input_shapes = self.config["input_shapes"]
graph_inputs = []
for input_shape in input_shapes:
graph_inputs.append(torch.zeros(1, *input_shape))
graph_inputs = tuple(graph_inputs)
if self.config["prune_module_names"] is not None:
target_module_names = self.config["prune_module_names"]
else:
target_module_names = get_target_module_names(
self.model, graph_inputs, self.config["auto_find_module_strategy"]
)
    # remove BN layers that have no conv in front of them or that follow a depthwise conv
model = copy.deepcopy(self.model)
module_names = model_tools.get_names_by_class(model, RepModule)
model = model_tools.replace_object_by_names(
model, module_names, RepModule_convert_hook
)
graph = pruner.ONNXGraph(model)
graph.build_graph(graph_inputs)
name_groups = model_tools.get_name_groups_by_classes(
graph, [(nn.Conv2d, nn.ConvTranspose2d), nn.BatchNorm2d]
)
filtered_module_names = []
for conv_name, bn_name in name_groups:
if (
model_tools.get_object(model, conv_name).groups == 1
and bn_name in target_module_names
):
filtered_module_names.append(bn_name)
names = model_tools.get_names_by_class(model, (nn.Conv2d, nn.ConvTranspose2d), True)
for name in names:
if (
name in target_module_names
and model_tools.get_object(model, name).groups == 1
):
filtered_module_names.append(name)
# sort the names:
sorted_filtered_module_names = []
for name in target_module_names:
if name in filtered_module_names:
sorted_filtered_module_names.append(name)
target_module_names = sorted_filtered_module_names
print("The pruning module is:")
print(target_module_names)
# insert the compactor
self.model = model_tools.replace_object_by_names(
self.model, target_module_names, module_compactor_replace_function
)
current_flops = flops(self.model, graph_inputs)
# save the variables
self.variable_dict["graph_inputs"] = graph_inputs
self.variable_dict["target_module_names"] = target_module_names
self.variable_dict["init_flops"] = current_flops
self.variable_dict["current_flops"] = current_flops
# set the allow save to be false
self.variable_dict["allow_save"] = False
# before iteration hook
def before_iteration_hook(self):
if self.variable_dict["epoch"] >= self.config["warmup_epoch"]:
self.variable_dict["prune_iteration"] += 1
# the iteration hook
def after_iteration_hook(self):
current_flops = self.variable_dict["current_flops"]
init_flops = self.variable_dict["init_flops"]
graph_inputs = self.variable_dict["graph_inputs"]
target_module_names = self.variable_dict["target_module_names"]
if self.variable_dict["prune_iteration"] % self.config["prune_interval"] == 0:
if current_flops < (1 - self.config["prune_rate"]) * init_flops:
print("reach the target flops no need to prune")
self.variable_dict["allow_save"] = True
else:
print(">>>>>>>>>>>>>>>>>>>>Pruning the model >>>>>>>>>>>>>>>>>>>>")
current_graph = pruner.ONNXGraph(self.model)
current_graph.build_graph(graph_inputs)
self.model, self.optimizer = prune_model(
current_graph,
self.model,
self.optimizer,
self.config["prune_groups"],
self.config["group_size"],
self.config["min_channels"],
)
current_flops = flops(self.model, graph_inputs)
bn_channels = get_bn_channels(self.model, target_module_names)
print("The cutting bn channel is:")
for key in bn_channels.keys():
print(key + ": " + str(bn_channels[key]))
print("The new flops is %.4f" % (current_flops))
self.variable_dict["current_flops"] = current_flops
def optimizer_generator(params, config):
return torch.optim.SGD(params, lr=0.0)
def scheduler_generator(optimizer, config):
return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config["epoch"])
class ResRepSolver(slim_solver.CommonSlimSolver):
def __init__(self, model, config):
super(ResRepSolver, self).__init__(model, config)
self.variable_dict["prune_iteration"] = 1
self.regist_init_hook(init_hook)
self.regist_iteration_begin_hook(before_iteration_hook)
self.regist_iteration_end_hook(after_iteration_hook)
__config_setting__ = [
("task_name", str, "default", False, "The task name"),
(
"save_deploy_format",
bool,
True,
False,
"convert the ACNet to conv when saving the model",
),
("lr", float, 0.01, False, "The learning rate of the optimizer"),
("epoch", int, 360, False, "The total epoch to train the model"),
("batch_size", int, 128, False, "The batch size per step"),
("test_batch_size", int, 128, False, "The evaluation batch size per step"),
("warmup_epoch", int, 5, False, "The total train epoch before pruning"),
("momentum", float, 0.9, False, "The momentum for the optimizer"),
("compactor_momentum", float, 0.99, False, "The momentum value for compactor"),
("weight_decay", float, 1e-4, False, "The wegith decay for the parameters"),
("lasso_decay", float, 1e-4, False, "The lasso decay for the compactor"),
(
"input_shapes",
list,
None,
True,
"A 2 dim list, representing the input shapes of the data, if None, \
the first item size of the dataset will be used as the input size",
),
(
"prune_module_names",
list,
None,
True,
"The module names to be pruned, just support BatchNorm2d and Conv2d",
),
(
"auto_find_module_strategy",
str,
"linear_bn",
False,
"The strategy to determine the name of module layers to be cut if the prune_module_names is None, \
support linear and all",
),
("prune_rate", float, None, False, "The prune rate of the model"),
("prune_interval", int, 200, False, "The prune iteration per pruning"),
("prune_groups", int, 1, False, "The prune groups prune pruning"),
("group_size", int, 8, False, "The channels to be pruned per group"),
("min_channels", int, 8, False, "The min channels that layer remain"),
("num_workers", int, 0, False, "The number of workers to read data"),
("save_keyword", str, "acc", False, "The keyword for save"),
("save_dir", str, "checkpoints", False, "The model save dir"),
("devices", list, None, False, "The device to be used in training"),
("log_interval", int, 20, False, "The interval to report the log"),
# generate the optimizer
(
"optimizer_generator",
"function",
optimizer_generator,
False,
"The optimizer generator (params,config)->optimizer",
),
# generate the scheduler
(
"scheduler_generator",
"function",
scheduler_generator,
True,
"the scheduler generator for the task (optmizer,config)->scheduler",
),
# predict the result
(
"predict_function",
"function",
None,
False,
"get the prediction of the data (model,batch_data)->predict",
),
# calculate the loss for one iteration
(
"calculate_loss_function",
"function",
None,
False,
"(predict,batch_data)->loss",
),
# get the evaluate result for one iteration
(
"evaluate_function",
"function",
None,
True,
"(predict,batch_data)->evaluate_dict",
),
# get the dataset
(
"dataset_generator",
"function",
None,
True,
"()->dataset_train,dataset_validation",
),
]
# infer the input sizes
def infer_input_shapes(self):
samples = iter(self.trainloader).__next__()
input_size = samples[0].size()[1:]
return [list(input_size)]
# overwrite the generate_params setting
def generate_params_setting(self):
model = self.model
if isinstance(model, nn.DataParallel):
model = model.module
weight_decay = self.config["weight_decay"]
momentum = self.config["momentum"]
base_lr = self.config["lr"]
params = []
for key, value in model.named_parameters():
apply_weight_decay = weight_decay
apply_momentum = momentum
apply_lr = base_lr
if not value.requires_grad:
continue
parent_key = ["self"] + key.split(".")[:-1]
parent_key = ".".join(parent_key)
parent_object = model_tools.get_object(model, parent_key)
if isinstance(parent_object, nn.BatchNorm2d):
apply_weight_decay = 0.0
if (
isinstance(parent_object, (nn.Conv2d, nn.ConvTranspose2d))
and parent_object.groups == parent_object.in_channels
):
apply_weight_decay = 0.0
item_list = key.split(".")
if len(item_list) <= 3:
continue
grand_parent_key = ["self"] + key.split(".")[:-2]
grand_parent_key = ".".join(grand_parent_key)
grand_parent = model_tools.get_object(model, grand_parent_key)
if isinstance(grand_parent, Compactor):
apply_weight_decay = 0.0
                apply_momentum = self.config["compactor_momentum"]  # was hard-coded to 0.99
if "bias" in key:
apply_lr = 2 * base_lr
apply_weight_decay = 0.0
else:
apply_lr = base_lr
params += [
{
"params": [value],
"lr": apply_lr,
"weight_decay": apply_weight_decay,
"momentum": apply_momentum,
}
]
return params
# overwrite the after_loss_backward
def after_loss_backward(self):
model = self.model
if isinstance(model, nn.DataParallel):
model = model.module
for key, value in model.named_parameters():
split_key = key.split(".")
if len(split_key) <= 3:
continue
split_key = ["self"] + split_key[:-2]
grand_parent_key = ".".join(split_key)
nn_object = model_tools.get_object(model, grand_parent_key)
if isinstance(nn_object, Compactor):
lasso_grad = value.data * (
(value.data ** 2).sum(dim=(1, 2, 3), keepdim=True) ** (-0.5)
)
                value.grad.data.add_(lasso_grad, alpha=self.config["lasso_decay"])  # grad += lasso_decay * lasso_grad
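    # Note on the gradient update above (assuming the intended penalty is the
    # standard group lasso, lasso_decay * ||w||_2 per compactor filter):
    #   d/dw ||w||_2 = w / ||w||_2 = w * (sum(w ** 2)) ** -0.5,
    # which is exactly `lasso_grad` with the sum taken per output channel
    # (dim=(1, 2, 3), keepdim=True) before scaling by config["lasso_decay"].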
def save_model(self):
if isinstance(self.model, nn.DataParallel):
model = self.model.module
else:
model = self.model
deploy_model = copy.deepcopy(model)
deploy_model = deploy_model.cpu()
if self.config["save_deploy_format"]:
graph_inputs = self.variable_dict["graph_inputs"]
deploy_model = deploy_convert(model, graph_inputs)
torch.save(
{
self.config["save_keyword"]: self.variable_dict["save_target"],
"net": deploy_model,
},
self.variable_dict["save_path"],
)
| [
"torchslim.modules.rep_modules.ModuleCompactor",
"numpy.sum",
"torchslim.modules.rep_modules.merge_conv_bn",
"torchpruner.model_tools.replace_object_by_name_groups",
"numpy.ones",
"numpy.argsort",
"numpy.arange",
"torchpruner.model_tools.replace_object_by_names",
"torchpruner.ONNXGraph",
"torch.op... | [((479, 497), 'numpy.arange', 'np.arange', (['(0)', 'size'], {}), '(0, size)\n', (488, 497), True, 'import numpy as np\n'), ((2515, 2535), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (2528, 2535), False, 'import copy\n'), ((2555, 2603), 'torchpruner.model_tools.get_names_by_class', 'model_tools.get_names_by_class', (['model', 'RepModule'], {}), '(model, RepModule)\n', (2585, 2603), True, 'import torchpruner.model_tools as model_tools\n'), ((2616, 2701), 'torchpruner.model_tools.replace_object_by_names', 'model_tools.replace_object_by_names', (['model', 'module_names', 'RepModule_convert_hook'], {}), '(model, module_names, RepModule_convert_hook\n )\n', (2651, 2701), True, 'import torchpruner.model_tools as model_tools\n'), ((2746, 2769), 'torchpruner.ONNXGraph', 'pruner.ONNXGraph', (['model'], {}), '(model)\n', (2762, 2769), True, 'import torchpruner as pruner\n'), ((3179, 3202), 'torchslim.modules.rep_modules.merge_conv_bn', 'merge_conv_bn', (['conv', 'bn'], {}), '(conv, bn)\n', (3192, 3202), False, 'from torchslim.modules.rep_modules import merge_conv_bn, merge_conv_compactor, Compactor, ModuleCompactor, RepModule\n'), ((3501, 3514), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3512, 3514), False, 'from collections import defaultdict, OrderedDict\n'), ((3745, 3765), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (3758, 3765), False, 'import copy\n'), ((3819, 3896), 'torchpruner.model_tools.replace_object_by_class', 'model_tools.replace_object_by_class', (['model', 'RepModule', 'RepModule_convert_hook'], {}), '(model, RepModule, RepModule_convert_hook)\n', (3854, 3896), True, 'import torchpruner.model_tools as model_tools\n'), ((3931, 3954), 'torchpruner.ONNXGraph', 'pruner.ONNXGraph', (['model'], {}), '(model)\n', (3947, 3954), True, 'import torchpruner as pruner\n'), ((4042, 4152), 'torchpruner.model_tools.get_name_groups_by_classes', 'pruner.model_tools.get_name_groups_by_classes', (['current_graph', '[(nn.Conv2d, nn.ConvTranspose2d), Compactor]'], {}), '(current_graph, [(nn.Conv2d,\n nn.ConvTranspose2d), Compactor])\n', (4087, 4152), True, 'import torchpruner as pruner\n'), ((4175, 4267), 'torchpruner.model_tools.replace_object_by_name_groups', 'model_tools.replace_object_by_name_groups', (['model', 'name_groups', 'merge_conv_compactor_hook'], {}), '(model, name_groups,\n merge_conv_compactor_hook)\n', (4216, 4267), True, 'import torchpruner.model_tools as model_tools\n'), ((4324, 4450), 'torchpruner.model_tools.get_name_groups_by_classes', 'pruner.model_tools.get_name_groups_by_classes', (['current_graph', '[(nn.Conv2d, nn.ConvTranspose2d), nn.BatchNorm2d, Compactor]'], {}), '(current_graph, [(nn.Conv2d,\n nn.ConvTranspose2d), nn.BatchNorm2d, Compactor])\n', (4369, 4450), True, 'import torchpruner as pruner\n'), ((4473, 4568), 'torchpruner.model_tools.replace_object_by_name_groups', 'model_tools.replace_object_by_name_groups', (['model', 'name_groups', 'merge_conv_bn_compactor_hook'], {}), '(model, name_groups,\n merge_conv_bn_compactor_hook)\n', (4514, 4568), True, 'import torchpruner.model_tools as model_tools\n'), ((6598, 6621), 'torchpruner.ONNXGraph', 'pruner.ONNXGraph', (['model'], {}), '(model)\n', (6614, 6621), True, 'import torchpruner as pruner\n'), ((7472, 7497), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (7485, 7497), False, 'import copy\n'), ((7517, 7565), 'torchpruner.model_tools.get_names_by_class', 'model_tools.get_names_by_class', (['model', 'RepModule'], {}), '(model, 
RepModule)\n', (7547, 7565), True, 'import torchpruner.model_tools as model_tools\n'), ((7578, 7663), 'torchpruner.model_tools.replace_object_by_names', 'model_tools.replace_object_by_names', (['model', 'module_names', 'RepModule_convert_hook'], {}), '(model, module_names, RepModule_convert_hook\n )\n', (7613, 7663), True, 'import torchpruner.model_tools as model_tools\n'), ((7685, 7708), 'torchpruner.ONNXGraph', 'pruner.ONNXGraph', (['model'], {}), '(model)\n', (7701, 7708), True, 'import torchpruner as pruner\n'), ((7763, 7864), 'torchpruner.model_tools.get_name_groups_by_classes', 'model_tools.get_name_groups_by_classes', (['graph', '[(nn.Conv2d, nn.ConvTranspose2d), nn.BatchNorm2d]'], {}), '(graph, [(nn.Conv2d, nn.\n ConvTranspose2d), nn.BatchNorm2d])\n', (7801, 7864), True, 'import torchpruner.model_tools as model_tools\n'), ((8146, 8222), 'torchpruner.model_tools.get_names_by_class', 'model_tools.get_names_by_class', (['model', '(nn.Conv2d, nn.ConvTranspose2d)', '(True)'], {}), '(model, (nn.Conv2d, nn.ConvTranspose2d), True)\n', (8176, 8222), True, 'import torchpruner.model_tools as model_tools\n'), ((8781, 8888), 'torchpruner.model_tools.replace_object_by_names', 'model_tools.replace_object_by_names', (['self.model', 'target_module_names', 'module_compactor_replace_function'], {}), '(self.model, target_module_names,\n module_compactor_replace_function)\n', (8816, 8888), True, 'import torchpruner.model_tools as model_tools\n'), ((10979, 11010), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.0)'}), '(params, lr=0.0)\n', (10994, 11010), False, 'import torch\n'), ((11068, 11138), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', "config['epoch']"], {}), "(optimizer, config['epoch'])\n", (11110, 11138), False, 'import torch\n'), ((509, 522), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (516, 522), True, 'import numpy as np\n'), ((3214, 3251), 'torchslim.modules.rep_modules.merge_conv_compactor', 'merge_conv_compactor', (['conv', 'compactor'], {}), '(conv, compactor)\n', (3234, 3251), False, 'from torchslim.modules.rep_modules import merge_conv_bn, merge_conv_compactor, Compactor, ModuleCompactor, RepModule\n'), ((3253, 3266), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (3264, 3266), True, 'import torch.nn as nn\n'), ((3268, 3281), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (3279, 3281), True, 'import torch.nn as nn\n'), ((3393, 3430), 'torchslim.modules.rep_modules.merge_conv_compactor', 'merge_conv_compactor', (['conv', 'compactor'], {}), '(conv, compactor)\n', (3413, 3430), False, 'from torchslim.modules.rep_modules import merge_conv_bn, merge_conv_compactor, Compactor, ModuleCompactor, RepModule\n'), ((3432, 3445), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (3443, 3445), True, 'import torch.nn as nn\n'), ((3558, 3600), 'torchpruner.model_tools.get_object', 'pruner.model_tools.get_object', (['model', 'name'], {}), '(model, name)\n', (3587, 3600), True, 'import torchpruner as pruner\n'), ((6354, 6392), 'torchpruner.set_cut', 'pruner.set_cut', (['model', 'analysis_result'], {}), '(model, analysis_result)\n', (6368, 6392), True, 'import torchpruner as pruner\n'), ((6416, 6475), 'torchpruner.set_cut_optimizer', 'pruner.set_cut_optimizer', (['model', 'optimizer', 'analysis_result'], {}), '(model, optimizer, analysis_result)\n', (6440, 6475), True, 'import torchpruner as pruner\n'), ((18355, 18375), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (18368, 18375), 
False, 'import copy\n'), ((18597, 18731), 'torch.save', 'torch.save', (["{self.config['save_keyword']: self.variable_dict['save_target'], 'net':\n deploy_model}", "self.variable_dict['save_path']"], {}), "({self.config['save_keyword']: self.variable_dict['save_target'],\n 'net': deploy_model}, self.variable_dict['save_path'])\n", (18607, 18731), False, 'import torch\n'), ((2936, 2966), 'torchslim.modules.rep_modules.ModuleCompactor', 'ModuleCompactor', (['origin_object'], {}), '(origin_object)\n', (2951, 2966), False, 'from torchslim.modules.rep_modules import merge_conv_bn, merge_conv_compactor, Compactor, ModuleCompactor, RepModule\n'), ((4953, 4992), 'numpy.sum', 'np.sum', (['(weight * weight)'], {'axis': '(1, 2, 3)'}), '(weight * weight, axis=(1, 2, 3))\n', (4959, 4992), True, 'import numpy as np\n'), ((5534, 5557), 'numpy.argsort', 'np.argsort', (['lasso_value'], {}), '(lasso_value)\n', (5544, 5557), True, 'import numpy as np\n'), ((5633, 5665), 'numpy.sum', 'np.sum', (['lasso_value[index_group]'], {}), '(lasso_value[index_group])\n', (5639, 5665), True, 'import numpy as np\n'), ((7055, 7083), 'torch.zeros', 'torch.zeros', (['(1)', '*input_shape'], {}), '(1, *input_shape)\n', (7066, 7083), False, 'import torch\n'), ((10143, 10171), 'torchpruner.ONNXGraph', 'pruner.ONNXGraph', (['self.model'], {}), '(self.model)\n', (10159, 10171), True, 'import torchpruner as pruner\n'), ((16170, 16211), 'torchpruner.model_tools.get_object', 'model_tools.get_object', (['model', 'parent_key'], {}), '(model, parent_key)\n', (16192, 16211), True, 'import torchpruner.model_tools as model_tools\n'), ((16776, 16823), 'torchpruner.model_tools.get_object', 'model_tools.get_object', (['model', 'grand_parent_key'], {}), '(model, grand_parent_key)\n', (16798, 16823), True, 'import torchpruner.model_tools as model_tools\n'), ((17853, 17900), 'torchpruner.model_tools.get_object', 'model_tools.get_object', (['model', 'grand_parent_key'], {}), '(model, grand_parent_key)\n', (17875, 17900), True, 'import torchpruner.model_tools as model_tools\n'), ((7973, 8013), 'torchpruner.model_tools.get_object', 'model_tools.get_object', (['model', 'conv_name'], {}), '(model, conv_name)\n', (7995, 8013), True, 'import torchpruner.model_tools as model_tools\n'), ((8315, 8350), 'torchpruner.model_tools.get_object', 'model_tools.get_object', (['model', 'name'], {}), '(model, name)\n', (8337, 8350), True, 'import torchpruner.model_tools as model_tools\n')] |
import math
import multiprocessing
import time
from functools import lru_cache, partial
from multiprocessing import Pool
import pandas as pd
from numpy.random import shuffle
from retry.api import retry_call
from ..mongodb import get_db
from ..scripts.trading_calendar import is_trading_day
from ..setting.constants import MAX_WORKER
from ..utils import batch_loop, data_root, ensure_dtypes, make_logger
from ..utils.db_utils import to_dict
from ..websource.wy import fetch_cjmx
logger = make_logger('成交明细')
DATE_FMT = r'%Y-%m-%d'
def _last_5():
"""最近的5个交易日"""
db = get_db()
try:
return db['交易日历'].find_one()['last_month'][-5:]
except Exception:
today = pd.Timestamp('today').normalize()
dates = pd.date_range(today - pd.Timedelta(days=5), today)
return [d.to_pydatetime() for d in dates]
def _wy_fix_data(df):
dts = df.日期.dt.strftime(DATE_FMT) + ' ' + df.时间
df['成交时间'] = pd.to_datetime(dts)
del df['时间']
del df['日期']
df = df.rename(columns={'价格': '成交价', '涨跌额': '价格变动', '方向': '性质'})
df = ensure_dtypes(df,
d_cols=['成交时间'],
s_cols=['股票代码', '性质'],
i_cols=['成交量'],
f_cols=['成交价', '成交额'])
    # keep 2 decimal places
df = df.round({'价格变动': 2, '成交额': 2, '成交价': 2})
df.fillna(0.0, inplace=True)
return df
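# Example of the merge above (illustrative values): a row with
# 日期 = 2020-01-02 and 时间 = '09:30:01' yields 成交时间 = Timestamp('2020-01-02 09:30:01').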
def batch_refresh(codes, timestamp):
    """Fetch and store trade details for `codes` on the given date."""
db = get_db('cjmx')
date_str = timestamp.strftime(DATE_FMT)
collection = db[date_str]
if collection.estimated_document_count() == 0:
create_index(collection)
status = {}
for code in codes:
try:
df = retry_call(fetch_cjmx, [code, date_str],
delay=0.3,
tries=3,
logger=logger)
if not df.empty:
df = _wy_fix_data(df)
collection.insert_many(to_dict(df))
                logger.info(f'Stock {code} {date_str}: {len(df):>5} rows')
status[code] = True
except Exception as e:
            logger.info(f'Stock {code} date {date_str}: {e!r}')
status[code] = False
failed = [k for k, v in status.items() if not v]
if len(failed):
        logger.warning(f'{date_str}: failed to fetch trade details for the following stocks')
logger.warning(failed)
return len(failed) == 0
def was_traded(db, code, timestamp):
collection = db[code]
filter = {'日期': timestamp, '成交量': {'$gt': 0}}
if collection.find_one(filter, {'_id': 1}):
return True
else:
return False
@lru_cache(None)
def get_traded_codes(timestamp):
"""当天交易的股票代码列表"""
db = get_db('wy_stock_daily')
codes = db.list_collection_names()
return [code for code in codes if was_traded(db, code, timestamp)]
def completed_codes(timestamp):
"""已经下载的股票代码"""
db = get_db('cjmx')
collection = db[timestamp.strftime(DATE_FMT)]
return collection.distinct('股票代码')
def _refresh(timestamp):
"""刷新指定日期成交明细数据(只能为近5天)"""
t_codes = get_traded_codes(timestamp)
d_codes = completed_codes(timestamp)
codes = list(set(t_codes).difference(set(d_codes)))
if len(codes) == 0:
return True
shuffle(codes)
    logger.info(f'{timestamp.strftime(DATE_FMT)}: {len(codes)} stocks to fetch')
    completed = batch_refresh(codes, timestamp)
return completed
def refresh(timestamp):
"""刷新指定日期成交明细数据(只能为近5天)"""
for i in range(1, 4):
logger.info(f"第{i}次尝试 {timestamp}")
completed = _refresh(timestamp)
if completed:
break
def create_index(collection):
collection.create_index([("成交时间", -1)], name='dt_index')
collection.create_index([("股票代码", 1)], name='code_index')
def refresh_last_5():
"""刷新最近5天成交明细"""
tdates = [pd.Timestamp(d) for d in _last_5()]
with Pool(MAX_WORKER) as pool:
r = pool.map_async(refresh, tdates)
r.wait()
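# Entry-point sketch (an assumption; the original module may be invoked elsewhere):
if __name__ == '__main__':
    refresh_last_5()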
| [
"pandas.Timestamp",
"retry.api.retry_call",
"pandas.to_datetime",
"multiprocessing.Pool",
"pandas.Timedelta",
"functools.lru_cache",
"numpy.random.shuffle"
] | [((2556, 2571), 'functools.lru_cache', 'lru_cache', (['None'], {}), '(None)\n', (2565, 2571), False, 'from functools import lru_cache, partial\n'), ((942, 961), 'pandas.to_datetime', 'pd.to_datetime', (['dts'], {}), '(dts)\n', (956, 961), True, 'import pandas as pd\n'), ((3183, 3197), 'numpy.random.shuffle', 'shuffle', (['codes'], {}), '(codes)\n', (3190, 3197), False, 'from numpy.random import shuffle\n'), ((3757, 3772), 'pandas.Timestamp', 'pd.Timestamp', (['d'], {}), '(d)\n', (3769, 3772), True, 'import pandas as pd\n'), ((3802, 3818), 'multiprocessing.Pool', 'Pool', (['MAX_WORKER'], {}), '(MAX_WORKER)\n', (3806, 3818), False, 'from multiprocessing import Pool\n'), ((1656, 1731), 'retry.api.retry_call', 'retry_call', (['fetch_cjmx', '[code, date_str]'], {'delay': '(0.3)', 'tries': '(3)', 'logger': 'logger'}), '(fetch_cjmx, [code, date_str], delay=0.3, tries=3, logger=logger)\n', (1666, 1731), False, 'from retry.api import retry_call\n'), ((690, 711), 'pandas.Timestamp', 'pd.Timestamp', (['"""today"""'], {}), "('today')\n", (702, 711), True, 'import pandas as pd\n'), ((762, 782), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(5)'}), '(days=5)\n', (774, 782), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unit tests for jointmoments.
"""
from __future__ import division
import os
import sys
import numpy as np
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir))
sys.path.insert(0, os.path.join(HERE, os.pardir, "jointmoments"))
from jointmoments import *
tolerance = 1e-5
def test_jointmoments():
unbias = 0
data = [[ 0.837698, 0.49452, 2.54352 ],
[-0.294096, -0.39636, 0.728619],
[-1.62089 , -0.44919, 1.20592 ],
[-1.06458 , -0.68214, -1.12841 ],
[ 2.14341 , 0.7309 , 0.644968],
[-0.284139, -1.133 , 1.98615 ],
[ 1.19879 , 2.55633, -0.526461],
[-0.032277, 0.11701, -0.249265],
[-1.02516 , -0.44665, 2.50556 ],
[-0.515272, -0.578 , 0.515139],
[ 0.259474, -1.24193, 0.105051],
[ 0.178546, -0.80547, -0.016838],
[-0.607696, -0.21319, -1.40657 ],
[ 0.372248, 0.93341, -0.667086],
[-0.099814, 0.52698, -0.253867],
[ 0.743166, -0.79375, 2.11131 ],
[ 0.109262, -1.28021, -0.415184],
[ 0.499346, -0.95897, -2.24336 ],
[-0.191825, -0.59756, -0.63292 ],
[-1.98255 , -1.5936 , -0.935766],
[-0.317612, 1.33143, -0.46866 ],
[ 0.666652, -0.81507, 0.370959],
[-0.761136, 0.10966, -0.997161],
[-1.09972 , 0.28247, -0.846566]]
rows = len(data)
cols = len(data[0])
coskew_result = coskew(data, rows, cols, unbias)
cokurt_result = cokurt(data, rows, cols, unbias)
# expected_coskew = np.array([[
# [ 0.153993 , 0.161605 , 0.131816 ],
# [ 0.161605 , 0.433037 , -0.035224 ],
# [ 0.131816 , -0.035224 , 0.0136523],
# ], [
# [ 0.161605 , 0.433037 , -0.035224 ],
# [ 0.433037 , 0.899048 , -0.314352 ],
# [-0.035224 , -0.314352 , -0.29955 ],
# ], [
# [ 0.131816 , -0.035224, 0.0136523],
# [-0.035224 , -0.314352, -0.29955 ],
# [ 0.0136523, -0.29955 , 1.06208 ],
# ]])
expected_coskew = np.array([[
[ 0.147577 , 0.154872 , 0.126324 ],
[ 0.154872 , 0.414994 , -0.0337563 ],
[ 0.126324 , -0.0337563, 0.0130835 ],
], [
[ 0.154872 , 0.414994 , -0.0337563 ],
[ 0.414994 , 0.861588 , -0.301254 ],
[-0.0337563, -0.301254 , -0.287068 ],
], [
[ 0.126324 , -0.0337563, 0.0130835],
[-0.0337563, -0.301254 , -0.287068 ],
[ 0.0130835, -0.287068 , 1.01782 ],
]])
expected_cokurt = np.array([
[[
[ 2.12678 , 1.11885 , 0.474782 ],
[ 1.11885 , 1.12294 , 0.187331 ],
[ 0.474782 , 0.187331 , 1.15524 ],
], [
[ 1.11885 , 1.12294 , 0.187331 ],
[ 1.12294 , 1.40462 , -0.0266349],
[ 0.187331 , -0.0266349, 0.276558 ],
], [
[ 0.474782 , 0.187331 , 1.15524 ],
[ 0.187331 , -0.0266349, 0.276558 ],
[ 1.15524 , 0.276558 , 0.178083 ],
]], [[
[ 1.11885 , 1.12294 , 0.187331 ],
[ 1.12294 , 1.40462 , -0.0266349],
[ 0.187331 , -0.0266349, 0.276558 ],
], [
[ 1.12294 , 1.40462 , -0.0266349 ],
[ 1.40462 , 3.10288 , -0.517198 ],
[-0.0266349, -0.517198 , 0.779221 ],
], [
[ 0.187331 , -0.0266349, 0.276558 ],
[-0.0266349, -0.517198 , 0.779221 ],
[ 0.276558 , 0.779221 , 0.218732 ],
]], [[
[ 0.474782 , 0.187331 , 1.15524 ],
[ 0.187331 , -0.0266349 , 0.276558 ],
[ 1.15524 , 0.276558 , 0.178083 ],
], [
[ 0.187331 , -0.0266349, 0.276558 ],
[-0.0266349, -0.517198 , 0.779221 ],
[ 0.276558 , 0.779221 , 0.218732 ],
], [
[ 1.15524 , 0.276558 , 0.178083 ],
[ 0.276558 , 0.779221 , 0.218732 ],
[ 0.178083 , 0.218732 , 5.98947 ],
]]
])
    assert (np.abs(np.array(coskew_result) - expected_coskew) < tolerance).all()
    assert (np.abs(np.array(cokurt_result) - expected_cokurt) < tolerance).all()
if __name__ == "__main__":
reports = [[1, 1, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]]
num_voters = len(reports)
num_events = len(reports[0])
test_jointmoments()
| [
"numpy.array",
"os.path.realpath",
"os.path.join"
] | [((180, 206), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((227, 256), 'os.path.join', 'os.path.join', (['HERE', 'os.pardir'], {}), '(HERE, os.pardir)\n', (239, 256), False, 'import os\n'), ((277, 322), 'os.path.join', 'os.path.join', (['HERE', 'os.pardir', '"""jointmoments"""'], {}), "(HERE, os.pardir, 'jointmoments')\n", (289, 322), False, 'import os\n'), ((2203, 2544), 'numpy.array', 'np.array', (['[[[0.147577, 0.154872, 0.126324], [0.154872, 0.414994, -0.0337563], [\n 0.126324, -0.0337563, 0.0130835]], [[0.154872, 0.414994, -0.0337563], [\n 0.414994, 0.861588, -0.301254], [-0.0337563, -0.301254, -0.287068]], [[\n 0.126324, -0.0337563, 0.0130835], [-0.0337563, -0.301254, -0.287068], [\n 0.0130835, -0.287068, 1.01782]]]'], {}), '([[[0.147577, 0.154872, 0.126324], [0.154872, 0.414994, -0.0337563],\n [0.126324, -0.0337563, 0.0130835]], [[0.154872, 0.414994, -0.0337563],\n [0.414994, 0.861588, -0.301254], [-0.0337563, -0.301254, -0.287068]], [\n [0.126324, -0.0337563, 0.0130835], [-0.0337563, -0.301254, -0.287068],\n [0.0130835, -0.287068, 1.01782]]])\n', (2211, 2544), True, 'import numpy as np\n'), ((2695, 3662), 'numpy.array', 'np.array', (['[[[[2.12678, 1.11885, 0.474782], [1.11885, 1.12294, 0.187331], [0.474782, \n 0.187331, 1.15524]], [[1.11885, 1.12294, 0.187331], [1.12294, 1.40462, \n -0.0266349], [0.187331, -0.0266349, 0.276558]], [[0.474782, 0.187331, \n 1.15524], [0.187331, -0.0266349, 0.276558], [1.15524, 0.276558, \n 0.178083]]], [[[1.11885, 1.12294, 0.187331], [1.12294, 1.40462, -\n 0.0266349], [0.187331, -0.0266349, 0.276558]], [[1.12294, 1.40462, -\n 0.0266349], [1.40462, 3.10288, -0.517198], [-0.0266349, -0.517198, \n 0.779221]], [[0.187331, -0.0266349, 0.276558], [-0.0266349, -0.517198, \n 0.779221], [0.276558, 0.779221, 0.218732]]], [[[0.474782, 0.187331, \n 1.15524], [0.187331, -0.0266349, 0.276558], [1.15524, 0.276558, \n 0.178083]], [[0.187331, -0.0266349, 0.276558], [-0.0266349, -0.517198, \n 0.779221], [0.276558, 0.779221, 0.218732]], [[1.15524, 0.276558, \n 0.178083], [0.276558, 0.779221, 0.218732], [0.178083, 0.218732, 5.98947]]]]'], {}), '([[[[2.12678, 1.11885, 0.474782], [1.11885, 1.12294, 0.187331], [\n 0.474782, 0.187331, 1.15524]], [[1.11885, 1.12294, 0.187331], [1.12294,\n 1.40462, -0.0266349], [0.187331, -0.0266349, 0.276558]], [[0.474782, \n 0.187331, 1.15524], [0.187331, -0.0266349, 0.276558], [1.15524, \n 0.276558, 0.178083]]], [[[1.11885, 1.12294, 0.187331], [1.12294, \n 1.40462, -0.0266349], [0.187331, -0.0266349, 0.276558]], [[1.12294, \n 1.40462, -0.0266349], [1.40462, 3.10288, -0.517198], [-0.0266349, -\n 0.517198, 0.779221]], [[0.187331, -0.0266349, 0.276558], [-0.0266349, -\n 0.517198, 0.779221], [0.276558, 0.779221, 0.218732]]], [[[0.474782, \n 0.187331, 1.15524], [0.187331, -0.0266349, 0.276558], [1.15524, \n 0.276558, 0.178083]], [[0.187331, -0.0266349, 0.276558], [-0.0266349, -\n 0.517198, 0.779221], [0.276558, 0.779221, 0.218732]], [[1.15524, \n 0.276558, 0.178083], [0.276558, 0.779221, 0.218732], [0.178083, \n 0.218732, 5.98947]]]])\n', (2703, 3662), True, 'import numpy as np\n'), ((4260, 4283), 'numpy.array', 'np.array', (['coskew_result'], {}), '(coskew_result)\n', (4268, 4283), True, 'import numpy as np\n'), ((4334, 4357), 'numpy.array', 'np.array', (['cokurt_result'], {}), '(cokurt_result)\n', (4342, 4357), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def read_file(filename):
labels = ["futures", "title", "wait", "exec", "duration", "us_future", "queue", "numa_sensitive", "num_threads", "info_string", "libcds"]
data = pd.read_csv(filename, sep=',', header=None)
data.columns = labels
return data
def get_files_data(threads):
filenames = []
for t in threads:
filenames.append( "thread_" + str(t) + ".txt" )
rawdata = []
for f in filenames:
rawdata.append(read_file(f))
data = pd.concat(rawdata)
return data
def get_overhead(threads, exec_type, data):
with_libcds = []
no_libcds = []
for t in threads:
with_libcds.append(data.loc[ (data["num_threads"] == t) & (data["libcds"] == 1) & (data["exec"] == exec_type) ]["us_future"].mean())
no_libcds.append(data.loc[ (data["num_threads"] == t) & (data["libcds"] == 0) & (data["exec"] == exec_type) ]["us_future"].mean())
return np.array(with_libcds) - np.array(no_libcds)
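# In other words, get_overhead reports, per thread count, mean(us_future | libcds == 1)
# minus mean(us_future | libcds == 0): the per-future cost of enabling libcds.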
threads = [1, 2, 4, 6, 8, 10, 12, 14, 16]
data = get_files_data(threads)
#print(data)
#remove whitespace
data = data.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
#print(data)
exec = ["none", "parallel_executor", "thread_pool_executor"]
overhead_exec_none = get_overhead(threads, exec[0], data)
overhead_exec_parallel_executor = get_overhead(threads, exec[1], data)
overhead_exec_thread_pool_executor = get_overhead(threads, exec[2], data)
plt.plot(threads, overhead_exec_none, marker="x")
plt.plot(threads, overhead_exec_parallel_executor, marker="x")
plt.plot(threads, overhead_exec_thread_pool_executor, marker="x")
exec_labels = ["create_thread_hierarchical, latch, none", "apply parallel_executor", "apply thread_pool_executor"]
plt.legend(exec_labels, loc=1)
plt.ylabel('overhead in us/future')
plt.xlabel('Threads')
plt.title('Libcds Hazard Pointers Overhead Measured w/ 1M HPX Futures')
#plt.show()
plt.savefig('overhead_hazard_pointers.png') | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"pandas.concat",
"matplotlib.pyplot.savefig"
] | [((1460, 1509), 'matplotlib.pyplot.plot', 'plt.plot', (['threads', 'overhead_exec_none'], {'marker': '"""x"""'}), "(threads, overhead_exec_none, marker='x')\n", (1468, 1509), True, 'from matplotlib import pyplot as plt\n'), ((1510, 1572), 'matplotlib.pyplot.plot', 'plt.plot', (['threads', 'overhead_exec_parallel_executor'], {'marker': '"""x"""'}), "(threads, overhead_exec_parallel_executor, marker='x')\n", (1518, 1572), True, 'from matplotlib import pyplot as plt\n'), ((1573, 1638), 'matplotlib.pyplot.plot', 'plt.plot', (['threads', 'overhead_exec_thread_pool_executor'], {'marker': '"""x"""'}), "(threads, overhead_exec_thread_pool_executor, marker='x')\n", (1581, 1638), True, 'from matplotlib import pyplot as plt\n'), ((1754, 1784), 'matplotlib.pyplot.legend', 'plt.legend', (['exec_labels'], {'loc': '(1)'}), '(exec_labels, loc=1)\n', (1764, 1784), True, 'from matplotlib import pyplot as plt\n'), ((1786, 1821), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""overhead in us/future"""'], {}), "('overhead in us/future')\n", (1796, 1821), True, 'from matplotlib import pyplot as plt\n'), ((1822, 1843), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threads"""'], {}), "('Threads')\n", (1832, 1843), True, 'from matplotlib import pyplot as plt\n'), ((1844, 1915), 'matplotlib.pyplot.title', 'plt.title', (['"""Libcds Hazard Pointers Overhead Measured w/ 1M HPX Futures"""'], {}), "('Libcds Hazard Pointers Overhead Measured w/ 1M HPX Futures')\n", (1853, 1915), True, 'from matplotlib import pyplot as plt\n'), ((1929, 1972), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""overhead_hazard_pointers.png"""'], {}), "('overhead_hazard_pointers.png')\n", (1940, 1972), True, 'from matplotlib import pyplot as plt\n'), ((252, 295), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""","""', 'header': 'None'}), "(filename, sep=',', header=None)\n", (263, 295), True, 'import pandas as pd\n'), ((534, 552), 'pandas.concat', 'pd.concat', (['rawdata'], {}), '(rawdata)\n', (543, 552), True, 'import pandas as pd\n'), ((950, 971), 'numpy.array', 'np.array', (['with_libcds'], {}), '(with_libcds)\n', (958, 971), True, 'import numpy as np\n'), ((974, 993), 'numpy.array', 'np.array', (['no_libcds'], {}), '(no_libcds)\n', (982, 993), True, 'import numpy as np\n')] |
import copy
from typing import List
import numpy as np
from pyecg.annotations import ECGAnnotation
def is_monotonic_increasing(x):
return np.all(np.diff(x) > 0)
class SubjectInfo:
sex = None # 1=male, 2=female
race = None # 1=white, 2=black, 3=oriental
birth_data = None
record_data = None
file_date = None
start_time = None
pm = None
class Sequence:
seq_data = None
def __eq__(self, other):
return self.seq_data == other
def __getitem__(self, item):
return self.seq_data[item]
def slice(self, slice_):
new_instance = copy.copy(self)
new_instance.seq_data = new_instance[slice_]
return new_instance
def __len__(self):
return len(self.seq_data)
def __iter__(self):
return iter(self.seq_data)
class Time(Sequence):
fs = None
samples = None
@property
def time(self):
return self.seq_data
def __init__(self, fs=None, samples=None, time_stamps=None):
if fs is not None and samples is not None:
self.fs = fs
self.samples = samples
self.seq_data = (1 / self.fs) * np.arange(0, self.samples)
return
if time_stamps is not None and not isinstance(time_stamps, list) and not isinstance(time_stamps, np.ndarray):
raise TypeError(f"time_stamps should be a np.ndarray or a list: {type(time_stamps)}")
if time_stamps is not None:
self.seq_data = time_stamps
@classmethod
def from_fs_samples(cls, fs, samples):
return cls(fs=fs, samples=samples)
@classmethod
def from_timestamps(cls, time_stamps):
if not is_monotonic_increasing(time_stamps):
raise ValueError("Timestamps are not monotonically increasing")
return cls(time_stamps=time_stamps)
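# Usage sketch for the constructors above (synthetic values, illustrative only):
#   t = Time.from_fs_samples(fs=360, samples=10)  # 10 stamps spaced 1/360 s apart
#   head = t.slice(slice(0, 5))                   # still a Time: Sequence.slice
#                                                 # shallow-copies and cuts seq_data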
class Signal(Sequence):
lead_name = None
def __repr__(self):
return f"Lead {self.lead_name}"
def __init__(self, signal, lead_name):
if isinstance(signal, str):
raise TypeError(f"Bad type of signal: {type(signal)}")
if isinstance(signal, np.ndarray):
self.seq_data = signal.tolist()
else:
self.seq_data = signal
self.lead_name = lead_name
class ECGRecord:
time: Time = None
record_name: str = None
_signals: List[Signal] = []
annotations: ECGAnnotation = None
info: SubjectInfo = None
def __init__(self, name, time):
self.record_name = name
if not isinstance(time, Time):
raise TypeError("time should be ECGTime")
self.time = time
self._signals = []
@property
def duration(self):
return max(self.time)
@property
def n_sig(self):
return len(self._signals)
@property
def p_signal(self):
return np.array([s for s in self._signals])
@property
def lead_names(self):
return [s.lead_name for s in self._signals]
def get_lead(self, lead_name):
try:
return list(filter(lambda s: s.lead_name == lead_name, self._signals))[0]
except IndexError:
return None
def add_signal(self, signal):
if not isinstance(signal, Signal):
raise TypeError("signal should be ECGSignal")
if len(signal) != len(self):
raise ValueError(f"len(signal) has {len(signal)} samples != len(timestamps) = {len(self.time)}")
self._signals.append(signal)
def __len__(self):
return len(self.time)
def __repr__(self):
return f"Record {self.record_name}: {self.lead_names}"
def __getitem__(self, item):
new_instance = copy.copy(self)
new_instance.time = new_instance.time.slice(item)
new_instance._signals = [s.slice(item) for s in new_instance._signals]
return new_instance
@classmethod
def from_wfdb(cls, hea_file):
from pyecg.importers import WFDBLoader
loader = WFDBLoader()
return loader.load(hea_file)
@classmethod
def from_ishine(cls, ecg_file):
from pyecg.importers import ISHINELoader
loader = ISHINELoader()
return loader.load(ecg_file)
@classmethod
def from_np_array(cls, name, time, signal_array, signal_names):
new_instance = cls(name, Time.from_timestamps(time))
if len(signal_array.shape) != 2:
raise ValueError(f"Signal should be 2D array e.g. (3, 1000) got {signal_array.shape}")
if signal_array.shape[0] != len(signal_names):
raise ValueError(f"signal_array.shape[0] should match len(signal_names)")
for signal, name in zip(signal_array, signal_names):
new_instance.add_signal(Signal(signal=signal, lead_name=name))
return new_instance
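# Minimal usage sketch of from_np_array (synthetic data, not a real recording):
if __name__ == "__main__":
    ts = np.linspace(0.0, 1.0, 500)                # strictly increasing timestamps
    sig = np.random.randn(2, 500)                   # (n_leads, n_samples)
    rec = ECGRecord.from_np_array("demo", ts, sig, ["I", "II"])
    assert rec.n_sig == 2 and rec.lead_names == ["I", "II"]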
| [
"pyecg.importers.WFDBLoader",
"copy.copy",
"numpy.diff",
"numpy.array",
"numpy.arange",
"pyecg.importers.ISHINELoader"
] | [((603, 618), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (612, 618), False, 'import copy\n'), ((2842, 2878), 'numpy.array', 'np.array', (['[s for s in self._signals]'], {}), '([s for s in self._signals])\n', (2850, 2878), True, 'import numpy as np\n'), ((3676, 3691), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (3685, 3691), False, 'import copy\n'), ((3973, 3985), 'pyecg.importers.WFDBLoader', 'WFDBLoader', ([], {}), '()\n', (3983, 3985), False, 'from pyecg.importers import WFDBLoader\n'), ((4143, 4157), 'pyecg.importers.ISHINELoader', 'ISHINELoader', ([], {}), '()\n', (4155, 4157), False, 'from pyecg.importers import ISHINELoader\n'), ((153, 163), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (160, 163), True, 'import numpy as np\n'), ((1160, 1186), 'numpy.arange', 'np.arange', (['(0)', 'self.samples'], {}), '(0, self.samples)\n', (1169, 1186), True, 'import numpy as np\n')] |
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
def affine_batchnorm_relu_forward(x, w, b, gamma, beta, bn_params):
"""
Convenience layer that perorms an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
- gamma, beta, bn_params: Parameters for the batchnorm layer
Returns a tuple of:
- out: Output from the ReLU after batch normalisation
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
b, bn_cache = batchnorm_forward(a, gamma, beta, bn_params)
out, relu_cache = relu_forward(b)
cache = (fc_cache, bn_cache, relu_cache)
return out, cache
def affine_batchnorm_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, bn_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
db, dgamma, dbeta = batchnorm_backward(da, bn_cache)
dx, dw, db = affine_backward(db, fc_cache)
return dx, dw, db, dgamma, dbeta
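# Round-trip sanity check for the pair above (random shapes; assumes the
# cs231n layer implementations imported via * are available):
#   x, w, b = np.random.randn(4, 5), np.random.randn(5, 3), np.zeros(3)
#   gamma, beta, bn_param = np.ones(3), np.zeros(3), {'mode': 'train'}
#   out, cache = affine_batchnorm_relu_forward(x, w, b, gamma, beta, bn_param)
#   dx, dw, db, dgamma, dbeta = affine_batchnorm_relu_backward(np.random.randn(*out.shape), cache)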
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecure should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self,
input_dim=3 * 32 * 32,
hidden_dim=100,
num_classes=10,
weight_scale=1e-3,
reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
self.params['W1'] = np.random.randn(input_dim, hidden_dim) * weight_scale
self.params['b1'] = np.zeros(hidden_dim)
self.params['W2'] = np.random.randn(hidden_dim, num_classes) * weight_scale
self.params['b2'] = np.zeros(num_classes)
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
N = X.shape[0]
D = np.prod(X.shape[1:])
# Feed-forward
hidden, cache_hidden = affine_relu_forward(X, self.params['W1'], self.params['b1'])
scores, cache_score = affine_forward(hidden, self.params['W2'], self.params['b2'])
# If y is None then we are in test mode so just return scores
if y is None:
return scores
grads = {}
# Backpropagate
loss, dloss = softmax_loss(scores, y)
dscores, grads['W2'], grads['b2'] = affine_backward(dloss, cache_score)
_, grads['W1'], grads['b1'] = affine_relu_backward(dscores, cache_hidden)
loss += 0.5 * self.reg * (np.sum(np.square(self.params['W1'])) +
np.sum(np.square(self.params['W2'])))
grads['W1'] += self.reg * self.params['W1']
grads['W2'] += self.reg * self.params['W2']
return loss, grads
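# Shape sanity check for TwoLayerNet (random data; assumes the cs231n layers):
#   model = TwoLayerNet(input_dim=20, hidden_dim=10, num_classes=3)
#   scores = model.loss(np.random.randn(5, 20))           # test mode -> (5, 3) scores
#   loss, grads = model.loss(np.random.randn(5, 20), np.random.randint(3, size=5))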
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self,
hidden_dims,
input_dim=3 * 32 * 32,
num_classes=10,
dropout=0,
use_batchnorm=False,
reg=0.0,
weight_scale=1e-2,
dtype=np.float32,
seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deteriminstic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
# Setup parameters
self.params['W1'] = np.random.randn(input_dim, hidden_dims[0]) * weight_scale
self.params['b1'] = np.zeros(hidden_dims[0])
if self.use_batchnorm:
self.params['gamma1'] = np.random.randn(hidden_dims[0]) * weight_scale
self.params['beta1'] = np.random.randn(hidden_dims[0]) * weight_scale
for i in np.arange(1, len(hidden_dims)):
self.params['W%d' % (i + 1, )] = np.random.randn(hidden_dims[i - 1], hidden_dims[i]) * weight_scale
self.params['b%d' % (i + 1, )] = np.zeros(hidden_dims[i])
if self.use_batchnorm:
self.params['gamma%d' % (i + 1, )] = np.ones(hidden_dims[i])
self.params['beta%d' % (i + 1, )] = np.zeros(hidden_dims[i])
self.params['W%d' % (self.num_layers, )] = np.random.randn(hidden_dims[-1], num_classes) * weight_scale
self.params['b%d' % (self.num_layers, )] = np.zeros(num_classes)
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{
'mode': 'train'
        } for i in range(self.num_layers - 1)]
# Cast all parameters to the correct datatype
        for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.dropout_param is not None:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
                bn_param['mode'] = mode
# Feed-forward
forward = X
cache = []
dropout_cache = []
for i in np.arange(0, self.num_layers - 1):
if self.use_batchnorm:
forward, c = affine_batchnorm_relu_forward(forward,
self.params['W%d' % (i + 1, )],
self.params['b%d' % (i + 1, )],
self.params['gamma%d' % (i + 1, )],
self.params['beta%d' % (i + 1, )],
self.bn_params[i])
else:
forward, c = affine_relu_forward(forward,
self.params['W%d' % (i + 1, )],
self.params['b%d' % (i + 1, )])
cache.append(c)
if self.use_dropout:
forward, c = dropout_forward(forward, self.dropout_param)
dropout_cache.append(c)
scores, c = affine_forward(forward,
self.params['W%d' % (self.num_layers, )],
self.params['b%d' % (self.num_layers, )])
cache.append(c)
# If test mode return early
if mode == 'test':
return scores
# Backpropagate
grads = {}
loss, dloss = softmax_loss(scores, y)
dprev, grads['W%d' % (self.num_layers, )], grads['b%d' % (self.num_layers)] = affine_backward(
dloss, cache[-1])
grads['W%d' % (self.num_layers, )] += self.reg * self.params['W%d' % (self.num_layers, )]
loss += 0.5 * self.reg * np.sum(np.square(self.params['W%d' % (self.num_layers, )]))
for i in np.arange(self.num_layers - 1, 0, -1):
if self.use_dropout:
dprev = dropout_backward(dprev, dropout_cache[i - 1])
if self.use_batchnorm:
dprev, grads['W%d' % (i, )], \
grads['b%d' % (i, )], \
grads['gamma%d' % (i, )], \
grads['beta%d' % (i, )] = affine_batchnorm_relu_backward(dprev, cache[i - 1])
else:
dprev, grads['W%d' % (i, )], grads['b%d' % (i, )] = affine_relu_backward(dprev, cache[i - 1])
grads['W%d' % (i, )] += self.reg * self.params['W%d' % (i, )]
loss += 0.5 * self.reg * np.sum(np.square(self.params['W%d' % (i, )]))
return loss, grads | [
"numpy.random.randn",
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.arange",
"numpy.prod"
] | [((2547, 2567), 'numpy.zeros', 'np.zeros', (['hidden_dim'], {}), '(hidden_dim)\n', (2555, 2567), True, 'import numpy as np\n'), ((2680, 2701), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (2688, 2701), True, 'import numpy as np\n'), ((3564, 3584), 'numpy.prod', 'np.prod', (['X.shape[1:]'], {}), '(X.shape[1:])\n', (3571, 3584), True, 'import numpy as np\n'), ((6855, 6879), 'numpy.zeros', 'np.zeros', (['hidden_dims[0]'], {}), '(hidden_dims[0])\n', (6863, 6879), True, 'import numpy as np\n'), ((7659, 7680), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (7667, 7680), True, 'import numpy as np\n'), ((9557, 9590), 'numpy.arange', 'np.arange', (['(0)', '(self.num_layers - 1)'], {}), '(0, self.num_layers - 1)\n', (9566, 9590), True, 'import numpy as np\n'), ((11337, 11374), 'numpy.arange', 'np.arange', (['(self.num_layers - 1)', '(0)', '(-1)'], {}), '(self.num_layers - 1, 0, -1)\n', (11346, 11374), True, 'import numpy as np\n'), ((2465, 2503), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'hidden_dim'], {}), '(input_dim, hidden_dim)\n', (2480, 2503), True, 'import numpy as np\n'), ((2596, 2636), 'numpy.random.randn', 'np.random.randn', (['hidden_dim', 'num_classes'], {}), '(hidden_dim, num_classes)\n', (2611, 2636), True, 'import numpy as np\n'), ((6769, 6811), 'numpy.random.randn', 'np.random.randn', (['input_dim', 'hidden_dims[0]'], {}), '(input_dim, hidden_dims[0])\n', (6784, 6811), True, 'import numpy as np\n'), ((7282, 7306), 'numpy.zeros', 'np.zeros', (['hidden_dims[i]'], {}), '(hidden_dims[i])\n', (7290, 7306), True, 'import numpy as np\n'), ((7547, 7592), 'numpy.random.randn', 'np.random.randn', (['hidden_dims[-1]', 'num_classes'], {}), '(hidden_dims[-1], num_classes)\n', (7562, 7592), True, 'import numpy as np\n'), ((6947, 6978), 'numpy.random.randn', 'np.random.randn', (['hidden_dims[0]'], {}), '(hidden_dims[0])\n', (6962, 6978), True, 'import numpy as np\n'), ((7029, 7060), 'numpy.random.randn', 'np.random.randn', (['hidden_dims[0]'], {}), '(hidden_dims[0])\n', (7044, 7060), True, 'import numpy as np\n'), ((7170, 7221), 'numpy.random.randn', 'np.random.randn', (['hidden_dims[i - 1]', 'hidden_dims[i]'], {}), '(hidden_dims[i - 1], hidden_dims[i])\n', (7185, 7221), True, 'import numpy as np\n'), ((7395, 7418), 'numpy.ones', 'np.ones', (['hidden_dims[i]'], {}), '(hidden_dims[i])\n', (7402, 7418), True, 'import numpy as np\n'), ((7471, 7495), 'numpy.zeros', 'np.zeros', (['hidden_dims[i]'], {}), '(hidden_dims[i])\n', (7479, 7495), True, 'import numpy as np\n'), ((11267, 11317), 'numpy.square', 'np.square', (["self.params['W%d' % (self.num_layers,)]"], {}), "(self.params['W%d' % (self.num_layers,)])\n", (11276, 11317), True, 'import numpy as np\n'), ((4206, 4234), 'numpy.square', 'np.square', (["self.params['W1']"], {}), "(self.params['W1'])\n", (4215, 4234), True, 'import numpy as np\n'), ((4289, 4317), 'numpy.square', 'np.square', (["self.params['W2']"], {}), "(self.params['W2'])\n", (4298, 4317), True, 'import numpy as np\n'), ((12002, 12038), 'numpy.square', 'np.square', (["self.params['W%d' % (i,)]"], {}), "(self.params['W%d' % (i,)])\n", (12011, 12038), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
import numpy as np
from scipy import special
import six
########################
# LOG-SPACE ARITHMETIC #
########################
def compute_rdp(q, noise_multiplier, steps, orders):
"""Compute RDP of the Sampled Gaussian Mechanism.
Args:
q: The sampling rate.
noise_multiplier: The ratio of the standard deviation of the Gaussian noise
to the l2-sensitivity of the function to which it is added.
steps: The number of steps.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDPs at all orders, can be np.inf.
"""
if np.isscalar(orders):
rdp = _compute_rdp(q, noise_multiplier, orders) # Call-1
else:
rdp = np.array([_compute_rdp(q, noise_multiplier, order)
for order in orders])
return rdp * steps
def _compute_rdp(q, sigma, alpha): # Called-1
"""Compute RDP of the Sampled Gaussian mechanism at order alpha.
Args:
q: The sampling rate.
sigma: The std of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at alpha, can be np.inf.
"""
if q == 0:
return 0
if q == 1.:
return alpha / (2 * sigma**2)
if np.isinf(alpha):
return np.inf
return _compute_log_a(q, sigma, alpha) / (alpha - 1) # Call-2
def _compute_log_a(q, sigma, alpha): # Called-2
"""Compute log(A_alpha) for any positive finite alpha."""
if float(alpha).is_integer():
return _compute_log_a_int(q, sigma, int(alpha))
else:
return _compute_log_a_frac(q, sigma, alpha) # Call-3
def _compute_log_a_frac(q, sigma, alpha): #Called-3
"""Compute log(A_alpha) for fractional alpha. 0 < q < 1."""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma**2 * math.log(1 / q - 1) + .5
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma)) #Call- 4
log_e1 = math.log(.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma**2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma**2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
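# _compute_log_a_int and _log_sub are referenced above but were missing from
# this excerpt; the definitions below are sketches following the reference
# tensorflow_privacy rdp_accountant implementation.
def _compute_log_a_int(q, sigma, alpha):
  """Compute log(A_alpha) for integer alpha. 0 < q < 1."""
  assert isinstance(alpha, six.integer_types)
  # Sum over i of binom(alpha, i) q^i (1-q)^(alpha-i) exp((i^2-i)/(2 sigma^2)),
  # accumulated in log space.
  log_a = -np.inf
  for i in range(alpha + 1):
    log_coef_i = (math.log(special.binom(alpha, i)) + i * math.log(q) +
                  (alpha - i) * math.log(1 - q))
    s = log_coef_i + (i * i - i) / (2 * (sigma**2))
    log_a = _log_add(log_a, s)
  return float(log_a)

def _log_sub(logx, logy):
  """Subtract two numbers in the log space. Answer must be non-negative."""
  if logx < logy:
    raise ValueError("The result of subtraction must be non-negative.")
  if logy == -np.inf:  # subtracting 0
    return logx
  if logx == logy:
    return -np.inf  # 0 is represented as -np.inf in the log space
  try:
    # Use exp(x - y) - 1 = (exp(x) - exp(y)) / exp(y)
    return math.log(math.expm1(logx - logy)) + logy  # expm1(x) = exp(x) - 1
  except OverflowError:
    return logx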
def _log_erfc(x): #Called-4
"""Compute log(erfc(x)) with high accuracy for large x."""
try:
return math.log(2) + special.log_ndtr(-x * 2**.5)
except NameError:
# If log_ndtr is not available, approximate as follows:
r = special.erfc(x)
if r == 0.0:
# Using the Laurent series at infinity for the tail of the erfc function:
# erfc(x) ~ exp(-x^2-.5/x^2+.625/x^4)/(x*pi^.5)
# To verify in Mathematica:
# Series[Log[Erfc[x]] + Log[x] + Log[Pi]/2 + x^2, {x, Infinity, 6}]
return (-math.log(math.pi) / 2 - math.log(x) - x**2 - .5 * x**-2 +
.625 * x**-4 - 37. / 24. * x**-6 + 353. / 64. * x**-8)
else:
return math.log(r)
def _log_add(logx, logy):
"""Add two numbers in the log space."""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1) | [
"scipy.special.binom",
"math.exp",
"math.sqrt",
"numpy.isscalar",
"numpy.isinf",
"scipy.special.erfc",
"math.log",
"scipy.special.log_ndtr"
] | [((703, 722), 'numpy.isscalar', 'np.isscalar', (['orders'], {}), '(orders)\n', (714, 722), True, 'import numpy as np\n'), ((1295, 1310), 'numpy.isinf', 'np.isinf', (['alpha'], {}), '(alpha)\n', (1303, 1310), True, 'import numpy as np\n'), ((2017, 2040), 'scipy.special.binom', 'special.binom', (['alpha', 'i'], {}), '(alpha, i)\n', (2030, 2040), False, 'from scipy import special\n'), ((1945, 1964), 'math.log', 'math.log', (['(1 / q - 1)'], {}), '(1 / q - 1)\n', (1953, 1964), False, 'import math\n'), ((2233, 2246), 'math.log', 'math.log', (['(0.5)'], {}), '(0.5)\n', (2241, 2246), False, 'import math\n'), ((2315, 2328), 'math.log', 'math.log', (['(0.5)'], {}), '(0.5)\n', (2323, 2328), False, 'import math\n'), ((2889, 2900), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (2897, 2900), False, 'import math\n'), ((2903, 2934), 'scipy.special.log_ndtr', 'special.log_ndtr', (['(-x * 2 ** 0.5)'], {}), '(-x * 2 ** 0.5)\n', (2919, 2934), False, 'from scipy import special\n'), ((3020, 3035), 'scipy.special.erfc', 'special.erfc', (['x'], {}), '(x)\n', (3032, 3035), False, 'from scipy import special\n'), ((3707, 3722), 'math.exp', 'math.exp', (['(a - b)'], {}), '(a - b)\n', (3715, 3722), False, 'import math\n'), ((2141, 2156), 'math.log', 'math.log', (['(1 - q)'], {}), '(1 - q)\n', (2149, 2156), False, 'import math\n'), ((2203, 2218), 'math.log', 'math.log', (['(1 - q)'], {}), '(1 - q)\n', (2211, 2218), False, 'import math\n'), ((3468, 3479), 'math.log', 'math.log', (['r'], {}), '(r)\n', (3476, 3479), False, 'import math\n'), ((2123, 2134), 'math.log', 'math.log', (['q'], {}), '(q)\n', (2131, 2134), False, 'import math\n'), ((2185, 2196), 'math.log', 'math.log', (['q'], {}), '(q)\n', (2193, 2196), False, 'import math\n'), ((2270, 2282), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (2279, 2282), False, 'import math\n'), ((2352, 2364), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (2361, 2364), False, 'import math\n'), ((3342, 3353), 'math.log', 'math.log', (['x'], {}), '(x)\n', (3350, 3353), False, 'import math\n'), ((3318, 3335), 'math.log', 'math.log', (['math.pi'], {}), '(math.pi)\n', (3326, 3335), False, 'import math\n')] |
import numpy as np
from uncertainties import correlated_values, covariance_matrix, nominal_value, std_dev
from uncertainties.unumpy import nominal_values, std_devs
def setup(baseunit):
global convertpscale, Distance, distances, pixels, microns
if baseunit == "pixels":
from .pixels import convertpscale, Distance, microns, pixels
elif baseunit == "microns":
from .microns import convertpscale, Distance, microns, pixels
else:
raise ValueError(f"Unknown baseunit: {baseunit}")
distances = Distance
setup("pixels")
def correlated_distances(*, pscale=None, pixels=None, microns=None, distances=None, covariance=None, power=1):
if distances is not None is pixels is microns:
distances = distances
one = 1
elif pixels is not None is distances is microns:
one = Distance(pscale=pscale, pixels=1)
distances = np.array(pixels)*one
elif microns is not None is distances is pixels:
one = Distance(pscale=pscale, microns=1)
distances = np.array(microns)*one
else:
raise TypeError("Have to provide exactly one of pixels, microns, or distances")
if covariance is None:
return distances
covariance = covariance*one**2
return correlated_values(distances, covariance)
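# Illustrative call (hypothetical pscale; requires this package's unit modules):
#   x, y = correlated_distances(pscale=2.0, pixels=[3, 4],
#                              covariance=[[0.10, 0.02], [0.02, 0.10]])
# returns `uncertainties` values whose covariance matrix reproduces the input,
# rescaled into distance units via one**2.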
def asdimensionless(distance): return distance
__all__ = [
"convertpscale", "correlated_distances", "Distance", "distances",
"asdimensionless", "covariance_matrix", "microns", "nominal_value", "nominal_values", "pixels", "std_dev", "std_devs",
]
| [
"uncertainties.correlated_values",
"numpy.array"
] | [((1186, 1226), 'uncertainties.correlated_values', 'correlated_values', (['distances', 'covariance'], {}), '(distances, covariance)\n', (1203, 1226), False, 'from uncertainties import correlated_values, covariance_matrix, nominal_value, std_dev\n'), ((849, 865), 'numpy.array', 'np.array', (['pixels'], {}), '(pixels)\n', (857, 865), True, 'import numpy as np\n'), ((982, 999), 'numpy.array', 'np.array', (['microns'], {}), '(microns)\n', (990, 999), True, 'import numpy as np\n')] |
import numpy as np
from scipy.special import gamma
import pickle as pk
import os
import argparse
def metropolisHastingsSymmetric(p, nsamples):
x = 0
out = np.zeros((nsamples,))
for i in range(nsamples):
xHat = x + np.random.normal()
if np.random.uniform() < p(xHat) / p(x):
x = xHat
out[i] = x
return out
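# Sanity-check sketch for the sampler above (standard-normal target; illustrative):
#   samples = metropolisHastingsSymmetric(lambda x: np.exp(-0.5 * x * x), 10000)
#   after burn-in, samples.mean() is near 0 and samples.var() is near 1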
def generateSpike(params, expInd = 0):
alpha = np.random.uniform(0, 1, (params['C'],)) + 2.5
epsi = np.zeros((params['C'], params['Q'])) # % intrinsic paramter
beta = np.zeros((params['C'], params['C'], params['R'])) # extrinsic paramter
gammaUU = np.zeros((params['C'], params['I'], params['M'])) # unknown paramter
# intrinsic para
x = np.arange(1,params['Q']+1)
z_epsi = np.random.uniform(1.5, 2, (params['C'],))
for c in range(params['C']):
epsi[c,:] = -np.sin(x / z_epsi[c]) / (x / z_epsi[c])
# extrinsic para
pos_beta = 2*(np.random.uniform(0, 1, (params['C'],params['C']))>0.5)-1
np.fill_diagonal(pos_beta, 0)
z_beta = np.random.uniform(0.5, 1, (params['C'],params['C']))
for c in range(params['C']):
for c1 in range(params['C']):
beta[c,c1,:] = pos_beta[c,c1]*np.exp((-z_beta[c,c1])*np.arange(1, params['R']+1))
# unknowns
loggamma_a = 1
loggamma_b = 50
shiftLogGamma = -3.9
flg = lambda x: np.exp(loggamma_b * (x - shiftLogGamma)) \
* np.exp(-np.exp(x - shiftLogGamma)/loggamma_a) \
/((loggamma_a**loggamma_b)*gamma(loggamma_b))
nsamples = params['K']*params['I']
# sample from log-gamma distribution with mean centered
dU = metropolisHastingsSymmetric(flg, nsamples)
dU = np.reshape(dU, (params['I'], params['K']))
dN = np.zeros((params['C'], params['K']))
dN[:,0] = np.random.uniform(0, 1, (params['C'],))>0.5
lambda_ = np.zeros((params['C'], params['K']))
for k in range(1, params['K']):
for c in range(params['C']):
in_ = np.dot(epsi[c, :min([k-1, params['Q']])+1],
np.flip(dN[c, k-min([k, params['Q']]):k], axis=0)
)
ex_ = np.sum(np.squeeze(beta[:,c,:min([k-1,params['R']])+1]) *
np.fliplr(dN[:, k-min([k, params['R']]):k])
)
if params['flag'] == 'wUU':
if params['I'] == 1:
un_ = np.sum(np.squeeze(gammaUU[c,:,:min([k-1,params['M']])]) *
np.flip(dU[:, k - min([k, params['M']]):k], axis=0)
)
else:
un_ = np.sum(np.squeeze(gammaUU[c,:,:min([k-1,params['M']])+1]) *
np.fliplr(dU[:, k - min([k, params['M']]):k])
)
else:
un_ = 0
lambda_[c, k] = np.exp(alpha[c] + in_ + ex_ + un_)
for i in range(params['C']):
u = np.random.uniform(0, 1)
if u <= lambda_[i, k] * params['tau']:
dN[i,k] = 1
return {'dN':dN, 'alpha':alpha, 'epsi':epsi,
'beta':beta, 'pos_beta':pos_beta, 'gammaUU':gammaUU,
'params':params}
flagTypes = ['woUU', 'wUU']
parser = argparse.ArgumentParser(description="artificial spikes generation")
parser.add_argument('-C', '--num_nodes',help='number of nodes', default=6,
type=int)
parser.add_argument('-K', '--length', help='length of spiking events', default=1500,
type=int)
parser.add_argument('-Q', '--intr_len', default=50,
help='intrinsic memory length', type=int)
parser.add_argument('-R', '--extr_len', default=5,
help='extrinsic memory length', type=int)
parser.add_argument('-M', '--unknown_len', default=5,
help = 'unknown activity memory length', type=int)
parser.add_argument('-I', '--num_unk', default=2,
help = 'Number of unknowns', type=int)
parser.add_argument('-tau', '--tau', default=0.05,
help = 'spiking interval length', type=float)
parser.add_argument('-f', '--flag', default='woUU', choices=flagTypes,
help = 'type of spike samples, with/without unknowns', type=str)
if __name__ == '__main__':
# flag = woUU: for generating spikes without unknowns contribution
# wUU : for generating spikes with unknowns contribution
args = parser.parse_args()
params = {'C':args.num_nodes, 'Q':args.intr_len, 'R':args.extr_len,
'M':args.unknown_len, 'I':args.num_unk,
'K':args.length, 'tau':args.tau, 'flag':args.flag}
dataDir = 'data'
if not os.path.exists(dataDir):
os.makedirs(dataDir, exist_ok=True)
numExperiments = 5
for expInd in range(numExperiments):
out = generateSpike(params, expInd)
saveStr = 'neuronSpikeSim_%s_C_%d_K_%d_exp_%d.p'%(params['flag'], params['C'], params['K'], expInd)
pk.dump(out, open(os.path.join(dataDir, saveStr), 'wb')) | [
"numpy.random.uniform",
"numpy.fill_diagonal",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"numpy.zeros",
"os.path.exists",
"numpy.sin",
"numpy.arange",
"numpy.reshape",
"numpy.random.normal",
"numpy.exp",
"scipy.special.gamma"
] | [((2739, 2806), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""artificial spikes generation"""'}), "(description='artificial spikes generation')\n", (2762, 2806), False, 'import argparse\n'), ((158, 179), 'numpy.zeros', 'np.zeros', (['(nsamples,)'], {}), '((nsamples,))\n', (166, 179), True, 'import numpy as np\n'), ((423, 459), 'numpy.zeros', 'np.zeros', (["(params['C'], params['Q'])"], {}), "((params['C'], params['Q']))\n", (431, 459), True, 'import numpy as np\n'), ((491, 540), 'numpy.zeros', 'np.zeros', (["(params['C'], params['C'], params['R'])"], {}), "((params['C'], params['C'], params['R']))\n", (499, 540), True, 'import numpy as np\n'), ((573, 622), 'numpy.zeros', 'np.zeros', (["(params['C'], params['I'], params['M'])"], {}), "((params['C'], params['I'], params['M']))\n", (581, 622), True, 'import numpy as np\n'), ((667, 696), 'numpy.arange', 'np.arange', (['(1)', "(params['Q'] + 1)"], {}), "(1, params['Q'] + 1)\n", (676, 696), True, 'import numpy as np\n'), ((704, 745), 'numpy.random.uniform', 'np.random.uniform', (['(1.5)', '(2)', "(params['C'],)"], {}), "(1.5, 2, (params['C'],))\n", (721, 745), True, 'import numpy as np\n'), ((925, 954), 'numpy.fill_diagonal', 'np.fill_diagonal', (['pos_beta', '(0)'], {}), '(pos_beta, 0)\n', (941, 954), True, 'import numpy as np\n'), ((965, 1018), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)', "(params['C'], params['C'])"], {}), "(0.5, 1, (params['C'], params['C']))\n", (982, 1018), True, 'import numpy as np\n'), ((1551, 1593), 'numpy.reshape', 'np.reshape', (['dU', "(params['I'], params['K'])"], {}), "(dU, (params['I'], params['K']))\n", (1561, 1593), True, 'import numpy as np\n'), ((1601, 1637), 'numpy.zeros', 'np.zeros', (["(params['C'], params['K'])"], {}), "((params['C'], params['K']))\n", (1609, 1637), True, 'import numpy as np\n'), ((1705, 1741), 'numpy.zeros', 'np.zeros', (["(params['C'], params['K'])"], {}), "((params['C'], params['K']))\n", (1713, 1741), True, 'import numpy as np\n'), ((369, 408), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', "(params['C'],)"], {}), "(0, 1, (params['C'],))\n", (386, 408), True, 'import numpy as np\n'), ((1649, 1688), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', "(params['C'],)"], {}), "(0, 1, (params['C'],))\n", (1666, 1688), True, 'import numpy as np\n'), ((4027, 4050), 'os.path.exists', 'os.path.exists', (['dataDir'], {}), '(dataDir)\n', (4041, 4050), False, 'import os\n'), ((4054, 4089), 'os.makedirs', 'os.makedirs', (['dataDir'], {'exist_ok': '(True)'}), '(dataDir, exist_ok=True)\n', (4065, 4089), False, 'import os\n'), ((220, 238), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (236, 238), True, 'import numpy as np\n'), ((244, 263), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (261, 263), True, 'import numpy as np\n'), ((2422, 2456), 'numpy.exp', 'np.exp', (['(alpha[c] + in_ + ex_ + un_)'], {}), '(alpha[c] + in_ + ex_ + un_)\n', (2428, 2456), True, 'import numpy as np\n'), ((2496, 2519), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2513, 2519), True, 'import numpy as np\n'), ((791, 812), 'numpy.sin', 'np.sin', (['(x / z_epsi[c])'], {}), '(x / z_epsi[c])\n', (797, 812), True, 'import numpy as np\n'), ((866, 917), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', "(params['C'], params['C'])"], {}), "(0, 1, (params['C'], params['C']))\n", (883, 917), True, 'import numpy as np\n'), ((1253, 1293), 'numpy.exp', 'np.exp', (['(loggamma_b * (x - shiftLogGamma))'], {}), '(loggamma_b * (x - shiftLogGamma))\n', (1259, 1293), True, 'import numpy as np\n'), ((1383, 1400), 'scipy.special.gamma', 'gamma', (['loggamma_b'], {}), '(loggamma_b)\n', (1388, 1400), False, 'from scipy.special import gamma\n'), ((4309, 4339), 'os.path.join', 'os.path.join', (['dataDir', 'saveStr'], {}), '(dataDir, saveStr)\n', (4321, 4339), False, 'import os\n'), ((1136, 1165), 'numpy.arange', 'np.arange', (['(1)', "(params['R'] + 1)"], {}), "(1, params['R'] + 1)\n", (1145, 1165), True, 'import numpy as np\n'), ((1311, 1336), 'numpy.exp', 'np.exp', (['(x - shiftLogGamma)'], {}), '(x - shiftLogGamma)\n', (1317, 1336), True, 'import numpy as np\n')]
"""
Author: <NAME>
"""
import sys
sys.path.append("..")
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import time
from sklearn import metrics
import numpy as np
import os
'''
_,loss,summary_str,acc,_, cr,out = sess.run([model.train_op, model.loss, model.sum, model.accuracy, model.metric_op, model.class_ratio, model.output], feed_dict={
model.x: train_x,
model.y: train_y,
model.m: train_m,
model.delta: train_delta,
model.x_lengths: train_xlen,
model.mean: dataset.mean,
model.y_mask:y_mask,
model.utp:utp,
model.ufp:ufp,
model.ufn:ufn,
model.keep_prob:model.dropout_rate,
model.isTrain:True
})
'''
class DAE():
'''
    Denoising autoencoder (DAE) for multivariate clinical time series, built on the GRU-D architecture described in https://www.nature.com/articles/s41598-018-24271-9
'''
def __init__(self,
sess,
args,
train_data,
val_data,
test_data):
self.train_data = train_data
self.val_data = val_data
self.test_data = test_data
self.lr = args.lr
self.batch_size = args.batch_size
self.model_path = args.model_path
self.result_path = args.result_path
self.epochs = args.epochs
self.n_inputs = args.n_inputs
self.n_hidden_units = args.n_hidden_units
self.n_classes = args.n_classes
self.checkpoint_dir = args.checkpoint_dir
self.normalize = args.normalize
self.log_dir = args.log_dir
self.dropout_rate = args.dropout_rate
self.n_steps = train_data.maxLength
self.threshold = args.threshold
self.experiment = args.experiment
self.early_stopping_patience = args.early_stopping_patience
self.seed = args.seed
self.x = tf.placeholder(tf.float32, [None, self.n_steps, self.n_inputs])
self.y = tf.placeholder(tf.float32, [None, self.n_steps, self.n_inputs])
self.m = tf.placeholder(tf.float32, [None, self.n_steps, self.n_inputs])
self.delta = tf.placeholder(tf.float32, [None, self.n_steps, self.n_inputs])
self.mean = tf.placeholder(tf.float32, [self.n_inputs,])
self.x_lengths = tf.placeholder(tf.float32, [self.batch_size,])
self.y_mask = tf.placeholder(tf.float32, [None, self.n_steps, self.n_classes])
self.utp = tf.placeholder(tf.float32, [None, self.n_steps, self.n_classes])
self.ufp = tf.placeholder(tf.float32, [None, self.n_steps, self.n_classes])
self.ufn = tf.placeholder(tf.float32, [None, self.n_steps, self.n_classes])
self.keep_prob = tf.placeholder(tf.float32)
self.isTrain = tf.placeholder(tf.bool)
# Output Weights
self.kernel_initializer = tf.initializers.glorot_uniform(seed=self.seed)
self.bias_initializer = tf.initializers.zeros()
self.sess = sess
def getModelDir(self, epoch):
return "{}_{}_{}_{}/epoch{}".format(self.experiment, self.lr,
self.batch_size, self.normalize, epoch)
def RNN(self, x, m, delta, mean, x_lengths):
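        # Forward-pass sketch: flatten and restore the (batch, time, feature)
        # input, run a dropout-wrapped GRU over the padded sequences, then map
        # the hidden states back to the input feature space with two dense
        # layers so the network reconstructs the (masked) series.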
with tf.variable_scope('DAE', reuse=tf.AUTO_REUSE):
# shape of x = [batch_size, n_steps, n_inputs]
# shape of m = [batch_size, n_steps, n_inputs]
# shape of delta = [batch_size, n_steps, n_inputs]
X = tf.reshape(x, [-1, self.n_inputs])
M = tf.reshape(m, [-1, self.n_inputs])
Delta = tf.reshape(delta, [-1, self.n_inputs])
X = tf.reshape(X, [-1, self.n_steps, self.n_inputs])
grud_cell = tf.nn.rnn_cell.GRUCell(num_units=self.n_hidden_units,
activation=None, # Uses tanh if None
reuse=tf.AUTO_REUSE,
                                               kernel_initializer=self.kernel_initializer,  # Glorot-uniform initializer (see __init__)
bias_initializer=self.bias_initializer)
grud_cell=tf.nn.rnn_cell.DropoutWrapper(grud_cell,output_keep_prob=self.keep_prob)
init_state = grud_cell.zero_state(self.batch_size, dtype=tf.float32) # Initializing first hidden state to zeros
outputs, _ = tf.nn.dynamic_rnn(grud_cell, X,
initial_state=init_state,
sequence_length=x_lengths,
time_major=False)
outputs=tf.reshape(outputs,[-1, self.n_hidden_units])
outputs = tf.nn.dropout(outputs,keep_prob=self.keep_prob)
outputs = tf.layers.dense(outputs,units=self.n_hidden_units,
kernel_initializer=self.kernel_initializer,
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-4))
outputs = tf.nn.relu(outputs)
outputs = tf.layers.dense(outputs,units=self.n_inputs,
kernel_initializer=self.kernel_initializer)
outputs=tf.reshape(outputs,[-1,self.n_steps,self.n_inputs])
return outputs
def build(self):
self.pred = self.RNN(self.x, self.m, self.delta, self.mean, self.x_lengths)
self.output = self.pred
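        # Masked MSE: squared error is counted only where the observation mask
        # m is 1, and normalized by the number of observed entries.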
self.loss = tf.reduce_sum(tf.squared_difference(self.pred*self.m,self.y*self.m))/tf.reduce_sum(self.m)
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
self.saver = tf.train.Saver(max_to_keep=None)
loss_sum = tf.summary.scalar("loss", self.loss)
self.sum=tf.summary.merge([loss_sum])
self.board = tf.summary.FileWriter(self.log_dir,self.sess.graph)
def load_model(self, epoch, checkpoint_dir=None):
import re
import os
if checkpoint_dir is None:
checkpoint_dir = os.path.join(self.checkpoint_dir, self.getModelDir(epoch))
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(next(re.finditer(r"(\d+)(?!.*\d)",ckpt_name)).group(0))
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Checkpoint not found")
return False, 0
def save_model(self,epoch,step):
import os
checkpoint_dir = os.path.join(self.checkpoint_dir, self.getModelDir(epoch))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess,os.path.join(checkpoint_dir, self.experiment+'.model'), global_step=step)
def train(self):
tf.global_variables_initializer().run()
start_time=time.time()
idx = 0
epochcount=0
dataset=self.train_data
counter = 0
min_mse = np.inf
early_stopping_counter = 0
while epochcount<self.epochs:
tf.local_variables_initializer().run()
dataset.shuffle()
for train_x,train_y,train_m,train_delta,train_xlen,y_mask,utp,ufp,ufn,files,labels in dataset.getNextBatch(epoch=epochcount):
_,loss,summary_str = self.sess.run([self.train_op, self.loss, self.sum], feed_dict={
self.x: train_x,
self.y: train_y,
self.m: train_m,
self.delta: train_delta,
self.x_lengths: train_xlen,
self.mean: dataset.mean,
self.y_mask:y_mask,
self.utp:utp,
self.ufp:ufp,
self.ufn:ufn,
self.keep_prob:self.dropout_rate,
self.isTrain:True
})
counter += 1
self.board.add_summary(summary_str, counter)
epochcount+=1
if epochcount%1==0:
self.save_model(epochcount, epochcount)
test_counter = (epochcount-1)*counter/epochcount
val_loss=self.test(val=True,counter=test_counter)
print("epoch is : %2.2f, TrainLoss(MSE): %.8f, ValLoss(MSE): %.8f" % (epochcount, loss, val_loss))
# Early Stopping
if(val_loss < min_mse):
min_mse = val_loss
early_stopping_counter = 0
best_epoch = epochcount
else:
early_stopping_counter+=1
# if early_stopping_counter >= self.early_stopping_patience:
# print("Early Stopping Training : Max MSE = %f , Best Epoch : %d"%(min_mse, best_epoch))
# break
return min_mse, best_epoch
def save_output(self,predictions,labels,filenames):
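        # Write one pipe-delimited file per record: a header of feature names,
        # then one row per time step with the imputed feature values, zero
        # fillers, the time index (ICULOS) and the sepsis label.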
for i in range(0,len(predictions)):
folder = os.path.join(self.result_path,self.experiment)
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, filenames[i])
with open(filename,'w') as f:
f.write('HR|O2Sat|Temp|SBP|MAP|DBP|Resp|EtCO2|BaseExcess|HCO3|FiO2|pH|PaCO2|SaO2|AST|BUN|Alkalinephos|Calcium|Chloride|Creatinine|Bilirubin_direct|Glucose|Lactate|Magnesium|Phosphate|Potassium|Bilirubin_total|TroponinI|Hct|Hgb|PTT|WBC|Fibrinogen|Platelets|Age|Gender|Unit1|Unit2|HospAdmTime|ICULOS|SepsisLabel\n')
# For each time step
for j in range(0, len(predictions[i])):
# For each feature
for k in range(0,len(predictions[i][j])):
f.write(str(predictions[i][j][k])+'|')
f.write('0|0|0|'+str(j)+'|'+str(labels[i][j][0])+'\n')
def test(self, val=True, checkpoint_dir=None, test_epoch=100, generate_files=False,counter=0, load_checkpoint=False):
start_time=time.time()
if val:
dataset = self.val_data
else:
dataset = self.test_data
dataset.shuffle()
target = []
predictions = []
test_files = []
predictions_ind = []
labels_ind = []
if load_checkpoint:
self.load_model(test_epoch, checkpoint_dir)
tf.local_variables_initializer().run()
squared_error = 0.0
num_samples = 0
for test_x,test_y,test_m,test_delta,test_xlen,y_mask,utp,ufp,ufn,files,test_labels in dataset.getNextBatch(epoch=30):
summary_str,pred,val_loss = self.sess.run([self.sum, self.output, self.loss], feed_dict={
self.x: test_x,
self.y: test_y,
self.m: test_m,
self.delta: test_delta,
self.mean: dataset.mean,
self.x_lengths: test_xlen,
self.y_mask:y_mask,
self.utp:utp,
self.ufp:ufp,
self.ufn:ufn,
self.keep_prob:1.0,
self.isTrain:False
})
# Remove padding for accuracy and AUC calculation
pred = (pred*(1.0-test_delta))+(test_y*test_delta)
squared_error+= np.sum(((pred-test_y)*test_m*y_mask)**2)
num_samples+=np.sum(test_m) - np.sum(test_delta)
# Reset the non-missing values to actual known values
pred = (pred*(1.0-test_m))+(test_y*test_m)
for i in range(0,test_xlen.shape[0]):
outputs = pred[i, 0:test_xlen[i]]*dataset.std+dataset.mean
predictions_ind.append(list(outputs))
labels_ind.append(list(test_labels[i, 0:test_xlen[i]]))
test_files.append(files[i])
if generate_files:
self.save_output(predictions_ind,labels_ind,test_files)
mse = squared_error/num_samples
return mse
| [
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.contrib.layers.l2_regularizer",
"re.finditer",
"tensorflow.reshape",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.local_variables_initializer",
"tensorflow.initializers.glorot_uniform",
"tensorflow.summary.merge",
"os.path.join",
"sys.pat... | [((34, 55), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (49, 55), False, 'import sys\n'), ((2053, 2116), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_inputs]'], {}), '(tf.float32, [None, self.n_steps, self.n_inputs])\n', (2067, 2116), True, 'import tensorflow as tf\n'), ((2134, 2197), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_inputs]'], {}), '(tf.float32, [None, self.n_steps, self.n_inputs])\n', (2148, 2197), True, 'import tensorflow as tf\n'), ((2215, 2278), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_inputs]'], {}), '(tf.float32, [None, self.n_steps, self.n_inputs])\n', (2229, 2278), True, 'import tensorflow as tf\n'), ((2300, 2363), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_inputs]'], {}), '(tf.float32, [None, self.n_steps, self.n_inputs])\n', (2314, 2363), True, 'import tensorflow as tf\n'), ((2384, 2427), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.n_inputs]'], {}), '(tf.float32, [self.n_inputs])\n', (2398, 2427), True, 'import tensorflow as tf\n'), ((2454, 2499), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.batch_size]'], {}), '(tf.float32, [self.batch_size])\n', (2468, 2499), True, 'import tensorflow as tf\n'), ((2523, 2587), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_classes]'], {}), '(tf.float32, [None, self.n_steps, self.n_classes])\n', (2537, 2587), True, 'import tensorflow as tf\n'), ((2607, 2671), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_classes]'], {}), '(tf.float32, [None, self.n_steps, self.n_classes])\n', (2621, 2671), True, 'import tensorflow as tf\n'), ((2691, 2755), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_classes]'], {}), '(tf.float32, [None, self.n_steps, self.n_classes])\n', (2705, 2755), True, 'import tensorflow as tf\n'), ((2775, 2839), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_steps, self.n_classes]'], {}), '(tf.float32, [None, self.n_steps, self.n_classes])\n', (2789, 2839), True, 'import tensorflow as tf\n'), ((2865, 2891), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2879, 2891), True, 'import tensorflow as tf\n'), ((2915, 2938), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (2929, 2938), True, 'import tensorflow as tf\n'), ((2999, 3045), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (3029, 3045), True, 'import tensorflow as tf\n'), ((3078, 3101), 'tensorflow.initializers.zeros', 'tf.initializers.zeros', ([], {}), '()\n', (3099, 3101), True, 'import tensorflow as tf\n'), ((5827, 5859), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (5841, 5859), True, 'import tensorflow as tf\n'), ((5879, 5915), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (5896, 5915), True, 'import tensorflow as tf\n'), ((5942, 5970), 'tensorflow.summary.merge', 'tf.summary.merge', (['[loss_sum]'], {}), '([loss_sum])\n', (5958, 5970), True, 'import tensorflow as tf\n'), ((5992, 6044), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.log_dir', 'self.sess.graph'], {}), 
'(self.log_dir, self.sess.graph)\n', (6013, 6044), True, 'import tensorflow as tf\n'), ((6279, 6324), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6308, 6324), True, 'import tensorflow as tf\n'), ((7222, 7233), 'time.time', 'time.time', ([], {}), '()\n', (7231, 7233), False, 'import time\n'), ((10365, 10376), 'time.time', 'time.time', ([], {}), '()\n', (10374, 10376), False, 'import time\n'), ((3380, 3425), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""DAE"""'], {'reuse': 'tf.AUTO_REUSE'}), "('DAE', reuse=tf.AUTO_REUSE)\n", (3397, 3425), True, 'import tensorflow as tf\n'), ((3624, 3658), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, self.n_inputs]'], {}), '(x, [-1, self.n_inputs])\n', (3634, 3658), True, 'import tensorflow as tf\n'), ((3675, 3709), 'tensorflow.reshape', 'tf.reshape', (['m', '[-1, self.n_inputs]'], {}), '(m, [-1, self.n_inputs])\n', (3685, 3709), True, 'import tensorflow as tf\n'), ((3730, 3768), 'tensorflow.reshape', 'tf.reshape', (['delta', '[-1, self.n_inputs]'], {}), '(delta, [-1, self.n_inputs])\n', (3740, 3768), True, 'import tensorflow as tf\n'), ((3785, 3833), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, self.n_steps, self.n_inputs]'], {}), '(X, [-1, self.n_steps, self.n_inputs])\n', (3795, 3833), True, 'import tensorflow as tf\n'), ((3859, 4042), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', ([], {'num_units': 'self.n_hidden_units', 'activation': 'None', 'reuse': 'tf.AUTO_REUSE', 'kernel_initializer': 'self.kernel_initializer', 'bias_initializer': 'self.bias_initializer'}), '(num_units=self.n_hidden_units, activation=None,\n reuse=tf.AUTO_REUSE, kernel_initializer=self.kernel_initializer,\n bias_initializer=self.bias_initializer)\n', (3881, 4042), True, 'import tensorflow as tf\n'), ((4305, 4378), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['grud_cell'], {'output_keep_prob': 'self.keep_prob'}), '(grud_cell, output_keep_prob=self.keep_prob)\n', (4334, 4378), True, 'import tensorflow as tf\n'), ((4527, 4634), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['grud_cell', 'X'], {'initial_state': 'init_state', 'sequence_length': 'x_lengths', 'time_major': '(False)'}), '(grud_cell, X, initial_state=init_state, sequence_length=\n x_lengths, time_major=False)\n', (4544, 4634), True, 'import tensorflow as tf\n'), ((4796, 4842), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, self.n_hidden_units]'], {}), '(outputs, [-1, self.n_hidden_units])\n', (4806, 4842), True, 'import tensorflow as tf\n'), ((4864, 4912), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['outputs'], {'keep_prob': 'self.keep_prob'}), '(outputs, keep_prob=self.keep_prob)\n', (4877, 4912), True, 'import tensorflow as tf\n'), ((5190, 5209), 'tensorflow.nn.relu', 'tf.nn.relu', (['outputs'], {}), '(outputs)\n', (5200, 5209), True, 'import tensorflow as tf\n'), ((5232, 5326), 'tensorflow.layers.dense', 'tf.layers.dense', (['outputs'], {'units': 'self.n_inputs', 'kernel_initializer': 'self.kernel_initializer'}), '(outputs, units=self.n_inputs, kernel_initializer=self.\n kernel_initializer)\n', (5247, 5326), True, 'import tensorflow as tf\n'), ((5382, 5436), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, self.n_steps, self.n_inputs]'], {}), '(outputs, [-1, self.n_steps, self.n_inputs])\n', (5392, 5436), True, 'import tensorflow as tf\n'), ((5706, 5727), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.m'], {}), '(self.m)\n', (5719, 5727), True, 'import 
tensorflow as tf\n'), ((6398, 6442), 'os.path.basename', 'os.path.basename', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (6414, 6442), False, 'import os\n'), ((6952, 6982), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (6966, 6982), False, 'import os\n'), ((6996, 7023), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7007, 7023), False, 'import os\n'), ((7058, 7114), 'os.path.join', 'os.path.join', (['checkpoint_dir', "(self.experiment + '.model')"], {}), "(checkpoint_dir, self.experiment + '.model')\n", (7070, 7114), False, 'import os\n'), ((9335, 9382), 'os.path.join', 'os.path.join', (['self.result_path', 'self.experiment'], {}), '(self.result_path, self.experiment)\n', (9347, 9382), False, 'import os\n'), ((9484, 9518), 'os.path.join', 'os.path.join', (['folder', 'filenames[i]'], {}), '(folder, filenames[i])\n', (9496, 9518), False, 'import os\n'), ((11630, 11678), 'numpy.sum', 'np.sum', (['(((pred - test_y) * test_m * y_mask) ** 2)'], {}), '(((pred - test_y) * test_m * y_mask) ** 2)\n', (11636, 11678), True, 'import numpy as np\n'), ((5651, 5709), 'tensorflow.squared_difference', 'tf.squared_difference', (['(self.pred * self.m)', '(self.y * self.m)'], {}), '(self.pred * self.m, self.y * self.m)\n', (5672, 5709), True, 'import tensorflow as tf\n'), ((5752, 5783), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (5774, 5783), True, 'import tensorflow as tf\n'), ((6485, 6524), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'ckpt_name'], {}), '(checkpoint_dir, ckpt_name)\n', (6497, 6524), False, 'import os\n'), ((7163, 7196), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7194, 7196), True, 'import tensorflow as tf\n'), ((9401, 9423), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (9415, 9423), False, 'import os\n'), ((9441, 9460), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (9452, 9460), False, 'import os\n'), ((10724, 10756), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (10754, 10756), True, 'import tensorflow as tf\n'), ((11696, 11710), 'numpy.sum', 'np.sum', (['test_m'], {}), '(test_m)\n', (11702, 11710), True, 'import numpy as np\n'), ((11713, 11731), 'numpy.sum', 'np.sum', (['test_delta'], {}), '(test_delta)\n', (11719, 11731), True, 'import numpy as np\n'), ((5128, 5168), 'tensorflow.contrib.layers.l2_regularizer', 'tf.contrib.layers.l2_regularizer', (['(0.0001)'], {}), '(0.0001)\n', (5160, 5168), True, 'import tensorflow as tf\n'), ((7433, 7465), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (7463, 7465), True, 'import tensorflow as tf\n'), ((6557, 6598), 're.finditer', 're.finditer', (['"""(\\\\d+)(?!.*\\\\d)"""', 'ckpt_name'], {}), "('(\\\\d+)(?!.*\\\\d)', ckpt_name)\n", (6568, 6598), False, 'import re\n')] |
import os
import glob
import argparse
from collections import OrderedDict
import numpy as np
def parse_args():
""" Parse input arguments"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-d', '--dataset', type=str,
default='./data/50_salads_dataset')
parser.add_argument(
'-s', '--segmented_activities', type=str,
default='activity')
parser.add_argument(
'-l', '--labels', type=str,
default='labels')
parser.add_argument(
'-v', '--level', type=str,
default='low')
args = parser.parse_args()
assert os.path.exists(args.dataset)
assert args.level == 'low' or args.level == 'mid'
return args
def process_key(activity, level):
""" Return the actual activity class name, according to the given level
"""
key = activity[activity.index('_')+1:]
if level == 'mid':
key = key[:key.rindex('_')]
return key
def main():
"""Main function"""
args = parse_args()
# retrieve classes
lbl_pth = os.path.join(
args.dataset, args.labels, 'actions_{}lvl.txt'.format(args.level))
assert os.path.exists(lbl_pth)
classes = open(lbl_pth).read().splitlines()
# retrieve list of video paths
segmented_pth = os.path.join(
args.dataset, args.segmented_activities)
assert os.path.exists(segmented_pth)
vid_list = glob.glob(segmented_pth + '/*')
# prepare frequency dictionary
freq = OrderedDict()
for cls in classes:
freq[cls] = []
# go through each segmented videos
for vid_pth in vid_list:
activities = os.listdir(vid_pth)
activities.sort()
for activity in activities:
# ignore keys not in the analyzing level
key = process_key(activity, args.level)
if key not in classes:
continue
# count number of frames of this activity
n_frames = len(glob.glob(os.path.join(
vid_pth, activity, '*.jpg')))
# append to dictionary
freq[key].append(n_frames)
# analyze
print('Number of frames per activity class')
for key in freq.keys():
foo = freq[key]
print(' %s\tmin=%d\tmax=%d\tmedian=%d' % \
(key, np.min(foo), np.max(foo), np.median(foo)))
print(80*'-' + '\n')
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"numpy.median",
"os.path.exists",
"numpy.min",
"numpy.max",
"glob.glob",
"collections.OrderedDict",
"os.path.join",
"os.listdir"
] | [((159, 184), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (182, 184), False, 'import argparse\n'), ((608, 636), 'os.path.exists', 'os.path.exists', (['args.dataset'], {}), '(args.dataset)\n', (622, 636), False, 'import os\n'), ((1145, 1168), 'os.path.exists', 'os.path.exists', (['lbl_pth'], {}), '(lbl_pth)\n', (1159, 1168), False, 'import os\n'), ((1273, 1326), 'os.path.join', 'os.path.join', (['args.dataset', 'args.segmented_activities'], {}), '(args.dataset, args.segmented_activities)\n', (1285, 1326), False, 'import os\n'), ((1347, 1376), 'os.path.exists', 'os.path.exists', (['segmented_pth'], {}), '(segmented_pth)\n', (1361, 1376), False, 'import os\n'), ((1392, 1423), 'glob.glob', 'glob.glob', (["(segmented_pth + '/*')"], {}), "(segmented_pth + '/*')\n", (1401, 1423), False, 'import glob\n'), ((1471, 1484), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1482, 1484), False, 'from collections import OrderedDict\n'), ((1622, 1641), 'os.listdir', 'os.listdir', (['vid_pth'], {}), '(vid_pth)\n', (1632, 1641), False, 'import os\n'), ((1961, 2001), 'os.path.join', 'os.path.join', (['vid_pth', 'activity', '"""*.jpg"""'], {}), "(vid_pth, activity, '*.jpg')\n", (1973, 2001), False, 'import os\n'), ((2288, 2299), 'numpy.min', 'np.min', (['foo'], {}), '(foo)\n', (2294, 2299), True, 'import numpy as np\n'), ((2301, 2312), 'numpy.max', 'np.max', (['foo'], {}), '(foo)\n', (2307, 2312), True, 'import numpy as np\n'), ((2314, 2328), 'numpy.median', 'np.median', (['foo'], {}), '(foo)\n', (2323, 2328), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import math as mt
from timeit import default_timer as timer
def read_data():
train = pd.read_csv('./data/train_data.csv')
test = pd.read_csv('./data/test_data.csv')
sum_ratings = 0
ratings = train['rating']
for r in ratings:
sum_ratings += r
mean = sum_ratings / train.shape[0]
return train, test, mean
def calculateSVD(data, mean, k, l, r, iterations):
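    # Biased matrix factorization fitted by SGD: each rating is modeled as
    # mean + b_u[user] + b_i[movie] + dot(P[user], Q[movie]), with learning
    # rate l and L2 regularization weight r on the latent factors.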
users = data.user_id.unique()[-1]
movies = np.sort(data.movie_id.unique())[-1]
b_u = np.zeros((users))
b_i = np.zeros((movies))
P = np.random.uniform(0,0.1,[users, k])
Q = np.random.uniform(0,0.1,[movies, k])
error = []
    for epoch in range(iterations):
sq_error = 0
for row in data.itertuples():
u = row.user_id
i = row.movie_id
r_u_i = row.rating
pred = mean + b_u[u - 1] + b_i[i - 1] + np.dot(P[u-1,:], Q[i-1,:])
e_u_i = r_u_i - pred
            sq_error += e_u_i * e_u_i  # accumulate squared error over all ratings
b_u[u - 1] = b_u[u - 1] + l * e_u_i
b_i[i - 1] = b_i[i - 1] + l * e_u_i
for f in range(k):
                temp_u_f = P[u - 1][f]
                P[u - 1][f] = P[u - 1][f] + l * (e_u_i * Q[i - 1][f] - r * P[u - 1][f])
                Q[i - 1][f] = Q[i - 1][f] + l * (e_u_i * temp_u_f - r * Q[i - 1][f])
error.append(mt.sqrt(sq_error / len(data.index)))
return b_u, b_i, P, Q, error
def predict(q_id, user, movie, bu, bi, p, q, k, mean):
b_u_i = mean + bu[user - 1] + bi[movie - 1]
s_p_q = 0
for i in range(k):
s_p_q = s_p_q + (p[user - 1][i] * q[movie - 1][i])
prediction = b_u_i + s_p_q
# print("%d,%f" % (q_id,prediction))
def main():
    data, test, mean = read_data()
    bu, bi, p, q, errors = calculateSVD(data, mean, 2, 0.05, 0.002, 10)
    start_global = timer()
for row in test.itertuples():
start_it = timer()
predict(row.id, row.user_id, row.movie_id, bu, bi, p, q, 2, mean)
end_it = timer()
time_elapsed_it = end_it - start_it
print(row.id, time_elapsed_it)
end_global = timer()
time_elapsed_global = end_global - start_global
print(time_elapsed_global)
if __name__ == '__main__':
main() | [
"numpy.random.uniform",
"pandas.read_csv",
"timeit.default_timer",
"numpy.zeros",
"numpy.dot"
] | [((130, 166), 'pandas.read_csv', 'pd.read_csv', (['"""./data/train_data.csv"""'], {}), "('./data/train_data.csv')\n", (141, 166), True, 'import pandas as pd\n'), ((178, 213), 'pandas.read_csv', 'pd.read_csv', (['"""./data/test_data.csv"""'], {}), "('./data/test_data.csv')\n", (189, 213), True, 'import pandas as pd\n'), ((546, 561), 'numpy.zeros', 'np.zeros', (['users'], {}), '(users)\n', (554, 561), True, 'import numpy as np\n'), ((574, 590), 'numpy.zeros', 'np.zeros', (['movies'], {}), '(movies)\n', (582, 590), True, 'import numpy as np\n'), ((606, 643), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.1)', '[users, k]'], {}), '(0, 0.1, [users, k])\n', (623, 643), True, 'import numpy as np\n'), ((650, 688), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.1)', '[movies, k]'], {}), '(0, 0.1, [movies, k])\n', (667, 688), True, 'import numpy as np\n'), ((1897, 1904), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1902, 1904), True, 'from timeit import default_timer as timer\n'), ((1998, 2005), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2003, 2005), True, 'from timeit import default_timer as timer\n'), ((2278, 2285), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2283, 2285), True, 'from timeit import default_timer as timer\n'), ((2060, 2067), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2065, 2067), True, 'from timeit import default_timer as timer\n'), ((2169, 2176), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2174, 2176), True, 'from timeit import default_timer as timer\n'), ((953, 985), 'numpy.dot', 'np.dot', (['P[u - 1, :]', 'Q[i - 1, :]'], {}), '(P[u - 1, :], Q[i - 1, :])\n', (959, 985), True, 'import numpy as np\n')] |
"""A module containing the core class to specify a Factor Graph."""
from __future__ import annotations
import collections
import copy
import functools
import inspect
import typing
from dataclasses import asdict, dataclass
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
FrozenSet,
Hashable,
List,
Mapping,
Optional,
OrderedDict,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import jax
import jax.numpy as jnp
import numpy as np
from jax.scipy.special import logsumexp
from pgmax.bp import infer
from pgmax.factors import FAC_TO_VAR_UPDATES
from pgmax.fg import groups, nodes
from pgmax.groups.enumeration import EnumerationFactorGroup
from pgmax.utils import cached_property
@dataclass
class FactorGraph:
"""Class for representing a factor graph.
Factors in a graph are clustered in factor groups, which are grouped according to their factor types.
Args:
variables: A single VariableGroup or a container containing variable groups.
If not a single VariableGroup, supported containers include mapping and sequence.
For a mapping, the keys of the mapping are used to index the variable groups.
For a sequence, the indices of the sequence are used to index the variable groups.
Note that if not a single VariableGroup, a CompositeVariableGroup will be created from
this input, and the individual VariableGroups will need to be accessed by indexing.
"""
variables: Union[
Mapping[Any, groups.VariableGroup],
Sequence[groups.VariableGroup],
groups.VariableGroup,
]
def __post_init__(self):
if isinstance(self.variables, groups.VariableGroup):
self._variable_group = self.variables
else:
self._variable_group = groups.CompositeVariableGroup(self.variables)
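        # Cumulative sum of the number of states per variable; entry vv is the
        # start offset of variable vv in the flat evidence/messages arrays.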
vars_num_states_cumsum = np.insert(
np.array(
[variable.num_states for variable in self._variable_group.variables],
dtype=int,
).cumsum(),
0,
0,
)
# Useful objects to build the FactorGraph
self._factor_types_to_groups: OrderedDict[
Type, List[groups.FactorGroup]
] = collections.OrderedDict(
[(factor_type, []) for factor_type in FAC_TO_VAR_UPDATES]
)
self._factor_types_to_variable_names_for_factors: OrderedDict[
Type, Set[FrozenSet]
] = collections.OrderedDict(
[(factor_type, set()) for factor_type in FAC_TO_VAR_UPDATES]
)
# See FactorGraphState docstrings for documentation on the following fields
self._num_var_states = vars_num_states_cumsum[-1]
self._vars_to_starts = MappingProxyType(
{
variable: vars_num_states_cumsum[vv]
for vv, variable in enumerate(self._variable_group.variables)
}
)
self._named_factor_groups: Dict[Hashable, groups.FactorGroup] = {}
def __hash__(self) -> int:
all_factor_groups = tuple(
[
factor_group
for factor_groups_per_type in self._factor_types_to_groups.values()
for factor_group in factor_groups_per_type
]
)
return hash(all_factor_groups)
def add_factor(
self,
variable_names: List,
factor_configs: np.ndarray,
log_potentials: Optional[np.ndarray] = None,
name: Optional[str] = None,
) -> None:
"""Function to add a single factor to the FactorGraph.
Args:
variable_names: A list containing the connected variable names.
Variable names are tuples of the type (variable_group_name, variable_name_within_variable_group)
factor_configs: Array of shape (num_val_configs, num_variables)
An array containing explicit enumeration of all valid configurations.
If the connected variables have n1, n2, ... states, 1 <= num_val_configs <= n1 * n2 * ...
factor_configs[config_idx, variable_idx] represents the state of variable_names[variable_idx]
in the configuration factor_configs[config_idx].
log_potentials: Optional array of shape (num_val_configs,).
If specified, log_potentials[config_idx] contains the log of the potential value for
the valid configuration factor_configs[config_idx].
If None, it is assumed the log potential is uniform 0 and such an array is automatically
initialized.
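        Example:
            To add a factor enforcing equality between two binary variables to a
            FactorGraph fg, run (the variable names below are placeholders)::
                fg.add_factor(
                    variable_names=[name_of_var_1, name_of_var_2],
                    factor_configs=np.array([[0, 0], [1, 1]]),
                )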
"""
factor_group = EnumerationFactorGroup(
self._variable_group,
variable_names_for_factors=[variable_names],
factor_configs=factor_configs,
log_potentials=log_potentials,
)
self._register_factor_group(factor_group, name)
def add_factor_by_type(
self, variable_names: List, factor_type: type, *args, **kwargs
) -> None:
"""Function to add a single factor to the FactorGraph.
Args:
variable_names: A list containing the connected variable names.
Variable names are tuples of the type (variable_group_name, variable_name_within_variable_group)
factor_type: Type of factor to be added
args: Args to be passed to the factor_type.
kwargs: kwargs to be passed to the factor_type, and an optional "name" argument
for specifying the name of a named factor group.
Example:
To add an ORFactor to a FactorGraph fg, run::
fg.add_factor_by_type(
variable_names=variables_names_for_OR_factor,
factor_type=logical.ORFactor
)
"""
if factor_type not in FAC_TO_VAR_UPDATES:
raise ValueError(
f"Type {factor_type} is not one of the supported factor types {FAC_TO_VAR_UPDATES.keys()}"
)
name = kwargs.pop("name", None)
variables = tuple(self._variable_group[variable_names])
factor = factor_type(variables, *args, **kwargs)
factor_group = groups.SingleFactorGroup(
variable_group=self._variable_group,
variable_names_for_factors=[variable_names],
factor=factor,
)
self._register_factor_group(factor_group, name)
def add_factor_group(self, factory: Callable, *args, **kwargs) -> None:
"""Add a factor group to the factor graph
Args:
factory: Factory function that takes args and kwargs as input and outputs a factor group.
args: Args to be passed to the factory function.
kwargs: kwargs to be passed to the factory function, and an optional "name" argument
for specifying the name of a named factor group.
"""
name = kwargs.pop("name", None)
factor_group = factory(self._variable_group, *args, **kwargs)
self._register_factor_group(factor_group, name)
def _register_factor_group(
self, factor_group: groups.FactorGroup, name: Optional[str] = None
) -> None:
"""Register a factor group to the factor graph, by updating the factor graph state.
Args:
factor_group: The factor group to be registered to the factor graph.
name: Optional name of the factor group.
Raises:
ValueError: If the factor group with the same name or a factor involving the same variables
already exists in the factor graph.
"""
if name in self._named_factor_groups:
raise ValueError(
f"A factor group with the name {name} already exists. Please choose a different name!"
)
factor_type = factor_group.factor_type
for var_names_for_factor in factor_group.variable_names_for_factors:
var_names = frozenset(var_names_for_factor)
if (
var_names
in self._factor_types_to_variable_names_for_factors[factor_type]
):
raise ValueError(
f"A Factor of type {factor_type} involving variables {var_names} already exists. Please merge the corresponding factors."
)
self._factor_types_to_variable_names_for_factors[factor_type].add(var_names)
self._factor_types_to_groups[factor_type].append(factor_group)
if name is not None:
self._named_factor_groups[name] = factor_group
@functools.lru_cache(None)
def compute_offsets(self) -> None:
"""Compute factor messages offsets for the factor types and factor groups
in the flattened array of message.
Also compute log potentials offsets for factor groups.
See FactorGraphState for documentation on the following fields
        If offsets have already been compiled, do nothing.
"""
# Message offsets for ftov messages
self._factor_type_to_msgs_range = collections.OrderedDict()
self._factor_group_to_msgs_starts = collections.OrderedDict()
factor_num_states_cumsum = 0
# Log potentials offsets
self._factor_type_to_potentials_range = collections.OrderedDict()
self._factor_group_to_potentials_starts = collections.OrderedDict()
factor_num_configs_cumsum = 0
for factor_type, factors_groups_by_type in self._factor_types_to_groups.items():
factor_type_num_states_start = factor_num_states_cumsum
factor_type_num_configs_start = factor_num_configs_cumsum
for factor_group in factors_groups_by_type:
self._factor_group_to_msgs_starts[
factor_group
] = factor_num_states_cumsum
self._factor_group_to_potentials_starts[
factor_group
] = factor_num_configs_cumsum
factor_num_states_cumsum += sum(factor_group.factor_edges_num_states)
factor_num_configs_cumsum += (
factor_group.factor_group_log_potentials.shape[0]
)
self._factor_type_to_msgs_range[factor_type] = (
factor_type_num_states_start,
factor_num_states_cumsum,
)
self._factor_type_to_potentials_range[factor_type] = (
factor_type_num_configs_start,
factor_num_configs_cumsum,
)
self._total_factor_num_states = factor_num_states_cumsum
self._total_factor_num_configs = factor_num_configs_cumsum
@cached_property
def wiring(self) -> OrderedDict[Type, nodes.Wiring]:
"""Function to compile wiring for belief propagation.
        If wiring has already been compiled, do nothing.
Returns:
            A dictionary mapping each factor type to its wiring.
"""
wiring = collections.OrderedDict(
[
(
factor_type,
[
factor_group.compile_wiring(self._vars_to_starts)
for factor_group in self._factor_types_to_groups[factor_type]
],
)
for factor_type in self._factor_types_to_groups
]
)
wiring = collections.OrderedDict(
[
(factor_type, factor_type.concatenate_wirings(wiring[factor_type]))
for factor_type in wiring
]
)
return wiring
@cached_property
def log_potentials(self) -> OrderedDict[Type, np.ndarray]:
"""Function to compile potential array for belief propagation.
If potential array has already been compiled, do nothing.
Returns:
            A dictionary mapping each factor type to the array of the log of the potential
function for each valid configuration
"""
log_potentials = collections.OrderedDict()
for factor_type, factors_groups_by_type in self._factor_types_to_groups.items():
if len(factors_groups_by_type) == 0:
log_potentials[factor_type] = np.empty((0,))
else:
log_potentials[factor_type] = np.concatenate(
[
factor_group.factor_group_log_potentials
for factor_group in factors_groups_by_type
]
)
return log_potentials
@cached_property
def factors(self) -> OrderedDict[Type, Tuple[nodes.Factor, ...]]:
"""Mapping factor type to individual factors in the factor graph.
This function is only called on demand when the user requires it."""
print(
"Factors have not been added to the factor graph yet, this may take a while..."
)
factors: OrderedDict[Type, Tuple[nodes.Factor, ...]] = collections.OrderedDict(
[
(
factor_type,
tuple(
[
factor
for factor_group in self.factor_groups[factor_type]
for factor in factor_group.factors
]
),
)
for factor_type in self.factor_groups
]
)
return factors
@property
def factor_groups(self) -> OrderedDict[Type, List[groups.FactorGroup]]:
"""Tuple of factor groups in the factor graph"""
return self._factor_types_to_groups
@cached_property
def fg_state(self) -> FactorGraphState:
"""Current factor graph state given the added factors."""
# Preliminary computations
self.compute_offsets()
log_potentials = np.concatenate(
[self.log_potentials[factor_type] for factor_type in self.log_potentials]
)
return FactorGraphState(
variable_group=self._variable_group,
vars_to_starts=self._vars_to_starts,
num_var_states=self._num_var_states,
total_factor_num_states=self._total_factor_num_states,
named_factor_groups=copy.copy(self._named_factor_groups),
factor_type_to_msgs_range=copy.copy(self._factor_type_to_msgs_range),
factor_type_to_potentials_range=copy.copy(
self._factor_type_to_potentials_range
),
factor_group_to_potentials_starts=copy.copy(
self._factor_group_to_potentials_starts
),
log_potentials=log_potentials,
wiring=self.wiring,
)
@property
def bp_state(self) -> BPState:
"""Relevant information for doing belief propagation."""
# Preliminary computations
self.compute_offsets()
return BPState(
log_potentials=LogPotentials(fg_state=self.fg_state),
ftov_msgs=FToVMessages(fg_state=self.fg_state),
evidence=Evidence(fg_state=self.fg_state),
)
@dataclass(frozen=True, eq=False)
class FactorGraphState:
"""FactorGraphState.
Args:
variable_group: A variable group containing all the variables in the FactorGraph.
vars_to_starts: Maps variables to their starting indices in the flat evidence array.
flat_evidence[vars_to_starts[variable]: vars_to_starts[variable] + variable.num_var_states]
contains evidence to the variable.
num_var_states: Total number of variable states.
total_factor_num_states: Size of the flat ftov messages array.
named_factor_groups: Maps the names of named factor groups to the corresponding factor groups.
factor_type_to_msgs_range: Maps factors types to their start and end indices in the flat ftov messages.
factor_type_to_potentials_range: Maps factor types to their start and end indices in the flat log potentials.
factor_group_to_potentials_starts: Maps factor groups to their starting indices in the flat log potentials.
log_potentials: Flat log potentials array concatenated for each factor type.
wiring: Wiring derived for each factor type.
"""
variable_group: groups.VariableGroup
vars_to_starts: Mapping[nodes.Variable, int]
num_var_states: int
total_factor_num_states: int
named_factor_groups: Mapping[Hashable, groups.FactorGroup]
factor_type_to_msgs_range: OrderedDict[type, Tuple[int, int]]
factor_type_to_potentials_range: OrderedDict[type, Tuple[int, int]]
factor_group_to_potentials_starts: OrderedDict[groups.FactorGroup, int]
    log_potentials: np.ndarray
wiring: OrderedDict[type, nodes.Wiring]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
if isinstance(getattr(self, field), Mapping):
object.__setattr__(self, field, MappingProxyType(getattr(self, field)))
@dataclass(frozen=True, eq=False)
class BPState:
"""Container class for belief propagation states, including log potentials,
ftov messages and evidence (unary log potentials).
Args:
log_potentials: log potentials of the model
ftov_msgs: factor to variable messages
evidence: evidence (unary log potentials) for variables.
Raises:
ValueError: If log_potentials, ftov_msgs or evidence are not derived from the same
FactorGraphState.
"""
log_potentials: LogPotentials
ftov_msgs: FToVMessages
evidence: Evidence
def __post_init__(self):
if (self.log_potentials.fg_state != self.ftov_msgs.fg_state) or (
self.ftov_msgs.fg_state != self.evidence.fg_state
):
raise ValueError(
"log_potentials, ftov_msgs and evidence should be derived from the same fg_state."
)
@property
def fg_state(self) -> FactorGraphState:
return self.log_potentials.fg_state
@functools.partial(jax.jit, static_argnames="fg_state")
def update_log_potentials(
log_potentials: jnp.ndarray,
updates: Dict[Any, jnp.ndarray],
fg_state: FactorGraphState,
) -> jnp.ndarray:
"""Function to update log_potentials.
Args:
log_potentials: A flat jnp array containing log_potentials.
updates: A dictionary containing updates for log_potentials
fg_state: Factor graph state
Returns:
A flat jnp array containing updated log_potentials.
Raises: ValueError if
(1) Provided log_potentials shape does not match the expected log_potentials shape.
(2) Provided name is not valid for log_potentials updates.
"""
for name in updates:
data = updates[name]
if name in fg_state.named_factor_groups:
factor_group = fg_state.named_factor_groups[name]
flat_data = factor_group.flatten(data)
if flat_data.shape != factor_group.factor_group_log_potentials.shape:
raise ValueError(
f"Expected log potentials shape {factor_group.factor_group_log_potentials.shape} "
f"for factor group {name}. Got incompatible data shape {data.shape}."
)
start = fg_state.factor_group_to_potentials_starts[factor_group]
log_potentials = log_potentials.at[start : start + flat_data.shape[0]].set(
flat_data
)
else:
raise ValueError(f"Invalid name {name} for log potentials updates.")
return log_potentials
@dataclass(frozen=True, eq=False)
class LogPotentials:
"""Class for storing and manipulating log potentials.
Args:
fg_state: Factor graph state
value: Optionally specify an initial value
Raises:
ValueError: If provided value shape does not match the expected log_potentials shape.
"""
fg_state: FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(self, "value", self.fg_state.log_potentials)
else:
if not self.value.shape == self.fg_state.log_potentials.shape:
raise ValueError(
f"Expected log potentials shape {self.fg_state.log_potentials.shape}. "
f"Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
def __getitem__(self, name: Any) -> np.ndarray:
"""Function to query log potentials for a named factor group or a factor.
Args:
name: Name of a named factor group, or a frozenset containing the set
of connected variables for the queried factor.
Returns:
The queried log potentials.
"""
value = cast(np.ndarray, self.value)
if not isinstance(name, Hashable):
name = frozenset(name)
if name in self.fg_state.named_factor_groups:
factor_group = self.fg_state.named_factor_groups[name]
start = self.fg_state.factor_group_to_potentials_starts[factor_group]
log_potentials = value[
start : start + factor_group.factor_group_log_potentials.shape[0]
]
else:
raise ValueError(f"Invalid name {name} for log potentials updates.")
return log_potentials
def __setitem__(
self,
name: Any,
data: Union[np.ndarray, jnp.ndarray],
):
"""Set the log potentials for a named factor group or a factor.
Args:
name: Name of a named factor group, or a frozenset containing the set
of connected variables for the queried factor.
data: Array containing the log potentials for the named factor group
or the factor.
"""
if not isinstance(name, Hashable):
name = frozenset(name)
object.__setattr__(
self,
"value",
np.asarray(
update_log_potentials(
jax.device_put(self.value),
{name: jax.device_put(data)},
self.fg_state,
)
),
)
@functools.partial(jax.jit, static_argnames="fg_state")
def update_ftov_msgs(
ftov_msgs: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update ftov_msgs.
Args:
ftov_msgs: A flat jnp array containing ftov_msgs.
updates: A dictionary containing updates for ftov_msgs
fg_state: Factor graph state
Returns:
A flat jnp array containing updated ftov_msgs.
Raises: ValueError if:
(1) provided ftov_msgs shape does not match the expected ftov_msgs shape.
(2) provided name is not valid for ftov_msgs updates.
"""
for names in updates:
data = updates[names]
if names in fg_state.variable_group.names:
variable = fg_state.variable_group[names]
if data.shape != (variable.num_states,):
raise ValueError(
f"Given belief shape {data.shape} does not match expected "
f"shape {(variable.num_states,)} for variable {names}."
)
var_states_for_edges = np.concatenate(
[
wiring_by_type.var_states_for_edges
for wiring_by_type in fg_state.wiring.values()
]
)
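            # A variable may be connected to several factor edges; find where
            # each of its edge slices starts and spread the target belief
            # uniformly across them (hence the division by starts.shape[0]).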
starts = np.nonzero(
var_states_for_edges == fg_state.vars_to_starts[variable]
)[0]
for start in starts:
ftov_msgs = ftov_msgs.at[start : start + variable.num_states].set(
data / starts.shape[0]
)
else:
raise ValueError(
"Invalid names for setting messages. "
"Supported names include a tuple of length 2 with factor "
"and variable names for directly setting factor to variable "
"messages, or a valid variable name for spreading expected "
"beliefs at a variable"
)
return ftov_msgs
@dataclass(frozen=True, eq=False)
class FToVMessages:
"""Class for storing and manipulating factor to variable messages.
Args:
fg_state: Factor graph state
value: Optionally specify initial value for ftov messages
Raises: ValueError if provided value does not match expected ftov messages shape.
"""
fg_state: FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(
self, "value", np.zeros(self.fg_state.total_factor_num_states)
)
else:
if not self.value.shape == (self.fg_state.total_factor_num_states,):
raise ValueError(
f"Expected messages shape {(self.fg_state.total_factor_num_states,)}. "
f"Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
@typing.overload
def __setitem__(
self,
names: Tuple[Any, Any],
data: Union[np.ndarray, jnp.ndarray],
) -> None:
"""Setting messages from a factor to a variable
Args:
names: A tuple of length 2
names[0] is the name of the factor
names[1] is the name of the variable
data: An array containing messages from factor names[0]
to variable names[1]
"""
@typing.overload
def __setitem__(
self,
names: Any,
data: Union[np.ndarray, jnp.ndarray],
) -> None:
"""Spreading beliefs at a variable to all connected factors
Args:
names: The name of the variable
data: An array containing the beliefs to be spread uniformly
across all factor to variable messages involving this
variable.
"""
def __setitem__(self, names, data) -> None:
if (
isinstance(names, tuple)
and len(names) == 2
and names[1] in self.fg_state.variable_group.names
):
names = (frozenset(names[0]), names[1])
object.__setattr__(
self,
"value",
np.asarray(
update_ftov_msgs(
jax.device_put(self.value),
{names: jax.device_put(data)},
self.fg_state,
)
),
)
@functools.partial(jax.jit, static_argnames="fg_state")
def update_evidence(
evidence: jnp.ndarray, updates: Dict[Any, jnp.ndarray], fg_state: FactorGraphState
) -> jnp.ndarray:
"""Function to update evidence.
Args:
evidence: A flat jnp array containing evidence.
updates: A dictionary containing updates for evidence
fg_state: Factor graph state
Returns:
A flat jnp array containing updated evidence.
"""
for name in updates:
data = updates[name]
if name in fg_state.variable_group.container_names:
if name is None:
variable_group = fg_state.variable_group
else:
assert isinstance(
fg_state.variable_group, groups.CompositeVariableGroup
)
variable_group = fg_state.variable_group.variable_group_container[name]
start_index = fg_state.vars_to_starts[variable_group.variables[0]]
flat_data = variable_group.flatten(data)
evidence = evidence.at[start_index : start_index + flat_data.shape[0]].set(
flat_data
)
else:
var = fg_state.variable_group[name]
start_index = fg_state.vars_to_starts[var]
evidence = evidence.at[start_index : start_index + var.num_states].set(data)
return evidence
@dataclass(frozen=True, eq=False)
class Evidence:
"""Class for storing and manipulating evidence
Args:
fg_state: Factor graph state
value: Optionally specify initial value for evidence
Raises: ValueError if provided value does not match expected evidence shape.
"""
fg_state: FactorGraphState
value: Optional[np.ndarray] = None
def __post_init__(self):
if self.value is None:
object.__setattr__(self, "value", np.zeros(self.fg_state.num_var_states))
else:
if self.value.shape != (self.fg_state.num_var_states,):
raise ValueError(
f"Expected evidence shape {(self.fg_state.num_var_states,)}. "
f"Got {self.value.shape}."
)
object.__setattr__(self, "value", self.value)
def __getitem__(self, name: Any) -> np.ndarray:
"""Function to query evidence for a variable
Args:
name: name for the variable
Returns:
evidence for the queried variable
"""
value = cast(np.ndarray, self.value)
variable = self.fg_state.variable_group[name]
start = self.fg_state.vars_to_starts[variable]
evidence = value[start : start + variable.num_states]
return evidence
def __setitem__(
self,
name: Any,
data: np.ndarray,
) -> None:
"""Function to update the evidence for variables
Args:
name: The name of a variable group or a single variable.
If name is the name of a variable group, updates are derived by using the variable group to
flatten the data.
If name is the name of a variable, data should be of an array shape (num_states,)
If name is None, updates are derived by using self.fg_state.variable_group to flatten the data.
data: Array containing the evidence updates.
"""
object.__setattr__(
self,
"value",
np.asarray(
update_evidence(
jax.device_put(self.value),
{name: jax.device_put(data)},
self.fg_state,
),
),
)
@jax.tree_util.register_pytree_node_class
@dataclass(frozen=True, eq=False)
class BPArrays:
"""Container for the relevant flat arrays used in belief propagation.
Args:
log_potentials: Flat log potentials array.
ftov_msgs: Flat factor to variable messages array.
evidence: Flat evidence array.
"""
log_potentials: Union[np.ndarray, jnp.ndarray]
ftov_msgs: Union[np.ndarray, jnp.ndarray]
evidence: Union[np.ndarray, jnp.ndarray]
def __post_init__(self):
for field in self.__dataclass_fields__:
if isinstance(getattr(self, field), np.ndarray):
getattr(self, field).flags.writeable = False
def tree_flatten(self):
return jax.tree_util.tree_flatten(asdict(self))
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(**aux_data.unflatten(children))
@dataclass(frozen=True, eq=False)
class BeliefPropagation:
"""Belief propagation functions.
Arguments:
init: Function to create log_potentials, ftov_msgs and evidence.
Args:
log_potentials_updates: Optional dictionary containing log_potentials updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
Returns:
A BPArrays with the log_potentials, ftov_msgs and evidence.
update: Function to update log_potentials, ftov_msgs and evidence.
Args:
bp_arrays: Optional arrays of log_potentials, ftov_msgs, evidence.
log_potentials_updates: Optional dictionary containing log_potentials updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
Returns:
A BPArrays with the updated log_potentials, ftov_msgs and evidence.
run_bp: Function to run belief propagation for num_iters with a damping_factor.
Args:
bp_arrays: Initial arrays of log_potentials, ftov_msgs, evidence.
num_iters: Number of belief propagation iterations.
damping: The damping factor to use for message updates between one timestep and the next.
Returns:
A BPArrays containing the updated ftov_msgs.
        to_bp_state: Function to reconstruct the BPState from a BPArrays.
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
The reconstructed BPState
get_beliefs: Function to calculate beliefs from a BPArrays.
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
beliefs: An array or a PyTree container containing the beliefs for the variables.
"""
init: Callable
update: Callable
run_bp: Callable
to_bp_state: Callable
get_beliefs: Callable
def BP(bp_state: BPState, temperature: float = 0.0) -> BeliefPropagation:
"""Function for generating belief propagation functions.
Args:
bp_state: Belief propagation state.
temperature: Temperature for loopy belief propagation.
1.0 corresponds to sum-product, 0.0 corresponds to max-product.
Returns:
Belief propagation functions.
"""
wiring = bp_state.fg_state.wiring
edges_num_states = np.concatenate(
[wiring[factor_type].edges_num_states for factor_type in FAC_TO_VAR_UPDATES]
)
max_msg_size = int(np.max(edges_num_states))
var_states_for_edges = np.concatenate(
[wiring[factor_type].var_states_for_edges for factor_type in FAC_TO_VAR_UPDATES]
)
    # Inference arguments per factor type
inference_arguments: Dict[type, Mapping] = {}
for factor_type in FAC_TO_VAR_UPDATES:
this_inference_arguments = inspect.getfullargspec(
FAC_TO_VAR_UPDATES[factor_type]
).args
this_inference_arguments.remove("vtof_msgs")
this_inference_arguments.remove("log_potentials")
this_inference_arguments.remove("temperature")
this_inference_arguments = {
key: getattr(wiring[factor_type], key) for key in this_inference_arguments
}
inference_arguments[factor_type] = this_inference_arguments
factor_type_to_msgs_range = bp_state.fg_state.factor_type_to_msgs_range
factor_type_to_potentials_range = bp_state.fg_state.factor_type_to_potentials_range
def update(
bp_arrays: Optional[BPArrays] = None,
log_potentials_updates: Optional[Dict[Any, jnp.ndarray]] = None,
ftov_msgs_updates: Optional[Dict[Any, jnp.ndarray]] = None,
evidence_updates: Optional[Dict[Any, jnp.ndarray]] = None,
) -> BPArrays:
"""Function to update belief propagation log_potentials, ftov_msgs, evidence.
Args:
bp_arrays: Optional arrays of log_potentials, ftov_msgs, evidence.
log_potentials_updates: Optional dictionary containing log_potentials updates.
ftov_msgs_updates: Optional dictionary containing ftov_msgs updates.
evidence_updates: Optional dictionary containing evidence updates.
Returns:
A BPArrays with the updated log_potentials, ftov_msgs and evidence.
"""
if bp_arrays is not None:
log_potentials = bp_arrays.log_potentials
evidence = bp_arrays.evidence
ftov_msgs = bp_arrays.ftov_msgs
else:
log_potentials = jax.device_put(bp_state.log_potentials.value)
ftov_msgs = bp_state.ftov_msgs.value
evidence = bp_state.evidence.value
if log_potentials_updates is not None:
log_potentials = update_log_potentials(
log_potentials, log_potentials_updates, bp_state.fg_state
)
if ftov_msgs_updates is not None:
ftov_msgs = update_ftov_msgs(
ftov_msgs, ftov_msgs_updates, bp_state.fg_state
)
if evidence_updates is not None:
evidence = update_evidence(evidence, evidence_updates, bp_state.fg_state)
return BPArrays(
log_potentials=log_potentials, ftov_msgs=ftov_msgs, evidence=evidence
)
def run_bp(
bp_arrays: BPArrays,
num_iters: int,
damping: float = 0.5,
) -> BPArrays:
"""Function to run belief propagation for num_iters with a damping_factor.
Args:
bp_arrays: Initial arrays of log_potentials, ftov_msgs, evidence.
num_iters: Number of belief propagation iterations.
damping: The damping factor to use for message updates between one timestep and the next.
Returns:
A BPArrays containing the updated ftov_msgs.
"""
log_potentials = bp_arrays.log_potentials
evidence = bp_arrays.evidence
ftov_msgs = bp_arrays.ftov_msgs
# Normalize the messages to ensure the maximum value is 0.
ftov_msgs = infer.normalize_and_clip_msgs(
ftov_msgs, edges_num_states, max_msg_size
)
@jax.checkpoint
def update(msgs: jnp.ndarray, _) -> Tuple[jnp.ndarray, None]:
# Compute new variable to factor messages by message passing
vtof_msgs = infer.pass_var_to_fac_messages(
msgs,
evidence,
var_states_for_edges,
)
ftov_msgs = jnp.zeros_like(vtof_msgs)
for factor_type in FAC_TO_VAR_UPDATES:
msgs_start, msgs_end = factor_type_to_msgs_range[factor_type]
potentials_start, potentials_end = factor_type_to_potentials_range[
factor_type
]
ftov_msgs_type = FAC_TO_VAR_UPDATES[factor_type](
vtof_msgs=vtof_msgs[msgs_start:msgs_end],
log_potentials=log_potentials[potentials_start:potentials_end],
temperature=temperature,
**inference_arguments[factor_type],
)
ftov_msgs = ftov_msgs.at[msgs_start:msgs_end].set(ftov_msgs_type)
# Use the results of message passing to perform damping and
# update the factor to variable messages
delta_msgs = ftov_msgs - msgs
msgs = msgs + (1 - damping) * delta_msgs
# Normalize and clip these damped, updated messages before
# returning them.
msgs = infer.normalize_and_clip_msgs(msgs, edges_num_states, max_msg_size)
return msgs, None
ftov_msgs, _ = jax.lax.scan(update, ftov_msgs, None, num_iters)
return BPArrays(
log_potentials=log_potentials, ftov_msgs=ftov_msgs, evidence=evidence
)
def to_bp_state(bp_arrays: BPArrays) -> BPState:
"""Function to reconstruct the BPState from a BPArrays
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
The reconstructed BPState
"""
return BPState(
log_potentials=LogPotentials(
fg_state=bp_state.fg_state, value=bp_arrays.log_potentials
),
ftov_msgs=FToVMessages(
fg_state=bp_state.fg_state,
value=bp_arrays.ftov_msgs,
),
evidence=Evidence(fg_state=bp_state.fg_state, value=bp_arrays.evidence),
)
@jax.jit
def get_beliefs(bp_arrays: BPArrays) -> Any:
"""Function to calculate beliefs from a BPArrays
Args:
bp_arrays: A BPArrays containing log_potentials, ftov_msgs, evidence.
Returns:
beliefs: An array or a PyTree container containing the beliefs for the variables.
"""
beliefs = bp_state.fg_state.variable_group.unflatten(
jax.device_put(bp_arrays.evidence)
.at[jax.device_put(var_states_for_edges)]
.add(bp_arrays.ftov_msgs)
)
return beliefs
bp = BeliefPropagation(
init=functools.partial(update, None),
update=update,
run_bp=run_bp,
to_bp_state=to_bp_state,
get_beliefs=get_beliefs,
)
return bp
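# Hedged usage sketch (not part of the original module): the intended call
# sequence for the functions returned by BP. `fg` stands for an assumed,
# pre-built pgmax factor graph exposing a `bp_state` attribute.
def _example_bp_loop(fg, num_iters=100):
    bp = BP(fg.bp_state, temperature=0.0)  # 0.0 -> max-product
    bp_arrays = bp.init()  # init is functools.partial(update, None)
    bp_arrays = bp.run_bp(bp_arrays, num_iters=num_iters, damping=0.5)
    beliefs = bp.get_beliefs(bp_arrays)
    return decode_map_states(beliefs)  # defined below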
@jax.jit
def decode_map_states(beliefs: Any) -> Any:
"""Function to decode MAP states given the calculated beliefs.
Args:
beliefs: An array or a PyTree container containing beliefs for different variables.
Returns:
An array or a PyTree container containing the MAP states for different variables.
"""
map_states = jax.tree_util.tree_map(
lambda x: jnp.argmax(x, axis=-1),
beliefs,
)
return map_states
@jax.jit
def get_marginals(beliefs: Any) -> Any:
"""Function to get marginal probabilities given the calculated beliefs.
Args:
beliefs: An array or a PyTree container containing beliefs for different variables.
Returns:
An array or a PyTree container containing the marginal probabilities different variables.
"""
marginals = jax.tree_util.tree_map(
lambda x: jnp.exp(x - logsumexp(x, axis=-1, keepdims=True)),
beliefs,
)
return marginals
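# Hedged sketch (illustrative, not from the original module): both helpers
# act leaf-wise on any PyTree of beliefs, so a single (num_vars, num_states)
# array is enough to see what they do.
def _example_decode_and_marginals():
    beliefs = jnp.array([[0.1, 2.0], [3.0, -1.0]])  # toy beliefs: 2 vars, 2 states
    map_states = decode_map_states(beliefs)  # -> [1, 0]
    marginals = get_marginals(beliefs)  # each row normalised to sum to 1
    return map_states, marginals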
| [
"pgmax.groups.enumeration.EnumerationFactorGroup",
"typing.cast",
"numpy.empty",
"pgmax.fg.groups.SingleFactorGroup",
"pgmax.fg.groups.CompositeVariableGroup",
"jax.device_put",
"jax.numpy.argmax",
"numpy.max",
"functools.partial",
"jax.lax.scan",
"jax.numpy.zeros_like",
"jax.scipy.special.log... | [((15208, 15240), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (15217, 15240), False, 'from dataclasses import asdict, dataclass\n'), ((17234, 17266), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (17243, 17266), False, 'from dataclasses import asdict, dataclass\n'), ((18246, 18300), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnames': '"""fg_state"""'}), "(jax.jit, static_argnames='fg_state')\n", (18263, 18300), False, 'import functools\n'), ((19817, 19849), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (19826, 19849), False, 'from dataclasses import asdict, dataclass\n'), ((22492, 22546), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnames': '"""fg_state"""'}), "(jax.jit, static_argnames='fg_state')\n", (22509, 22546), False, 'import functools\n'), ((24487, 24519), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (24496, 24519), False, 'from dataclasses import asdict, dataclass\n'), ((26912, 26966), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnames': '"""fg_state"""'}), "(jax.jit, static_argnames='fg_state')\n", (26929, 26966), False, 'import functools\n'), ((28295, 28327), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (28304, 28327), False, 'from dataclasses import asdict, dataclass\n'), ((30616, 30648), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (30625, 30648), False, 'from dataclasses import asdict, dataclass\n'), ((31456, 31488), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (31465, 31488), False, 'from dataclasses import asdict, dataclass\n'), ((8657, 8682), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (8676, 8682), False, 'import functools\n'), ((34117, 34213), 'numpy.concatenate', 'np.concatenate', (['[wiring[factor_type].edges_num_states for factor_type in FAC_TO_VAR_UPDATES]'], {}), '([wiring[factor_type].edges_num_states for factor_type in\n FAC_TO_VAR_UPDATES])\n', (34131, 34213), True, 'import numpy as np\n'), ((34301, 34401), 'numpy.concatenate', 'np.concatenate', (['[wiring[factor_type].var_states_for_edges for factor_type in FAC_TO_VAR_UPDATES\n ]'], {}), '([wiring[factor_type].var_states_for_edges for factor_type in\n FAC_TO_VAR_UPDATES])\n', (34315, 34401), True, 'import numpy as np\n'), ((2308, 2394), 'collections.OrderedDict', 'collections.OrderedDict', (['[(factor_type, []) for factor_type in FAC_TO_VAR_UPDATES]'], {}), '([(factor_type, []) for factor_type in\n FAC_TO_VAR_UPDATES])\n', (2331, 2394), False, 'import collections\n'), ((4714, 4875), 'pgmax.groups.enumeration.EnumerationFactorGroup', 'EnumerationFactorGroup', (['self._variable_group'], {'variable_names_for_factors': '[variable_names]', 'factor_configs': 'factor_configs', 'log_potentials': 'log_potentials'}), '(self._variable_group, variable_names_for_factors=[\n variable_names], factor_configs=factor_configs, log_potentials=\n log_potentials)\n', (4736, 4875), False, 'from pgmax.groups.enumeration import EnumerationFactorGroup\n'), ((6275, 6400), 'pgmax.fg.groups.SingleFactorGroup', 'groups.SingleFactorGroup', ([], 
{'variable_group': 'self._variable_group', 'variable_names_for_factors': '[variable_names]', 'factor': 'factor'}), '(variable_group=self._variable_group,\n variable_names_for_factors=[variable_names], factor=factor)\n', (6299, 6400), False, 'from pgmax.fg import groups, nodes\n'), ((9141, 9166), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9164, 9166), False, 'import collections\n'), ((9211, 9236), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9234, 9236), False, 'import collections\n'), ((9356, 9381), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9379, 9381), False, 'import collections\n'), ((9432, 9457), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9455, 9457), False, 'import collections\n'), ((12101, 12126), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (12124, 12126), False, 'import collections\n'), ((13959, 14053), 'numpy.concatenate', 'np.concatenate', (['[self.log_potentials[factor_type] for factor_type in self.log_potentials]'], {}), '([self.log_potentials[factor_type] for factor_type in self.\n log_potentials])\n', (13973, 14053), True, 'import numpy as np\n'), ((21071, 21099), 'typing.cast', 'cast', (['np.ndarray', 'self.value'], {}), '(np.ndarray, self.value)\n', (21075, 21099), False, 'from typing import Any, Callable, Dict, FrozenSet, Hashable, List, Mapping, Optional, OrderedDict, Sequence, Set, Tuple, Type, Union, cast\n'), ((29388, 29416), 'typing.cast', 'cast', (['np.ndarray', 'self.value'], {}), '(np.ndarray, self.value)\n', (29392, 29416), False, 'from typing import Any, Callable, Dict, FrozenSet, Hashable, List, Mapping, Optional, OrderedDict, Sequence, Set, Tuple, Type, Union, cast\n'), ((34247, 34271), 'numpy.max', 'np.max', (['edges_num_states'], {}), '(edges_num_states)\n', (34253, 34271), True, 'import numpy as np\n'), ((37750, 37822), 'pgmax.bp.infer.normalize_and_clip_msgs', 'infer.normalize_and_clip_msgs', (['ftov_msgs', 'edges_num_states', 'max_msg_size'], {}), '(ftov_msgs, edges_num_states, max_msg_size)\n', (37779, 37822), False, 'from pgmax.bp import infer\n'), ((39358, 39406), 'jax.lax.scan', 'jax.lax.scan', (['update', 'ftov_msgs', 'None', 'num_iters'], {}), '(update, ftov_msgs, None, num_iters)\n', (39370, 39406), False, 'import jax\n'), ((1861, 1906), 'pgmax.fg.groups.CompositeVariableGroup', 'groups.CompositeVariableGroup', (['self.variables'], {}), '(self.variables)\n', (1890, 1906), False, 'from pgmax.fg import groups, nodes\n'), ((31321, 31333), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (31327, 31333), False, 'from dataclasses import asdict, dataclass\n'), ((34583, 34638), 'inspect.getfullargspec', 'inspect.getfullargspec', (['FAC_TO_VAR_UPDATES[factor_type]'], {}), '(FAC_TO_VAR_UPDATES[factor_type])\n', (34605, 34638), False, 'import inspect\n'), ((36247, 36292), 'jax.device_put', 'jax.device_put', (['bp_state.log_potentials.value'], {}), '(bp_state.log_potentials.value)\n', (36261, 36292), False, 'import jax\n'), ((38037, 38105), 'pgmax.bp.infer.pass_var_to_fac_messages', 'infer.pass_var_to_fac_messages', (['msgs', 'evidence', 'var_states_for_edges'], {}), '(msgs, evidence, var_states_for_edges)\n', (38067, 38105), False, 'from pgmax.bp import infer\n'), ((38193, 38218), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['vtof_msgs'], {}), '(vtof_msgs)\n', (38207, 38218), True, 'import jax.numpy as jnp\n'), ((39236, 39303), 'pgmax.bp.infer.normalize_and_clip_msgs', 'infer.normalize_and_clip_msgs', 
(['msgs', 'edges_num_states', 'max_msg_size'], {}), '(msgs, edges_num_states, max_msg_size)\n', (39265, 39303), False, 'from pgmax.bp import infer\n'), ((40813, 40844), 'functools.partial', 'functools.partial', (['update', 'None'], {}), '(update, None)\n', (40830, 40844), False, 'import functools\n'), ((41374, 41396), 'jax.numpy.argmax', 'jnp.argmax', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (41384, 41396), True, 'import jax.numpy as jnp\n'), ((12311, 12325), 'numpy.empty', 'np.empty', (['(0,)'], {}), '((0,))\n', (12319, 12325), True, 'import numpy as np\n'), ((12390, 12495), 'numpy.concatenate', 'np.concatenate', (['[factor_group.factor_group_log_potentials for factor_group in\n factors_groups_by_type]'], {}), '([factor_group.factor_group_log_potentials for factor_group in\n factors_groups_by_type])\n', (12404, 12495), True, 'import numpy as np\n'), ((14351, 14387), 'copy.copy', 'copy.copy', (['self._named_factor_groups'], {}), '(self._named_factor_groups)\n', (14360, 14387), False, 'import copy\n'), ((14427, 14469), 'copy.copy', 'copy.copy', (['self._factor_type_to_msgs_range'], {}), '(self._factor_type_to_msgs_range)\n', (14436, 14469), False, 'import copy\n'), ((14515, 14563), 'copy.copy', 'copy.copy', (['self._factor_type_to_potentials_range'], {}), '(self._factor_type_to_potentials_range)\n', (14524, 14563), False, 'import copy\n'), ((14641, 14691), 'copy.copy', 'copy.copy', (['self._factor_group_to_potentials_starts'], {}), '(self._factor_group_to_potentials_starts)\n', (14650, 14691), False, 'import copy\n'), ((23799, 23868), 'numpy.nonzero', 'np.nonzero', (['(var_states_for_edges == fg_state.vars_to_starts[variable])'], {}), '(var_states_for_edges == fg_state.vars_to_starts[variable])\n', (23809, 23868), True, 'import numpy as np\n'), ((25015, 25062), 'numpy.zeros', 'np.zeros', (['self.fg_state.total_factor_num_states'], {}), '(self.fg_state.total_factor_num_states)\n', (25023, 25062), True, 'import numpy as np\n'), ((28772, 28810), 'numpy.zeros', 'np.zeros', (['self.fg_state.num_var_states'], {}), '(self.fg_state.num_var_states)\n', (28780, 28810), True, 'import numpy as np\n'), ((1964, 2058), 'numpy.array', 'np.array', (['[variable.num_states for variable in self._variable_group.variables]'], {'dtype': 'int'}), '([variable.num_states for variable in self._variable_group.\n variables], dtype=int)\n', (1972, 2058), True, 'import numpy as np\n'), ((22333, 22359), 'jax.device_put', 'jax.device_put', (['self.value'], {}), '(self.value)\n', (22347, 22359), False, 'import jax\n'), ((26752, 26778), 'jax.device_put', 'jax.device_put', (['self.value'], {}), '(self.value)\n', (26766, 26778), False, 'import jax\n'), ((30414, 30440), 'jax.device_put', 'jax.device_put', (['self.value'], {}), '(self.value)\n', (30428, 30440), False, 'import jax\n'), ((41863, 41899), 'jax.scipy.special.logsumexp', 'logsumexp', (['x'], {'axis': '(-1)', 'keepdims': '(True)'}), '(x, axis=-1, keepdims=True)\n', (41872, 41899), False, 'from jax.scipy.special import logsumexp\n'), ((6048, 6073), 'pgmax.factors.FAC_TO_VAR_UPDATES.keys', 'FAC_TO_VAR_UPDATES.keys', ([], {}), '()\n', (6071, 6073), False, 'from pgmax.factors import FAC_TO_VAR_UPDATES\n'), ((22388, 22408), 'jax.device_put', 'jax.device_put', (['data'], {}), '(data)\n', (22402, 22408), False, 'import jax\n'), ((26808, 26828), 'jax.device_put', 'jax.device_put', (['data'], {}), '(data)\n', (26822, 26828), False, 'import jax\n'), ((30469, 30489), 'jax.device_put', 'jax.device_put', (['data'], {}), '(data)\n', (30483, 30489), False, 'import jax\n'), 
((40662, 40698), 'jax.device_put', 'jax.device_put', (['var_states_for_edges'], {}), '(var_states_for_edges)\n', (40676, 40698), False, 'import jax\n'), ((40611, 40645), 'jax.device_put', 'jax.device_put', (['bp_arrays.evidence'], {}), '(bp_arrays.evidence)\n', (40625, 40645), False, 'import jax\n')] |
import geonumpy as gnp
import geonumpy.io as gio
import geonumpy.util as gutil
import geonumpy.draw as gdraw
import geonumpy.match as gmt
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from glob import glob
def draw_simple():
shandong = gio.read_shp('../data/shape/shandong.shp')
shandong = shandong.to_crs(3857)
box = gutil.shp2box(shandong, (3600, 2400), 0.1)
paper = gnp.frombox(*box, chan=1, dtype=np.uint8)
paper[:] = 255
gdraw.draw_polygon(paper, shandong, 0, 2)
gdraw.draw_ruler(paper, 80, 50, -80, -50, 1, 4326, ('times', 32), 0, 2, 5)
gdraw.draw_lab(paper, shandong, 'name', 0, ('simhei', 32), 'ct')
gdraw.draw_unit(paper, -180, -100, 0.3, 30, ('times', 48), 0, 'km', 3, anc='r')
gdraw.draw_text(paper, '山东省', 180, 120, 0, ('simkai', 128))
gdraw.draw_N(paper, -240, 240, ('simhei', 100), 2, 100, 0)
return paper
def draw_style():
paper = gnp.geoarray(np.zeros((480,1024), dtype=np.uint8))
body = [('Style', 'simhei', 72),
('blank',50),
('line', 1, 'this is line style'),
('circle', 2, 'this is circle style'),
('rect', 3, 'this is rect style')]
lut = np.array([[255,255,255],
[255,0 ,0 ],
[0 ,255,0 ],
[0 ,0 ,255],
[0 ,0 ,0 ]], dtype=np.uint8)
gdraw.draw_style(paper,128,-20, body, mar=(20, 30),
recsize=(120,60,3), font=('simsun', 60), color=4, box=5)
return paper.lookup(lut)
def draw_grade():
shandong = gio.read_shp('../data/shape/shandong.shp')
shandong = shandong.to_crs(3857)
box = gutil.shp2box(shandong, (3600, 2400), 0.1)
paper = gnp.frombox(*box, dtype=np.uint8)
areas = shandong.area
grade_lut = np.array([3]*60 + [2]*30 + [1]*10, dtype=np.uint8)
vs = (areas-areas.min())/(areas.max()-areas.min())*99
grade = grade_lut[vs.astype(int)]
print(grade)
gdraw.draw_polygon(paper, shandong, grade, 0)
gdraw.draw_polygon(paper, shandong, 4, 2)
gdraw.draw_ruler(paper, 80, 50, -80, -50, 1, 4326, ('times', 32), 4, 2, 5)
gdraw.draw_lab(paper, shandong, 'name', 4, ('simhei', 32), 'ct')
gdraw.draw_unit(paper, -180, -100, 0.3, 30, ('times', 48), 4, 'km', 3, anc='r')
gdraw.draw_text(paper, '山东省', 180, 120, 4, ('simkai', 128))
gdraw.draw_N(paper, -240, 240, ('simhei', 100), 2, 100, 4)
body = [('图例', 'simhei', 72),
('rect', 1, '特大城市'),
('rect', 2, '中型城市'),
('rect', 3, '一般城市')]
    # base map, position, legend body, margins, rect size & line width, font/size/colour, outer box width
gdraw.draw_style(paper, 150, -90, body, mar=(20, 30),
recsize=(120,60,2), font=('simsun', 60, 4), color=4, box=0)
lut = np.array([[255,255,255],
[255,200,100],
[255,255,128],
[255,255,200],
[0 ,0 ,0 ]], dtype=np.uint8)
return paper.lookup(lut)
def draw_class():
# ===== look up table =====
lut = np.array([[0 ,0 ,0 ],
[168,168,0 ],
[20 ,119,73 ],
[169,208,95 ],
[56 ,168,0 ],
[126,206,244],
[0 ,86 ,154],
[112,168,0 ],
[147,47 ,20 ],
[202,202,202],
[0 ,255,197],
[255,255,255]], dtype=np.uint8)
# ===== read shape file and make a paper =====
liaoning = gio.read_shp('../data/shape/shandong.shp')
liaoning = liaoning.to_crs(3857)
box = gutil.shp2box(liaoning, (3600, 2400), 0.15)
paper = gnp.frombox(*box, dtype=np.uint8)
# ===== match the class tif into paper
fs = glob('../data/class/*.tif')
idx = gmt.build_index(fs)
gmt.match_idx(idx, out=paper, order=0)
msk = paper * 0
gdraw.draw_polygon(msk, liaoning, 255, 0)
paper[msk==0] = 11
body = [('图例', 'simhei', 72),
('rect', 1, '农田'),
('rect', 2, '森林'),
('rect', 3, '草地'),
('rect', 4, '灌丛'),
('rect', 5, '湿地'),
('rect', 6, '水体'),
('rect', 7, '苔原'),
('rect', 8, '隔水层'),
('rect', 9, '裸地'),
('rect', 10, '冰雪')]
    # base map, position, legend body, margins, rect size & line width, font/size/colour, outer box width
gdraw.draw_style(paper, 60, -60, body, mar=(20, 30),
recsize=(120,60,0), font=('simsun', 60, 0), color=0, box=0)
gdraw.draw_unit(paper, -120, -60, 0.3, 30, ('times', 48), 0, 'km', 3, anc='r')
gdraw.draw_text(paper, '山东省土地利用类型', 80, 60, 0, ('simkai', 128))
gdraw.draw_N(paper, -240, 240, ('msyh', 100), 2, 100, 0)
gdraw.draw_polygon(paper, liaoning, 0, 2)
gdraw.draw_bound(paper, 5, 5, -5, -5, 0, 2, clear=None)
return paper.lookup(lut)
if __name__ == '__main__':
rst = draw_simple()
rst = draw_style()
rst = draw_grade()
rst = draw_class()
Image.fromarray(rst).save('../doc/imgs/00.png')
Image.fromarray(rst).show()
| [
"geonumpy.match.build_index",
"geonumpy.match.match_idx",
"geonumpy.io.read_shp",
"geonumpy.draw.draw_text",
"geonumpy.draw.draw_bound",
"geonumpy.draw.draw_unit",
"numpy.zeros",
"geonumpy.draw.draw_ruler",
"PIL.Image.fromarray",
"geonumpy.draw.draw_polygon",
"geonumpy.draw.draw_lab",
"geonump... | [((272, 314), 'geonumpy.io.read_shp', 'gio.read_shp', (['"""../data/shape/shandong.shp"""'], {}), "('../data/shape/shandong.shp')\n", (284, 314), True, 'import geonumpy.io as gio\n'), ((362, 404), 'geonumpy.util.shp2box', 'gutil.shp2box', (['shandong', '(3600, 2400)', '(0.1)'], {}), '(shandong, (3600, 2400), 0.1)\n', (375, 404), True, 'import geonumpy.util as gutil\n'), ((417, 458), 'geonumpy.frombox', 'gnp.frombox', (['*box'], {'chan': '(1)', 'dtype': 'np.uint8'}), '(*box, chan=1, dtype=np.uint8)\n', (428, 458), True, 'import geonumpy as gnp\n'), ((482, 523), 'geonumpy.draw.draw_polygon', 'gdraw.draw_polygon', (['paper', 'shandong', '(0)', '(2)'], {}), '(paper, shandong, 0, 2)\n', (500, 523), True, 'import geonumpy.draw as gdraw\n'), ((528, 602), 'geonumpy.draw.draw_ruler', 'gdraw.draw_ruler', (['paper', '(80)', '(50)', '(-80)', '(-50)', '(1)', '(4326)', "('times', 32)", '(0)', '(2)', '(5)'], {}), "(paper, 80, 50, -80, -50, 1, 4326, ('times', 32), 0, 2, 5)\n", (544, 602), True, 'import geonumpy.draw as gdraw\n'), ((607, 671), 'geonumpy.draw.draw_lab', 'gdraw.draw_lab', (['paper', 'shandong', '"""name"""', '(0)', "('simhei', 32)", '"""ct"""'], {}), "(paper, shandong, 'name', 0, ('simhei', 32), 'ct')\n", (621, 671), True, 'import geonumpy.draw as gdraw\n'), ((676, 755), 'geonumpy.draw.draw_unit', 'gdraw.draw_unit', (['paper', '(-180)', '(-100)', '(0.3)', '(30)', "('times', 48)", '(0)', '"""km"""', '(3)'], {'anc': '"""r"""'}), "(paper, -180, -100, 0.3, 30, ('times', 48), 0, 'km', 3, anc='r')\n", (691, 755), True, 'import geonumpy.draw as gdraw\n'), ((760, 819), 'geonumpy.draw.draw_text', 'gdraw.draw_text', (['paper', '"""山东省"""', '(180)', '(120)', '(0)', "('simkai', 128)"], {}), "(paper, '山东省', 180, 120, 0, ('simkai', 128))\n", (775, 819), True, 'import geonumpy.draw as gdraw\n'), ((824, 882), 'geonumpy.draw.draw_N', 'gdraw.draw_N', (['paper', '(-240)', '(240)', "('simhei', 100)", '(2)', '(100)', '(0)'], {}), "(paper, -240, 240, ('simhei', 100), 2, 100, 0)\n", (836, 882), True, 'import geonumpy.draw as gdraw\n'), ((1205, 1303), 'numpy.array', 'np.array', (['[[255, 255, 255], [255, 0, 0], [0, 255, 0], [0, 0, 255], [0, 0, 0]]'], {'dtype': 'np.uint8'}), '([[255, 255, 255], [255, 0, 0], [0, 255, 0], [0, 0, 255], [0, 0, 0]\n ], dtype=np.uint8)\n', (1213, 1303), True, 'import numpy as np\n'), ((1391, 1507), 'geonumpy.draw.draw_style', 'gdraw.draw_style', (['paper', '(128)', '(-20)', 'body'], {'mar': '(20, 30)', 'recsize': '(120, 60, 3)', 'font': "('simsun', 60)", 'color': '(4)', 'box': '(5)'}), "(paper, 128, -20, body, mar=(20, 30), recsize=(120, 60, 3),\n font=('simsun', 60), color=4, box=5)\n", (1407, 1507), True, 'import geonumpy.draw as gdraw\n'), ((1571, 1613), 'geonumpy.io.read_shp', 'gio.read_shp', (['"""../data/shape/shandong.shp"""'], {}), "('../data/shape/shandong.shp')\n", (1583, 1613), True, 'import geonumpy.io as gio\n'), ((1661, 1703), 'geonumpy.util.shp2box', 'gutil.shp2box', (['shandong', '(3600, 2400)', '(0.1)'], {}), '(shandong, (3600, 2400), 0.1)\n', (1674, 1703), True, 'import geonumpy.util as gutil\n'), ((1716, 1749), 'geonumpy.frombox', 'gnp.frombox', (['*box'], {'dtype': 'np.uint8'}), '(*box, dtype=np.uint8)\n', (1727, 1749), True, 'import geonumpy as gnp\n'), ((1797, 1853), 'numpy.array', 'np.array', (['([3] * 60 + [2] * 30 + [1] * 10)'], {'dtype': 'np.uint8'}), '([3] * 60 + [2] * 30 + [1] * 10, dtype=np.uint8)\n', (1805, 1853), True, 'import numpy as np\n'), ((1970, 2015), 'geonumpy.draw.draw_polygon', 'gdraw.draw_polygon', (['paper', 'shandong', 'grade', 
'(0)'], {}), '(paper, shandong, grade, 0)\n', (1988, 2015), True, 'import geonumpy.draw as gdraw\n'), ((2020, 2061), 'geonumpy.draw.draw_polygon', 'gdraw.draw_polygon', (['paper', 'shandong', '(4)', '(2)'], {}), '(paper, shandong, 4, 2)\n', (2038, 2061), True, 'import geonumpy.draw as gdraw\n'), ((2071, 2145), 'geonumpy.draw.draw_ruler', 'gdraw.draw_ruler', (['paper', '(80)', '(50)', '(-80)', '(-50)', '(1)', '(4326)', "('times', 32)", '(4)', '(2)', '(5)'], {}), "(paper, 80, 50, -80, -50, 1, 4326, ('times', 32), 4, 2, 5)\n", (2087, 2145), True, 'import geonumpy.draw as gdraw\n'), ((2150, 2214), 'geonumpy.draw.draw_lab', 'gdraw.draw_lab', (['paper', 'shandong', '"""name"""', '(4)', "('simhei', 32)", '"""ct"""'], {}), "(paper, shandong, 'name', 4, ('simhei', 32), 'ct')\n", (2164, 2214), True, 'import geonumpy.draw as gdraw\n'), ((2219, 2298), 'geonumpy.draw.draw_unit', 'gdraw.draw_unit', (['paper', '(-180)', '(-100)', '(0.3)', '(30)', "('times', 48)", '(4)', '"""km"""', '(3)'], {'anc': '"""r"""'}), "(paper, -180, -100, 0.3, 30, ('times', 48), 4, 'km', 3, anc='r')\n", (2234, 2298), True, 'import geonumpy.draw as gdraw\n'), ((2303, 2362), 'geonumpy.draw.draw_text', 'gdraw.draw_text', (['paper', '"""山东省"""', '(180)', '(120)', '(4)', "('simkai', 128)"], {}), "(paper, '山东省', 180, 120, 4, ('simkai', 128))\n", (2318, 2362), True, 'import geonumpy.draw as gdraw\n'), ((2367, 2425), 'geonumpy.draw.draw_N', 'gdraw.draw_N', (['paper', '(-240)', '(240)', "('simhei', 100)", '(2)', '(100)', '(4)'], {}), "(paper, -240, 240, ('simhei', 100), 2, 100, 4)\n", (2379, 2425), True, 'import geonumpy.draw as gdraw\n'), ((2606, 2725), 'geonumpy.draw.draw_style', 'gdraw.draw_style', (['paper', '(150)', '(-90)', 'body'], {'mar': '(20, 30)', 'recsize': '(120, 60, 2)', 'font': "('simsun', 60, 4)", 'color': '(4)', 'box': '(0)'}), "(paper, 150, -90, body, mar=(20, 30), recsize=(120, 60, 2),\n font=('simsun', 60, 4), color=4, box=0)\n", (2622, 2725), True, 'import geonumpy.draw as gdraw\n'), ((2743, 2853), 'numpy.array', 'np.array', (['[[255, 255, 255], [255, 200, 100], [255, 255, 128], [255, 255, 200], [0, 0, 0]]'], {'dtype': 'np.uint8'}), '([[255, 255, 255], [255, 200, 100], [255, 255, 128], [255, 255, 200\n ], [0, 0, 0]], dtype=np.uint8)\n', (2751, 2853), True, 'import numpy as np\n'), ((3036, 3252), 'numpy.array', 'np.array', (['[[0, 0, 0], [168, 168, 0], [20, 119, 73], [169, 208, 95], [56, 168, 0], [\n 126, 206, 244], [0, 86, 154], [112, 168, 0], [147, 47, 20], [202, 202, \n 202], [0, 255, 197], [255, 255, 255]]'], {'dtype': 'np.uint8'}), '([[0, 0, 0], [168, 168, 0], [20, 119, 73], [169, 208, 95], [56, 168,\n 0], [126, 206, 244], [0, 86, 154], [112, 168, 0], [147, 47, 20], [202, \n 202, 202], [0, 255, 197], [255, 255, 255]], dtype=np.uint8)\n', (3044, 3252), True, 'import numpy as np\n'), ((3530, 3572), 'geonumpy.io.read_shp', 'gio.read_shp', (['"""../data/shape/shandong.shp"""'], {}), "('../data/shape/shandong.shp')\n", (3542, 3572), True, 'import geonumpy.io as gio\n'), ((3620, 3663), 'geonumpy.util.shp2box', 'gutil.shp2box', (['liaoning', '(3600, 2400)', '(0.15)'], {}), '(liaoning, (3600, 2400), 0.15)\n', (3633, 3663), True, 'import geonumpy.util as gutil\n'), ((3676, 3709), 'geonumpy.frombox', 'gnp.frombox', (['*box'], {'dtype': 'np.uint8'}), '(*box, dtype=np.uint8)\n', (3687, 3709), True, 'import geonumpy as gnp\n'), ((3763, 3790), 'glob.glob', 'glob', (['"""../data/class/*.tif"""'], {}), "('../data/class/*.tif')\n", (3767, 3790), False, 'from glob import glob\n'), ((3801, 3820), 'geonumpy.match.build_index', 
'gmt.build_index', (['fs'], {}), '(fs)\n', (3816, 3820), True, 'import geonumpy.match as gmt\n'), ((3825, 3863), 'geonumpy.match.match_idx', 'gmt.match_idx', (['idx'], {'out': 'paper', 'order': '(0)'}), '(idx, out=paper, order=0)\n', (3838, 3863), True, 'import geonumpy.match as gmt\n'), ((3889, 3930), 'geonumpy.draw.draw_polygon', 'gdraw.draw_polygon', (['msk', 'liaoning', '(255)', '(0)'], {}), '(msk, liaoning, 255, 0)\n', (3907, 3930), True, 'import geonumpy.draw as gdraw\n'), ((4353, 4471), 'geonumpy.draw.draw_style', 'gdraw.draw_style', (['paper', '(60)', '(-60)', 'body'], {'mar': '(20, 30)', 'recsize': '(120, 60, 0)', 'font': "('simsun', 60, 0)", 'color': '(0)', 'box': '(0)'}), "(paper, 60, -60, body, mar=(20, 30), recsize=(120, 60, 0),\n font=('simsun', 60, 0), color=0, box=0)\n", (4369, 4471), True, 'import geonumpy.draw as gdraw\n'), ((4479, 4557), 'geonumpy.draw.draw_unit', 'gdraw.draw_unit', (['paper', '(-120)', '(-60)', '(0.3)', '(30)', "('times', 48)", '(0)', '"""km"""', '(3)'], {'anc': '"""r"""'}), "(paper, -120, -60, 0.3, 30, ('times', 48), 0, 'km', 3, anc='r')\n", (4494, 4557), True, 'import geonumpy.draw as gdraw\n'), ((4563, 4626), 'geonumpy.draw.draw_text', 'gdraw.draw_text', (['paper', '"""山东省土地利用类型"""', '(80)', '(60)', '(0)', "('simkai', 128)"], {}), "(paper, '山东省土地利用类型', 80, 60, 0, ('simkai', 128))\n", (4578, 4626), True, 'import geonumpy.draw as gdraw\n'), ((4632, 4688), 'geonumpy.draw.draw_N', 'gdraw.draw_N', (['paper', '(-240)', '(240)', "('msyh', 100)", '(2)', '(100)', '(0)'], {}), "(paper, -240, 240, ('msyh', 100), 2, 100, 0)\n", (4644, 4688), True, 'import geonumpy.draw as gdraw\n'), ((4694, 4735), 'geonumpy.draw.draw_polygon', 'gdraw.draw_polygon', (['paper', 'liaoning', '(0)', '(2)'], {}), '(paper, liaoning, 0, 2)\n', (4712, 4735), True, 'import geonumpy.draw as gdraw\n'), ((4745, 4800), 'geonumpy.draw.draw_bound', 'gdraw.draw_bound', (['paper', '(5)', '(5)', '(-5)', '(-5)', '(0)', '(2)'], {'clear': 'None'}), '(paper, 5, 5, -5, -5, 0, 2, clear=None)\n', (4761, 4800), True, 'import geonumpy.draw as gdraw\n'), ((944, 981), 'numpy.zeros', 'np.zeros', (['(480, 1024)'], {'dtype': 'np.uint8'}), '((480, 1024), dtype=np.uint8)\n', (952, 981), True, 'import numpy as np\n'), ((4983, 5003), 'PIL.Image.fromarray', 'Image.fromarray', (['rst'], {}), '(rst)\n', (4998, 5003), False, 'from PIL import Image\n'), ((5035, 5055), 'PIL.Image.fromarray', 'Image.fromarray', (['rst'], {}), '(rst)\n', (5050, 5055), False, 'from PIL import Image\n')] |
import numpy as np
from scipy import signal
from scipy import ndimage
from scipy import spatial
from scipy import stats
import skimage
import warnings
from skimage.segmentation import watershed
from ephysiopy.common.utils import blurImage
"""
These methods differ from MapCalcsGeneric in that they are mostly
concerned with treating rate maps as images as opposed to using
the spiking information contained within them. They therefore mostly
deals with spatial rate maps of place and grid cells.
"""
def field_lims(A):
"""
Returns a labelled matrix of the ratemap A.
    Uses anything greater than 20% of the peak rate
    to select as a field. Data is heavily smoothed.
Parameters
----------
A: np.array
The ratemap
Returns
-------
label: np.array
The labelled ratemap
"""
nan_idx = np.isnan(A)
A[nan_idx] = 0
h = int(np.max(A.shape) / 2)
sm_rmap = blurImage(A, h, ftype='gaussian')
thresh = np.max(sm_rmap.ravel()) * 0.2 # select area > 20% of peak
distance = ndimage.distance_transform_edt(sm_rmap > thresh)
mask = skimage.feature.peak_local_max(
distance, indices=False,
exclude_border=False,
labels=sm_rmap > thresh)
label = ndimage.label(mask)[0]
w = watershed(
image=-distance, markers=label,
mask=sm_rmap > thresh)
label = ndimage.label(w)[0]
return label
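# Hedged usage sketch (not from the original library): label a synthetic
# two-field ratemap with field_lims. The bump positions and widths are
# arbitrary assumptions chosen only for illustration.
def _demo_field_lims():
    yy, xx = np.mgrid[0:48, 0:48]
    rmap = (5 * np.exp(-((xx - 12) ** 2 + (yy - 12) ** 2) / 40.0) +
            4 * np.exp(-((xx - 34) ** 2 + (yy - 34) ** 2) / 40.0))
    labels = field_lims(rmap)
    print('number of fields:', labels.max())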
def limit_to_one(A, prc=50, min_dist=5):
"""
    Processes a multi-peaked ratemap (i.e. a grid cell) and returns a matrix
where the multi-peaked ratemap consist of a single peaked field that is
a) not connected to the border and b) close to the middle of the
ratemap
"""
Ac = A.copy()
Ac[np.isnan(A)] = 0
# smooth Ac more to remove local irregularities
n = ny = 5
x, y = np.mgrid[-n:n+1, -ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
g = g / g.sum()
Ac = signal.convolve(Ac, g, mode='same')
# remove really small values
Ac[Ac < 1e-10] = 0
peak_mask = skimage.feature.peak_local_max(
Ac, min_distance=min_dist,
exclude_border=False,
indices=False)
peak_labels = skimage.measure.label(peak_mask, connectivity=2)
field_labels = watershed(
image=-Ac, markers=peak_labels)
nFields = np.max(field_labels)
sub_field_mask = np.zeros((nFields, Ac.shape[0], Ac.shape[1]))
labelled_sub_field_mask = np.zeros_like(sub_field_mask)
sub_field_props = skimage.measure.regionprops(
field_labels, intensity_image=Ac)
sub_field_centroids = []
sub_field_size = []
for sub_field in sub_field_props:
tmp = np.zeros(Ac.shape).astype(bool)
tmp[sub_field.coords[:, 0], sub_field.coords[:, 1]] = True
tmp2 = Ac > sub_field.max_intensity * (prc/float(100))
sub_field_mask[sub_field.label - 1, :, :] = np.logical_and(
tmp2, tmp)
labelled_sub_field_mask[
sub_field.label-1, np.logical_and(tmp2, tmp)] = sub_field.label
sub_field_centroids.append(sub_field.centroid)
sub_field_size.append(sub_field.area) # in bins
sub_field_mask = np.sum(sub_field_mask, 0)
middle = np.round(np.array(A.shape) / 2)
normd_dists = sub_field_centroids - middle
field_dists_from_middle = np.hypot(
normd_dists[:, 0], normd_dists[:, 1])
central_field_idx = np.argmin(field_dists_from_middle)
central_field = np.squeeze(
labelled_sub_field_mask[central_field_idx, :, :])
# collapse the labelled mask down to an 2d array
labelled_sub_field_mask = np.sum(labelled_sub_field_mask, 0)
# clear the border
cleared_mask = skimage.segmentation.clear_border(central_field)
# check we've still got stuff in the matrix or fail
if ~np.any(cleared_mask):
print(
'No fields were detected away from edges so nothing returned')
return None, None, None
else:
central_field_props = sub_field_props[central_field_idx]
return central_field_props, central_field, central_field_idx
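# Hedged sketch: limit_to_one returns the regionprops, the binary mask and
# the label index of the field closest to the ratemap centre, or
# (None, None, None) when every candidate field touches the border.
def _demo_limit_to_one(rmap):
    props, central_field, idx = limit_to_one(rmap, prc=50)
    if props is not None:
        print('central field area (bins):', props.area)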
def global_threshold(A, prc=50, min_dist=5):
"""
Globally thresholds a ratemap and counts number of fields found
"""
Ac = A.copy()
Ac[np.isnan(A)] = 0
n = ny = 5
x, y = np.mgrid[-n:n+1, -ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
g = g / g.sum()
Ac = signal.convolve(Ac, g, mode='same')
maxRate = np.nanmax(np.ravel(Ac))
Ac[Ac < maxRate*(prc/float(100))] = 0
peak_mask = skimage.feature.peak_local_max(
Ac, min_distance=min_dist,
exclude_border=False,
indices=False)
peak_labels = skimage.measure.label(peak_mask, connectivity=2)
field_labels = watershed(
image=-Ac, markers=peak_labels)
nFields = np.max(field_labels)
return nFields
def local_threshold(A, prc=50, min_dist=5):
"""
    Locally thresholds a ratemap, keeping only the region around each
    local peak that exceeds prc percent of that peak's rate.
"""
Ac = A.copy()
nanidx = np.isnan(Ac)
Ac[nanidx] = 0
# smooth Ac more to remove local irregularities
n = ny = 5
x, y = np.mgrid[-n:n+1, -ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
g = g / g.sum()
Ac = signal.convolve(Ac, g, mode='same')
peak_mask = skimage.feature.peak_local_max(
Ac, min_distance=min_dist, exclude_border=False,
indices=False)
peak_labels = skimage.measure.label(peak_mask, connectivity=2)
field_labels = watershed(
image=-Ac, markers=peak_labels)
nFields = np.max(field_labels)
sub_field_mask = np.zeros((nFields, Ac.shape[0], Ac.shape[1]))
sub_field_props = skimage.measure.regionprops(
field_labels, intensity_image=Ac)
sub_field_centroids = []
sub_field_size = []
for sub_field in sub_field_props:
tmp = np.zeros(Ac.shape).astype(bool)
tmp[sub_field.coords[:, 0], sub_field.coords[:, 1]] = True
tmp2 = Ac > sub_field.max_intensity * (prc/float(100))
sub_field_mask[sub_field.label - 1, :, :] = np.logical_and(
tmp2, tmp)
sub_field_centroids.append(sub_field.centroid)
sub_field_size.append(sub_field.area) # in bins
sub_field_mask = np.sum(sub_field_mask, 0)
A_out = np.zeros_like(A)
A_out[sub_field_mask.astype(bool)] = A[sub_field_mask.astype(bool)]
A_out[nanidx] = np.nan
return A_out
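# Hedged sketch: global_threshold counts fields after one map-wide cut at
# prc percent of the overall peak, while local_threshold keeps each field's
# own supra-threshold region and returns a masked copy of the map.
def _demo_thresholds(rmap):
    print('fields found (global):', global_threshold(rmap, prc=50))
    A_local = local_threshold(rmap, prc=50)
    print('peak rate after local threshold:', np.nanmax(A_local))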
def border_score(
A, B=None, shape='square', fieldThresh=0.3, smthKernSig=3,
circumPrc=0.2, binSize=3.0, minArea=200, debug=False):
"""
    Calculates a border score totally dissimilar to that calculated in
Solstad et al (2008)
Parameters
----------
A : array_like
Should be the ratemap
B : array_like
This should be a boolean mask where True (1)
is equivalent to the presence of a border and False (0)
        is equivalent to 'open space'. Naively this will be the
edges of the ratemap but could be used to take account of
boundary insertions/ creations to check tuning to multiple
environmental boundaries. Default None: when the mask is
None then a mask is created that has 1's at the edges of the
ratemap i.e. it is assumed that occupancy = environmental
shape
shape : str
description of environment shape. Currently
only 'square' or 'circle' accepted. Used to calculate the
proportion of the environmental boundaries to examine for
firing
fieldThresh : float
Between 0 and 1 this is the percentage
amount of the maximum firing rate
to remove from the ratemap (i.e. to remove noise)
smthKernSig : float
the sigma value used in smoothing the ratemap
(again!) with a gaussian kernel
circumPrc : float
The percentage amount of the circumference
of the environment that the field needs to be to count
as long enough to make it through
binSize : float
bin size in cm
minArea : float
min area for a field to be considered
debug : bool
If True then some plots and text will be output
Returns
-------
float : the border score
Notes
-----
If the cell is a border cell (BVC) then we know that it should
fire at a fixed distance from a given boundary (possibly more
than one). In essence this algorithm estimates the amount of
variance in this distance i.e. if the cell is a border cell this
number should be small. This is achieved by first doing a bunch of
morphological operations to isolate individual fields in the
ratemap (similar to the code used in phasePrecession.py - see
the partitionFields method therein). These partitioned fields are then
thinned out (using skimage's skeletonize) to a single pixel
wide field which will lie more or less in the middle of the
(highly smoothed) sub-field. It is the variance in distance from the
nearest boundary along this pseudo-iso-line that is the boundary
measure
Other things to note are that the pixel-wide field has to have some
minimum length. In the case of a circular environment this is set to
    20% of the circumference; in the case of a square environment
this is at least half the length of the longest side
"""
# need to know borders of the environment so we can see if a field
# touches the edges, and the perimeter length of the environment
# deal with square or circles differently
borderMask = np.zeros_like(A)
A_rows, A_cols = np.shape(A)
if 'circle' in shape:
radius = np.max(np.array(np.shape(A))) / 2.0
dist_mask = skimage.morphology.disk(radius)
if np.shape(dist_mask) > np.shape(A):
dist_mask = dist_mask[1:A_rows+1, 1:A_cols+1]
tmp = np.zeros([A_rows + 2, A_cols + 2])
tmp[1:-1, 1:-1] = dist_mask
dists = ndimage.morphology.distance_transform_bf(tmp)
dists = dists[1:-1, 1:-1]
borderMask = np.logical_xor(dists <= 0, dists < 2)
# open up the border mask a little
borderMask = skimage.morphology.binary_dilation(
borderMask, skimage.morphology.disk(1))
elif 'square' in shape:
borderMask[0:3, :] = 1
borderMask[-3:, :] = 1
borderMask[:, 0:3] = 1
borderMask[:, -3:] = 1
tmp = np.zeros([A_rows + 2, A_cols + 2])
dist_mask = np.ones_like(A)
tmp[1:-1, 1:-1] = dist_mask
dists = ndimage.morphology.distance_transform_bf(tmp)
# remove edges to make same shape as input ratemap
dists = dists[1:-1, 1:-1]
A[np.isnan(A)] = 0
# get some morphological info about the fields in the ratemap
# start image processing:
# get some markers
# NB I've tried a variety of techniques to optimise this part and the
    # best seems to be the local adaptive thresholding technique which
# smooths locally with a gaussian - see the skimage docs for more
idx = A >= np.nanmax(np.ravel(A)) * fieldThresh
A_thresh = np.zeros_like(A)
A_thresh[idx] = A[idx]
# label these markers so each blob has a unique id
labels, nFields = ndimage.label(A_thresh)
# remove small objects
min_size = int(minArea / binSize) - 1
skimage.morphology.remove_small_objects(
labels, min_size=min_size, connectivity=2, in_place=True)
labels = skimage.segmentation.relabel_sequential(labels)[0]
nFields = np.max(labels)
if nFields == 0:
return np.nan
# Iterate over the labelled parts of the array labels calculating
# how much of the total circumference of the environment edge it
# covers
fieldAngularCoverage = np.zeros([1, nFields]) * np.nan
fractionOfPixelsOnBorder = np.zeros([1, nFields]) * np.nan
fieldsToKeep = np.zeros_like(A)
for i in range(1, nFields+1):
fieldMask = np.logical_and(labels == i, borderMask)
# check the angle subtended by the fieldMask
if np.sum(fieldMask.astype(int)) > 0:
s = skimage.measure.regionprops(
fieldMask.astype(int), intensity_image=A_thresh)[0]
x = s.coords[:, 0] - (A_cols / 2.0)
y = s.coords[:, 1] - (A_rows / 2.0)
subtended_angle = np.rad2deg(np.ptp(np.arctan2(x, y)))
if subtended_angle > (360 * circumPrc):
pixelsOnBorder = np.count_nonzero(
fieldMask) / float(np.count_nonzero(labels == i))
fractionOfPixelsOnBorder[:, i-1] = pixelsOnBorder
if pixelsOnBorder > 0.5:
fieldAngularCoverage[0, i-1] = subtended_angle
fieldsToKeep = np.logical_or(fieldsToKeep, labels == i)
fieldAngularCoverage = (fieldAngularCoverage / 360.)
rateInField = A[fieldsToKeep]
# normalize firing rate in the field to sum to 1
rateInField = rateInField / np.nansum(rateInField)
dist2WallInField = dists[fieldsToKeep]
Dm = np.dot(dist2WallInField, rateInField)
if 'circle' in shape:
Dm = Dm / radius
elif 'square' in shape:
Dm = Dm / (np.max(np.shape(A)) / 2.0)
borderScore = (fractionOfPixelsOnBorder-Dm) / (
fractionOfPixelsOnBorder+Dm)
return np.max(borderScore)
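# Hedged sketch (assumed geometry): a band of firing hugging one wall of a
# square arena should yield a high border score. Small fields below minArea
# are discarded, in which case the function returns nan.
def _demo_border_score():
    wall_cell = np.zeros((40, 40))
    wall_cell[:, :4] = 5.0  # firing along the west wall
    print('wall cell score:', border_score(wall_cell, shape='square'))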
def _get_field_labels(A: np.ndarray, **kwargs) -> tuple:
'''
Returns a labeled version of A after finding the peaks
in A and finding the watershed basins from the markers
found from those peaks. Used in field_props() and
grid_field_props()
Parameters
-----------------
A : np.ndarray
Valid kwargs:
min_distance : float
The distance in bins between fields to separate the regions
of the image
    clear_border : bool or int
        Passed to skimage.feature.peak_local_max as exclude_border,
        i.e. the number of pixels to ignore at the edge of the image
'''
clear_border = True
if 'clear_border' in kwargs:
clear_border = kwargs.pop('clear_border')
min_distance = 1
if 'min_distance' in kwargs:
min_distance = kwargs.pop('min_distance')
A[~np.isfinite(A)] = -1
A[A < 0] = -1
peak_coords = skimage.feature.peak_local_max(
A, min_distance=min_distance,
exclude_border=clear_border)
peaksMask = np.zeros_like(A, dtype=bool)
peaksMask[tuple(peak_coords.T)] = True
peaksLabel, nLbls = ndimage.label(peaksMask)
ws = watershed(image=-A, markers=peaksLabel)
return peak_coords, ws
def field_props(
A, min_dist=5, neighbours=2, prc=50,
plot=False, ax=None, tri=False, verbose=True, **kwargs):
"""
Returns a dictionary of properties of the field(s) in a ratemap A
Parameters
----------
A : array_like
a ratemap (but could be any image)
min_dist : float
the separation (in bins) between fields for measures
such as field distance to make sense. Used to
partition the image into separate fields in the call to
feature.peak_local_max
neighbours : int
the number of fields to consider as neighbours to
any given field. Defaults to 2
prc : float
        percent of each sub-field's peak rate used as its threshold
ax : matplotlib.Axes
user supplied axis. If None a new figure window is created
tri : bool
whether to do Delaunay triangulation between fields
and add to plot
verbose : bool
dumps the properties to the console
plot : bool
whether to plot some output - currently consists of the
ratemap A, the fields of which are outline in a black
contour. Default False
Returns
-------
result : dict
The properties of the field(s) in the input ratemap A
"""
from skimage.measure import find_contours
from sklearn.neighbors import NearestNeighbors
nan_idx = np.isnan(A)
Ac = A.copy()
Ac[np.isnan(A)] = 0
# smooth Ac more to remove local irregularities
n = ny = 5
x, y = np.mgrid[-n:n+1, -ny:ny+1]
g = np.exp(-(x**2/float(n) + y**2/float(ny)))
g = g / g.sum()
Ac = signal.convolve(Ac, g, mode='same')
peak_idx, field_labels = _get_field_labels(Ac, **kwargs)
nFields = np.max(field_labels)
if neighbours > nFields:
print('neighbours value of {0} > the {1} peaks found'.format(
neighbours, nFields))
print('Reducing neighbours to number of peaks found')
neighbours = nFields
sub_field_mask = np.zeros((nFields, Ac.shape[0], Ac.shape[1]))
sub_field_props = skimage.measure.regionprops(
field_labels, intensity_image=Ac)
sub_field_centroids = []
sub_field_size = []
for sub_field in sub_field_props:
tmp = np.zeros(Ac.shape).astype(bool)
tmp[sub_field.coords[:, 0], sub_field.coords[:, 1]] = True
tmp2 = Ac > sub_field.max_intensity * (prc/float(100))
sub_field_mask[sub_field.label - 1, :, :] = np.logical_and(
tmp2, tmp)
sub_field_centroids.append(sub_field.centroid)
sub_field_size.append(sub_field.area) # in bins
sub_field_mask = np.sum(sub_field_mask, 0)
contours = skimage.measure.find_contours(sub_field_mask, 0.5)
# find the nearest neighbors to the peaks of each sub-field
nbrs = NearestNeighbors(n_neighbors=neighbours,
algorithm='ball_tree').fit(peak_idx)
distances, _ = nbrs.kneighbors(peak_idx)
mean_field_distance = np.mean(distances[:, 1:neighbours])
nValid_bins = np.sum(~nan_idx)
# calculate the amount of out of field firing
A_non_field = np.zeros_like(A) * np.nan
A_non_field[~sub_field_mask.astype(bool)] = A[
~sub_field_mask.astype(bool)]
A_non_field[nan_idx] = np.nan
out_of_field_firing_prc = (np.count_nonzero(
A_non_field > 0) / float(nValid_bins)) * 100
Ac[np.isnan(A)] = np.nan
"""
get some stats about the field ellipticity
"""
ellipse_ratio = np.nan
_, central_field, _ = limit_to_one(A, prc=50)
contour_coords = find_contours(central_field, 0.5)
from skimage.measure import EllipseModel
E = EllipseModel()
E.estimate(contour_coords[0])
ellipse_axes = E.params[2:4]
ellipse_ratio = np.min(ellipse_axes) / np.max(ellipse_axes)
""" using the peak_idx values calculate the angles of the triangles that
make up a delaunay tesselation of the space if the calc_angles arg is
in kwargs
"""
if 'calc_angs' in kwargs.keys():
angs = calc_angs(peak_idx)
else:
angs = None
props = {
'Ac': Ac, 'Peak_rate': np.nanmax(A), 'Mean_rate': np.nanmean(A),
'Field_size': np.mean(sub_field_size),
'Pct_bins_with_firing': (np.sum(
sub_field_mask) / nValid_bins) * 100,
'Out_of_field_firing_prc': out_of_field_firing_prc,
'Dist_between_fields': mean_field_distance,
'Num_fields': float(nFields),
'Sub_field_mask': sub_field_mask,
'Smoothed_map': Ac,
'field_labels': field_labels,
'Peak_idx': peak_idx,
'angles': angs,
'contours': contours,
'ellipse_ratio': ellipse_ratio}
if verbose:
print('\nPercentage of bins with firing: {:.2%}'.format(
np.sum(sub_field_mask) / nValid_bins))
print('Percentage out of field firing: {:.2%}'.format(
np.count_nonzero(A_non_field > 0) / float(nValid_bins)))
print('Peak firing rate: {:.3} Hz'.format(np.nanmax(A)))
print('Mean firing rate: {:.3} Hz'.format(np.nanmean(A)))
print('Number of fields: {0}'.format(nFields))
print('Mean field size: {:.5} cm'.format(np.mean(sub_field_size)))
print('Mean inter-peak distance between \
fields: {:.4} cm'.format(mean_field_distance))
return props
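# Hedged sketch: field_props bundles its measures into a dict; with
# verbose=True it also prints a summary to the console.
def _demo_field_props(rmap):
    props = field_props(rmap, min_dist=5, neighbours=2, verbose=False)
    return props['Num_fields'], props['Peak_rate'], props['ellipse_ratio']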
def calc_angs(points):
"""
Calculates the angles for all triangles in a delaunay tesselation of
the peak points in the ratemap
"""
# calculate the lengths of the sides of the triangles
tri = spatial.Delaunay(points)
angs = []
for s in tri.simplices:
A = tri.points[s[1]] - tri.points[s[0]]
B = tri.points[s[2]] - tri.points[s[1]]
C = tri.points[s[0]] - tri.points[s[2]]
for e1, e2 in ((A, -B), (B, -C), (C, -A)):
num = np.dot(e1, e2)
denom = np.linalg.norm(e1) * np.linalg.norm(e2)
angs.append(np.arccos(num/denom) * 180 / np.pi)
return np.array(angs).T
def corr_maps(map1, map2, maptype='normal'):
"""
correlates two ratemaps together ignoring areas that have zero sampling
"""
if map1.shape > map2.shape:
map2 = skimage.transform.resize(map2, map1.shape, mode='reflect')
elif map1.shape < map2.shape:
map1 = skimage.transform.resize(map1, map2.shape, mode='reflect')
map1 = map1.flatten()
map2 = map2.flatten()
if 'normal' in maptype:
valid_map1 = np.logical_or((map1 > 0), ~np.isnan(map1))
valid_map2 = np.logical_or((map2 > 0), ~np.isnan(map2))
elif 'grid' in maptype:
valid_map1 = ~np.isnan(map1)
valid_map2 = ~np.isnan(map2)
valid = np.logical_and(valid_map1, valid_map2)
r = np.corrcoef(map1[valid], map2[valid])
return r[1][0]
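# Hedged numeric check: a map correlated with itself gives r == 1, and
# added noise lowers the correlation; unsampled/NaN bins are ignored.
def _demo_corr_maps(rmap):
    rng = np.random.default_rng(0)  # seeded for reproducibility
    noisy = rmap + rng.random(rmap.shape) * 0.1
    print('self correlation:', corr_maps(rmap, rmap))
    print('noisy correlation:', corr_maps(rmap, noisy))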
def coherence(smthd_rate, unsmthd_rate):
"""calculates coherence of receptive field via correlation of smoothed
and unsmoothed ratemaps
"""
smthd = smthd_rate.ravel()
unsmthd = unsmthd_rate.ravel()
si = ~np.isnan(smthd)
ui = ~np.isnan(unsmthd)
idx = ~(~si | ~ui)
coherence = np.corrcoef(unsmthd[idx], smthd[idx])
return coherence[1, 0]
def kldiv_dir(polarPlot):
"""
Returns a kl divergence for directional firing: measure of
directionality.
    Calculates the KL divergence between a smoothed ratemap (probably
should be smoothed otherwise information theoretic measures
don't 'care' about position of bins relative to
one another) and a pure circular distribution.
    The larger the divergence the more tendency the cell has to fire
when the animal faces a specific direction.
Parameters
----------
polarPlot: 1D-array
The binned and smoothed directional ratemap
Returns
-------
klDivergence: float
The divergence from circular of the 1D-array from a
uniform circular distribution
"""
__inc = 0.00001
polarPlot = np.atleast_2d(polarPlot)
polarPlot[np.isnan(polarPlot)] = __inc
polarPlot[polarPlot == 0] = __inc
normdPolar = polarPlot / float(np.nansum(polarPlot))
nDirBins = polarPlot.shape[1]
compCirc = np.ones_like(polarPlot) / float(nDirBins)
X = np.arange(0, nDirBins)
kldivergence = kldiv(np.atleast_2d(X), normdPolar, compCirc)
return kldivergence
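# Hedged sketch: a sharply tuned direction curve diverges more from the
# uniform circular distribution than a flat one (which gives ~0).
def _demo_kldiv_dir():
    bins = np.arange(0, 360, 6, dtype=float)
    tuned = np.exp(4 * np.cos(np.deg2rad(bins - 90)))  # von-Mises-like bump
    flat = np.ones_like(bins)
    print('tuned cell:', kldiv_dir(tuned))
    print('untuned cell:', kldiv_dir(flat))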
def kldiv(X, pvect1, pvect2, variant=None):
"""
Calculates the Kullback-Leibler or Jensen-Shannon divergence between
two distributions.
kldiv(X,P1,P2) returns the Kullback-Leibler divergence between two
distributions specified over the M variable values in vector X.
P1 is a length-M vector of probabilities representing distribution 1;
P2 is a length-M vector of probabilities representing distribution 2.
Thus, the probability of value X(i) is P1(i) for distribution 1 and
P2(i) for distribution 2.
The Kullback-Leibler divergence is given by:
    .. math:: KL(P_1(x), P_2(x)) = \sum_x P_1(x) \log\left(\frac{P_1(x)}{P_2(x)}\right)
If X contains duplicate values, there will be an warning message,
and these values will be treated as distinct values. (I.e., the
actual values do not enter into the computation, but the probabilities
for the two duplicate values will be considered as probabilities
corresponding to two unique values.).
The elements of probability vectors P1 and P2 must
each sum to 1 +/- .00001.
kldiv(X,P1,P2,'sym') returns a symmetric variant of the
Kullback-Leibler divergence, given by [KL(P1,P2)+KL(P2,P1)]/2
kldiv(X,P1,P2,'js') returns the Jensen-Shannon divergence, given by
[KL(P1,Q)+KL(P2,Q)]/2, where Q = (P1+P2)/2. See the Wikipedia article
for "Kullback–Leibler divergence". This is equal to 1/2 the so-called
"Jeffrey divergence."
See Also
--------
    Cover, T.M. and Thomas, J.A. "Elements of Information Theory," Wiley,
1991.
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Notes
-----
This function is taken from one on the Mathworks file exchange
"""
if len(np.unique(X)) != len(np.sort(X)):
warnings.warn(
'X contains duplicate values. Treated as distinct values.',
UserWarning)
if not np.equal(
np.shape(X), np.shape(pvect1)).all() or not np.equal(
np.shape(X), np.shape(pvect2)).all():
raise ValueError("Inputs are not the same size")
if (np.abs(
np.sum(pvect1) - 1) > 0.00001) or (np.abs(
np.sum(pvect2) - 1) > 0.00001):
        warnings.warn("Probabilities don't sum to 1.", UserWarning)
if variant:
if variant == 'js':
logqvect = np.log2((pvect2 + pvect1) / 2)
KL = 0.5 * (np.nansum(pvect1 * (np.log2(pvect1) - logqvect)) +
np.sum(pvect2 * (np.log2(pvect2) - logqvect)))
return KL
elif variant == 'sym':
KL1 = np.nansum(pvect1 * (np.log2(pvect1) - np.log2(pvect2)))
KL2 = np.nansum(pvect2 * (np.log2(pvect2) - np.log2(pvect1)))
KL = (KL1 + KL2) / 2
return KL
else:
warnings.warn('Last argument not recognised', UserWarning)
KL = np.nansum(pvect1 * (np.log2(pvect1) - np.log2(pvect2)))
return KL
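# Hedged numeric check: KL(p, p) is 0 by definition and the 'js' variant is
# symmetric in its two distribution arguments.
def _demo_kldiv():
    X = np.arange(4)
    p = np.array([0.1, 0.2, 0.3, 0.4])
    q = np.full(4, 0.25)
    print('KL(p, p):', kldiv(X, p, p))        # 0.0
    print('JS(p, q):', kldiv(X, p, q, 'js'))  # equals kldiv(X, q, p, 'js')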
def skaggs_info(ratemap, dwelltimes, **kwargs):
"""
Calculates Skaggs information measure
Parameters
----------
ratemap : array_like
The binned up ratemap
dwelltimes: array_like
Must be same size as ratemap
Returns
-------
bits_per_spike : float
Skaggs information score
Notes
-----
THIS DATA SHOULD UNDERGO ADAPTIVE BINNING
See adaptiveBin in binning class above
Returns Skaggs et al's estimate of spatial information
in bits per spike:
    .. math:: I = \sum_x p(x) \, \frac{r(x)}{\bar{r}} \, \log_2\left(\frac{r(x)}{\bar{r}}\right)
"""
if 'sample_rate' in kwargs:
sample_rate = kwargs['sample_rate']
else:
sample_rate = 50
dwelltimes = dwelltimes / sample_rate # assumed sample rate of 50Hz
if ratemap.ndim > 1:
ratemap = np.reshape(
ratemap, (np.prod(np.shape(ratemap)), 1))
dwelltimes = np.reshape(
dwelltimes, (np.prod(np.shape(dwelltimes)), 1))
duration = np.nansum(dwelltimes)
meanrate = np.nansum(ratemap * dwelltimes) / duration
if meanrate <= 0.0:
bits_per_spike = np.nan
return bits_per_spike
p_x = dwelltimes / duration
p_r = ratemap / meanrate
dum = p_x * ratemap
ind = np.nonzero(dum)[0]
bits_per_spike = np.nansum(dum[ind] * np.log2(p_r[ind]))
bits_per_spike = bits_per_spike / meanrate
return bits_per_spike
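# Hedged numeric check: a cell that fires in only one of four equally
# visited bins carries log2(4) = 2 bits per spike.
def _demo_skaggs():
    rates = np.array([8.0, 0.0, 0.0, 0.0])
    dwell = np.full(4, 50.0)  # 1 s per bin at the assumed 50 Hz sample rate
    print('bits per spike:', skaggs_info(rates, dwell))  # ~2.0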
def grid_field_props(
A, maxima='centroid', allProps=True,
**kwargs):
"""
Extracts various measures from a spatial autocorrelogram
Parameters
----------
A : array_like
The spatial autocorrelogram (SAC)
maxima : str, optional
The method used to detect the peaks in the SAC.
Legal values are 'single' and 'centroid'. Default 'centroid'
allProps : bool, optional
Whether to return a dictionary that contains the attempt to fit an
ellipse around the edges of the central size peaks. See below
Default True
Returns
-------
props : dict
A dictionary containing measures of the SAC. Keys include:
* gridness score
* scale
* orientation
* coordinates of the peaks (nominally 6) closest to SAC centre
* a binary mask around the extent of the 6 central fields
* values of the rotation procedure used to calculate gridness
* ellipse axes and angle (if allProps is True and the it worked)
Notes
-----
    The output from this method can be used as input to the show() method
    of this class. When it is, the plot produced will be a lot more
    informative.
See Also
--------
ephysiopy.common.binning.autoCorr2D()
"""
A_tmp = A.copy()
A_tmp[~np.isfinite(A)] = -1
A_tmp[A_tmp <= 0] = -1
A_sz = np.array(np.shape(A))
# [STAGE 1] find peaks & identify 7 closest to centre
if 'min_distance' in kwargs:
min_distance = kwargs.pop('min_distance')
else:
min_distance = np.ceil(np.min(A_sz / 2) / 8.).astype(int)
peak_idx, field_labels = _get_field_labels(
A_tmp, neighbours=7, **kwargs)
# a fcn for the labeled_comprehension function that returns
# linear indices in A where the values in A for each label are
# greater than half the max in that labeled region
def fn(val, pos):
return pos[val > (np.max(val)/2)]
nLbls = np.max(field_labels)
indices = ndimage.labeled_comprehension(
A_tmp, field_labels, np.arange(0, nLbls), fn, np.ndarray, 0, True)
# turn linear indices into coordinates
coords = [np.unravel_index(i, np.shape(A)) for i in indices]
half_peak_labels = np.zeros_like(A)
for peak_id, coord in enumerate(coords):
xc, yc = coord
half_peak_labels[xc, yc] = peak_id
# Get some statistics about the labeled regions
# fieldPerim = bwperim(half_peak_labels)
lbl_range = np.arange(0, nLbls)
# meanRInLabel = ndimage.mean(A, half_peak_labels, lbl_range)
# nPixelsInLabel = np.bincount(np.ravel(half_peak_labels.astype(int)))
# sumRInLabel = ndimage.sum_labels(A, half_peak_labels, lbl_range)
# maxRInLabel = ndimage.maximum(A, half_peak_labels, lbl_range)
peak_coords = ndimage.maximum_position(
A, half_peak_labels, lbl_range)
# Get some distance and morphology measures
centre = np.floor(np.array(np.shape(A))/2)
centred_peak_coords = peak_coords - centre
peak_dist_to_centre = np.hypot(
centred_peak_coords.T[0],
centred_peak_coords.T[1]
)
closest_peak_idx = np.argsort(peak_dist_to_centre)
central_peak_label = closest_peak_idx[0]
closest_peak_idx = closest_peak_idx[1:np.min((7, len(closest_peak_idx)-1))]
# closest_peak_idx should now the indices of the labeled 6 peaks
# surrounding the central peak at the image centre
scale = np.median(peak_dist_to_centre[closest_peak_idx])
orientation = np.nan
orientation = grid_orientation(
centred_peak_coords, closest_peak_idx)
central_pt = peak_coords[central_peak_label]
x = np.linspace(-central_pt[0], central_pt[0], A_sz[0])
y = np.linspace(-central_pt[1], central_pt[1], A_sz[1])
xv, yv = np.meshgrid(x, y, indexing='ij')
dist_to_centre = np.hypot(xv, yv)
# get the max distance of the half-peak width labeled fields
# from the centre of the image
max_dist_from_centre = 0
for peak_id, _coords in enumerate(coords):
if peak_id in closest_peak_idx:
xc, yc = _coords
if np.any(xc) and np.any(yc):
xc = xc - np.floor(A_sz[0]/2)
yc = yc - np.floor(A_sz[1]/2)
d = np.max(np.hypot(xc, yc))
if d > max_dist_from_centre:
max_dist_from_centre = d
# Set the outer bits and the central region of the SAC to nans
# getting ready for the correlation procedure
dist_to_centre[np.abs(dist_to_centre) > max_dist_from_centre] = 0
dist_to_centre[half_peak_labels == central_peak_label] = 0
dist_to_centre[dist_to_centre != 0] = 1
dist_to_centre = dist_to_centre.astype(bool)
sac_middle = A.copy()
sac_middle[~dist_to_centre] = np.nan
if 'step' in kwargs.keys():
step = kwargs.pop('step')
else:
step = 30
try:
gridscore, rotationCorrVals, rotationArr = gridness(
sac_middle, step=step)
except Exception:
gridscore, rotationCorrVals, rotationArr = np.nan, np.nan, np.nan
im_centre = central_pt
if allProps:
# attempt to fit an ellipse around the outer edges of the nearest
# peaks to the centre of the SAC. First find the outer edges for
# the closest peaks using a ndimages labeled_comprehension
try:
def fn2(val, pos):
xc, yc = np.unravel_index(pos, A_sz)
xc = xc - np.floor(A_sz[0]/2)
yc = yc - np.floor(A_sz[1]/2)
idx = np.argmax(np.hypot(xc, yc))
return xc[idx], yc[idx]
ellipse_coords = ndimage.labeled_comprehension(
A, half_peak_labels, closest_peak_idx, fn2, tuple, 0, True)
ellipse_fit_coords = np.array([(x, y) for x, y in ellipse_coords])
from skimage.measure import EllipseModel
E = EllipseModel()
E.estimate(ellipse_fit_coords)
im_centre = E.params[0:2]
ellipse_axes = E.params[2:4]
ellipse_angle = E.params[-1]
ellipseXY = E.predict_xy(np.linspace(0, 2*np.pi, 50), E.params)
            # get the min containing circle given the ellipse minor axis
from skimage.measure import CircleModel
_params = im_centre
_params.append(np.min(ellipse_axes))
circleXY = CircleModel().predict_xy(
np.linspace(0, 2*np.pi, 50), params=_params)
except (TypeError, ValueError): # non-iterable x and y i.e. ellipse coords fail
ellipse_axes = None
ellipse_angle = (None, None)
ellipseXY = None
circleXY = None
# collect all the following keywords into a dict for output
closest_peak_coords = np.array(peak_coords)[closest_peak_idx]
dictKeys = (
'gridscore', 'scale', 'orientation', 'closest_peak_coords',
'dist_to_centre', 'ellipse_axes',
'ellipse_angle', 'ellipseXY', 'circleXY', 'im_centre',
'rotationArr', 'rotationCorrVals')
outDict = dict.fromkeys(dictKeys, np.nan)
for thiskey in outDict.keys():
outDict[thiskey] = locals()[thiskey]
# neat trick: locals is a dict holding all locally scoped variables
return outDict
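# Illustrative usage sketch added for this edit -- not part of the original
# module. It assumes the function ending above is grid_field_props (it is
# called under that name in deform_SAC below) and that `toy_sac` is a valid
# spatial autocorrelogram; the helper name is invented.
def _grid_field_props_usage_sketch(toy_sac):
    props = grid_field_props(toy_sac)
    # props is keyed by dictKeys above, e.g. 'gridscore', 'scale',
    # 'orientation', 'im_centre', ...
    return props['gridscore'], props['scale'], props['orientation']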
def grid_orientation(peakCoords, closestPeakIdx):
"""
Calculates the orientation angle of a grid field.
The orientation angle is the angle of the first peak working
counter-clockwise from 3 o'clock
Parameters
----------
peakCoords : array_like
The peak coordinates as pairs of xy
closestPeakIdx : array_like
A 1D array of the indices in peakCoords of the peaks closest
to the centre of the SAC
Returns
-------
peak_orientation : float
The first value in an array of the angles of the peaks in the SAC
working counter-clockwise from a line extending from the
middle of the SAC to 3 o'clock.
"""
if len(peakCoords) < 3 or closestPeakIdx.size == 0:
return np.nan
else:
from ephysiopy.common.utils import polar
# Assume that the first entry in peakCoords is
# the central peak of the SAC
peaks = peakCoords[closestPeakIdx]
peaks = peaks - peakCoords[closestPeakIdx[0]]
theta = polar(
peaks[:, 1],
-peaks[:, 0], deg=1)[1]
return np.sort(theta.compress(theta >= 0))[0]
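# Minimal call sketch (added; the seven hexagonally arranged coordinates are
# invented for illustration). Pass the central peak first, since the function
# subtracts peakCoords[closestPeakIdx[0]] from the rest before computing
# the polar angles.
def _grid_orientation_sketch():
    peak_coords = np.array(
        [[0, 0], [0, 12], [-10, 6], [-10, -6], [0, -12], [10, -6], [10, 6]])
    return grid_orientation(peak_coords, np.arange(7))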
def gridness(image, step=30):
"""
Calculates the gridness score in a grid cell SAC.
    Briefly, the data in `image` is rotated in steps of `step` degrees and
    each rotated array is correlated with the original.
    The maximum of the values at 30, 90 and 150 degrees
    is then subtracted from the minimum of the values at 60 and 120
    degrees to give the grid score.
Parameters
----------
image : array_like
The spatial autocorrelogram
step : int, optional
The amount to rotate the SAC in each step of the rotational
correlation procedure
Returns
-------
gridmeasures : 3-tuple
The gridscore, the correlation values at each `step` and
the rotational array
Notes
-----
    The correlation performed is a Pearson's R. Some rescaling of the
values in `image` is performed following rotation.
See Also
--------
skimage.transform.rotate : for how the rotation of `image` is done
    skimage.exposure.rescale_intensity : for the rescaling following
        rotation
"""
    # TODO: add options in here for whether the full range of correlations
    # is wanted or only a reduced set (i.e. only at multiples of 30 degrees)
from collections import OrderedDict
rotationalCorrVals = OrderedDict.fromkeys(
np.arange(0, 181, step), np.nan)
rotationArr = np.zeros(len(rotationalCorrVals)) * np.nan
# autoCorrMiddle needs to be rescaled or the image rotation falls down
# as values are cropped to lie between 0 and 1.0
in_range = (np.nanmin(image), np.nanmax(image))
out_range = (0, 1)
import skimage
autoCorrMiddleRescaled = skimage.exposure.rescale_intensity(
image, in_range, out_range)
origNanIdx = np.isnan(autoCorrMiddleRescaled.ravel())
for idx, angle in enumerate(rotationalCorrVals.keys()):
rotatedA = skimage.transform.rotate(
autoCorrMiddleRescaled, angle=angle, cval=np.nan, order=3)
# ignore nans
rotatedNanIdx = np.isnan(rotatedA.ravel())
allNans = np.logical_or(origNanIdx, rotatedNanIdx)
# get the correlation between the original and rotated images
rotationalCorrVals[angle] = stats.pearsonr(
autoCorrMiddleRescaled.ravel()[~allNans],
rotatedA.ravel()[~allNans])[0]
rotationArr[idx] = rotationalCorrVals[angle]
gridscore = np.min(
(
rotationalCorrVals[60],
rotationalCorrVals[120])) - np.max(
(
rotationalCorrVals[150],
rotationalCorrVals[30],
rotationalCorrVals[90]))
return gridscore, rotationalCorrVals, rotationArr
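# Worked sketch (added for illustration; `toy_sac` is an assumption). With the
# default step the score reduces to
#   gridscore = min(r(60), r(120)) - max(r(30), r(90), r(150))
# where r(angle) is the Pearson correlation between the (rescaled) SAC and a
# copy rotated by that angle.
def _gridness_usage_sketch(toy_sac):
    score, corr_by_angle, corr_arr = gridness(toy_sac, step=30)
    return score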
def deform_SAC(A, circleXY=None, ellipseXY=None):
"""
    Deforms a SAC that is non-circular so that it is more circular.
    Basically a blatant attempt to improve grid scores, possibly
    introduced in a paper by <NAME>...
Parameters
----------
A : array_like
The SAC
circleXY : array_like
The xy coordinates defining a circle. Default None.
ellipseXY : array_like
The xy coordinates defining an ellipse. Default None.
Returns
-------
deformed_sac : array_like
The SAC deformed to be more circular
See Also
--------
ephysiopy.common.ephys_generic.FieldCalcs.grid_field_props
skimage.transform.AffineTransform
skimage.transform.warp
skimage.exposure.rescale_intensity
"""
if circleXY is None or ellipseXY is None:
SAC_stats = grid_field_props(A)
circleXY = SAC_stats['circleXY']
ellipseXY = SAC_stats['ellipseXY']
# The ellipse detection stuff might have failed, if so
# return the original SAC
if circleXY is None:
return A
tform = skimage.transform.AffineTransform()
tform.estimate(ellipseXY, circleXY)
"""
the transformation algorithms used here crop values < 0 to 0. Need to
rescale the SAC values before doing the deformation and then rescale
again so the values assume the same range as in the unadulterated SAC
"""
A[np.isnan(A)] = 0
SACmin = np.nanmin(A.flatten())
SACmax = np.nanmax(A.flatten()) # should be 1 if autocorr
AA = A + 1
deformedSAC = skimage.transform.warp(
AA / np.nanmax(AA.flatten()), inverse_map=tform.inverse, cval=0)
return skimage.exposure.rescale_intensity(
deformedSAC, out_range=(SACmin, SACmax))
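# Usage sketch (added; `sac` is an assumption). With no fits supplied,
# deform_SAC derives the circle and ellipse coordinates itself via
# grid_field_props, so the one-argument call is the simplest entry point.
def _deform_sac_usage_sketch(sac):
    return deform_SAC(sac)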
| [
"numpy.sum",
"skimage.feature.peak_local_max",
"numpy.abs",
"numpy.ravel",
"numpy.arctan2",
"numpy.floor",
"skimage.exposure.rescale_intensity",
"numpy.isnan",
"numpy.argmin",
"numpy.shape",
"numpy.argsort",
"skimage.measure.label",
"numpy.mean",
"numpy.arange",
"skimage.measure.find_con... | [((834, 845), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (842, 845), True, 'import numpy as np\n'), ((912, 945), 'ephysiopy.common.utils.blurImage', 'blurImage', (['A', 'h'], {'ftype': '"""gaussian"""'}), "(A, h, ftype='gaussian')\n", (921, 945), False, 'from ephysiopy.common.utils import blurImage\n'), ((1033, 1081), 'scipy.ndimage.distance_transform_edt', 'ndimage.distance_transform_edt', (['(sm_rmap > thresh)'], {}), '(sm_rmap > thresh)\n', (1063, 1081), False, 'from scipy import ndimage\n'), ((1093, 1200), 'skimage.feature.peak_local_max', 'skimage.feature.peak_local_max', (['distance'], {'indices': '(False)', 'exclude_border': '(False)', 'labels': '(sm_rmap > thresh)'}), '(distance, indices=False, exclude_border=\n False, labels=sm_rmap > thresh)\n', (1123, 1200), False, 'import skimage\n'), ((1264, 1328), 'skimage.segmentation.watershed', 'watershed', ([], {'image': '(-distance)', 'markers': 'label', 'mask': '(sm_rmap > thresh)'}), '(image=-distance, markers=label, mask=sm_rmap > thresh)\n', (1273, 1328), False, 'from skimage.segmentation import watershed\n'), ((1910, 1945), 'scipy.signal.convolve', 'signal.convolve', (['Ac', 'g'], {'mode': '"""same"""'}), "(Ac, g, mode='same')\n", (1925, 1945), False, 'from scipy import signal\n'), ((2018, 2117), 'skimage.feature.peak_local_max', 'skimage.feature.peak_local_max', (['Ac'], {'min_distance': 'min_dist', 'exclude_border': '(False)', 'indices': '(False)'}), '(Ac, min_distance=min_dist, exclude_border=\n False, indices=False)\n', (2048, 2117), False, 'import skimage\n'), ((2156, 2204), 'skimage.measure.label', 'skimage.measure.label', (['peak_mask'], {'connectivity': '(2)'}), '(peak_mask, connectivity=2)\n', (2177, 2204), False, 'import skimage\n'), ((2224, 2265), 'skimage.segmentation.watershed', 'watershed', ([], {'image': '(-Ac)', 'markers': 'peak_labels'}), '(image=-Ac, markers=peak_labels)\n', (2233, 2265), False, 'from skimage.segmentation import watershed\n'), ((2289, 2309), 'numpy.max', 'np.max', (['field_labels'], {}), '(field_labels)\n', (2295, 2309), True, 'import numpy as np\n'), ((2331, 2376), 'numpy.zeros', 'np.zeros', (['(nFields, Ac.shape[0], Ac.shape[1])'], {}), '((nFields, Ac.shape[0], Ac.shape[1]))\n', (2339, 2376), True, 'import numpy as np\n'), ((2407, 2436), 'numpy.zeros_like', 'np.zeros_like', (['sub_field_mask'], {}), '(sub_field_mask)\n', (2420, 2436), True, 'import numpy as np\n'), ((2459, 2520), 'skimage.measure.regionprops', 'skimage.measure.regionprops', (['field_labels'], {'intensity_image': 'Ac'}), '(field_labels, intensity_image=Ac)\n', (2486, 2520), False, 'import skimage\n'), ((3131, 3156), 'numpy.sum', 'np.sum', (['sub_field_mask', '(0)'], {}), '(sub_field_mask, 0)\n', (3137, 3156), True, 'import numpy as np\n'), ((3279, 3325), 'numpy.hypot', 'np.hypot', (['normd_dists[:, 0]', 'normd_dists[:, 1]'], {}), '(normd_dists[:, 0], normd_dists[:, 1])\n', (3287, 3325), True, 'import numpy as np\n'), ((3359, 3393), 'numpy.argmin', 'np.argmin', (['field_dists_from_middle'], {}), '(field_dists_from_middle)\n', (3368, 3393), True, 'import numpy as np\n'), ((3414, 3474), 'numpy.squeeze', 'np.squeeze', (['labelled_sub_field_mask[central_field_idx, :, :]'], {}), '(labelled_sub_field_mask[central_field_idx, :, :])\n', (3424, 3474), True, 'import numpy as np\n'), ((3567, 3601), 'numpy.sum', 'np.sum', (['labelled_sub_field_mask', '(0)'], {}), '(labelled_sub_field_mask, 0)\n', (3573, 3601), True, 'import numpy as np\n'), ((3644, 3692), 'skimage.segmentation.clear_border', 
'skimage.segmentation.clear_border', (['central_field'], {}), '(central_field)\n', (3677, 3692), False, 'import skimage\n'), ((4346, 4381), 'scipy.signal.convolve', 'signal.convolve', (['Ac', 'g'], {'mode': '"""same"""'}), "(Ac, g, mode='same')\n", (4361, 4381), False, 'from scipy import signal\n'), ((4478, 4577), 'skimage.feature.peak_local_max', 'skimage.feature.peak_local_max', (['Ac'], {'min_distance': 'min_dist', 'exclude_border': '(False)', 'indices': '(False)'}), '(Ac, min_distance=min_dist, exclude_border=\n False, indices=False)\n', (4508, 4577), False, 'import skimage\n'), ((4616, 4664), 'skimage.measure.label', 'skimage.measure.label', (['peak_mask'], {'connectivity': '(2)'}), '(peak_mask, connectivity=2)\n', (4637, 4664), False, 'import skimage\n'), ((4684, 4725), 'skimage.segmentation.watershed', 'watershed', ([], {'image': '(-Ac)', 'markers': 'peak_labels'}), '(image=-Ac, markers=peak_labels)\n', (4693, 4725), False, 'from skimage.segmentation import watershed\n'), ((4749, 4769), 'numpy.max', 'np.max', (['field_labels'], {}), '(field_labels)\n', (4755, 4769), True, 'import numpy as np\n'), ((4981, 4993), 'numpy.isnan', 'np.isnan', (['Ac'], {}), '(Ac)\n', (4989, 4993), True, 'import numpy as np\n'), ((5197, 5232), 'scipy.signal.convolve', 'signal.convolve', (['Ac', 'g'], {'mode': '"""same"""'}), "(Ac, g, mode='same')\n", (5212, 5232), False, 'from scipy import signal\n'), ((5249, 5348), 'skimage.feature.peak_local_max', 'skimage.feature.peak_local_max', (['Ac'], {'min_distance': 'min_dist', 'exclude_border': '(False)', 'indices': '(False)'}), '(Ac, min_distance=min_dist, exclude_border=\n False, indices=False)\n', (5279, 5348), False, 'import skimage\n'), ((5379, 5427), 'skimage.measure.label', 'skimage.measure.label', (['peak_mask'], {'connectivity': '(2)'}), '(peak_mask, connectivity=2)\n', (5400, 5427), False, 'import skimage\n'), ((5447, 5488), 'skimage.segmentation.watershed', 'watershed', ([], {'image': '(-Ac)', 'markers': 'peak_labels'}), '(image=-Ac, markers=peak_labels)\n', (5456, 5488), False, 'from skimage.segmentation import watershed\n'), ((5512, 5532), 'numpy.max', 'np.max', (['field_labels'], {}), '(field_labels)\n', (5518, 5532), True, 'import numpy as np\n'), ((5554, 5599), 'numpy.zeros', 'np.zeros', (['(nFields, Ac.shape[0], Ac.shape[1])'], {}), '((nFields, Ac.shape[0], Ac.shape[1]))\n', (5562, 5599), True, 'import numpy as np\n'), ((5622, 5683), 'skimage.measure.regionprops', 'skimage.measure.regionprops', (['field_labels'], {'intensity_image': 'Ac'}), '(field_labels, intensity_image=Ac)\n', (5649, 5683), False, 'import skimage\n'), ((6185, 6210), 'numpy.sum', 'np.sum', (['sub_field_mask', '(0)'], {}), '(sub_field_mask, 0)\n', (6191, 6210), True, 'import numpy as np\n'), ((6223, 6239), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (6236, 6239), True, 'import numpy as np\n'), ((9493, 9509), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (9506, 9509), True, 'import numpy as np\n'), ((9531, 9542), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (9539, 9542), True, 'import numpy as np\n'), ((11023, 11039), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (11036, 11039), True, 'import numpy as np\n'), ((11145, 11168), 'scipy.ndimage.label', 'ndimage.label', (['A_thresh'], {}), '(A_thresh)\n', (11158, 11168), False, 'from scipy import ndimage\n'), ((11242, 11343), 'skimage.morphology.remove_small_objects', 'skimage.morphology.remove_small_objects', (['labels'], {'min_size': 'min_size', 'connectivity': '(2)', 'in_place': 
'(True)'}), '(labels, min_size=min_size,\n connectivity=2, in_place=True)\n', (11281, 11343), False, 'import skimage\n'), ((11427, 11441), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (11433, 11441), True, 'import numpy as np\n'), ((11779, 11795), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (11792, 11795), True, 'import numpy as np\n'), ((12933, 12970), 'numpy.dot', 'np.dot', (['dist2WallInField', 'rateInField'], {}), '(dist2WallInField, rateInField)\n', (12939, 12970), True, 'import numpy as np\n'), ((13196, 13215), 'numpy.max', 'np.max', (['borderScore'], {}), '(borderScore)\n', (13202, 13215), True, 'import numpy as np\n'), ((14098, 14192), 'skimage.feature.peak_local_max', 'skimage.feature.peak_local_max', (['A'], {'min_distance': 'min_distance', 'exclude_border': 'clear_border'}), '(A, min_distance=min_distance, exclude_border\n =clear_border)\n', (14128, 14192), False, 'import skimage\n'), ((14221, 14249), 'numpy.zeros_like', 'np.zeros_like', (['A'], {'dtype': 'bool'}), '(A, dtype=bool)\n', (14234, 14249), True, 'import numpy as np\n'), ((14317, 14341), 'scipy.ndimage.label', 'ndimage.label', (['peaksMask'], {}), '(peaksMask)\n', (14330, 14341), False, 'from scipy import ndimage\n'), ((14351, 14390), 'skimage.segmentation.watershed', 'watershed', ([], {'image': '(-A)', 'markers': 'peaksLabel'}), '(image=-A, markers=peaksLabel)\n', (14360, 14390), False, 'from skimage.segmentation import watershed\n'), ((15774, 15785), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (15782, 15785), True, 'import numpy as np\n'), ((16012, 16047), 'scipy.signal.convolve', 'signal.convolve', (['Ac', 'g'], {'mode': '"""same"""'}), "(Ac, g, mode='same')\n", (16027, 16047), False, 'from scipy import signal\n'), ((16125, 16145), 'numpy.max', 'np.max', (['field_labels'], {}), '(field_labels)\n', (16131, 16145), True, 'import numpy as np\n'), ((16391, 16436), 'numpy.zeros', 'np.zeros', (['(nFields, Ac.shape[0], Ac.shape[1])'], {}), '((nFields, Ac.shape[0], Ac.shape[1]))\n', (16399, 16436), True, 'import numpy as np\n'), ((16459, 16520), 'skimage.measure.regionprops', 'skimage.measure.regionprops', (['field_labels'], {'intensity_image': 'Ac'}), '(field_labels, intensity_image=Ac)\n', (16486, 16520), False, 'import skimage\n'), ((17022, 17047), 'numpy.sum', 'np.sum', (['sub_field_mask', '(0)'], {}), '(sub_field_mask, 0)\n', (17028, 17047), True, 'import numpy as np\n'), ((17063, 17113), 'skimage.measure.find_contours', 'skimage.measure.find_contours', (['sub_field_mask', '(0.5)'], {}), '(sub_field_mask, 0.5)\n', (17092, 17113), False, 'import skimage\n'), ((17366, 17401), 'numpy.mean', 'np.mean', (['distances[:, 1:neighbours]'], {}), '(distances[:, 1:neighbours])\n', (17373, 17401), True, 'import numpy as np\n'), ((17421, 17437), 'numpy.sum', 'np.sum', (['(~nan_idx)'], {}), '(~nan_idx)\n', (17427, 17437), True, 'import numpy as np\n'), ((17952, 17985), 'skimage.measure.find_contours', 'find_contours', (['central_field', '(0.5)'], {}), '(central_field, 0.5)\n', (17965, 17985), False, 'from skimage.measure import find_contours\n'), ((18039, 18053), 'skimage.measure.EllipseModel', 'EllipseModel', ([], {}), '()\n', (18051, 18053), False, 'from skimage.measure import EllipseModel\n'), ((19939, 19963), 'scipy.spatial.Delaunay', 'spatial.Delaunay', (['points'], {}), '(points)\n', (19955, 19963), False, 'from scipy import spatial\n'), ((21057, 21095), 'numpy.logical_and', 'np.logical_and', (['valid_map1', 'valid_map2'], {}), '(valid_map1, valid_map2)\n', (21071, 21095), True, 'import numpy as 
np\n'), ((21104, 21141), 'numpy.corrcoef', 'np.corrcoef', (['map1[valid]', 'map2[valid]'], {}), '(map1[valid], map2[valid])\n', (21115, 21141), True, 'import numpy as np\n'), ((21474, 21511), 'numpy.corrcoef', 'np.corrcoef', (['unsmthd[idx]', 'smthd[idx]'], {}), '(unsmthd[idx], smthd[idx])\n', (21485, 21511), True, 'import numpy as np\n'), ((22308, 22332), 'numpy.atleast_2d', 'np.atleast_2d', (['polarPlot'], {}), '(polarPlot)\n', (22321, 22332), True, 'import numpy as np\n'), ((22570, 22592), 'numpy.arange', 'np.arange', (['(0)', 'nDirBins'], {}), '(0, nDirBins)\n', (22579, 22592), True, 'import numpy as np\n'), ((26586, 26607), 'numpy.nansum', 'np.nansum', (['dwelltimes'], {}), '(dwelltimes)\n', (26595, 26607), True, 'import numpy as np\n'), ((28997, 29017), 'numpy.max', 'np.max', (['field_labels'], {}), '(field_labels)\n', (29003, 29017), True, 'import numpy as np\n'), ((29269, 29285), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (29282, 29285), True, 'import numpy as np\n'), ((29511, 29530), 'numpy.arange', 'np.arange', (['(0)', 'nLbls'], {}), '(0, nLbls)\n', (29520, 29530), True, 'import numpy as np\n'), ((29829, 29885), 'scipy.ndimage.maximum_position', 'ndimage.maximum_position', (['A', 'half_peak_labels', 'lbl_range'], {}), '(A, half_peak_labels, lbl_range)\n', (29853, 29885), False, 'from scipy import ndimage\n'), ((30064, 30124), 'numpy.hypot', 'np.hypot', (['centred_peak_coords.T[0]', 'centred_peak_coords.T[1]'], {}), '(centred_peak_coords.T[0], centred_peak_coords.T[1])\n', (30072, 30124), True, 'import numpy as np\n'), ((30174, 30205), 'numpy.argsort', 'np.argsort', (['peak_dist_to_centre'], {}), '(peak_dist_to_centre)\n', (30184, 30205), True, 'import numpy as np\n'), ((30467, 30515), 'numpy.median', 'np.median', (['peak_dist_to_centre[closest_peak_idx]'], {}), '(peak_dist_to_centre[closest_peak_idx])\n', (30476, 30515), True, 'import numpy as np\n'), ((30682, 30733), 'numpy.linspace', 'np.linspace', (['(-central_pt[0])', 'central_pt[0]', 'A_sz[0]'], {}), '(-central_pt[0], central_pt[0], A_sz[0])\n', (30693, 30733), True, 'import numpy as np\n'), ((30742, 30793), 'numpy.linspace', 'np.linspace', (['(-central_pt[1])', 'central_pt[1]', 'A_sz[1]'], {}), '(-central_pt[1], central_pt[1], A_sz[1])\n', (30753, 30793), True, 'import numpy as np\n'), ((30807, 30839), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (30818, 30839), True, 'import numpy as np\n'), ((30861, 30877), 'numpy.hypot', 'np.hypot', (['xv', 'yv'], {}), '(xv, yv)\n', (30869, 30877), True, 'import numpy as np\n'), ((37152, 37214), 'skimage.exposure.rescale_intensity', 'skimage.exposure.rescale_intensity', (['image', 'in_range', 'out_range'], {}), '(image, in_range, out_range)\n', (37186, 37214), False, 'import skimage\n'), ((39267, 39302), 'skimage.transform.AffineTransform', 'skimage.transform.AffineTransform', ([], {}), '()\n', (39300, 39302), False, 'import skimage\n'), ((39844, 39919), 'skimage.exposure.rescale_intensity', 'skimage.exposure.rescale_intensity', (['deformedSAC'], {'out_range': '(SACmin, SACmax)'}), '(deformedSAC, out_range=(SACmin, SACmax))\n', (39878, 39919), False, 'import skimage\n'), ((1233, 1252), 'scipy.ndimage.label', 'ndimage.label', (['mask'], {}), '(mask)\n', (1246, 1252), False, 'from scipy import ndimage\n'), ((1358, 1374), 'scipy.ndimage.label', 'ndimage.label', (['w'], {}), '(w)\n', (1371, 1374), False, 'from scipy import ndimage\n'), ((1709, 1720), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (1717, 1720), True, 
'import numpy as np\n'), ((2850, 2875), 'numpy.logical_and', 'np.logical_and', (['tmp2', 'tmp'], {}), '(tmp2, tmp)\n', (2864, 2875), True, 'import numpy as np\n'), ((3757, 3777), 'numpy.any', 'np.any', (['cleared_mask'], {}), '(cleared_mask)\n', (3763, 3777), True, 'import numpy as np\n'), ((4197, 4208), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (4205, 4208), True, 'import numpy as np\n'), ((4406, 4418), 'numpy.ravel', 'np.ravel', (['Ac'], {}), '(Ac)\n', (4414, 4418), True, 'import numpy as np\n'), ((6013, 6038), 'numpy.logical_and', 'np.logical_and', (['tmp2', 'tmp'], {}), '(tmp2, tmp)\n', (6027, 6038), True, 'import numpy as np\n'), ((9642, 9673), 'skimage.morphology.disk', 'skimage.morphology.disk', (['radius'], {}), '(radius)\n', (9665, 9673), False, 'import skimage\n'), ((9792, 9826), 'numpy.zeros', 'np.zeros', (['[A_rows + 2, A_cols + 2]'], {}), '([A_rows + 2, A_cols + 2])\n', (9800, 9826), True, 'import numpy as np\n'), ((9879, 9924), 'scipy.ndimage.morphology.distance_transform_bf', 'ndimage.morphology.distance_transform_bf', (['tmp'], {}), '(tmp)\n', (9919, 9924), False, 'from scipy import ndimage\n'), ((9980, 10017), 'numpy.logical_xor', 'np.logical_xor', (['(dists <= 0)', '(dists < 2)'], {}), '(dists <= 0, dists < 2)\n', (9994, 10017), True, 'import numpy as np\n'), ((10604, 10615), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (10612, 10615), True, 'import numpy as np\n'), ((11362, 11409), 'skimage.segmentation.relabel_sequential', 'skimage.segmentation.relabel_sequential', (['labels'], {}), '(labels)\n', (11401, 11409), False, 'import skimage\n'), ((11665, 11687), 'numpy.zeros', 'np.zeros', (['[1, nFields]'], {}), '([1, nFields])\n', (11673, 11687), True, 'import numpy as np\n'), ((11728, 11750), 'numpy.zeros', 'np.zeros', (['[1, nFields]'], {}), '([1, nFields])\n', (11736, 11750), True, 'import numpy as np\n'), ((11850, 11889), 'numpy.logical_and', 'np.logical_and', (['(labels == i)', 'borderMask'], {}), '(labels == i, borderMask)\n', (11864, 11889), True, 'import numpy as np\n'), ((12858, 12880), 'numpy.nansum', 'np.nansum', (['rateInField'], {}), '(rateInField)\n', (12867, 12880), True, 'import numpy as np\n'), ((15811, 15822), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (15819, 15822), True, 'import numpy as np\n'), ((16850, 16875), 'numpy.logical_and', 'np.logical_and', (['tmp2', 'tmp'], {}), '(tmp2, tmp)\n', (16864, 16875), True, 'import numpy as np\n'), ((17506, 17522), 'numpy.zeros_like', 'np.zeros_like', (['A'], {}), '(A)\n', (17519, 17522), True, 'import numpy as np\n'), ((17764, 17775), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (17772, 17775), True, 'import numpy as np\n'), ((18141, 18161), 'numpy.min', 'np.min', (['ellipse_axes'], {}), '(ellipse_axes)\n', (18147, 18161), True, 'import numpy as np\n'), ((18164, 18184), 'numpy.max', 'np.max', (['ellipse_axes'], {}), '(ellipse_axes)\n', (18170, 18184), True, 'import numpy as np\n'), ((18507, 18519), 'numpy.nanmax', 'np.nanmax', (['A'], {}), '(A)\n', (18516, 18519), True, 'import numpy as np\n'), ((18534, 18547), 'numpy.nanmean', 'np.nanmean', (['A'], {}), '(A)\n', (18544, 18547), True, 'import numpy as np\n'), ((18571, 18594), 'numpy.mean', 'np.mean', (['sub_field_size'], {}), '(sub_field_size)\n', (18578, 18594), True, 'import numpy as np\n'), ((20365, 20379), 'numpy.array', 'np.array', (['angs'], {}), '(angs)\n', (20373, 20379), True, 'import numpy as np\n'), ((20568, 20626), 'skimage.transform.resize', 'skimage.transform.resize', (['map2', 'map1.shape'], {'mode': '"""reflect"""'}), "(map2, 
map1.shape, mode='reflect')\n", (20592, 20626), False, 'import skimage\n'), ((21391, 21406), 'numpy.isnan', 'np.isnan', (['smthd'], {}), '(smthd)\n', (21399, 21406), True, 'import numpy as np\n'), ((21417, 21434), 'numpy.isnan', 'np.isnan', (['unsmthd'], {}), '(unsmthd)\n', (21425, 21434), True, 'import numpy as np\n'), ((22347, 22366), 'numpy.isnan', 'np.isnan', (['polarPlot'], {}), '(polarPlot)\n', (22355, 22366), True, 'import numpy as np\n'), ((22520, 22543), 'numpy.ones_like', 'np.ones_like', (['polarPlot'], {}), '(polarPlot)\n', (22532, 22543), True, 'import numpy as np\n'), ((22618, 22634), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (22631, 22634), True, 'import numpy as np\n'), ((24452, 24542), 'warnings.warn', 'warnings.warn', (['"""X contains duplicate values. Treated as distinct values."""', 'UserWarning'], {}), "('X contains duplicate values. Treated as distinct values.',\n UserWarning)\n", (24465, 24542), False, 'import warnings\n'), ((24873, 24931), 'warnings.warn', 'warnings.warn', (['"""Probabilities dont sum to 1."""', 'UserWarning'], {}), "('Probabilities dont sum to 1.', UserWarning)\n", (24886, 24931), False, 'import warnings\n'), ((26623, 26654), 'numpy.nansum', 'np.nansum', (['(ratemap * dwelltimes)'], {}), '(ratemap * dwelltimes)\n', (26632, 26654), True, 'import numpy as np\n'), ((26847, 26862), 'numpy.nonzero', 'np.nonzero', (['dum'], {}), '(dum)\n', (26857, 26862), True, 'import numpy as np\n'), ((28412, 28423), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (28420, 28423), True, 'import numpy as np\n'), ((29092, 29111), 'numpy.arange', 'np.arange', (['(0)', 'nLbls'], {}), '(0, nLbls)\n', (29101, 29111), True, 'import numpy as np\n'), ((33832, 33853), 'numpy.array', 'np.array', (['peak_coords'], {}), '(peak_coords)\n', (33840, 33853), True, 'import numpy as np\n'), ((36807, 36830), 'numpy.arange', 'np.arange', (['(0)', '(181)', 'step'], {}), '(0, 181, step)\n', (36816, 36830), True, 'import numpy as np\n'), ((37045, 37061), 'numpy.nanmin', 'np.nanmin', (['image'], {}), '(image)\n', (37054, 37061), True, 'import numpy as np\n'), ((37063, 37079), 'numpy.nanmax', 'np.nanmax', (['image'], {}), '(image)\n', (37072, 37079), True, 'import numpy as np\n'), ((37361, 37448), 'skimage.transform.rotate', 'skimage.transform.rotate', (['autoCorrMiddleRescaled'], {'angle': 'angle', 'cval': 'np.nan', 'order': '(3)'}), '(autoCorrMiddleRescaled, angle=angle, cval=np.nan,\n order=3)\n', (37385, 37448), False, 'import skimage\n'), ((37549, 37589), 'numpy.logical_or', 'np.logical_or', (['origNanIdx', 'rotatedNanIdx'], {}), '(origNanIdx, rotatedNanIdx)\n', (37562, 37589), True, 'import numpy as np\n'), ((37878, 37935), 'numpy.min', 'np.min', (['(rotationalCorrVals[60], rotationalCorrVals[120])'], {}), '((rotationalCorrVals[60], rotationalCorrVals[120]))\n', (37884, 37935), True, 'import numpy as np\n'), ((37972, 38058), 'numpy.max', 'np.max', (['(rotationalCorrVals[150], rotationalCorrVals[30], rotationalCorrVals[90])'], {}), '((rotationalCorrVals[150], rotationalCorrVals[30], rotationalCorrVals\n [90]))\n', (37978, 38058), True, 'import numpy as np\n'), ((39587, 39598), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (39595, 39598), True, 'import numpy as np\n'), ((877, 892), 'numpy.max', 'np.max', (['A.shape'], {}), '(A.shape)\n', (883, 892), True, 'import numpy as np\n'), ((3179, 3196), 'numpy.array', 'np.array', (['A.shape'], {}), '(A.shape)\n', (3187, 3196), True, 'import numpy as np\n'), ((9685, 9704), 'numpy.shape', 'np.shape', (['dist_mask'], {}), 
'(dist_mask)\n', (9693, 9704), True, 'import numpy as np\n'), ((9707, 9718), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (9715, 9718), True, 'import numpy as np\n'), ((10142, 10168), 'skimage.morphology.disk', 'skimage.morphology.disk', (['(1)'], {}), '(1)\n', (10165, 10168), False, 'import skimage\n'), ((10336, 10370), 'numpy.zeros', 'np.zeros', (['[A_rows + 2, A_cols + 2]'], {}), '([A_rows + 2, A_cols + 2])\n', (10344, 10370), True, 'import numpy as np\n'), ((10391, 10406), 'numpy.ones_like', 'np.ones_like', (['A'], {}), '(A)\n', (10403, 10406), True, 'import numpy as np\n'), ((10459, 10504), 'scipy.ndimage.morphology.distance_transform_bf', 'ndimage.morphology.distance_transform_bf', (['tmp'], {}), '(tmp)\n', (10499, 10504), False, 'from scipy import ndimage\n'), ((12641, 12681), 'numpy.logical_or', 'np.logical_or', (['fieldsToKeep', '(labels == i)'], {}), '(fieldsToKeep, labels == i)\n', (12654, 12681), True, 'import numpy as np\n'), ((14040, 14054), 'numpy.isfinite', 'np.isfinite', (['A'], {}), '(A)\n', (14051, 14054), True, 'import numpy as np\n'), ((17189, 17252), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'neighbours', 'algorithm': '"""ball_tree"""'}), "(n_neighbors=neighbours, algorithm='ball_tree')\n", (17205, 17252), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((17686, 17719), 'numpy.count_nonzero', 'np.count_nonzero', (['(A_non_field > 0)'], {}), '(A_non_field > 0)\n', (17702, 17719), True, 'import numpy as np\n'), ((20219, 20233), 'numpy.dot', 'np.dot', (['e1', 'e2'], {}), '(e1, e2)\n', (20225, 20233), True, 'import numpy as np\n'), ((20676, 20734), 'skimage.transform.resize', 'skimage.transform.resize', (['map1', 'map2.shape'], {'mode': '"""reflect"""'}), "(map1, map2.shape, mode='reflect')\n", (20700, 20734), False, 'import skimage\n'), ((22449, 22469), 'numpy.nansum', 'np.nansum', (['polarPlot'], {}), '(polarPlot)\n', (22458, 22469), True, 'import numpy as np\n'), ((24410, 24422), 'numpy.unique', 'np.unique', (['X'], {}), '(X)\n', (24419, 24422), True, 'import numpy as np\n'), ((24431, 24441), 'numpy.sort', 'np.sort', (['X'], {}), '(X)\n', (24438, 24441), True, 'import numpy as np\n'), ((25001, 25031), 'numpy.log2', 'np.log2', (['((pvect2 + pvect1) / 2)'], {}), '((pvect2 + pvect1) / 2)\n', (25008, 25031), True, 'import numpy as np\n'), ((26908, 26925), 'numpy.log2', 'np.log2', (['p_r[ind]'], {}), '(p_r[ind])\n', (26915, 26925), True, 'import numpy as np\n'), ((28344, 28358), 'numpy.isfinite', 'np.isfinite', (['A'], {}), '(A)\n', (28355, 28358), True, 'import numpy as np\n'), ((29215, 29226), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (29223, 29226), True, 'import numpy as np\n'), ((31533, 31555), 'numpy.abs', 'np.abs', (['dist_to_centre'], {}), '(dist_to_centre)\n', (31539, 31555), True, 'import numpy as np\n'), ((32671, 32764), 'scipy.ndimage.labeled_comprehension', 'ndimage.labeled_comprehension', (['A', 'half_peak_labels', 'closest_peak_idx', 'fn2', 'tuple', '(0)', '(True)'], {}), '(A, half_peak_labels, closest_peak_idx, fn2,\n tuple, 0, True)\n', (32700, 32764), False, 'from scipy import ndimage\n'), ((32820, 32865), 'numpy.array', 'np.array', (['[(x, y) for x, y in ellipse_coords]'], {}), '([(x, y) for x, y in ellipse_coords])\n', (32828, 32865), True, 'import numpy as np\n'), ((32935, 32949), 'skimage.measure.EllipseModel', 'EllipseModel', ([], {}), '()\n', (32947, 32949), False, 'from skimage.measure import EllipseModel\n'), ((35364, 35403), 'ephysiopy.common.utils.polar', 'polar', (['peaks[:, 1]', 
'(-peaks[:, 0])'], {'deg': '(1)'}), '(peaks[:, 1], -peaks[:, 0], deg=1)\n', (35369, 35403), False, 'from ephysiopy.common.utils import polar\n'), ((2636, 2654), 'numpy.zeros', 'np.zeros', (['Ac.shape'], {}), '(Ac.shape)\n', (2644, 2654), True, 'import numpy as np\n'), ((2953, 2978), 'numpy.logical_and', 'np.logical_and', (['tmp2', 'tmp'], {}), '(tmp2, tmp)\n', (2967, 2978), True, 'import numpy as np\n'), ((5799, 5817), 'numpy.zeros', 'np.zeros', (['Ac.shape'], {}), '(Ac.shape)\n', (5807, 5817), True, 'import numpy as np\n'), ((10981, 10992), 'numpy.ravel', 'np.ravel', (['A'], {}), '(A)\n', (10989, 10992), True, 'import numpy as np\n'), ((16636, 16654), 'numpy.zeros', 'np.zeros', (['Ac.shape'], {}), '(Ac.shape)\n', (16644, 16654), True, 'import numpy as np\n'), ((18629, 18651), 'numpy.sum', 'np.sum', (['sub_field_mask'], {}), '(sub_field_mask)\n', (18635, 18651), True, 'import numpy as np\n'), ((19384, 19396), 'numpy.nanmax', 'np.nanmax', (['A'], {}), '(A)\n', (19393, 19396), True, 'import numpy as np\n'), ((19449, 19462), 'numpy.nanmean', 'np.nanmean', (['A'], {}), '(A)\n', (19459, 19462), True, 'import numpy as np\n'), ((19569, 19592), 'numpy.mean', 'np.mean', (['sub_field_size'], {}), '(sub_field_size)\n', (19576, 19592), True, 'import numpy as np\n'), ((20254, 20272), 'numpy.linalg.norm', 'np.linalg.norm', (['e1'], {}), '(e1)\n', (20268, 20272), True, 'import numpy as np\n'), ((20275, 20293), 'numpy.linalg.norm', 'np.linalg.norm', (['e2'], {}), '(e2)\n', (20289, 20293), True, 'import numpy as np\n'), ((20863, 20877), 'numpy.isnan', 'np.isnan', (['map1'], {}), '(map1)\n', (20871, 20877), True, 'import numpy as np\n'), ((20927, 20941), 'numpy.isnan', 'np.isnan', (['map2'], {}), '(map2)\n', (20935, 20941), True, 'import numpy as np\n'), ((20993, 21007), 'numpy.isnan', 'np.isnan', (['map1'], {}), '(map1)\n', (21001, 21007), True, 'import numpy as np\n'), ((21030, 21044), 'numpy.isnan', 'np.isnan', (['map2'], {}), '(map2)\n', (21038, 21044), True, 'import numpy as np\n'), ((25460, 25518), 'warnings.warn', 'warnings.warn', (['"""Last argument not recognised"""', 'UserWarning'], {}), "('Last argument not recognised', UserWarning)\n", (25473, 25518), False, 'import warnings\n'), ((25548, 25563), 'numpy.log2', 'np.log2', (['pvect1'], {}), '(pvect1)\n', (25555, 25563), True, 'import numpy as np\n'), ((25566, 25581), 'numpy.log2', 'np.log2', (['pvect2'], {}), '(pvect2)\n', (25573, 25581), True, 'import numpy as np\n'), ((29975, 29986), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (29983, 29986), True, 'import numpy as np\n'), ((31138, 31148), 'numpy.any', 'np.any', (['xc'], {}), '(xc)\n', (31144, 31148), True, 'import numpy as np\n'), ((31153, 31163), 'numpy.any', 'np.any', (['yc'], {}), '(yc)\n', (31159, 31163), True, 'import numpy as np\n'), ((32432, 32459), 'numpy.unravel_index', 'np.unravel_index', (['pos', 'A_sz'], {}), '(pos, A_sz)\n', (32448, 32459), True, 'import numpy as np\n'), ((33150, 33179), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (33161, 33179), True, 'import numpy as np\n'), ((33382, 33402), 'numpy.min', 'np.min', (['ellipse_axes'], {}), '(ellipse_axes)\n', (33388, 33402), True, 'import numpy as np\n'), ((33469, 33498), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(50)'], {}), '(0, 2 * np.pi, 50)\n', (33480, 33498), True, 'import numpy as np\n'), ((9602, 9613), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (9610, 9613), True, 'import numpy as np\n'), ((12247, 12263), 'numpy.arctan2', 'np.arctan2', (['x', 
'y'], {}), '(x, y)\n', (12257, 12263), True, 'import numpy as np\n'), ((12351, 12378), 'numpy.count_nonzero', 'np.count_nonzero', (['fieldMask'], {}), '(fieldMask)\n', (12367, 12378), True, 'import numpy as np\n'), ((19163, 19185), 'numpy.sum', 'np.sum', (['sub_field_mask'], {}), '(sub_field_mask)\n', (19169, 19185), True, 'import numpy as np\n'), ((19277, 19310), 'numpy.count_nonzero', 'np.count_nonzero', (['(A_non_field > 0)'], {}), '(A_non_field > 0)\n', (19293, 19310), True, 'import numpy as np\n'), ((24778, 24792), 'numpy.sum', 'np.sum', (['pvect1'], {}), '(pvect1)\n', (24784, 24792), True, 'import numpy as np\n'), ((24833, 24847), 'numpy.sum', 'np.sum', (['pvect2'], {}), '(pvect2)\n', (24839, 24847), True, 'import numpy as np\n'), ((26454, 26471), 'numpy.shape', 'np.shape', (['ratemap'], {}), '(ratemap)\n', (26462, 26471), True, 'import numpy as np\n'), ((26544, 26564), 'numpy.shape', 'np.shape', (['dwelltimes'], {}), '(dwelltimes)\n', (26552, 26564), True, 'import numpy as np\n'), ((28969, 28980), 'numpy.max', 'np.max', (['val'], {}), '(val)\n', (28975, 28980), True, 'import numpy as np\n'), ((31191, 31212), 'numpy.floor', 'np.floor', (['(A_sz[0] / 2)'], {}), '(A_sz[0] / 2)\n', (31199, 31212), True, 'import numpy as np\n'), ((31237, 31258), 'numpy.floor', 'np.floor', (['(A_sz[1] / 2)'], {}), '(A_sz[1] / 2)\n', (31245, 31258), True, 'import numpy as np\n'), ((31284, 31300), 'numpy.hypot', 'np.hypot', (['xc', 'yc'], {}), '(xc, yc)\n', (31292, 31300), True, 'import numpy as np\n'), ((32486, 32507), 'numpy.floor', 'np.floor', (['(A_sz[0] / 2)'], {}), '(A_sz[0] / 2)\n', (32494, 32507), True, 'import numpy as np\n'), ((32532, 32553), 'numpy.floor', 'np.floor', (['(A_sz[1] / 2)'], {}), '(A_sz[1] / 2)\n', (32540, 32553), True, 'import numpy as np\n'), ((32584, 32600), 'numpy.hypot', 'np.hypot', (['xc', 'yc'], {}), '(xc, yc)\n', (32592, 32600), True, 'import numpy as np\n'), ((33427, 33440), 'skimage.measure.CircleModel', 'CircleModel', ([], {}), '()\n', (33438, 33440), False, 'from skimage.measure import CircleModel\n'), ((12408, 12437), 'numpy.count_nonzero', 'np.count_nonzero', (['(labels == i)'], {}), '(labels == i)\n', (12424, 12437), True, 'import numpy as np\n'), ((13076, 13087), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (13084, 13087), True, 'import numpy as np\n'), ((20318, 20340), 'numpy.arccos', 'np.arccos', (['(num / denom)'], {}), '(num / denom)\n', (20327, 20340), True, 'import numpy as np\n'), ((24593, 24604), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (24601, 24604), True, 'import numpy as np\n'), ((24606, 24622), 'numpy.shape', 'np.shape', (['pvect1'], {}), '(pvect1)\n', (24614, 24622), True, 'import numpy as np\n'), ((24659, 24670), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (24667, 24670), True, 'import numpy as np\n'), ((24672, 24688), 'numpy.shape', 'np.shape', (['pvect2'], {}), '(pvect2)\n', (24680, 24688), True, 'import numpy as np\n'), ((28607, 28623), 'numpy.min', 'np.min', (['(A_sz / 2)'], {}), '(A_sz / 2)\n', (28613, 28623), True, 'import numpy as np\n'), ((25269, 25284), 'numpy.log2', 'np.log2', (['pvect1'], {}), '(pvect1)\n', (25276, 25284), True, 'import numpy as np\n'), ((25287, 25302), 'numpy.log2', 'np.log2', (['pvect2'], {}), '(pvect2)\n', (25294, 25302), True, 'import numpy as np\n'), ((25343, 25358), 'numpy.log2', 'np.log2', (['pvect2'], {}), '(pvect2)\n', (25350, 25358), True, 'import numpy as np\n'), ((25361, 25376), 'numpy.log2', 'np.log2', (['pvect1'], {}), '(pvect1)\n', (25368, 25376), True, 'import numpy as np\n'), ((25076, 25091), 
'numpy.log2', 'np.log2', (['pvect1'], {}), '(pvect1)\n', (25083, 25091), True, 'import numpy as np\n'), ((25148, 25163), 'numpy.log2', 'np.log2', (['pvect2'], {}), '(pvect2)\n', (25155, 25163), True, 'import numpy as np\n')] |
"""
Unit Tests for EM module.
"""
import unittest
import sys
import argparse
import math
import numpy
from mixemt import phylotree
from mixemt import preprocess
from mixemt import em
class TestEMHelpers(unittest.TestCase):
def test_init_props(self):
props = em.init_props(10)
self.assertEqual(len(props), 10)
self.assertTrue(numpy.abs(numpy.sum(props) - 1.0) < 0.000000001)
def test_converged(self):
prev = numpy.array([math.log(1.0)] * 10)
cur = numpy.array([math.log(2.0)] * 10)
self.assertTrue(em.converged(cur, cur))
self.assertTrue(em.converged(prev, prev))
self.assertFalse(em.converged(prev, cur))
self.assertFalse(em.converged(cur, prev))
close = numpy.array(cur)
close[3] = math.log(2.0001)
self.assertTrue(em.converged(cur, prev, 20.0))
self.assertFalse(em.converged(cur, close))
self.assertTrue(em.converged(cur, close, 0.001))
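    # Note added for clarity: the assertions above suggest the optional third
    # argument of em.converged is a tolerance on the difference between the
    # two log-proportion vectors -- a loose tolerance (20.0) accepts even
    # log(1) vs log(2), the default rejects the tiny 2.0 vs 2.0001
    # perturbation, and 0.001 accepts it. The exact metric is an assumption;
    # see mixemt.em for the implementation.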
def test_em_step_simple(self):
inf = float('Inf')
in_mat = numpy.array([[ 0.0, -inf, -inf],
[-inf, 0.0, -inf],
[-inf, -inf, 0.0]])
wts = numpy.array([1, 1, 1])
props = numpy.log(numpy.array([0.6, 0.2, 0.2]))
mix_mat = numpy.empty_like(in_mat)
res_mat, res_props = em.em_step(in_mat, wts, props, mix_mat)
self.assertTrue(numpy.all(res_mat == mix_mat))
self.assertTrue(numpy.all(in_mat == mix_mat))
self.assertTrue(numpy.all(res_props ==
numpy.log(numpy.array([1.0,1.0,1.0]) / 3.0)))
def test_em_step_weights(self):
inf = float('Inf')
in_mat = numpy.array([[ 0.0, -inf, -inf],
[-inf, 0.0, -inf],
[-inf, -inf, 0.0]])
wts = numpy.array([2,1,1])
props = numpy.log(numpy.array([0.6, 0.2, 0.2]))
mix_mat = numpy.empty_like(in_mat)
new_props = numpy.empty_like(props)
res_mat, res_props = em.em_step(in_mat, wts, props, mix_mat)
self.assertTrue(numpy.all(in_mat == mix_mat))
self.assertTrue(numpy.all(res_props ==
numpy.log(numpy.array([2.0,1.0,1.0]) / 4.0)))
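# Note added for clarity: in both em_step tests each read supports exactly one
# haplotype, so the updated proportions collapse to the (weighted) read counts
# normalised to sum to one -- log([1, 1, 1] / 3) and log([2, 1, 1] / 4)
# respectively, matching the assertions above.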
class TestAllEM(unittest.TestCase):
def setUp(self):
parser = argparse.ArgumentParser()
self.args = parser.parse_args([])
self.args.init_alpha = 1.0
self.args.tolerance = 0.0001
self.args.max_iter = 1000
self.args.n_multi = 1
self.args.verbose = False
phy_in = ['I, A1G ,,',
',H, A3T A5T ,,',
',,F, A6T ,,',
',,,B, A8T ,,',
',,,C, T5A ,,',
',,G, A7T ,,',
',,,D, A9T ,,',
',,,E, A4T ,,',
',A, A2T A4T ,,']
phy = phylotree.Phylotree(phy_in)
ref = "AAAAAAAAA"
reads = list(["1:A,2:T,3:A", "2:T,3:A", "3:A,4:T,5:T", "5:T,6:A",
"6:A,7:T", "6:A,7:T,8:A", "7:T,8:A", "4:T,5:T",
"1:A,2:T,3:T,4:T", "5:A,6:T,7:A,8:A"])
haps = list('ABCDEFGHI')
self.input_mat = preprocess.build_em_matrix(ref, phy, reads,
haps, self.args)
self.wts = numpy.ones(len(reads))
self.true_props = numpy.array(
[0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0])
inf = float('Inf')
self.true_haps = numpy.full_like(self.input_mat, -inf)
self.true_haps[0:8, 1] = 0.0
self.true_haps[8:10, 4] = 0.0
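        # Eight of the ten reads are consistent only with haplotype B
        # (column 1) and the last two only with haplotype E (column 4),
        # which is what the 0.8/0.2 split in true_props encodes.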
def test_em_1_run(self):
props, read_mix = em.run_em(self.input_mat, self.wts, self.args)
self.assertTrue(numpy.allclose(props, self.true_props, atol=0.02))
self.assertTrue(numpy.allclose(numpy.exp(read_mix),
numpy.exp(self.true_haps), atol=0.05))
def test_em_10_run(self):
self.args.n_multi = 10
props, read_mix = em.run_em(self.input_mat, self.wts, self.args)
self.assertTrue(numpy.allclose(props, self.true_props, atol=0.02))
self.assertTrue(numpy.allclose(numpy.exp(read_mix),
numpy.exp(self.true_haps), atol=0.05))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"mixemt.preprocess.build_em_matrix",
"numpy.full_like",
"mixemt.em.em_step",
"mixemt.em.run_em",
"argparse.ArgumentParser",
"numpy.sum",
"mixemt.em.init_props",
"numpy.allclose",
"numpy.empty_like",
"numpy.all",
"mixemt.em.converged",
"numpy.array",
"numpy.exp",
"math.lo... | [((4439, 4454), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4452, 4454), False, 'import unittest\n'), ((274, 291), 'mixemt.em.init_props', 'em.init_props', (['(10)'], {}), '(10)\n', (287, 291), False, 'from mixemt import em\n'), ((749, 765), 'numpy.array', 'numpy.array', (['cur'], {}), '(cur)\n', (760, 765), False, 'import numpy\n'), ((785, 801), 'math.log', 'math.log', (['(2.0001)'], {}), '(2.0001)\n', (793, 801), False, 'import math\n'), ((1045, 1115), 'numpy.array', 'numpy.array', (['[[0.0, -inf, -inf], [-inf, 0.0, -inf], [-inf, -inf, 0.0]]'], {}), '([[0.0, -inf, -inf], [-inf, 0.0, -inf], [-inf, -inf, 0.0]])\n', (1056, 1115), False, 'import numpy\n'), ((1193, 1215), 'numpy.array', 'numpy.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (1204, 1215), False, 'import numpy\n'), ((1290, 1314), 'numpy.empty_like', 'numpy.empty_like', (['in_mat'], {}), '(in_mat)\n', (1306, 1314), False, 'import numpy\n'), ((1345, 1384), 'mixemt.em.em_step', 'em.em_step', (['in_mat', 'wts', 'props', 'mix_mat'], {}), '(in_mat, wts, props, mix_mat)\n', (1355, 1384), False, 'from mixemt import em\n'), ((1703, 1773), 'numpy.array', 'numpy.array', (['[[0.0, -inf, -inf], [-inf, 0.0, -inf], [-inf, -inf, 0.0]]'], {}), '([[0.0, -inf, -inf], [-inf, 0.0, -inf], [-inf, -inf, 0.0]])\n', (1714, 1773), False, 'import numpy\n'), ((1851, 1873), 'numpy.array', 'numpy.array', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (1862, 1873), False, 'import numpy\n'), ((1946, 1970), 'numpy.empty_like', 'numpy.empty_like', (['in_mat'], {}), '(in_mat)\n', (1962, 1970), False, 'import numpy\n'), ((1991, 2014), 'numpy.empty_like', 'numpy.empty_like', (['props'], {}), '(props)\n', (2007, 2014), False, 'import numpy\n'), ((2045, 2084), 'mixemt.em.em_step', 'em.em_step', (['in_mat', 'wts', 'props', 'mix_mat'], {}), '(in_mat, wts, props, mix_mat)\n', (2055, 2084), False, 'from mixemt import em\n'), ((2343, 2368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2366, 2368), False, 'import argparse\n'), ((2910, 2937), 'mixemt.phylotree.Phylotree', 'phylotree.Phylotree', (['phy_in'], {}), '(phy_in)\n', (2929, 2937), False, 'from mixemt import phylotree\n'), ((3227, 3287), 'mixemt.preprocess.build_em_matrix', 'preprocess.build_em_matrix', (['ref', 'phy', 'reads', 'haps', 'self.args'], {}), '(ref, phy, reads, haps, self.args)\n', (3253, 3287), False, 'from mixemt import preprocess\n'), ((3409, 3467), 'numpy.array', 'numpy.array', (['[0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0])\n', (3420, 3467), False, 'import numpy\n'), ((3549, 3586), 'numpy.full_like', 'numpy.full_like', (['self.input_mat', '(-inf)'], {}), '(self.input_mat, -inf)\n', (3564, 3586), False, 'import numpy\n'), ((3718, 3764), 'mixemt.em.run_em', 'em.run_em', (['self.input_mat', 'self.wts', 'self.args'], {}), '(self.input_mat, self.wts, self.args)\n', (3727, 3764), False, 'from mixemt import em\n'), ((4061, 4119), 'numpy.array', 'numpy.array', (['[0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.8, 0.0, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0])\n', (4072, 4119), False, 'import numpy\n'), ((4146, 4192), 'mixemt.em.run_em', 'em.run_em', (['self.input_mat', 'self.wts', 'self.args'], {}), '(self.input_mat, self.wts, self.args)\n', (4155, 4192), False, 'from mixemt import em\n'), ((559, 581), 'mixemt.em.converged', 'em.converged', (['cur', 'cur'], {}), '(cur, cur)\n', (571, 581), False, 'from mixemt import em\n'), ((607, 631), 'mixemt.em.converged', 'em.converged', (['prev', 'prev'], {}), '(prev, 
prev)\n', (619, 631), False, 'from mixemt import em\n'), ((658, 681), 'mixemt.em.converged', 'em.converged', (['prev', 'cur'], {}), '(prev, cur)\n', (670, 681), False, 'from mixemt import em\n'), ((708, 731), 'mixemt.em.converged', 'em.converged', (['cur', 'prev'], {}), '(cur, prev)\n', (720, 731), False, 'from mixemt import em\n'), ((826, 855), 'mixemt.em.converged', 'em.converged', (['cur', 'prev', '(20.0)'], {}), '(cur, prev, 20.0)\n', (838, 855), False, 'from mixemt import em\n'), ((882, 906), 'mixemt.em.converged', 'em.converged', (['cur', 'close'], {}), '(cur, close)\n', (894, 906), False, 'from mixemt import em\n'), ((932, 963), 'mixemt.em.converged', 'em.converged', (['cur', 'close', '(0.001)'], {}), '(cur, close, 0.001)\n', (944, 963), False, 'from mixemt import em\n'), ((1242, 1270), 'numpy.array', 'numpy.array', (['[0.6, 0.2, 0.2]'], {}), '([0.6, 0.2, 0.2])\n', (1253, 1270), False, 'import numpy\n'), ((1409, 1438), 'numpy.all', 'numpy.all', (['(res_mat == mix_mat)'], {}), '(res_mat == mix_mat)\n', (1418, 1438), False, 'import numpy\n'), ((1465, 1493), 'numpy.all', 'numpy.all', (['(in_mat == mix_mat)'], {}), '(in_mat == mix_mat)\n', (1474, 1493), False, 'import numpy\n'), ((1898, 1926), 'numpy.array', 'numpy.array', (['[0.6, 0.2, 0.2]'], {}), '([0.6, 0.2, 0.2])\n', (1909, 1926), False, 'import numpy\n'), ((2110, 2138), 'numpy.all', 'numpy.all', (['(in_mat == mix_mat)'], {}), '(in_mat == mix_mat)\n', (2119, 2138), False, 'import numpy\n'), ((3789, 3838), 'numpy.allclose', 'numpy.allclose', (['props', 'self.true_props'], {'atol': '(0.02)'}), '(props, self.true_props, atol=0.02)\n', (3803, 3838), False, 'import numpy\n'), ((4217, 4266), 'numpy.allclose', 'numpy.allclose', (['props', 'self.true_props'], {'atol': '(0.02)'}), '(props, self.true_props, atol=0.02)\n', (4231, 4266), False, 'import numpy\n'), ((3879, 3898), 'numpy.exp', 'numpy.exp', (['read_mix'], {}), '(read_mix)\n', (3888, 3898), False, 'import numpy\n'), ((3939, 3964), 'numpy.exp', 'numpy.exp', (['self.true_haps'], {}), '(self.true_haps)\n', (3948, 3964), False, 'import numpy\n'), ((4307, 4326), 'numpy.exp', 'numpy.exp', (['read_mix'], {}), '(read_mix)\n', (4316, 4326), False, 'import numpy\n'), ((4367, 4392), 'numpy.exp', 'numpy.exp', (['self.true_haps'], {}), '(self.true_haps)\n', (4376, 4392), False, 'import numpy\n'), ((465, 478), 'math.log', 'math.log', (['(1.0)'], {}), '(1.0)\n', (473, 478), False, 'import math\n'), ((514, 527), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (522, 527), False, 'import math\n'), ((367, 383), 'numpy.sum', 'numpy.sum', (['props'], {}), '(props)\n', (376, 383), False, 'import numpy\n'), ((1586, 1614), 'numpy.array', 'numpy.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (1597, 1614), False, 'import numpy\n'), ((2231, 2259), 'numpy.array', 'numpy.array', (['[2.0, 1.0, 1.0]'], {}), '([2.0, 1.0, 1.0])\n', (2242, 2259), False, 'import numpy\n')] |
import torch
from torch.nn import functional as F
import numpy as np
import gtimer as gt
import rlkit.torch.pytorch_util as ptu
from diayn_original_tb.algo.algo_diayn_tb_eval import DIAYNTorchOnlineRLAlgorithmTbEval
class DIAYNTorchOnlineRLAlgorithmTbPerfLoggingEffiently(DIAYNTorchOnlineRLAlgorithmTbEval):
# Saves one evaluation sampling loop from env compared to
# DIAYNTorchOnlineRLAlgorithmTbPerfLogging (can be found below for comparison)
def _classifier_perf_eval(self, eval_paths):
obs_dim = eval_paths[0].obs.shape[0]
seq_len = eval_paths[0].obs.shape[-1]
next_obs = []
z_hat = []
for path in eval_paths:
next_obs.append(path.next_obs.transpose((1, 0))) # data_dim x seq_len
z_hat.append(path.mode.transpose((1, 0))) # data_dim x seq_len
next_obs = ptu.from_numpy(
np.concatenate(next_obs, axis=0)
)
z_hat = ptu.from_numpy(
np.concatenate(z_hat, axis=0)
)
z_hat = torch.argmax(z_hat, dim=-1)
assert next_obs.shape[0] % (self.policy.skill_dim * seq_len) == 0
assert next_obs.shape[-1] == obs_dim
d_pred = self.trainer.df(
next_obs,
)
d_pred_log_softmax = F.log_softmax(d_pred, dim=-1)
pred_z = torch.argmax(d_pred_log_softmax, dim=-1, keepdim=True)
assert z_hat.shape == pred_z.squeeze().shape
df_accuracy = torch.sum(
torch.eq(
z_hat,
pred_z.squeeze()
)).float() / pred_z.size(0)
return df_accuracy
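    # Added note: df_accuracy is the fraction of transitions whose skill is
    # recovered by the discriminator, i.e.
    #   acc = (1 / N) * sum_i 1[z_hat_i == argmax_z log_softmax(df(s'_i))_z]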
def log_classifier_perf_eval(self, eval_paths, epoch):
        classifier_accuracy_eval = self._classifier_perf_eval(eval_paths=eval_paths)
        self.diagnostic_writer.writer.writer.add_scalar(
            tag="Debug/Classifier accuracy eval",
            scalar_value=classifier_accuracy_eval,
            global_step=epoch
        )
def _classifier_perf_on_memory(self):
len_memory = self.batch_size
batch_size = len_memory
batch = self.replay_buffer.random_batch(
batch_size=batch_size)
skills = batch['skills']
next_obs = batch['next_observations']
z_hat = ptu.from_numpy(np.argmax(skills, axis=-1))
d_pred = self.trainer.df(
ptu.from_numpy(next_obs))
pred_log_softmax = F.log_softmax(d_pred, dim=-1)
pred_z = torch.argmax(pred_log_softmax, dim=-1, keepdim=True)
assert z_hat.shape == pred_z.squeeze().shape
df_accuracy = torch.sum(
torch.eq(
z_hat,
pred_z.squeeze(),
)).float()/pred_z.size(0)
return df_accuracy
def log_classifier_perf_on_memory(self, epoch):
        classifier_accuracy_memory = self._classifier_perf_on_memory()
        self.diagnostic_writer.writer.writer.add_scalar(
            tag="Debug/Classifier accuracy replay buffer",
            scalar_value=classifier_accuracy_memory,
            global_step=epoch
        )
def _write_mode_influence_and_log(self, paths, epoch):
"""
Main logging function
Args:
            paths : list of (data_dim, seq_dim) evaluation paths sampled
                directly from the environment
epoch : int
"""
super()._write_mode_influence_and_log(
paths=paths,
epoch=epoch,
)
self.log_classifier_perf_eval(
eval_paths=paths,
epoch=epoch,
)
self.log_classifier_perf_on_memory(
epoch=epoch,
)
class DIAYNTorchOnlineRLAlgorithmTbPerfLogging(DIAYNTorchOnlineRLAlgorithmTbEval):
def _end_epoch(self, epoch):
super()._end_epoch(epoch)
        classifier_accuracy_memory = self._classifier_perf_on_memory()
        self.diagnostic_writer.writer.writer.add_scalar(
            tag="Debug/Classifier accuracy replay buffer",
            scalar_value=classifier_accuracy_memory,
            global_step=epoch
        )
        classifier_accuracy_eval = self._classifier_perf_eval()
        self.diagnostic_writer.writer.writer.add_scalar(
            tag="Debug/Classifier accuracy eval",
            scalar_value=classifier_accuracy_eval,
            global_step=epoch
        )
gt.stamp('own logging')
    def _classifier_perf_eval(self):
num_paths = 2
seq_len = 100
eval_paths = self._get_paths_mode_influence_test(
num_paths=num_paths,
seq_len=seq_len,
)
obs_dim = eval_paths[0].obs.shape[0]
next_obs = []
z_hat = []
for path in eval_paths:
next_obs.append(path.next_obs.transpose((1, 0))) # data_dim x seq_len
z_hat.append(path.mode.transpose((1, 0))) # data_dim x seq_len
next_obs = ptu.from_numpy(
np.concatenate(next_obs, axis=0)
)
z_hat = ptu.from_numpy(
np.concatenate(z_hat, axis=0)
)
z_hat = torch.argmax(z_hat, dim=-1)
assert next_obs.shape \
== torch.Size((num_paths * self.policy.skill_dim * seq_len, obs_dim))
d_pred = self.trainer.df(
next_obs,
)
d_pred_log_softmax = F.log_softmax(d_pred, dim=-1)
pred_z = torch.argmax(d_pred_log_softmax, dim=-1, keepdim=True)
assert z_hat.shape == pred_z.squeeze().shape
df_accuracy = torch.sum(
torch.eq(
z_hat,
pred_z.squeeze()
)).float() / pred_z.size(0)
return df_accuracy
    def _classifier_perf_on_memory(self):
len_memory = self.batch_size
batch_size = len_memory
batch = self.replay_buffer.random_batch(
batch_size=batch_size)
skills = batch['skills']
next_obs = batch['next_observations']
z_hat = ptu.from_numpy(np.argmax(skills, axis=-1))
d_pred = self.trainer.df(
ptu.from_numpy(next_obs))
pred_log_softmax = F.log_softmax(d_pred, dim=-1)
pred_z = torch.argmax(pred_log_softmax, dim=-1, keepdim=True)
assert z_hat.shape == pred_z.squeeze().shape
df_accuracy = torch.sum(
torch.eq(
z_hat,
pred_z.squeeze(),
)).float()/pred_z.size(0)
return df_accuracy
| [
"numpy.argmax",
"torch.argmax",
"rlkit.torch.pytorch_util.from_numpy",
"torch.nn.functional.log_softmax",
"torch.Size",
"gtimer.stamp",
"numpy.concatenate"
] | [((1019, 1046), 'torch.argmax', 'torch.argmax', (['z_hat'], {'dim': '(-1)'}), '(z_hat, dim=-1)\n', (1031, 1046), False, 'import torch\n'), ((1262, 1291), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['d_pred'], {'dim': '(-1)'}), '(d_pred, dim=-1)\n', (1275, 1291), True, 'from torch.nn import functional as F\n'), ((1310, 1364), 'torch.argmax', 'torch.argmax', (['d_pred_log_softmax'], {'dim': '(-1)', 'keepdim': '(True)'}), '(d_pred_log_softmax, dim=-1, keepdim=True)\n', (1322, 1364), False, 'import torch\n'), ((2372, 2401), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['d_pred'], {'dim': '(-1)'}), '(d_pred, dim=-1)\n', (2385, 2401), True, 'from torch.nn import functional as F\n'), ((2419, 2471), 'torch.argmax', 'torch.argmax', (['pred_log_softmax'], {'dim': '(-1)', 'keepdim': '(True)'}), '(pred_log_softmax, dim=-1, keepdim=True)\n', (2431, 2471), False, 'import torch\n'), ((4345, 4368), 'gtimer.stamp', 'gt.stamp', (['"""own logging"""'], {}), "('own logging')\n", (4353, 4368), True, 'import gtimer as gt\n'), ((5048, 5075), 'torch.argmax', 'torch.argmax', (['z_hat'], {'dim': '(-1)'}), '(z_hat, dim=-1)\n', (5060, 5075), False, 'import torch\n'), ((5289, 5318), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['d_pred'], {'dim': '(-1)'}), '(d_pred, dim=-1)\n', (5302, 5318), True, 'from torch.nn import functional as F\n'), ((5337, 5391), 'torch.argmax', 'torch.argmax', (['d_pred_log_softmax'], {'dim': '(-1)', 'keepdim': '(True)'}), '(d_pred_log_softmax, dim=-1, keepdim=True)\n', (5349, 5391), False, 'import torch\n'), ((6058, 6087), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['d_pred'], {'dim': '(-1)'}), '(d_pred, dim=-1)\n', (6071, 6087), True, 'from torch.nn import functional as F\n'), ((6105, 6157), 'torch.argmax', 'torch.argmax', (['pred_log_softmax'], {'dim': '(-1)', 'keepdim': '(True)'}), '(pred_log_softmax, dim=-1, keepdim=True)\n', (6117, 6157), False, 'import torch\n'), ((876, 908), 'numpy.concatenate', 'np.concatenate', (['next_obs'], {'axis': '(0)'}), '(next_obs, axis=0)\n', (890, 908), True, 'import numpy as np\n'), ((963, 992), 'numpy.concatenate', 'np.concatenate', (['z_hat'], {'axis': '(0)'}), '(z_hat, axis=0)\n', (977, 992), True, 'import numpy as np\n'), ((2245, 2271), 'numpy.argmax', 'np.argmax', (['skills'], {'axis': '(-1)'}), '(skills, axis=-1)\n', (2254, 2271), True, 'import numpy as np\n'), ((2319, 2343), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['next_obs'], {}), '(next_obs)\n', (2333, 2343), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((4905, 4937), 'numpy.concatenate', 'np.concatenate', (['next_obs'], {'axis': '(0)'}), '(next_obs, axis=0)\n', (4919, 4937), True, 'import numpy as np\n'), ((4992, 5021), 'numpy.concatenate', 'np.concatenate', (['z_hat'], {'axis': '(0)'}), '(z_hat, axis=0)\n', (5006, 5021), True, 'import numpy as np\n'), ((5126, 5192), 'torch.Size', 'torch.Size', (['(num_paths * self.policy.skill_dim * seq_len, obs_dim)'], {}), '((num_paths * self.policy.skill_dim * seq_len, obs_dim))\n', (5136, 5192), False, 'import torch\n'), ((5931, 5957), 'numpy.argmax', 'np.argmax', (['skills'], {'axis': '(-1)'}), '(skills, axis=-1)\n', (5940, 5957), True, 'import numpy as np\n'), ((6005, 6029), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['next_obs'], {}), '(next_obs)\n', (6019, 6029), True, 'import rlkit.torch.pytorch_util as ptu\n')] |
import numpy as np
from scipy.stats import chi2
import matplotlib.pyplot as plt
import termplotlib as tpl
class ChiQQ:
"""Object for creating Chisquare QQ plots.
params
------
src: input matrix of shape (n:int, p:int) where n is the sample size and p is the number of dependent variables
np.ndarray
methods
-------
_get_mean_vector: gets the mean vector along the features
np.array
_get_cov_mat: gets the covariance matrix in (p by p).
np.array
generalized_distance_squared: gets list of squared distance by dimension n.
list
_get_qq_tuples: gets the list of tuples of the qq pair for the chisquare distribution with df=p
list
draw: draws the plot by using matplotlib
"""
def __init__(self, src) -> None:
assert isinstance(src, np.ndarray)
self.src = src
self.n, self.p = self.src.shape
self.mean_vector = self._get_mean_vector()
self.cov_matrix = self._get_cov_mat()
def _get_mean_vector(self) -> np.array:
        return np.mean(self.src, axis=0)
def _get_cov_mat(self) -> np.array:
result = np.cov(self.src.transpose())
assert result.shape == (self.p, self.p)
return result
def generalized_distance_squared(self) -> list:
result = []
inv_cov = np.linalg.inv(self.cov_matrix)
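        # Squared generalized (Mahalanobis) distance of each sample:
        # d_i^2 = (x_i - x_bar)^T S^{-1} (x_i - x_bar)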
for row in self.src:
diff = row - self.mean_vector
result.append(np.matmul(np.matmul(diff, inv_cov), diff))
assert len(result) == self.n
return result
def _get_qq_tuples(self) -> list:
result = []
sorted_general_distance = sorted(self.generalized_distance_squared())
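        # Pair the i-th smallest distance with the chi-square quantile at
        # probability (i + 0.5) / n; under multivariate normality the points
        # should lie close to the line y = x.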
for i, x in enumerate(sorted_general_distance):
x_probability_value = (i+1 - 0.5) / self.n
q_value = chi2.ppf(x_probability_value, self.p)
result.append(
(q_value, x)
)
return result
def draw(self, terminal=False):
qq_tuples = self._get_qq_tuples()
x = [x for x, _ in qq_tuples]
y = [y for _, y in qq_tuples]
if terminal:
fig = tpl.figure()
fig.plot(x, y, width=60, height=20)
fig.show()
else:
plt.scatter(x, y)
if __name__=="__main__":
a = list(np.random.uniform(-1, 1, 100))
b = list(np.random.normal(-1, 4, 100))
c = list(np.random.chisquare(10, 100))
data = np.array([a, b, c]).transpose()
cq = ChiQQ(data)
cq.draw(terminal=True)
| [
"numpy.random.uniform",
"numpy.random.chisquare",
"matplotlib.pyplot.scatter",
"scipy.stats.chi2.ppf",
"numpy.mean",
"numpy.linalg.inv",
"numpy.array",
"numpy.random.normal",
"numpy.matmul",
"termplotlib.figure"
] | [((1068, 1093), 'numpy.mean', 'np.mean', (['self.src'], {'axis': '(0)'}), '(self.src, axis=0)\n', (1075, 1093), True, 'import numpy as np\n'), ((1343, 1373), 'numpy.linalg.inv', 'np.linalg.inv', (['self.cov_matrix'], {}), '(self.cov_matrix)\n', (1356, 1373), True, 'import numpy as np\n'), ((2335, 2364), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (2352, 2364), True, 'import numpy as np\n'), ((2379, 2407), 'numpy.random.normal', 'np.random.normal', (['(-1)', '(4)', '(100)'], {}), '(-1, 4, 100)\n', (2395, 2407), True, 'import numpy as np\n'), ((2422, 2450), 'numpy.random.chisquare', 'np.random.chisquare', (['(10)', '(100)'], {}), '(10, 100)\n', (2441, 2450), True, 'import numpy as np\n'), ((1843, 1880), 'scipy.stats.chi2.ppf', 'chi2.ppf', (['x_probability_value', 'self.p'], {}), '(x_probability_value, self.p)\n', (1851, 1880), False, 'from scipy.stats import chi2\n'), ((2167, 2179), 'termplotlib.figure', 'tpl.figure', ([], {}), '()\n', (2177, 2179), True, 'import termplotlib as tpl\n'), ((2277, 2294), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (2288, 2294), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2482), 'numpy.array', 'np.array', (['[a, b, c]'], {}), '([a, b, c])\n', (2471, 2482), True, 'import numpy as np\n'), ((1481, 1505), 'numpy.matmul', 'np.matmul', (['diff', 'inv_cov'], {}), '(diff, inv_cov)\n', (1490, 1505), True, 'import numpy as np\n')] |
from hierarchical_emb_clustering import CFIEClustering
import pickle
import numpy as np
import networkx as nx
import pandas as pd
from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster
import itertools
import time
class ReutersEvaluator:
def __init__(self, reuters_df, topics_G, industry_cat_G, reuters_categories):
self.reuters_df = reuters_df
self.reuter_topic_G = nx.compose(topics_G, industry_cat_G)
self.reuters_df.categories = self.reuters_df.categories.apply(lambda x: [c for c in x if is_valid_category(c)])
self.category_doc_map = self.__get_category_doc_distribution(reuters_categories)
def __get_category_doc_distribution(self, reuters_categories):
# existing category distribution
categ_doc_map = {}
for c in reuters_categories:
#print(c)
#print(self.reuters_df[self.reuters_df.categories.apply(lambda x: c in x)].index.values)
categ_doc_map[c] = set(self.reuters_df[self.reuters_df.categories.apply(lambda x: c in x)].index.values)
return categ_doc_map
def evaluate(self, source_label_G, source_doc_df, source_label_name, pred_df, pred_label_name):
param_names = ['total_pairs', 'pos_pairs', 'neg_pairs']
        metric_names = ['disconnected_clusters', 'precision', 'recall', 'tnr', 'fscore', 'accuracy']
param_values, metric_values = self.__get_evaluation_scores(source_label_G, source_doc_df, source_label_name, \
pred_df, pred_label_name)
eval_suffix = "@"+ source_label_name + '#' + pred_label_name
params_map = {}
for name, value in zip(param_names, param_values):
params_map[name+eval_suffix] = value
metrics_map = {}
for name, value in zip(metric_names, metric_values):
metrics_map[name + eval_suffix] = value
return params_map, metrics_map
def __get_evaluation_scores(self, cluster_G, doc_df, cluster_label_name, pred_df, pred_label_name):
print('\n\nGetting clusters by : ', cluster_label_name)
label_doc_G = get_label_doc_graph(cluster_G, doc_df, cluster_label_name)
labels_disconnected_docs, _ = get_disconnected_docs(label_doc_G, doc_df)
print('Disconnected clusters counts: ', len(labels_disconnected_docs),
[len(c) for c in labels_disconnected_docs])
print('\n\nEvaluating same clusters by matching: ', pred_label_name)
total_same_pairs, true_positives, false_negatives = 0, 0, 0
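        # Pair-counting evaluation: document pairs from the same reference cluster
        # should share a predicted label (counted as true positives), while pairs
        # drawn from different reference clusters should not (true negatives).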
for connected_docs in labels_disconnected_docs:
if len(connected_docs) < 2:
continue
t0 = time.time()
total, tp, fn = parallelize(connected_docs, pred_label_name, pred_df, remove_labels=[])
print('Evaluator same cluster: {:.3f} sec'.format(time.time()-t0))
total_same_pairs += total
true_positives += tp
false_negatives += fn
        print('\n\nEvaluating different clusters by matching: ', pred_label_name)
docs1, docs2 = labels_disconnected_docs[0], list(itertools.chain(*labels_disconnected_docs[1:]))
t0 = time.time()
total_diff_pairs, false_positives, true_negatives = parallelize_intercluster(docs1, docs2, pred_label_name, pred_df)
print('Evaluator diff cluster: {:.3f} sec'.format(time.time() - t0))
total_pairs = total_same_pairs+total_diff_pairs
### calculate prediction scores
precision = get_score(true_positives, false_positives)
recall = get_score(true_positives, false_negatives)
tnr = get_score(true_negatives, false_positives)
fscore = get_fscore(precision, recall)
accuracy = get_accuracy(true_positives, true_negatives, total_pairs)
return (total_pairs, total_same_pairs, total_diff_pairs), \
(len(labels_disconnected_docs), precision, recall, tnr, fscore, accuracy)
def get_score(true_count, false_count):
if true_count==0 and false_count==0:
return None
return round(true_count*100/(true_count+false_count), 4)
def get_fscore(precision, recall):
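    # harmonic mean of precision and recall: F = 2 * P * R / (P + R), in percentage points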
if precision is None or recall is None or precision==0 or recall==0:
return None
return round(2*precision*recall/(precision+recall), 4)
def get_accuracy(true_positives, true_negative, total_pairs):
if total_pairs == 0:
return None
return round((true_positives+true_negative)*100/total_pairs, 4)
def evaluate_assigned_clusters(cluster_obj, re_obj):
t0 = time.time()
label_G = cluster_obj._closed_fi_graph
cluster_doc_df = cluster_obj.cluster_processed_df
cluster_doc_df.selected_labels = cluster_doc_df.selected_labels.apply(lambda x: list(np.array(x)[:,0]) if x else [])
cluster_doc_df.labels = cluster_doc_df.labels.apply(lambda x: list(np.array(x)[:,0]) if x else [])
# Cluster evaluation
print('\n', '='*10, 'Evaluating predicted label clusters..', '='*10)
l_params_map, l_metrics_map = re_obj.evaluate(label_G, cluster_doc_df, 'labels', re_obj.reuters_df, 'categories')
sel_params_map, sel_metrics_map = re_obj.evaluate(label_G, cluster_doc_df, 'selected_labels', re_obj.reuters_df, \
'categories')
    ## Evaluate Reuters clusters by their Categories
print('\n\n\n','='*10, 'Evaluating Reuters clusters by categories..', '='*10)
#assigned_clusters_df = cluster_doc_df[cluster_doc_df.labels.map(len)>0]
cat_l_params_map, cat_l_metrics_map = re_obj.evaluate(re_obj.reuter_topic_G, re_obj.reuters_df, 'categories', \
cluster_doc_df, 'labels')
#assigned_clusters_df = cluster_doc_df[cluster_doc_df.selected_labels.map(len) > 0]
cat_sel_params_map, cat_sel_metrics_map = re_obj.evaluate(re_obj.reuter_topic_G, re_obj.reuters_df, 'categories', \
cluster_doc_df, 'selected_labels')
params_map = {**l_params_map, **sel_params_map, **cat_l_params_map, **cat_sel_params_map}
metrics_map = {**l_metrics_map, **sel_metrics_map, **cat_l_metrics_map, **cat_sel_metrics_map}
print('{:.3f} mins.'.format((time.time()-t0)/60))
return params_map, metrics_map
def get_unassigned_scores(df, re_obj, label_name, outlier_thresh):
total_docs = len(df)
unassigned_df = df[df[label_name].map(len)==0]
outlier_predicted = len(unassigned_df)
absent_features = 0
true_outlier_docs = 0
false_positives = 0
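    # Documents that received no labels are treated as predicted outliers; a
    # prediction is counted as correct when the document belongs to at least one
    # reference category with fewer than outlier_thresh documents.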
for doc_id, row in unassigned_df.iterrows():
if len(row[label_name]) == 0:
absent_features += 1
categs = re_obj.reuters_df.loc[doc_id].categories
docs_count = [len(re_obj.category_doc_map[c]) for c in categs]
outlier_cands = [(cat, count) for cat, count in zip(categs, docs_count) if
count < outlier_thresh]
if outlier_cands:
true_outlier_docs += 1
else:
false_positives += 1
absent_feature_score = round(absent_features * 100 / total_docs, 4)
outlier_predicted_score = round(outlier_predicted * 100 / total_docs, 4)
    # guard against division by zero when every document received at least one label
    tpr = round(true_outlier_docs * 100 / outlier_predicted, 4) if outlier_predicted else None
    fpr = round(false_positives * 100 / outlier_predicted, 4) if outlier_predicted else None
metrics_map = {'absent_features':absent_feature_score, 'outlier_predicted': outlier_predicted_score, \
'outlier_precision': tpr, 'outlier_fpr': fpr}
return metrics_map
def evaluate_unassigned_docs(cluster_obj, re_obj):
clean_docs = cluster_obj.cluster_processed_df[~cluster_obj.cluster_processed_df.isNoisy]
total_docs = len(clean_docs)
### TMP assignment ***
outlier_thresh = cluster_obj.outlier_thresh
labels_metrics_map = get_unassigned_scores(clean_docs, re_obj, 'labels', outlier_thresh)
sel_labels_metrics_map = get_unassigned_scores(clean_docs, re_obj, 'selected_labels', outlier_thresh)
metrics_map = {}
for metric_map, suffix in zip(list([labels_metrics_map, sel_labels_metrics_map]), list(['@labels', '@selected_labels'])):
for name, value in metric_map.items():
metrics_map[name + suffix] = value
return metrics_map
def prepare_reuters_obj(reuters_dir):
df_file = reuters_dir + '19961119_selected_df.pkl'
topics_file = reuters_dir + 'reuters_topics_G.pkl'
industry_file = reuters_dir + 'reuters_industry_cat_G.pkl'
categ_file = reuters_dir + 'selected_categs.pkl'
reuters_df = pickle.load(open(df_file, 'rb'))
topics_G = pickle.load(open(topics_file, 'rb'))
industry_cat_G = pickle.load(open(industry_file, 'rb'))
reuters_categories = pickle.load(open(categ_file, 'rb'))
re_obj = ReutersEvaluator(reuters_df, topics_G, industry_cat_G, reuters_categories)
return re_obj
def re_eval_mlflowrun(reuters_dir, cluster_obj):
re_obj = prepare_reuters_obj(reuters_dir)
eval_params_map, cluster_metrics_map = evaluate_assigned_clusters(cluster_obj, re_obj)
outlier_metrics_map = evaluate_unassigned_docs(cluster_obj, re_obj)
return eval_params_map, {**cluster_metrics_map, **outlier_metrics_map}
if __name__=='__main__':
data_dir = '../../sample_data/'
reuters_dir = data_dir + 'reuters_selected/'
categ_file = reuters_dir + 'selected_categs.pkl'
df_file = reuters_dir + '19961119_selected_df.pkl'
topics_file = reuters_dir + 'reuters_topics_G.pkl'
industry_file = reuters_dir + 'reuters_industry_cat_G.pkl'
reuters_df = pickle.load(open(df_file, 'rb'))
topics_G = pickle.load(open(topics_file, 'rb'))
industry_cat_G = pickle.load(open(industry_file, 'rb'))
reuters_categories = pickle.load(open(categ_file, 'rb'))
re_obj = ReutersEvaluator(reuters_df, topics_G, industry_cat_G, reuters_categories)
"""
corpus_name = 'Reuters-text'
cluster_dir = data_dir + 'clustering_results/' + corpus_name + '/'
cluster_obj_file = cluster_dir + 'cfi_emb.pkl'
#eval_dir = data_dir + 'evaluation_results/re_text/dim0.1/'
cluster_properties = {'Dimension':0.1, 'FeaturesSize':195, 'Embedding':'Fasttext', 'NegativeLabels':False}
"""
cfi_sb_emb_obj = pickle.load(open('../../sample_data/clustering_results/Reuters-headline/cluster_sb_emb_obj.pkl', 'rb'))
#evaluate_unassigned_docs(cfi_sb_emb_obj, re_obj)
#print(cfi_sb_emb_obj.cluster_processed_df.head(1)[['labels', 'selected_labels']])
cfi_file = '../mlruns/0/a33df08dd917443097dea4f5d8365755/artifacts/cfi_obj_dir/cfi_obj.pkl'
cfi_obj = pickle.load(open(cfi_file, 'rb'))
doc_df = cfi_obj.cluster_processed_df
label_doc_G = get_label_doc_graph(cfi_obj._closed_fi_graph, doc_df, 'selected_labels')
labels_disconnected_docs, _ = get_disconnected_docs(label_doc_G, doc_df)
print(len(labels_disconnected_docs))
evaluate_assigned_clusters(cfi_obj, re_obj)
"""
max_dup = 2
max_labels_per_doc = math.ceil(max_dup * len(cfi_sb_emb_obj._clusters) / 100) if max_dup else None
print(max_dup, max_labels_per_doc)
illegal_docs_assigns = 0
for doc_id, row in cfi_sb_emb_obj.cluster_processed_df.iterrows():
if len(row.labels)> max_labels_per_doc:
illegal_docs_assigns += 1
print(row, '\n')
print('illegal_docs_assigns: ', illegal_docs_assigns)
"""
"""
print(reuters_df.index)
print(cfi_sb_emb_obj.cluster_processed_df.index)
params_map, metrics_map = re_eval_mlflowrun(reuters_dir, cfi_sb_emb_obj) #evaluate_unassigned_docs(cfi_sb_emb_obj, re_obj)
print(params_map)
print(metrics_map)
"""
"""
cluster_eval_df, category_eval_df = process_evaluation(cfi_sb_emb_obj, re_obj)
print("\ncluster scores..")
eval_df = get_eval_scores(cluster_eval_df)
for k, v in eval_df.iteritems():
print(k, ':', v.values)
print("\n\ncategory scores..")
eval_df = get_eval_scores(category_eval_df)
for k, v in eval_df.iteritems():
print(k, ':', v.values)
"""
"""
category_eval_df = pickle.load(open(eval_dir + 'category_eval_scores.pkl', 'rb'))
cluster_eval_df = pickle.load(open(eval_dir + 'cluster_eval_scores_df.pkl', 'rb'))
cluster_obj = pickle.load(open(cluster_obj_file, 'rb'))
run_mlflow('Reuters-text', cluster_obj, cluster_properties, cluster_eval_df)
run_mlflow('Reuters-text', cluster_obj, cluster_properties, category_eval_df)
"""
| [
"reuters_utils.get_disconnected_docs",
"time.time",
"reuters_utils.parallelize_intercluster",
"reuters_utils.is_valid_category",
"numpy.array",
"reuters_utils.parallelize",
"networkx.compose",
"itertools.chain",
"reuters_utils.get_label_doc_graph"
] | [((4717, 4728), 'time.time', 'time.time', ([], {}), '()\n', (4726, 4728), False, 'import time\n'), ((10872, 10944), 'reuters_utils.get_label_doc_graph', 'get_label_doc_graph', (['cfi_obj._closed_fi_graph', 'doc_df', '"""selected_labels"""'], {}), "(cfi_obj._closed_fi_graph, doc_df, 'selected_labels')\n", (10891, 10944), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((10979, 11021), 'reuters_utils.get_disconnected_docs', 'get_disconnected_docs', (['label_doc_G', 'doc_df'], {}), '(label_doc_G, doc_df)\n', (11000, 11021), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((461, 497), 'networkx.compose', 'nx.compose', (['topics_G', 'industry_cat_G'], {}), '(topics_G, industry_cat_G)\n', (471, 497), True, 'import networkx as nx\n'), ((2227, 2285), 'reuters_utils.get_label_doc_graph', 'get_label_doc_graph', (['cluster_G', 'doc_df', 'cluster_label_name'], {}), '(cluster_G, doc_df, cluster_label_name)\n', (2246, 2285), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((2324, 2366), 'reuters_utils.get_disconnected_docs', 'get_disconnected_docs', (['label_doc_G', 'doc_df'], {}), '(label_doc_G, doc_df)\n', (2345, 2366), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((3347, 3358), 'time.time', 'time.time', ([], {}), '()\n', (3356, 3358), False, 'import time\n'), ((3419, 3483), 'reuters_utils.parallelize_intercluster', 'parallelize_intercluster', (['docs1', 'docs2', 'pred_label_name', 'pred_df'], {}), '(docs1, docs2, pred_label_name, pred_df)\n', (3443, 3483), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((2848, 2859), 'time.time', 'time.time', ([], {}), '()\n', (2857, 2859), False, 'import time\n'), ((2888, 2959), 'reuters_utils.parallelize', 'parallelize', (['connected_docs', 'pred_label_name', 'pred_df'], {'remove_labels': '[]'}), '(connected_docs, pred_label_name, pred_df, remove_labels=[])\n', (2899, 2959), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((3285, 3331), 'itertools.chain', 'itertools.chain', (['*labels_disconnected_docs[1:]'], {}), '(*labels_disconnected_docs[1:])\n', (3300, 3331), False, 'import itertools\n'), ((3542, 3553), 'time.time', 'time.time', ([], {}), '()\n', (3551, 3553), False, 'import time\n'), ((6453, 6464), 'time.time', 'time.time', ([], {}), '()\n', (6462, 6464), False, 'import time\n'), ((595, 615), 'reuters_utils.is_valid_category', 'is_valid_category', (['c'], {}), '(c)\n', (612, 615), False, 'from reuters_utils import is_valid_category, get_label_doc_graph, get_disconnected_docs, parallelize, parallelize_intercluster\n'), ((3022, 3033), 'time.time', 'time.time', ([], {}), '()\n', (3031, 3033), False, 'import time\n'), ((4917, 4928), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4925, 4928), True, 'import numpy as np\n'), ((5020, 5031), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5028, 5031), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import codecs
import json
import os
import tempfile
import random
import string
import copy
import torch
import logging
import shutil
from losses.BaseLossConf import BaseLossConf
#import traceback
from settings import LanguageTypes, ProblemTypes, TaggingSchemes, SupportedMetrics, PredictionTypes, DefaultPredictionFields, ConstantStatic
from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json
from utils.exceptions import ConfigurationError
import numpy as np
class ConstantStaticItems(ConstantStatic):
@staticmethod
def concat_key_desc(key_prefix_desc, key):
return key_prefix_desc + '.' + key
@staticmethod
def get_value_by_key(json, key, key_prefix='', use_default=False, default=None):
"""
Args:
json: a json object
key: a key pointing to the value wanted to acquire
use_default: if you really want to use default value when key can not be found in json object, set use_default=True
default: if key is not found and default is None, we would raise an Exception, except that use_default is True
Returns:
value:
"""
try:
value = json[key]
except:
if not use_default:
raise ConfigurationError("key[%s] can not be found in configuration file" % (key_prefix + key))
else:
value = default
return value
@staticmethod
def add_item(item_name, use_default=False, default=None):
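        # Factory method: builds a new ConstantStatic subclass named item_name whose
        # load_data classmethod copies json[item_name] onto the target object, so the
        # configuration items below can be declared declaratively with defaults.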
def add_item_loading_func(use_default, default, func_get_value_by_key):
@classmethod
def load_data(cls, obj, json, key_prefix_desc='', use_default=use_default, default=default, func_get_value_by_key=func_get_value_by_key):
obj.__dict__[cls.__name__] = func_get_value_by_key(json, cls.__name__, key_prefix_desc, use_default, default)
return obj
return load_data
return type(item_name, (ConstantStatic, ), dict(load_data=add_item_loading_func(use_default, default, __class__.get_value_by_key)))
@classmethod
def load_data(cls, obj, json, key_prefix_desc=''):
if cls.__name__ in json.keys():
json = json[cls.__name__]
for key in cls.__dict__.keys():
if not hasattr(cls.__dict__[key], 'load_data'):
continue
item = cls.__dict__[key]
obj = item.load_data(obj, json, cls.concat_key_desc(key_prefix_desc, item.__name__))
return obj
class ModelConf(object):
def __init__(self, phase, conf_path, nb_version, params=None, mode='normal'):
""" loading configuration from configuration file and argparse parameters
Args:
phase: train/test/predict/cache
specially, 'cache' phase is used for verifying old cache
conf_path:
params:
mode: 'normal', 'philly'
"""
self.phase = phase
assert self.phase in set(['train', 'test', 'predict', 'cache'])
self.conf_path = conf_path
self.params = params
self.mode = mode.lower()
        assert self.mode in set(['normal', 'philly']), 'Your mode %s is illegal, supported modes are: normal and philly!' % self.mode
self.load_from_file(conf_path)
self.check_version_compat(nb_version, self.tool_version)
if phase != 'cache':
self.check_conf()
logging.debug('Print ModelConf below:')
logging.debug('=' * 80)
# print ModelConf
for name, value in vars(self).items():
if name.startswith("__") is False:
logging.debug('%s: %s' % (str(name), str(value)))
logging.debug('=' * 80)
class Conf(ConstantStaticItems):
license = ConstantStaticItems.add_item('license')
tool_version = ConstantStaticItems.add_item('tool_version')
model_description = ConstantStaticItems.add_item('model_description')
language = ConstantStaticItems.add_item('language', use_default=True, default='english')
class inputs(ConstantStaticItems):
use_cache = ConstantStaticItems.add_item('use_cache', use_default=True, default=True)
dataset_type = ConstantStaticItems.add_item('dataset_type')
tagging_scheme = ConstantStaticItems.add_item('tagging_scheme', use_default=True, default=None)
class data_paths(ConstantStaticItems):
train_data_path = ConstantStaticItems.add_item('train_data_path', use_default=True, default=None)
valid_data_path = ConstantStaticItems.add_item('valid_data_path', use_default=True, default=None)
test_data_path = ConstantStaticItems.add_item('test_data_path', use_default=True, default=None)
predict_data_path = ConstantStaticItems.add_item('predict_data_path', use_default=True, default=None)
pre_trained_emb = ConstantStaticItems.add_item('pre_trained_emb', use_default=True, default=None)
pretrained_model_path = ConstantStaticItems.add_item('pretrained_model_path', use_default=True, default=None)
file_with_col_header = ConstantStaticItems.add_item('file_with_col_header', use_default=True, default=False)
pretrained_emb_type = ConstantStaticItems.add_item('pretrained_emb_type', use_default=True, default='glove')
pretrained_emb_binary_or_text = ConstantStaticItems.add_item('pretrained_emb_binary_or_text', use_default=True, default='text')
involve_all_words_in_pretrained_emb = ConstantStaticItems.add_item('involve_all_words_in_pretrained_emb', use_default=True, default=False)
add_start_end_for_seq = ConstantStaticItems.add_item('add_start_end_for_seq', use_default=True, default=False)
file_header = ConstantStaticItems.add_item('file_header', use_default=True, default=None)
predict_file_header = ConstantStaticItems.add_item('predict_file_header', use_default=True, default=None)
model_inputs = ConstantStaticItems.add_item('model_inputs')
target = ConstantStaticItems.add_item('target', use_default=True, default=None)
positive_label = ConstantStaticItems.add_item('positive_label', use_default=True, default=None)
class outputs(ConstantStaticItems):
save_base_dir = ConstantStaticItems.add_item('save_base_dir', use_default=True, default=None)
model_name = ConstantStaticItems.add_item('model_name')
train_log_name = ConstantStaticItems.add_item('train_log_name', use_default=True, default=None)
test_log_name = ConstantStaticItems.add_item('test_log_name', use_default=True, default=None)
predict_log_name = ConstantStaticItems.add_item('predict_log_name', use_default=True, default=None)
predict_fields = ConstantStaticItems.add_item('predict_fields', use_default=True, default=None)
predict_output_name = ConstantStaticItems.add_item('predict_output_name', use_default=True, default='predict.tsv')
cache_dir = ConstantStaticItems.add_item('cache_dir', use_default=True, default=None)
class training_params(ConstantStaticItems):
class vocabulary(ConstantStaticItems):
min_word_frequency = ConstantStaticItems.add_item('min_word_frequency', use_default=True, default=3)
max_vocabulary = ConstantStaticItems.add_item('max_vocabulary', use_default=True, default=800 * 1000)
max_building_lines = ConstantStaticItems.add_item('max_building_lines', use_default=True, default=1000 * 1000)
optimizer = ConstantStaticItems.add_item('optimizer', use_default=True, default=None)
clip_grad_norm_max_norm = ConstantStaticItems.add_item('clip_grad_norm_max_norm', use_default=True, default=-1)
chunk_size = ConstantStaticItems.add_item('chunk_size', use_default=True, default=1000 * 1000)
lr_decay = ConstantStaticItems.add_item('lr_decay', use_default=True, default=1)
minimum_lr = ConstantStaticItems.add_item('minimum_lr', use_default=True, default=0)
epoch_start_lr_decay = ConstantStaticItems.add_item('epoch_start_lr_decay', use_default=True, default=1)
use_gpu = ConstantStaticItems.add_item('use_gpu', use_default=True, default=False)
            cpu_num_workers = ConstantStaticItems.add_item('cpu_num_workers', use_default=True, default=-1) # by default, use all the workers the CPU supports
batch_size = ConstantStaticItems.add_item('batch_size', use_default=True, default=1)
batch_num_to_show_results = ConstantStaticItems.add_item('batch_num_to_show_results', use_default=True, default=10)
max_epoch = ConstantStaticItems.add_item('max_epoch', use_default=True, default=float('inf'))
valid_times_per_epoch = ConstantStaticItems.add_item('valid_times_per_epoch', use_default=True, default=None)
steps_per_validation = ConstantStaticItems.add_item('steps_per_validation', use_default=True, default=10)
text_preprocessing = ConstantStaticItems.add_item('text_preprocessing', use_default=True, default=list())
max_lengths = ConstantStaticItems.add_item('max_lengths', use_default=True, default=None)
fixed_lengths = ConstantStaticItems.add_item('fixed_lengths', use_default=True, default=None)
tokenizer = ConstantStaticItems.add_item('tokenizer', use_default=True, default=None)
architecture = ConstantStaticItems.add_item('architecture')
loss = ConstantStaticItems.add_item('loss', use_default=True, default=None)
metrics = ConstantStaticItems.add_item('metrics', use_default=True, default=None)
def raise_configuration_error(self, key):
raise ConfigurationError(
"The configuration file %s is illegal. the item [%s] is not found." % (self.conf_path, key))
def load_from_file(self, conf_path):
# load file
self.conf = load_from_json(conf_path, debug=False)
self = self.Conf.load_data(self, {'Conf' : self.conf}, key_prefix_desc='Conf')
self.language = self.language.lower()
self.configurate_outputs()
self.configurate_inputs()
self.configurate_training_params()
self.configurate_architecture()
self.configurate_loss()
self.configurate_cache()
def configurate_outputs(self):
def configurate_logger(self):
if self.phase == 'cache':
return
# dir
if hasattr(self.params, 'log_dir') and self.params.log_dir:
self.log_dir = self.params.log_dir
prepare_dir(self.log_dir, True, allow_overwrite=True)
else:
self.log_dir = self.save_base_dir
# path
self.train_log_path = os.path.join(self.log_dir, self.train_log_name)
self.test_log_path = os.path.join(self.log_dir, self.test_log_name)
self.predict_log_path = os.path.join(self.log_dir, self.predict_log_name)
if self.phase == 'train':
log_path = self.train_log_path
elif self.phase == 'test':
log_path = self.test_log_path
elif self.phase == 'predict':
log_path = self.predict_log_path
if log_path is None:
self.raise_configuration_error(self.phase + '_log_name')
# log level
if self.mode == 'philly' or self.params.debug:
log_set(log_path, console_level='DEBUG', console_detailed=True, disable_log_file=self.params.disable_log_file)
else:
log_set(log_path, disable_log_file=self.params.disable_log_file)
# save base dir
if hasattr(self.params, 'model_save_dir') and self.params.model_save_dir:
self.save_base_dir = self.params.model_save_dir
elif self.save_base_dir is None:
self.raise_configuration_error('save_base_dir')
# prepare save base dir
if self.phase != 'cache':
prepare_dir(self.save_base_dir, True, allow_overwrite=self.params.force or self.mode == 'philly',
extra_info='will overwrite model file and train.log' if self.phase=='train' else 'will add %s.log and predict file'%self.phase)
# logger
configurate_logger(self)
# predict output path
if self.phase != 'cache':
if self.params.predict_output_path:
self.predict_output_path = self.params.predict_output_path
else:
self.predict_output_path = os.path.join(self.save_base_dir, self.predict_output_name)
logging.debug('Prepare dir for: %s' % self.predict_output_path)
prepare_dir(self.predict_output_path, False, allow_overwrite=self.params.force or self.mode == 'philly')
if self.predict_fields is None:
self.predict_fields = DefaultPredictionFields[ProblemTypes[self.problem_type]]
self.model_save_path = os.path.join(self.save_base_dir, self.model_name)
def configurate_inputs(self):
def configurate_data_path(self):
self.pretrained_emb_path =self.pre_trained_emb
if self.mode != "normal":
self.train_data_path = None
self.valid_data_path = None
self.test_data_path = None
self.predict_data_path = None
self.pretrained_emb_path = None
if hasattr(self.params, 'train_data_path') and self.params.train_data_path:
self.train_data_path = self.params.train_data_path
if hasattr(self.params, 'valid_data_path') and self.params.valid_data_path:
self.valid_data_path = self.params.valid_data_path
if hasattr(self.params, 'test_data_path') and self.params.test_data_path:
self.test_data_path = self.params.test_data_path
if hasattr(self.params, 'predict_data_path') and self.params.predict_data_path:
self.predict_data_path = self.params.predict_data_path
if hasattr(self.params, 'pretrained_emb_path') and self.params.pretrained_emb_path:
self.pretrained_emb_path = self.params.pretrained_emb_path
if self.phase == 'train' or self.phase == 'cache':
if self.valid_data_path is None and self.test_data_path is not None:
# We support test_data_path == None, if someone set valid_data_path to None while test_data_path is not None,
# swap the valid_data_path and test_data_path
self.valid_data_path = self.test_data_path
self.test_data_path = None
elif self.phase == 'predict':
if self.predict_data_path is None and self.test_data_path is not None:
self.predict_data_path = self.test_data_path
self.test_data_path = None
return self
def configurate_data_format(self):
# file columns
if self.phase == 'train' or self.phase == 'test' or self.phase == 'cache':
self.file_columns = self.file_header
if self.file_columns is None:
self.raise_configuration_error('file_columns')
if self.phase == 'predict':
self.file_columns, self.predict_file_columns = self.file_header, self.predict_file_header
if self.file_columns is None and self.predict_file_columns is None:
self.raise_configuration_error('predict_file_columns')
if self.file_columns and self.predict_file_columns is None:
self.predict_file_columns = self.file_columns
# target
if self.phase != 'predict':
self.answer_column_name = self.target
if self.target is None and self.phase != 'cache':
self.raise_configuration_error('target')
if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging and self.add_start_end_for_seq is None:
self.add_start_end_for_seq = True
# pretrained embedding
if 'word' in self.architecture[0]['conf'] and self.pretrained_emb_path:
if hasattr(self.params, 'involve_all_words_in_pretrained_emb') and self.params.involve_all_words_in_pretrained_emb:
self.involve_all_words_in_pretrained_emb = self.params.involve_all_words_in_pretrained_emb
if hasattr(self.params, 'pretrained_emb_type') and self.params.pretrained_emb_type:
self.pretrained_emb_type = self.params.pretrained_emb_type
if hasattr(self.params, 'pretrained_emb_binary_or_text') and self.params.pretrained_emb_binary_or_text:
self.pretrained_emb_binary_or_text = self.params.pretrained_emb_binary_or_text
self.pretrained_emb_dim = self.architecture[0]['conf']['word']['dim']
else:
self.pretrained_emb_path = None
self.involve_all_words_in_pretrained_emb = None
self.pretrained_emb_type = None
self.pretrained_emb_binary_or_text = None
self.pretrained_emb_dim = None
return self
def configurate_model_input(self):
self.object_inputs = self.model_inputs
self.object_inputs_names = [name for name in self.object_inputs]
return self
self.problem_type = self.dataset_type.lower()
# previous model path
if hasattr(self.params, 'previous_model_path') and self.params.previous_model_path:
self.previous_model_path = self.params.previous_model_path
else:
self.previous_model_path = os.path.join(self.save_base_dir, self.model_name)
# pretrained model path
if hasattr(self.params, 'pretrained_model_path') and self.params.pretrained_model_path:
self.pretrained_model_path = self.params.pretrained_model_path
# saved problem path
model_path = None
if self.phase == 'train':
model_path = self.pretrained_model_path
elif self.phase == 'test' or self.phase == 'predict':
model_path = self.previous_model_path
if model_path:
model_path_dir = os.path.dirname(model_path)
self.saved_problem_path = os.path.join(model_path_dir, '.necessary_cache', 'problem.pkl')
if not os.path.isfile(self.saved_problem_path):
self.saved_problem_path = os.path.join(model_path_dir, 'necessary_cache', 'problem.pkl')
if not (os.path.isfile(model_path) and os.path.isfile(self.saved_problem_path)):
raise Exception('Previous trained model %s or its dictionaries %s does not exist!' % (model_path, self.saved_problem_path))
configurate_data_path(self)
configurate_data_format(self)
configurate_model_input(self)
def configurate_training_params(self):
# optimizer
if self.phase == 'train':
if self.optimizer is None:
self.raise_configuration_error('training_params.optimizer')
if 'name' not in self.optimizer.keys():
self.raise_configuration_error('training_params.optimizer.name')
self.optimizer_name = self.optimizer['name']
if 'params' not in self.optimizer.keys():
self.raise_configuration_error('training_params.optimizer.params')
self.optimizer_params = self.optimizer['params']
if hasattr(self.params, 'learning_rate') and self.params.learning_rate:
self.optimizer_params['lr'] = self.params.learning_rate
# batch size
self.batch_size_each_gpu = self.batch_size # the batch_size in conf file is the batch_size on each GPU
if hasattr(self.params, 'batch_size') and self.params.batch_size:
self.batch_size_each_gpu = self.params.batch_size
if self.batch_size_each_gpu is None:
self.raise_configuration_error('training_params.batch_size')
self.batch_size_total = self.batch_size_each_gpu
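        # with multiple visible GPUs the batch is split across devices, so the
        # effective total batch size is batch_size_each_gpu * device_count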
if torch.cuda.device_count() > 1:
self.batch_size_total = torch.cuda.device_count() * self.batch_size_each_gpu
self.batch_num_to_show_results = self.batch_num_to_show_results // torch.cuda.device_count()
if hasattr(self.params, 'max_epoch') and self.params.max_epoch:
self.max_epoch = self.params.max_epoch
if self.valid_times_per_epoch is not None:
logging.info("configuration[training_params][valid_times_per_epoch] is deprecated, please use configuration[training_params][steps_per_validation] instead")
# sequence length
if self.fixed_lengths:
self.max_lengths = None
if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging:
self.fixed_lengths = None
self.max_lengths = None
# text preprocessing
self.__text_preprocessing = self.text_preprocessing
self.DBC2SBC = True if 'DBC2SBC' in self.__text_preprocessing else False
self.unicode_fix = True if 'unicode_fix' in self.__text_preprocessing else False
self.remove_stopwords = True if 'remove_stopwords' in self.__text_preprocessing else False
# tokenzier
if self.tokenizer is None:
self.tokenizer = 'jieba' if self.language == 'chinese' else 'nltk'
# GPU/CPU
if self.phase != 'cache':
if torch.cuda.is_available() and torch.cuda.device_count() > 0 and self.use_gpu:
logging.info("Activating GPU mode, there are %d GPUs available" % torch.cuda.device_count())
else:
self.use_gpu = False
logging.info("Activating CPU mode")
def configurate_architecture(self):
self.input_types = self.architecture[0]['conf']
# extra feature
feature_all = set([_.lower() for _ in self.input_types.keys()])
formal_feature = set(['word', 'char'])
extra_feature_num = feature_all - formal_feature
self.extra_feature = len(extra_feature_num) != 0
if self.extra_feature:
if self.DBC2SBC:
logging.warning("Detect the extra feature %s, set the DBC2sbc is False." % ''.join(list(extra_feature_num)))
if self.unicode_fix:
logging.warning("Detect the extra feature %s, set the unicode_fix is False." % ''.join(list(extra_feature_num)))
if self.remove_stopwords:
logging.warning("Detect the extra feature %s, set the remove_stopwords is False." % ''.join(list(extra_feature_num)))
# output layer
self.output_layer_id = []
for single_layer in self.architecture:
if 'output_layer_flag' in single_layer and single_layer['output_layer_flag']:
self.output_layer_id.append(single_layer['layer_id'])
# check CNN layer & change min sentence length
cnn_rele_layers = ['Conv', 'ConvPooling']
self.min_sentence_len = 0
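        # sentences must be at least as long as the widest convolution window,
        # otherwise Conv/ConvPooling layers would produce empty feature maps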
for layer_index, single_layer in enumerate(self.architecture):
if layer_index == 0:
continue
            if single_layer['layer'] in cnn_rele_layers:
# get window_size conf: type maybe int or list
for single_conf, single_conf_value in single_layer['conf'].items():
if 'window' in single_conf.lower():
self.min_sentence_len = max(self.min_sentence_len, np.max(np.array([single_conf_value])))
break
def configurate_loss(self):
if self.phase != 'train' and self.phase != 'test':
return
if self.loss is None or self.metrics is None:
self.raise_configuration_error('loss/metrics')
self.loss = BaseLossConf.get_conf(**self.loss)
if 'auc' in self.metrics and ProblemTypes[self.problem_type] == ProblemTypes.classification:
self.pos_label = self.positive_label
def configurate_cache(self):
# whether use cache
if self.mode == 'philly':
self.use_cache = True
# cache dir
if self.phase == 'train':
if hasattr(self.params, 'cache_dir') and self.params.cache_dir:
self.cache_dir = self.params.cache_dir
else:
if self.mode == 'normal':
if self.use_cache is False:
self.cache_dir = os.path.join(tempfile.gettempdir(), 'neuron_blocks', ''.join(random.sample(string.ascii_letters+string.digits, 16)))
else:
# for philly mode, we can only save files in model_path or scratch_path
self.cache_dir = os.path.join(self.save_base_dir, 'cache')
self.problem_path = os.path.join(self.cache_dir, 'problem.pkl')
if self.pretrained_emb_path is not None:
self.emb_pkl_path = os.path.join(self.cache_dir, 'emb.pkl')
else:
self.emb_pkl_path = None
else:
tmp_problem_path = os.path.join(self.save_base_dir, '.necessary_cache', 'problem.pkl')
self.problem_path = tmp_problem_path if os.path.isfile(tmp_problem_path) else os.path.join(self.save_base_dir, 'necessary_cache', 'problem.pkl')
# md5 of training data and problem
self.train_data_md5 = None
if self.phase == 'train' and self.train_data_path:
logging.info("Calculating the md5 of traing data ...")
self.train_data_md5 = md5([self.train_data_path])
logging.info("the md5 of traing data is %s"%(self.train_data_md5))
self.problem_md5 = None
# encoding
self.encoding_cache_dir = None
self.encoding_cache_index_file_path = None
self.encoding_cache_index_file_md5_path = None
self.encoding_file_index = None
self.encoding_cache_legal_line_cnt = 0
self.encoding_cache_illegal_line_cnt = 0
self.load_encoding_cache_generator = None
def check_conf(self):
""" verify if the configuration is legal or not
Returns:
"""
# In philly mode, ensure the data and model etc. are not the local paths defined in configuration file.
if self.mode == 'philly':
assert not (hasattr(self.params, 'train_data_path') and self.params.train_data_path is None and hasattr(self, 'train_data_path') and self.train_data_path), 'In philly mode, but you define a local train_data_path:%s in your configuration file' % self.train_data_path
assert not (hasattr(self.params, 'valid_data_path') and self.params.valid_data_path is None and hasattr(self, 'valid_data_path') and self.valid_data_path), 'In philly mode, but you define a local valid_data_path:%s in your configuration file' % self.valid_data_path
assert not (hasattr(self.params, 'test_data_path') and self.params.test_data_path is None and hasattr(self, 'test_data_path') and self.test_data_path), 'In philly mode, but you define a local test_data_path:%s in your configuration file' % self.test_data_path
if self.phase == 'train':
assert hasattr(self.params, 'model_save_dir') and self.params.model_save_dir, 'In philly mode, you must define a model save dir through the training params'
assert not (self.params.pretrained_model_path is None and self.pretrained_model_path), 'In philly mode, but you define a local pretrained model path:%s in your configuration file' % self.pretrained_model_path
assert not (self.pretrained_model_path is None and self.params.pretrained_emb_path is None and self.pretrained_emb_path), 'In philly mode, but you define a local pretrained embedding:%s in your configuration file' % self.pretrained_emb_path
elif self.phase == 'test' or self.phase == 'predict':
assert not (self.params.previous_model_path is None and self.previous_model_path), 'In philly mode, but you define a local model trained previously %s in your configuration file' % self.previous_model_path
# check inputs
# it seems that os.path.isfile cannot detect hdfs files
if self.phase == 'train':
assert self.train_data_path is not None, "Please define train_data_path"
assert os.path.isfile(self.train_data_path), "Training data %s does not exist!" % self.train_data_path
assert self.valid_data_path is not None, "Please define valid_data_path"
            assert os.path.isfile(self.valid_data_path), "Validation data %s does not exist!" % self.valid_data_path
if hasattr(self, 'pretrained_emb_type') and self.pretrained_emb_type:
                assert self.pretrained_emb_type in set(['glove', 'word2vec', 'fasttext']), 'Embedding type %s is not supported! We support glove, word2vec, fasttext now.' % self.pretrained_emb_type
if hasattr(self, 'pretrained_emb_binary_or_text') and self.pretrained_emb_binary_or_text:
                assert self.pretrained_emb_binary_or_text in set(['text', 'binary']), 'Embedding file type %s is not supported! We support text and binary.' % self.pretrained_emb_binary_or_text
elif self.phase == 'test':
assert self.test_data_path is not None, "Please define test_data_path"
            assert os.path.isfile(self.test_data_path), "Test data %s does not exist!" % self.test_data_path
elif self.phase == 'predict':
assert self.predict_data_path is not None, "Please define predict_data_path"
            assert os.path.isfile(self.predict_data_path), "Prediction data %s does not exist!" % self.predict_data_path
# check language types
SUPPORTED_LANGUAGES = set(LanguageTypes._member_names_)
assert self.language in SUPPORTED_LANGUAGES, "Language type %s is not supported now. Supported types: %s" % (self.language, ",".join(SUPPORTED_LANGUAGES))
# check problem types
SUPPORTED_PROBLEMS = set(ProblemTypes._member_names_)
assert self.problem_type in SUPPORTED_PROBLEMS, "Data type %s is not supported now. Supported types: %s" % (self.problem_type, ",".join(SUPPORTED_PROBLEMS))
if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging:
SUPPORTED_TAGGING_SCHEMES = set(TaggingSchemes._member_names_)
            assert self.tagging_scheme is not None, "For sequence tagging problems, tagging scheme must be defined at configuration[\'inputs\'][\'tagging_scheme\']!"
assert self.tagging_scheme in SUPPORTED_TAGGING_SCHEMES, "Tagging scheme %s is not supported now. Supported schemes: %s" % (self.tagging_scheme, ",".join(SUPPORTED_TAGGING_SCHEMES))
# the max_lengths of all the inputs and targets should be consistent
if self.max_lengths:
max_lengths = list(self.max_lengths.values())
for i in range(len(max_lengths) - 1):
assert max_lengths[i] == max_lengths[i + 1], "For sequence tagging tasks, the max_lengths of all the inputs and targets should be consistent!"
# check appliable metrics
if self.phase == 'train' or self.phase == 'test':
self.metrics_post_check = set() # saved to check later
diff = set(self.metrics) - SupportedMetrics[ProblemTypes[self.problem_type]]
illegal_metrics = []
for diff_metric in diff:
if diff_metric.find('@') != -1:
field, target = diff_metric.split('@')
#if not field in PredictionTypes[ProblemTypes[self.problem_type]]:
if field != 'auc':
illegal_metrics.append(diff_metric)
else:
if target != 'average':
self.metrics_post_check.add(diff_metric)
if len(illegal_metrics) > 0:
raise Exception("Metrics %s are not supported for %s tasks!" % (",".join(list(illegal_metrics)), self.problem_type))
# check predict fields
if self.phase == 'predict':
self.predict_fields_post_check = set() # saved to check later
diff = set(self.predict_fields) - PredictionTypes[ProblemTypes[self.problem_type]]
illegal_fields = []
for diff_field in diff:
if diff_field.find('@') != -1 and diff_field.startswith('confidence'):
field, target = diff_field.split('@')
#if not field in PredictionTypes[ProblemTypes[self.problem_type]]:
if field != 'confidence':
illegal_fields.append(diff_field)
else:
# don't know if the target exists in the output dictionary, check after problem loaded
self.predict_fields_post_check.add(diff_field)
else:
illegal_fields.append(diff_field)
if len(illegal_fields) > 0:
raise Exception("The prediction fields %s is/are not supported!" % ",".join(illegal_fields))
def check_version_compat(self, nb_version, conf_version):
""" check if the version of toolkit and configuration file is compatible
Args:
nb_version: x.y.z
conf_version: x.y.z
Returns:
If the x field and y field are both the same, return True, else return False
"""
nb_version_split = nb_version.split('.')
conf_version_split = conf_version.split('.')
if len(nb_version_split) != len(conf_version_split):
raise ConfigurationError('The tool_version field of your configuration is illegal!')
if not (nb_version_split[0] == conf_version_split[0] and nb_version_split[1] == conf_version_split[1]):
raise ConfigurationError('The NeuronBlocks version is %s, but the configuration version is %s, please update your configuration to %s.%s.X' % (nb_version, conf_version, nb_version_split[0], nb_version_split[1]))
def back_up(self, params):
shutil.copy(params.conf_path, self.save_base_dir)
logging.info('Configuration file is backed up to %s' % (self.save_base_dir))
| [
"utils.common_utils.md5",
"utils.common_utils.log_set",
"logging.debug",
"losses.BaseLossConf.BaseLossConf.get_conf",
"random.sample",
"os.path.dirname",
"tempfile.gettempdir",
"utils.common_utils.load_from_json",
"json.keys",
"torch.cuda.device_count",
"logging.info",
"os.path.isfile",
"tor... | [((3541, 3580), 'logging.debug', 'logging.debug', (['"""Print ModelConf below:"""'], {}), "('Print ModelConf below:')\n", (3554, 3580), False, 'import logging\n'), ((3589, 3612), 'logging.debug', 'logging.debug', (["('=' * 80)"], {}), "('=' * 80)\n", (3602, 3612), False, 'import logging\n'), ((3807, 3830), 'logging.debug', 'logging.debug', (["('=' * 80)"], {}), "('=' * 80)\n", (3820, 3830), False, 'import logging\n'), ((9954, 10075), 'utils.exceptions.ConfigurationError', 'ConfigurationError', (["('The configuration file %s is illegal. the item [%s] is not found.' % (\n self.conf_path, key))"], {}), "(\n 'The configuration file %s is illegal. the item [%s] is not found.' % (\n self.conf_path, key))\n", (9972, 10075), False, 'from utils.exceptions import ConfigurationError\n'), ((10162, 10200), 'utils.common_utils.load_from_json', 'load_from_json', (['conf_path'], {'debug': '(False)'}), '(conf_path, debug=False)\n', (10176, 10200), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((13238, 13287), 'os.path.join', 'os.path.join', (['self.save_base_dir', 'self.model_name'], {}), '(self.save_base_dir, self.model_name)\n', (13250, 13287), False, 'import os\n'), ((24281, 24315), 'losses.BaseLossConf.BaseLossConf.get_conf', 'BaseLossConf.get_conf', ([], {}), '(**self.loss)\n', (24302, 24315), False, 'from losses.BaseLossConf import BaseLossConf\n'), ((34501, 34550), 'shutil.copy', 'shutil.copy', (['params.conf_path', 'self.save_base_dir'], {}), '(params.conf_path, self.save_base_dir)\n', (34512, 34550), False, 'import shutil\n'), ((34559, 34633), 'logging.info', 'logging.info', (["('Configuration file is backed up to %s' % self.save_base_dir)"], {}), "('Configuration file is backed up to %s' % self.save_base_dir)\n", (34571, 34633), False, 'import logging\n'), ((2297, 2308), 'json.keys', 'json.keys', ([], {}), '()\n', (2306, 2308), False, 'import json\n'), ((11032, 11079), 'os.path.join', 'os.path.join', (['self.log_dir', 'self.train_log_name'], {}), '(self.log_dir, self.train_log_name)\n', (11044, 11079), False, 'import os\n'), ((11113, 11159), 'os.path.join', 'os.path.join', (['self.log_dir', 'self.test_log_name'], {}), '(self.log_dir, self.test_log_name)\n', (11125, 11159), False, 'import os\n'), ((11196, 11245), 'os.path.join', 'os.path.join', (['self.log_dir', 'self.predict_log_name'], {}), '(self.log_dir, self.predict_log_name)\n', (11208, 11245), False, 'import os\n'), ((12272, 12515), 'utils.common_utils.prepare_dir', 'prepare_dir', (['self.save_base_dir', '(True)'], {'allow_overwrite': "(self.params.force or self.mode == 'philly')", 'extra_info': "('will overwrite model file and train.log' if self.phase == 'train' else \n 'will add %s.log and predict file' % self.phase)"}), "(self.save_base_dir, True, allow_overwrite=self.params.force or \n self.mode == 'philly', extra_info=\n 'will overwrite model file and train.log' if self.phase == 'train' else\n 'will add %s.log and predict file' % self.phase)\n", (12283, 12515), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((12893, 12956), 'logging.debug', 'logging.debug', (["('Prepare dir for: %s' % self.predict_output_path)"], {}), "('Prepare dir for: %s' % self.predict_output_path)\n", (12906, 12956), False, 'import logging\n'), ((12969, 13078), 'utils.common_utils.prepare_dir', 'prepare_dir', (['self.predict_output_path', '(False)'], {'allow_overwrite': "(self.params.force or self.mode == 'philly')"}), "(self.predict_output_path, 
False, allow_overwrite=self.params.\n force or self.mode == 'philly')\n", (12980, 13078), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((18068, 18117), 'os.path.join', 'os.path.join', (['self.save_base_dir', 'self.model_name'], {}), '(self.save_base_dir, self.model_name)\n', (18080, 18117), False, 'import os\n'), ((18628, 18655), 'os.path.dirname', 'os.path.dirname', (['model_path'], {}), '(model_path)\n', (18643, 18655), False, 'import os\n'), ((18694, 18757), 'os.path.join', 'os.path.join', (['model_path_dir', '""".necessary_cache"""', '"""problem.pkl"""'], {}), "(model_path_dir, '.necessary_cache', 'problem.pkl')\n", (18706, 18757), False, 'import os\n'), ((20490, 20515), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (20513, 20515), False, 'import torch\n'), ((20916, 21082), 'logging.info', 'logging.info', (['"""configuration[training_params][valid_times_per_epoch] is deprecated, please use configuration[training_params][steps_per_validation] instead"""'], {}), "(\n 'configuration[training_params][valid_times_per_epoch] is deprecated, please use configuration[training_params][steps_per_validation] instead'\n )\n", (20928, 21082), False, 'import logging\n'), ((25275, 25318), 'os.path.join', 'os.path.join', (['self.cache_dir', '"""problem.pkl"""'], {}), "(self.cache_dir, 'problem.pkl')\n", (25287, 25318), False, 'import os\n'), ((25552, 25619), 'os.path.join', 'os.path.join', (['self.save_base_dir', '""".necessary_cache"""', '"""problem.pkl"""'], {}), "(self.save_base_dir, '.necessary_cache', 'problem.pkl')\n", (25564, 25619), False, 'import os\n'), ((25935, 25989), 'logging.info', 'logging.info', (['"""Calculating the md5 of traing data ..."""'], {}), "('Calculating the md5 of traing data ...')\n", (25947, 25989), False, 'import logging\n'), ((26024, 26051), 'utils.common_utils.md5', 'md5', (['[self.train_data_path]'], {}), '([self.train_data_path])\n', (26027, 26051), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((26064, 26130), 'logging.info', 'logging.info', (["('the md5 of traing data is %s' % self.train_data_md5)"], {}), "('the md5 of traing data is %s' % self.train_data_md5)\n", (26076, 26130), False, 'import logging\n'), ((28810, 28846), 'os.path.isfile', 'os.path.isfile', (['self.train_data_path'], {}), '(self.train_data_path)\n', (28824, 28846), False, 'import os\n'), ((29010, 29046), 'os.path.isfile', 'os.path.isfile', (['self.valid_data_path'], {}), '(self.valid_data_path)\n', (29024, 29046), False, 'import os\n'), ((34046, 34124), 'utils.exceptions.ConfigurationError', 'ConfigurationError', (['"""The tool_version field of your configuration is illegal!"""'], {}), "('The tool_version field of your configuration is illegal!')\n", (34064, 34124), False, 'from utils.exceptions import ConfigurationError\n'), ((34255, 34470), 'utils.exceptions.ConfigurationError', 'ConfigurationError', (["('The NeuronBlocks version is %s, but the configuration version is %s, please update your configuration to %s.%s.X'\n % (nb_version, conf_version, nb_version_split[0], nb_version_split[1]))"], {}), "(\n 'The NeuronBlocks version is %s, but the configuration version is %s, please update your configuration to %s.%s.X'\n % (nb_version, conf_version, nb_version_split[0], nb_version_split[1]))\n", (34273, 34470), False, 'from utils.exceptions import ConfigurationError\n'), ((10844, 10897), 'utils.common_utils.prepare_dir', 'prepare_dir', (['self.log_dir', '(True)'], 
{'allow_overwrite': '(True)'}), '(self.log_dir, True, allow_overwrite=True)\n', (10855, 10897), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((11714, 11828), 'utils.common_utils.log_set', 'log_set', (['log_path'], {'console_level': '"""DEBUG"""', 'console_detailed': '(True)', 'disable_log_file': 'self.params.disable_log_file'}), "(log_path, console_level='DEBUG', console_detailed=True,\n    disable_log_file=self.params.disable_log_file)\n", (11721, 11828), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((11859, 11923), 'utils.common_utils.log_set', 'log_set', (['log_path'], {'disable_log_file': 'self.params.disable_log_file'}), '(log_path, disable_log_file=self.params.disable_log_file)\n', (11866, 11923), False, 'from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json\n'), ((12822, 12880), 'os.path.join', 'os.path.join', (['self.save_base_dir', 'self.predict_output_name'], {}), '(self.save_base_dir, self.predict_output_name)\n', (12834, 12880), False, 'import os\n'), ((18777, 18816), 'os.path.isfile', 'os.path.isfile', (['self.saved_problem_path'], {}), '(self.saved_problem_path)\n', (18791, 18816), False, 'import os\n'), ((18860, 18922), 'os.path.join', 'os.path.join', (['model_path_dir', '"""necessary_cache"""', '"""problem.pkl"""'], {}), "(model_path_dir, 'necessary_cache', 'problem.pkl')\n", (18872, 18922), False, 'import os\n'), ((20557, 20582), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (20580, 20582), False, 'import torch\n'), ((20689, 20714), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (20712, 20714), False, 'import torch\n'), ((21896, 21921), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21919, 21921), False, 'import torch\n'), ((22154, 22189), 'logging.info', 'logging.info', (['"""Activating CPU mode"""'], {}), "('Activating CPU mode')\n", (22166, 22189), False, 'import logging\n'), ((25408, 25447), 'os.path.join', 'os.path.join', (['self.cache_dir', '"""emb.pkl"""'], {}), "(self.cache_dir, 'emb.pkl')\n", (25420, 25447), False, 'import os\n'), ((25672, 25704), 'os.path.isfile', 'os.path.isfile', (['tmp_problem_path'], {}), '(tmp_problem_path)\n', (25686, 25704), False, 'import os\n'), ((25710, 25776), 'os.path.join', 'os.path.join', (['self.save_base_dir', '"""necessary_cache"""', '"""problem.pkl"""'], {}), "(self.save_base_dir, 'necessary_cache', 'problem.pkl')\n", (25722, 25776), False, 'import os\n'), ((29759, 29794), 'os.path.isfile', 'os.path.isfile', (['self.test_data_path'], {}), '(self.test_data_path)\n', (29773, 29794), False, 'import os\n'), ((1378, 1472), 'utils.exceptions.ConfigurationError', 'ConfigurationError', (["('key[%s] can not be found in configuration file' % (key_prefix + key))"], {}), "('key[%s] can not be found in configuration file' % (\n    key_prefix + key))\n", (1396, 1472), False, 'from utils.exceptions import ConfigurationError\n'), ((18943, 18969), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (18957, 18969), False, 'import os\n'), ((18974, 19013), 'os.path.isfile', 'os.path.isfile', (['self.saved_problem_path'], {}), '(self.saved_problem_path)\n', (18988, 19013), False, 'import os\n'), ((21926, 21951), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (21949, 21951), False, 'import torch\n'), ((25200, 25241), 'os.path.join', 'os.path.join', (['self.save_base_dir', '"""cache"""'], {}), "(self.save_base_dir, 'cache')\n", (25212, 25241), False, 'import os\n'), ((29999, 30037), 'os.path.isfile', 'os.path.isfile', (['self.predict_data_path'], {}), '(self.predict_data_path)\n', (30013, 30037), False, 'import os\n'), ((22056, 22081), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (22079, 22081), False, 'import torch\n'), ((24945, 24966), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (24964, 24966), False, 'import tempfile\n'), ((23966, 23995), 'numpy.array', 'np.array', (['[single_conf_value]'], {}), '([single_conf_value])\n', (23974, 23995), True, 'import numpy as np\n'), ((24993, 25048), 'random.sample', 'random.sample', (['(string.ascii_letters + string.digits)', '(16)'], {}), '(string.ascii_letters + string.digits, 16)\n', (25006, 25048), False, 'import random\n')] |
"""Mixture model using EM"""
from typing import Tuple
import numpy as np
from utils import GaussianMixture
def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[np.ndarray, float]:
"""E-step: Softly assigns each datapoint to a gaussian component
Args:
X: (n, d) array holding the data
mixture: the current gaussian mixture
Returns:
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the assignment
"""
n, d = X.shape
K, _ = mixture.mu.shape
post = np.zeros((n, K))
dr = np.sqrt((2*np.pi*mixture.var)**d)
exponent_nr = -0.5*(np.linalg.norm(X[:,:,None] - mixture.mu.T,axis=1)**2)
normal_ = np.exp(exponent_nr/mixture.var)/dr
log_likelihood = np.sum(np.log(np.sum(mixture.p*normal_,axis=1)))
post = (mixture.p*normal_)/(np.sum(mixture.p*normal_,axis=1,keepdims=True))
return post, log_likelihood
def mstep(X: np.ndarray, post: np.ndarray) -> GaussianMixture:
"""M-step: Updates the gaussian mixture by maximizing the log-likelihood
of the weighted dataset
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture
"""
n, d = X.shape
_, K = post.shape
n_hat = post.sum(axis=0, keepdims=True)
p = n_hat / n
mu = np.zeros((K, d))
var = np.zeros(K)
mu = post.T @ X / n_hat.T
var = np.sum(post * (np.linalg.norm(X[:,:,None] - mu.T,axis=1)**2),axis=0)/(d*n_hat)
return GaussianMixture(mu, var[0], p[0])
def run(X: np.ndarray, mixture: GaussianMixture,
post: np.ndarray) -> Tuple[GaussianMixture, np.ndarray, float]:
"""Runs the mixture model
Args:
X: (n, d) array holding the data
post: (n, K) array holding the soft counts
for all components for all examples
Returns:
GaussianMixture: the new gaussian mixture
np.ndarray: (n, K) array holding the soft counts
for all components for all examples
float: log-likelihood of the current assignment
"""
prev_cost = None
cost = None
while True:
prev_cost = cost
post, cost = estep(X, mixture)
mixture = mstep(X, post)
        # prev_cost is None on the first pass, so only test convergence afterwards
        # (the original bare try/except swallowed the TypeError from None - cost)
        if prev_cost is not None and abs(prev_cost - cost) < 1e-6 * np.abs(cost):
            break
return mixture, post, cost
| [
"numpy.sum",
"numpy.abs",
"numpy.zeros",
"utils.GaussianMixture",
"numpy.linalg.norm",
"numpy.exp",
"numpy.sqrt"
] | [((590, 606), 'numpy.zeros', 'np.zeros', (['(n, K)'], {}), '((n, K))\n', (598, 606), True, 'import numpy as np\n'), ((616, 655), 'numpy.sqrt', 'np.sqrt', (['((2 * np.pi * mixture.var) ** d)'], {}), '((2 * np.pi * mixture.var) ** d)\n', (623, 655), True, 'import numpy as np\n'), ((1471, 1487), 'numpy.zeros', 'np.zeros', (['(K, d)'], {}), '((K, d))\n', (1479, 1487), True, 'import numpy as np\n'), ((1498, 1509), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (1506, 1509), True, 'import numpy as np\n'), ((1641, 1674), 'utils.GaussianMixture', 'GaussianMixture', (['mu', 'var[0]', 'p[0]'], {}), '(mu, var[0], p[0])\n', (1656, 1674), False, 'from utils import GaussianMixture\n'), ((742, 775), 'numpy.exp', 'np.exp', (['(exponent_nr / mixture.var)'], {}), '(exponent_nr / mixture.var)\n', (748, 775), True, 'import numpy as np\n'), ((879, 929), 'numpy.sum', 'np.sum', (['(mixture.p * normal_)'], {'axis': '(1)', 'keepdims': '(True)'}), '(mixture.p * normal_, axis=1, keepdims=True)\n', (885, 929), True, 'import numpy as np\n'), ((674, 726), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[:, :, None] - mixture.mu.T)'], {'axis': '(1)'}), '(X[:, :, None] - mixture.mu.T, axis=1)\n', (688, 726), True, 'import numpy as np\n'), ((812, 847), 'numpy.sum', 'np.sum', (['(mixture.p * normal_)'], {'axis': '(1)'}), '(mixture.p * normal_, axis=1)\n', (818, 847), True, 'import numpy as np\n'), ((1565, 1609), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[:, :, None] - mu.T)'], {'axis': '(1)'}), '(X[:, :, None] - mu.T, axis=1)\n', (1579, 1609), True, 'import numpy as np\n'), ((2421, 2433), 'numpy.abs', 'np.abs', (['cost'], {}), '(cost)\n', (2427, 2433), True, 'import numpy as np\n')] |
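A minimal usage sketch for the EM routines in the row above (assumption: utils.GaussianMixture is a namedtuple-style container with fields mu, var, p, which is what the attribute access in estep and mstep implies; the stand-in below mirrors that):

import numpy as np
from collections import namedtuple

# Hypothetical stand-in for utils.GaussianMixture
GaussianMixture = namedtuple("GaussianMixture", ["mu", "var", "p"])

np.random.seed(0)
X = np.random.randn(100, 2)                          # (n, d) toy data
K = 3
mu0 = X[np.random.choice(len(X), K, replace=False)]     # init means from random points
mixture = GaussianMixture(mu=mu0, var=np.ones(K), p=np.full(K, 1.0 / K))
post = np.zeros((len(X), K))                           # placeholder; run() recomputes it
mixture, post, log_likelihood = run(X, mixture, post)
print("final log-likelihood:", log_likelihood)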
import os
import argparse
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
import sys
from surfboard.sound import Waveform
from surfboard.feature_extraction import extract_features
from config import *
DOC_PATH = 'alc_original/DOC/IS2011CHALLENGE'
DATA_PATH = 'alc_original'
TRAIN_TABLE = 'TRAIN.TBL'
D1_TABLE = 'D1.TBL'
D2_TABLE = 'D2.TBL'
TEST_TABLE = 'TESTMAPPING.txt'
class ALCDataset:
def __init__(self, path):
self.dataset_path = path
self.__load_meta_file()
def __process_meta(self, meta):
meta['file_name'] = meta['file_name'].map(lambda x: x[x.find('/') + 1:].lower())
meta['file_name'] = meta['file_name'].map(lambda x: x[:-8] + 'm' + x[-7:])
meta['session'] = meta['file_name'].map(lambda x: x[:x.find('/')])
meta['label'] = meta['user_state'].map(lambda x: 1 if x == 'I' else 0)
return meta
def __load_meta_file(self):
assert os.path.exists(self.dataset_path)
doc_folder = os.path.join(self.dataset_path, DOC_PATH)
print(doc_folder)
train_meta_path = os.path.join(doc_folder, TRAIN_TABLE)
self.train_meta = pd.read_csv(train_meta_path, sep='\t', names=['file_name', 'bac', 'user_state'])
self.train_meta = self.__process_meta(self.train_meta)
d1_meta_path = os.path.join(doc_folder, D1_TABLE)
self.d1_meta = pd.read_csv(d1_meta_path, sep='\t', names=['file_name', 'bac', 'user_state'])
self.d1_meta = self.__process_meta(self.d1_meta)
d2_meta_path = os.path.join(doc_folder, D2_TABLE)
self.d2_meta = pd.read_csv(d2_meta_path, sep='\t', names=['file_name', 'bac', 'user_state'])
self.d2_meta = self.__process_meta(self.d2_meta)
test_meta_path = os.path.join(doc_folder, TEST_TABLE)
self.test_meta = pd.read_csv(test_meta_path, sep='\t',
names=['file_name', 'bac', 'user_state', 'test_file_name'])
self.test_meta = self.test_meta[['file_name', 'bac', 'user_state']]
self.test_meta = self.__process_meta(self.test_meta)
def extract_opensmile_feature(self, split):
split = split.lower()
assert split in ('train', 'd1', 'd2', 'test')
meta = getattr(self, f'{split}_meta')
features = []
for file_name in tqdm(meta['file_name']):
wav_input_path = os.path.join(self.dataset_path, DATA_PATH, file_name)
csv_output_path = "opensmile_feature.csv"
if os.path.exists(csv_output_path):
os.remove(csv_output_path)
subprocess.run([OPENSMILE_PATH, "-C", OPENSMILE_CONF_PATH, "-I", wav_input_path, "-csvoutput", csv_output_path])
feature = pd.read_csv(csv_output_path, delimiter=";").iloc[0, 2:].to_numpy()
features.append(feature)
features = np.stack(features)
labels = meta['label'].to_numpy()
if not os.path.exists(OPENSMILE_FEATURE_PATH):
os.mkdir(OPENSMILE_FEATURE_PATH)
np.save(os.path.join(OPENSMILE_FEATURE_PATH, f'{split}_x.npy'), features)
np.save(os.path.join(OPENSMILE_FEATURE_PATH, f'{split}_y.npy'), labels)
return features, labels
def extract_surfboard_feature(self, split):
split = split.lower()
assert split in ('train', 'd1', 'd2', 'test')
meta = getattr(self, f'{split}_meta')
sounds = []
for file_name in tqdm(meta['file_name']):
sound = Waveform(path=os.path.join(self.dataset_path, DATA_PATH, file_name), sample_rate=SR)
sounds.append(sound)
features_df = extract_features(sounds, SURFBOARD_COMPONENTS, SURFBOARD_STATISTICS)
features = features_df.to_numpy()
labels = meta['label'].to_numpy()
if not os.path.exists(SURFBOARD_FEATURE_PATH):
os.makedirs(SURFBOARD_FEATURE_PATH)
np.save(os.path.join(SURFBOARD_FEATURE_PATH, f'{split}_x.npy'), features)
np.save(os.path.join(SURFBOARD_FEATURE_PATH, f'{split}_y.npy'), labels)
return features, labels
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='args for processing dataset')
parser.add_argument('--toolbox', '-t', help='toolbox to extract features', default='surfboard')
args = parser.parse_args()
dataset = ALCDataset(DATASET_PATH)
if args.toolbox == 'opensmile':
print("Extracting opensmile features from train set...")
feature_train, label_train = dataset.extract_opensmile_feature("train")
print("Extracting opensmile features from dev1 set...")
feature_d1, label_d1 = dataset.extract_opensmile_feature("d1")
print("Extracting opensmile features from dev2 set...")
feature_d2, label_d2 = dataset.extract_opensmile_feature("d2")
print("Extracting opensmile features from test set...")
feature_test, label_test = dataset.extract_opensmile_feature("test")
print("Finished!")
if args.toolbox == 'surfboard':
print("Extracting surfboard features from train set...")
feature_train, label_train = dataset.extract_surfboard_feature("train")
print("Extracting surfboard features from dev1 set...")
feature_d1, label_d1 = dataset.extract_surfboard_feature("d1")
print("Extracting surfboard features from dev2 set...")
feature_d2, label_d2 = dataset.extract_surfboard_feature("d2")
print("Extracting surfboard features from test set...")
feature_test, label_test = dataset.extract_surfboard_feature("test")
print("Finished!")
| [
"numpy.stack",
"subprocess.run",
"tqdm.tqdm",
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"os.makedirs",
"pandas.read_csv",
"os.path.exists",
"surfboard.feature_extraction.extract_features",
"os.path.join"
] | [((4229, 4295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""args for processing dataset"""'}), "(description='args for processing dataset')\n", (4252, 4295), False, 'import argparse\n'), ((951, 984), 'os.path.exists', 'os.path.exists', (['self.dataset_path'], {}), '(self.dataset_path)\n', (965, 984), False, 'import os\n'), ((1006, 1047), 'os.path.join', 'os.path.join', (['self.dataset_path', 'DOC_PATH'], {}), '(self.dataset_path, DOC_PATH)\n', (1018, 1047), False, 'import os\n'), ((1101, 1138), 'os.path.join', 'os.path.join', (['doc_folder', 'TRAIN_TABLE'], {}), '(doc_folder, TRAIN_TABLE)\n', (1113, 1138), False, 'import os\n'), ((1165, 1250), 'pandas.read_csv', 'pd.read_csv', (['train_meta_path'], {'sep': '"""\t"""', 'names': "['file_name', 'bac', 'user_state']"}), "(train_meta_path, sep='\\t', names=['file_name', 'bac', 'user_state']\n    )\n", (1176, 1250), True, 'import pandas as pd\n'), ((1333, 1367), 'os.path.join', 'os.path.join', (['doc_folder', 'D1_TABLE'], {}), '(doc_folder, D1_TABLE)\n', (1345, 1367), False, 'import os\n'), ((1391, 1468), 'pandas.read_csv', 'pd.read_csv', (['d1_meta_path'], {'sep': '"""\t"""', 'names': "['file_name', 'bac', 'user_state']"}), "(d1_meta_path, sep='\\t', names=['file_name', 'bac', 'user_state'])\n", (1402, 1468), True, 'import pandas as pd\n'), ((1550, 1584), 'os.path.join', 'os.path.join', (['doc_folder', 'D2_TABLE'], {}), '(doc_folder, D2_TABLE)\n', (1562, 1584), False, 'import os\n'), ((1608, 1685), 'pandas.read_csv', 'pd.read_csv', (['d2_meta_path'], {'sep': '"""\t"""', 'names': "['file_name', 'bac', 'user_state']"}), "(d2_meta_path, sep='\\t', names=['file_name', 'bac', 'user_state'])\n", (1619, 1685), True, 'import pandas as pd\n'), ((1769, 1805), 'os.path.join', 'os.path.join', (['doc_folder', 'TEST_TABLE'], {}), '(doc_folder, TEST_TABLE)\n', (1781, 1805), False, 'import os\n'), ((1831, 1932), 'pandas.read_csv', 'pd.read_csv', (['test_meta_path'], {'sep': '"""\t"""', 'names': "['file_name', 'bac', 'user_state', 'test_file_name']"}), "(test_meta_path, sep='\\t', names=['file_name', 'bac',\n    'user_state', 'test_file_name'])\n", (1842, 1932), True, 'import pandas as pd\n'), ((2346, 2369), 'tqdm.tqdm', 'tqdm', (["meta['file_name']"], {}), "(meta['file_name'])\n", (2350, 2369), False, 'from tqdm import tqdm\n'), ((2889, 2907), 'numpy.stack', 'np.stack', (['features'], {}), '(features)\n', (2897, 2907), True, 'import numpy as np\n'), ((3503, 3526), 'tqdm.tqdm', 'tqdm', (["meta['file_name']"], {}), "(meta['file_name'])\n", (3507, 3526), False, 'from tqdm import tqdm\n'), ((3695, 3763), 'surfboard.feature_extraction.extract_features', 'extract_features', (['sounds', 'SURFBOARD_COMPONENTS', 'SURFBOARD_STATISTICS'], {}), '(sounds, SURFBOARD_COMPONENTS, SURFBOARD_STATISTICS)\n', (3711, 3763), False, 'from surfboard.feature_extraction import extract_features\n'), ((2400, 2453), 'os.path.join', 'os.path.join', (['self.dataset_path', 'DATA_PATH', 'file_name'], {}), '(self.dataset_path, DATA_PATH, file_name)\n', (2412, 2453), False, 'import os\n'), ((2523, 2554), 'os.path.exists', 'os.path.exists', (['csv_output_path'], {}), '(csv_output_path)\n', (2537, 2554), False, 'import os\n'), ((2611, 2727), 'subprocess.run', 'subprocess.run', (["[OPENSMILE_PATH, '-C', OPENSMILE_CONF_PATH, '-I', wav_input_path,\n    '-csvoutput', csv_output_path]"], {}), "([OPENSMILE_PATH, '-C', OPENSMILE_CONF_PATH, '-I',\n    wav_input_path, '-csvoutput', csv_output_path])\n", (2625, 2727), False, 'import subprocess\n'), ((2974, 3012), 'os.path.exists', 'os.path.exists', (['OPENSMILE_FEATURE_PATH'], {}), '(OPENSMILE_FEATURE_PATH)\n', (2988, 3012), False, 'import os\n'), ((3026, 3058), 'os.mkdir', 'os.mkdir', (['OPENSMILE_FEATURE_PATH'], {}), '(OPENSMILE_FEATURE_PATH)\n', (3034, 3058), False, 'import os\n'), ((3075, 3129), 'os.path.join', 'os.path.join', (['OPENSMILE_FEATURE_PATH', 'f"""{split}_x.npy"""'], {}), "(OPENSMILE_FEATURE_PATH, f'{split}_x.npy')\n", (3087, 3129), False, 'import os\n'), ((3157, 3211), 'os.path.join', 'os.path.join', (['OPENSMILE_FEATURE_PATH', 'f"""{split}_y.npy"""'], {}), "(OPENSMILE_FEATURE_PATH, f'{split}_y.npy')\n", (3169, 3211), False, 'import os\n'), ((3872, 3910), 'os.path.exists', 'os.path.exists', (['SURFBOARD_FEATURE_PATH'], {}), '(SURFBOARD_FEATURE_PATH)\n', (3886, 3910), False, 'import os\n'), ((3924, 3959), 'os.makedirs', 'os.makedirs', (['SURFBOARD_FEATURE_PATH'], {}), '(SURFBOARD_FEATURE_PATH)\n', (3935, 3959), False, 'import os\n'), ((3988, 4042), 'os.path.join', 'os.path.join', (['SURFBOARD_FEATURE_PATH', 'f"""{split}_x.npy"""'], {}), "(SURFBOARD_FEATURE_PATH, f'{split}_x.npy')\n", (4000, 4042), False, 'import os\n'), ((4070, 4124), 'os.path.join', 'os.path.join', (['SURFBOARD_FEATURE_PATH', 'f"""{split}_y.npy"""'], {}), "(SURFBOARD_FEATURE_PATH, f'{split}_y.npy')\n", (4082, 4124), False, 'import os\n'), ((2572, 2598), 'os.remove', 'os.remove', (['csv_output_path'], {}), '(csv_output_path)\n', (2581, 2598), False, 'import os\n'), ((3562, 3615), 'os.path.join', 'os.path.join', (['self.dataset_path', 'DATA_PATH', 'file_name'], {}), '(self.dataset_path, DATA_PATH, file_name)\n', (3574, 3615), False, 'import os\n'), ((2746, 2789), 'pandas.read_csv', 'pd.read_csv', (['csv_output_path'], {'delimiter': '""";"""'}), "(csv_output_path, delimiter=';')\n", (2757, 2789), True, 'import pandas as pd\n')] |
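A hypothetical follow-up to the extraction row above, showing how the saved arrays could be consumed downstream. The file names match the np.save calls in extract_opensmile_feature; the value of OPENSMILE_FEATURE_PATH comes from the project's config module, which is not shown here:

import os
import numpy as np
from config import OPENSMILE_FEATURE_PATH

x_train = np.load(os.path.join(OPENSMILE_FEATURE_PATH, "train_x.npy"))
y_train = np.load(os.path.join(OPENSMILE_FEATURE_PATH, "train_y.npy"))
print(x_train.shape, y_train.shape)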
import qi
import time
import numpy as np
import cv2
import argparse
import imutils
import os
class YOLO():
def __init__(self, istiny = False):
# loading the yolo & network
if (istiny == True):
self.net = cv2.dnn.readNet("./darknet/yolov3-tiny.weights", "./darknet/cfg/yolov3-tiny.cfg")
else:
self.net = cv2.dnn.readNet("./darknet/yolov3.weights", "./darknet/cfg/yolov3.cfg")
self.classes = []
# Load Yolo
with open("./darknet/data/coco.names", "r") as f:
self.classes = [line.strip() for line in f.readlines()]
layer_names = self.net.getLayerNames()
self.output_layers = [layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))
def useWebcam(self):
# Loading webcam(camera on computer)
        # cv2.VideoCapture does not raise on failure; it returns an unopened
        # capture, so check isOpened() instead of relying on try/except
        self.cap = cv2.VideoCapture(0)
        if not self.cap.isOpened():
            self.cap = cv2.VideoCapture(1)
            if not self.cap.isOpened():
                print("Webcam has some problems. Please check the device.")
self.font = cv2.FONT_HERSHEY_PLAIN
self.starting_time = time.time()
self.frame_id = 0
def detectingWebcam(self):
while True:
_, frame = self.cap.read()
self.frame_id += 1
self.height, self.width, self.channels = frame.shape
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
self.net.setInput(blob)
outs = self.net.forward(self.output_layers)
# Showing informations on the screen
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.2:
# Object detected
center_x = int(detection[0] * self.width)
center_y = int(detection[1] * self.height)
w = int(detection[2] * self.width)
h = int(detection[3] * self.height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.3)
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(self.classes[class_ids[i]])
confidence = confidences[i]
color = self.colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.rectangle(frame, (x, y), (x + w, y + 30), color, -1)
cv2.putText(frame, label + " " + str(round(confidence, 2)), (x, y + 30), self.font, 3, (255,255,255), 3)
elapsed_time = time.time() - self.starting_time
fps = self.frame_id / elapsed_time
#print ("fps: ", fps)
cv2.putText(frame, "FPS: " + str(round(fps, 2)), (10, 50), self.font, 3, (0, 0, 0), 3)
cv2.imshow("Image", frame)
key = cv2.waitKey(1)
if key == 27:
break
self.cap.release()
cv2.destroyAllWindows()
def useImage(self, imgname):
# Loading image
self.imgname = imgname
self.img = cv2.imread(self.imgname) #ex. "test.jpg"
self.img = cv2.resize(self.img, None, fx=0.4, fy=0.4)
self.height, self.width, self.channels = self.img.shape
def detectingImage(self):
# Detecting objects
blob = cv2.dnn.blobFromImage(self.img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
self.net.setInput(blob)
outs = self.net.forward(self.output_layers)
# Showing informations on the screen
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * self.width)
center_y = int(detection[1] * self.height)
w = int(detection[2] * self.width)
h = int(detection[3] * self.height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
self.font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(self.classes[class_ids[i]])
color = self.colors[i]
cv2.rectangle(self.img, (x, y), (x + w, y + h), color, 2)
cv2.putText(self.img, label, (x, y + 30), self.font, 3, color, 3)
cv2.imshow("Image", self.img)
cv2.waitKey(0)
cv2.destroyAllWindows()
"""
try:
cap = cv2.VideoCapture(-1)
print ("camera index: -1")
except:
cap = cv2.VideoCapture(1)
print ("camera index: 1")
time.sleep(2)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
"""
if __name__ == "__main__":
applyYOLO = YOLO()
# if use webcam
applyYOLO.useWebcam()
applyYOLO.detectingWebcam()
# if use image
#applyYOLO.useImage("test.jpg")
#applyYOLO.detectingImage() | [
"cv2.putText",
"cv2.dnn.NMSBoxes",
"numpy.argmax",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"cv2.imshow",
"time.time",
"cv2.dnn.readNet",
"cv2.imread",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.destroyAllWindows",
"cv2.resize"
] | [((1203, 1214), 'time.time', 'time.time', ([], {}), '()\n', (1212, 1214), False, 'import time\n'), ((3652, 3675), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3673, 3675), False, 'import cv2\n'), ((3784, 3808), 'cv2.imread', 'cv2.imread', (['self.imgname'], {}), '(self.imgname)\n', (3794, 3808), False, 'import cv2\n'), ((3844, 3886), 'cv2.resize', 'cv2.resize', (['self.img', 'None'], {'fx': '(0.4)', 'fy': '(0.4)'}), '(self.img, None, fx=0.4, fy=0.4)\n', (3854, 3886), False, 'import cv2\n'), ((4025, 4111), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['self.img', '(0.00392)', '(416, 416)', '(0, 0, 0)', '(True)'], {'crop': '(False)'}), '(self.img, 0.00392, (416, 416), (0, 0, 0), True, crop=\n False)\n', (4046, 4111), False, 'import cv2\n'), ((5111, 5157), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.5)', '(0.4)'], {}), '(boxes, confidences, 0.5, 0.4)\n', (5127, 5157), False, 'import cv2\n'), ((5564, 5593), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'self.img'], {}), "('Image', self.img)\n", (5574, 5593), False, 'import cv2\n'), ((5602, 5616), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5613, 5616), False, 'import cv2\n'), ((5625, 5648), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5646, 5648), False, 'import cv2\n'), ((238, 323), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""./darknet/yolov3-tiny.weights"""', '"""./darknet/cfg/yolov3-tiny.cfg"""'], {}), "('./darknet/yolov3-tiny.weights',\n './darknet/cfg/yolov3-tiny.cfg')\n", (253, 323), False, 'import cv2\n'), ((357, 428), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""./darknet/yolov3.weights"""', '"""./darknet/cfg/yolov3.cfg"""'], {}), "('./darknet/yolov3.weights', './darknet/cfg/yolov3.cfg')\n", (372, 428), False, 'import cv2\n'), ((934, 953), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (950, 953), False, 'import cv2\n'), ((1479, 1557), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(0.00392)', '(416, 416)', '(0, 0, 0)', '(True)'], {'crop': '(False)'}), '(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n', (1500, 1557), False, 'import cv2\n'), ((2656, 2702), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.4)', '(0.3)'], {}), '(boxes, confidences, 0.4, 0.3)\n', (2672, 2702), False, 'import cv2\n'), ((3509, 3535), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'frame'], {}), "('Image', frame)\n", (3519, 3535), False, 'import cv2\n'), ((3554, 3568), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3565, 3568), False, 'import cv2\n'), ((3284, 3295), 'time.time', 'time.time', ([], {}), '()\n', (3293, 3295), False, 'import time\n'), ((4429, 4446), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (4438, 4446), True, 'import numpy as np\n'), ((5416, 5473), 'cv2.rectangle', 'cv2.rectangle', (['self.img', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(self.img, (x, y), (x + w, y + h), color, 2)\n', (5429, 5473), False, 'import cv2\n'), ((5490, 5555), 'cv2.putText', 'cv2.putText', (['self.img', 'label', '(x, y + 30)', 'self.font', '(3)', 'color', '(3)'], {}), '(self.img, label, (x, y + 30), self.font, 3, color, 3)\n', (5501, 5555), False, 'import cv2\n'), ((1014, 1033), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (1030, 1033), False, 'import cv2\n'), ((1919, 1936), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1928, 1936), True, 'import numpy as np\n'), ((3000, 3054), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(frame, (x, y), (x + w, y + h), color, 2)\n', (3013, 3054), False, 'import cv2\n'), ((3075, 3131), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + 30)', 'color', '(-1)'], {}), '(frame, (x, y), (x + w, y + 30), color, -1)\n', (3088, 3131), False, 'import cv2\n')] |
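A compatibility sketch for the layer-name lookup in YOLO.__init__ above (assumption: OpenCV >= 4.5.4, where net.getUnconnectedOutLayers() returns a flat integer array instead of nested [[i]] arrays, so the i[0] - 1 indexing raises). Flattening handles both return shapes, and the snippet relies only on the module's existing numpy import:

layer_names = self.net.getLayerNames()
out_ids = np.asarray(self.net.getUnconnectedOutLayers()).flatten()
self.output_layers = [layer_names[int(i) - 1] for i in out_ids]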
"""
Utility functions for simulations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
from sklearn.model_selection import ParameterGrid
import pathlib
import numpy as np
import pandas as pd
import glob
import os
from active_learning_dd.utils.evaluation import eval_on_metrics
from active_learning_dd.database_loaders.prepare_loader import prepare_loader
"""
Helper function to return max hits and max cluster hits from the
unlabeled data. Note this is used in the simulation since in a real
scenario these values are unknown.
"""
def get_unlabeled_maxes(training_loader_params,
unlabeled_loader_params,
task_names,
batch_size):
if not isinstance(task_names, list):
task_names = [task_names]
# load loaders
training_loader = prepare_loader(data_loader_params=training_loader_params,
task_names=task_names)
unlabeled_loader = prepare_loader(data_loader_params=unlabeled_loader_params,
task_names=task_names)
# remove already labeled data
unlabeled_loader.drop_duplicates_via_smiles(training_loader.get_smiles())
# now get labels and clusters
y_unlabeled = unlabeled_loader.get_labels()
unlabeled_clusters = unlabeled_loader.get_clusters()
training_clusters = training_loader.get_clusters()
max_hits_list = np.sum(y_unlabeled, axis=0)
max_hits_list = [min(batch_size, actives_count) for actives_count in max_hits_list]
max_cluster_hits_list = [0 for _ in range(len(task_names))]
max_novel_hits_list = [0 for _ in range(len(task_names))]
for ti in range(len(task_names)):
# Get the clusters with actives
active_indices = np.where(y_unlabeled[:,ti] == 1)[0]
clusters_with_actives_ti = unlabeled_clusters[active_indices]
unique_clusters_with_actives_ti = np.unique(clusters_with_actives_ti)
max_cluster_hits_list[ti] = min(batch_size,
unique_clusters_with_actives_ti.shape[0])
novel_clusters_with_actives = np.setdiff1d(unique_clusters_with_actives_ti,
training_clusters)
max_novel_hits_list[ti] = min(batch_size,
novel_clusters_with_actives.shape[0])
return max_hits_list, max_cluster_hits_list, max_novel_hits_list
"""
Random sample from the given parameter set.
Assumes a uniform distribution. Samples an index in the range [0, total_num_parameter_sets).
"""
def get_random_params_int_based(nbs_config,
rnd_seed=0):
# pop the batch_size, since we want to simulate all batch sizes for this param set
next_batch_selector_params = nbs_config["next_batch_selector_params"]
batch_sizes = next_batch_selector_params.pop("batch_size", None)
# sample random param
param_grid = SimulationParameterGrid(next_batch_selector_params)
np.random.seed(rnd_seed)
param_idx = np.random.randint(len(param_grid), size=1, dtype='int64')[0]
next_batch_selector_params = param_grid[param_idx]
next_batch_selector_params["batch_size"] = batch_sizes
return next_batch_selector_params
"""
Random sample from the given parameter set using the
distribution given in the config file.
If use_uniform=True, then samples each parameter uniformly.
"""
def get_param_from_dist(nbs_config,
rnd_seed=0,
use_uniform=False,
exploration_strategy='weighted'):
nbs_params = nbs_config["next_batch_selector_params"]
nbs_params_probas = nbs_config["nbs_params_probas"]
# sample random param
np.random.seed(rnd_seed)
sorted_params = sorted(nbs_params_probas.keys())
if exploration_strategy not in nbs_params["exploration_strategy"]:
raise ValueError('Given exploration strategy not supported in config file.')
nbs_params["exploration_strategy"] = exploration_strategy
if exploration_strategy == 'random' or exploration_strategy == 'dissimilar':
for removable_param, default_value in [('exploration_use_quantile_for_weight', False),
('exploration_weight_threshold', 0.0),
('exploration_beta', 0.0),
('exploration_dissimilarity_lambda', 0.0)]:
nbs_params[removable_param] = default_value
sorted_params.remove(removable_param)
while len(sorted_params) > 0:
param = sorted_params.pop()
param_choices = np.array(nbs_params[param])
param_probas = nbs_params_probas[param]
if param_choices.ndim > 1:
param_choices = param_choices.flatten()
if use_uniform:
param_probas = [1.0/len(param_choices) for _ in range(len(param_choices))] # discrete uniform sampling
param_sampled_choice = np.random.choice(param_choices, size=1, p=param_probas)[0]
# modify nbs_params dict with sampled choice
nbs_params[param] = param_sampled_choice
nbs_params["class"] = nbs_params["class"][0]
return nbs_params
"""
Evaluates selected batch by assuming all are active/hits.
"""
def evaluate_selected_batch(exploitation_df, exploration_df,
exploitation_array, exploration_array,
params_set_results_dir,
pipeline_config,
iter_num,
batch_size,
total_selection_time,
add_mean_medians=False):
w_novelty = pipeline_config['common']['metrics_params']['w_novelty']
perc_vec = pipeline_config['common']['metrics_params']['perc_vec']
task_names = pipeline_config['common']['task_names']
cost_col_name = pipeline_config['unlabeled_data_params']['cost_col_name']
iter_results_dir = params_set_results_dir+'/'+pipeline_config['common']['iter_results_dir'].format(iter_num)
eval_dest_file = iter_results_dir+'/'+pipeline_config['common']['eval_dest_file']
pathlib.Path(eval_dest_file).parent.mkdir(parents=True, exist_ok=True)
cols_names = task_names
if add_mean_medians:
cols_names = cols_names+['Mean', 'Median']
# retrieve max_hits_list, max_cluster_hits_list of the unlabeled data for this iteration
max_hits_list, max_cluster_hits_list, max_novel_hits_list = get_unlabeled_maxes(training_loader_params=pipeline_config['training_data_params'],
unlabeled_loader_params=pipeline_config['unlabeled_data_params'],
task_names=task_names,
batch_size=batch_size)
train_clusters = prepare_loader(data_loader_params=pipeline_config['training_data_params'],
task_names=task_names).get_clusters()
exploitation_batch_size, exploitation_batch_cost = 0, 0
if exploitation_df is not None:
exploitation_df.to_csv(iter_results_dir+'/'+pipeline_config['common']['batch_csv'].format('exploitation'),
index=False)
exploitation_metrics_mat, metrics_names = eval_on_metrics(exploitation_df[task_names].values, np.ones_like(exploitation_df[task_names].values),
train_clusters, exploitation_array[:,1],
max_hits_list, max_cluster_hits_list, max_novel_hits_list,
add_mean_medians, w_novelty, perc_vec)
exploitation_batch_size = exploitation_df[task_names].shape[0]
try:
exploitation_costs = exploitation_df[cost_col_name].values.astype(float)
        except (KeyError, ValueError):
exploitation_costs = np.ones(shape=(exploitation_df.shape[0],))
exploitation_batch_cost = np.sum(exploitation_costs)
else:
exploitation_metrics_mat, metrics_names = eval_on_metrics(None, None,
train_clusters, None,
max_hits_list, max_cluster_hits_list, max_novel_hits_list,
add_mean_medians, w_novelty, perc_vec)
exploration_batch_size, exploration_batch_cost = 0, 0
if exploration_df is not None:
exploration_df.to_csv(iter_results_dir+'/'+pipeline_config['common']['batch_csv'].format('exploration'),
index=False)
exploration_metrics_mat, metrics_names = eval_on_metrics(exploration_df[task_names].values, np.ones_like(exploration_df[task_names].values),
train_clusters, exploration_array[:,1],
max_hits_list, max_cluster_hits_list, max_novel_hits_list,
add_mean_medians, w_novelty, perc_vec)
exploration_batch_size = exploration_df[task_names].shape[0]
try:
exploration_costs = exploration_df[cost_col_name].values.astype(float)
        except (KeyError, ValueError):
exploration_costs = np.ones(shape=(exploration_df.shape[0],))
exploration_batch_cost = np.sum(exploration_costs)
else:
exploration_metrics_mat, metrics_names = eval_on_metrics(None, None,
train_clusters, None,
max_hits_list, max_cluster_hits_list, max_novel_hits_list,
add_mean_medians, w_novelty, perc_vec)
# record rest of metrics
exploitation_metrics_mat = np.vstack([exploitation_metrics_mat, [[exploitation_batch_size], [exploitation_batch_cost]]])
exploration_metrics_mat = np.vstack([exploration_metrics_mat, [[exploration_batch_size], [exploration_batch_cost]]])
# construct exploitation + exploration metrics
total_df = pd.concat([exploitation_df, exploration_df])
if (exploitation_df is not None) and (exploration_df is not None):
total_array = np.vstack([exploitation_array, exploration_array])
elif (exploitation_df is not None) and (exploration_df is None):
total_array = exploitation_array
elif (exploitation_df is None) and (exploration_df is not None):
total_array = exploration_array
else:
raise ValueError('Error in evaluating batch: total selection array is empty.')
total_metrics_mat, metrics_names = eval_on_metrics(total_df[task_names].values, np.ones_like(total_df[task_names].values),
train_clusters, total_array[:,1],
max_hits_list, max_cluster_hits_list, max_novel_hits_list,
add_mean_medians, w_novelty, perc_vec)
metrics_names = metrics_names + ['batch_size', 'batch_cost']
total_batch_size = exploitation_batch_size + exploration_batch_size
try:
total_batch_cost = total_df[cost_col_name].values.astype(float)
    except (KeyError, ValueError):
total_batch_cost = np.ones(shape=(total_df.shape[0],))
total_batch_cost = np.sum(total_batch_cost)
total_metrics_mat = np.vstack([total_metrics_mat, [[total_batch_size], [total_batch_cost]]])
total_cherry_picking_time = total_batch_size * pipeline_config['common']['cherry_picking_time_per_cpd']
screening_time_per_batch = pipeline_config['common']['screening_time_per_batch']
total_screening_time = total_cherry_picking_time + screening_time_per_batch
metrics_mat = np.vstack([exploitation_metrics_mat, exploration_metrics_mat, total_metrics_mat,
[[total_cherry_picking_time]], [[screening_time_per_batch]], [[total_screening_time]]])
metrics_names = ['exploitation_'+m for m in metrics_names] + \
['exploration_'+m for m in metrics_names] + \
['total_'+m for m in metrics_names] + \
['total_cherry_picking_time', 'screening_time_per_batch', 'total_screening_time']
# save to destination
metrics_df = pd.DataFrame(data=metrics_mat,
columns=[iter_num],
index=metrics_names).T
metrics_df.index.name = 'iter_num'
metrics_df.to_csv(eval_dest_file, index=True)
"""
Summarize simulation evaluation results by aggregating.
"""
def summarize_simulation(params_set_results_dir,
pipeline_config):
summary_dest_file = params_set_results_dir+'/'+pipeline_config['common']['summary_dest_file']
pathlib.Path(summary_dest_file).parent.mkdir(parents=True, exist_ok=True)
metrics_df_list = []
iter_dirs = glob.glob(params_set_results_dir+'/*/')
for i in range(len(iter_dirs)):
iter_d = params_set_results_dir+'/'+pipeline_config['common']['iter_results_dir'].format(i)
eval_dest_file = iter_d+'/'+pipeline_config['common']['eval_dest_file']
if not os.path.exists(eval_dest_file):
print(eval_dest_file, '\nDoes not exist.')
else:
metrics_df_list.append(pd.read_csv(eval_dest_file))
metrics_df_concat = pd.concat(metrics_df_list)
metrics_ordering = [m for m in metrics_df_concat.columns if 'ratio' not in m or 'exploration' in m] + [m for m in metrics_df_concat.columns if 'ratio' in m and 'exploration' not in m]
summary_df = pd.concat([metrics_df_concat[[m for m in metrics_df_concat.columns if 'ratio' not in m or 'exploration' in m]].sum(),
metrics_df_concat[[m for m in metrics_df_concat.columns if 'ratio' in m and 'exploration' not in m]].mean()]).to_frame().T
summary_df.iloc[-1,0] = 9999
summary_df = pd.concat([metrics_df_concat[metrics_ordering], summary_df])
summary_df.to_csv(summary_dest_file, index=False)
class SimulationParameterGrid(ParameterGrid):
"""
Custom parameter grid class due to sklearn's ParameterGrid restriction to int32.
"""
def __getitem__(self, ind):
"""
Same as sklearn's ParameterGrid class but np.product(sizes, dtype='int64').
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes, dtype='int64')
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('SimulationParameterGrid index out of range') | [
"numpy.random.seed",
"numpy.sum",
"active_learning_dd.utils.evaluation.eval_on_metrics",
"pandas.read_csv",
"active_learning_dd.database_loaders.prepare_loader.prepare_loader",
"numpy.ones",
"numpy.product",
"pathlib.Path",
"glob.glob",
"numpy.unique",
"pandas.DataFrame",
"os.path.exists",
"... | [((932, 1017), 'active_learning_dd.database_loaders.prepare_loader.prepare_loader', 'prepare_loader', ([], {'data_loader_params': 'training_loader_params', 'task_names': 'task_names'}), '(data_loader_params=training_loader_params, task_names=task_names\n )\n', (946, 1017), False, 'from active_learning_dd.database_loaders.prepare_loader import prepare_loader\n'), ((1073, 1159), 'active_learning_dd.database_loaders.prepare_loader.prepare_loader', 'prepare_loader', ([], {'data_loader_params': 'unlabeled_loader_params', 'task_names': 'task_names'}), '(data_loader_params=unlabeled_loader_params, task_names=\n task_names)\n', (1087, 1159), False, 'from active_learning_dd.database_loaders.prepare_loader import prepare_loader\n'), ((1521, 1548), 'numpy.sum', 'np.sum', (['y_unlabeled'], {'axis': '(0)'}), '(y_unlabeled, axis=0)\n', (1527, 1548), True, 'import numpy as np\n'), ((3115, 3139), 'numpy.random.seed', 'np.random.seed', (['rnd_seed'], {}), '(rnd_seed)\n', (3129, 3139), True, 'import numpy as np\n'), ((3865, 3889), 'numpy.random.seed', 'np.random.seed', (['rnd_seed'], {}), '(rnd_seed)\n', (3879, 3889), True, 'import numpy as np\n'), ((10232, 10330), 'numpy.vstack', 'np.vstack', (['[exploitation_metrics_mat, [[exploitation_batch_size], [\n exploitation_batch_cost]]]'], {}), '([exploitation_metrics_mat, [[exploitation_batch_size], [\n exploitation_batch_cost]]])\n', (10241, 10330), True, 'import numpy as np\n'), ((10356, 10451), 'numpy.vstack', 'np.vstack', (['[exploration_metrics_mat, [[exploration_batch_size], [exploration_batch_cost]]]'], {}), '([exploration_metrics_mat, [[exploration_batch_size], [\n exploration_batch_cost]]])\n', (10365, 10451), True, 'import numpy as np\n'), ((10518, 10562), 'pandas.concat', 'pd.concat', (['[exploitation_df, exploration_df]'], {}), '([exploitation_df, exploration_df])\n', (10527, 10562), True, 'import pandas as pd\n'), ((11780, 11804), 'numpy.sum', 'np.sum', (['total_batch_cost'], {}), '(total_batch_cost)\n', (11786, 11804), True, 'import numpy as np\n'), ((11829, 11901), 'numpy.vstack', 'np.vstack', (['[total_metrics_mat, [[total_batch_size], [total_batch_cost]]]'], {}), '([total_metrics_mat, [[total_batch_size], [total_batch_cost]]])\n', (11838, 11901), True, 'import numpy as np\n'), ((12204, 12381), 'numpy.vstack', 'np.vstack', (['[exploitation_metrics_mat, exploration_metrics_mat, total_metrics_mat, [[\n total_cherry_picking_time]], [[screening_time_per_batch]], [[\n total_screening_time]]]'], {}), '([exploitation_metrics_mat, exploration_metrics_mat,\n total_metrics_mat, [[total_cherry_picking_time]], [[\n screening_time_per_batch]], [[total_screening_time]]])\n', (12213, 12381), True, 'import numpy as np\n'), ((13346, 13387), 'glob.glob', 'glob.glob', (["(params_set_results_dir + '/*/')"], {}), "(params_set_results_dir + '/*/')\n", (13355, 13387), False, 'import glob\n'), ((13807, 13833), 'pandas.concat', 'pd.concat', (['metrics_df_list'], {}), '(metrics_df_list)\n', (13816, 13833), True, 'import pandas as pd\n'), ((14358, 14418), 'pandas.concat', 'pd.concat', (['[metrics_df_concat[metrics_ordering], summary_df]'], {}), '([metrics_df_concat[metrics_ordering], summary_df])\n', (14367, 14418), True, 'import pandas as pd\n'), ((2015, 2050), 'numpy.unique', 'np.unique', (['clusters_with_actives_ti'], {}), '(clusters_with_actives_ti)\n', (2024, 2050), True, 'import numpy as np\n'), ((2233, 2297), 'numpy.setdiff1d', 'np.setdiff1d', (['unique_clusters_with_actives_ti', 'training_clusters'], {}), '(unique_clusters_with_actives_ti, 
training_clusters)\n', (2245, 2297), True, 'import numpy as np\n'), ((4818, 4845), 'numpy.array', 'np.array', (['nbs_params[param]'], {}), '(nbs_params[param])\n', (4826, 4845), True, 'import numpy as np\n'), ((8290, 8316), 'numpy.sum', 'np.sum', (['exploitation_costs'], {}), '(exploitation_costs)\n', (8296, 8316), True, 'import numpy as np\n'), ((8377, 8532), 'active_learning_dd.utils.evaluation.eval_on_metrics', 'eval_on_metrics', (['None', 'None', 'train_clusters', 'None', 'max_hits_list', 'max_cluster_hits_list', 'max_novel_hits_list', 'add_mean_medians', 'w_novelty', 'perc_vec'], {}), '(None, None, train_clusters, None, max_hits_list,\n max_cluster_hits_list, max_novel_hits_list, add_mean_medians, w_novelty,\n perc_vec)\n', (8392, 8532), False, 'from active_learning_dd.utils.evaluation import eval_on_metrics\n'), ((9744, 9769), 'numpy.sum', 'np.sum', (['exploration_costs'], {}), '(exploration_costs)\n', (9750, 9769), True, 'import numpy as np\n'), ((9829, 9984), 'active_learning_dd.utils.evaluation.eval_on_metrics', 'eval_on_metrics', (['None', 'None', 'train_clusters', 'None', 'max_hits_list', 'max_cluster_hits_list', 'max_novel_hits_list', 'add_mean_medians', 'w_novelty', 'perc_vec'], {}), '(None, None, train_clusters, None, max_hits_list,\n max_cluster_hits_list, max_novel_hits_list, add_mean_medians, w_novelty,\n perc_vec)\n', (9844, 9984), False, 'from active_learning_dd.utils.evaluation import eval_on_metrics\n'), ((10656, 10706), 'numpy.vstack', 'np.vstack', (['[exploitation_array, exploration_array]'], {}), '([exploitation_array, exploration_array])\n', (10665, 10706), True, 'import numpy as np\n'), ((11118, 11159), 'numpy.ones_like', 'np.ones_like', (['total_df[task_names].values'], {}), '(total_df[task_names].values)\n', (11130, 11159), True, 'import numpy as np\n'), ((12761, 12832), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'metrics_mat', 'columns': '[iter_num]', 'index': 'metrics_names'}), '(data=metrics_mat, columns=[iter_num], index=metrics_names)\n', (12773, 12832), True, 'import pandas as pd\n'), ((1867, 1900), 'numpy.where', 'np.where', (['(y_unlabeled[:, ti] == 1)'], {}), '(y_unlabeled[:, ti] == 1)\n', (1875, 1900), True, 'import numpy as np\n'), ((5164, 5219), 'numpy.random.choice', 'np.random.choice', (['param_choices'], {'size': '(1)', 'p': 'param_probas'}), '(param_choices, size=1, p=param_probas)\n', (5180, 5219), True, 'import numpy as np\n'), ((7096, 7197), 'active_learning_dd.database_loaders.prepare_loader.prepare_loader', 'prepare_loader', ([], {'data_loader_params': "pipeline_config['training_data_params']", 'task_names': 'task_names'}), "(data_loader_params=pipeline_config['training_data_params'],\n task_names=task_names)\n", (7110, 7197), False, 'from active_learning_dd.database_loaders.prepare_loader import prepare_loader\n'), ((7607, 7655), 'numpy.ones_like', 'np.ones_like', (['exploitation_df[task_names].values'], {}), '(exploitation_df[task_names].values)\n', (7619, 7655), True, 'import numpy as np\n'), ((9073, 9120), 'numpy.ones_like', 'np.ones_like', (['exploration_df[task_names].values'], {}), '(exploration_df[task_names].values)\n', (9085, 9120), True, 'import numpy as np\n'), ((11721, 11756), 'numpy.ones', 'np.ones', ([], {'shape': '(total_df.shape[0],)'}), '(shape=(total_df.shape[0],))\n', (11728, 11756), True, 'import numpy as np\n'), ((13617, 13647), 'os.path.exists', 'os.path.exists', (['eval_dest_file'], {}), '(eval_dest_file)\n', (13631, 13647), False, 'import os\n'), ((15348, 15380), 'numpy.product', 'np.product', (['sizes'], 
{'dtype': '"""int64"""'}), "(sizes, dtype='int64')\n", (15358, 15380), True, 'import numpy as np\n'), ((6293, 6321), 'pathlib.Path', 'pathlib.Path', (['eval_dest_file'], {}), '(eval_dest_file)\n', (6305, 6321), False, 'import pathlib\n'), ((8213, 8255), 'numpy.ones', 'np.ones', ([], {'shape': '(exploitation_df.shape[0],)'}), '(shape=(exploitation_df.shape[0],))\n', (8220, 8255), True, 'import numpy as np\n'), ((9669, 9710), 'numpy.ones', 'np.ones', ([], {'shape': '(exploration_df.shape[0],)'}), '(shape=(exploration_df.shape[0],))\n', (9676, 9710), True, 'import numpy as np\n'), ((13230, 13261), 'pathlib.Path', 'pathlib.Path', (['summary_dest_file'], {}), '(summary_dest_file)\n', (13242, 13261), False, 'import pathlib\n'), ((13753, 13780), 'pandas.read_csv', 'pd.read_csv', (['eval_dest_file'], {}), '(eval_dest_file)\n', (13764, 13780), True, 'import pandas as pd\n')] |
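A minimal sketch of drawing one parameter set through the int64-safe grid defined above (the parameter names and values are illustrative, not taken from any real nbs_config file):

import numpy as np

grid = SimulationParameterGrid({"exploration_beta": [0.0, 0.5, 1.0],
                              "batch_size": [96, 384]})
np.random.seed(0)
idx = np.random.randint(len(grid), size=1, dtype="int64")[0]  # mirrors get_random_params_int_based
print(grid[idx])                                             # one sampled parameter dict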
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : nba_test.py
@Time : 2021/02/06 00:25:00
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
@Desc : None
'''
# This tests whether the code under if __name__ == '__main__': is skipped when algorithms is imported
# import algorithms
# print('Testing here~')
# algorithms.call_foo()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from nba_api.stats.static import players
df = pd.DataFrame(np.random.randint(low=1,high=11,size=25).reshape([5,5]), index=['A','B','C','D','E'],columns=['c1','c2','c3','c4','c5'])
print(df)
print(df.reindex(['A','B']))
# plt.plot(df['c2'],df['c5'],color='b')
# plt.show()
# player_dict = players.get_players()
# LeBron = [player for player in player_dict if player['full_name'] == '<NAME>'][0]
# print(LeBron)
# Lebron_id = LeBron['id'] | [
"numpy.random.randint"
] | [((451, 493), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(11)', 'size': '(25)'}), '(low=1, high=11, size=25)\n', (468, 493), True, 'import numpy as np\n')] |
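A brief worked note on the reindex call exercised above: DataFrame.reindex selects rows by label, and labels missing from the index come back as all-NaN rows (with integer columns upcast to float) rather than raising:

print(df.reindex(['A', 'B', 'Z']))  # row 'Z' is all NaN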
import argparse, datetime, os, sympy
import numpy as np
###################################
# Encryption using Diffie-Hellman #
###################################
# The bounds for the public and private key
# These should be very large and prime
large_bounds = [1e300, 1e301]
# Fix the public base for simplicity
# This is prime and commonly small, like 2 or 3
public_base = 2
# Set filenames for public and private keys
key_folder = 'keys'
private_filename = 'private_key'
public_filename = 'public_key'
modified_filename = 'modified_key'
file_mode = False
def get_prime(bounds):
"""
Generate a random prime within the given bounds (inclusive)
:param bounds: Prime will be returned between these bounds
"""
return sympy.randprime(bounds[0]-1, bounds[1])
def save_key(key, filename):
"""
Save a key to file
:param key: Key that is being saved
:param filename: Location to store key to
"""
if not os.path.isdir(key_folder):
os.mkdir(key_folder)
with open(os.path.join(key_folder, filename), 'w') as f:
f.write(str(key))
return
def get_key(filename):
"""
Return the key in the given file
:param filename: Location of key
:return: Key
"""
    try:
        with open(os.path.join(key_folder, filename), 'r') as f:
            key = int(f.read())
    except (IOError, OSError, ValueError):
        raise IOError(filename + " not set")
return key
def generate_key(bounds, filename):
"""
Generate and save a key
:param bounds: Key will be between these numeric bounds
:param filename: Location to store key to
"""
print("generating {}".format(filename))
save_key(get_prime(bounds), filename)
return
def print_key(filename):
"""
Print a key to terminal
:param filename: Location of key
"""
key = get_key(filename)
print("{}:\n{}".format(filename, key))
def check_keys(filenames):
"""
Make sure our keys follow good practices
:param filenames: Key names to check
:return: Whether keys exist and pass checks
"""
# Check for duplicate keys
try:
keys = [get_key(f) for f in filenames]
except Exception as e:
print(e)
return False
if (len(keys) > len(set(keys))):
print("WARNING: public and private keys are equal")
# Make sure our keys are prime
for k, f in zip(keys, filenames):
if not sympy.isprime(k):
print("WARNING: {} is not prime".format(f))
return True
def calculate_modified_key():
"""Calculate a modified key to pass publicly"""
print('generating {}'.format(modified_filename))
private_key = get_key(private_filename)
public_key = get_key(public_filename)
modified_key = pow(public_base, private_key, public_key)
save_key(modified_key, modified_filename)
def get_modified_filename(partner):
"""
Get filename of modified key from someone else
:param partner: Name of partner
:return: Filename of modified key for partner
"""
return '{}_{}'.format(modified_filename, partner)
def get_private_filename(partner):
"""
Get filename of private key from someone else
:param partner: Name of partner
:return: Filename of private key for partner
"""
return '{}_{}'.format(private_filename, partner)
def calculate_shared_private_key(partner):
"""
Calculate a shared private key
:param partner: Name of partner
"""
print('generating {}'.format(get_private_filename(partner)))
private_key = get_key(private_filename)
public_key = get_key(public_filename)
shared_modified_key = get_key(get_modified_filename(partner))
shared_private_key = pow(shared_modified_key, private_key, public_key)
save_key(shared_private_key, get_private_filename(partner))
################################################################
# Encryption and decryption using simple matrix multiplication #
################################################################
# Set filenames for messages
message_folder = 'messages'
def get_new_message_name(partner):
"""
Get unique name for message
:param partner: Person receiving message
:return: Path of message
"""
time = datetime.datetime.now()
name = "{}_{}-{}-{}-{}-{}-{}".format(partner,
time.year,
time.month,
time.day,
time.hour,
time.minute,
time.second)
return os.path.join(message_folder, name)
def save_message(partner, message):
"""
Save message to file
:param message: Any data structure to be saved
:param partner: Person receiving message
:return: Filename
"""
if not os.path.isdir(message_folder):
os.mkdir(message_folder)
filename = get_new_message_name(partner)
np.savetxt(filename, message, newline=' ', fmt='%d')
print("saving message to {}".format(filename))
return filename
def load_message(filename):
"""
Load message from file
:param filename: Filename to load data from
:return: Data that was stored
"""
return np.loadtxt(filename)
def string_to_numbers(string):
"""
Convert a string to a list of numbers
:param string: Message as string
:return: Message as numbers
"""
vals = [ord(s) for s in string]
return vals
def numbers_to_string(numbers):
"""
Convert a list of numbers to a string
:param numbers: Message as numbers
:return: Message as string
"""
val = ''.join(chr(n) for n in numbers)
return val
def get_encryption_matrix(key):
"""
Get encryption matrix from key
:param key: Encryption key
:return: Encryption matrix
"""
elements = [str(i) for i in str(key)]
num_int = len(elements)
rank = int(np.floor(np.sqrt(num_int)))
matrix = np.empty((rank, rank), dtype=int)
for i in range(rank):
for j in range(rank):
matrix[i][j] = elements[i + rank * j]
return matrix
def get_decryption_matrix(key):
"""
Get decryption matrix from key
:param key: Encryption key
:return: Decryption matrix
"""
return np.linalg.inv(get_encryption_matrix(key))
def encrypt_message(partner, message):
"""
Encrypt a message
    :param partner: Name of partner
:param message: Message as string
:return: Message as numbers
"""
matrix = get_encryption_matrix(get_key(get_private_filename(partner)))
rank = np.linalg.matrix_rank(matrix)
num_blocks = int(np.ceil(1.0 * len(message) / rank))
padded_message = message
for i in range(len(message), rank * num_blocks):
padded_message += ' '
encoded_message = string_to_numbers(padded_message)
encrypted_numbers = np.empty(rank * num_blocks, dtype=int)
rhs = np.empty(rank, dtype=int)
for b in range(num_blocks):
for i in range(rank):
rhs[i] = encoded_message[i + rank * b]
lhs = np.dot(matrix, rhs)
for i in range(rank):
encrypted_numbers[i + rank * b] = lhs[i]
return encrypted_numbers
def decrypt_message(partner, message):
"""
Decrypt a message
:param partner: Name of partner
:param message: Message as numbers
:return: Message as string
"""
encrypted_numbers = np.array(message)
key = get_key(get_private_filename(partner))
matrix = get_decryption_matrix(get_key(get_private_filename(partner)))
rank = np.linalg.matrix_rank(matrix)
if len(message) % rank != 0:
print(len(message), rank)
raise ValueError("message is incorrect length")
num_blocks = int(len(message) / rank)
decrypted_message = ''
rhs = np.empty(rank, dtype=int)
for b in range(num_blocks):
for i in range(rank):
rhs[i] = encrypted_numbers[i + rank * b]
lhs = np.round(np.dot(matrix, rhs))
lhs = [int(i) for i in lhs]
decrypted_message += numbers_to_string(lhs)
return decrypted_message
############################################
# Run the program if this script is called #
############################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--generate_private_key', action='store_true',
help='generate new private key')
parser.add_argument('-u', '--generate_public_key', action='store_true',
help='generate new public key')
parser.add_argument('-s', '--set_public_key', type=int, nargs='?',
help='set public key')
parser.add_argument('-c', '--calculate_modified_key', action='store_true',
help='calculate modified public key')
parser.add_argument('-t', '--partner', type=str, nargs='?',
help='specify recipient of message or key')
parser.add_argument('-m', '--set_modified_key', nargs='?',
help='set modified key from partner')
parser.add_argument('-e', '--encrypt_message', type=str, nargs='?',
help='message that will be encrypted')
if file_mode:
parser.add_argument('-d', '--decrypt_message', type=str, nargs='?',
help='message that will be decrypted')
else:
parser.add_argument('-d', '--decrypt_message', type=int, nargs='*',
help='message that will be decrypted as a list of integers')
args = parser.parse_args()
# Generate and set keys
generating = False
if args.set_public_key is not None:
save_key(args.set_public_key, public_filename)
generating = True
elif args.generate_public_key:
generate_key(large_bounds, public_filename)
generating = True
if args.generate_private_key:
generate_key(large_bounds, private_filename)
generating = True
# Check keys once they are generated and set
if not check_keys([public_filename, private_filename]):
quit()
# Calculate modified key
if args.calculate_modified_key or generating:
calculate_modified_key()
generating = True
# If we have generated keys, then we need to quit so we can trade keys
if generating:
print('exiting so keys can be traded')
quit()
# Set modified key from partner
if args.set_modified_key is not None:
if args.partner is None:
raise IOError('must specify partner to set their modified key')
save_key(args.set_modified_key, get_modified_filename(args.partner))
calculate_shared_private_key(args.partner)
# Encrypt a message to partner
if args.encrypt_message is not None:
if args.partner is None:
raise IOError('must specify partner to encrypt message')
message = encrypt_message(args.partner, args.encrypt_message)
if file_mode:
filename = save_message(args.partner, message)
else:
print('encrypted message:')
print(' '.join(str(m) for m in message))
# Decrypt a message from partner
if args.decrypt_message is not None:
if args.partner is None:
raise IOError('must specify partner to decrypt message')
if file_mode:
encrypted = load_message(args.decrypt_message)
decrypted = decrypt_message(args.partner, encrypted)
else:
decrypted = decrypt_message(args.partner, args.decrypt_message)
print('decrypted message:')
print(decrypted)
| [
"os.mkdir",
"sympy.randprime",
"os.path.join",
"argparse.ArgumentParser",
"os.path.isdir",
"numpy.empty",
"numpy.savetxt",
"numpy.linalg.matrix_rank",
"numpy.array",
"numpy.loadtxt",
"numpy.dot",
"sympy.isprime",
"datetime.datetime.now",
"numpy.sqrt"
] | [((745, 786), 'sympy.randprime', 'sympy.randprime', (['(bounds[0] - 1)', 'bounds[1]'], {}), '(bounds[0] - 1, bounds[1])\n', (760, 786), False, 'import argparse, datetime, os, sympy\n'), ((4255, 4278), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4276, 4278), False, 'import argparse, datetime, os, sympy\n'), ((4656, 4690), 'os.path.join', 'os.path.join', (['message_folder', 'name'], {}), '(message_folder, name)\n', (4668, 4690), False, 'import argparse, datetime, os, sympy\n'), ((5012, 5064), 'numpy.savetxt', 'np.savetxt', (['filename', 'message'], {'newline': '""" """', 'fmt': '"""%d"""'}), "(filename, message, newline=' ', fmt='%d')\n", (5022, 5064), True, 'import numpy as np\n'), ((5302, 5322), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (5312, 5322), True, 'import numpy as np\n'), ((6039, 6072), 'numpy.empty', 'np.empty', (['(rank, rank)'], {'dtype': 'int'}), '((rank, rank), dtype=int)\n', (6047, 6072), True, 'import numpy as np\n'), ((6675, 6704), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['matrix'], {}), '(matrix)\n', (6696, 6704), True, 'import numpy as np\n'), ((6954, 6992), 'numpy.empty', 'np.empty', (['(rank * num_blocks)'], {'dtype': 'int'}), '(rank * num_blocks, dtype=int)\n', (6962, 6992), True, 'import numpy as np\n'), ((7003, 7028), 'numpy.empty', 'np.empty', (['rank'], {'dtype': 'int'}), '(rank, dtype=int)\n', (7011, 7028), True, 'import numpy as np\n'), ((7502, 7519), 'numpy.array', 'np.array', (['message'], {}), '(message)\n', (7510, 7519), True, 'import numpy as np\n'), ((7655, 7684), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['matrix'], {}), '(matrix)\n', (7676, 7684), True, 'import numpy as np\n'), ((7887, 7912), 'numpy.empty', 'np.empty', (['rank'], {'dtype': 'int'}), '(rank, dtype=int)\n', (7895, 7912), True, 'import numpy as np\n'), ((8365, 8390), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8388, 8390), False, 'import argparse, datetime, os, sympy\n'), ((955, 980), 'os.path.isdir', 'os.path.isdir', (['key_folder'], {}), '(key_folder)\n', (968, 980), False, 'import argparse, datetime, os, sympy\n'), ((990, 1010), 'os.mkdir', 'os.mkdir', (['key_folder'], {}), '(key_folder)\n', (998, 1010), False, 'import argparse, datetime, os, sympy\n'), ((4899, 4928), 'os.path.isdir', 'os.path.isdir', (['message_folder'], {}), '(message_folder)\n', (4912, 4928), False, 'import argparse, datetime, os, sympy\n'), ((4938, 4962), 'os.mkdir', 'os.mkdir', (['message_folder'], {}), '(message_folder)\n', (4946, 4962), False, 'import argparse, datetime, os, sympy\n'), ((7156, 7175), 'numpy.dot', 'np.dot', (['matrix', 'rhs'], {}), '(matrix, rhs)\n', (7162, 7175), True, 'import numpy as np\n'), ((1025, 1059), 'os.path.join', 'os.path.join', (['key_folder', 'filename'], {}), '(key_folder, filename)\n', (1037, 1059), False, 'import argparse, datetime, os, sympy\n'), ((2416, 2432), 'sympy.isprime', 'sympy.isprime', (['k'], {}), '(k)\n', (2429, 2432), False, 'import argparse, datetime, os, sympy\n'), ((6007, 6023), 'numpy.sqrt', 'np.sqrt', (['num_int'], {}), '(num_int)\n', (6014, 6023), True, 'import numpy as np\n'), ((8051, 8070), 'numpy.dot', 'np.dot', (['matrix', 'rhs'], {}), '(matrix, rhs)\n', (8057, 8070), True, 'import numpy as np\n')] |
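A toy walk-through of the Diffie-Hellman exchange implemented above, using deliberately tiny numbers (real keys use the ~1e300 bounds defined at the top of the script):

g, p = 2, 23                               # public base and a small prime public key
a, b = 6, 15                               # the two private keys
A, B = pow(g, a, p), pow(g, b, p)          # modified keys: A = 18, B = 16
assert pow(B, a, p) == pow(A, b, p) == 4  # both sides derive the same shared secret

Each party publishes only its modified key; recovering a private exponent from it is the discrete-logarithm problem, which is why the very large prime bounds above matter.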
# lint-amnesty, pylint: disable=missing-module-docstring
import logging
import time
import numpy as np
from edxval.api import get_videos_for_course
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from scipy import stats
from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes
from openedx.core.lib.cache_utils import request_cached
from openedx.core.lib.graph_traversals import traverse_pre_order
from xmodule.modulestore.django import modulestore
from .utils import course_author_access_required, get_bool_param
log = logging.getLogger(__name__)
@view_auth_classes()
class CourseQualityView(DeveloperErrorViewMixin, GenericAPIView):
"""
**Use Case**
**Example Requests**
GET /api/courses/v1/quality/{course_id}/
**GET Parameters**
A GET request may include the following parameters.
* all
* sections
* subsections
* units
* videos
* exclude_graded (boolean) - whether to exclude graded subsections in the subsections and units information.
**GET Response Values**
The HTTP 200 response has the following values.
* is_self_paced - whether the course is self-paced.
* sections
* total_number - number of sections in the course.
* total_visible - number of sections visible to learners in the course.
* number_with_highlights - number of sections that have at least one highlight entered.
* highlights_enabled - whether highlights are enabled in the course.
* subsections
* total_visible - number of subsections visible to learners in the course.
* num_with_one_block_type - number of visible subsections containing only one type of block.
* num_block_types - statistics for number of block types across all visible subsections.
* min
* max
* mean
* median
* mode
* units
* total_visible - number of units visible to learners in the course.
* num_blocks - statistics for number of block across all visible units.
* min
* max
* mean
* median
* mode
* videos
* total_number - number of video blocks in the course.
* num_with_val_id - number of video blocks that include video pipeline IDs.
* num_mobile_encoded - number of videos encoded through the video pipeline.
* durations - statistics for video duration across all videos encoded through the video pipeline.
* min
* max
* mean
* median
* mode
"""
@course_author_access_required
def get(self, request, course_key):
"""
Returns validation information for the given course.
"""
def _execute_method_and_log_time(log_time, func, *args):
"""
            Call the passed-in func, logging the time it took to complete.
            Logging is temporary; we will remove it once we have the required information.
"""
if log_time:
start_time = time.time()
output = func(*args)
log.info('[%s] completed in [%f]', func.__name__, (time.time() - start_time))
else:
output = func(*args)
return output
all_requested = get_bool_param(request, 'all', False)
store = modulestore()
with store.bulk_operations(course_key):
course = store.get_course(course_key, depth=self._required_course_depth(request, all_requested))
# Added for EDUCATOR-3660
course_key_harvard = str(course_key) == 'course-v1:HarvardX+SW12.1x+2016'
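            # (timing is logged for this one course only; see
            # _execute_method_and_log_time above)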
response = dict(
is_self_paced=course.self_paced,
)
if get_bool_param(request, 'sections', all_requested):
response.update(
sections=_execute_method_and_log_time(course_key_harvard, self._sections_quality, course)
)
if get_bool_param(request, 'subsections', all_requested):
response.update(
subsections=_execute_method_and_log_time(
course_key_harvard, self._subsections_quality, course, request
)
)
if get_bool_param(request, 'units', all_requested):
response.update(
units=_execute_method_and_log_time(course_key_harvard, self._units_quality, course, request)
)
if get_bool_param(request, 'videos', all_requested):
response.update(
videos=_execute_method_and_log_time(course_key_harvard, self._videos_quality, course)
)
return Response(response)
def _required_course_depth(self, request, all_requested): # lint-amnesty, pylint: disable=missing-function-docstring
if get_bool_param(request, 'units', all_requested):
# The num_blocks metric for "units" requires retrieving all blocks in the graph.
return None
elif get_bool_param(request, 'subsections', all_requested):
# The num_block_types metric for "subsections" requires retrieving all blocks in the graph.
return None
elif get_bool_param(request, 'sections', all_requested):
return 1
else:
return 0
def _sections_quality(self, course):
sections, visible_sections = self._get_sections(course)
sections_with_highlights = [section for section in visible_sections if section.highlights]
return dict(
total_number=len(sections),
total_visible=len(visible_sections),
number_with_highlights=len(sections_with_highlights),
highlights_active_for_course=course.highlights_enabled_for_messaging,
highlights_enabled=True, # used to be controlled by a waffle switch, now just always enabled
)
def _subsections_quality(self, course, request): # lint-amnesty, pylint: disable=missing-function-docstring
subsection_unit_dict = self._get_subsections_and_units(course, request)
num_block_types_per_subsection_dict = {}
for subsection_key, unit_dict in subsection_unit_dict.items():
leaf_block_types_in_subsection = (
unit_info['leaf_block_types']
for unit_info in unit_dict.values()
)
num_block_types_per_subsection_dict[subsection_key] = len(set().union(*leaf_block_types_in_subsection))
return dict(
total_visible=len(num_block_types_per_subsection_dict),
num_with_one_block_type=list(num_block_types_per_subsection_dict.values()).count(1),
num_block_types=self._stats_dict(list(num_block_types_per_subsection_dict.values())),
)
def _units_quality(self, course, request): # lint-amnesty, pylint: disable=missing-function-docstring
subsection_unit_dict = self._get_subsections_and_units(course, request)
num_leaf_blocks_per_unit = [
unit_info['num_leaf_blocks']
for unit_dict in subsection_unit_dict.values()
for unit_info in unit_dict.values()
]
return dict(
total_visible=len(num_leaf_blocks_per_unit),
num_blocks=self._stats_dict(num_leaf_blocks_per_unit),
)
def _videos_quality(self, course): # lint-amnesty, pylint: disable=missing-function-docstring
video_blocks_in_course = modulestore().get_items(course.id, qualifiers={'category': 'video'})
videos, __ = get_videos_for_course(course.id)
videos_in_val = list(videos)
video_durations = [video['duration'] for video in videos_in_val]
return dict(
total_number=len(video_blocks_in_course),
num_mobile_encoded=len(videos_in_val),
num_with_val_id=len([v for v in video_blocks_in_course if v.edx_video_id]),
durations=self._stats_dict(video_durations),
)
@classmethod
@request_cached()
def _get_subsections_and_units(cls, course, request):
"""
Returns {subsection_key: {unit_key: {num_leaf_blocks: <>, leaf_block_types: set(<>) }}}
for all visible subsections and units.
"""
_, visible_sections = cls._get_sections(course)
subsection_dict = {}
for section in visible_sections:
visible_subsections = cls._get_visible_children(section)
if get_bool_param(request, 'exclude_graded', False):
visible_subsections = [s for s in visible_subsections if not s.graded]
for subsection in visible_subsections:
unit_dict = {}
visible_units = cls._get_visible_children(subsection)
for unit in visible_units:
leaf_blocks = cls._get_leaf_blocks(unit)
unit_dict[unit.location] = dict(
num_leaf_blocks=len(leaf_blocks),
leaf_block_types={block.location.block_type for block in leaf_blocks},
)
subsection_dict[subsection.location] = unit_dict
return subsection_dict
@classmethod
@request_cached()
def _get_sections(cls, course):
return cls._get_all_children(course)
@classmethod
def _get_all_children(cls, parent): # lint-amnesty, pylint: disable=missing-function-docstring
store = modulestore()
children = [store.get_item(child_usage_key) for child_usage_key in cls._get_children(parent)]
visible_children = [
c for c in children
if not c.visible_to_staff_only and not c.hide_from_toc
]
return children, visible_children
@classmethod
def _get_visible_children(cls, parent):
        _, visible_children = cls._get_all_children(parent)
        return visible_children
@classmethod
def _get_children(cls, parent): # lint-amnesty, pylint: disable=missing-function-docstring
if not hasattr(parent, 'children'):
return []
else:
return parent.children
@classmethod
def _get_leaf_blocks(cls, unit): # lint-amnesty, pylint: disable=missing-function-docstring
def leaf_filter(block):
return (
block.location.block_type not in ('chapter', 'sequential', 'vertical') and
len(cls._get_children(block)) == 0
)
return [
block for block in # lint-amnesty, pylint: disable=unnecessary-comprehension
traverse_pre_order(unit, cls._get_visible_children, leaf_filter)
]
def _stats_dict(self, data): # lint-amnesty, pylint: disable=missing-function-docstring
if not data:
return dict(
min=None,
max=None,
mean=None,
median=None,
mode=None,
)
else:
return dict(
min=min(data),
max=max(data),
mean=np.around(np.mean(data)),
median=np.around(np.median(data)),
mode=stats.mode(data, axis=None)[0][0],
)
| [
"xmodule.modulestore.django.modulestore",
"scipy.stats.mode",
"numpy.median",
"openedx.core.lib.cache_utils.request_cached",
"time.time",
"edxval.api.get_videos_for_course",
"rest_framework.response.Response",
"numpy.mean",
"openedx.core.lib.api.view_utils.view_auth_classes",
"openedx.core.lib.gra... | [((602, 629), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (619, 629), False, 'import logging\n'), ((633, 652), 'openedx.core.lib.api.view_utils.view_auth_classes', 'view_auth_classes', ([], {}), '()\n', (650, 652), False, 'from openedx.core.lib.api.view_utils import DeveloperErrorViewMixin, view_auth_classes\n'), ((8292, 8308), 'openedx.core.lib.cache_utils.request_cached', 'request_cached', ([], {}), '()\n', (8306, 8308), False, 'from openedx.core.lib.cache_utils import request_cached\n'), ((9488, 9504), 'openedx.core.lib.cache_utils.request_cached', 'request_cached', ([], {}), '()\n', (9502, 9504), False, 'from openedx.core.lib.cache_utils import request_cached\n'), ((3610, 3623), 'xmodule.modulestore.django.modulestore', 'modulestore', ([], {}), '()\n', (3621, 3623), False, 'from xmodule.modulestore.django import modulestore\n'), ((4984, 5002), 'rest_framework.response.Response', 'Response', (['response'], {}), '(response)\n', (4992, 5002), False, 'from rest_framework.response import Response\n'), ((7844, 7876), 'edxval.api.get_videos_for_course', 'get_videos_for_course', (['course.id'], {}), '(course.id)\n', (7865, 7876), False, 'from edxval.api import get_videos_for_course\n'), ((9720, 9733), 'xmodule.modulestore.django.modulestore', 'modulestore', ([], {}), '()\n', (9731, 9733), False, 'from xmodule.modulestore.django import modulestore\n'), ((3306, 3317), 'time.time', 'time.time', ([], {}), '()\n', (3315, 3317), False, 'import time\n'), ((7754, 7767), 'xmodule.modulestore.django.modulestore', 'modulestore', ([], {}), '()\n', (7765, 7767), False, 'from xmodule.modulestore.django import modulestore\n'), ((10841, 10905), 'openedx.core.lib.graph_traversals.traverse_pre_order', 'traverse_pre_order', (['unit', 'cls._get_visible_children', 'leaf_filter'], {}), '(unit, cls._get_visible_children, leaf_filter)\n', (10859, 10905), False, 'from openedx.core.lib.graph_traversals import traverse_pre_order\n'), ((3422, 3433), 'time.time', 'time.time', ([], {}), '()\n', (3431, 3433), False, 'import time\n'), ((11337, 11350), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (11344, 11350), True, 'import numpy as np\n'), ((11386, 11401), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (11395, 11401), True, 'import numpy as np\n'), ((11425, 11452), 'scipy.stats.mode', 'stats.mode', (['data'], {'axis': 'None'}), '(data, axis=None)\n', (11435, 11452), False, 'from scipy import stats\n')] |
import numpy as np
from numba import njit
import scipy.sparse as sp
import scipy.linalg as spl
from graphadv.utils.estimate_utils import (estimate_loss_with_delta_eigenvals,
estimate_loss_with_perturbation_gradient)
from graphadv.utils import filter_singletons
from graphadv.attack.targeted.targeted_attacker import TargetedAttacker
class NodeEmbeddingAttack(TargetedAttacker):
""" This implementation is not exactly right.
"""
def __init__(self, adj, k=50, name=None, seed=None, **kwargs):
super().__init__(adj=adj, name=name, seed=seed, **kwargs)
self.nodes_set = set(range(self.n_nodes))
deg_matrix = sp.diags(self.degree).astype('float64')
self.vals_org, self.vecs_org = sp.linalg.eigsh(self.adj.astype('float64'), k=k, M=deg_matrix)
def attack(self, target, n_perturbations=None, dim=32, window_size=5,
n_neg_samples=3, direct_attack=True, structure_attack=True, feature_attack=False):
super().attack(target, n_perturbations, direct_attack, structure_attack, feature_attack)
n_perturbations = self.n_perturbations
n_nodes = self.n_nodes
adj = self.adj.astype('float64')
if direct_attack:
influence_nodes = [target]
candidates = np.column_stack(
(np.tile(target, n_nodes-1), list(self.nodes_set-set([target]))))
else:
influence_nodes = adj[target].indices
# influence_nodes = adj[target].nonzero()[1]
candidates = np.row_stack([np.column_stack((np.tile(infl, n_nodes - 2),
list(self.nodes_set - set([target, infl])))) for infl in
influence_nodes])
if not self.allow_singleton:
candidates = filter_singletons(candidates, adj)
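        # delta_w encodes the flip direction per candidate: existing edges
        # (A_ij = 1) map to -1 (removal), non-edges (A_ij = 0) to +1 (insertion).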
delta_w = 1. - 2 * adj[candidates[:, 0], candidates[:, 1]].A1
loss_for_candidates = estimate_loss_with_delta_eigenvals(candidates, delta_w,
self.vals_org, self.vecs_org,
self.n_nodes,
dim, window_size)
self.structure_flips = candidates[loss_for_candidates.argsort()[-n_perturbations:]]
| [
"graphadv.utils.filter_singletons",
"graphadv.utils.estimate_utils.estimate_loss_with_delta_eigenvals",
"scipy.sparse.diags",
"numpy.tile"
] | [((1986, 2108), 'graphadv.utils.estimate_utils.estimate_loss_with_delta_eigenvals', 'estimate_loss_with_delta_eigenvals', (['candidates', 'delta_w', 'self.vals_org', 'self.vecs_org', 'self.n_nodes', 'dim', 'window_size'], {}), '(candidates, delta_w, self.vals_org, self\n .vecs_org, self.n_nodes, dim, window_size)\n', (2020, 2108), False, 'from graphadv.utils.estimate_utils import estimate_loss_with_delta_eigenvals, estimate_loss_with_perturbation_gradient\n'), ((1850, 1884), 'graphadv.utils.filter_singletons', 'filter_singletons', (['candidates', 'adj'], {}), '(candidates, adj)\n', (1867, 1884), False, 'from graphadv.utils import filter_singletons\n'), ((690, 711), 'scipy.sparse.diags', 'sp.diags', (['self.degree'], {}), '(self.degree)\n', (698, 711), True, 'import scipy.sparse as sp\n'), ((1348, 1376), 'numpy.tile', 'np.tile', (['target', '(n_nodes - 1)'], {}), '(target, n_nodes - 1)\n', (1355, 1376), True, 'import numpy as np\n'), ((1590, 1616), 'numpy.tile', 'np.tile', (['infl', '(n_nodes - 2)'], {}), '(infl, n_nodes - 2)\n', (1597, 1616), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
FloodTool2DockWidget
A QGIS plugin
MyCoast FloodTool
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2019-02-27
git sha : $Format:%H$
copyright : (C) 2019 by Meteogalicia
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt5 import QtGui, QtWidgets, uic
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import QDate, QTime, QDateTime, Qt, QVariant
from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis
from datetime import datetime
import re
from netCDF4 import Dataset, num2date
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from .utils import THREDDS_parser, modelGrid, tidalSolution
'''
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'flood_tool2_dockwidget_base.ui'))
'''
from .flood_tool2_dockwidget_base import Ui_FloodTool2DockWidgetBase
class FloodTool2DockWidget(QtWidgets.QDockWidget, Ui_FloodTool2DockWidgetBase):
closingPlugin = pyqtSignal()
def __init__(self, iface, parent=None):
"""Constructor."""
super(FloodTool2DockWidget, self).__init__(parent)
# Set up the user interface from Designer.
# After setupUI you can access any designer object by doing
# self.<objectname>, and you can use autoconnect slots - see
# http://doc.qt.io/qt-5/designer-using-a-ui-file.html
# #widgets-and-dialogs-with-auto-connect
self.setupUi(self)
self.progressBar.setValue(0)
# Setting up combo boxes and connecting signals:
self.hydro_grid_comboBox.addItems(['artabro (MOHID)', 'arousa (MOHID)', 'vigo (MOHID)', 'noia (MOHID)', 'iberia (ROMS)', 'cantabrico (ROMS)'])
self.hydro_grid_comboBox.currentIndexChanged.connect(self.enable_calendar_hydro)
self.wave_grid_comboBox.addItems(['galicia (SWAN)', 'galicia (WW3)', 'iberia (WW3)'])
self.wave_grid_comboBox.currentIndexChanged.connect(self.enable_calendar_wave)
# Triggers initial evaluation of dates:
self.enable_calendar_hydro()
self.enable_calendar_wave()
self.tideSolution_checkBox.stateChanged.connect(self.checkBoxState)
self.run_button.clicked.connect(self.run)
# The plugin acts over active layer:
campos = [field.name() for field in iface.activeLayer().fields()]
self.wave_field_comboBox.addItems(campos)
self.hydro_field_comboBox.addItems(campos)
# Access to iface from plugin:
self.iface = iface
def checkBoxState(self):
self.hydro_variable_comboBox.setEnabled( not self.tideSolution_checkBox.isChecked() )
def closeEvent(self, event):
self.closingPlugin.emit()
event.accept()
def run(self):
# QRadioButton state checker:
_, self.time_control = [button.isChecked() for button in self.groupBox_time.findChildren(QtWidgets.QRadioButton)]
_, self.color_codes = [button.isChecked() for button in self.groupBox_codes.findChildren(QtWidgets.QRadioButton)]
self.progressBar.setValue(0)
text = ''
text += 'Wave variable: %s\n' % self.wave_variable_comboBox.currentText()
text += 'Wave layer field: %s\n' % self.wave_field_comboBox.currentText()
text += 'Selected model date: %s\n' % self.wave_calendarWidget.selectedDate().toString("yyyy-MM-dd")
text += '----------------------------------------\n'
text += 'Hydro variable: %s\n' % self.hydro_variable_comboBox.currentText()
text += 'Hydro layer field: %s\n' % self.hydro_field_comboBox.currentText()
text += 'Selected model date: %s\n' % self.hydro_calendarWidget.selectedDate().toString("yyyy-MM-dd")
text += '\nRetrieving active layer fields...\n'
self.results_textBrowser.setText(text)
features = self.iface.activeLayer().getFeatures()
wave_id = []
hydro_id = []
        # We access the line to obtain the vertices:
for current, feature in enumerate(features):
wave_id.append( feature[self.wave_field_comboBox.currentText()] )
hydro_id.append( feature[self.hydro_field_comboBox.currentText()] )
wave_id = np.array(wave_id)
hydro_id = np.array(hydro_id)
self.progressBar.setValue(20)
# Processing of wave model data:
self.results_textBrowser.append('Retrieving THREDDS wave data...\n')
wave_date = self.wave_calendarWidget.selectedDate().toPyDate()
wave_url = wave_date.strftime(self.waveGrid.template)
QgsMessageLog.logMessage('Wave URL: %s' % wave_url, 'DEBUG', level=Qgis.Info)
datos = Dataset(wave_url)
var_name = self.waveGrid.standard_names_to_var[self.wave_variable_comboBox.currentText()]
wave_data = datos.variables[var_name][:]
wave_dates = num2date(datos.variables['time'][:], datos.variables['time'].units)
datos.close()
# Reshaping of matrices in order to translate i,j to nodes ids in active layer:
if len(wave_data.shape)==3:
(nt, j, i) = wave_data.shape
wave_data = wave_data.reshape(nt,j*i)
self.progressBar.setValue(40)
# Processing of hydrodynamic model data:
self.results_textBrowser.append('Retrieving THREDDS hydrodynamic data...\n')
hydro_date = self.hydro_calendarWidget.selectedDate().toPyDate()
hydro_url = hydro_date.strftime(self.hydroGrid.template)
if self.tideSolution_checkBox.isChecked():
romsTide = tidalSolution()
romsTide.read_grid()
romsTide.least_squares()
hydro_dates = wave_dates
else:
datos = Dataset(hydro_url)
var_name = self.hydroGrid.standard_names_to_var[self.hydro_variable_comboBox.currentText()]
hydro_data = datos.variables[var_name][:]
hydro_dates = num2date(datos.variables['time'][:], datos.variables['time'].units)
datos.close()
# Reshaping of matrices in order to translate i,j to nodes ids in active layer:
if len(hydro_data.shape)==3:
(nt, j, i) = hydro_data.shape
hydro_data = hydro_data.reshape(nt,j*i)
self.progressBar.setValue(60)
self.results_textBrowser.append('Performing date selection over data...\n')
# Date selection: Only process data where dates overlap:
start = np.max((hydro_dates.min(), wave_dates.min()))
end = np.min((hydro_dates.max(), wave_dates.max()))
self.results_textBrowser.append('Start: %s, End: %s\n' % (start.strftime('%Y/%m/%d %H:%M'), end.strftime('%Y/%m/%d %H:%M')))
condition = (wave_dates>=start) & (wave_dates<=end)
wave_data = wave_data[condition][:, wave_id]
condition = (hydro_dates>=start) & (hydro_dates<=end)
'''
plt.plot(hydro_dates[condition], hydro_data[condition][:, 4239]+2.08,'r--')
tideSerie = romsTide.tideSynthesis(pd.date_range(start,end,freq='1H'), [128325])
tideSerie[128325].plot()
plt.show()
'''
if self.tideSolution_checkBox.isChecked():
hydro_data = romsTide.tideSynthesis(pd.date_range(start,end,freq='1H'), hydro_id)
hydro_data = hydro_data[hydro_id].values # Needs a reshape cause tideSynthesis only calculate in unique id
else:
hydro_data = hydro_data[condition][:, hydro_id]
QgsMessageLog.logMessage('hydro_data shape: (%i,%i)' % hydro_data.shape, 'DEBUG', level=Qgis.Info)
self.progressBar.setValue(80)
self.results_textBrowser.append('Running FL calculations...\n')
# Very simple parametrization of flood level based on Vousdoukas et al. 2017:
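        # (i.e. flood level = modeled sea level plus 20% of the selected wave
        # variable as a rough wave-setup contribution)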
if self.time_control:
FL = hydro_data + 0.2*wave_data
fechas = pd.date_range(start,end,freq='1H')
self.fechas = [fecha.strftime('%Y/%m/%d %H:%M') for fecha in fechas]
else:
FL = np.max(hydro_data + 0.2*wave_data, axis=0)
# Processing output layer:
features = self.iface.activeLayer().getFeatures()
new_features = []
        # We access the line to obtain the vertices:
for current, feature in enumerate(features):
field_names = [field.name() for field in feature.fields()]
if self.time_control:
for i,fecha in enumerate(self.fechas):
new_feature = QgsFeature(feature)
new_feature.setAttributes([current, float(FL[i,current]), fecha])
new_features.append(new_feature)
else:
new_feature = QgsFeature(feature)
new_feature.setAttributes([current, float(FL[current])])
new_features.append(new_feature)
# Needs code for CRS:
Crc_source_id = int(self.iface.activeLayer().sourceCrs().authid().split(':')[-1])
vectorlayer = QgsVectorLayer("MultiPolygon?crs=epsg:%i" % Crc_source_id, "Output", "memory")
pr = vectorlayer.dataProvider()
if self.time_control:
atributos = [QgsField("Id", QVariant.Int), QgsField("FL", QVariant.Double), QgsField("time" , QVariant.String)]
else:
atributos = [QgsField("Id", QVariant.Int), QgsField("FL", QVariant.Double)]
pr.addAttributes(atributos)
vectorlayer.updateFields()
pr.addFeatures(new_features)
# Add layer to project:
proyecto = QgsProject.instance()
proyecto.addMapLayer(vectorlayer)
self.progressBar.setValue(100)
def enable_calendar_hydro(self, **kwargs):
calendarWidget = self.hydro_calendarWidget
variable_comboBox = self.hydro_variable_comboBox
self.hydroGrid = modelGrid(self.hydro_grid_comboBox.currentText())
self.tideSolution_checkBox.setEnabled(self.hydroGrid.tide_solution)
fechas = [fecha.strftime('%Y%m%d') for fecha in self.hydroGrid.THREDDS_parser.parse_dates()]
inicio = datetime.strptime(fechas[ 0],'%Y%m%d')
fin = datetime.strptime(fechas[-1],'%Y%m%d')
calendarWidget.setDateRange(QDate(inicio), QDate(fin))
variable_comboBox.clear()
#variable_comboBox.addItems(self.hydroGrid.variables)
variable_comboBox.addItems(self.hydroGrid.standard_names_to_var.keys())
def enable_calendar_wave(self, **kwargs):
calendarWidget = self.wave_calendarWidget
variable_comboBox = self.wave_variable_comboBox
self.waveGrid = modelGrid(self.wave_grid_comboBox.currentText())
fechas = [fecha.strftime('%Y%m%d') for fecha in self.waveGrid.THREDDS_parser.parse_dates()]
inicio = datetime.strptime(fechas[ 0],'%Y%m%d')
fin = datetime.strptime(fechas[-1],'%Y%m%d')
calendarWidget.setDateRange(QDate(inicio), QDate(fin))
variable_comboBox.clear()
#variable_comboBox.addItems(self.waveGrid.variables)
variable_comboBox.addItems(self.waveGrid.standard_names_to_var.keys())
| [
"PyQt5.QtCore.pyqtSignal",
"netCDF4.Dataset",
"qgis.core.QgsVectorLayer",
"qgis.core.QgsProject.instance",
"pandas.date_range",
"PyQt5.QtCore.QDate",
"qgis.core.QgsFeature",
"datetime.datetime.strptime",
"numpy.max",
"numpy.array",
"qgis.core.QgsField",
"qgis.core.QgsMessageLog.logMessage",
... | [((1954, 1966), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (1964, 1966), False, 'from PyQt5.QtCore import pyqtSignal\n'), ((5206, 5223), 'numpy.array', 'np.array', (['wave_id'], {}), '(wave_id)\n', (5214, 5223), True, 'import numpy as np\n'), ((5243, 5261), 'numpy.array', 'np.array', (['hydro_id'], {}), '(hydro_id)\n', (5251, 5261), True, 'import numpy as np\n'), ((5566, 5643), 'qgis.core.QgsMessageLog.logMessage', 'QgsMessageLog.logMessage', (["('Wave URL: %s' % wave_url)", '"""DEBUG"""'], {'level': 'Qgis.Info'}), "('Wave URL: %s' % wave_url, 'DEBUG', level=Qgis.Info)\n", (5590, 5643), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((5661, 5678), 'netCDF4.Dataset', 'Dataset', (['wave_url'], {}), '(wave_url)\n', (5668, 5678), False, 'from netCDF4 import Dataset, num2date\n'), ((5851, 5918), 'netCDF4.num2date', 'num2date', (["datos.variables['time'][:]", "datos.variables['time'].units"], {}), "(datos.variables['time'][:], datos.variables['time'].units)\n", (5859, 5918), False, 'from netCDF4 import Dataset, num2date\n'), ((8468, 8570), 'qgis.core.QgsMessageLog.logMessage', 'QgsMessageLog.logMessage', (["('hydro_data shape: (%i,%i)' % hydro_data.shape)", '"""DEBUG"""'], {'level': 'Qgis.Info'}), "('hydro_data shape: (%i,%i)' % hydro_data.shape,\n 'DEBUG', level=Qgis.Info)\n", (8492, 8570), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10006, 10084), 'qgis.core.QgsVectorLayer', 'QgsVectorLayer', (["('MultiPolygon?crs=epsg:%i' % Crc_source_id)", '"""Output"""', '"""memory"""'], {}), "('MultiPolygon?crs=epsg:%i' % Crc_source_id, 'Output', 'memory')\n", (10020, 10084), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10546, 10567), 'qgis.core.QgsProject.instance', 'QgsProject.instance', ([], {}), '()\n', (10565, 10567), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((11087, 11125), 'datetime.datetime.strptime', 'datetime.strptime', (['fechas[0]', '"""%Y%m%d"""'], {}), "(fechas[0], '%Y%m%d')\n", (11104, 11125), False, 'from datetime import datetime\n'), ((11143, 11182), 'datetime.datetime.strptime', 'datetime.strptime', (['fechas[-1]', '"""%Y%m%d"""'], {}), "(fechas[-1], '%Y%m%d')\n", (11160, 11182), False, 'from datetime import datetime\n'), ((11773, 11811), 'datetime.datetime.strptime', 'datetime.strptime', (['fechas[0]', '"""%Y%m%d"""'], {}), "(fechas[0], '%Y%m%d')\n", (11790, 11811), False, 'from datetime import datetime\n'), ((11829, 11868), 'datetime.datetime.strptime', 'datetime.strptime', (['fechas[-1]', '"""%Y%m%d"""'], {}), "(fechas[-1], '%Y%m%d')\n", (11846, 11868), False, 'from datetime import datetime\n'), ((6708, 6726), 'netCDF4.Dataset', 'Dataset', (['hydro_url'], {}), '(hydro_url)\n', (6715, 6726), False, 'from netCDF4 import Dataset, num2date\n'), ((6915, 6982), 'netCDF4.num2date', 'num2date', (["datos.variables['time'][:]", "datos.variables['time'].units"], {}), "(datos.variables['time'][:], datos.variables['time'].units)\n", (6923, 6982), False, 'from netCDF4 import Dataset, num2date\n'), ((8875, 8911), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': '"""1H"""'}), "(start, end, freq='1H')\n", (8888, 8911), True, 'import pandas as pd\n'), ((9023, 9067), 'numpy.max', 'np.max', (['(hydro_data + 0.2 * wave_data)'], {'axis': '(0)'}), '(hydro_data + 0.2 * wave_data, axis=0)\n', (9029, 9067), True, 'import numpy as np\n'), ((11219, 11232), 'PyQt5.QtCore.QDate', 'QDate', (['inicio'], {}), '(inicio)\n', (11224, 11232), False, 'from PyQt5.QtCore import QDate, QTime, QDateTime, Qt, QVariant\n'), ((11234, 11244), 'PyQt5.QtCore.QDate', 'QDate', (['fin'], {}), '(fin)\n', (11239, 11244), False, 'from PyQt5.QtCore import QDate, QTime, QDateTime, Qt, QVariant\n'), ((11905, 11918), 'PyQt5.QtCore.QDate', 'QDate', (['inicio'], {}), '(inicio)\n', (11910, 11918), False, 'from PyQt5.QtCore import QDate, QTime, QDateTime, Qt, QVariant\n'), ((11920, 11930), 'PyQt5.QtCore.QDate', 'QDate', (['fin'], {}), '(fin)\n', (11925, 11930), False, 'from PyQt5.QtCore import QDate, QTime, QDateTime, Qt, QVariant\n'), ((8220, 8256), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': '"""1H"""'}), "(start, end, freq='1H')\n", (8233, 8256), True, 'import pandas as pd\n'), ((9721, 9740), 'qgis.core.QgsFeature', 'QgsFeature', (['feature'], {}), '(feature)\n', (9731, 9740), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10182, 10210), 'qgis.core.QgsField', 'QgsField', (['"""Id"""', 'QVariant.Int'], {}), "('Id', QVariant.Int)\n", (10190, 10210), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10212, 10243), 'qgis.core.QgsField', 'QgsField', (['"""FL"""', 'QVariant.Double'], {}), "('FL', QVariant.Double)\n", (10220, 10243), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10245, 10278), 'qgis.core.QgsField', 'QgsField', (['"""time"""', 'QVariant.String'], {}), "('time', QVariant.String)\n", (10253, 10278), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10321, 10349), 'qgis.core.QgsField', 'QgsField', (['"""Id"""', 'QVariant.Int'], {}), "('Id', QVariant.Int)\n", (10329, 10349), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((10351, 10382), 'qgis.core.QgsField', 'QgsField', (['"""FL"""', 'QVariant.Double'], {}), "('FL', QVariant.Double)\n", (10359, 10382), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n'), ((9514, 9533), 'qgis.core.QgsFeature', 'QgsFeature', (['feature'], {}), '(feature)\n', (9524, 9533), False, 'from qgis.core import QgsProject, QgsVectorLayer, QgsFeature, QgsField, QgsMessageLog, Qgis\n')]
import argparse
import gzip
import numpy as np
import tensorflow as tf
from attalos.dataset.dataset import Dataset
from attalos.evaluation.evaluation import Eval
def tags_2_vec(tags, w2v_model=None):
"""
Takes a list of text tags and returns the normalized sum of the word vectors
Args:
tags: A iterable of text tags
w2v_model: a dictionary like object where the keys are words and the values are word vectors
Returns:
Normalized sum of the word vectors
"""
if len(tags) == 0:
return np.zeros(300)
else:
output = np.sum([w2v_model[tag] for tag in tags], axis=0)
return output / np.linalg.norm(output)
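# Illustrative usage (assumes w2v_model maps each tag to a 300-d vector):
#   vec = tags_2_vec(['cat', 'dog'], w2v_model=w2v_model)
#   np.linalg.norm(vec)  # -> 1.0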
def evaluate_regressor(regressor, val_image_feats, val_text_tags, w2v_model, k=5, verbose=False):
"""
Takes a regressor and returns the precision/recall on the test data
Args:
regressor: a tensorflow.contrib.learn regression estimator
val_image_feats: Image features to test performance on
val_text_tags: Text Tags to test performance on
w2v_model: a dictionary like object where the keys are words and the values are word vectors
k: Top number of items to retrieve to test precision/recall on
verbose: Verbose output or not
Returns:
evaluator: A attalos.evaluation.evaluation.Eval object
"""
val_pred = regressor.predict(val_image_feats)
w2ind = {}
reverse_w2v_model = {}
    wordmatrix = np.zeros((len(w2v_model), len(w2v_model[next(iter(w2v_model))])))  # dict views are not indexable in Python 3
for i, word in enumerate(w2v_model):
w2ind[word] = i
wordmatrix[i, :] = w2v_model[word]
reverse_w2v_model[i] = word
ground_truth_one_hot = np.zeros((len(val_text_tags), len(w2v_model)))
num_skipped = 0
total = 0
skipped = set()
for i, tags in enumerate(val_text_tags):
for tag in tags:
try:
total += 1
ground_truth_one_hot[i, w2ind[tag]] = 1
except KeyError:
skipped.add(tag)
                num_skipped += 1
if verbose:
print('Skipped {} of {} total'.format(num_skipped, total))
predictions_one_hot = np.zeros((len(val_text_tags), len(w2v_model)))
for i in range(val_pred.shape[0]):
normalized_val = val_pred[i, :]/np.linalg.norm(val_pred[i, :])
# np.dot(wordmatrix, normalized_val) gets the similarity between the two vectors
# argpartition gets the topk (where k=5)
indices = np.argpartition(np.dot(wordmatrix,normalized_val), -1*k)[-1*k:]
for index in indices:
predictions_one_hot[i, index] = 1
evaluator = Eval(ground_truth_one_hot, predictions_one_hot)
return evaluator
def train_model(train_dataset,
test_dataset,
w2v_model,
batch_size=128,
num_epochs=200,
save_path=None,
verbose=True):
"""
Train a regression model to map image features into the word vector space
Args:
train_dataset: Training attalos.dataset.dataset object
test_dataset: Test attalos.dataset.dataset object
w2v_model: A dictionary like object where the keys are words and the values are word vectors
batch_size: Batch size to use for training
num_epochs: Number of epochs to train for
save_path: Path to save model to allow restart
        verbose: Amount of debug information to output
Returns:
regressor: The trained regression estimator
"""
num_items = train_dataset.num_images
# Get validation data
# Extract features from first image
image_feats, tags = test_dataset.get_index(0)
# Get shape and initialize numpy matrix
image_feat_size = image_feats.shape[0]
val_image_feats = np.zeros((test_dataset.num_images, image_feat_size))
val_text_tags = []
# Extract features and place in numpy matrix
for i in test_dataset:
image_feats, tags = test_dataset[i]
val_image_feats[i, :] = image_feats/np.linalg.norm(image_feats)
val_text_tags.append(tags)
# Allocate GPU memory as needed (vs. allocating all the memory)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Build regressor
regressor = tf.contrib.learn.TensorFlowDNNRegressor(hidden_units=[200,200],
steps=10,
continue_training=True,
verbose=0)
for epoch in range(num_epochs):
for batch in range(int(num_items/batch_size)):
image_feats, text_tags = train_dataset.get_next_batch(batch_size)
for i in range(batch_size):
image_feats[i, :] = image_feats[i, :]/ np.linalg.norm(image_feats[i, :])
word_feats = [tags_2_vec(tags, w2v_model) for tags in text_tags]
regressor.fit(image_feats, word_feats)
if verbose:
evaluator = evaluate_regressor(regressor, val_image_feats, val_text_tags, w2v_model, verbose=verbose)
# Evaluate accuracy
print('Epoch: {}'.format(epoch))
evaluator.print_evaluation()
if save_path:
regressor.save(save_path)
return regressor
def main():
import os
parser = argparse.ArgumentParser(description='Two layer linear regression')
parser.add_argument("image_feature_file_train",
type=str,
help="Image Feature file for the training set")
parser.add_argument("text_feature_file_train",
type=str,
help="Text Feature file for the training set")
parser.add_argument("image_feature_file_test",
type=str,
help="Image Feature file for the test set")
parser.add_argument("text_feature_file_test",
type=str,
help="Text Feature file for the test set")
parser.add_argument("word_vector_file",
type=str,
help="Text file containing the word vectors")
# Optional Args
parser.add_argument("--learning_rate",
type=float,
default=.05,
help="Learning Rate")
parser.add_argument("--epochs",
type=int,
default=200,
help="Number of epochs to run for")
parser.add_argument("--batch_size",
type=int,
default=128,
help="Batch size to use for training")
args = parser.parse_args()
train_dataset = Dataset(args.image_feature_file_train, args.text_feature_file_train)
test_dataset = Dataset(args.image_feature_file_test, args.text_feature_file_test)
# Get the full vocab so we can extract only the word vectors we care about
dataset_tags = set()
for dataset in [train_dataset, test_dataset]:
for tags in dataset.text_feats.values():
dataset_tags.update(tags)
# Read w2vec
w2v_lookup = {}
if os.path.exists(args.word_vector_file):
if args.word_vector_file.endswith('.gz'):
input_file = gzip.open(args.word_vector_file)
else:
input_file = open(args.word_vector_file)
for i, line in enumerate(input_file):
first_word = line[:line.find(' ')]
if first_word in dataset_tags:
line = line.strip().split(' ')
w2v_vector = np.array([float(j) for j in line[1:]])
# Normalize vector before storing
w2v_lookup[line[0]] = w2v_vector / np.linalg.norm(w2v_vector)
train_model(train_dataset,
test_dataset,
w2v_lookup,
batch_size=args.batch_size,
num_epochs=args.epochs)
if __name__ == '__main__':
main()
| [
"numpy.sum",
"argparse.ArgumentParser",
"gzip.open",
"attalos.evaluation.evaluation.Eval",
"numpy.zeros",
"os.path.exists",
"tensorflow.ConfigProto",
"numpy.linalg.norm",
"attalos.dataset.dataset.Dataset",
"numpy.dot",
"tensorflow.contrib.learn.TensorFlowDNNRegressor"
] | [((2646, 2693), 'attalos.evaluation.evaluation.Eval', 'Eval', (['ground_truth_one_hot', 'predictions_one_hot'], {}), '(ground_truth_one_hot, predictions_one_hot)\n', (2650, 2693), False, 'from attalos.evaluation.evaluation import Eval\n'), ((3802, 3854), 'numpy.zeros', 'np.zeros', (['(test_dataset.num_images, image_feat_size)'], {}), '((test_dataset.num_images, image_feat_size))\n', (3810, 3854), True, 'import numpy as np\n'), ((4187, 4203), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (4201, 4203), True, 'import tensorflow as tf\n'), ((4286, 4399), 'tensorflow.contrib.learn.TensorFlowDNNRegressor', 'tf.contrib.learn.TensorFlowDNNRegressor', ([], {'hidden_units': '[200, 200]', 'steps': '(10)', 'continue_training': '(True)', 'verbose': '(0)'}), '(hidden_units=[200, 200], steps=10,\n continue_training=True, verbose=0)\n', (4325, 4399), True, 'import tensorflow as tf\n'), ((5359, 5425), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Two layer linear regression"""'}), "(description='Two layer linear regression')\n", (5382, 5425), False, 'import argparse\n'), ((6768, 6836), 'attalos.dataset.dataset.Dataset', 'Dataset', (['args.image_feature_file_train', 'args.text_feature_file_train'], {}), '(args.image_feature_file_train, args.text_feature_file_train)\n', (6775, 6836), False, 'from attalos.dataset.dataset import Dataset\n'), ((6856, 6922), 'attalos.dataset.dataset.Dataset', 'Dataset', (['args.image_feature_file_test', 'args.text_feature_file_test'], {}), '(args.image_feature_file_test, args.text_feature_file_test)\n', (6863, 6922), False, 'from attalos.dataset.dataset import Dataset\n'), ((7210, 7247), 'os.path.exists', 'os.path.exists', (['args.word_vector_file'], {}), '(args.word_vector_file)\n', (7224, 7247), False, 'import os\n'), ((545, 558), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (553, 558), True, 'import numpy as np\n'), ((586, 634), 'numpy.sum', 'np.sum', (['[w2v_model[tag] for tag in tags]'], {'axis': '(0)'}), '([w2v_model[tag] for tag in tags], axis=0)\n', (592, 634), True, 'import numpy as np\n'), ((659, 681), 'numpy.linalg.norm', 'np.linalg.norm', (['output'], {}), '(output)\n', (673, 681), True, 'import numpy as np\n'), ((2302, 2332), 'numpy.linalg.norm', 'np.linalg.norm', (['val_pred[i, :]'], {}), '(val_pred[i, :])\n', (2316, 2332), True, 'import numpy as np\n'), ((4042, 4069), 'numpy.linalg.norm', 'np.linalg.norm', (['image_feats'], {}), '(image_feats)\n', (4056, 4069), True, 'import numpy as np\n'), ((7324, 7356), 'gzip.open', 'gzip.open', (['args.word_vector_file'], {}), '(args.word_vector_file)\n', (7333, 7356), False, 'import gzip\n'), ((2505, 2539), 'numpy.dot', 'np.dot', (['wordmatrix', 'normalized_val'], {}), '(wordmatrix, normalized_val)\n', (2511, 2539), True, 'import numpy as np\n'), ((7748, 7774), 'numpy.linalg.norm', 'np.linalg.norm', (['w2v_vector'], {}), '(w2v_vector)\n', (7762, 7774), True, 'import numpy as np\n'), ((4828, 4861), 'numpy.linalg.norm', 'np.linalg.norm', (['image_feats[i, :]'], {}), '(image_feats[i, :])\n', (4842, 4861), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# by <EMAIL>
__version__ = "0.0.1"
from datetime import datetime
from glob import glob
from os.path import dirname
from pathlib import Path
import openTSNE
import plotly.express as px
from numpy import array, sqrt, unique
from pandas import concat, read_pickle
from plotly.offline import plot
from sklearn.manifold import TSNE
from tqdm import tqdm
from SeqEN2.autoencoder.utils import Architecture
from SeqEN2.model.data_loader import read_json
from SeqEN2.model.model import Model
from SeqEN2.utils.custom_arg_parser import TestSessionArgParser
def now():
print(datetime.now().strftime("%Y%m%d%H%M%S"))
EXCEPTIONS = ["AF-A0A1D8PLB7-F1-model_v1_1", "AF-O60086-F1-model_v1_1"]
class TestSession:
root = Path(dirname(__file__)).parent.parent
MIN_SPOT_SIZE = 0.05
def __init__(self):
# setup dirs
self.models_dir = self.root / "models"
if not self.models_dir.exists():
raise NotADirectoryError("models dir is not found.")
self.data_dir = self.root / "data"
if not self.data_dir.exists():
raise NotADirectoryError("data dir is not found.")
self.arch_dir = self.root / "config" / "arch"
if not self.arch_dir.exists():
raise NotADirectoryError("arch dir is not found.")
# model placeholder
self.model = None
self.version = None
self.model_id = None
self.embedding_results = {}
self.all_embeddings = None
# run dir
self.result_dir = None
# attrs
self.smooth_embed = False
def add_model(self, name, arch, version, model_id):
arch = self.load_arch(arch)
self.version = version
self.model_id = model_id
if self.model is None:
self.model = Model(name, arch)
self.model.load_model(version, model_id)
self.result_dir = self.models_dir / self.model.name / "results" / self.version
if not self.result_dir.exists():
self.result_dir.mkdir()
def load_data(self, key, dataset_name):
self.model.eval_only = True
self.model.load_eval_data(key, dataset_name)
def load_arch(self, arch):
arch_path = self.arch_dir / f"{arch}.json"
return Architecture(read_json(str(arch_path)))
def test(self, num_test_items=-1):
self.model.test(num_test_items=num_test_items)
def get_embedding(self, num_test_items=-1, test_items=None):
print("embedding proteins ....")
now()
# embeddings dir
embeddings_dir = (
self.result_dir / f"embeddings_only_{self.model_id}_{self.model.eval_data_loader_name}"
)
if not embeddings_dir.exists():
embeddings_dir.mkdir()
# getting embeddings
self.embedding_results = {}
for item in tqdm(
self.model.get_embedding(num_test_items=num_test_items, test_items=test_items)
):
if item.attrs["name"] in EXCEPTIONS:
continue
if len(self.embedding_results) < 1000:
self.embedding_results[item.attrs["name"]] = item
datafile = embeddings_dir / f"{item.attrs['name']}.pkl.bz2"
item.to_pickle(datafile)
now()
def tsne_embeddings(self, dim=2):
# combine embeddings
print("tsne Embeddings ...")
now()
self.all_embeddings = concat(
[df.assign(pr=key) for key, df in self.embedding_results.items()], ignore_index=True
)
self.all_embeddings["uid"] = self.all_embeddings.apply(
lambda x: f"{x.pr}_{x.unique_id}", axis=1
)
perplexity = sqrt(len(self.all_embeddings["uid"]))
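        # heuristic: scale the t-SNE perplexity with the square root of the
        # number of embedded windows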
model = TSNE(
n_components=dim,
learning_rate="auto",
init="pca",
perplexity=perplexity,
n_iter=10000,
n_jobs=-1,
)
x_embedded = model.fit_transform(self.get_embeddings_from_df())
for i in range(dim):
self.all_embeddings[f"tsne_{i}"] = x_embedded[:, i]
def get_embeddings_from_df(self):
return array(self.all_embeddings["embedding"].values.tolist())
def tsne_embeddings_from_init(self, init, split):
aff50 = openTSNE.affinity.PerplexityBasedNN(
init,
perplexity=100,
n_jobs=32,
random_state=0,
verbose=True,
)
embedding = openTSNE.TSNEEmbedding(
init,
aff50,
n_jobs=32,
verbose=True,
random_state=42,
)
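        # staged optimization: one 500-iteration pass at momentum 0.5, then
        # three 250-iteration passes at momentum 0.8 (exaggeration 12 throughout)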
embedding1 = embedding.optimize(n_iter=500, exaggeration=12, momentum=0.5)
embedding2 = embedding1.optimize(n_iter=250, exaggeration=12, momentum=0.8)
embedding3 = embedding2.optimize(n_iter=250, exaggeration=12, momentum=0.8)
embedding4 = embedding3.optimize(n_iter=250, exaggeration=12, momentum=0.8)
embedding = embedding4[split:]
for i in range(2):
self.all_embeddings[f"tsne_{i}"] = embedding[:, i].tolist()
def tsne_embeddings_2(self, dim=2, return_embeddings=False):
# combine embeddings
print("tsne Embeddings ...")
now()
if self.all_embeddings is None and len(self.embedding_results) > 0:
self.all_embeddings = concat(
[df.assign(pr=key) for key, df in self.embedding_results.items()], ignore_index=True
)
self.all_embeddings["uid"] = self.all_embeddings.apply(
lambda x: f"{x.pr}_{x.unique_id}", axis=1
)
exaggeration = 12
data = self.get_embeddings_from_df()
aff50 = openTSNE.affinity.PerplexityBasedNN(
data,
perplexity=100,
n_jobs=32,
random_state=0,
verbose=True,
)
init = openTSNE.initialization.pca(data, random_state=0)
embedding_standard = openTSNE.TSNE(
exaggeration=exaggeration,
n_jobs=32,
verbose=True,
).fit(affinities=aff50, initialization=init)
for i in range(dim):
self.all_embeddings[f"tsne_{i}"] = embedding_standard[:, i].tolist()
if return_embeddings:
return embedding_standard
else:
return None
def load_embeddings(self, save_aggregate=False):
embeddings_dir = (
self.result_dir / f"embeddings_only_{self.model_id}_{self.model.eval_data_loader_name}"
)
files = glob(f"{embeddings_dir}/*.pkl.bz2")
self.all_embeddings = concat(
[read_pickle(fp).assign(pr=fp.split(".")[0]) for fp in tqdm(files)], ignore_index=True
)
self.all_embeddings["uid"] = self.all_embeddings.apply(
lambda x: f"{x.pr}_{x.unique_id}", axis=1
)
if save_aggregate:
# embeddings dir
embeddings_dir = (
self.result_dir / f"tsne_{self.model_id}_{self.model.eval_data_loader_name}"
)
datafile = embeddings_dir / f"all_tsne_dim_2.pkl.bz2"
self.all_embeddings.to_pickle(datafile)
def plot_embedding_2d(self, auto_open=False, pr_ids=None, text=""):
if pr_ids is None:
pr_ids = []
# embeddings dir
plots_dir = (
self.result_dir / f"embeddings_plots_{self.model_id}_{self.model.eval_data_loader_name}"
)
if not plots_dir.exists():
plots_dir.mkdir()
# embeddings dir
embeddings_dir = (
self.result_dir / f"tsne_{self.model_id}_{self.model.eval_data_loader_name}"
)
datafile = embeddings_dir / f"tsne_dim_2.pkl.bz2"
if not embeddings_dir.exists():
embeddings_dir.mkdir()
if not datafile.exists():
if self.all_embeddings is None:
self.tsne_embeddings_2(dim=2)
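            # marker sizes: offset pred_class by MIN_SPOT_SIZE so rows with
            # pred_class == 0 still render as visible points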
self.all_embeddings["size"] = self.all_embeddings["pred_class"] + self.MIN_SPOT_SIZE
self.all_embeddings.to_pickle(datafile)
else:
self.all_embeddings = read_pickle(datafile)
if len(pr_ids) > 0:
pattern = "|".join(pr_ids)
mask = self.all_embeddings["pr"].str.contains(pattern, case=False, na=False)
self.all_embeddings = self.all_embeddings[mask]
print("tsne Done.")
text = f"_{text}" if text != "" else ""
now()
# calculate embeddings and tsne to dim dimensions
num_samples = len(unique(self.all_embeddings["pr"]))
fig = px.scatter(
self.all_embeddings,
x="tsne_0",
y="tsne_1",
color="pr",
hover_data=[
"w_seq",
"w_cons_seq",
"w_trg_class",
"pred_class",
"w_trg_ss",
"w_cons_ss",
"pr",
],
size="size",
)
html_filename = plots_dir / f"tsne_dim_2_color_by_pr_{num_samples}{text}.html"
plot(fig, filename=str(html_filename), auto_open=auto_open)
####
fig = px.scatter(
self.all_embeddings,
x="tsne_0",
y="tsne_1",
color="w_trg_class",
hover_data=[
"w_seq",
"w_cons_seq",
"w_trg_class",
"pred_class",
"w_trg_ss",
"w_cons_ss",
"pr",
],
size="size",
)
html_filename = plots_dir / f"tsne_dim_2_color_by_act_{num_samples}{text}.html"
plot(fig, filename=str(html_filename), auto_open=auto_open)
####
fig = px.line(
self.all_embeddings,
x="tsne_0",
y="tsne_1",
color="pr",
hover_data=[
"w_seq",
"w_cons_seq",
"w_trg_class",
"pred_class",
"w_trg_ss",
"w_cons_ss",
"pr",
],
markers=True,
render_mode="svg",
line_shape="spline",
)
html_filename = plots_dir / f"tsne_dim_2_color_by_pr_lines_{num_samples}{text}.html"
plot(fig, filename=str(html_filename), auto_open=auto_open)
#####
fig = px.scatter(
self.all_embeddings,
x="tsne_0",
y="tsne_1",
color="w_trg_class",
hover_data=[
"w_seq",
"w_cons_seq",
"w_trg_class",
"pred_class",
"w_trg_ss",
"w_cons_ss",
"pr",
],
)
fig.update_traces(
marker=dict(size=1, line=dict(width=2, color="DarkSlateGrey")),
selector=dict(mode="markers"),
)
html_filename = plots_dir / f"tsne_dim_2_color_by_act_small_{num_samples}{text}.html"
plot(fig, filename=str(html_filename), auto_open=auto_open)
# python ./SeqEN2/sessions/test_session.py -n dummy -mv 202201222143_AAECSS_arch7 -mid 0 -dcl kegg_ndx_ACTp_100 -a arch7 -teb 100 -ge -tsne 2
# python3 ../../../SeqEN2/sessions/test_session.py -n AECSS -mv 202203042153_AECSS_arch66 -mid 24 -dclss single_act_clss_test -a arch66
# -teb -1 -ge
def main(args):
# session
test_session = TestSession()
test_session.add_model(
args["Model Name"], args["Arch"], args["Model Version"], args["Model ID"]
)
test_session.model.embed_only = args["Embed Only"]
test_session.smooth_embed = args["Smooth Embed"]
# load datafiles
if args["Dataset_cl"] != "":
test_session.load_data("cl", args["Dataset_cl"])
elif args["Dataset_ss"] != "":
test_session.load_data("ss", args["Dataset_ss"])
elif args["Dataset_clss"] != "":
test_session.load_data("clss", args["Dataset_clss"])
# tests
# embeddings
if args["Get Embedding"]:
test_session.get_embedding(num_test_items=args["Test Batch"])
if args["tSNE dim"]:
if args["tSNE dim"] == 2:
# pr_ids = args["Pr IDs"].split(",") if args["Pr IDs"] != "" else None
text = args["Text"]
test_session.plot_embedding_2d(text=text)
if __name__ == "__main__":
# parse arguments
parser = TestSessionArgParser()
parsed_args = parser.parsed()
main(parsed_args)
| [
"tqdm.tqdm",
"openTSNE.TSNEEmbedding",
"sklearn.manifold.TSNE",
"openTSNE.TSNE",
"os.path.dirname",
"pandas.read_pickle",
"plotly.express.line",
"SeqEN2.model.model.Model",
"openTSNE.affinity.PerplexityBasedNN",
"SeqEN2.utils.custom_arg_parser.TestSessionArgParser",
"glob.glob",
"plotly.expres... | [((12402, 12424), 'SeqEN2.utils.custom_arg_parser.TestSessionArgParser', 'TestSessionArgParser', ([], {}), '()\n', (12422, 12424), False, 'from SeqEN2.utils.custom_arg_parser import TestSessionArgParser\n'), ((3750, 3859), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': 'dim', 'learning_rate': '"""auto"""', 'init': '"""pca"""', 'perplexity': 'perplexity', 'n_iter': '(10000)', 'n_jobs': '(-1)'}), "(n_components=dim, learning_rate='auto', init='pca', perplexity=\n perplexity, n_iter=10000, n_jobs=-1)\n", (3754, 3859), False, 'from sklearn.manifold import TSNE\n'), ((4284, 4386), 'openTSNE.affinity.PerplexityBasedNN', 'openTSNE.affinity.PerplexityBasedNN', (['init'], {'perplexity': '(100)', 'n_jobs': '(32)', 'random_state': '(0)', 'verbose': '(True)'}), '(init, perplexity=100, n_jobs=32,\n random_state=0, verbose=True)\n', (4319, 4386), False, 'import openTSNE\n'), ((4474, 4551), 'openTSNE.TSNEEmbedding', 'openTSNE.TSNEEmbedding', (['init', 'aff50'], {'n_jobs': '(32)', 'verbose': '(True)', 'random_state': '(42)'}), '(init, aff50, n_jobs=32, verbose=True, random_state=42)\n', (4496, 4551), False, 'import openTSNE\n'), ((5703, 5805), 'openTSNE.affinity.PerplexityBasedNN', 'openTSNE.affinity.PerplexityBasedNN', (['data'], {'perplexity': '(100)', 'n_jobs': '(32)', 'random_state': '(0)', 'verbose': '(True)'}), '(data, perplexity=100, n_jobs=32,\n random_state=0, verbose=True)\n', (5738, 5805), False, 'import openTSNE\n'), ((5888, 5937), 'openTSNE.initialization.pca', 'openTSNE.initialization.pca', (['data'], {'random_state': '(0)'}), '(data, random_state=0)\n', (5915, 5937), False, 'import openTSNE\n'), ((6547, 6582), 'glob.glob', 'glob', (['f"""{embeddings_dir}/*.pkl.bz2"""'], {}), "(f'{embeddings_dir}/*.pkl.bz2')\n", (6551, 6582), False, 'from glob import glob\n'), ((8600, 8784), 'plotly.express.scatter', 'px.scatter', (['self.all_embeddings'], {'x': '"""tsne_0"""', 'y': '"""tsne_1"""', 'color': '"""pr"""', 'hover_data': "['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class', 'w_trg_ss',\n 'w_cons_ss', 'pr']", 'size': '"""size"""'}), "(self.all_embeddings, x='tsne_0', y='tsne_1', color='pr',\n hover_data=['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class',\n 'w_trg_ss', 'w_cons_ss', 'pr'], size='size')\n", (8610, 8784), True, 'import plotly.express as px\n'), ((9169, 9362), 'plotly.express.scatter', 'px.scatter', (['self.all_embeddings'], {'x': '"""tsne_0"""', 'y': '"""tsne_1"""', 'color': '"""w_trg_class"""', 'hover_data': "['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class', 'w_trg_ss',\n 'w_cons_ss', 'pr']", 'size': '"""size"""'}), "(self.all_embeddings, x='tsne_0', y='tsne_1', color='w_trg_class',\n hover_data=['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class',\n 'w_trg_ss', 'w_cons_ss', 'pr'], size='size')\n", (9179, 9362), True, 'import plotly.express as px\n'), ((9748, 9971), 'plotly.express.line', 'px.line', (['self.all_embeddings'], {'x': '"""tsne_0"""', 'y': '"""tsne_1"""', 'color': '"""pr"""', 'hover_data': "['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class', 'w_trg_ss',\n 'w_cons_ss', 'pr']", 'markers': '(True)', 'render_mode': '"""svg"""', 'line_shape': '"""spline"""'}), "(self.all_embeddings, x='tsne_0', y='tsne_1', color='pr', hover_data\n =['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class', 'w_trg_ss',\n 'w_cons_ss', 'pr'], markers=True, render_mode='svg', line_shape='spline')\n", (9755, 9971), True, 'import plotly.express as px\n'), ((10386, 10566), 'plotly.express.scatter', 'px.scatter', (['self.all_embeddings'], {'x': '"""tsne_0"""', 'y': 
'"""tsne_1"""', 'color': '"""w_trg_class"""', 'hover_data': "['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class', 'w_trg_ss',\n 'w_cons_ss', 'pr']"}), "(self.all_embeddings, x='tsne_0', y='tsne_1', color='w_trg_class',\n hover_data=['w_seq', 'w_cons_seq', 'w_trg_class', 'pred_class',\n 'w_trg_ss', 'w_cons_ss', 'pr'])\n", (10396, 10566), True, 'import plotly.express as px\n'), ((1816, 1833), 'SeqEN2.model.model.Model', 'Model', (['name', 'arch'], {}), '(name, arch)\n', (1821, 1833), False, 'from SeqEN2.model.model import Model\n'), ((8123, 8144), 'pandas.read_pickle', 'read_pickle', (['datafile'], {}), '(datafile)\n', (8134, 8144), False, 'from pandas import concat, read_pickle\n'), ((8551, 8584), 'numpy.unique', 'unique', (["self.all_embeddings['pr']"], {}), "(self.all_embeddings['pr'])\n", (8557, 8584), False, 'from numpy import array, sqrt, unique\n'), ((612, 626), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (624, 626), False, 'from datetime import datetime\n'), ((764, 781), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (771, 781), False, 'from os.path import dirname\n'), ((5967, 6032), 'openTSNE.TSNE', 'openTSNE.TSNE', ([], {'exaggeration': 'exaggeration', 'n_jobs': '(32)', 'verbose': '(True)'}), '(exaggeration=exaggeration, n_jobs=32, verbose=True)\n', (5980, 6032), False, 'import openTSNE\n'), ((6688, 6699), 'tqdm.tqdm', 'tqdm', (['files'], {}), '(files)\n', (6692, 6699), False, 'from tqdm import tqdm\n'), ((6634, 6649), 'pandas.read_pickle', 'read_pickle', (['fp'], {}), '(fp)\n', (6645, 6649), False, 'from pandas import concat, read_pickle\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 10:20:15 2017
@author: <NAME>
@email: <EMAIL>
Description
-----------
Small script to compute the energy and shape of the hydrogen atom's
orbitals starting from their wavefunctions.
References
----------
(1) <NAME>. (2012). The Spherical Harmonics. Retrieved
November 16, 2017, from
http://scipp.ucsc.edu/~haber/ph116C/SphericalHarmonics_12.pdf
(2) <NAME>. (2001). Química Cuántica (5th ed.). Madrid: Prentice Hall.
(3) The SciPy Community. (2017, June 21). Scipy.special.sph_harm. Retrieved
November 16, 2017, from https://docs.scipy.org/doc/scipy-0.19.1/reference/
generated/scipy.special.sph_harm.html
"""
import numpy as np
import scipy.special as spe
import scipy.constants as cnts
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Conversion from Cartesian to spherical coordinates
rho = lambda x, y, z: (x**2 + y**2 + z**2)**0.5
theta = lambda x, y: np.arctan(y/x)
phi = lambda x, y, z: np.arctan((x**2 + y**2)**0.5 / z)
# Function to compute the factorial n! (the range must include n itself)
factorial = lambda n: np.prod( np.array( [i for i in range(1, n + 1)] ) )
# *********************************
# Radial coordinate function
# Reference (2) p. 141 eq. (6.100)
# *********************************
def Rho(n, l, r, Z=1):
rho = 2 * Z * r / n
el = rho**l
N_ln = (Z**3 * factorial(n - l - 1) / (n**4 * factorial(l + n)))**0.5
    L_nl = spe.assoc_laguerre(rho, l, n) # Associated Laguerre polynomial
return 2 * el * N_ln * np.exp(-rho/2) * L_nl
# *********************************
# Azimuthal coordinate function
# Reference (3)
# *********************************
def Theta(x, y, m):
t = theta(x, y)
return np.exp(m*t*1j) / (2*np.pi)**0.5
# ******************************
# Polar coordinate function
# Reference (3)
# ******************************
def Phi(x, y, z, l, m):
N_lm = ( (2*l+1)/2 * factorial(l - m)/factorial(l + m))**0.5
p = phi(x, y, z)
    P_lm = spe.lpmv(m, l, np.cos(p)) # Associated Legendre polynomial
return N_lm * P_lm
# ***************************
# Spherical harmonics
# Reference (1) p. 3 eq. (9)
# Reference (3)
# ***************************
def Y_ae(x, y, z, l, m):
ytp = Theta(x, y, m) * Phi(x, y, z, l, m)
#ytp = spe.sph_harm(m, l, phi(x, y, z), theta(x, y))
return np.real(ytp)
# **************************************
# Hydrogen atom wavefunction
# Reference (2) p. 136 eq. (6.61)
# **************************************
def Psi2(x, y, z, n, l, m, Z=1):
r = rho(x, y, z)
return (Rho(n, l, r, Z) * Y_ae(x, y, z, l, m))**2
# *************************************
# Hydrogen atom energy for quantum number n
# Reference (2) p. 140 eq. (6.94)
# *************************************
def E_h(n, Z=1):
a = -(Z**2) * cnts.m_e * cnts.elementary_charge**4
d = ( 8 * cnts.h**2 * cnts.epsilon_0**2 * n**2)
return a / d
# ***************************************
# Function to plot the radial orbital
# in 2 dimensions
# ***************************************
def orbital_r(n, l, Z=1, d=[-1,5,0.1]):
x = np.arange(d[0], d[1], d[2])
vr = np.vectorize(Rho)
y = vr(n, l, x, Z)
y2 = vr(n, l, x, Z)**2
plt.title("Funcion de onda del atomo de hidrogeno $H \cdot$")
plt.plot(x, y, "r--", label="$\Psi$")
plt.plot(x, y2, "b-", label="$\Psi^2$")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
axes = plt.gca()
axes.set_ylim([-0.2, 1])
axes.set_xlim([-0.5, d[1] - 0.5])
plt.grid(True)
plt.show()
# *****************************************
# Function to plot the hydrogen atom
# orbitals on 3 different planes
# *****************************************
def orbital2D(n=1, l=0, m=0, Z=1, d=[-4,4,40]):
x = np.linspace(d[0], d[1], d[2])
y = np.linspace(d[0], d[1], d[2])
z = np.linspace(d[0], d[1], d[2])
Xi, Yi, Zi = np.meshgrid(x, y, z)
vf = np.vectorize(Psi2)
orb = vf(x=Xi, y=Yi, z=Zi, n=n, l=l, m=m, Z=Z)
plano_xz = orb[:,:,int(d[2]/2)]
plano_yz = orb[:,int(d[2]/2),:]
plano_xy = orb[int(d[2]/2),:,:]
fig = plt.figure()
ax = fig.add_subplot(221)
ax.title.set_text("Eje X - plano yz")
plt.contourf(y, z, plano_yz, 20, cmap=cm.bone)
plt.colorbar()
ay = fig.add_subplot(222)
ay.title.set_text("Eje Y - plano xz")
plt.contourf(x, z, plano_xz, 20, cmap=cm.bone)
plt.colorbar()
az = fig.add_subplot(223)
az.title.set_text("Eje Z - plano xy")
plt.contourf(x, y, plano_xy, 20, cmap=cm.bone)
plt.colorbar()
fig.tight_layout()
fig.set_size_inches(w=5.4,h=4.4)
| [
"matplotlib.pyplot.title",
"numpy.meshgrid",
"numpy.vectorize",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.special.assoc_laguerre",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.contourf",
"numpy.real",
... | [((962, 978), 'numpy.arctan', 'np.arctan', (['(y / x)'], {}), '(y / x)\n', (971, 978), True, 'import numpy as np\n'), ((999, 1038), 'numpy.arctan', 'np.arctan', (['((x ** 2 + y ** 2) ** 0.5 / z)'], {}), '((x ** 2 + y ** 2) ** 0.5 / z)\n', (1008, 1038), True, 'import numpy as np\n'), ((1431, 1460), 'scipy.special.assoc_laguerre', 'spe.assoc_laguerre', (['rho', 'l', 'n'], {}), '(rho, l, n)\n', (1449, 1460), True, 'import scipy.special as spe\n'), ((2341, 2353), 'numpy.real', 'np.real', (['ytp'], {}), '(ytp)\n', (2348, 2353), True, 'import numpy as np\n'), ((3149, 3176), 'numpy.arange', 'np.arange', (['d[0]', 'd[1]', 'd[2]'], {}), '(d[0], d[1], d[2])\n', (3158, 3176), True, 'import numpy as np\n'), ((3186, 3203), 'numpy.vectorize', 'np.vectorize', (['Rho'], {}), '(Rho)\n', (3198, 3203), True, 'import numpy as np\n'), ((3258, 3320), 'matplotlib.pyplot.title', 'plt.title', (['"""Funcion de onda del atomo de hidrogeno $H \\\\cdot$"""'], {}), "('Funcion de onda del atomo de hidrogeno $H \\\\cdot$')\n", (3267, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3324, 3362), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r--"""'], {'label': '"""$\\\\Psi$"""'}), "(x, y, 'r--', label='$\\\\Psi$')\n", (3332, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3406), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2', '"""b-"""'], {'label': '"""$\\\\Psi^2$"""'}), "(x, y2, 'b-', label='$\\\\Psi^2$')\n", (3374, 3406), True, 'import matplotlib.pyplot as plt\n'), ((3410, 3472), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (3420, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3483, 3492), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3490, 3492), True, 'import matplotlib.pyplot as plt\n'), ((3564, 3578), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3572, 3578), True, 'import matplotlib.pyplot as plt\n'), ((3583, 3593), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3591, 3593), True, 'import matplotlib.pyplot as plt\n'), ((3825, 3854), 'numpy.linspace', 'np.linspace', (['d[0]', 'd[1]', 'd[2]'], {}), '(d[0], d[1], d[2])\n', (3836, 3854), True, 'import numpy as np\n'), ((3863, 3892), 'numpy.linspace', 'np.linspace', (['d[0]', 'd[1]', 'd[2]'], {}), '(d[0], d[1], d[2])\n', (3874, 3892), True, 'import numpy as np\n'), ((3901, 3930), 'numpy.linspace', 'np.linspace', (['d[0]', 'd[1]', 'd[2]'], {}), '(d[0], d[1], d[2])\n', (3912, 3930), True, 'import numpy as np\n'), ((3948, 3968), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'z'], {}), '(x, y, z)\n', (3959, 3968), True, 'import numpy as np\n'), ((3978, 3996), 'numpy.vectorize', 'np.vectorize', (['Psi2'], {}), '(Psi2)\n', (3990, 3996), True, 'import numpy as np\n'), ((4166, 4178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4176, 4178), True, 'import matplotlib.pyplot as plt\n'), ((4260, 4306), 'matplotlib.pyplot.contourf', 'plt.contourf', (['y', 'z', 'plano_yz', '(20)'], {'cmap': 'cm.bone'}), '(y, z, plano_yz, 20, cmap=cm.bone)\n', (4272, 4306), True, 'import matplotlib.pyplot as plt\n'), ((4311, 4325), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4323, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4407, 4453), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'z', 'plano_xz', '(20)'], {'cmap': 'cm.bone'}), '(x, z, plano_xz, 20, cmap=cm.bone)\n', (4419, 4453), True, 'import matplotlib.pyplot as plt\n'), ((4458, 4472), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4470, 4472), True, 'import matplotlib.pyplot as plt\n'), ((4554, 4600), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'plano_xy', '(20)'], {'cmap': 'cm.bone'}), '(x, y, plano_xy, 20, cmap=cm.bone)\n', (4566, 4600), True, 'import matplotlib.pyplot as plt\n'), ((4605, 4619), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4617, 4619), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1740), 'numpy.exp', 'np.exp', (['(m * t * 1.0j)'], {}), '(m * t * 1.0j)\n', (1726, 1740), True, 'import numpy as np\n'), ((2005, 2014), 'numpy.cos', 'np.cos', (['p'], {}), '(p)\n', (2011, 2014), True, 'import numpy as np\n'), ((1521, 1537), 'numpy.exp', 'np.exp', (['(-rho / 2)'], {}), '(-rho / 2)\n', (1527, 1537), True, 'import numpy as np\n')] |
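A quick numerical check of the `E_h(n)` formula above (an illustrative addition, not part of the original script): the n = 1 level should land near the hydrogen ground-state energy of about -13.6 eV, and the levels should scale as 1/n².

```python
import scipy.constants as cnts

def E_h(n, Z=1):  # copy of the energy formula from the script above
    a = -(Z**2) * cnts.m_e * cnts.elementary_charge**4
    d = 8 * cnts.h**2 * cnts.epsilon_0**2 * n**2
    return a / d

for n in (1, 2, 3):
    # Convert joules to electron-volts; expect roughly -13.6, -3.40, -1.51 eV.
    print(n, E_h(n) / cnts.elementary_charge)
```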
import numpy as np
class SquaredL2ErrorMeasure:
def __init__(self, inputs: int):
self._inputs = inputs
    def forward(self, input, target):
        # Sum of squared residuals between input and target.
        r = input - target
        return np.sum(r*r)
    def backward(self, input, target):
        # Gradient of the squared L2 error with respect to the input.
        return 2*(input - target)
| [
"numpy.sum"
] | [((197, 210), 'numpy.sum', 'np.sum', (['(r * r)'], {}), '(r * r)\n', (203, 210), True, 'import numpy as np\n')] |
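A standard sanity test for a loss layer like the one above is to compare `backward()` against a finite-difference gradient of `forward()`. This hypothetical check assumes the `SquaredL2ErrorMeasure` class from the entry above is in scope:

```python
import numpy as np

measure = SquaredL2ErrorMeasure(inputs=3)
x = np.array([0.5, -1.0, 2.0])
t = np.array([0.0, 0.0, 1.0])
analytic = measure.backward(x, t)

eps = 1e-6
numeric = np.zeros_like(x)
for i in range(x.size):
    step = np.zeros_like(x)
    step[i] = eps  # perturb one coordinate at a time
    numeric[i] = (measure.forward(x + step, t) - measure.forward(x - step, t)) / (2 * eps)
print(np.allclose(analytic, numeric))  # True
```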
# coding=utf-8
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
plt.rcParams["figure.figsize"] = [6,6]
matplotlib.rcParams[u'font.sans-serif'] = ['simhei']
flows = []
if os.path.exists('flows.npy'):
flows = np.load('flows.npy')
else:
# Read size from local file
filename = 'attempt.csv'
with open(filename) as ifile:
reader = csv.DictReader(ifile)
for row in reader:
# Pick up the size
if (row['shuffleTime']):
size = int(row['sortTime']) - int(row['shuffleTime'])
flows.append(size)
flows = np.asarray(flows)
np.save('flows.npy', flows)
# Normalize flow sizes
print('Total number of flows = %d' % len(flows))
flows = np.sort(flows)
normalize_flows = (flows - np.min(flows)).astype(np.float64) / (np.max(flows) - np.min(flows))
# flows = flows[flows < 0.8]
# flows = (flows - np.min(flows)) / (np.max(flows) - np.min(flows))
x = normalize_flows
y = np.arange(0, len(flows), dtype=np.float32)
y = y / float(len(flows))
#plt.xlim((0, 1))
#plt.ylim((0, 1))
plt.plot(x, y, '-')
#plt.ylabel(u'流族数目占比(CDF)')
#plt.xlabel(u'流族大小(Normalized)')
plt.show()
x = np.log10(flows)
total = np.sum(x)
y = [x[0]]
for idx, val in enumerate(x[1:]):
y.append(y[idx] + val)
y = np.asarray(y) / total
plt.close('all')
plt.plot(x, y, '-')
plt.ylabel('Ratio of total coflow size')
plt.xticks(np.arange(8), [r'$10^%d$'%i for i in range(8)])
plt.show()
| [
"numpy.load",
"numpy.save",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.asarray",
"csv.DictReader",
"os.path.exists",
"numpy.sort",
"numpy.max",
"numpy.min",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"numpy.log10"
] | [((214, 241), 'os.path.exists', 'os.path.exists', (['"""flows.npy"""'], {}), "('flows.npy')\n", (228, 241), False, 'import os\n'), ((578, 605), 'numpy.save', 'np.save', (['"""flows.npy"""', 'flows'], {}), "('flows.npy', flows)\n", (585, 605), True, 'import numpy as np\n'), ((687, 701), 'numpy.sort', 'np.sort', (['flows'], {}), '(flows)\n', (694, 701), True, 'import numpy as np\n'), ((1023, 1042), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-"""'], {}), "(x, y, '-')\n", (1031, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1104, 1114), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1112, 1114), True, 'import matplotlib.pyplot as plt\n'), ((1121, 1136), 'numpy.log10', 'np.log10', (['flows'], {}), '(flows)\n', (1129, 1136), True, 'import numpy as np\n'), ((1145, 1154), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (1151, 1154), True, 'import numpy as np\n'), ((1250, 1266), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1259, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1267, 1286), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-"""'], {}), "(x, y, '-')\n", (1275, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1327), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ratio of total coflow size"""'], {}), "('Ratio of total coflow size')\n", (1297, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1395, 1397), True, 'import matplotlib.pyplot as plt\n'), ((252, 272), 'numpy.load', 'np.load', (['"""flows.npy"""'], {}), "('flows.npy')\n", (259, 272), True, 'import numpy as np\n'), ((559, 576), 'numpy.asarray', 'np.asarray', (['flows'], {}), '(flows)\n', (569, 576), True, 'import numpy as np\n'), ((1228, 1241), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1238, 1241), True, 'import numpy as np\n'), ((1339, 1351), 'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (1348, 1351), True, 'import numpy as np\n'), ((376, 397), 'csv.DictReader', 'csv.DictReader', (['ifile'], {}), '(ifile)\n', (390, 397), False, 'import csv\n'), ((764, 777), 'numpy.max', 'np.max', (['flows'], {}), '(flows)\n', (770, 777), True, 'import numpy as np\n'), ((780, 793), 'numpy.min', 'np.min', (['flows'], {}), '(flows)\n', (786, 793), True, 'import numpy as np\n'), ((729, 742), 'numpy.min', 'np.min', (['flows'], {}), '(flows)\n', (735, 742), True, 'import numpy as np\n')] |
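The running-sum loop in the second half of the script can be expressed with `numpy.cumsum`. A minimal sketch on synthetic data (the real `flows.npy`/`attempt.csv` inputs are not assumed here):

```python
import numpy as np

flows = np.sort(np.random.lognormal(mean=10.0, sigma=2.0, size=1000))
x = np.log10(flows)
y = np.cumsum(x) / np.sum(x)  # same quantity as the manual accumulation loop
print(y[-1])  # 1.0
```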
import julia
from julia import DPMMSubClusters
import numpy as np
class prior:
def to_julia_prior(self):
pass
def get_type(self):
pass
def to_JSON(self):
pass
class niw(prior):
def __init__(self, kappa, mu, nu, psi):
        if nu < len(mu):
            raise Exception('nu should be at least the Dim')
self.kappa = kappa
self.mu = mu
self.nu = nu
self.psi = psi
def to_julia_prior(self):
return DPMMSubClusters.niw_hyperparams(self.kappa,self.mu,self.nu, self.psi)
def get_type(self):
return 'Gaussian'
def to_JSON(self):
j = {'k': self.kappa,
'm': self.mu.tolist(),
'v': self.nu,
'psi': self.psi.tolist()
}
return j
class multinomial(prior):
def __init__(self, alpha,dim = 1):
if isinstance(alpha,np.ndarray):
self.alpha = alpha
else:
self.alpha = np.ones(dim)*alpha
def to_julia_prior(self):
return DPMMSubClusters.multinomial_hyper(self.alpha)
def get_type(self):
return 'Multinomial'
def to_JSON(self):
j = {'alpha': self.alpha.tolist()
}
return j | [
"julia.DPMMSubClusters.niw_hyperparams",
"julia.DPMMSubClusters.multinomial_hyper",
"numpy.ones"
] | [((491, 562), 'julia.DPMMSubClusters.niw_hyperparams', 'DPMMSubClusters.niw_hyperparams', (['self.kappa', 'self.mu', 'self.nu', 'self.psi'], {}), '(self.kappa, self.mu, self.nu, self.psi)\n', (522, 562), False, 'from julia import DPMMSubClusters\n'), ((1058, 1103), 'julia.DPMMSubClusters.multinomial_hyper', 'DPMMSubClusters.multinomial_hyper', (['self.alpha'], {}), '(self.alpha)\n', (1091, 1103), False, 'from julia import DPMMSubClusters\n'), ((975, 987), 'numpy.ones', 'np.ones', (['dim'], {}), '(dim)\n', (982, 987), True, 'import numpy as np\n')] |
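Hypothetical usage of the prior classes above. `to_julia_prior` needs a working Julia/DPMMSubClusters installation (and the module's top-level Julia imports must succeed), so this sketch only exercises the pure-Python paths and assumes the classes are in scope:

```python
import numpy as np

p = niw(kappa=1.0, mu=np.zeros(2), nu=5.0, psi=np.eye(2))
print(p.get_type())  # 'Gaussian'
print(p.to_JSON())   # {'k': 1.0, 'm': [0.0, 0.0], 'v': 5.0, 'psi': [[1.0, 0.0], [0.0, 1.0]]}

m = multinomial(alpha=1.0, dim=4)
print(m.to_JSON())    # {'alpha': [1.0, 1.0, 1.0, 1.0]}
```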
"""Preprocess the face images
"""
import pickle
import argparse
import glob
import logging
import os
import sys
import numpy as np
import cv2
import dlib
# yapf: disable
# Copied from https://github.com/cmusatyalab/openface/blob/master/openface/align_dlib.py
TEMPLATE = np.float32([
(0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943),
(0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066),
(0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778),
(0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149),
(0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107),
(0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279),
(0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421),
(0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744),
(0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053),
(0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323),
(0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851),
(0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854),
(0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114),
(0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193),
(0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758),
(0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668),
(0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208),
(0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656),
(0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002),
(0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083),
(0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225),
(0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267),
(0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656),
(0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172),
(0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073),
(0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768),
(0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516),
(0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972),
(0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792),
(0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727),
(0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612),
(0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691),
(0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626),
(0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)])
INV_TEMPLATE = np.float32([
(-0.04099179660567834, -0.008425234314031194, 2.575498465013183),
(0.04062510634554352, -0.009678089746831375, -1.2534351452524177),
(0.0003666902601348179, 0.01810332406086298, -0.32206331976076663)])
# yapf: enable
TPL_MIN, TPL_MAX = np.min(TEMPLATE, axis=0), np.max(TEMPLATE, axis=0)
MINMAX_TEMPLATE = (TEMPLATE - TPL_MIN) / (TPL_MAX - TPL_MIN)
#: Landmark indices.
INNER_EYES_AND_BOTTOM_LIP = [39, 42, 57]
OUTER_EYES_AND_NOSE = [36, 45, 33]
class CropAndAlign:
def __init__(self, face_predictor_path, landmark_indices, logger):
self.detector = dlib.get_frontal_face_detector()
self.land_mark_predictor = dlib.shape_predictor(face_predictor_path)
self.land_mark_indices = np.array(landmark_indices)
self.logger = logger
def find_all_bounding_boxes(self, rgb_img):
try:
# Upsample the image once
return self.detector(rgb_img, 1)
except Exception as e:
self.logger.warn(e)
return []
def get_largest_bounding_box(self, rgb_img):
faces = self.find_all_bounding_boxes(rgb_img)
if len(faces) > 0:
return max(faces, key=lambda box: box.width() * box.height())
else:
self.logger.warn('No face was found in image')
return None
def find_landmarks(self, rgb_img, box):
points = self.land_mark_predictor(rgb_img, box)
return np.float32(list(map(lambda p: (p.x, p.y), points.parts())))
def align_one_face(self, rgb_img, box, out_size):
landmarks = self.find_landmarks(rgb_img, box)
affine_transform = cv2.getAffineTransform(
landmarks[self.land_mark_indices],
out_size * MINMAX_TEMPLATE[self.land_mark_indices])
return cv2.warpAffine(rgb_img, affine_transform, (out_size, out_size))
def align_biggest_face(self, rgb_img, out_size):
box = self.get_largest_bounding_box(rgb_img)
if box is not None:
return self.align_one_face(rgb_img, box, out_size)
else:
return None
def align_all_faces(self, rgb_img, out_size):
boxes = self.find_all_bounding_boxes(rgb_img)
return [self.align_one_face(rgb_img, box, out_size) for box in boxes]
def process_image_(aligner, in_path, out_path, dim):
logger.info('Processing %s' % in_path)
    image = cv2.imread(in_path)
    if image is None:
        logger.error('Failed to load image %s' % in_path)
        return
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
aligned = aligner.align_biggest_face(image, dim)
if aligned is None:
logger.warn('No face found in %s' % in_path)
else:
aligned = cv2.cvtColor(aligned, cv2.COLOR_BGR2RGB)
cv2.imwrite(out_path, aligned)
del aligned
def preprocess_dataset(input_dir,
output_dir,
out_dim,
face_predictor_path,
logger,
landmark_indices=INNER_EYES_AND_BOTTOM_LIP):
if not os.path.exists(input_dir):
logger.error('Data dir %s doesn\'t exist' % input_dir)
return
elif not os.path.exists(face_predictor_path):
logger.error('Predictor %s doesn\'t exist' % face_predictor_path)
return
aligner = CropAndAlign(face_predictor_path, landmark_indices, logger)
global global_aligner
global_aligner = aligner
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for image_dir in os.listdir(input_dir):
        image_output_dir = os.path.join(output_dir, os.path.basename(image_dir))
if not os.path.exists(image_output_dir):
os.makedirs(image_output_dir)
image_paths = glob.glob(os.path.join(input_dir, '**/*.jpg'))
for image_path in image_paths:
image_output_dir = os.path.join(
output_dir, os.path.basename(os.path.dirname(image_path)))
output_path = os.path.join(image_output_dir,
os.path.basename(image_path))
process_image_(aligner, image_path, output_path, out_dim)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser('Dataset preprocessor')
parser.add_argument('--input_dir', type=str, required=True)
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--face_predictor', type=str, required=True)
parser.add_argument('--output_dim', type=int, default=72)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
preprocess_dataset(args.input_dir, args.output_dir, args.output_dim,
args.face_predictor, logger)
| [
"argparse.ArgumentParser",
"cv2.warpAffine",
"dlib.shape_predictor",
"os.path.join",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.dirname",
"os.path.exists",
"numpy.max",
"os.path.basename",
"numpy.min",
"dlib.get_frontal_face_detector",
"os.listdir",
"os.makedirs",
"logging.basicConfig",
"... | [((274, 2745), 'numpy.float32', 'np.float32', (['[(0.0792396913815, 0.339223741112), (0.0829219487236, 0.456955367943), (\n 0.0967927109165, 0.575648016728), (0.122141515615, 0.691921601066), (\n 0.168687863544, 0.800341263616), (0.239789390707, 0.895732504778), (\n 0.325662452515, 0.977068762493), (0.422318282013, 1.04329000149), (\n 0.531777802068, 1.06080371126), (0.641296298053, 1.03981924107), (\n 0.738105872266, 0.972268833998), (0.824444363295, 0.889624082279), (\n 0.894792677532, 0.792494155836), (0.939395486253, 0.681546643421), (\n 0.96111933829, 0.562238253072), (0.970579841181, 0.441758925744), (\n 0.971193274221, 0.322118743967), (0.163846223133, 0.249151738053), (\n 0.21780354657, 0.204255863861), (0.291299351124, 0.192367318323), (\n 0.367460241458, 0.203582210627), (0.4392945113, 0.233135599851), (\n 0.586445962425, 0.228141644834), (0.660152671635, 0.195923841854), (\n 0.737466449096, 0.182360984545), (0.813236546239, 0.192828009114), (\n 0.8707571886, 0.235293377042), (0.51534533827, 0.31863546193), (\n 0.516221448289, 0.396200446263), (0.517118861835, 0.473797687758), (\n 0.51816430343, 0.553157797772), (0.433701156035, 0.604054457668), (\n 0.475501237769, 0.62076344024), (0.520712933176, 0.634268222208), (\n 0.565874114041, 0.618796581487), (0.607054002672, 0.60157671656), (\n 0.252418718401, 0.331052263829), (0.298663015648, 0.302646354002), (\n 0.355749724218, 0.303020650651), (0.403718978315, 0.33867711083), (\n 0.352507175597, 0.349987615384), (0.296791759886, 0.350478978225), (\n 0.631326076346, 0.334136672344), (0.679073381078, 0.29645404267), (\n 0.73597236153, 0.294721285802), (0.782865376271, 0.321305281656), (\n 0.740312274764, 0.341849376713), (0.68499850091, 0.343734332172), (\n 0.353167761422, 0.746189164237), (0.414587777921, 0.719053835073), (\n 0.477677654595, 0.706835892494), (0.522732900812, 0.717092275768), (\n 0.569832064287, 0.705414478982), (0.635195811927, 0.71565572516), (\n 0.69951672331, 0.739419187253), (0.639447159575, 0.805236879972), (\n 0.576410514055, 0.835436670169), (0.525398405766, 0.841706377792), (\n 0.47641545769, 0.837505914975), (0.41379548902, 0.810045601727), (\n 0.380084785646, 0.749979603086), (0.477955996282, 0.74513234612), (\n 0.523389793327, 0.748924302636), (0.571057789237, 0.74332894691), (\n 0.672409137852, 0.744177032192), (0.572539621444, 0.776609286626), (\n 0.5240106503, 0.783370783245), (0.477561227414, 0.778476346951)]'], {}), '([(0.0792396913815, 0.339223741112), (0.0829219487236, \n 0.456955367943), (0.0967927109165, 0.575648016728), (0.122141515615, \n 0.691921601066), (0.168687863544, 0.800341263616), (0.239789390707, \n 0.895732504778), (0.325662452515, 0.977068762493), (0.422318282013, \n 1.04329000149), (0.531777802068, 1.06080371126), (0.641296298053, \n 1.03981924107), (0.738105872266, 0.972268833998), (0.824444363295, \n 0.889624082279), (0.894792677532, 0.792494155836), (0.939395486253, \n 0.681546643421), (0.96111933829, 0.562238253072), (0.970579841181, \n 0.441758925744), (0.971193274221, 0.322118743967), (0.163846223133, \n 0.249151738053), (0.21780354657, 0.204255863861), (0.291299351124, \n 0.192367318323), (0.367460241458, 0.203582210627), (0.4392945113, \n 0.233135599851), (0.586445962425, 0.228141644834), (0.660152671635, \n 0.195923841854), (0.737466449096, 0.182360984545), (0.813236546239, \n 0.192828009114), (0.8707571886, 0.235293377042), (0.51534533827, \n 0.31863546193), (0.516221448289, 0.396200446263), (0.517118861835, \n 0.473797687758), (0.51816430343, 0.553157797772), 
(0.433701156035, \n 0.604054457668), (0.475501237769, 0.62076344024), (0.520712933176, \n 0.634268222208), (0.565874114041, 0.618796581487), (0.607054002672, \n 0.60157671656), (0.252418718401, 0.331052263829), (0.298663015648, \n 0.302646354002), (0.355749724218, 0.303020650651), (0.403718978315, \n 0.33867711083), (0.352507175597, 0.349987615384), (0.296791759886, \n 0.350478978225), (0.631326076346, 0.334136672344), (0.679073381078, \n 0.29645404267), (0.73597236153, 0.294721285802), (0.782865376271, \n 0.321305281656), (0.740312274764, 0.341849376713), (0.68499850091, \n 0.343734332172), (0.353167761422, 0.746189164237), (0.414587777921, \n 0.719053835073), (0.477677654595, 0.706835892494), (0.522732900812, \n 0.717092275768), (0.569832064287, 0.705414478982), (0.635195811927, \n 0.71565572516), (0.69951672331, 0.739419187253), (0.639447159575, \n 0.805236879972), (0.576410514055, 0.835436670169), (0.525398405766, \n 0.841706377792), (0.47641545769, 0.837505914975), (0.41379548902, \n 0.810045601727), (0.380084785646, 0.749979603086), (0.477955996282, \n 0.74513234612), (0.523389793327, 0.748924302636), (0.571057789237, \n 0.74332894691), (0.672409137852, 0.744177032192), (0.572539621444, \n 0.776609286626), (0.5240106503, 0.783370783245), (0.477561227414, \n 0.778476346951)])\n', (284, 2745), True, 'import numpy as np\n'), ((2729, 2952), 'numpy.float32', 'np.float32', (['[(-0.04099179660567834, -0.008425234314031194, 2.575498465013183), (\n 0.04062510634554352, -0.009678089746831375, -1.2534351452524177), (\n 0.0003666902601348179, 0.01810332406086298, -0.32206331976076663)]'], {}), '([(-0.04099179660567834, -0.008425234314031194, 2.575498465013183\n ), (0.04062510634554352, -0.009678089746831375, -1.2534351452524177), (\n 0.0003666902601348179, 0.01810332406086298, -0.32206331976076663)])\n', (2739, 2952), True, 'import numpy as np\n'), ((2992, 3016), 'numpy.min', 'np.min', (['TEMPLATE'], {'axis': '(0)'}), '(TEMPLATE, axis=0)\n', (2998, 3016), True, 'import numpy as np\n'), ((3018, 3042), 'numpy.max', 'np.max', (['TEMPLATE'], {'axis': '(0)'}), '(TEMPLATE, axis=0)\n', (3024, 3042), True, 'import numpy as np\n'), ((5104, 5123), 'cv2.imread', 'cv2.imread', (['in_path'], {}), '(in_path)\n', (5114, 5123), False, 'import cv2\n'), ((5136, 5174), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (5148, 5174), False, 'import cv2\n'), ((6254, 6275), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (6264, 6275), False, 'import os\n'), ((6911, 6950), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (6930, 6950), False, 'import logging\n'), ((6964, 6991), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6981, 6991), False, 'import logging\n'), ((7005, 7052), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Dataset preprocessor"""'], {}), "('Dataset preprocessor')\n", (7028, 7052), False, 'import argparse\n'), ((3319, 3351), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (3349, 3351), False, 'import dlib\n'), ((3387, 3428), 'dlib.shape_predictor', 'dlib.shape_predictor', (['face_predictor_path'], {}), '(face_predictor_path)\n', (3407, 3428), False, 'import dlib\n'), ((3462, 3488), 'numpy.array', 'np.array', (['landmark_indices'], {}), '(landmark_indices)\n', (3470, 3488), True, 'import numpy as np\n'), ((4362, 4475), 'cv2.getAffineTransform', 'cv2.getAffineTransform', 
(['landmarks[self.land_mark_indices]', '(out_size * MINMAX_TEMPLATE[self.land_mark_indices])'], {}), '(landmarks[self.land_mark_indices], out_size *\n MINMAX_TEMPLATE[self.land_mark_indices])\n', (4384, 4475), False, 'import cv2\n'), ((4512, 4575), 'cv2.warpAffine', 'cv2.warpAffine', (['rgb_img', 'affine_transform', '(out_size, out_size)'], {}), '(rgb_img, affine_transform, (out_size, out_size))\n', (4526, 4575), False, 'import cv2\n'), ((5428, 5468), 'cv2.cvtColor', 'cv2.cvtColor', (['aligned', 'cv2.COLOR_BGR2RGB'], {}), '(aligned, cv2.COLOR_BGR2RGB)\n', (5440, 5468), False, 'import cv2\n'), ((5477, 5507), 'cv2.imwrite', 'cv2.imwrite', (['out_path', 'aligned'], {}), '(out_path, aligned)\n', (5488, 5507), False, 'import cv2\n'), ((5785, 5810), 'os.path.exists', 'os.path.exists', (['input_dir'], {}), '(input_dir)\n', (5799, 5810), False, 'import os\n'), ((6172, 6198), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (6186, 6198), False, 'import os\n'), ((6208, 6231), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (6219, 6231), False, 'import os\n'), ((6509, 6544), 'os.path.join', 'os.path.join', (['input_dir', '"""**/*.jpg"""'], {}), "(input_dir, '**/*.jpg')\n", (6521, 6544), False, 'import os\n'), ((5903, 5938), 'os.path.exists', 'os.path.exists', (['face_predictor_path'], {}), '(face_predictor_path)\n', (5917, 5938), False, 'import os\n'), ((6404, 6436), 'os.path.exists', 'os.path.exists', (['image_output_dir'], {}), '(image_output_dir)\n', (6418, 6436), False, 'import os\n'), ((6450, 6479), 'os.makedirs', 'os.makedirs', (['image_output_dir'], {}), '(image_output_dir)\n', (6461, 6479), False, 'import os\n'), ((6782, 6810), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (6798, 6810), False, 'import os\n'), ((6359, 6386), 'os.path.basename', 'os.path.basename', (['image_dir'], {}), '(image_dir)\n', (6375, 6386), False, 'import os\n'), ((6664, 6691), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (6679, 6691), False, 'import os\n')] |
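The core of `align_one_face` above is a three-point affine fit: three detected landmarks are mapped onto the scaled template and the image is warped with the resulting matrix. A self-contained sketch with made-up landmark coordinates (the template fractions below are illustrative, not the real `MINMAX_TEMPLATE` values):

```python
import cv2
import numpy as np

out_size = 72
src_pts = np.float32([[120, 140], [200, 138], [160, 200]])  # fake eye/eye/lip landmarks
dst_pts = out_size * np.float32([[0.3, 0.35], [0.7, 0.35], [0.5, 0.75]])
M = cv2.getAffineTransform(src_pts, dst_pts)  # exact fit through 3 point pairs
img = np.zeros((300, 300, 3), dtype=np.uint8)   # placeholder image
aligned = cv2.warpAffine(img, M, (out_size, out_size))
print(aligned.shape)  # (72, 72, 3)
```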
# <NAME> 2014-2020
# mlxtend Machine Learning Library Extensions
#
# A function for loading the open-source MNIST.
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
import os
this_dir, this_filename = os.path.split(__file__)
DATA_PATH = os.path.join(this_dir, "data", "mnist_5k.csv.gz")
def mnist_data():
"""5000 samples from the MNIST handwritten digits dataset.
Data Source : http://yann.lecun.com/exdb/mnist/
Returns
--------
X, y : [n_samples, n_features], [n_class_labels]
X is the feature matrix with 5000 image samples as rows,
each row consists of 28x28 pixels that were unrolled into
784 pixel feature vectors.
y contains the 10 unique class labels 0-9.
Examples
-----------
For usage examples, please see
http://rasbt.github.io/mlxtend/user_guide/data/mnist_data/
"""
tmp = np.genfromtxt(fname=DATA_PATH, delimiter=',')
X, y = tmp[:, :-1], tmp[:, -1]
y = y.astype(int)
return X, y
| [
"os.path.split",
"os.path.join",
"numpy.genfromtxt"
] | [((225, 248), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'import os\n'), ((261, 310), 'os.path.join', 'os.path.join', (['this_dir', '"""data"""', '"""mnist_5k.csv.gz"""'], {}), "(this_dir, 'data', 'mnist_5k.csv.gz')\n", (273, 310), False, 'import os\n'), ((890, 935), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'DATA_PATH', 'delimiter': '""","""'}), "(fname=DATA_PATH, delimiter=',')\n", (903, 935), True, 'import numpy as np\n')] |
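Hypothetical usage of the loader above, assuming the bundled `mnist_5k.csv.gz` ships with the package as the module expects:

```python
X, y = mnist_data()
print(X.shape, y.shape)          # (5000, 784) (5000,)
image0 = X[0].reshape(28, 28)    # roll one 784-pixel row back into a 28x28 image
print(sorted(set(y.tolist())))   # the ten class labels 0..9
```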
import os
import cv2
import numpy as np
import math
import dlib
predictor_path = "shape_predictor_68_face_landmarks.dat"
image_path = 'women/'
def detect_landmarks(image, filepath):
# obtain detector and predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
# convert image to numpy array
numpy_image = np.asanyarray(image)
numpy_image.flags.writeable = True
# output list
face_landmark_tuples = []
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should up sample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
detected_faces = detector(numpy_image, 1)
print("Number of faces detected: {}".format(len(detected_faces)))
points = []
for k, rect in enumerate(detected_faces):
# Get the landmarks/parts for the face in box rect.
shape = predictor(numpy_image, rect)
        for index in range(shape.num_parts):
            part = shape.part(index)
            points.append((part.x, part.y))
print("created points for " + filepath)
return points
# Create landmarks for each image in folder.
def create_landmarks():
landmarks_array = []
# List all files in the directory and read points from text files one by one
for filePath in sorted(os.listdir(image_path)):
if filePath.endswith(".jpg"):
# Read image found.
image = cv2.imread(os.path.join(image_path, filePath))
# detect faces and landmarks
landmarks_array.append(detect_landmarks(image, filePath))
return landmarks_array
# Read all jpg images in folder.
def read_images():
# Create array of array of images.
images_array = []
# List all files in the directory and read points from text files one by one
for filePath in sorted(os.listdir(image_path)):
if filePath.endswith(".jpg"):
# Read image found.
read_image = cv2.imread(os.path.join(image_path, filePath))
# Convert to floating point
read_image = np.float32(read_image) / 255.0
# Add to array of images
images_array.append(read_image)
return images_array
# Compute similarity transform given two sets of two points.
# OpenCV requires 3 pairs of corresponding points.
# We are faking the third one.
def similarity_transform(in_points, out_points):
s60 = math.sin(60 * math.pi / 180)
c60 = math.cos(60 * math.pi / 180)
in_pts = np.copy(in_points).tolist()
out_pts = np.copy(out_points).tolist()
xin = c60 * (in_pts[0][0] - in_pts[1][0]) - s60 * (in_pts[0][1] - in_pts[1][1]) + in_pts[1][0]
yin = s60 * (in_pts[0][0] - in_pts[1][0]) + c60 * (in_pts[0][1] - in_pts[1][1]) + in_pts[1][1]
    in_pts.append([int(xin), int(yin)])
xout = c60 * (out_pts[0][0] - out_pts[1][0]) - s60 * (out_pts[0][1] - out_pts[1][1]) + out_pts[1][0]
yout = s60 * (out_pts[0][0] - out_pts[1][0]) + c60 * (out_pts[0][1] - out_pts[1][1]) + out_pts[1][1]
    out_pts.append([int(xout), int(yout)])
return cv2.estimateRigidTransform(np.array([in_pts]), np.array([out_pts]), False)
# Check if a point is inside a rectangle
def rect_contains(rect, point):
if point[0] < rect[0]:
return False
elif point[1] < rect[1]:
return False
elif point[0] > rect[2]:
return False
elif point[1] > rect[3]:
return False
return True
# Calculate Delaunay triangle
def calculate_delaunay_triangles(rect, points):
# Create sub_div
sub_div = cv2.Subdiv2D(rect)
# Insert points into sub_div
for p in points:
sub_div.insert((p[0], p[1]))
# List of triangles. Each triangle is a list of 3 points ( 6 numbers )
triangle_list = sub_div.getTriangleList()
# Find the indices of triangles in the points array
delaunay_tri = []
for t in triangle_list:
pt = [(t[0], t[1]), (t[2], t[3]), (t[4], t[5])]
pt1 = (t[0], t[1])
pt2 = (t[2], t[3])
pt3 = (t[4], t[5])
if rect_contains(rect, pt1) and rect_contains(rect, pt2) and rect_contains(rect, pt3):
ind = []
            for j in range(0, 3):
                for k in range(0, len(points)):
if abs(pt[j][0] - points[k][0]) < 1.0 and abs(pt[j][1] - points[k][1]) < 1.0:
ind.append(k)
if len(ind) == 3:
delaunay_tri.append((ind[0], ind[1], ind[2]))
return delaunay_tri
def constrain_point(p, w, h):
p = (min(max(p[0], 0), w - 1), min(max(p[1], 0), h - 1))
return p
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def apply_affine_transform(src, src_tri, dst_tri, size):
# Given a pair of triangles, find the affine transform.
warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
# Apply the Affine Transform just found to the src image
dst = cv2.warpAffine(src, warp_mat, (size[0], size[1]), None,
flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
return dst
# Warps and alpha blends triangular regions from img1 and img2 to img
def warp_triangle(img1, img2, t1, t2):
# Find bounding rectangle for each triangle
r1 = cv2.boundingRect(np.float32([t1]))
r2 = cv2.boundingRect(np.float32([t2]))
# Offset points by left top corner of the respective rectangles
t1_rect = []
t2_rect = []
t2_rect_int = []
    for index in range(0, 3):
t1_rect.append(((t1[index][0] - r1[0]), (t1[index][1] - r1[1])))
t2_rect.append(((t2[index][0] - r2[0]), (t2[index][1] - r2[1])))
t2_rect_int.append(((t2[index][0] - r2[0]), (t2[index][1] - r2[1])))
# Get mask by filling triangle
mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
cv2.fillConvexPoly(mask, np.int32(t2_rect_int), (1.0, 1.0, 1.0), 16, 0)
# Apply warpImage to small rectangular patches
img1_rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
size = (r2[2], r2[3])
img2_rect = apply_affine_transform(img1_rect, t1_rect, t2_rect, size)
img2_rect = img2_rect * mask
# Copy triangular region of the rectangular patch to the output image
img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] =\
img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] * ((1.0, 1.0, 1.0) - mask)
img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2_rect
if __name__ == '__main__':
# Dimensions of output image
w = 600
h = 600
# Read landmarks for all images
allPoints = create_landmarks()
# Read all images
images = read_images()
# Eye corners
    eyecornerDst = [(int(0.3 * w), int(h / 3)),
                    (int(0.7 * w), int(h / 3))]
imagesNorm = []
pointsNorm = []
# Add boundary points for delaunay triangulation
boundaryPts = np.array(
[(0, 0), (w / 2, 0), (w - 1, 0), (w - 1, h / 2), (w - 1, h - 1), (w / 2, h - 1), (0, h - 1), (0, h / 2)])
# Initialize location of average points to 0s
pointsAvg = np.array([(0, 0)] * (len(allPoints[0]) + len(boundaryPts)), np.float32)
n = len(allPoints[0])
numImages = len(images)
# Warp images and transform landmarks to output coordinate system,
# and find average of transformed landmarks.
    for i in range(0, numImages):
points1 = allPoints[i]
# Corners of the eye in input image
eyecornerSrc = [allPoints[i][36], allPoints[i][45]]
# Compute similarity transform
tform = similarity_transform(eyecornerSrc, eyecornerDst)
# Apply similarity transformation
img = cv2.warpAffine(images[i], tform, (w, h))
# Apply similarity transform on points
points2 = np.reshape(np.array(points1), (68, 1, 2))
points = cv2.transform(points2, tform)
points = np.float32(np.reshape(points, (68, 2)))
# Append boundary points. Will be used in Delaunay Triangulation
points = np.append(points, boundaryPts, axis=0)
# Calculate location of average landmark points.
pointsAvg = pointsAvg + points / numImages
pointsNorm.append(points)
imagesNorm.append(img)
# Delaunay triangulation
rect = (0, 0, w, h)
dt = calculate_delaunay_triangles(rect, np.array(pointsAvg))
# Output image
output = np.zeros((h, w, 3), np.float32)
# Warp input images to average image landmarks
    for i in range(0, len(imagesNorm)):
img = np.zeros((h, w, 3), np.float32)
# Transform triangles one by one
        for j in range(0, len(dt)):
tin = []
tout = []
            for k in range(0, 3):
pIn = pointsNorm[i][dt[j][k]]
pIn = constrain_point(pIn, w, h)
pOut = pointsAvg[dt[j][k]]
pOut = constrain_point(pOut, w, h)
tin.append(pIn)
tout.append(pOut)
warp_triangle(imagesNorm[i], img, tin, tout)
# Add image intensities for averaging
output = output + img
# Divide by numImages to get average
output = output / numImages
# Display result
cv2.imshow('image', output)
cv2.waitKey(0)
| [
"cv2.warpAffine",
"cv2.imshow",
"dlib.shape_predictor",
"os.path.join",
"numpy.copy",
"numpy.append",
"numpy.int",
"math.cos",
"numpy.int32",
"numpy.reshape",
"cv2.Subdiv2D",
"cv2.waitKey",
"math.sin",
"cv2.transform",
"dlib.get_frontal_face_detector",
"os.listdir",
"numpy.float32",
... | [((236, 268), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (266, 268), False, 'import dlib\n'), ((285, 321), 'dlib.shape_predictor', 'dlib.shape_predictor', (['predictor_path'], {}), '(predictor_path)\n', (305, 321), False, 'import dlib\n'), ((376, 396), 'numpy.asanyarray', 'np.asanyarray', (['image'], {}), '(image)\n', (389, 396), True, 'import numpy as np\n'), ((2583, 2611), 'math.sin', 'math.sin', (['(60 * math.pi / 180)'], {}), '(60 * math.pi / 180)\n', (2591, 2611), False, 'import math\n'), ((2622, 2650), 'math.cos', 'math.cos', (['(60 * math.pi / 180)'], {}), '(60 * math.pi / 180)\n', (2630, 2650), False, 'import math\n'), ((3734, 3752), 'cv2.Subdiv2D', 'cv2.Subdiv2D', (['rect'], {}), '(rect)\n', (3746, 3752), False, 'import cv2\n'), ((5143, 5262), 'cv2.warpAffine', 'cv2.warpAffine', (['src', 'warp_mat', '(size[0], size[1])', 'None'], {'flags': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_REFLECT_101'}), '(src, warp_mat, (size[0], size[1]), None, flags=cv2.\n INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)\n', (5157, 5262), False, 'import cv2\n'), ((5972, 6017), 'numpy.zeros', 'np.zeros', (['(r2[3], r2[2], 3)'], {'dtype': 'np.float32'}), '((r2[3], r2[2], 3), dtype=np.float32)\n', (5980, 6017), True, 'import numpy as np\n'), ((7160, 7279), 'numpy.array', 'np.array', (['[(0, 0), (w / 2, 0), (w - 1, 0), (w - 1, h / 2), (w - 1, h - 1), (w / 2, h -\n 1), (0, h - 1), (0, h / 2)]'], {}), '([(0, 0), (w / 2, 0), (w - 1, 0), (w - 1, h / 2), (w - 1, h - 1), (\n w / 2, h - 1), (0, h - 1), (0, h / 2)])\n', (7168, 7279), True, 'import numpy as np\n'), ((8646, 8677), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.float32'], {}), '((h, w, 3), np.float32)\n', (8654, 8677), True, 'import numpy as np\n'), ((9466, 9493), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'output'], {}), "('image', output)\n", (9476, 9493), False, 'import cv2\n'), ((9498, 9512), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (9509, 9512), False, 'import cv2\n'), ((1480, 1502), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (1490, 1502), False, 'import os\n'), ((2007, 2029), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (2017, 2029), False, 'import os\n'), ((3282, 3300), 'numpy.array', 'np.array', (['[in_pts]'], {}), '([in_pts])\n', (3290, 3300), True, 'import numpy as np\n'), ((3302, 3321), 'numpy.array', 'np.array', (['[out_pts]'], {}), '([out_pts])\n', (3310, 3321), True, 'import numpy as np\n'), ((5029, 5048), 'numpy.float32', 'np.float32', (['src_tri'], {}), '(src_tri)\n', (5039, 5048), True, 'import numpy as np\n'), ((5050, 5069), 'numpy.float32', 'np.float32', (['dst_tri'], {}), '(dst_tri)\n', (5060, 5069), True, 'import numpy as np\n'), ((5484, 5500), 'numpy.float32', 'np.float32', (['[t1]'], {}), '([t1])\n', (5494, 5500), True, 'import numpy as np\n'), ((5528, 5544), 'numpy.float32', 'np.float32', (['[t2]'], {}), '([t2])\n', (5538, 5544), True, 'import numpy as np\n'), ((6047, 6068), 'numpy.int32', 'np.int32', (['t2_rect_int'], {}), '(t2_rect_int)\n', (6055, 6068), True, 'import numpy as np\n'), ((7934, 7974), 'cv2.warpAffine', 'cv2.warpAffine', (['images[i]', 'tform', '(w, h)'], {}), '(images[i], tform, (w, h))\n', (7948, 7974), False, 'import cv2\n'), ((8101, 8130), 'cv2.transform', 'cv2.transform', (['points2', 'tform'], {}), '(points2, tform)\n', (8114, 8130), False, 'import cv2\n'), ((8280, 8318), 'numpy.append', 'np.append', (['points', 'boundaryPts'], {'axis': '(0)'}), '(points, boundaryPts, axis=0)\n', (8289, 
8318), True, 'import numpy as np\n'), ((8592, 8611), 'numpy.array', 'np.array', (['pointsAvg'], {}), '(pointsAvg)\n', (8600, 8611), True, 'import numpy as np\n'), ((8785, 8816), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.float32'], {}), '((h, w, 3), np.float32)\n', (8793, 8816), True, 'import numpy as np\n'), ((2665, 2683), 'numpy.copy', 'np.copy', (['in_points'], {}), '(in_points)\n', (2672, 2683), True, 'import numpy as np\n'), ((2707, 2726), 'numpy.copy', 'np.copy', (['out_points'], {}), '(out_points)\n', (2714, 2726), True, 'import numpy as np\n'), ((2955, 2966), 'numpy.int', 'np.int', (['xin'], {}), '(xin)\n', (2961, 2966), True, 'import numpy as np\n'), ((2968, 2979), 'numpy.int', 'np.int', (['yin'], {}), '(yin)\n', (2974, 2979), True, 'import numpy as np\n'), ((3214, 3226), 'numpy.int', 'np.int', (['xout'], {}), '(xout)\n', (3220, 3226), True, 'import numpy as np\n'), ((3228, 3240), 'numpy.int', 'np.int', (['yout'], {}), '(yout)\n', (3234, 3240), True, 'import numpy as np\n'), ((6918, 6933), 'numpy.int', 'np.int', (['(0.3 * w)'], {}), '(0.3 * w)\n', (6924, 6933), True, 'import numpy as np\n'), ((6956, 6969), 'numpy.int', 'np.int', (['(h / 3)'], {}), '(h / 3)\n', (6962, 6969), True, 'import numpy as np\n'), ((6993, 7008), 'numpy.int', 'np.int', (['(0.7 * w)'], {}), '(0.7 * w)\n', (6999, 7008), True, 'import numpy as np\n'), ((7031, 7044), 'numpy.int', 'np.int', (['(h / 3)'], {}), '(h / 3)\n', (7037, 7044), True, 'import numpy as np\n'), ((8052, 8069), 'numpy.array', 'np.array', (['points1'], {}), '(points1)\n', (8060, 8069), True, 'import numpy as np\n'), ((8160, 8187), 'numpy.reshape', 'np.reshape', (['points', '(68, 2)'], {}), '(points, (68, 2))\n', (8170, 8187), True, 'import numpy as np\n'), ((1607, 1641), 'os.path.join', 'os.path.join', (['image_path', 'filePath'], {}), '(image_path, filePath)\n', (1619, 1641), False, 'import os\n'), ((2139, 2173), 'os.path.join', 'os.path.join', (['image_path', 'filePath'], {}), '(image_path, filePath)\n', (2151, 2173), False, 'import os\n'), ((2241, 2263), 'numpy.float32', 'np.float32', (['read_image'], {}), '(read_image)\n', (2251, 2263), True, 'import numpy as np\n')] |
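A small self-contained sketch of the `cv2.Subdiv2D` pattern used by `calculate_delaunay_triangles` above, on four made-up points:

```python
import cv2

rect = (0, 0, 100, 100)
subdiv = cv2.Subdiv2D(rect)
for p in [(10.0, 10.0), (90.0, 10.0), (50.0, 90.0), (50.0, 40.0)]:
    subdiv.insert(p)
triangles = subdiv.getTriangleList()  # each row is (x1, y1, x2, y2, x3, y3)
print(len(triangles))
```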
import numpy as np
from robosuite.models.robots.robot import Robot
from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string
class JR2DiffDrive(Robot):
"""JR2."""
def __init__(self):
super().__init__(xml_path_completion("robots/jr2/jr2_diff_drive.xml"))
self.bottom_offset = np.array([0, 0, 0])
def set_base_xpos(self, pos):
"""Places the robot on position @pos."""
node = self.worldbody.find("./body[@name='base_footprint']")
node.set("pos", array_to_string(pos - self.bottom_offset))
@property
def dof(self):
return 8
@property
def joints(self):
return [
"wheel_l",
"wheel_r",
"m1n6s200_joint_1",
"m1n6s200_joint_2",
"m1n6s200_joint_3",
"m1n6s200_joint_4",
"m1n6s200_joint_5",
"m1n6s200_joint_6",
#"m1n6s200_joint_finger_1",
#"m1n6s200_joint_finger_2",
]
#@property
def init_qpos(self,distance):
# straight arm
pos = np.zeros(8)
#pos = np.array([ 3.71955388e-01, -4.32114760e-02, -5.92153450e-02, -1.71517591e+00,2.83001900e+00,3.37765872e+00,1.71800951e+00,1.87382209e-02,-1.78553740e-03])
#if distance == "close":
# # bent arm far from door
# pos = np.array([ 3.70740471e-01,-0.5,-5.92161524e-02,-1.71636826e+00,2.48744571e+00,4.35466325e+00,1.68285118e+00,4.26563177e-02,-1.51617785e-03])
#elif distance == "touching_angled":
# # 45 degree body
# pos = np.array([0.41899952,-0.3610791,0.85207989,-1.99673054,1.22329899,1.67109673,5.49132849,-0.66292144,-2.96626974])
#elif distance == "open_door":
# pos = np.array([-5.34618529e-02,-4.24167703e-01,1.99816930e-01,-1.31569118e+00,2.75077181e+00,4.20818782e+00,1.79594658e+00,-1.78916482e-01,-2.12040016e-01])
#else:
# # 45 degree body
# pos = np.array([0.41899952,-0.3610791,0.85207989,-1.99673054,1.22329899,1.67109673,5.49132849,-0.66292144,-2.96626974])
return pos
@property
def visualization_sites(self):
return ["r_grip_site",]
@property
def body_contact_geoms(self):
return[
"body",
"neck",
"head",
"front_caster",
"rear_caster",
"l_wheel_link",
"r_wheel_link",
]
@property
def arm_contact_geoms(self):
return[
"armlink_base",
"armlink_2",
"armlink_3",
"armlink_5",
"armlink_6",
]
@property
def gripper_contact_geoms(self):
return[
"fingertip_2",
"fingertip_2_hook",
]
| [
"numpy.zeros",
"numpy.array",
"robosuite.utils.mjcf_utils.array_to_string",
"robosuite.utils.mjcf_utils.xml_path_completion"
] | [((321, 340), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (329, 340), True, 'import numpy as np\n'), ((1128, 1139), 'numpy.zeros', 'np.zeros', (['(8)'], {}), '(8)\n', (1136, 1139), True, 'import numpy as np\n'), ((237, 289), 'robosuite.utils.mjcf_utils.xml_path_completion', 'xml_path_completion', (['"""robots/jr2/jr2_diff_drive.xml"""'], {}), "('robots/jr2/jr2_diff_drive.xml')\n", (256, 289), False, 'from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string\n'), ((518, 559), 'robosuite.utils.mjcf_utils.array_to_string', 'array_to_string', (['(pos - self.bottom_offset)'], {}), '(pos - self.bottom_offset)\n', (533, 559), False, 'from robosuite.utils.mjcf_utils import xml_path_completion, array_to_string\n')] |
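Hypothetical usage of the model class above, assuming robosuite is installed and the `robots/jr2/jr2_diff_drive.xml` asset referenced via `xml_path_completion` is available:

```python
import numpy as np

robot = JR2DiffDrive()
print(robot.dof)                 # 8
print(robot.joints[:2])          # ['wheel_l', 'wheel_r']
robot.set_base_xpos(np.array([0.5, 0.0, 0.0]))
print(robot.init_qpos("close")) # currently all zeros regardless of `distance`
```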
import os
import json
import argparse
import io
import numpy as np
import torch
import faiss
import glob
import torch.nn as nn
import requests
import matplotlib.pyplot as plt
import torchvision.transforms.functional as F
import clip
from PIL import Image
from torchvision.utils import make_grid
import model
import retrofit
import sys
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using device:", device)
# Helper functions
def show(imgs):
if not isinstance(imgs, list):
imgs = [imgs]
fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
for i, img in enumerate(imgs):
img = img.detach()
img = F.to_pil_image(img)
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
def display_grid(imgs):
reshaped = [F.to_tensor(x) for x in imgs]
show(make_grid(reshaped))
def load_image(img, preprocess):
img = Image.open(fetch(img))
return img, preprocess(img).unsqueeze(0).to(device)
def clip_rescoring(args, net, candidates, x):
textemb = net.perceiver.encode_text(
clip.tokenize(candidates).to(args.device)
).float()
textemb /= textemb.norm(dim=-1, keepdim=True)
similarity = (100.0 * x @ textemb.T).softmax(dim=-1)
_, indices = similarity[0].topk(args.num_return_sequences)
return [candidates[idx] for idx in indices[0]]
def fetch(url_or_path):
if str(url_or_path).startswith("http://") or str(url_or_path).startswith(
"https://"
):
r = requests.get(url_or_path)
r.raise_for_status()
fd = io.BytesIO()
fd.write(r.content)
fd.seek(0)
return fd
return open(url_or_path, "rb")
def caption_image(path, args, net, preprocess, context):
captions = []
img, mat = load_image(path, preprocess)
table, x = net.build_table(
mat.half(),
net.perceiver,
ctx=context,
indices=net.indices,
indices_data=net.indices_data,
knn=args.knn,
tokenize=clip.tokenize,
device=args.device,
is_image=True,
return_images=True,
)
table = net.tokenizer.encode(table[0], return_tensors="pt").to(device)
table = table.squeeze()[:-1].unsqueeze(0)
out = net.model.generate(
table,
max_length=args.maxlen,
do_sample=args.do_sample,
num_beams=args.num_beams,
temperature=args.temperature,
top_p=args.top_p,
num_return_sequences=args.num_return_sequences,
)
candidates = []
for seq in out:
decoded = net.tokenizer.decode(seq, skip_special_tokens=True)
decoded = decoded.split("|||")[1:][0].strip()
candidates.append(decoded)
captions = clip_rescoring(args, net, candidates, x[None, :])
print(f"Personality: {context[0]}\n")
for c in captions[: args.display]:
print(c)
display_grid([img])
return captions
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
# Settings
args = argparse.Namespace(
config="./checkpoints/12xdqrwd-config",
index_dirs="./unigrams,./bigrams,./artstyles,./emotions",
clip_model="ViT-B/16",
knn=3,
maxlen=72,
    num_return_sequences=32,  # decrease this if you get GPU OOM, increase if using a float16 model
num_beams=1,
temperature=0.8,
top_p=0.9,
display=1,
do_sample=True,
device=device,
)
# Load indices
indices = []
indices_data = []
index_dirs = args.index_dirs.split(",")
index_dirs = list(filter(lambda t: len(t) > 0, index_dirs))
for index_dir in index_dirs:
fname = os.path.join(index_dir, "args.txt")
with open(fname, "r") as f:
index_args = dotdict(json.load(f))
entries = []
fname = os.path.join(index_dir, "entries.txt")
with open(fname, "r") as f:
entries.extend([line.strip() for line in f])
indices_data.append(entries)
indices.append(faiss.read_index(glob.glob(f"{index_dir}/*.index")[0]))
preprocess = clip.load(args.clip_model, jit=False)[1]
# Load model
config = dotdict(torch.load(args.config))
config.task = "txt2txt"
config.adapter = "./checkpoints/12xdqrwd.ckpt"
net = retrofit.load_params(config).to(device)
net.indices = indices
net.indices_data = indices_data
# Specify an image
img0 = "https://www.gannett-cdn.com/-mm-/5cad672d53ae2cf4d6ce21b0495d3bb8438d5b41/c=0-94-3625-2139/local/-/media/Phoenix/ClayThompson/2014/08/21/1408663997000-Geese.jpg"
# Generate captions!
# The more aligned the image is to a personality, the more likely it will work.
# Some personalities will not have an effect for certain images.
captions = caption_image(img0, args, net, preprocess, context=["Happy"])
| [
"argparse.Namespace",
"io.BytesIO",
"json.load",
"torchvision.transforms.functional.to_tensor",
"torch.load",
"numpy.asarray",
"clip.tokenize",
"clip.load",
"torchvision.utils.make_grid",
"torchvision.transforms.functional.to_pil_image",
"torch.cuda.is_available",
"requests.get",
"glob.glob"... | [((3150, 3428), 'argparse.Namespace', 'argparse.Namespace', ([], {'config': '"""./checkpoints/12xdqrwd-config"""', 'index_dirs': '"""./unigrams,./bigrams,./artstyles,./emotions"""', 'clip_model': '"""ViT-B/16"""', 'knn': '(3)', 'maxlen': '(72)', 'num_return_sequences': '(32)', 'num_beams': '(1)', 'temperature': '(0.8)', 'top_p': '(0.9)', 'display': '(1)', 'do_sample': '(True)', 'device': 'device'}), "(config='./checkpoints/12xdqrwd-config', index_dirs=\n './unigrams,./bigrams,./artstyles,./emotions', clip_model='ViT-B/16',\n knn=3, maxlen=72, num_return_sequences=32, num_beams=1, temperature=0.8,\n top_p=0.9, display=1, do_sample=True, device=device)\n", (3168, 3428), False, 'import argparse\n'), ((3724, 3759), 'os.path.join', 'os.path.join', (['index_dir', '"""args.txt"""'], {}), "(index_dir, 'args.txt')\n", (3736, 3759), False, 'import os\n'), ((3865, 3903), 'os.path.join', 'os.path.join', (['index_dir', '"""entries.txt"""'], {}), "(index_dir, 'entries.txt')\n", (3877, 3903), False, 'import os\n'), ((4111, 4148), 'clip.load', 'clip.load', (['args.clip_model'], {'jit': '(False)'}), '(args.clip_model, jit=False)\n', (4120, 4148), False, 'import clip\n'), ((4183, 4206), 'torch.load', 'torch.load', (['args.config'], {}), '(args.config)\n', (4193, 4206), False, 'import torch\n'), ((372, 397), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (395, 397), False, 'import torch\n'), ((670, 689), 'torchvision.transforms.functional.to_pil_image', 'F.to_pil_image', (['img'], {}), '(img)\n', (684, 689), True, 'import torchvision.transforms.functional as F\n'), ((850, 864), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['x'], {}), '(x)\n', (861, 864), True, 'import torchvision.transforms.functional as F\n'), ((889, 908), 'torchvision.utils.make_grid', 'make_grid', (['reshaped'], {}), '(reshaped)\n', (898, 908), False, 'from torchvision.utils import make_grid\n'), ((1550, 1575), 'requests.get', 'requests.get', (['url_or_path'], {}), '(url_or_path)\n', (1562, 1575), False, 'import requests\n'), ((1618, 1630), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1628, 1630), False, 'import io\n'), ((4285, 4313), 'retrofit.load_params', 'retrofit.load_params', (['config'], {}), '(config)\n', (4305, 4313), False, 'import retrofit\n'), ((715, 730), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (725, 730), True, 'import numpy as np\n'), ((3821, 3833), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3830, 3833), False, 'import json\n'), ((4059, 4092), 'glob.glob', 'glob.glob', (['f"""{index_dir}/*.index"""'], {}), "(f'{index_dir}/*.index')\n", (4068, 4092), False, 'import glob\n'), ((1131, 1156), 'clip.tokenize', 'clip.tokenize', (['candidates'], {}), '(candidates)\n', (1144, 1156), False, 'import clip\n')] |
#!/home/andrew/.envs/venv38/bin/python3
import sys
import numpy as np
def get_input():
cuboids = []
for line in sys.stdin:
line = line.strip()
if len(line) == 0:
continue
fields = line.split()
cuboid = {}
cuboid["mode"] = fields[0]
for dim_region in fields[1].split(","):
axis = dim_region.split("=")[0]
lower_bound = int(dim_region.split("=")[1].split("..")[0])
upper_bound = int(dim_region.split("=")[1].split("..")[1])
assert lower_bound <= upper_bound
cuboid[axis] = (lower_bound, upper_bound)
cuboids.append(cuboid)
return cuboids
def cuboid_to_bbox(cuboid):
"""Change notation from closed intervals to the half open intervals that are
typically used in python/numpy indexing. This enables us to think of the
numbers as 3D bounding boxes (wireframe line segments enclosing a volume).
Example:
on x=-20..26,y=-36..17,z=-47..7
becomes
on x=[-20, 27), y=[-36, 18), z=[-47, 8)
"""
bbox = {}
bbox["mode"] = cuboid["mode"]
bbox["x"] = (cuboid["x"][0], cuboid["x"][1] + 1)
bbox["y"] = (cuboid["y"][0], cuboid["y"][1] + 1)
bbox["z"] = (cuboid["z"][0], cuboid["z"][1] + 1)
return bbox
class BoundingGrid(object):
"""Union of all bounding planes of the form x=*, y=*, and z=*.
    Also provides functions to convert between bounding planes and slice
    indices (cells) of a hypothetical bit matrix, such that each cell
    represents an individual indivisible bounding box being on or off.
"""
def __init__(self, bboxes):
self.bounding_planes = {}
for axis in ["x", "y", "z"]:
bounds_min = set(b[axis][0] for b in bboxes)
bounds_max = set(b[axis][1] for b in bboxes)
self.bounding_planes[axis] = sorted(bounds_min | bounds_max)
self.slice_indices = {"x":{}, "y":{}, "z":{}}
for axis in ["x", "y", "z"]:
for i in range(len(self.bounding_planes[axis])):
bound = self.bounding_planes[axis][i]
self.slice_indices[axis][bound] = i
def __str__(self):
return "Bounding planes: " + str(self.bounding_planes)
def slice_index(self, axis, bound):
return self.slice_indices[axis][bound]
def bound(self, axis, slice_index):
return self.bounding_planes[axis][slice_index]
def cell_lengths(self):
cl = {}
cl["x"] = np.ediff1d(self.bounding_planes["x"])
cl["y"] = np.ediff1d(self.bounding_planes["y"])
cl["z"] = np.ediff1d(self.bounding_planes["z"])
return cl
def bitmatrix_shape(self):
s = tuple(len(self.bounding_planes[axis]) - 1 for axis in ["x","y","z"])
return s
#def areas_yz(self):
# dy = np.ediff1d(self.bounding_planes["y"])
# dz = np.ediff1d(self.bounding_planes["z"])
# return np.outer(dy, dz)
def run_bboxes(bboxes):
"""Apply the on/off instructions to the bounding boxes, compressed as
a bit matrix representing indivisible bounding boxes in the entire region.
"""
bg = BoundingGrid(bboxes)
bm = np.zeros(bg.bitmatrix_shape(), dtype=bool)
for b in bboxes:
x0 = bg.slice_index("x", b["x"][0])
x1 = bg.slice_index("x", b["x"][1])
y0 = bg.slice_index("y", b["y"][0])
y1 = bg.slice_index("y", b["y"][1])
z0 = bg.slice_index("z", b["z"][0])
z1 = bg.slice_index("z", b["z"][1])
if b["mode"] == "on":
value = True
else:
value = False
print("Applying bbox:", b)
bm[x0:x1, y0:y1, z0:z1] = value
# Compute volume (in original bounding box space) that is "on"
cell_lengths = bg.cell_lengths()
areas_yz = np.outer(cell_lengths["y"], cell_lengths["z"])
on_volume = 0
for i in range(bm.shape[0]):
assert bm[i].shape == areas_yz.shape
on_volume += cell_lengths["x"][i] * np.sum(bm[i] * areas_yz)
return bm, bg, on_volume
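def _sanity_check():
    # Hedged self-test sketch (not from the puzzle input): a single "on"
    # cuboid spanning x=0..1, y=0..1, z=0..1 in the closed-interval notation
    # covers a 2x2x2 block, so the reported "on" volume should be 8.
    cuboid = {"mode": "on", "x": (0, 1), "y": (0, 1), "z": (0, 1)}
    _, _, volume = run_bboxes([cuboid_to_bbox(cuboid)])
    assert volume == 8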
############################################################################
cuboids = get_input()
#print("cuboids:", cuboids)
bboxes = [cuboid_to_bbox(c) for c in cuboids]
print("bboxes: ", bboxes)
bg = BoundingGrid(bboxes)
print(bg)
print("Cell lengths:", bg.cell_lengths())
print("Bitmatrix shape:", bg.bitmatrix_shape())
bm, _, on_volume = run_bboxes(bboxes)
#print("Bitmatrix:", bm.astype(int))
print("Volume of 'on' cells:", on_volume)
| [
"numpy.outer",
"numpy.sum",
"numpy.ediff1d"
] | [((3787, 3833), 'numpy.outer', 'np.outer', (["cell_lengths['y']", "cell_lengths['z']"], {}), "(cell_lengths['y'], cell_lengths['z'])\n", (3795, 3833), True, 'import numpy as np\n'), ((2483, 2520), 'numpy.ediff1d', 'np.ediff1d', (["self.bounding_planes['x']"], {}), "(self.bounding_planes['x'])\n", (2493, 2520), True, 'import numpy as np\n'), ((2539, 2576), 'numpy.ediff1d', 'np.ediff1d', (["self.bounding_planes['y']"], {}), "(self.bounding_planes['y'])\n", (2549, 2576), True, 'import numpy as np\n'), ((2595, 2632), 'numpy.ediff1d', 'np.ediff1d', (["self.bounding_planes['z']"], {}), "(self.bounding_planes['z'])\n", (2605, 2632), True, 'import numpy as np\n'), ((3974, 3998), 'numpy.sum', 'np.sum', (['(bm[i] * areas_yz)'], {}), '(bm[i] * areas_yz)\n', (3980, 3998), True, 'import numpy as np\n')] |
"""
Defines class Clusters that holds clusters-related data from one or more observations
(experiments) divided (classified) in groups.
The observations are expected to be generated by scripts/cluster_analysis.py.
# Author: <NAME> (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from builtins import zip
from builtins import range
#from past.utils import old_div
__version__ = "$Revision$"
import warnings
import logging
import os.path
from copy import copy, deepcopy
import numpy
import scipy
import pyto
from ..util import nested
from .groups import Groups
from .observations import Observations
class Clusters(Groups):
"""
"""
###############################################################
#
# Initialization
#
##############################################################
def __init__(self):
"""
Initializes attributes
"""
# initialize super
super(Clusters, self).__init__()
###############################################################
#
# Input
#
##############################################################
@classmethod
def read(cls, files, mode, catalog=None, categories=None, order=None,
distances=None):
"""
        Reads values from cluster pickles.
If distances are not None the boundary distances are also read. This
argument can be specified in the same form as arg files, or set to
'default' in which case the distance pickle names are derived from
cluster pickle names (substitute '_cluster.pkl' by
'_bound_distances.pkl').
Argument files has to be a dictionary of dictionaries, where ouside
keys are group names, inside keys experiment identifiers and
inside values file names. For example:
files = {'group_a' : {'exp_1' : file_1,
'exp_2' : file_2,
... },
'group_b' : {'exp_5' : file_5,
... },
... }
It is recommended that the data about experiments is specified by
arg catalog. Catalog has to be a Catalog object where the groups are
already formed (by using Catalog.makeGroups(), for example). That is,
catalog has to contain the data in attributes that are themselves
of type dict. For example:
catalog.pixel_size = {'group_a' : {'exp_1' : pix_size_1,
'exp_2' : pix_size_2,
... },
'group_b' : {'exp_5' : pix_size_5,
... },
... }
Args files and catalog have to have the same groups and observations.
A category specified by arg categories, or an experiment
identifier specified by arg order that does not exist in the data
(arg files) is ignored and a warning is generated. This condition
often generates an exception at a later point.
Arguments:
- files: dictionary of cluster pickle files
        - catalog: (Catalog) data about experiments
- categories: list of categories
- mode: clustering mode: 'connectivity' (or 'conn'),
'hierarchicalBoundaries' (or 'hiBound'), or 'hierarchicalConnections'
(or 'hiConn')
        - order: another Groups instance (or just a dictionary with group
        names as keys and identifier lists as values) that defines the
        order of the identifiers here
- distances: dictionary of boundary distances pickle files, or
'default' to derive distance pickle names from cluster pickle names
Sets properties:
- 'ids' (indexed): (list of ndarrays) cluster ids
- 'identifiers'
- 'categories'
- 'n': (list) number of clusters in each observation
- 'bound_clusters' (indexed): (list of ndarrays of sets) boundary ids
arranged by (boundary) clusters
- 'conn_clusters' (indexed): (list of ndarrays of sets) connection ids
arranged by (connection) clusters
        - 'bound_ids' (not indexed): (list of ndarrays) all boundary ids in
the order corresponding to distances
- 'bound_dist': (list of ndarrays) pairwise distances between
boundaries in the vector form (as returned by
scipy.spatial.distance.pdist) according to the order given in
bound_ids
- 'n_connections', 'n_links' (indexed): (list of ndarrays) number of
connections, links
- 'euler', 'euler_links' (indexed): (list of ndarrays) Euler
characteristics based on connections, links
- 'loops', 'loops_links' (indexed): (list of ndarrays) number of
independent loops based on connections, links
- 'branches', 'branches_links' (indexed): (list of ndarrays) number of
branches based on connections, links
- 'distances' and 'distances_nm': array of pairwise distances between
boundaries in the vector form (as returned by
scipy.spatial.distance.pdist()) according to the order given in
bound_ids
See calculateProperties() for other properties that are calculated
        and set.
"""
if catalog is not None:
clust = cls._readCatalog(
files=files, mode=mode, catalog=catalog, categories=categories,
order=order, distances=distances)
else:
clust = cls._readOld(
files=files, mode=mode, categories=categories,
order=order, distances=distances)
return clust
@classmethod
def _readOld(cls, files, mode, categories=None, order=None, distances=None):
"""
Does the job of read() for old style meta-data (not based on catalog).
"""
# initialize
db = pyto.io.Pickled(files)
clusters = cls()
# use all categories if not specified
if categories is None:
categories = list(db.categories())
# set properties to be read
if (mode == 'conn') or (mode == 'connectivity'):
properties = ['connectivityBoundaries.clusters',
'connectivityBoundaries.nClusters',
'connectivityBoundaries.dataIds',
'connectivityBoundaries.nConnections',
'connectivityBoundaries.nLinks',
'connectivityBoundaries.euler',
'connectivityBoundaries.nLoops',
'connectivityBoundaries.eulerLinks',
'connectivityBoundaries.nLoopsLinks',
'connectivityConnections.clusters']
elif (mode == 'hiBound') or (mode == 'hierarchicalBoundaries'):
properties = ['hierarchyBoundaries.clusters',
'hierarchyBoundaries.nClusters',
'hierarchyBoundaries.dataIds',
'dualHierarchyConnections.clusters']
elif (mode == 'hiConn') or (mode == 'hierarchicalConnections'):
properties = ['dualHierarchyBoundaries.clusters',
'dualHierarchyBoundaries.nClusters',
'dualHierarchyBoundaries.dataIds',
'hierarchyConnections.clusters']
# read property values for categories
for categ in categories:
# check if data for the current category exist
logging.debug('Clusters: Reading group ' + categ)
if categ not in list(db.categories()):
logging.warning(
'Clusters: Data for group ' + categ + ' do not exist')
# make sure the identifier order is the same
if order is not None:
if isinstance(order[categ], Observations):
identifier = order[categ].identifiers
elif isinstance(order[categ], (list, tuple)):
identifier = order[categ]
else:
identifier = None
# check if requested identifiers exist in the database
if identifier is not None:
clean = []
for requested in identifier:
if requested in db.identifiers():
clean.append(requested)
else:
logging.warning(
'CleftRegions: Data for experiment ' + requested +
' do not exist')
identifier = clean
# read data
clusters[categ] = \
db.readProperties(category=categ, identifier=identifier,
index=None, properties=properties, compactify=False)
# rename properties
if (mode == 'conn') or (mode == 'connectivity'):
for categ in categories:
clusters[categ].bound_clusters = \
clusters[categ].connectivityBoundaries_clusters
clusters[categ].n = \
clusters[categ].connectivityBoundaries_nClusters
clusters[categ].bound_ids = \
clusters[categ].connectivityBoundaries_dataIds
clusters[categ].n_connections = \
clusters[categ].connectivityBoundaries_nConnections
clusters[categ].n_links = \
clusters[categ].connectivityBoundaries_nLinks
clusters[categ].euler = \
clusters[categ].connectivityBoundaries_euler
clusters[categ].loops = \
clusters[categ].connectivityBoundaries_nLoops
clusters[categ].euler_links = \
clusters[categ].connectivityBoundaries_eulerLinks
clusters[categ].loops_links = \
clusters[categ].connectivityBoundaries_nLoopsLinks
clusters[categ].conn_clusters = \
clusters[categ].connectivityConnections_clusters
elif (mode == 'hiBound') or (mode == 'hierarchicalBoundaries'):
for categ in categories:
clusters[categ].bound_clusters = \
clusters[categ].hierarchyBoundaries_clusters
clusters[categ].n = \
clusters[categ].hierarchyBoundaries_nClusters
clusters[categ].bound_ids = \
clusters[categ].hierarchyBoundaries_dataIds
clusters[categ].conn_clusters = \
clusters[categ].dualHierarchyConnections_clusters
elif (mode == 'hiConn') or (mode == 'hierarchicalConnections'):
for categ in categories:
clusters[categ].bound_clusters = \
clusters[categ].dualHierarchyBoundaries_clusters
clusters[categ].n = \
clusters[categ].dualHierarchyBoundaries_nClusters
clusters[categ].bound_ids = \
clusters[categ].dualHierarchyBoundaries_dataIds
clusters[categ].conn_clusters = \
clusters[categ].hierarchyConnections_clusters
# set ids and convert properties including removing index 0 (sum) in
# topology related
for categ in list(clusters.values()):
categ.ids = [numpy.arange(num) + 1 for num in categ.n]
categ.bound_clusters = \
[numpy.asarray(clust) for clust in categ.bound_clusters]
categ.conn_clusters = \
[numpy.asarray(clust) for clust in categ.conn_clusters]
for name in ['n_connections', 'n_links', 'euler', 'loops',
'euler_links', 'loops_links']:
value = [val[1:] for val in getattr(categ, name)]
setattr(categ, name, value)
# set book-keeping attributes
for categ in list(clusters.values()):
categ.index = 'ids'
categ.indexed = set(['ids', 'bound_clusters', 'conn_clusters'])
categ.properties = set([
'identifiers', 'categories', 'n', 'ids',
'bound_ids', 'bound_clusters', 'conn_clusters'])
if (mode == 'conn') or (mode == 'connectivity'):
categ.indexed = categ.indexed.union([
'n_connections', 'n_links',
'euler', 'euler_links', 'loops', 'loops_links'])
categ.properties = categ.properties.union([
'n_connections', 'n_links',
'euler', 'euler_links', 'loops', 'loops_links'])
# set boundary and connection cluster size
clusters.findNItems()
# read boundary distances
if distances is not None:
clusters.addDistances(files=distances, clusters=files)
return clusters
@classmethod
def _readCatalog(cls, files, catalog, mode,
categories=None, order=None, distances=None):
"""
Does the job of read() for catalog based meta-data.
"""
# initialize
db = pyto.io.Pickled(files)
inst = cls()
# use all categories if not specified
if categories is None:
categories = list(db.categories())
# set properties to be read
if (mode == 'conn') or (mode == 'connectivity'):
inst._full_properties = {
'connectivityBoundaries.ids' : 'ids',
'connectivityBoundaries.clusters' : 'bound_clusters',
'connectivityBoundaries.nClusters' : 'n',
'connectivityBoundaries.dataIds' : 'bound_ids',
'connectivityBoundaries.nConnections' : 'n_connections',
'connectivityBoundaries.nLinks' : 'n_links',
'connectivityBoundaries.euler' : 'euler',
'connectivityBoundaries.nLoops' : 'loops',
'connectivityBoundaries.branches' : 'branches',
'connectivityBoundaries.eulerLinks' : 'euler_links',
'connectivityBoundaries.nLoopsLinks' : 'loops_links',
'connectivityBoundaries.branchesLinks' : 'branches_links',
'connectivityConnections.clusters' : 'conn_clusters'}
# 'bound_clusters' and 'conn_clusters' added to indexed later
inst._full_indexed = [
'connectivityBoundaries.ids',
'connectivityBoundaries.nConnections',
'connectivityBoundaries.nLinks',
'connectivityBoundaries.euler',
'connectivityBoundaries.nLoops',
'connectivityBoundaries.branches',
'connectivityBoundaries.eulerLinks',
'connectivityBoundaries.nLoopsLinks',
'connectivityBoundaries.branchesLinks']
index_full_name = 'connectivityBoundaries.ids'
elif (mode == 'hiBound') or (mode == 'hierarchicalBoundaries'):
inst._full_properties = {
'hierarchyBoundaries.ids' : 'ids',
'hierarchyBoundaries.clusters' : 'bound_clusters',
'hierarchyBoundaries.nClusters' : 'n',
'hierarchyBoundaries.dataIds' : 'bound_ids',
'dualHierarchyConnections.clusters' : 'conn_clusters'}
# 'bound_clusters' and 'conn_clusters' added to indexed later
inst._full_indexed = ['hierarchyBoundaries.ids']
index_full_name = 'hierarchyBoundaries.ids'
elif (mode == 'hiConn') or (mode == 'hierarchicalConnections'):
inst._full_properties = {
'hierarchyConnections.ids' : 'ids',
'hierarchyConnections.clusters' : 'conn_clusters',
'hierarchyConnections.nClusters' : 'n',
'hierarchyConnections.dataIds' : 'bound_ids',
'dualHierarchyBoundaries.clusters' : 'bound_clusters'}
# 'bound_clusters' and 'conn_clusters' added to indexed later
inst._full_indexed = ['hierarchyConnections.ids']
index_full_name = 'hierarchyConnections.ids'
else:
raise ValueError(
"Mode ", mode, " is not understood. Acceptable values are ",
"'conn' ('connectivity'), 'hiBound' ('hierarchicalBoundaries')",
" and 'hiConn' ('hierarchicalConnections').")
# properties and indexed for all modes
inst._properties = set(inst._full_properties.values())
inst._indexed = set([inst._full_properties[full_indexed]
for full_indexed in inst._full_indexed])
# loop over categories
props_found = {}
for categ in categories:
# check if data for the current category exist
logging.debug('Clusters: Reading group ' + categ)
if categ not in list(db.categories()):
logging.warning(
'Clusters: Data for group ' + categ + ' do not exist')
# make sure the identifier order is the same
if order is not None:
if isinstance(order[categ], Observations):
identifier = order[categ].identifiers
elif isinstance(order[categ], (list, tuple)):
identifier = order[categ]
else:
identifier = None
# check if requested identifiers exist in the database
if identifier is not None:
clean = []
for requested in identifier:
if requested in db.identifiers():
clean.append(requested)
else:
logging.warning(
'CleftRegions: Data for experiment ' + requested +
' do not exist')
identifier = clean
# get data from all experiments of this category
group = Observations()
for group, obj, categ_tmp, name_tmp in db.readPropertiesGen(
category=categ, identifier=identifier,
properties=inst._full_properties, index=index_full_name,
indexed=inst._full_indexed, multi=group):
logging.debug('Read data of experiment ' + name_tmp)
                # nothing else needs to happen per experiment;
                # readPropertiesGen accumulates the data into the multi
                # argument (group)
                pass
# bound_clusters and conn_clusters converted to indexed here
group.bound_clusters = [numpy.asarray(clust) for clust
in group.bound_clusters]
group.conn_clusters = [numpy.asarray(clust) for clust
in group.conn_clusters]
inst._indexed.update(['bound_clusters', 'conn_clusters'])
# add data for this category
inst[categ] = group
# set array properties to empty arrays for observations without ids
for obs_index in range(len(inst[categ].identifiers)):
if inst[categ].ids[obs_index] is None:
for name in inst._indexed:
value = getattr(inst[categ], name)
value[obs_index] = numpy.array([])
# figure out if some properties were not found
found = set()
for name in inst._properties:
value = getattr(group, name, None)
if value is None:
continue
if all([x is None for x in value]):
continue
found.add(name)
# set book-keeping attributes
inst[categ].index = 'ids'
inst[categ].indexed = inst._indexed.intersection(found)
inst[categ].properties = inst._properties.intersection(found)
# need to have identifiers in
inst[categ].properties.update(['identifiers', 'categories'])
# add properties from catalog
inst[categ].addCatalog(catalog=catalog)
# read boundary distances
if distances is not None:
inst.addDistances(files=distances, catalog=catalog, clusters=files)
# calculate additional data properties
inst.calculateProperties(mode=mode)
# convert to nm
        #inst.convertToNm(catalog=catalog)
# check if all groups have the same properties
last = None
for categ in categories:
if last is not None:
if inst[categ].properties != last:
raise ValueError("Groups have different properties")
last = inst[categ].properties
inst._indexed.intersection_update(last)
inst._properties.intersection_update(last)
return inst
def addDistances(self, files, catalog, clusters=None):
"""
Reads distances between boundaries and saves them as property
'bound_dist'.
If arg files is 'default' the distance pickle names are derived from
cluster pickle names (arg clusters) by substituting '_cluster.pkl' by
'_bound_distances.pkl').
Arguments:
        - files: dictionary of boundary distance pickle file names, or
        'default' to derive distance pickle names from cluster pickle names
        - catalog: (Catalog) experiment meta-data, used for the pixel size
        conversion to nm
        - clusters: dictionary of cluster pickle file names
Sets property:
- 'bound_dist': (list of ndarrays) pairwise distances between
boundaries in the vector form (as returned by
scipy.spatial.distance.pdist()) according to the order given in
bound_ids
"""
# initialize
if files == 'default':
# convert all cluster pickle names to distance pickle names
clusters = deepcopy(clusters)
db = pyto.io.Pickled(clusters)
for categ, ident_dict in db.files.items():
for ident, file_name in db.files[categ].items():
dir, clust_base = os.path.split(file_name)
index = clust_base.rindex('_cluster.pkl')
dist_base = clust_base[:index] + '_bound_distances.pkl'
db.files[categ][ident] = os.path.join(dir, dist_base)
else:
# distance pickle names given
db = pyto.io.Pickled(files)
# loop over categories
for categ in self:
pixel = catalog.pixel_size
# make sure the identifier order is the same
identifiers = self[categ].identifiers
# read distances
self[categ].bound_dist = [
dist for dist, foo, foo
in db.data(category=categ, identifier=identifiers)]
self[categ].properties.add('bound_dist')
# convert to nm
bd_nm = self[categ].pixels2nm(
name='bound_dist', conversion=pixel[categ])
self[categ].bound_dist_nm = bd_nm
self[categ].properties.add('bound_dist_nm')
###############################################################
#
# Methods that remove items
#
##############################################################
def removeBoundaries(self, ids):
"""
Removes boundaries with specified ids from flat clusters and removes
empty clusters, but does not reassign items to clusters.
Arguments:
- ids: (Groups) boundary ids, have to have the same structure
(categories and observations) as this instance. The ids are read from
property ids'
"""
# remove boundary ids
for categ in list(ids.keys()):
# continue if current category not this instance
if categ not in list(self.keys()):
continue
for obs_ind in range(len(ids[categ].ids)):
# remove boundary ids from current observation
clean = [one_ids.difference(ids[categ].ids[obs_ind]) \
for one_ids in self[categ].bound_clusters[obs_ind]]
self[categ].bound_clusters[obs_ind] = numpy.array(clean)
# remove empty boundary clusters
self.removeEmpty(mode='boundary')
def removeEmpty(self, mode='boundary'):
"""
Removes empty boundary (mode 'boundary', or 'bound') or connection
(mode 'connection' or 'conn') clusters.
"""
# set name of boundary / connection variable
if (mode == 'boundary') or (mode == 'bound'):
name = 'bound_clusters'
elif (mode == 'connection') or (mode == 'conn'):
name = 'conn_clusters'
else:
raise ValueError ('Sorry, mode ' + mode + " was not understood. " \
+ "Mode can be 'boundary' or 'connection'.")
# remove
for categ in list(self.keys()):
non_empty = []
for obs_ind in range(len(self[categ].ids)):
# make condition for current observation
item_ids = getattr(self[categ], name)
cond = [len(ids) > 0 for ids in item_ids[obs_ind]]
non_empty.append(numpy.array(cond))
# remove from current observations
self[categ] = self[categ].extractIndexed(condition=non_empty)
###############################################################
#
# Calculation of other properties
#
##############################################################
def calculateProperties(self, mode):
"""
Calculates additional properties:
- number of items (see findNItems())
- cluster fractions (see findBoundFract())
- connections and links redundancy factors (see findRedundancy())
See these methods for the properties that are assigned.
Argument:
- mode: clustering mode: 'connectivity' (or 'conn'),
'hierarchicalBoundaries' (or 'hiBound'), or 'hierarchicalConnections'
(or 'hiConn')
"""
# calculate number of items
self.findNItems()
# calculate cluster fractions
self.findBoundFract()
# calculate connections and links redundancy factors
if (mode == 'conn') or (mode == 'connectivity'):
self.findRedundancy()
def findNItems(self, mode=None):
"""
Calculates numbers of items (boundaries and connections) in each
cluster (mode 'in_cluster') or for each observation (mode
'in_observation'). Sets calculated numbers to self.n_bound_clust and
self.n_conn_clust (mode 'in_cluster') or to self.n_bound_obs and
self.n_conn_obs (mode 'in_observation').
Argument:
- mode: can be 'in_cluster' (same as 'in_clust'), 'in_observation'
('in_obs') or None (both 'in_cluster' and 'in_observation' are
calculated.
Sets properties:
- n_bound_clust (indexed): (list of ndarrays) number of boundaries in
each (boundary) cluster, that is size of each boundary cluster
- n_conn_clust (indexed): (list of ndarrays) number of connections in
each (connection) cluster, that is size of each connection cluster
- n_bound_obs: (list) number of boundaries in each observation
- n_conn_obs: (list) number of connections in each observation
"""
# both modes
if mode is None:
self.findNItems(mode='in_cluster')
self.findNItems(mode='in_obs')
return
for categ in self:
# calculate number of boundaries for each observation, in each
# cluster
n_bound = []
for bound in self[categ].bound_clusters:
n_bound.append(numpy.array([len(clust) for clust in bound]))
# calculate number of connections for each observation, in each
# cluster
n_conn = []
for conn in self[categ].conn_clusters:
n_conn.append(numpy.array([len(clust) for clust in conn]))
# set data and book-keeping attributes
if (mode == 'in_clust') or (mode == 'in_cluster'):
self[categ].n_bound_clust = n_bound
self[categ].n_conn_clust = n_conn
self[categ].properties.add('n_bound_clust')
self[categ].properties.add('n_conn_clust')
self[categ].indexed.add('n_bound_clust')
self[categ].indexed.add('n_conn_clust')
elif (mode == 'in_obs') or (mode == 'in_observation'):
self[categ].n_bound_obs = [nb_clust.sum()
for nb_clust in n_bound]
self[categ].n_conn_obs = [nc_clust.sum() for nc_clust in n_conn]
self[categ].properties.add('n_bound_obs')
self[categ].properties.add('n_conn_obs')
else:
raise ValueError (
'Sorry, mode ' + mode + ' was not understood. '
+ "Valid modes are 'in_cluster' and 'in_observation'." )
def findBoundFract(self, categories=None):
"""
For each boundary cluster calculates the fraction of total number of
boundaries present (in that observation) that exist in the cluster.
Arguments:
- categories
Sets properties:
- fract_bound (indexed): (list of ndarrays) fraction of the total
boundaries in a cluster
- fract_bound_max: (list) max boundary fraction for each observation
"""
# get categories
if categories is None:
categories = list(self.keys())
# calculate
for categ in categories:
# calculate bound fractions for each observation
fract = [nb_clust / float(nb_obs) for nb_clust, nb_obs
in zip(self[categ].n_bound_clust, self[categ].n_bound_obs)]
self[categ].fract_bound = fract
self[categ].fract_bound_max = [fract_obs.max()
for fract_obs in fract]
# update book-keeping properties
self[categ].properties.add('fract_bound')
self[categ].indexed.add('fract_bound')
self[categ].properties.add('fract_bound_max')
def findDistance(self, items, distance, mode=None, categories=None):
"""
In each cluster, finds item that has min or max distance (depending on
the mode). The distances are read from attribute distance of
items[category].
Arguments:
- items: (Groups) items of the cluster, have to have indexed property
named (arg) distance
- distance: name of the distance property of items (typically
'meanDistance_nm' or 'minDistance_nm' if items is an Vesicles object)
- mode: distance mode, 'min' or 'max'
- categories: list of categories
Sets property:
- min_distance: (indexed) set if mode is min or None
- max_distance: (indexed) set if mode is max or None
"""
# do all if mode is None
if mode is None:
self.findDistance(items=items, distance=distance, mode='min')
self.findDistance(items=items, distance=distance, mode='max')
return
# get distance function
if mode == 'min':
dist_func = min
name = 'min_distance'
elif mode == 'max':
dist_func = max
name = 'max_distance'
else:
raise ValueError("Argument mode can be None, 'min', or 'max'.")
# get categories
if categories is None:
categories = list(self.keys())
# calculate
for categ in categories:
# initialize property that will hold the distances
setattr(self[categ], name, [])
curr_distance = getattr(self[categ], name)
# get variables for this categ
item_ids = getattr(items[categ], items[categ].index)
item_dist = getattr(items[categ], distance)
for obs_ind in range(len(self[categ].identifiers)):
# make dictionary of item distances
dist_dict = dict(list(zip(item_ids[obs_ind], item_dist[obs_ind])))
# find min distance for all clusters in this observation
dist = [dist_func(dist_dict[item_id]
for item_id in cluster)
for cluster in self[categ].bound_clusters[obs_ind]]
curr_distance.append(numpy.asarray(dist))
# book-keeping
self[categ].properties.add(name)
self[categ].indexed.add(name)
def findRedundancy(self, categories=None, mode=None):
"""
If mode is 'connections' calculates connection redundancy factor,
that is:
redundancy = number of loops / number of connections
for each cluster separately and for each observation.
If mode is 'links' calculates link redundancy factor, that is:
redundancy = number of loops_links / number of links
for each cluster separately and for each observation.
If mode is None calculates all of the above
Arguments:
- categories: categories
- mode: calculation mode 'connections', 'links' or None (both)
Sets properties:
- redundancy (indexed): (list of ndarrays) number of redundant
connections for each cluster (if mode is None or 'connection')
- redundancy_obs: (list) number of redundant connections for each
observation (if mode is None or 'connection')
- redundancy_links (indexed): (list of ndarrays) number of redundant
links for each cluster (if mode is None or 'links')
- redundancy_links obs: (list) number of redundant links for each
observation (if mode is None or 'links')
"""
# deal with mode
if mode is None:
# mode is None, do all
self.findRedundancy(categories=categories, mode='connections')
self.findRedundancy(categories=categories, mode='links')
return
elif mode == 'connections':
# connections mode
conn_name = 'n_connections'
loop_name = 'loops'
red_name = 'redundancy'
red_obs_name = 'redundancy_obs'
elif mode == 'links':
# links mode
conn_name = 'n_links'
loop_name = 'loops_links'
red_name = 'redundancy_links'
red_obs_name = 'redundancy_links_obs'
else:
raise ValueError("Argument mode not understood. Acceptable values "
+ "are: None, 'connections' and 'links'.")
# get categories
if categories is None:
categories = list(self.keys())
# calculate
for categ in categories:
setattr(self[categ], red_name, [])
setattr(self[categ], red_obs_name, [])
conn_value = getattr(self[categ], conn_name)
loop_value = getattr(self[categ], loop_name)
for n_loop, n_conn in zip(loop_value, conn_value):
# calculate redundancy for each cluster separately
n_loop = numpy.asarray(n_loop)
n_conn = numpy.asarray(n_conn, dtype='float')
#n_conn = numpy.array([len(x) for x in conn], dtype='float')
# just to avoid divide by zero warning
n_conn_fixed = numpy.where(n_conn==0, -1, n_conn)
red = numpy.where(n_conn==0, 0, n_loop / n_conn_fixed)
                # calculate redundancy for each observation; note that numpy
                # division by zero returns inf/nan rather than raising
                # ZeroDivisionError, so guard explicitly
                total_loops = n_loop.sum()
                total_conns = n_conn.sum()
                if total_conns == 0:
                    if total_loops == 0:
                        red_obs = 0.
                    else:
                        raise ValueError("Number of connections is 0, but "
                                         + "number of loops is not 0.")
                else:
                    red_obs = total_loops / total_conns
# set the values
red_value = getattr(self[categ], red_name)
setattr(self[categ], red_name, red_value + [red])
red_obs_value = getattr(self[categ], red_obs_name)
setattr(self[categ], red_obs_name, red_obs_value + [red_obs])
# book-keeping
self[categ].properties.add(red_name)
self[categ].indexed.add(red_name)
self[categ].properties.add(red_obs_name)
def findBranching(self, categories=None, mode=None):
"""
Work in progress
"""
# deal with mode
if mode is None:
# mode is None, do all
self.findBranching(categories=categories, mode='connections')
self.findBranching(categories=categories, mode='links')
return
elif mode == 'connections':
# connections mode
conn_name = 'n_connections'
branch_name = 'branches'
branch_fract_name = 'branches_fract'
branch_obs_name = 'branches_obs'
            branch_fract_obs_name = 'branches_fract_obs'
elif mode == 'links':
# links mode
            conn_name = 'n_links'
            branch_name = 'branches_links'
            # assumed names, following the connections-mode convention (this
            # work-in-progress branch lacked them and would raise NameError):
            branch_fract_name = 'branches_links_fract'
            branch_obs_name = 'branches_links_obs'
            branch_fract_obs_name = 'branches_links_fract_obs'
else:
raise ValueError("Argument mode not understood. Acceptable values "
+ "are: None, 'connections' and 'links'.")
# get categories
if categories is None:
categories = list(self.keys())
# calculate
for categ in categories:
            for ident in self[categ].identifiers:
# get values
conn = self.getValue(identifier=ident, name=conn_name)
branch = self.getValue(identifier=ident, name=branch_name)
# make branching fractions
conn_fix = numpy.where(conn > 0, conn, -1).astype('float')
branch_fract = numpy.where(conn > 0, branch / conn_fix, 0)
self.setValue(identifier=ident, name=branch_fract_name,
value=branch_fract, indexed=True)
# make branching per observation
branch_obs = branch.sum()
self.setValue(identifier=ident, name=branch_obs_name,
value=branch_obs, indexed=False)
# make branching fraction per observation
branch_fract_obs = branch.sum() / conn.sum().astype('float')
self.setValue(identifier=ident, name=branch_fract_obs_name,
value=branch_fract_obs, indexed=False)
| [
"copy.deepcopy",
"logging.debug",
"logging.warning",
"numpy.asarray",
"pyto.io.Pickled",
"numpy.where",
"numpy.array",
"numpy.arange",
"builtins.zip"
] | [((6257, 6279), 'pyto.io.Pickled', 'pyto.io.Pickled', (['files'], {}), '(files)\n', (6272, 6279), False, 'import pyto\n'), ((13674, 13696), 'pyto.io.Pickled', 'pyto.io.Pickled', (['files'], {}), '(files)\n', (13689, 13696), False, 'import pyto\n'), ((7964, 8013), 'logging.debug', 'logging.debug', (["('Clusters: Reading group ' + categ)"], {}), "('Clusters: Reading group ' + categ)\n", (7977, 8013), False, 'import logging\n'), ((17352, 17401), 'logging.debug', 'logging.debug', (["('Clusters: Reading group ' + categ)"], {}), "('Clusters: Reading group ' + categ)\n", (17365, 17401), False, 'import logging\n'), ((22314, 22332), 'copy.deepcopy', 'deepcopy', (['clusters'], {}), '(clusters)\n', (22322, 22332), False, 'from copy import copy, deepcopy\n'), ((22350, 22375), 'pyto.io.Pickled', 'pyto.io.Pickled', (['clusters'], {}), '(clusters)\n', (22365, 22375), False, 'import pyto\n'), ((22847, 22869), 'pyto.io.Pickled', 'pyto.io.Pickled', (['files'], {}), '(files)\n', (22862, 22869), False, 'import pyto\n'), ((35971, 35998), 'builtins.zip', 'zip', (['loop_value', 'conn_value'], {}), '(loop_value, conn_value)\n', (35974, 35998), False, 'from builtins import zip\n'), ((8081, 8151), 'logging.warning', 'logging.warning', (["('Clusters: Data for group ' + categ + ' do not exist')"], {}), "('Clusters: Data for group ' + categ + ' do not exist')\n", (8096, 8151), False, 'import logging\n'), ((11989, 12009), 'numpy.asarray', 'numpy.asarray', (['clust'], {}), '(clust)\n', (12002, 12009), False, 'import numpy\n'), ((12098, 12118), 'numpy.asarray', 'numpy.asarray', (['clust'], {}), '(clust)\n', (12111, 12118), False, 'import numpy\n'), ((17469, 17539), 'logging.warning', 'logging.warning', (["('Clusters: Data for group ' + categ + ' do not exist')"], {}), "('Clusters: Data for group ' + categ + ' do not exist')\n", (17484, 17539), False, 'import logging\n'), ((18813, 18865), 'logging.debug', 'logging.debug', (["('Read data of experiment ' + name_tmp)"], {}), "('Read data of experiment ' + name_tmp)\n", (18826, 18865), False, 'import logging\n'), ((19039, 19059), 'numpy.asarray', 'numpy.asarray', (['clust'], {}), '(clust)\n', (19052, 19059), False, 'import numpy\n'), ((19167, 19187), 'numpy.asarray', 'numpy.asarray', (['clust'], {}), '(clust)\n', (19180, 19187), False, 'import numpy\n'), ((24665, 24683), 'numpy.array', 'numpy.array', (['clean'], {}), '(clean)\n', (24676, 24683), False, 'import numpy\n'), ((36093, 36114), 'numpy.asarray', 'numpy.asarray', (['n_loop'], {}), '(n_loop)\n', (36106, 36114), False, 'import numpy\n'), ((36140, 36176), 'numpy.asarray', 'numpy.asarray', (['n_conn'], {'dtype': '"""float"""'}), "(n_conn, dtype='float')\n", (36153, 36176), False, 'import numpy\n'), ((36340, 36376), 'numpy.where', 'numpy.where', (['(n_conn == 0)', '(-1)', 'n_conn'], {}), '(n_conn == 0, -1, n_conn)\n', (36351, 36376), False, 'import numpy\n'), ((36397, 36447), 'numpy.where', 'numpy.where', (['(n_conn == 0)', '(0)', '(n_loop / n_conn_fixed)'], {}), '(n_conn == 0, 0, n_loop / n_conn_fixed)\n', (36408, 36447), False, 'import numpy\n'), ((38861, 38904), 'numpy.where', 'numpy.where', (['(conn > 0)', '(branch / conn_fix)', '(0)'], {}), '(conn > 0, branch / conn_fix, 0)\n', (38872, 38904), False, 'import numpy\n'), ((11893, 11910), 'numpy.arange', 'numpy.arange', (['num'], {}), '(num)\n', (11905, 11910), False, 'import numpy\n'), ((25736, 25753), 'numpy.array', 'numpy.array', (['cond'], {}), '(cond)\n', (25747, 25753), False, 'import numpy\n'), ((30501, 30556), 'builtins.zip', 'zip', 
(['self[categ].n_bound_clust', 'self[categ].n_bound_obs'], {}), '(self[categ].n_bound_clust, self[categ].n_bound_obs)\n', (30504, 30556), False, 'from builtins import zip\n'), ((33281, 33300), 'numpy.asarray', 'numpy.asarray', (['dist'], {}), '(dist)\n', (33294, 33300), False, 'import numpy\n'), ((8885, 8972), 'logging.warning', 'logging.warning', (["('CleftRegions: Data for experiment ' + requested + ' do not exist')"], {}), "('CleftRegions: Data for experiment ' + requested +\n ' do not exist')\n", (8900, 8972), False, 'import logging\n'), ((18261, 18348), 'logging.warning', 'logging.warning', (["('CleftRegions: Data for experiment ' + requested + ' do not exist')"], {}), "('CleftRegions: Data for experiment ' + requested +\n ' do not exist')\n", (18276, 18348), False, 'import logging\n'), ((19753, 19768), 'numpy.array', 'numpy.array', (['[]'], {}), '([])\n', (19764, 19768), False, 'import numpy\n'), ((32935, 32977), 'builtins.zip', 'zip', (['item_ids[obs_ind]', 'item_dist[obs_ind]'], {}), '(item_ids[obs_ind], item_dist[obs_ind])\n', (32938, 32977), False, 'from builtins import zip\n'), ((38782, 38813), 'numpy.where', 'numpy.where', (['(conn > 0)', 'conn', '(-1)'], {}), '(conn > 0, conn, -1)\n', (38793, 38813), False, 'import numpy\n')] |
from scipy.io import wavfile
from scipy import signal
import warnings
import numpy as np
import os
from os import path
from tqdm import tqdm
def process_wav_file(fpath, rate=12, norm=0.5):
"""
Decimates and normalizes a wav file to the range [-norm, norm].
Parameters
fpath : str
path to the .wav file to process (eg: '/foo/bar/recording.wav')
rate : int, optional (default: 12)
decimation rate (by default reduces samples by a factor of 12)
norm : float, optional (default: 0.5)
absolute value of the minimum and maximum sample
Returns
sr : int
new sample rate after decimation
data : np.ndarray
array of processed data
"""
sr, data = wavfile.read(fpath)
data = signal.decimate(data, rate)
data = data.astype(float)
data = data - data.min()
data = (data / data.max() * 2.0) - 1.0
data = data * norm
sr = sr // rate
return sr, data
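# Hypothetical usage (the file path is an illustrative assumption):
#   sr, data = process_wav_file('recording.wav')  # data now lies in [-0.5, 0.5]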
def process_directory_wav_files(
input_directory,
output_directory,
rate=12,
norm=0.5,
dtype=np.int16,
show_progress=True):
"""
Decimates and normalizes every wav file in a directory then saves to an
output directory.
Parameters
input_directory : str
path to the input directory containing .wav files
output_directory : str
path to the output directory to save processed .wav files
rate : int, optional (default: 12)
decimation rate (by default reduces samples by a factor of 12)
norm : float, optional (default: 0.5)
absolute value of the minimum and maximum sample
dtype : integer data type, optional (default: np.int16)
integer data type to convert wav samples to
show_progress : bool, optional (default: True)
flag to control whether progress bar is shown or hidden
"""
os.makedirs(output_directory, exist_ok=True)
if norm < 0.0 or norm > 1.0:
new_norm = np.clip(norm, 0.0, 1.0)
warnings.warn(
"({}) Norm must be between 0.0 and 1.0, not {:g}. " \
"Clipping to {:g}.".format(
"process_directory_wav_files",
norm,
new_norm)
)
norm = new_norm
fnames = [
fname for fname in os.listdir(input_directory) if fname.endswith(".wav")
]
file_iter = tqdm(fnames) if show_progress else fnames
for fname in file_iter:
fpath = path.join(input_directory, fname)
sr, data = process_wav_file(fpath, rate=rate, norm=norm)
data = (data * np.iinfo(dtype).max).astype(dtype)
# Data now spans half of the dtype's span and is 0-centered.
out_fname = "{}_processed.wav".format(path.splitext(fname)[0])
wavfile.write(path.join(output_directory, out_fname), sr, data)
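# Hypothetical usage (directory names are illustrative assumptions):
#   process_directory_wav_files('raw_audio', 'processed_audio', rate=12, norm=0.5)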
# TODO: Implement some unit tests for PCEN
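def _test_pcen_shapes():
    # Minimal sketch of one such unit test (input values are illustrative
    # assumptions; PCEN itself is defined just below, which is fine because
    # the name is only resolved when the test is called).
    spec = np.ones((4, 10))
    out, M = PCEN(spec, M_return_timestep=5)
    assert out.shape == spec.shape
    assert M.shape == (4,)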
def PCEN(spec, M_return_timestep, init_M=None, epsilon=1e-6, s=0.001, alpha=0.80, delta=2.0, r=0.5):
"""
Per-channel energy normalization.
Removes background noise by keeping track
of a smoothed intensity in each frequency bin (see [1] and [2] for more
information). Default values were handcrafted and should be examined
more thoroughly.
Since this function needs to be applied to (potentially overlapping) chunks
of a very long signal, it has been adapted to also return the smoothed
intensity at a requested time step.
Parameters
spec : numpy array
the input spectrogram
M_return_timestep : int
step in time to return the smoothed intensity
init_M : numpy array, optional (default: None)
initial state of the smoothed intensity
epsilon : float, optional (default: 1e-6)
Very small number used to prevent division by zero
s : float, optional (default: 0.001)
IIR smoothing coefficient
alpha : float, optional (default: 0.80)
        gain normalization parameter, strictly between 0.0 and 1.0
delta : float, optional (default: 2.0)
stabilized root compression offset
r : float, optional (default: 0.5)
stabilized root compression exponent
Returns
output : numpy array
spectrogram result of applying PCEN to input spec
    out_M : numpy array
smoothed intensity of input spec at timestep 'M_return_timestep'
[1] - https://research.google/pubs/pub45911.pdf
[2] - https://www.justinsalamon.com/uploads/4/3/9/4/4394963/lostanlen_pcen_spl2018.pdf
"""
assert alpha > 0.0 and alpha < 1.0, "alpha must be between 0.0 and 1.0"
output = np.zeros_like(spec)
out_M = None
if init_M is None:
M = np.zeros(shape=(output.shape[0]))
else:
M = np.array(init_M)
assert M.shape[0] == output.shape[0]
for t in range(output.shape[1]):
M = (1 - s) * M + s * spec[:,t]
output[:,t] = ((spec[:,t] / ((M + epsilon) ** alpha)) ** r) - (delta ** r)
if t == M_return_timestep:
out_M = M
if out_M is None:
out_M = M
return output, out_M | [
"tqdm.tqdm",
"numpy.zeros_like",
"os.makedirs",
"numpy.zeros",
"numpy.iinfo",
"numpy.clip",
"scipy.io.wavfile.read",
"scipy.signal.decimate",
"numpy.array",
"os.path.splitext",
"os.path.join",
"os.listdir"
] | [((728, 747), 'scipy.io.wavfile.read', 'wavfile.read', (['fpath'], {}), '(fpath)\n', (740, 747), False, 'from scipy.io import wavfile\n'), ((759, 786), 'scipy.signal.decimate', 'signal.decimate', (['data', 'rate'], {}), '(data, rate)\n', (774, 786), False, 'from scipy import signal\n'), ((1887, 1931), 'os.makedirs', 'os.makedirs', (['output_directory'], {'exist_ok': '(True)'}), '(output_directory, exist_ok=True)\n', (1898, 1931), False, 'import os\n'), ((4587, 4606), 'numpy.zeros_like', 'np.zeros_like', (['spec'], {}), '(spec)\n', (4600, 4606), True, 'import numpy as np\n'), ((1985, 2008), 'numpy.clip', 'np.clip', (['norm', '(0.0)', '(1.0)'], {}), '(norm, 0.0, 1.0)\n', (1992, 2008), True, 'import numpy as np\n'), ((2386, 2398), 'tqdm.tqdm', 'tqdm', (['fnames'], {}), '(fnames)\n', (2390, 2398), False, 'from tqdm import tqdm\n'), ((2472, 2505), 'os.path.join', 'path.join', (['input_directory', 'fname'], {}), '(input_directory, fname)\n', (2481, 2505), False, 'from os import path\n'), ((4659, 4690), 'numpy.zeros', 'np.zeros', ([], {'shape': 'output.shape[0]'}), '(shape=output.shape[0])\n', (4667, 4690), True, 'import numpy as np\n'), ((4715, 4731), 'numpy.array', 'np.array', (['init_M'], {}), '(init_M)\n', (4723, 4731), True, 'import numpy as np\n'), ((2310, 2337), 'os.listdir', 'os.listdir', (['input_directory'], {}), '(input_directory)\n', (2320, 2337), False, 'import os\n'), ((2791, 2829), 'os.path.join', 'path.join', (['output_directory', 'out_fname'], {}), '(output_directory, out_fname)\n', (2800, 2829), False, 'from os import path\n'), ((2744, 2764), 'os.path.splitext', 'path.splitext', (['fname'], {}), '(fname)\n', (2757, 2764), False, 'from os import path\n'), ((2594, 2609), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (2602, 2609), True, 'import numpy as np\n')] |
import sys
import logging
import numpy as np
from os import listdir
from os.path import isfile, join
from matplotlib import pyplot as plt
from label_mapping import *
#pc_range=(-51.2, -51.2, -5.0, 51.2, 51.2, 3.0),
def load_cloud_from_bin_file(pc_f, lb_f):
logging.info('loading cloud from: {} and labels from : {}'.format(pc_f, lb_f))
num_features = 4
cloud = np.fromfile(pc_f, dtype=np.float32, count=-1).reshape([-1, num_features])
label = np.fromfile(lb_f, dtype=np.uint32)
label = label.reshape((-1))
return cloud, label
def main():
grid_size = 256
pos_offset = 51.2
pc_width = 51.2 * 2
num_classes = 26
save_png = False
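    # Binning sketch for the loop below (illustrative): grid_size / pc_width
    # = 256 / 102.4 = 2.5 cells per unit, so x = -51.2 maps to column
    # int(0 * 2.5) = 0 and a point just below x = +51.2 maps to column 255.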
bin_path=sys.argv[1]
lbl_path=sys.argv[2]
bin_files = [f for f in listdir(bin_path) if isfile(join(bin_path, f))]
lbl_files = [f for f in listdir(lbl_path) if isfile(join(lbl_path, f))]
    assert(len(bin_files) == len(lbl_files)),"Numbers of point and label files should be the same!"
bin_files.sort()
lbl_files.sort()
for i, (bin, lbl) in enumerate(zip(bin_files, lbl_files)):
seg_grid = np.zeros([grid_size, grid_size, num_classes])
        seg_grid = seg_grid.astype(int)  # astype returns a new array; assign it back
cloud, label = load_cloud_from_bin_file(bin_path+"/"+bin, lbl_path+"/"+lbl)
# print(f'cloud shape os :{np.shape(cloud)}')
        assert(len(cloud) == len(label)),"Points and labels lists should be the same length!"
for j, (pt, lb) in enumerate(zip(cloud, label)):
if (lb > 259) or (pt[0] >= pos_offset) or (pt[1] >= pos_offset) or (pt[0] < -1 * pos_offset) or (pt[1] < -1 * pos_offset) :
continue
seg_grid[int((pt[1] + pos_offset) * grid_size / pc_width), int((pt[0] + pos_offset) * grid_size / pc_width), class2id[lb]] += 1
fin_grid = np.argmax(seg_grid, axis=2)
np.savetxt('{:0>7}'.format(int(lbl[:6])+11094)+'.txt', fin_grid, fmt='%d' , delimiter=',')
if save_png:
plt.figure()
plt.imshow(fin_grid, interpolation='nearest')
plt.savefig(lbl[:6]+'.png')
plt.clf()
if __name__=="__main__":
if len(sys.argv) < 3:
logging.error('Enter a lidar bin folder[1] path and label folder[2] path!')
else:
main()
| [
"logging.error",
"numpy.argmax",
"numpy.fromfile",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.clf",
"numpy.zeros",
"matplotlib.pyplot.figure",
"os.path.join",
"os.listdir",
"matplotlib.pyplot.savefig"
] | [((477, 511), 'numpy.fromfile', 'np.fromfile', (['lb_f'], {'dtype': 'np.uint32'}), '(lb_f, dtype=np.uint32)\n', (488, 511), True, 'import numpy as np\n'), ((1127, 1172), 'numpy.zeros', 'np.zeros', (['[grid_size, grid_size, num_classes]'], {}), '([grid_size, grid_size, num_classes])\n', (1135, 1172), True, 'import numpy as np\n'), ((1813, 1840), 'numpy.argmax', 'np.argmax', (['seg_grid'], {'axis': '(2)'}), '(seg_grid, axis=2)\n', (1822, 1840), True, 'import numpy as np\n'), ((2178, 2253), 'logging.error', 'logging.error', (['"""Enter a lidar bin folder[1] path and label folder[2] path!"""'], {}), "('Enter a lidar bin folder[1] path and label folder[2] path!')\n", (2191, 2253), False, 'import logging\n'), ((387, 432), 'numpy.fromfile', 'np.fromfile', (['pc_f'], {'dtype': 'np.float32', 'count': '(-1)'}), '(pc_f, dtype=np.float32, count=-1)\n', (398, 432), True, 'import numpy as np\n'), ((778, 795), 'os.listdir', 'listdir', (['bin_path'], {}), '(bin_path)\n', (785, 795), False, 'from os import listdir\n'), ((854, 871), 'os.listdir', 'listdir', (['lbl_path'], {}), '(lbl_path)\n', (861, 871), False, 'from os import listdir\n'), ((1975, 1987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1985, 1987), True, 'from matplotlib import pyplot as plt\n'), ((2000, 2045), 'matplotlib.pyplot.imshow', 'plt.imshow', (['fin_grid'], {'interpolation': '"""nearest"""'}), "(fin_grid, interpolation='nearest')\n", (2010, 2045), True, 'from matplotlib import pyplot as plt\n'), ((2058, 2087), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(lbl[:6] + '.png')"], {}), "(lbl[:6] + '.png')\n", (2069, 2087), True, 'from matplotlib import pyplot as plt\n'), ((2098, 2107), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2105, 2107), True, 'from matplotlib import pyplot as plt\n'), ((806, 823), 'os.path.join', 'join', (['bin_path', 'f'], {}), '(bin_path, f)\n', (810, 823), False, 'from os.path import isfile, join\n'), ((882, 899), 'os.path.join', 'join', (['lbl_path', 'f'], {}), '(lbl_path, f)\n', (886, 899), False, 'from os.path import isfile, join\n')] |
### 1. Creating a training script for hyperparameter tuning
#The script must include two things: 1. an argument for each hyperparameter you want to vary, and 2. logging of the target performance metric. This enables the hyperdrive run to evaluate the performance of the child runs it initiates, and identify the one that produces the best performing model.
import argparse
import os  # needed for os.makedirs below
import joblib
from azureml.core import Run
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# Get regularization hyperparameter
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01)
args = parser.parse_args()
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the training dataset
data = run.input_datasets['training_data'].to_pandas_dataframe()
# Separate features and labels, and split for training/validation
X = data[['feature1','feature2','feature3','feature4']].values
y = data['label'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# Train a logistic regression model with the reg hyperparameter
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate and log accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
run.log('Accuracy', np.float(acc))
# Save the trained model
os.makedirs('outputs', exist_ok=True)
joblib.dump(value=model, filename='outputs/model.pkl')
run.complete()
### Configuring and running a hyperdrive experiment
from azureml.core import Experiment
from azureml.train.hyperdrive import HyperDriveConfig, PrimaryMetricGoal
# Assumes ws, script_config and param_sampling are already defined
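# Hedged sketch of one possible param_sampling definition (values are
# illustrative and match the --regularization argument in the training script):
#   from azureml.train.hyperdrive import RandomParameterSampling, choice
#   param_sampling = RandomParameterSampling({'--regularization': choice(0.001, 0.01, 0.1, 1.0)})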
hyperdrive = HyperDriveConfig(run_config=script_config,
hyperparameter_sampling=param_sampling,
policy=None,
primary_metric_name='Accuracy',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=6,
max_concurrent_runs=4)
experiment = Experiment(workspace = ws, name = 'hyperdrive_training')
hyperdrive_run = experiment.submit(config=hyperdrive)
### Monitoring and reviewing hyperdrive runs
# The experiment will initiate a child run for each hyperparameter combination to be tried, and you can retrieve the logged metrics of these runs using the following code:
for child_run in run.get_children():
print(child_run.id, child_run.get_metrics())
# You can also list all runs in descending order of performance like this:
for child_run in hyperdrive_run.get_children_sorted_by_primary_metric():
print(child_run)
# To retrieve the best performing run, you can use the following code:
best_run = hyperdrive_run.get_best_run_by_primary_metric()
| [
"numpy.average",
"argparse.ArgumentParser",
"sklearn.model_selection.train_test_split",
"azureml.core.Run.get_context",
"joblib.dump",
"azureml.train.hyperdrive.HyperDriveConfig",
"numpy.float",
"sklearn.linear_model.LogisticRegression",
"azureml.core.Experiment"
] | [((599, 624), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (622, 624), False, 'import argparse\n'), ((795, 812), 'azureml.core.Run.get_context', 'Run.get_context', ([], {}), '()\n', (810, 812), False, 'from azureml.core import Run\n'), ((1097, 1134), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (1113, 1134), False, 'from sklearn.model_selection import train_test_split\n'), ((1345, 1372), 'numpy.average', 'np.average', (['(y_hat == y_test)'], {}), '(y_hat == y_test)\n', (1355, 1372), True, 'import numpy as np\n'), ((1472, 1526), 'joblib.dump', 'joblib.dump', ([], {'value': 'model', 'filename': '"""outputs/model.pkl"""'}), "(value=model, filename='outputs/model.pkl')\n", (1483, 1526), False, 'import joblib\n'), ((1789, 2018), 'azureml.train.hyperdrive.HyperDriveConfig', 'HyperDriveConfig', ([], {'run_config': 'script_config', 'hyperparameter_sampling': 'param_sampling', 'policy': 'None', 'primary_metric_name': '"""Accuracy"""', 'primary_metric_goal': 'PrimaryMetricGoal.MAXIMIZE', 'max_total_runs': '(6)', 'max_concurrent_runs': '(4)'}), "(run_config=script_config, hyperparameter_sampling=\n param_sampling, policy=None, primary_metric_name='Accuracy',\n primary_metric_goal=PrimaryMetricGoal.MAXIMIZE, max_total_runs=6,\n max_concurrent_runs=4)\n", (1805, 2018), False, 'from azureml.train.hyperdrive import HyperDriveConfig, PrimaryMetricGoal\n'), ((2200, 2252), 'azureml.core.Experiment', 'Experiment', ([], {'workspace': 'ws', 'name': '"""hyperdrive_training"""'}), "(workspace=ws, name='hyperdrive_training')\n", (2210, 2252), False, 'from azureml.core import Experiment\n'), ((1393, 1406), 'numpy.float', 'np.float', (['acc'], {}), '(acc)\n', (1401, 1406), True, 'import numpy as np\n'), ((1209, 1258), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1 / reg)', 'solver': '"""liblinear"""'}), "(C=1 / reg, solver='liblinear')\n", (1227, 1258), False, 'from sklearn.linear_model import LogisticRegression\n')] |
"""
Filename: plot_ts_hexbin.py
Author: <NAME>, <EMAIL>
Description: Plot ocean volume distribution in T-S space
"""
# Import general Python modules
import sys
import os
import re
import pdb
import argparse
import numpy
import pandas
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import iris
import cmdline_provenance as cmdprov
#import matplotlib as mpl
#mpl.rcParams['axes.labelsize'] = 'large'
#mpl.rcParams['axes.titlesize'] = 'x-large'
#mpl.rcParams['xtick.labelsize'] = 'medium'
#mpl.rcParams['ytick.labelsize'] = 'medium'
#mpl.rcParams['legend.fontsize'] = 'large'
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import convenient_universal as uconv
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
ocean_names = {0: 'land', 1: 'southern_ocean', 2: 'atlantic',
3: 'pacific', 4: 'arctic', 5: 'indian',
6: 'mediterranean', 7: 'black_sea', 8: 'hudson_bay',
9: 'baltic_sea', 10: 'red_sea'}
def get_ocean_name(ocean_num):
return ocean_names[ocean_num]
def select_basin(df, basin_name):
"""Select basin"""
if not basin_name == 'globe':
df['basin'] = df['basin'].apply(get_ocean_name)
basin_components = basin_name.split('_')
if len(basin_components) == 1:
ocean = basin_components[0]
hemisphere = None
else:
hemisphere, ocean = basin_components
df = df[(df.basin == ocean)]
if hemisphere == 'north':
df = df[(df.latitude > 0)]
elif hemisphere == 'south':
df = df[(df.latitude < 0)]
return df
def create_df(tcube, scube, vcube, bcube, basin):
"""Create DataFrame"""
tcube = gio.temperature_unit_check(tcube, 'C')
scube = gio.salinity_unit_check(scube)
assert tcube.ndim == 3
assert scube.ndim == 3
lats = uconv.broadcast_array(tcube.coord('latitude').points, [1, 2], tcube.shape)
lons = uconv.broadcast_array(tcube.coord('longitude').points, [1, 2], tcube.shape)
levs = uconv.broadcast_array(tcube.coord('depth').points, 0, tcube.shape)
sdata = scube.data.flatten()
tdata = tcube.data.flatten()
vdata = vcube.data.flatten()
bdata = bcube.data.flatten()
lat_data = lats.flatten()
lon_data = lons.flatten()
depth_data = levs.flatten()
df = pandas.DataFrame(index=range(tdata.shape[0]))
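    # 5000 is a sentinel fill value well outside any physically valid
    # temperature/salinity/volume, so masked cells can be filtered out below.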
df['temperature'] = tdata.filled(fill_value=5000)
df['salinity'] = sdata.filled(fill_value=5000)
df['volume'] = vdata.filled(fill_value=5000)
df['basin'] = bdata.filled(fill_value=5000)
df['latitude'] = lat_data.filled(fill_value=5000)
df['longitude'] = lon_data.filled(fill_value=5000)
df['depth'] = depth_data
df = df[df.temperature != 5000]
df = df[df.temperature != -273.15]
if basin:
df = select_basin(df, basin)
return df
def get_title(cube, basin):
"""Get the plot title."""
model = cube.attributes['model_id']
basin = basin.replace('_', ' ').title()
title = '%s volume distribution, %s model' %(basin, model)
return title
def plot_diff(hist_dict, inargs, extents, vmin, vmax):
"""Plot the difference between volume distributions"""
fig, ax = plt.subplots(figsize=(9, 9))
base_exp, experiment = inargs.labels
log_diff = numpy.log(hist_dict[experiment]) - numpy.log(hist_dict[base_exp])
plt.imshow(log_diff.T, origin='lower', extent=extents, aspect='auto', cmap='RdBu_r',
vmin=vmin, vmax=vmax)
cb = plt.colorbar()
cb.set_label('log(volume) ($m^3$)')
def plot_raw(hist_dict, inargs, extents, vmin, vmax, x_values, y_values):
"""Plot the raw volume distribution."""
fig, ax = plt.subplots(figsize=(9, 9))
legend_elements = []
for plotnum, label in enumerate(inargs.labels):
log_hist = numpy.log(hist_dict[label]).T
plt.imshow(log_hist, origin='lower',
extent=extents, aspect='auto', alpha=inargs.alphas[plotnum],
cmap=inargs.colors[plotnum], vmin=vmin, vmax=vmax)
x_points = []
for y_index in range(len(y_values)):
weights = hist_dict[label][:, y_index]
if weights.sum() == 0:
x_point = numpy.nan
else:
x_point = numpy.average(x_values, weights=weights)
x_points.append(x_point)
plt.plot(numpy.array(x_points), y_values, color=inargs.colors[plotnum][0:-1])
if plotnum == 0:
cb = plt.colorbar()
cb.set_label('log(volume) ($m^3$)')
color = inargs.colors[plotnum][0:-1].lower()
legend_elements.append(Line2D([0], [0], marker='o', color='w', markerfacecolor=color,
label=label, alpha=inargs.alphas[plotnum]))
ax.legend(handles=legend_elements, loc=4)
def main(inargs):
"""Run the program."""
vcube = iris.load_cube(inargs.volume_file)
bcube = iris.load_cube(inargs.basin_file)
smin, smax = inargs.salinity_bounds
tmin, tmax = inargs.temperature_bounds
sstep, tstep = inargs.bin_size
x_edges = numpy.arange(smin, smax, sstep)
y_edges = numpy.arange(tmin, tmax, tstep)
x_values = (x_edges[1:] + x_edges[:-1]) / 2
y_values = (y_edges[1:] + y_edges[:-1]) / 2
extents = [x_values[0], x_values[-1], y_values[0], y_values[-1]]
if inargs.colorbar_bounds:
vmin, vmax = inargs.colorbar_bounds
else:
vmin = vmax = None
hist_dict = {}
pairnum = 0
for tfile, sfile in zip(inargs.temperature_files, inargs.salinity_files):
tcube = iris.load_cube(tfile)
scube = iris.load_cube(sfile)
df = create_df(tcube, scube, vcube, bcube, basin=inargs.basin)
hist_dict[inargs.labels[pairnum]], xedges, yedges = numpy.histogram2d(df['salinity'].values,
df['temperature'].values,
weights=df['volume'].values,
bins=[x_edges, y_edges])
pairnum = pairnum + 1
if inargs.diff:
plot_diff(hist_dict, inargs, extents, vmin, vmax)
else:
plot_raw(hist_dict, inargs, extents, vmin, vmax, x_values, y_values)
title = get_title(tcube, inargs.basin)
plt.title(title)
plt.ylabel('temperature ($C$)')
plt.xlabel('salinity ($g kg^{-1}$)')
# Save output
    dpi = inargs.dpi if inargs.dpi else plt.rcParams['figure.dpi']
print('dpi =', dpi)
plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi)
# Metadata
metadata_dict = {inargs.basin_file: bcube.attributes['history'],
inargs.volume_file: vcube.attributes['history'],
tfile: tcube.attributes['history'],
sfile: scube.attributes['history']}
log_text = cmdprov.new_log(infile_history=metadata_dict, git_repo=repo_dir)
log_file = re.sub('.png', '.met', inargs.outfile)
cmdprov.write_log(log_file, log_text)
if __name__ == '__main__':
extra_info ="""
author:
<NAME>, <EMAIL>
"""
description = 'Plot ocean volume distribution in T-S space.'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("volume_file", type=str, help="Volume file name")
parser.add_argument("basin_file", type=str, help="Basin file name")
parser.add_argument("outfile", type=str, help="Output file name")
parser.add_argument("--temperature_files", nargs='*', type=str, help="Temperature files")
parser.add_argument("--salinity_files", nargs='*', type=str, help="Salinity files")
parser.add_argument("--labels", nargs='*', type=str, required=True,
help="Label for each temperature/salinity pair")
parser.add_argument("--basin", type=str, default='globe',
choices=('globe', 'indian', 'north_atlantic', 'south_atlantic', 'north_pacific', 'south_pacific'),
help='ocean basin to plot')
parser.add_argument("--colors", nargs='*', type=str,
choices=('Greys', 'Reds', 'Blues', 'Greens', 'Oranges', 'Purples', 'viridis'),
help="Color for each temperature/salinity file pair")
parser.add_argument("--alphas", nargs='*', type=float,
help="Transparency for each temperature/salinity pair")
parser.add_argument("--diff", action="store_true", default=False,
help="Plot the difference between the two file pairs")
parser.add_argument("--salinity_bounds", type=float, nargs=2, default=(32, 37.5),
help='bounds for the salinity (X) axis')
parser.add_argument("--temperature_bounds", type=float, nargs=2, default=(-2, 30),
help='bounds for the temperature (Y) axis')
parser.add_argument("--bin_size", type=float, nargs=2, default=(0.05, 0.25),
help='bin size: salinity step, temperature step')
parser.add_argument("--colorbar_bounds", type=float, nargs=2, default=None,
help='bounds for the colorbar')
parser.add_argument("--dpi", type=float, default=None,
help="Figure resolution in dots per square inch [default=auto]")
args = parser.parse_args()
assert len(args.temperature_files) == len(args.salinity_files)
if args.diff:
assert len(args.temperature_files) == 2
main(args)
| [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"cmdline_provenance.write_log",
"iris.load_cube",
"numpy.arange",
"os.path.join",
"sys.path.append",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.imshow",
"numpy.histogram2d",
"general_io.salinity_unit_check",
"matplotlib.pyplot.colorbar",... | [((642, 653), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (651, 653), False, 'import os\n'), ((822, 855), 'os.path.join', 'os.path.join', (['repo_dir', '"""modules"""'], {}), "(repo_dir, 'modules')\n", (834, 855), False, 'import os\n'), ((856, 884), 'sys.path.append', 'sys.path.append', (['modules_dir'], {}), '(modules_dir)\n', (871, 884), False, 'import sys\n'), ((721, 754), 'os.path.join', 'os.path.join', (['repo_dir', 'directory'], {}), '(repo_dir, directory)\n', (733, 754), False, 'import os\n'), ((2062, 2100), 'general_io.temperature_unit_check', 'gio.temperature_unit_check', (['tcube', '"""C"""'], {}), "(tcube, 'C')\n", (2088, 2100), True, 'import general_io as gio\n'), ((2113, 2143), 'general_io.salinity_unit_check', 'gio.salinity_unit_check', (['scube'], {}), '(scube)\n', (2136, 2143), True, 'import general_io as gio\n'), ((3572, 3600), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (3584, 3600), True, 'import matplotlib.pyplot as plt\n'), ((3728, 3839), 'matplotlib.pyplot.imshow', 'plt.imshow', (['log_diff.T'], {'origin': '"""lower"""', 'extent': 'extents', 'aspect': '"""auto"""', 'cmap': '"""RdBu_r"""', 'vmin': 'vmin', 'vmax': 'vmax'}), "(log_diff.T, origin='lower', extent=extents, aspect='auto', cmap=\n 'RdBu_r', vmin=vmin, vmax=vmax)\n", (3738, 3839), True, 'import matplotlib.pyplot as plt\n'), ((3860, 3874), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3872, 3874), True, 'import matplotlib.pyplot as plt\n'), ((4050, 4078), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (4062, 4078), True, 'import matplotlib.pyplot as plt\n'), ((5251, 5285), 'iris.load_cube', 'iris.load_cube', (['inargs.volume_file'], {}), '(inargs.volume_file)\n', (5265, 5285), False, 'import iris\n'), ((5298, 5331), 'iris.load_cube', 'iris.load_cube', (['inargs.basin_file'], {}), '(inargs.basin_file)\n', (5312, 5331), False, 'import iris\n'), ((5465, 5496), 'numpy.arange', 'numpy.arange', (['smin', 'smax', 'sstep'], {}), '(smin, smax, sstep)\n', (5477, 5496), False, 'import numpy\n'), ((5511, 5542), 'numpy.arange', 'numpy.arange', (['tmin', 'tmax', 'tstep'], {}), '(tmin, tmax, tstep)\n', (5523, 5542), False, 'import numpy\n'), ((6751, 6767), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6760, 6767), True, 'import matplotlib.pyplot as plt\n'), ((6772, 6803), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""temperature ($C$)"""'], {}), "('temperature ($C$)')\n", (6782, 6803), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6844), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""salinity ($g kg^{-1}$)"""'], {}), "('salinity ($g kg^{-1}$)')\n", (6818, 6844), True, 'import matplotlib.pyplot as plt\n'), ((6986, 7043), 'matplotlib.pyplot.savefig', 'plt.savefig', (['inargs.outfile'], {'bbox_inches': '"""tight"""', 'dpi': 'dpi'}), "(inargs.outfile, bbox_inches='tight', dpi=dpi)\n", (6997, 7043), True, 'import matplotlib.pyplot as plt\n'), ((7332, 7396), 'cmdline_provenance.new_log', 'cmdprov.new_log', ([], {'infile_history': 'metadata_dict', 'git_repo': 'repo_dir'}), '(infile_history=metadata_dict, git_repo=repo_dir)\n', (7347, 7396), True, 'import cmdline_provenance as cmdprov\n'), ((7412, 7450), 're.sub', 're.sub', (['""".png"""', '""".met"""', 'inargs.outfile'], {}), "('.png', '.met', inargs.outfile)\n", (7418, 7450), False, 'import re\n'), ((7455, 7492), 'cmdline_provenance.write_log', 'cmdprov.write_log', (['log_file', 'log_text'], 
{}), '(log_file, log_text)\n', (7472, 7492), True, 'import cmdline_provenance as cmdprov\n'), ((7657, 7823), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'epilog': 'extra_info', 'argument_default': 'argparse.SUPPRESS', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=description, epilog=extra_info,\n argument_default=argparse.SUPPRESS, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (7680, 7823), False, 'import argparse\n'), ((3657, 3689), 'numpy.log', 'numpy.log', (['hist_dict[experiment]'], {}), '(hist_dict[experiment])\n', (3666, 3689), False, 'import numpy\n'), ((3692, 3722), 'numpy.log', 'numpy.log', (['hist_dict[base_exp]'], {}), '(hist_dict[base_exp])\n', (3701, 3722), False, 'import numpy\n'), ((4213, 4366), 'matplotlib.pyplot.imshow', 'plt.imshow', (['log_hist'], {'origin': '"""lower"""', 'extent': 'extents', 'aspect': '"""auto"""', 'alpha': 'inargs.alphas[plotnum]', 'cmap': 'inargs.colors[plotnum]', 'vmin': 'vmin', 'vmax': 'vmax'}), "(log_hist, origin='lower', extent=extents, aspect='auto', alpha=\n inargs.alphas[plotnum], cmap=inargs.colors[plotnum], vmin=vmin, vmax=vmax)\n", (4223, 4366), True, 'import matplotlib.pyplot as plt\n'), ((5955, 5976), 'iris.load_cube', 'iris.load_cube', (['tfile'], {}), '(tfile)\n', (5969, 5976), False, 'import iris\n'), ((5993, 6014), 'iris.load_cube', 'iris.load_cube', (['sfile'], {}), '(sfile)\n', (6007, 6014), False, 'import iris\n'), ((6148, 6273), 'numpy.histogram2d', 'numpy.histogram2d', (["df['salinity'].values", "df['temperature'].values"], {'weights': "df['volume'].values", 'bins': '[x_edges, y_edges]'}), "(df['salinity'].values, df['temperature'].values, weights=\n df['volume'].values, bins=[x_edges, y_edges])\n", (6165, 6273), False, 'import numpy\n'), ((4175, 4202), 'numpy.log', 'numpy.log', (['hist_dict[label]'], {}), '(hist_dict[label])\n', (4184, 4202), False, 'import numpy\n'), ((4738, 4759), 'numpy.array', 'numpy.array', (['x_points'], {}), '(x_points)\n', (4749, 4759), False, 'import numpy\n'), ((4850, 4864), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4862, 4864), True, 'import matplotlib.pyplot as plt\n'), ((5006, 5115), 'matplotlib.lines.Line2D', 'Line2D', (['[0]', '[0]'], {'marker': '"""o"""', 'color': '"""w"""', 'markerfacecolor': 'color', 'label': 'label', 'alpha': 'inargs.alphas[plotnum]'}), "([0], [0], marker='o', color='w', markerfacecolor=color, label=label,\n alpha=inargs.alphas[plotnum])\n", (5012, 5115), False, 'from matplotlib.lines import Line2D\n'), ((4634, 4674), 'numpy.average', 'numpy.average', (['x_values'], {'weights': 'weights'}), '(x_values, weights=weights)\n', (4647, 4674), False, 'import numpy\n')] |
"""
Geographic utility tools (coordinate conversions, great-circle distances and bearings) shared across processing scripts.
"""
from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi
import numpy as np
R_EARTH = 6378.139
class InputError(Exception):
pass
def gen_mat(mrot, mlon, mlat):
"""
Precursor for xy2ll and ll2xy functions.
mrot: model rotation
mlon: model centre longitude
mlat: model centre latitude
"""
arg = radians(mrot)
cosA = cos(arg)
sinA = sin(arg)
arg = radians(90.0 - mlat)
cosT = cos(arg)
sinT = sin(arg)
arg = radians(mlon)
cosP = cos(arg)
sinP = sin(arg)
amat = np.array(
[
[
cosA * cosT * cosP + sinA * sinP,
sinA * cosT * cosP - cosA * sinP,
sinT * cosP,
],
[
cosA * cosT * sinP - sinA * cosP,
sinA * cosT * sinP + cosA * cosP,
sinT * sinP,
],
[-cosA * sinT, -sinA * sinT, cosT],
],
dtype="f",
)
ainv = amat.T * 1.0 / np.linalg.det(amat)
return amat.flatten(), ainv.flatten()
def xy2ll(xy_km, amat):
"""
Converts km offsets to longitude and latitude.
xy_km: 2D np array of [X, Y] offsets from origin (km)
amat: from gen_mat function
"""
x = xy_km[:, 0] / R_EARTH
sinB = np.sin(x)
y = xy_km[:, 1] / R_EARTH
sinG = np.sin(y)
z = np.sqrt(1.0 + sinB * sinB * sinG * sinG)
xp = sinG * np.cos(x) * z
yp = sinB * np.cos(y) * z
zp = np.sqrt(1.0 - xp * xp - yp * yp)
xg = xp * amat[0] + yp * amat[1] + zp * amat[2]
yg = xp * amat[3] + yp * amat[4] + zp * amat[5]
zg = xp * amat[6] + yp * amat[7] + zp * amat[8]
lat = np.where(
zg == 0.0, 0.0, 90.0 - np.degrees(np.arctan(np.sqrt(xg * xg + yg * yg) / zg))
)
lat[np.where(zg < 0.0)] -= 180.0
lon = np.where(xg == 0.0, 0.0, np.degrees(np.arctan(yg / xg)))
lon[np.where(xg < 0.0)] -= 180.0
lon[np.where(lon < -180.0)] += 360.0
return np.column_stack((lon, lat))
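# Hedged usage sketch (not in the original module): gen_mat builds the
# rotation matrices once, then xy2ll maps km offsets to lon/lat. The model
# rotation/centre values here are arbitrary examples.
def _example_xy2ll():
    amat, ainv = gen_mat(mrot=0.0, mlon=172.0, mlat=-43.0)
    offsets = np.array([[0.0, 0.0], [10.0, 5.0]])  # km from model centre
    return xy2ll(offsets, amat)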
def gp2xy(gp, nx, ny, hh):
"""
Converts grid points to km offsets.
    gp: 2D np array of [X, Y] gridpoints
nx: number of X grid positions
ny: number of Y grid positions
hh: grid spacing
"""
xy = gp.astype(np.float32) * hh
# shift for centre origin
xy[:, 0] -= (nx - 1) * hh * 0.5
xy[:, 1] -= (ny - 1) * hh * 0.5
return xy
def ll_shift(lat, lon, distance, bearing):
"""
Shift lat/long by distance at bearing.
"""
# formula is for radian values
lat, lon, bearing = list(map(radians, [lat, lon, bearing]))
shift = distance / R_EARTH
lat2 = asin(sin(lat) * cos(shift) + cos(lat) * sin(shift) * cos(bearing))
lon2 = lon + atan2(sin(bearing) * sin(shift) * cos(lat),
cos(shift) - sin(lat) * sin(lat2))
return degrees(lat2), degrees(lon2)
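# Hedged sanity sketch (not in the original module): shifting ~111 km due
# north should raise the latitude by roughly one degree.
def _example_ll_shift():
    lat2, lon2 = ll_shift(lat=-43.0, lon=172.0, distance=111.0, bearing=0.0)
    assert abs((lat2 + 43.0) - 1.0) < 0.01
    return lat2, lon2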
def ll_mid(lon1, lat1, lon2, lat2):
"""
Return midpoint between a pair of lat, long points.
"""
# functions based on radians
lon1, lat1, lat2, dlon = map(radians, [lon1, lat1, lat2, (lon2 - lon1)])
Bx = cos(lat2) * cos(dlon)
By = cos(lat2) * sin(dlon)
lat3 = atan2(sin(lat1) + sin(lat2), sqrt((cos(lat1) + Bx) ** 2 + By ** 2))
lon3 = lon1 + atan2(By, cos(lat1) + Bx)
return degrees(lon3), degrees(lat3)
def ll_dist(lon1, lat1, lon2, lat2):
"""
Return distance between a pair of lat, long points.
"""
# functions based on radians
lat1, lat2, dlon, dlat = map(radians, [lat1, lat2, (lon2 - lon1), (lat2 - lat1)])
a = sin(dlat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2.0) ** 2
return R_EARTH * 2.0 * atan2(sqrt(a), sqrt(1 - a))
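# Hedged check (not in the original module): along a meridian the haversine
# formula reduces to R * dlat, so one degree of latitude should come out as
# pi * R_EARTH / 180 (~111.3 km) to within rounding.
def _example_ll_dist():
    d = ll_dist(172.0, -43.0, 172.0, -42.0)
    assert abs(d - pi * R_EARTH / 180.0) < 1e-6
    return d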
def ll_bearing(lon1, lat1, lon2, lat2, midpoint=False):
"""
Initial bearing when traveling from 1 -> 2.
Direction facing from point 1 when looking at point 2.
"""
if midpoint:
lon1, lat1 = ll_mid(lon1, lat1, lon2, lat2)
lat1, lat2, lon_diff = map(radians, [lat1, lat2, (lon2 - lon1)])
return (
degrees(
atan2(
cos(lat2) * sin(lon_diff),
cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(lon_diff),
)
)
% 360
)
def angle_diff(b1, b2):
"""
Return smallest difference (clockwise, -180 -> 180) from b1 to b2.
"""
r = (b2 - b1) % 360
if r > 180:
return r - 360
return r
def avg_wbearing(angles):
"""
Return average angle given angles and weightings.
NB: angles are clockwise from North, not anti-clockwise from East.
angles: 2d list of (angle, weight)
"""
x = 0
y = 0
for a in angles:
x += a[1] * sin(radians(a[0]))
y += a[1] * cos(radians(a[0]))
q_diff = 0
if y < 0:
q_diff = pi
elif x < 0:
q_diff = 2 * pi
return degrees(atan(x / y) + q_diff)
def path_from_corners(
corners=None, output="sim.modelpath_hr", min_edge_points=100, close=True
):
"""
corners: python list (4 by 2) containing (lon, lat) in order
otherwise take from velocity model
output: where to store path of (lon, lat) values
min_edge_points: at least this many points wanted along edges
"""
# input data using velocity model
    if corners is None:
        corners = []
        # don't fail importing if not needed
        from params import vel_mod_params
        # load model_params
        with open(vel_mod_params, "r") as mp:
            lines = mp.readlines()
        # find corners using tags
        for tag in ["c1=", "c2=", "c3=", "c4="]:
            for line in lines:
                if tag in line:
                    corners.append(list(map(float, line.split()[1:3])))
# close the box by going back to origin
if close:
corners.append(corners[0])
# until each side has at least wanted number of points
while len(corners) < 4 * min_edge_points:
# work backwards, insertions don't change later indexes
for i in range(len(corners) - 1, 0, -1):
val = ll_mid(
corners[i][0], corners[i][1], corners[i - 1][0], corners[i - 1][1]
)
corners.insert(i, val)
# write points the make the path
    if output is not None:
with open(output, "w") as mp:
for point in corners:
mp.write("%s %s\n" % (point[0], point[1]))
else:
return corners
def get_distances(locations: np.ndarray, lon: float, lat: float):
"""Calculates the distance between the array of locations and
the specified reference location
Parameters
----------
locations : np.ndarray
List of locations
Shape [n_locations, 2], column format (lon, lat)
lon : float
Longitude of the reference location
lat
Latitude of the reference location
Returns
-------
np.ndarray
The distances, shape [n_locations]
"""
d = (
np.sin(np.radians(locations[:, 1] - lat) / 2.0) ** 2
+ np.cos(np.radians(lat))
* np.cos(np.radians(locations[:, 1]))
* np.sin(np.radians(locations[:, 0] - lon) / 2.0) ** 2
)
d = R_EARTH * 2.0 * np.arctan2(np.sqrt(d), np.sqrt(1 - d))
return d
def closest_location(locations, lon, lat):
"""
Find position and distance of closest location in 2D np.array of (lon, lat).
"""
d = get_distances(locations, lon, lat)
i = np.argmin(d)
return i, d[i]
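# Hedged usage sketch with made-up station coordinates (not in the original
# module): find the nearest of several (lon, lat) locations to a query point.
def _example_closest_location():
    stations = np.array([[172.0, -43.5], [174.8, -41.3], [170.5, -45.9]])
    idx, dist_km = closest_location(stations, lon=172.6, lat=-43.5)
    return idx, dist_km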
| [
"numpy.radians",
"math.atan",
"math.sqrt",
"math.radians",
"math.sin",
"numpy.argmin",
"numpy.sin",
"numpy.array",
"math.cos",
"numpy.where",
"numpy.column_stack",
"numpy.cos",
"numpy.linalg.det",
"math.degrees",
"numpy.arctan",
"numpy.sqrt"
] | [((412, 425), 'math.radians', 'radians', (['mrot'], {}), '(mrot)\n', (419, 425), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((437, 445), 'math.cos', 'cos', (['arg'], {}), '(arg)\n', (440, 445), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((457, 465), 'math.sin', 'sin', (['arg'], {}), '(arg)\n', (460, 465), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((477, 497), 'math.radians', 'radians', (['(90.0 - mlat)'], {}), '(90.0 - mlat)\n', (484, 497), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((509, 517), 'math.cos', 'cos', (['arg'], {}), '(arg)\n', (512, 517), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((529, 537), 'math.sin', 'sin', (['arg'], {}), '(arg)\n', (532, 537), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((549, 562), 'math.radians', 'radians', (['mlon'], {}), '(mlon)\n', (556, 562), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((574, 582), 'math.cos', 'cos', (['arg'], {}), '(arg)\n', (577, 582), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((594, 602), 'math.sin', 'sin', (['arg'], {}), '(arg)\n', (597, 602), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((615, 850), 'numpy.array', 'np.array', (['[[cosA * cosT * cosP + sinA * sinP, sinA * cosT * cosP - cosA * sinP, sinT *\n cosP], [cosA * cosT * sinP - sinA * cosP, sinA * cosT * sinP + cosA *\n cosP, sinT * sinP], [-cosA * sinT, -sinA * sinT, cosT]]'], {'dtype': '"""f"""'}), "([[cosA * cosT * cosP + sinA * sinP, sinA * cosT * cosP - cosA *\n sinP, sinT * cosP], [cosA * cosT * sinP - sinA * cosP, sinA * cosT *\n sinP + cosA * cosP, sinT * sinP], [-cosA * sinT, -sinA * sinT, cosT]],\n dtype='f')\n", (623, 850), True, 'import numpy as np\n'), ((1348, 1357), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (1354, 1357), True, 'import numpy as np\n'), ((1399, 1408), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (1405, 1408), True, 'import numpy as np\n'), ((1417, 1457), 'numpy.sqrt', 'np.sqrt', (['(1.0 + sinB * sinB * sinG * sinG)'], {}), '(1.0 + sinB * sinB * sinG * sinG)\n', (1424, 1457), True, 'import numpy as np\n'), ((1527, 1559), 'numpy.sqrt', 'np.sqrt', (['(1.0 - xp * xp - yp * yp)'], {}), '(1.0 - xp * xp - yp * yp)\n', (1534, 1559), True, 'import numpy as np\n'), ((2025, 2052), 'numpy.column_stack', 'np.column_stack', (['(lon, lat)'], {}), '((lon, lat))\n', (2040, 2052), True, 'import numpy as np\n'), ((7403, 7415), 'numpy.argmin', 'np.argmin', (['d'], {}), '(d)\n', (7412, 7415), True, 'import numpy as np\n'), ((1061, 1080), 'numpy.linalg.det', 'np.linalg.det', (['amat'], {}), '(amat)\n', (1074, 1080), True, 'import numpy as np\n'), ((1838, 1856), 'numpy.where', 'np.where', (['(zg < 0.0)'], {}), '(zg < 0.0)\n', (1846, 1856), True, 'import numpy as np\n'), ((1943, 1961), 'numpy.where', 'np.where', (['(xg < 0.0)'], {}), '(xg < 0.0)\n', (1951, 1961), True, 'import numpy as np\n'), ((1980, 2002), 'numpy.where', 'np.where', (['(lon < -180.0)'], {}), '(lon < -180.0)\n', (1988, 2002), True, 'import numpy as np\n'), ((2868, 2881), 'math.degrees', 'degrees', (['lat2'], {}), '(lat2)\n', (2875, 2881), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2883, 2896), 'math.degrees', 'degrees', (['lon2'], {}), '(lon2)\n', (2890, 2896), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3127, 3136), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (3130, 3136), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3139, 3148), 'math.cos', 'cos', (['dlon'], {}), '(dlon)\n', (3142, 3148), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3158, 3167), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (3161, 3167), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3170, 3179), 'math.sin', 'sin', (['dlon'], {}), '(dlon)\n', (3173, 3179), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3316, 3329), 'math.degrees', 'degrees', (['lon3'], {}), '(lon3)\n', (3323, 3329), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3331, 3344), 'math.degrees', 'degrees', (['lat3'], {}), '(lat3)\n', (3338, 3344), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((1474, 1483), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (1480, 1483), True, 'import numpy as np\n'), ((1504, 1513), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (1510, 1513), True, 'import numpy as np\n'), ((1914, 1932), 'numpy.arctan', 'np.arctan', (['(yg / xg)'], {}), '(yg / xg)\n', (1923, 1932), True, 'import numpy as np\n'), ((3198, 3207), 'math.sin', 'sin', (['lat1'], {}), '(lat1)\n', (3201, 3207), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3210, 3219), 'math.sin', 'sin', (['lat2'], {}), '(lat2)\n', (3213, 3219), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3584, 3599), 'math.sin', 'sin', (['(dlat / 2.0)'], {}), '(dlat / 2.0)\n', (3587, 3599), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3685, 3692), 'math.sqrt', 'sqrt', (['a'], {}), '(a)\n', (3689, 3692), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3694, 3705), 'math.sqrt', 'sqrt', (['(1 - a)'], {}), '(1 - a)\n', (3698, 3705), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4865, 4876), 'math.atan', 'atan', (['(x / y)'], {}), '(x / y)\n', (4869, 4876), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((7168, 7178), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (7175, 7178), True, 'import numpy as np\n'), ((7180, 7194), 'numpy.sqrt', 'np.sqrt', (['(1 - d)'], {}), '(1 - d)\n', (7187, 7194), True, 'import numpy as np\n'), ((2675, 2683), 'math.sin', 'sin', (['lat'], {}), '(lat)\n', (2678, 2683), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2686, 2696), 'math.cos', 'cos', (['shift'], {}), '(shift)\n', (2689, 2696), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2723, 2735), 'math.cos', 'cos', (['bearing'], {}), '(bearing)\n', (2726, 2735), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2788, 2796), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (2791, 2796), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2821, 2831), 'math.cos', 'cos', (['shift'], {}), '(shift)\n', (2824, 2831), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3288, 3297), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (3291, 3297), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3607, 3616), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (3610, 3616), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3619, 3628), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (3622, 3628), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((3631, 3646), 'math.sin', 'sin', (['(dlon / 2.0)'], {}), '(dlon / 2.0)\n', (3634, 3646), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4703, 4716), 'math.radians', 'radians', (['a[0]'], {}), '(a[0])\n', (4710, 4716), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4742, 4755), 'math.radians', 'radians', (['a[0]'], {}), '(a[0])\n', (4749, 4755), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2699, 2707), 'math.cos', 'cos', (['lat'], {}), '(lat)\n', (2702, 2707), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2710, 2720), 'math.sin', 'sin', (['shift'], {}), '(shift)\n', (2713, 2720), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2760, 2772), 'math.sin', 'sin', (['bearing'], {}), '(bearing)\n', (2763, 2772), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2775, 2785), 'math.sin', 'sin', (['shift'], {}), '(shift)\n', (2778, 2785), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2834, 2842), 'math.sin', 'sin', (['lat'], {}), '(lat)\n', (2837, 2842), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((2845, 2854), 'math.sin', 'sin', (['lat2'], {}), '(lat2)\n', (2848, 2854), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4091, 4100), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (4094, 4100), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4103, 4116), 'math.sin', 'sin', (['lon_diff'], {}), '(lon_diff)\n', (4106, 4116), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((6938, 6971), 'numpy.radians', 'np.radians', (['(locations[:, 1] - lat)'], {}), '(locations[:, 1] - lat)\n', (6948, 6971), True, 'import numpy as np\n'), ((7001, 7016), 'numpy.radians', 'np.radians', (['lat'], {}), '(lat)\n', (7011, 7016), True, 'import numpy as np\n'), ((7035, 7062), 'numpy.radians', 'np.radians', (['locations[:, 1]'], {}), '(locations[:, 1])\n', (7045, 7062), True, 'import numpy as np\n'), ((1790, 1816), 'numpy.sqrt', 'np.sqrt', (['(xg * xg + yg * yg)'], {}), '(xg * xg + yg * yg)\n', (1797, 1816), True, 'import numpy as np\n'), ((3227, 3236), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (3230, 3236), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4134, 4143), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (4137, 4143), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4146, 4155), 'math.sin', 'sin', (['lat2'], {}), '(lat2)\n', (4149, 4155), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4182, 4195), 'math.cos', 'cos', (['lon_diff'], {}), '(lon_diff)\n', (4185, 4195), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((7081, 7114), 'numpy.radians', 'np.radians', (['(locations[:, 0] - lon)'], {}), '(locations[:, 0] - lon)\n', (7091, 7114), True, 'import numpy as np\n'), ((4158, 4167), 'math.sin', 'sin', (['lat1'], {}), '(lat1)\n', (4161, 4167), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n'), ((4170, 4179), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (4173, 4179), False, 'from math import sin, asin, cos, atan, atan2, degrees, radians, sqrt, pi\n')]
import numpy as np
import math
def exp_fun(average, x):
    # CDF of an exponential distribution with rate `average`: 1 - e**(-average * x)
    return 1 - pow(np.exp(1), -(average * x))
def normal_fun(alpha, sigma, x):
    # Normal pdf with mean `alpha`; note `sigma` is treated as the variance here
    denominator = np.sqrt(sigma) * np.sqrt(2 * np.pi)
    numerator = (np.exp(-pow(x - alpha, 2) / (2 * sigma)))
    val = numerator / denominator
    return val
def dis_fun(alpha, sigma, x):
    # Corresponding normal CDF, again with `sigma` as the variance
    return (1 / 2) * (1 + math.erf((x - alpha) / (np.sqrt(sigma) * np.sqrt(2))))
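# Hedged sanity sketch (not in the original file): dis_fun should behave like
# a CDF -- near 0 far below the mean, near 1 far above it, and exactly 0.5 at
# the mean.
def _example_checks():
    assert dis_fun(0.0, 1.0, -10.0) < 1e-6
    assert dis_fun(0.0, 1.0, 10.0) > 1.0 - 1e-6
    assert abs(dis_fun(0.0, 1.0, 0.0) - 0.5) < 1e-12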
| [
"numpy.exp",
"numpy.sqrt"
] | [((157, 171), 'numpy.sqrt', 'np.sqrt', (['sigma'], {}), '(sigma)\n', (164, 171), True, 'import numpy as np\n'), ((174, 192), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (181, 192), True, 'import numpy as np\n'), ((77, 86), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (83, 86), True, 'import numpy as np\n'), ((383, 397), 'numpy.sqrt', 'np.sqrt', (['sigma'], {}), '(sigma)\n', (390, 397), True, 'import numpy as np\n'), ((400, 410), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (407, 410), True, 'import numpy as np\n')] |
'''
chain2params.py
==============================
Extract parameters from trained model on chainer.
'''
from __future__ import print_function
import argparse
import os
try:
import h5py
except ImportError:
pass
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('chainer_model', help='Path to the trained model.')
args = parser.parse_args()
print('Loading model: {}'.format(args.chainer_model))
try:
namedparams = []
with h5py.File(args.chainer_model, 'r') as f:
for group, data in f.iteritems():
for name, param in data.iteritems():
name = '{}_{}'.format(group, name)
param = np.asarray(param)
namedparams.append((name, param))
print('Serialization type: hdf5')
except Exception:
namedparams = np.load(args.chainer_model).iteritems()
print('Serialization type: npz')
save_dir = os.path.join(os.path.dirname(args.chainer_model), 'params')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
for name, param in namedparams:
filename = os.path.join(save_dir, name.replace('/', '_'))
np.save(filename, param)
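# Hedged companion sketch (not part of the original script): the .npy files
# written above can be reloaded into a name -> array dict like this.
def load_saved_params(params_dir):
    params = {}
    for fname in os.listdir(params_dir):
        if fname.endswith('.npy'):
            params[fname[:-4]] = np.load(os.path.join(params_dir, fname))
    return params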
print('Done') | [
"h5py.File",
"numpy.save",
"numpy.load",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"os.path.dirname",
"numpy.asarray"
] | [((250, 275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (273, 275), False, 'import argparse\n'), ((906, 941), 'os.path.dirname', 'os.path.dirname', (['args.chainer_model'], {}), '(args.chainer_model)\n', (921, 941), False, 'import os\n'), ((960, 983), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (973, 983), False, 'import os\n'), ((989, 1010), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1000, 1010), False, 'import os\n'), ((1110, 1134), 'numpy.save', 'np.save', (['filename', 'param'], {}), '(filename, param)\n', (1117, 1134), True, 'import numpy as np\n'), ((465, 499), 'h5py.File', 'h5py.File', (['args.chainer_model', '"""r"""'], {}), "(args.chainer_model, 'r')\n", (474, 499), False, 'import h5py\n'), ((672, 689), 'numpy.asarray', 'np.asarray', (['param'], {}), '(param)\n', (682, 689), True, 'import numpy as np\n'), ((804, 831), 'numpy.load', 'np.load', (['args.chainer_model'], {}), '(args.chainer_model)\n', (811, 831), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''Light submodule for dGraph scene description module
<NAME>
Jan 2017 - created by splitting off from dGraph
ALL UNITS ARE IN METRIC
ie 1 cm = .01
www.qenops.com
'''
__author__ = ('<NAME>')
__version__ = '1.6'
__all__ = ["Light", "PointLight", "DirectionLight"]
from dGraph import *
import dGraph.shaders as dgshdr
import numpy as np
from numpy.linalg import norm
class Light(object):
''' A world object that casts light
Intensity
Color
'''
def __init__(self, intensity=(1,1,1), **kwargs):
super(Light, self).__init__(**kwargs)
self._intensity = np.array(intensity, np.float32)
def fragmentShader(self, index):
pass
def pushToShader(self, index, shader):
pass
class PointLight(Light):
''' A light with falloff '''
def __init__(self, position = (0,0,0), **kwargs):
super(PointLight, self).__init__(**kwargs)
self._position = np.array(position, np.float32)
def fragmentShader(self, index):
return '''
uniform vec3 light{index}_intensity;
uniform vec3 light{index}_position;
vec3 getLightDirection{index}(vec3 worldLocation) {{
return normalize(light{index}_position - worldLocation);
}}
vec3 getLightIntensity{index}(vec3 worldLocation) {{
return light{index}_intensity;
}}
'''.format(index = index)
def pushToShader(self, index, shader):
#import pdb; pdb.set_trace();
dgshdr.setUniform(shader, 'light{index}_intensity'.format(index=index), np.array(self._intensity, np.float32))
dgshdr.setUniform(shader, 'light{index}_position'.format(index=index), np.array(self._position, np.float32))
class DirectionLight(Light):
''' A light where position doesn't matter, only a direction vector '''
def __init__(self, direction=(0.,0.,1.0), **kwargs):
super(DirectionLight, self).__init__(**kwargs)
self._direction = np.array(direction, np.float32)
def fragmentShader(self, index):
return '''
uniform vec3 light{index}_intensity;
uniform vec3 light{index}_direction;
vec3 getLightDirection{index}(vec3 worldLocation) {{
return normalize(light{index}_direction);
}}
vec3 getLightIntensity{index}(vec3 worldLocation) {{
return light{index}_intensity;
}}
'''.format(index = index)
def pushToShader(self, index, shader):
#import pdb; pdb.set_trace();
dgshdr.setUniform(shader, 'light{index}_intensity'.format(index=index), np.array(self._intensity, np.float32))
dgshdr.setUniform(shader, 'light{index}_direction'.format(index=index), np.array(self._direction, np.float32))
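# Hedged demo (not in the original module): fragmentShader only builds GLSL
# source strings, so the generated shader code can be inspected without a GL
# context (though importing this module still requires the dGraph package).
if __name__ == '__main__':
    print(PointLight(position=(0., 1., 0.)).fragmentShader(0))
    print(DirectionLight(direction=(0., 0., 1.)).fragmentShader(1))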
| [
"numpy.array"
] | [((635, 666), 'numpy.array', 'np.array', (['intensity', 'np.float32'], {}), '(intensity, np.float32)\n', (643, 666), True, 'import numpy as np\n'), ((972, 1002), 'numpy.array', 'np.array', (['position', 'np.float32'], {}), '(position, np.float32)\n', (980, 1002), True, 'import numpy as np\n'), ((1947, 1978), 'numpy.array', 'np.array', (['direction', 'np.float32'], {}), '(direction, np.float32)\n', (1955, 1978), True, 'import numpy as np\n'), ((1532, 1569), 'numpy.array', 'np.array', (['self._intensity', 'np.float32'], {}), '(self._intensity, np.float32)\n', (1540, 1569), True, 'import numpy as np\n'), ((1650, 1686), 'numpy.array', 'np.array', (['self._position', 'np.float32'], {}), '(self._position, np.float32)\n', (1658, 1686), True, 'import numpy as np\n'), ((2495, 2532), 'numpy.array', 'np.array', (['self._intensity', 'np.float32'], {}), '(self._intensity, np.float32)\n', (2503, 2532), True, 'import numpy as np\n'), ((2614, 2651), 'numpy.array', 'np.array', (['self._direction', 'np.float32'], {}), '(self._direction, np.float32)\n', (2622, 2651), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.applications import MobileNet
from tensorflow.keras import initializers
print(tf.__version__)
seed = 42
np.random.seed(seed)
tf.random.set_seed(seed)
def KeypointModel(input_size=128, numclass=196):
mobile_model = MobileNet(
weights="imagenet",
alpha=0.5,
input_tensor=layers.Input(shape=(input_size, input_size, 3),
name='feature'),
include_top=False)
for layer in mobile_model.layers:
layer.trainable = True
x = mobile_model.output
x = layers.GlobalAveragePooling2D()(x)
output = layers.Dense(numclass,
name='predictions',
kernel_initializer=initializers.he_normal(42))(x)
model = Model(inputs=mobile_model.input,
outputs=output, name='facekeypoint')
return model
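# Hedged usage sketch: build the model and check the output shape; assumes
# the MobileNet imagenet weights can be downloaded.
if __name__ == '__main__':
    model = KeypointModel(input_size=128, numclass=196)
    model.summary()
    dummy = np.zeros((1, 128, 128, 3), dtype=np.float32)
    print(model.predict(dummy).shape)  # expected: (1, 196)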
| [
"tensorflow.random.set_seed",
"numpy.random.seed",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.initializers.he_normal"
] | [((249, 269), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (263, 269), True, 'import numpy as np\n'), ((270, 294), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (288, 294), True, 'import tensorflow as tf\n'), ((883, 952), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'mobile_model.input', 'outputs': 'output', 'name': '"""facekeypoint"""'}), "(inputs=mobile_model.input, outputs=output, name='facekeypoint')\n", (888, 952), False, 'from tensorflow.keras.models import Model\n'), ((678, 709), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), '()\n', (707, 709), False, 'from tensorflow.keras import layers\n'), ((445, 508), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(input_size, input_size, 3)', 'name': '"""feature"""'}), "(shape=(input_size, input_size, 3), name='feature')\n", (457, 508), False, 'from tensorflow.keras import layers\n'), ((840, 866), 'tensorflow.keras.initializers.he_normal', 'initializers.he_normal', (['(42)'], {}), '(42)\n', (862, 866), False, 'from tensorflow.keras import initializers\n')] |
'''
@file momentum_kinematics_optimizer.py
@package momentumopt
@author <NAME> (<EMAIL>)
@license License BSD-3-Clause
@copyright Copyright (c) 2019, New York University and Max Planck Gesellschaft.
@date 2019-10-08
'''
import os
import numpy as np
from momentumopt.kinoptpy.qp import QpSolver
from momentumopt.kinoptpy.inverse_kinematics import PointContactInverseKinematics
from momentumopt.kinoptpy.second_order_ik import SecondOrderInverseKinematics
from pinocchio import RobotWrapper
import pinocchio as se3
from pinocchio.utils import zero
from pymomentum import *
from momentumopt.robots.blmc_robot_wrapper import QuadrupedWrapper
from momentumopt.kinoptpy.min_jerk_traj import *
from pymomentum import \
PlannerVectorParam_KinematicDefaultJointPositions, \
PlannerIntParam_NumTimesteps, \
PlannerDoubleParam_TimeStep
class EndeffectorTrajectoryGenerator(object):
    def __init__(self):
        self.z_offset = 0.1
        # Default z-clamp bounds used by get_z_bound(); callers that want a
        # real clamp must overwrite these (previously they were unset, which
        # made get_z_bound() raise AttributeError).
        self.max_bound = np.inf
        self.min_bound = -np.inf
def get_z_bound(self, mom_kin_optimizer):
z_max = min(max(mom_kin_optimizer.com_dyn[:, 2]), self.max_bound)
z_min = max(min(mom_kin_optimizer.com_dyn[:, 2]), self.min_bound)
return z_max, z_min
def is_end_eff_in_contact(self, it, eff, mom_kin_optimizer):
if mom_kin_optimizer.dynamic_sequence.dynamics_states[it].effActivation(eff):
endeff_contact = 1.
else:
endeff_contact = 0.
return endeff_contact
def get_contact_plan_from_dyn_optimizer(self, mom_kin_optimizer):
contact_plan = {}
for i, eff in enumerate(mom_kin_optimizer.eff_names):
contact_plan[eff] = []
start_time = 0.
count = 1
for it in range(mom_kin_optimizer.num_time_steps-1):
current_contact_activation = mom_kin_optimizer.dynamic_sequence.dynamics_states[it].effActivation(i)
last_contact_activation = mom_kin_optimizer.dynamic_sequence.dynamics_states[it+1].effActivation(i)
if not current_contact_activation == last_contact_activation:
if (count%2 == 0):
start_time = it+1
elif (count%2 == 1 or it == mom_kin_optimizer.num_time_steps-1):
end_time = it
plan = [start_time, end_time, mom_kin_optimizer.dynamic_sequence.dynamics_states[it].eff(i)]
contact_plan[eff].append(plan)
count += 1
return contact_plan
def generate_eff_traj(self, mom_kin_optimizer, contact_plan):
eff_traj_poly = {}
for eff in mom_kin_optimizer.eff_names:
num_contacts = len(contact_plan[eff])
poly_traj = [PolynominalList(), PolynominalList(), PolynominalList()]
for i in range(num_contacts):
# Create a constant polynominal for endeffector on the ground.
t = [contact_plan[eff][i][0], contact_plan[eff][i][1]]
for idx in range(3):
poly_traj[idx].append(t, constant_poly(contact_plan[eff][i][2][idx]))
# If there is a contact following, add the transition between
# the two contact points.
if i < num_contacts - 1:
t = [contact_plan[eff][i][1], contact_plan[eff][i+1][0]]
for idx in range(3):
via = None
if idx == 2:
via = self.z_offset + contact_plan[eff][i][2][idx]
poly = poly_points(t, contact_plan[eff][i][2][idx], contact_plan[eff][i+1][2][idx], via)
poly_traj[idx].append(t, poly)
eff_traj_poly[eff] = poly_traj
# returns end eff trajectories
return eff_traj_poly
def __call__(self, mom_kin_optimizer):
'''
Computes the endeffector positions and velocities.
Returns endeff_pos_ref, endeff_vel_ref
[0]: endeff_pos_ref: np.array, shape=[num_time_steps, num_eff, 3={x, y, z}]
[1]: endeff_vel_ref: np.array, shape=[num_time_steps, num_eff, 3={x, y, z}]
'''
dt = mom_kin_optimizer.dt
num_eff = len(mom_kin_optimizer.eff_names)
num_time_steps = mom_kin_optimizer.num_time_steps
contact_plan = self.get_contact_plan_from_dyn_optimizer(mom_kin_optimizer)
# Generate minimum jerk trajectories
eff_traj_poly = self.generate_eff_traj(mom_kin_optimizer, contact_plan)
# Compute the endeffector position and velocity trajectories.
endeff_pos_ref = np.zeros((num_time_steps, num_eff, 3))
endeff_vel_ref = np.zeros((num_time_steps, num_eff, 3))
endeff_contact = np.zeros((num_time_steps, num_eff))
for it in range(num_time_steps):
for eff, name in enumerate(mom_kin_optimizer.eff_names):
endeff_pos_ref[it][eff] = [eff_traj_poly[name][i].eval(it) for i in range(3)]
endeff_vel_ref[it][eff] = [eff_traj_poly[name][i].deval(it)/dt for i in range(3)]
endeff_contact[it][eff] = self.is_end_eff_in_contact(it, eff, mom_kin_optimizer)
return endeff_pos_ref, endeff_vel_ref, endeff_contact
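# Hedged standalone illustration (not part of the original pipeline): the
# swing-foot trajectories above hold each contact point and bridge to the
# next one through a lifted via-point. A minimal analogue using a smoothstep
# blend and a parabolic height bump (p_start/p_end are length-3 arrays):
def _example_swing_profile(p_start, p_end, z_offset, n_steps):
    s = np.linspace(0.0, 1.0, n_steps)
    blend = 3 * s ** 2 - 2 * s ** 3  # smoothstep between the contact points
    xy = np.outer(1 - blend, p_start[:2]) + np.outer(blend, p_end[:2])
    # height follows the blend plus a bump peaking at z_offset mid-swing
    z = p_start[2] + (p_end[2] - p_start[2]) * blend + 4 * z_offset * s * (1 - s)
    return np.column_stack([xy, z])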
class TrajectoryInterpolator(object):
def __init__(self):
self.num_time_steps = None
self.q_init = None
self.init = None
self.end = None
self.poly_traj = None
def generate_trajectory(self, n_via, q_via, dt):
self.poly_traj = []
for i in range(len(self.init)):
self.poly_traj = np.append(self.poly_traj, [PolynominalList()])
for j in range(len(self.init)):
for i in range (n_via+1):
if i==0:
t = [0, q_via[0][0]/dt]
poly = poly_points(t, self.init[j], q_via[i][j+1])
self.poly_traj[j].append(t, poly)
elif(i==n_via):
t = [q_via[i-1][0]/dt, self.num_time_steps]
if t[0] != t[1]: # Avoid singular results at the end.
poly = poly_points(t, q_via[i-1][j+1], self.end[j])
self.poly_traj[j].append(t, poly)
else:
t = [q_via[i-1][0]/dt, q_via[i][0]/dt]
poly = poly_points(t, q_via[i-1][j+1], q_via[i][j+1])
self.poly_traj[j].append(t, poly)
def evaluate_trajecory(self,t):
q = np.zeros((1,len(self.init)),float)
for j in range(len(self.init)):
q[0,j] = self.poly_traj[j].eval(t)
return q
class MomentumKinematicsOptimizer(object):
def __init__(self):
self.q_init = None
self.dq_init = None
self.reg_orientation = 1e-2
self.reg_joint_position = 2.
self.joint_des = None
self.n_via_joint = 0
self.n_via_base = 0
self.via_joint = None
self.via_base = None
def reset(self):
self.kinematics_sequence = KinematicsSequence()
self.kinematics_sequence.resize(self.planner_setting.get(PlannerIntParam_NumTimesteps),
self.planner_setting.get(PlannerIntParam_NumDofs))
def initialize(self, planner_setting, max_iterations=50, eps=0.001, endeff_traj_generator=None,
RobotWrapper=QuadrupedWrapper):
self.planner_setting = planner_setting
if endeff_traj_generator is None:
endeff_traj_generator = EndeffectorTrajectoryGenerator()
self.endeff_traj_generator = endeff_traj_generator
self.dt = planner_setting.get(PlannerDoubleParam_TimeStep)
self.num_time_steps = planner_setting.get(PlannerIntParam_NumTimesteps)
self.max_iterations = max_iterations
self.eps = eps
self.robot = RobotWrapper()
self.reset()
# Holds dynamics and kinematics results
self.com_dyn = np.zeros((self.num_time_steps, 3))
self.lmom_dyn = np.zeros((self.num_time_steps, 3))
self.amom_dyn = np.zeros((self.num_time_steps, 3))
self.com_kin = np.zeros((self.num_time_steps, 3))
self.lmom_kin = np.zeros((self.num_time_steps, 3))
self.amom_kin = np.zeros((self.num_time_steps, 3))
self.q_kin = np.zeros((self.num_time_steps, self.robot.model.nq))
self.dq_kin = np.zeros((self.num_time_steps, self.robot.model.nv))
self.hip_names = ['{}_HFE'.format(eff) for eff in self.robot.effs]
self.hip_ids = [self.robot.model.getFrameId(name) for name in self.hip_names]
self.eff_names = ['{}_{}'.format(eff, self.robot.joints_list[-1]) for eff in self.robot.effs]
self.inv_kin = PointContactInverseKinematics(self.robot.model, self.eff_names)
self.snd_order_inv_kin = SecondOrderInverseKinematics(self.robot.model, self.eff_names)
self.use_second_order_inv_kin = False
self.motion_eff = {
'trajectory': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'velocity': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'trajectory_wrt_base': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne)),
'velocity_wrt_base': np.zeros((self.num_time_steps, 3 * self.inv_kin.ne))
}
def fill_data_from_dynamics(self):
# The centroidal information
for it in range(self.num_time_steps):
self.com_dyn[it] = self.dynamic_sequence.dynamics_states[it].com
self.lmom_dyn[it] = self.dynamic_sequence.dynamics_states[it].lmom
self.amom_dyn[it] = self.dynamic_sequence.dynamics_states[it].amom
def fill_endeffector_trajectory(self):
self.endeff_pos_ref, self.endeff_vel_ref, self.endeff_contact = \
self.endeff_traj_generator(self)
def fill_kinematic_result(self, it, q, dq):
def framesPos(frames):
return np.vstack([data.oMf[idx].translation for idx in frames]).reshape(-1)
def framesVel(frames):
return np.vstack([
self.inv_kin.get_world_oriented_frame_jacobian(q, idx).dot(dq)[:3] for idx in frames
]).reshape(-1)
data = self.inv_kin.robot.data
hg = self.inv_kin.robot.centroidalMomentum(q, dq)
# Storing on the internal array.
self.com_kin[it] = self.inv_kin.robot.com(q).T
self.lmom_kin[it] = hg.linear.T
self.amom_kin[it] = hg.angular.T
self.q_kin[it] = q.T
self.dq_kin[it] = dq.T
# The endeffector informations as well.
self.motion_eff['trajectory'][it] = framesPos(self.inv_kin.endeff_ids)
self.motion_eff['velocity'][it] = self.inv_kin.J[6:(self.inv_kin.ne + 2) * 3].dot(dq).T
self.motion_eff['trajectory_wrt_base'][it] = \
self.motion_eff['trajectory'][it] - framesPos(self.hip_ids)
self.motion_eff['velocity_wrt_base'][it] = \
self.motion_eff['velocity'][it] - framesVel(self.hip_ids)
# Storing on the kinematic sequence.
kinematic_state = self.kinematics_sequence.kinematics_states[it]
kinematic_state.com = self.com_kin[it]
kinematic_state.lmom = self.lmom_kin[it]
kinematic_state.amom = self.amom_kin[it]
kinematic_state.robot_posture.base_position = q[:3]
kinematic_state.robot_posture.base_orientation = q[3:7]
kinematic_state.robot_posture.joint_positions = q[7:]
kinematic_state.robot_velocity.base_linear_velocity = dq[:3]
kinematic_state.robot_velocity.base_angular_velocity = dq[3:6]
kinematic_state.robot_velocity.joint_velocities = dq[6:]
def optimize_initial_position(self, init_state):
# Optimize the initial configuration
q = se3.neutral(self.robot.model)
plan_joint_init_pos = self.planner_setting.get(
PlannerVectorParam_KinematicDefaultJointPositions)
if len(plan_joint_init_pos) != self.robot.num_ctrl_joints:
raise ValueError(
'Number of joints in config file not same as required for robot\n' +
'Got %d joints but robot expects %d joints.' % (
len(plan_joint_init_pos), self.robot.num_ctrl_joints))
q[7:] = plan_joint_init_pos
q[2] = init_state.com[2]
dq = np.zeros(self.robot.robot.nv)
com_ref = init_state.com
lmom_ref = np.zeros(3)
amom_ref = np.zeros(3)
num_eff = len(self.eff_names)
endeff_pos_ref = np.array([init_state.effPosition(i) for i in range(num_eff)])
endeff_vel_ref = np.matrix(np.zeros((num_eff, 3)))
endeff_contact = np.ones(num_eff)
quad_goal = se3.Quaternion(se3.rpy.rpyToMatrix(np.zeros([3,])))
q[3:7] = quad_goal.coeffs()
for iters in range(self.max_iterations):
# Adding small P controller for the base orientation to always start with flat
# oriented base.
quad_q = se3.Quaternion(float(q[6]), float(q[3]), float(q[4]), float(q[5]))
amom_ref = 1e-1 * se3.log((quad_goal * quad_q.inverse()).matrix())
res = self.inv_kin.compute(q, dq, com_ref, lmom_ref, amom_ref,
endeff_pos_ref, endeff_vel_ref, endeff_contact, None)
q = se3.integrate(self.robot.model, q, res)
if np.linalg.norm(res) < 1e-3:
print('Found initial configuration after {} iterations'.format(iters + 1))
break
if iters == self.max_iterations - 1:
print('Failed to converge for initial setup.')
print("initial configuration: \n", q)
self.q_init = q.copy()
self.dq_init = dq.copy()
def optimize(self, init_state, contact_sequence, dynamic_sequence, plotting=False):
self.init_state = init_state
self.contact_sequence = contact_sequence
self.dynamic_sequence = dynamic_sequence
# Create array with centroidal and endeffector informations.
self.fill_data_from_dynamics()
self.fill_endeffector_trajectory()
# Run the optimization for the initial configuration only once.
if self.q_init is None:
self.optimize_initial_position(init_state)
# Generate smooth joint trajectory for regularization
self.joint_des = np.zeros((len(self.q_init[7:]),self.num_time_steps), float)
if self.n_via_joint == 0:
for i in range (self.num_time_steps):
self.joint_des[:,i] = self.q_init[7 : ].T
else:
joint_traj_gen = TrajectoryInterpolator()
joint_traj_gen.num_time_steps = self.num_time_steps
joint_traj_gen.init = self.q_init[7:]
joint_traj_gen.end = self.q_init[7:]
joint_traj_gen.generate_trajectory(self.n_via_joint, self.via_joint, self.dt)
for it in range(self.num_time_steps):
self.joint_des[:,it] = joint_traj_gen.evaluate_trajecory(it)
# Generate smooth base trajectory for regularization
self.base_des = np.zeros((3,self.num_time_steps), float)
if self.n_via_base == 0:
for it in range(self.num_time_steps):
self.base_des[:,it] = np.array([0., 0., 0.]).reshape(-1)
else:
base_traj_gen = TrajectoryInterpolator()
base_traj_gen.num_time_steps = self.num_time_steps
base_traj_gen.init = np.array([0.0, 0.0, 0.0])
base_traj_gen.end = np.array([0.0, 0.0, 0.0])
base_traj_gen.generate_trajectory(self.n_via_base, self.via_base, self.dt)
for it in range(self.num_time_steps):
self.base_des[:,it] = base_traj_gen.evaluate_trajecory(it)
# Compute inverse kinematics over the full trajectory.
self.inv_kin.is_init_time = 0
q, dq = self.q_init.copy(), self.dq_init.copy()
if self.use_second_order_inv_kin:
q_kin, dq_kin, com_kin, lmom_kin, amom_kin, endeff_pos_kin, endeff_vel_kin = \
self.snd_order_inv_kin.solve(self.dt, q, dq, self.com_dyn, self.lmom_dyn,
self.amom_dyn, self.endeff_pos_ref, self.endeff_vel_ref,
self.endeff_contact, self.joint_des.T, self.base_des.T)
for it, (q, dq) in enumerate(zip(q_kin, dq_kin)):
self.inv_kin.forward_robot(q, dq)
self.fill_kinematic_result(it, q, dq)
else:
for it in range(self.num_time_steps):
quad_goal = se3.Quaternion(se3.rpy.rpyToMatrix(np.matrix(self.base_des[:,it]).T))
quad_q = se3.Quaternion(float(q[6]), float(q[3]), float(q[4]), float(q[5]))
## check if this instance belongs to the flight phase and set the momentums accordingly
for eff in range(len(self.eff_names)):
if self.dynamic_sequence.dynamics_states[it].effActivation(eff):
is_flight_phase = False
break
else:
is_flight_phase = True
                ## for flight phase keep the desired momentum constant
                ## (assumes the motion starts in a contact phase, so lmom_ref and
                ## mom_ref_flight are initialised before the first flight step)
                if is_flight_phase:
lmom_ref[0:2] = mom_ref_flight[0:2]
amom_ref = mom_ref_flight[3:6]
lmom_ref[2] -= self.planner_setting.get(PlannerDoubleParam_RobotWeight) * self.dt
else:
lmom_ref = self.lmom_dyn[it]
amom_ref = (self.reg_orientation * se3.log((quad_goal * quad_q.inverse()).matrix()).T + self.amom_dyn[it]).reshape(-1)
joint_regularization_ref = self.reg_joint_position * (self.joint_des[:,it] - q[7 : ])
# Fill the kinematics results for it.
self.inv_kin.forward_robot(q, dq)
self.fill_kinematic_result(it, q, dq)
# Store the momentum to be used in flight phase
if not is_flight_phase:
mom_ref_flight = (self.inv_kin.J[0:6, :].dot(dq)).reshape(-1)
# Compute the inverse kinematics
dq = self.inv_kin.compute(
q, dq, self.com_dyn[it], lmom_ref, amom_ref,
self.endeff_pos_ref[it], self.endeff_vel_ref[it],
self.endeff_contact[it], joint_regularization_ref,
is_flight_phase)
# Integrate to the next state.
q = se3.integrate(self.robot.model, q, dq * self.dt)
| [
"numpy.matrix",
"pinocchio.RobotWrapper",
"numpy.zeros",
"numpy.ones",
"pinocchio.neutral",
"pinocchio.integrate",
"numpy.array",
"momentumopt.kinoptpy.inverse_kinematics.PointContactInverseKinematics",
"momentumopt.kinoptpy.second_order_ik.SecondOrderInverseKinematics",
"numpy.linalg.norm",
"nu... | [((4587, 4625), 'numpy.zeros', 'np.zeros', (['(num_time_steps, num_eff, 3)'], {}), '((num_time_steps, num_eff, 3))\n', (4595, 4625), True, 'import numpy as np\n'), ((4651, 4689), 'numpy.zeros', 'np.zeros', (['(num_time_steps, num_eff, 3)'], {}), '((num_time_steps, num_eff, 3))\n', (4659, 4689), True, 'import numpy as np\n'), ((4715, 4750), 'numpy.zeros', 'np.zeros', (['(num_time_steps, num_eff)'], {}), '((num_time_steps, num_eff))\n', (4723, 4750), True, 'import numpy as np\n'), ((7807, 7821), 'pinocchio.RobotWrapper', 'RobotWrapper', ([], {}), '()\n', (7819, 7821), False, 'from pinocchio import RobotWrapper\n'), ((7916, 7950), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7924, 7950), True, 'import numpy as np\n'), ((7975, 8009), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (7983, 8009), True, 'import numpy as np\n'), ((8034, 8068), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (8042, 8068), True, 'import numpy as np\n'), ((8093, 8127), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (8101, 8127), True, 'import numpy as np\n'), ((8152, 8186), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (8160, 8186), True, 'import numpy as np\n'), ((8211, 8245), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3)'], {}), '((self.num_time_steps, 3))\n', (8219, 8245), True, 'import numpy as np\n'), ((8267, 8319), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, self.robot.model.nq)'], {}), '((self.num_time_steps, self.robot.model.nq))\n', (8275, 8319), True, 'import numpy as np\n'), ((8342, 8394), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, self.robot.model.nv)'], {}), '((self.num_time_steps, self.robot.model.nv))\n', (8350, 8394), True, 'import numpy as np\n'), ((8683, 8746), 'momentumopt.kinoptpy.inverse_kinematics.PointContactInverseKinematics', 'PointContactInverseKinematics', (['self.robot.model', 'self.eff_names'], {}), '(self.robot.model, self.eff_names)\n', (8712, 8746), False, 'from momentumopt.kinoptpy.inverse_kinematics import PointContactInverseKinematics\n'), ((8780, 8842), 'momentumopt.kinoptpy.second_order_ik.SecondOrderInverseKinematics', 'SecondOrderInverseKinematics', (['self.robot.model', 'self.eff_names'], {}), '(self.robot.model, self.eff_names)\n', (8808, 8842), False, 'from momentumopt.kinoptpy.second_order_ik import SecondOrderInverseKinematics\n'), ((11727, 11756), 'pinocchio.neutral', 'se3.neutral', (['self.robot.model'], {}), '(self.robot.model)\n', (11738, 11756), True, 'import pinocchio as se3\n'), ((12282, 12311), 'numpy.zeros', 'np.zeros', (['self.robot.robot.nv'], {}), '(self.robot.robot.nv)\n', (12290, 12311), True, 'import numpy as np\n'), ((12365, 12376), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12373, 12376), True, 'import numpy as np\n'), ((12396, 12407), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (12404, 12407), True, 'import numpy as np\n'), ((12617, 12633), 'numpy.ones', 'np.ones', (['num_eff'], {}), '(num_eff)\n', (12624, 12633), True, 'import numpy as np\n'), ((15036, 15077), 'numpy.zeros', 'np.zeros', (['(3, self.num_time_steps)', 'float'], {}), '((3, self.num_time_steps), float)\n', (15044, 15077), True, 'import numpy as np\n'), ((8944, 8996), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (8952, 
8996), True, 'import numpy as np\n'), ((9022, 9074), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (9030, 9074), True, 'import numpy as np\n'), ((9111, 9163), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (9119, 9163), True, 'import numpy as np\n'), ((9198, 9250), 'numpy.zeros', 'np.zeros', (['(self.num_time_steps, 3 * self.inv_kin.ne)'], {}), '((self.num_time_steps, 3 * self.inv_kin.ne))\n', (9206, 9250), True, 'import numpy as np\n'), ((12568, 12590), 'numpy.zeros', 'np.zeros', (['(num_eff, 3)'], {}), '((num_eff, 3))\n', (12576, 12590), True, 'import numpy as np\n'), ((13263, 13302), 'pinocchio.integrate', 'se3.integrate', (['self.robot.model', 'q', 'res'], {}), '(self.robot.model, q, res)\n', (13276, 13302), True, 'import pinocchio as se3\n'), ((15396, 15421), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (15404, 15421), True, 'import numpy as np\n'), ((15454, 15479), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (15462, 15479), True, 'import numpy as np\n'), ((12689, 12702), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (12697, 12702), True, 'import numpy as np\n'), ((13319, 13338), 'numpy.linalg.norm', 'np.linalg.norm', (['res'], {}), '(res)\n', (13333, 13338), True, 'import numpy as np\n'), ((18436, 18484), 'pinocchio.integrate', 'se3.integrate', (['self.robot.model', 'q', '(dq * self.dt)'], {}), '(self.robot.model, q, dq * self.dt)\n', (18449, 18484), True, 'import pinocchio as se3\n'), ((9879, 9935), 'numpy.vstack', 'np.vstack', (['[data.oMf[idx].translation for idx in frames]'], {}), '([data.oMf[idx].translation for idx in frames])\n', (9888, 9935), True, 'import numpy as np\n'), ((15198, 15223), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (15206, 15223), True, 'import numpy as np\n'), ((16521, 16552), 'numpy.matrix', 'np.matrix', (['self.base_des[:, it]'], {}), '(self.base_des[:, it])\n', (16530, 16552), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Get the cartesian indices of input 1-D arrays
Similar to the Julia CartesianIndices
https://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
"""
import numpy as np
def cartesian(*arrays, order='F'):
"""
-i- arrays : list of array-like,
1-D arrays to form the cartesian product of
-i- order : string, {'C', 'F', 'A'}, see numpy.reshape
'F' changes the first axis fastest ("FORTRAN style" or "column-major")
'C' changes the last axis fastest ("C style" or "row-major")
"""
N = len(arrays)
return np.transpose(np.meshgrid(*arrays, indexing='ij'),
np.roll(np.arange(N + 1), -1)).reshape((-1, N), order=order)
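# Added note (illustrative, not part of the original script): with order='C'
# the rows match itertools.product over the same arrays:
# import itertools
# np.array_equal(cartesian([1, 2], [3, 4], order='C'),
#                np.array(list(itertools.product([1, 2], [3, 4]))))  # -> True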
def main():
print(cartesian([1,2,3], [4,5], [6,7], order='F'))
"""
[[1 4 6]
[2 4 6]
[3 4 6]
[1 5 6]
[2 5 6]
[3 5 6]
[1 4 7]
[2 4 7]
[3 4 7]
[1 5 7]
[2 5 7]
[3 5 7]]
"""
print(cartesian([1,2,3], [4,5], [6,7], order='C'))
"""
[[1 4 6]
[1 4 7]
[1 5 6]
[1 5 7]
[2 4 6]
[2 4 7]
[2 5 6]
[2 5 7]
[3 4 6]
[3 4 7]
[3 5 6]
[3 5 7]]
"""
if __name__ == '__main__':
main()
| [
"numpy.meshgrid",
"numpy.arange"
] | [((636, 671), 'numpy.meshgrid', 'np.meshgrid', (['*arrays'], {'indexing': '"""ij"""'}), "(*arrays, indexing='ij')\n", (647, 671), True, 'import numpy as np\n'), ((690, 706), 'numpy.arange', 'np.arange', (['(N + 1)'], {}), '(N + 1)\n', (699, 706), True, 'import numpy as np\n')] |
# Ground truth is from covid-hospitalization-all-state-merged_vEW202210.csv
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import glob
from epiweeks import Week
from metrics import *
EPS = 1e-6
import matplotlib.pyplot as plt
import math
# In[2]:
# ground truth
df_ground_truth = pd.read_csv("ground_truth.csv")
# In[3]:
df_ground_truth.head()
df_grnd = df_ground_truth[["epiweek", "region", "cdc_flu_hosp"]]
df_grnd = df_grnd[df_grnd["epiweek"] >= 202201]
df_grnd = df_grnd.rename(
columns={"epiweek": "predicted_week", "cdc_flu_hosp": "value", "region": "location"}
)
# Location codes use "X" for the national aggregate; map it to "US".
df_grnd["location"] = df_grnd["location"].str.replace("X", "US")
# The replace above also turned "TX" into "TUS"; undo that.
df_grnd["location"] = df_grnd["location"].str.replace("TUS", "TX")
df_grnd = df_grnd.sort_values("location", kind="mergesort")
# df_grnd.head()
# In[4]:
file_dir = "./predictions.csv"
df_total = pd.read_csv(file_dir)
# In[5]:
df_total["model"].nunique()
df_final = df_total.copy()
# In[6]:
all_model_names = np.array(df_final["model"].drop_duplicates())
df_gt = df_final[df_final["model"] == "GT-FluFNP"]
# GT-FluFNP model hasn't predicted for some locations
all_regions = np.array(df_gt["location"].drop_duplicates())
regions_ground_truth = np.array(df_grnd["location"].drop_duplicates())
# In[7]:
df_point = df_final[df_final["type"] == "point"]
df_quant = df_final[df_final["type"] == "quantile"]
# In[8]:
weeks = np.array(df_point["forecast_week"].drop_duplicates())
max_week = df_grnd["predicted_week"].max()
# In[9]:
df_point["predicted_week"] = df_point["forecast_week"] + df_point["ahead"]
# Have ground truth only till week 10
df_point = df_point[df_point["predicted_week"] <= max_week]
# In[10]:
# Merging the two datasets on predicted week
df_newpoint = pd.merge(df_point, df_grnd, on="predicted_week")
# Removing all unnecessary merges
df_newpoint = df_newpoint[df_newpoint["location_x"] == df_newpoint["location_y"]]
# In[11]:
rmse_all = []
nrmse_all = []
model_all = []
mape_all = []
week_ahead = []
regions = []
# In[ ]:
for model in all_model_names:
for i in range(1, 5):
for region in all_regions:
sample = df_newpoint[
(df_newpoint["model"] == model)
& (df_newpoint["ahead"] == i)
& (df_newpoint["location_x"] == region)
]["value_x"].values
target = df_newpoint[
(df_newpoint["model"] == model)
& (df_newpoint["ahead"] == i)
& (df_newpoint["location_x"] == region)
]["value_y"].values
rmse_all.append(rmse(sample, target))
nrmse_all.append(norm_rmse(sample, target))
            # Replace zeros in the target with EPS so MAPE does not divide by zero.
target = np.array([EPS if x == 0 else x for x in target]).reshape(
(len(target), 1)
)
mape_all.append(mape(sample, target))
model_all.append(model)
week_ahead.append(i)
regions.append(region)
# In[ ]:
df_point_scores = pd.DataFrame.from_dict(
{
"Model": model_all,
"RMSE": rmse_all,
"NRMSE": nrmse_all,
"MAPE": mape_all,
"Weeks ahead": week_ahead,
"Location": regions,
}
)
# In[ ]:
df_point_scores.to_csv("point_scores.csv")
# In[12]:
# target is ground truth
df_quant = df_final[df_final["type"] == "quantile"]
# In[13]:
# norm_val = (df_quant['value']-df_quant['value'].min())/(df_quant['value'].max()-df_quant['value'].min())
norm_df_quant = df_quant.copy()
norm_df_quant["predicted_week"] = (
norm_df_quant["forecast_week"] + norm_df_quant["ahead"]
)
norm_df_quant = norm_df_quant[norm_df_quant["predicted_week"] <= max_week]
# In[64]:
week_ahead = []
regions = []
crps_all = []
ls_all = []
model_all = []
cs_all = []
# In[65]:
# Suppress runtime warnings ("invalid value encountered in multiply") raised
# inside the score computations; they do not affect the reported results.
import warnings
warnings.filterwarnings("ignore")
# In[66]:
# All models
count = 0
for model in all_model_names:
print("Compiling scores of model ", model)
print(f"Model {count}/{len(all_model_names)}")
count += 1
# All Weeks ahead
for i in range(1, 5):
print("Week ahead ", i)
# All regions
for region in all_regions:
# Dataset with information about Ground truth ('value_y') and predictions ('value_x')
target = df_newpoint[
(df_newpoint["model"] == model)
& (df_newpoint["ahead"] == i)
& (df_newpoint["location_x"] == region)
]
norm_model = norm_df_quant[
(norm_df_quant["model"] == model)
& (norm_df_quant["ahead"] == i)
& (norm_df_quant["location"] == region)
]
mean_ = []
std_ = []
var_ = []
tg_vals = []
pred_vals = []
weeks = np.array(target["forecast_week"].drop_duplicates())
if len(weeks) != 0:
for week in weeks:
# Append point predictions
point_val = target[(target["forecast_week"] == week)][
"value_x"
].values
mean_.append(point_val)
if len(point_val) == 0:
print(i, week, region, model)
# Append point pred as predictions
predval = target[(target["forecast_week"] == week)][
"value_y"
].values
pred_vals.append(predval)
# Append ground truth as target
tgval = target[(target["forecast_week"] == week)]["value_y"].values
tg_vals.append(tgval)
# Find std from quantiles
b = norm_model[
(norm_model["forecast_week"] == week)
& (norm_model["quantile"] == 0.75)
]["value"].values
a = norm_model[
(norm_model["forecast_week"] == week)
& (norm_model["quantile"] == 0.25)
]["value"].values
                    # The interquartile range (Q3 - Q1) divided by 1.35
                    # approximates the standard deviation of a normal distribution.
                    std = (b - a) / 1.35
                    var = std**2
std_.append(std)
var_.append(var)
std_ = np.array(std_)
var_ = np.array(var_)
pred_vals = np.array(pred_vals)
mean_ = np.array(mean_)
tg_vals = np.array(tg_vals)
if len(tg_vals) == 0:
print(
"No target found for week ahead ",
i,
" region ",
region,
"model ",
model,
)
                # Compute CRPS and log score under the normal approximation
                # built from the point forecast (mean) and quantile spread (std).
                cr = crps(mean_, std_, tg_vals)
                ls = log_score(mean_, std_, tg_vals, window=0.1)
                # Floor extremely negative log scores so one bad forecast
                # cannot dominate aggregate summaries.
                if ls < -10:
                    ls = -10
# print(cr, ls, "hi")
auc, cs, _ = get_pr(mean_, std_**2, tg_vals)
# if(ls<-10 or math.isnan(ls)):
# ls = -10
# elif(ls>10):
# ls = 10
# if(math.isnan(cr)):
# cr = 0
crps_all.append(cr)
ls_all.append(ls)
# print(cs)
cs_all.append(cs)
else:
crps_all.append(np.nan)
ls_all.append(np.nan)
cs_all.append(np.nan)
week_ahead.append(i)
regions.append(region)
model_all.append(model)
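# Reference (added): the CRPS above assumes a normal predictive distribution.
# A hypothetical closed-form implementation, shown only for comparison with
# `crps` from metrics.py (which is what this script actually calls):
def gaussian_crps(mu, sigma, y):
    from scipy.stats import norm
    z = (y - mu) / sigma
    return sigma * (z * (2 * norm.cdf(z) - 1) + 2 * norm.pdf(z) - 1 / np.sqrt(np.pi))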
# In[67]:
df_spread_scores = pd.DataFrame.from_dict(
{
"Model": model_all,
"Weeks ahead": week_ahead,
"Location": regions,
"LS": ls_all,
"CRPS": crps_all,
"CS": cs_all,
}
)
# In[68]:
df_spread_scores.isna().sum()
# In[70]:
df_spread_scores.to_csv("spread_scores.csv")
| [
"pandas.DataFrame.from_dict",
"warnings.filterwarnings",
"pandas.read_csv",
"pandas.merge",
"numpy.array"
] | [((319, 350), 'pandas.read_csv', 'pd.read_csv', (['"""ground_truth.csv"""'], {}), "('ground_truth.csv')\n", (330, 350), True, 'import pandas as pd\n'), ((881, 902), 'pandas.read_csv', 'pd.read_csv', (['file_dir'], {}), '(file_dir)\n', (892, 902), True, 'import pandas as pd\n'), ((1842, 1890), 'pandas.merge', 'pd.merge', (['df_point', 'df_grnd'], {'on': '"""predicted_week"""'}), "(df_point, df_grnd, on='predicted_week')\n", (1850, 1890), True, 'import pandas as pd\n'), ((3109, 3265), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'Model': model_all, 'RMSE': rmse_all, 'NRMSE': nrmse_all, 'MAPE': mape_all,\n 'Weeks ahead': week_ahead, 'Location': regions}"], {}), "({'Model': model_all, 'RMSE': rmse_all, 'NRMSE':\n nrmse_all, 'MAPE': mape_all, 'Weeks ahead': week_ahead, 'Location':\n regions})\n", (3131, 3265), True, 'import pandas as pd\n'), ((3987, 4020), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (4010, 4020), False, 'import warnings\n'), ((8112, 8254), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'Model': model_all, 'Weeks ahead': week_ahead, 'Location': regions, 'LS':\n ls_all, 'CRPS': crps_all, 'CS': cs_all}"], {}), "({'Model': model_all, 'Weeks ahead': week_ahead,\n 'Location': regions, 'LS': ls_all, 'CRPS': crps_all, 'CS': cs_all})\n", (8134, 8254), True, 'import pandas as pd\n'), ((6555, 6569), 'numpy.array', 'np.array', (['std_'], {}), '(std_)\n', (6563, 6569), True, 'import numpy as np\n'), ((6593, 6607), 'numpy.array', 'np.array', (['var_'], {}), '(var_)\n', (6601, 6607), True, 'import numpy as np\n'), ((6636, 6655), 'numpy.array', 'np.array', (['pred_vals'], {}), '(pred_vals)\n', (6644, 6655), True, 'import numpy as np\n'), ((6680, 6695), 'numpy.array', 'np.array', (['mean_'], {}), '(mean_)\n', (6688, 6695), True, 'import numpy as np\n'), ((6722, 6739), 'numpy.array', 'np.array', (['tg_vals'], {}), '(tg_vals)\n', (6730, 6739), True, 'import numpy as np\n'), ((2819, 2869), 'numpy.array', 'np.array', (['[(EPS if x == 0 else x) for x in target]'], {}), '([(EPS if x == 0 else x) for x in target])\n', (2827, 2869), True, 'import numpy as np\n')] |
from mux import *
from transfer import *
import numpy as np
import matplotlib.pyplot as plt
from parameters import *
for name, max_val in zip(names, max_vals):
x = np.logspace(0, max_val, 1000)
x = x.astype(np.double)
print("ID_"+name, end="...")
print(*params[cells["ID_"+name]])
print("NOT_ID_"+name, end="...")
print(*params[cells["NOT_"+name]])
print("######")
y = T_f(x, *params[cells["ID_"+name]])
inv_y = iT_f(y,*params[cells["ID_"+name]])
#print(x-inv_y)
not_y = T_f(x, *params[cells["NOT_"+name]])
not_inv_y = iT_f(not_y,*params[cells["NOT_"+name]])
#print(x-not_inv_y)
gamma, alpha, omega, n = params[cells["NOT_"+name]]
l = alpha-(not_y/gamma)
plt.plot(l,x, '.', label='x')
plt.plot(l, not_inv_y, 'x',label='x_approx')
plt.legend()
plt.show()
plt.plot(x,y, label = "ID_"+name)
plt.plot(inv_y,y,'o', label = "ID_"+name+" approx.")
plt.plot(x,not_y, label = "NOT_"+name)
plt.plot(not_inv_y,not_y,'x', label = "NOT_"+name+" approx.")
plt.legend()
ax = plt.gca()
ax.set_xscale('log')
plt.show()
#plt.plot(x,not_inv_y)
#plt.show()
#break
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.logspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca"
] | [((177, 206), 'numpy.logspace', 'np.logspace', (['(0)', 'max_val', '(1000)'], {}), '(0, max_val, 1000)\n', (188, 206), True, 'import numpy as np\n'), ((747, 777), 'matplotlib.pyplot.plot', 'plt.plot', (['l', 'x', '"""."""'], {'label': '"""x"""'}), "(l, x, '.', label='x')\n", (755, 777), True, 'import matplotlib.pyplot as plt\n'), ((781, 826), 'matplotlib.pyplot.plot', 'plt.plot', (['l', 'not_inv_y', '"""x"""'], {'label': '"""x_approx"""'}), "(l, not_inv_y, 'x', label='x_approx')\n", (789, 826), True, 'import matplotlib.pyplot as plt\n'), ((830, 842), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (840, 842), True, 'import matplotlib.pyplot as plt\n'), ((847, 857), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (855, 857), True, 'import matplotlib.pyplot as plt\n'), ((864, 898), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': "('ID_' + name)"}), "(x, y, label='ID_' + name)\n", (872, 898), True, 'import matplotlib.pyplot as plt\n'), ((902, 958), 'matplotlib.pyplot.plot', 'plt.plot', (['inv_y', 'y', '"""o"""'], {'label': "('ID_' + name + ' approx.')"}), "(inv_y, y, 'o', label='ID_' + name + ' approx.')\n", (910, 958), True, 'import matplotlib.pyplot as plt\n'), ((959, 998), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'not_y'], {'label': "('NOT_' + name)"}), "(x, not_y, label='NOT_' + name)\n", (967, 998), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1067), 'matplotlib.pyplot.plot', 'plt.plot', (['not_inv_y', 'not_y', '"""x"""'], {'label': "('NOT_' + name + ' approx.')"}), "(not_inv_y, not_y, 'x', label='NOT_' + name + ' approx.')\n", (1010, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1080), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1078, 1080), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1100), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1098, 1100), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1149), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1147, 1149), True, 'import matplotlib.pyplot as plt\n')] |
# Figure
# Running times of computation tasks
import numpy as np
import matplotlib.pyplot as plt
# from brokenaxes import brokenaxes
# x = np.linspace(1, 8, 8)
x = np.array([1,16,32,48,64,80,96,112,128,144,160])
y1 = [0.685,10.985,22.058,33.032,43.961,55.007,66.084,77.082,87.850,99.113,110.151] # DataBroker attesting to parallel DataOwners TCSNUM = 1 (sequential)
y2 = [0.674,3.366,6.728,10.147,13.515,16.896,20.277,23.582,26.979,30.460,33.799] # DataBroker attesting to parallel DataOwners TCSNUM = 4
y3 = [0.688,1.500,2.956,4.501,6.012,7.448,8.899,10.395,11.929,13.343,14.831] # DataBroker attesting to parallel DataOwners TCSNUM = 16
y4 = [0.689,1.509,2.386,3.834,4.746,6.173,6.990,8.457,9.255,10.822,11.712] # DataBroker attesting to parallel DataOwners TCSNUM = 32
y5 = [0.690,1.506,2.376,3.203,4.030,5.557,6.387,7.285,8.157,9.583,10.398] # DataBroker attesting to parallel DataOwners TCSNUM = 64
y6 = [0.697,1.517,2.396,3.287,4.143,4.972,5.838,6.612,7.467,8.972,9.881] # DataBroker attesting to parallel DataOwners TCSNUM = 128
fig, ax = plt.subplots()
# ax = brokenaxes(ylims=((0, 20.0), (100.0, 140.0)), hspace=.1)
line1 = ax.plot(x, y1, 'o-', label='Sequential (30.88MB enclave)', color='black', markersize=6)
line2 = ax.plot(x, y2, 'x-', label='4 threads (32.51MB enclave)', color='magenta', markersize=6)
line3 = ax.plot(x, y3, 's-', label='16 threads (38.99MB enclave)', color='green', markersize=6)
# line4 = ax.plot(x, y4, 's-', label='32 threads (47.64MB enclave)', color='red', markersize=6)
line5 = ax.plot(x, y5, '^-', label='64 threads (64.95MB enclave)', color='blue', markersize=6)
# line6 = ax.plot(x, y6, 'o-', label='128 threads (99.55MB enclave)', color='cyan', markersize=6)
ax.set_xlabel('N (Number of DataOwners)', fontsize=12)
ax.set_ylabel('Attestation Time (seconds)', fontsize=12)
# ax.set_title('Runtimes of Training a 14x8x8x2 ANN Classifier', fontsize=14)
ax.legend(fontsize = 12, loc = 'upper left')
# plt.ylim(-5,170)
# plt.ylim(0,8)
plt.xticks(x, ['1','16','32','48','64','80','96','112','128','144','160'], fontsize=11)
# plt.yticks([-10,0,20,40,60,80,100,120,140,160], ['-10','0','20','40','60','80','100','120','140','160'], fontsize=11)
# plt.text(80, 15, 'DataBroker \nenclave size: 2.3 MB', color='magenta', fontsize=12)
# plt.text(90, 60, 'CEE enclave size: \n118.7 MB', color='blue', fontsize=12)
plt.grid()
plt.show() | [
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid"
] | [((167, 224), 'numpy.array', 'np.array', (['[1, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160]'], {}), '([1, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160])\n', (175, 224), True, 'import numpy as np\n'), ((1051, 1065), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1063, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2092), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', "['1', '16', '32', '48', '64', '80', '96', '112', '128', '144', '160']"], {'fontsize': '(11)'}), "(x, ['1', '16', '32', '48', '64', '80', '96', '112', '128', '144',\n '160'], fontsize=11)\n", (2001, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2375), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2373, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2376, 2386), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2384, 2386), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import pymc
class TestObsCorrelation():
def prepare_model(self, n, nad, order):
l1 = pymc.GrapheneLattice(n)
FK = pymc.Hamiltonian(lattice=l1, t=-1, U=2)
o_C = pymc.CorrelationObs(FK)
for _ in range(100):
FK.put_adatoms(nad, order)
o_C.calculate()
return o_C.get_result()
def test_obs_correlation_1(self):
d = self.prepare_model(10, 50, "sublattice")
np.testing.assert_almost_equal(d, 1.0)
def test_obs_correlation_2(self):
d = self.prepare_model(10, 90, "sublattice")
np.testing.assert_almost_equal(d, 1.0)
def test_obs_correlation_3(self):
d = self.prepare_model(10, 50, "separation")
assert d < -0.5
def test_obs_correlation_4(self):
d = self.prepare_model(10, 10, "separation")
assert d < -0.5
def test_obs_correlation_5(self):
d = self.prepare_model(10, 90, "separation")
assert d < -0.5
def test_obs_correlation_6(self):
l1 = pymc.GrapheneLattice(10)
FK = pymc.Hamiltonian(lattice=l1, t=-1, U=2)
o_C = pymc.CorrelationObs(FK)
for _ in range(100):
FK.put_adatoms(50, "random")
o_C.calculate()
d = o_C.get_result()
assert len(o_C.value_list) == 100
np.testing.assert_almost_equal(abs(d), 0, decimal=1)
    def test_obs_correlation_7(self):
l1 = pymc.GrapheneLattice(10)
FK = pymc.Hamiltonian(lattice=l1, t=-1, U=2)
o_C = pymc.CorrelationObs(FK)
for _ in range(100):
FK.put_adatoms(50, "random")
o_C.calculate()
o_C.reset()
assert o_C.get_result() is None
| [
"pymc.Hamiltonian",
"pymc.CorrelationObs",
"numpy.testing.assert_almost_equal",
"pymc.GrapheneLattice"
] | [((119, 142), 'pymc.GrapheneLattice', 'pymc.GrapheneLattice', (['n'], {}), '(n)\n', (139, 142), False, 'import pymc\n'), ((156, 195), 'pymc.Hamiltonian', 'pymc.Hamiltonian', ([], {'lattice': 'l1', 't': '(-1)', 'U': '(2)'}), '(lattice=l1, t=-1, U=2)\n', (172, 195), False, 'import pymc\n'), ((210, 233), 'pymc.CorrelationObs', 'pymc.CorrelationObs', (['FK'], {}), '(FK)\n', (229, 233), False, 'import pymc\n'), ((462, 500), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['d', '(1.0)'], {}), '(d, 1.0)\n', (492, 500), True, 'import numpy as np\n'), ((601, 639), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['d', '(1.0)'], {}), '(d, 1.0)\n', (631, 639), True, 'import numpy as np\n'), ((1040, 1064), 'pymc.GrapheneLattice', 'pymc.GrapheneLattice', (['(10)'], {}), '(10)\n', (1060, 1064), False, 'import pymc\n'), ((1078, 1117), 'pymc.Hamiltonian', 'pymc.Hamiltonian', ([], {'lattice': 'l1', 't': '(-1)', 'U': '(2)'}), '(lattice=l1, t=-1, U=2)\n', (1094, 1117), False, 'import pymc\n'), ((1132, 1155), 'pymc.CorrelationObs', 'pymc.CorrelationObs', (['FK'], {}), '(FK)\n', (1151, 1155), False, 'import pymc\n'), ((1439, 1463), 'pymc.GrapheneLattice', 'pymc.GrapheneLattice', (['(10)'], {}), '(10)\n', (1459, 1463), False, 'import pymc\n'), ((1477, 1516), 'pymc.Hamiltonian', 'pymc.Hamiltonian', ([], {'lattice': 'l1', 't': '(-1)', 'U': '(2)'}), '(lattice=l1, t=-1, U=2)\n', (1493, 1516), False, 'import pymc\n'), ((1531, 1554), 'pymc.CorrelationObs', 'pymc.CorrelationObs', (['FK'], {}), '(FK)\n', (1550, 1554), False, 'import pymc\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 22 14:14:34 2020
@author: mclea
"""
import numpy as np
import matplotlib.pyplot as plt
dx = 0.01
X = np.arange(-4, 4+dx, dx)
Y = np.arange(-4, 4+dx, dx)
XY = np.meshgrid(X, Y)
a = 1
epsilon = 0.02
N = np.array([[1,0],
[0,1]])
def cartesian_to_polar(x, y):
"""
Parameters
----------
    x : Numpy array
        A numpy array of x coordinates
    y : Numpy array
        A numpy array of y coordinates
Returns
-------
r : The radial distance
theta : The angle, in radians
"""
r = np.sqrt(x**2 + y**2)
    theta = np.arctan2(y, x)  # arctan2 handles x = 0 and preserves the quadrant
return (r, theta)
def calc_nearest_point(r, theta, a):
"""
Parameters
----------
r : Numpy array
The radial distance
theta : Numpy array
The angle, in radians
    a : float
        The radius of the circle (points are projected radially onto it)
    Returns
    -------
    (new_r, theta) : tuple of Numpy arrays
        The polar coordinates of the nearest point on the circle: the input
        angles with the radial distance set to a.
"""
new_r = np.ones(r.shape)*a
return (new_r, theta)
def calc_distance(original, closest):
"""
Parameters
----------
original : Numpy array
The coordinates where you want to know the distance to the closest surface
    closest : tuple of Numpy arrays
        The (r, theta) coordinates of the closest point on the surface
Returns
-------
distance : Numpy array
The distance to the nearest surface, negative within the body.
"""
distance = original[0]-closest[0]
return distance
def calc_delta_solid_body(d, epsilon):
"""
    Calculates the interpolation function delta for an immersed solid body.
    Parameters
    ----------
    d : numpy array
        The signed distance to the nearest point on a fluid/solid interface,
        chosen negative within the solid.
    epsilon : float
        The kernel half-width; the smoothed transition extends epsilon to
        either side of the interface.
    Returns
    -------
    delta : numpy array
        Returns the kernel zeroth moment over a body, delta epsilon B. The
        value is 1 within the body, 0 on the exterior, and a smooth
        transition over 2*epsilon.
"""
    in_kernel = abs(d)/epsilon < 1
    # <= so points exactly at d = -epsilon (the inner edge of the smoothing
    # band) still receive delta = 1.
    deep_inside = d/epsilon <= -1
    delta = np.zeros(d.shape)
    delta[in_kernel] = 0.5*(1-np.sin((np.pi/2)*(d[in_kernel]/epsilon)))
    delta[deep_inside] = 1
return delta
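# Added 1-D sanity check (illustrative, not part of the original script):
# calc_delta_solid_body(np.array([-2.0, -0.5*epsilon, 0.0, 0.5*epsilon, 2.0]), epsilon)
# -> approximately [1.0, 0.854, 0.5, 0.146, 0.0]: 1 deep inside the body,
#    0 far outside, and exactly 0.5 on the interface.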
def divergence(f):
"""
Calculates the divergence of a field f(x, y, z,...)
Parameters
----------
f : Numpy array
A numpy array of a vector field [U, V, W]
Returns
-------
divergence:
The divergence of the vector field [U, V, W]
"""
num_dims = len(f)
return np.ufunc.reduce(np.add, [np.gradient(f[i], axis=i) for i in range(num_dims)])
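# Added sanity check (illustrative): np.gradient assumes unit spacing, so for
# the field f = (Y, X) on an integer grid the divergence is 2 everywhere:
# _yy, _xx = np.meshgrid(np.arange(5), np.arange(5), indexing='ij')
# divergence([_yy, _xx])  # -> 2.0 everywhere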
U = -1*np.ones(XY[0].shape) # Uniform flow of magnitude 1
V = 0*np.ones(XY[0].shape) # Zero flow up
velocity = np.array([U, V])
polar_coords = cartesian_to_polar(XY[0], XY[1])
nearest_coords = calc_nearest_point(polar_coords[0], polar_coords[1], a)
distance = calc_distance(polar_coords, nearest_coords)
delta = calc_delta_solid_body(distance, epsilon)
velocity = delta*velocity
g = divergence(velocity)
# uu = delta * U
# B = np.diff(delta*U, axis=1)
# plt.figure()
plt.contourf(XY[0], XY[1], g, cmap="RdGy")
plt.colorbar(); | [
"numpy.meshgrid",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.colorbar",
"numpy.sin",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.contourf",
"numpy.arctan",
"numpy.gradient",
"numpy.sqrt"
] | [((150, 175), 'numpy.arange', 'np.arange', (['(-4)', '(4 + dx)', 'dx'], {}), '(-4, 4 + dx, dx)\n', (159, 175), True, 'import numpy as np\n'), ((178, 203), 'numpy.arange', 'np.arange', (['(-4)', '(4 + dx)', 'dx'], {}), '(-4, 4 + dx, dx)\n', (187, 203), True, 'import numpy as np\n'), ((207, 224), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (218, 224), True, 'import numpy as np\n'), ((251, 277), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (259, 277), True, 'import numpy as np\n'), ((2761, 2777), 'numpy.array', 'np.array', (['[U, V]'], {}), '([U, V])\n', (2769, 2777), True, 'import numpy as np\n'), ((3120, 3162), 'matplotlib.pyplot.contourf', 'plt.contourf', (['XY[0]', 'XY[1]', 'g'], {'cmap': '"""RdGy"""'}), "(XY[0], XY[1], g, cmap='RdGy')\n", (3132, 3162), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3177), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3175, 3177), True, 'import matplotlib.pyplot as plt\n'), ((587, 611), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (594, 611), True, 'import numpy as np\n'), ((620, 636), 'numpy.arctan', 'np.arctan', (['(y / x)'], {}), '(y / x)\n', (629, 636), True, 'import numpy as np\n'), ((2114, 2131), 'numpy.zeros', 'np.zeros', (['d.shape'], {}), '(d.shape)\n', (2122, 2131), True, 'import numpy as np\n'), ((2657, 2677), 'numpy.ones', 'np.ones', (['XY[0].shape'], {}), '(XY[0].shape)\n', (2664, 2677), True, 'import numpy as np\n'), ((2714, 2734), 'numpy.ones', 'np.ones', (['XY[0].shape'], {}), '(XY[0].shape)\n', (2721, 2734), True, 'import numpy as np\n'), ((946, 962), 'numpy.ones', 'np.ones', (['r.shape'], {}), '(r.shape)\n', (953, 962), True, 'import numpy as np\n'), ((2162, 2206), 'numpy.sin', 'np.sin', (['(np.pi / 2 * (d[in_kernal] / epsilon))'], {}), '(np.pi / 2 * (d[in_kernal] / epsilon))\n', (2168, 2206), True, 'import numpy as np\n'), ((2596, 2621), 'numpy.gradient', 'np.gradient', (['f[i]'], {'axis': 'i'}), '(f[i], axis=i)\n', (2607, 2621), True, 'import numpy as np\n')] |
"""This module consists of functions for simulating the phase shift of a given
object.
It contained two functions:
1) linsupPhi - using the linear superposition principle for application in model
based iterative reconstruction (MBIR) type 3D reconstruction of magnetization
(both magnetic and electrostatic). This also includes a helper function that
makes use of numba and just-in-time (jit) compilation.
2) mansPhi - using the Mansuripur Algorithm to compute the phase shift (only
magnetic)
Authors: <NAME>, <NAME> June 2020
"""
import numpy as np
import time
import numba
from numba import jit
@jit(nopython=True, parallel=True)
def exp_sum(mphi_k, ephi_k, inds, KY, KX, j_n, i_n, my_n, mx_n, Sy, Sx):
"""Called by linsupPhi when running with multiprocessing and numba.
Numba incorporates just-in-time (jit) compiling and multiprocessing to numpy
array calculations, greatly speeding up the phase-shift computation beyond
that of pure vectorization and without the memory cost. Running this
for the first time each session will take an additional 5-10 seconds as it is
compiled.
This function could be further improved by sending it to the GPU, or likely
by other methods we haven't considered. If you have suggestions (or better
yet, written and tested code) please email <EMAIL>.
"""
for i in numba.prange(np.shape(inds)[0]):
z = int(inds[i,0])
y = int(inds[i,1])
x = int(inds[i,2])
sum_term = np.exp(-1j * (KY*j_n[z,y,x] + KX*i_n[z,y,x]))
ephi_k += sum_term
mphi_k += sum_term * (my_n[z,y,x]*Sx - mx_n[z,y,x]*Sy)
return ephi_k, mphi_k
def linsupPhi(mx=1.0, my=1.0, mz=1.0, Dshp=None, theta_x=0.0, theta_y=0.0, pre_B=1.0, pre_E=1, v=1, multiproc=True):
"""Applies linear superposition principle for 3D reconstruction of magnetic and electrostatic phase shifts.
This function will take 3D arrays with Mx, My and Mz components of the
magnetization, the Dshp array consisting of the shape function for the
object (1 inside, 0 outside), and the tilt angles about x and y axes to
compute the magnetic and the electrostatic phase shift. Initial computation
is done in Fourier space and then real space values are returned.
Args:
mx (3D array): x component of magnetization at each voxel (z,y,x)
my (3D array): y component of magnetization at each voxel (z,y,x)
mz (3D array): z component of magnetization at each voxel (z,y,x)
Dshp (3D array): Binary shape function of the object. Where value is 0,
phase is not computed.
theta_x (float): Rotation around x-axis (degrees). Rotates around x axis
then y axis if both are nonzero.
theta_y (float): Rotation around y-axis (degrees)
pre_B (float): Numerical prefactor for unit conversion in calculating
the magnetic phase shift. Units 1/pixels^2. Generally
(2*pi*b0*(nm/pix)^2)/phi0 , where b0 is the Saturation induction and
phi0 the magnetic flux quantum.
pre_E (float): Numerical prefactor for unit conversion in calculating the
electrostatic phase shift. Equal to sigma*V0, where sigma is the
interaction constant of the given TEM accelerating voltage (an
attribute of the microscope class), and V0 the mean inner potential.
v (int): Verbosity. v >= 1 will print status and progress when running
without numba. v=0 will suppress all prints.
        multiproc (bool): Whether or not to implement multiprocessing with numba.
Returns:
tuple: Tuple of length 2: (ephi, mphi). Where ephi and mphi are 2D numpy
arrays of the electrostatic and magnetic phase shifts respectively.
"""
vprint = print if v>=1 else lambda *a, **k: None
assert mx.ndim == my.ndim == mz.ndim
if mx.ndim == 2:
mx = mx[None,...]
my = my[None,...]
mz = mz[None,...]
[dimz,dimy,dimx] = mx.shape
dx2 = dimx//2
dy2 = dimy//2
dz2 = dimz//2
ly = (np.arange(dimy)-dy2)/dimy
lx = (np.arange(dimx)-dx2)/dimx
[Y,X] = np.meshgrid(ly,lx, indexing='ij')
dk = 2.0*np.pi # Kspace vector spacing
KX = X*dk
KY = Y*dk
KK = np.sqrt(KX**2 + KY**2) # same as dist(ny, nx, shift=True)*2*np.pi
zeros = np.where(KK == 0) # but we need KX and KY later.
    KK[zeros] = 1.0 # avoid dividing by zero; these points are zeroed again below
# compute S arrays (will apply constants at very end)
inv_KK = 1/KK**2
Sx = 1j * KX * inv_KK
Sy = 1j * KY * inv_KK
Sx[zeros] = 0.0
Sy[zeros] = 0.0
# Get indices for which to calculate phase shift. Skip all pixels where
# thickness == 0
if Dshp is None:
Dshp = np.ones(mx.shape)
# exclude indices where thickness is 0, compile into list of ((z1,y1,x1), (z2,y2...
zz, yy, xx = np.where(Dshp != 0)
inds = np.dstack((zz,yy,xx)).squeeze()
# Compute the rotation angles
st = np.sin(np.deg2rad(theta_x))
ct = np.cos(np.deg2rad(theta_x))
sg = np.sin(np.deg2rad(theta_y))
cg = np.cos(np.deg2rad(theta_y))
x = np.arange(dimx) - dx2
y = np.arange(dimy) - dy2
z = np.arange(dimz) - dz2
Z,Y,X = np.meshgrid(z,y,x, indexing='ij') # grid of actual positions (centered on 0)
# compute the rotated values;
# here we apply rotation about X first, then about Y
i_n = Z*sg*ct + Y*sg*st + X*cg
j_n = Y*ct - Z*st
mx_n = mx*cg + my*sg*st + mz*sg*ct
my_n = my*ct - mz*st
# setup
mphi_k = np.zeros(KK.shape,dtype=complex)
ephi_k = np.zeros(KK.shape,dtype=complex)
nelems = np.shape(inds)[0]
stime = time.time()
vprint(f'Beginning phase calculation for {nelems:g} voxels.')
if multiproc:
vprint("Running in parallel with numba.")
ephi_k, mphi_k = exp_sum(mphi_k, ephi_k, inds, KY, KX, j_n, i_n, my_n, mx_n, Sy, Sx)
else:
vprint("Running on 1 cpu.")
otime = time.time()
vprint('0.00%', end=' .. ')
cc = -1
for ind in inds:
ind = tuple(ind)
cc += 1
if time.time() - otime >= 15:
vprint(f'{cc/nelems*100:.2f}%', end=' .. ')
otime = time.time()
            # compute the exponential summation
sum_term = np.exp(-1j * (KY*j_n[ind] + KX*i_n[ind]))
ephi_k += sum_term
mphi_k += sum_term * (my_n[ind]*Sx - mx_n[ind]*Sy)
vprint('100.0%')
vprint(f"total time: {time.time()-stime:.5g} sec, {(time.time()-stime)/nelems:.5g} sec/voxel.")
#Now we have the phases in K-space. We convert to real space and return
ephi_k[zeros] = 0.0
mphi_k[zeros] = 0.0
ephi = (np.fft.ifftshift(np.fft.ifftn(np.fft.ifftshift(ephi_k)))).real*pre_E
mphi = (np.fft.ifftshift(np.fft.ifftn(np.fft.ifftshift(mphi_k)))).real*pre_B
return (ephi,mphi)
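# Illustrative usage (added; the geometry and magnetization below are
# assumptions for demonstration, not from the original authors): a 32x32
# slab uniformly magnetized along z, no tilt, computed serially.
if __name__ == "__main__":
    _zeros = np.zeros((1, 32, 32))
    _ephi, _mphi = linsupPhi(mx=_zeros, my=_zeros, mz=np.ones((1, 32, 32)),
                              theta_x=0.0, theta_y=0.0, v=0, multiproc=False)
    print(_ephi.shape, _mphi.shape)  # (32, 32) (32, 32)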
def mansPhi(mx=1.0, my=1.0, mz=None, beam=[0.0, 0.0, 1.0], thick=1.0, embed=0.0):
"""Calculate magnetic phase shift using Mansuripur algorithm [1].
Unlike the linear superposition method, this algorithm only accepts 2D
samples. The input given is expected to be 2D arrays for mx, my, mz. It can
compute beam angles close to (0,0,1), but for tilts greater than a few
degrees (depending on sample conditions) it loses accuracy.
The `embed` argument places the magnetization into a larger array to increase
Fourier resolution, but this also seems to introduce a background phase shift
into some images. Use at your own risk.
Args:
mx (2D array): x component of magnetization at each pixel.
my (2D array): y component of magnetization at each pixel.
mz (2D array): z component of magnetization at each pixel.
beam (list): Vector direction of beam [x,y,z]. Default [001].
thick (float): Thickness of the sample in pixels. i.e. thickness in nm
divided by del_px which is nm/pixel.
embed (int): Whether or not to embed the mx, my, mz into a larger array
for Fourier-space computation. In theory this improves edge effects
at the cost of reduced speed, however it also seems to add a
background phase gradient in some simulations.
=========== ===========================
embed value effect
=========== ===========================
0 Do not embed (default)
1 Embed in (1024, 1024) array
x (int) Embed in (x, x) array.
=========== ===========================
Returns:
``ndarray``: 2D array of magnetic phase shift
References:
1) <NAME>. Computation of electron diffraction patterns in Lorentz
electron microscopy of thin magnetic films. J. Appl. Phys. 69, 5890 (1991).
"""
#Normalize the beam direction
beam = np.array(beam)
beam = beam / np.sqrt(np.sum(beam**2))
#Get dimensions
[ysz,xsz] = mx.shape
#Embed
if (embed == 1.0):
bdim = 1024
bdimx,bdimy = bdim,bdim
elif (embed == 0.0):
bdimx,bdimy = xsz,ysz
else:
bdim = int(embed)
bdimx,bdimy = bdim,bdim
bigmx = np.zeros([bdimy,bdimx])
bigmy = np.zeros([bdimy,bdimx])
bigmx[(bdimy-ysz)//2:(bdimy+ysz)//2,(bdimx-xsz)//2:(bdimx+xsz)//2] = mx
bigmy[(bdimy-ysz)//2:(bdimy+ysz)//2,(bdimx-xsz)//2:(bdimx+xsz)//2] = my
if mz is not None:
bigmz = np.zeros([bdimy,bdimx])
bigmz[(bdimy-ysz)//2:(bdimy+ysz)//2,(bdimx-xsz)//2:(bdimx+xsz)//2] = mz
    #Compute the auxiliary arrays required for computation
dsx = 2.0*np.pi/bdimx
linex = (np.arange(bdimx)-bdimx/2)*dsx
dsy = 2.0*np.pi/bdimy
liney = (np.arange(bdimy)-bdimy/2)*dsy
[Sx,Sy] = np.meshgrid(linex,liney)
S = np.sqrt(Sx**2 + Sy**2)
zinds = np.where(S == 0)
S[zinds] = 1.0
sigx = Sx/S
sigy = Sy/S
sigx[zinds] = 0.0
sigy[zinds] = 0.0
#compute FFTs of the B arrays.
fmx = np.fft.fftshift(np.fft.fft2(bigmx))
fmy = np.fft.fftshift(np.fft.fft2(bigmy))
if mz is not None:
fmz = np.fft.fftshift(np.fft.fft2(bigmz))
#Compute vector products and Gpts
if mz is None: # eq 13a in Mansuripur
if not np.array_equal(beam, [0,0,1]):
print("Using a tilted beam requires a nonzero mz input")
print("Proceeding with beam [0,0,1].")
prod = sigx*fmy - sigy*fmx
Gpts = 1+1j*0
else:
e_x, e_y, e_z = beam
prod = sigx*(fmy*e_x**2 - fmx*e_x*e_y - fmz*e_y*e_z+ fmy*e_z**2
) + sigy*(fmy*e_x*e_y - fmx*e_y**2 + fmz*e_x*e_z - fmx*e_z**2)
arg = np.pi*thick*(sigx*e_x+sigy*e_y)/e_z
denom = 1.0/((sigx*e_x+sigy*e_y)**2 + e_z**2)
qq = np.where(arg == 0)
arg[qq] = 1
Gpts = (denom*np.sin(arg)/arg).astype(complex)
Gpts[qq] = denom[qq]
#prefactor
prefac = 1j*thick/S
#F-space phase
fphi = prefac * Gpts * prod
fphi[zinds] = 0.0
phi = np.fft.ifft2(np.fft.ifftshift(fphi)).real
#return only the actual phase part from the embed file
if embed != 0:
ret_phi = phi[(bdimx-xsz)//2:(bdimx+xsz)//2,(bdimy-ysz)//2:(bdimy+ysz)//2]
else:
ret_phi = phi
return ret_phi
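# Illustrative usage of mansPhi (added; the vortex-like in-plane magnetization
# is an assumption chosen only for demonstration):
if __name__ == "__main__":
    _yy, _xx = np.mgrid[-1:1:64j, -1:1:64j]
    _r = np.sqrt(_xx**2 + _yy**2) + 1e-9
    _phi = mansPhi(mx=-_yy / _r, my=_xx / _r, thick=10.0)
    print(_phi.shape)  # (64, 64)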
### End ### | [
"numpy.dstack",
"numpy.fft.ifftshift",
"numpy.meshgrid",
"numpy.sum",
"numpy.array_equal",
"numpy.deg2rad",
"numpy.zeros",
"numpy.ones",
"time.time",
"numpy.shape",
"numpy.where",
"numba.jit",
"numpy.array",
"numpy.exp",
"numpy.arange",
"numpy.fft.fft2",
"numpy.sin",
"numpy.sqrt"
] | [((620, 653), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'parallel': '(True)'}), '(nopython=True, parallel=True)\n', (623, 653), False, 'from numba import jit\n'), ((4157, 4191), 'numpy.meshgrid', 'np.meshgrid', (['ly', 'lx'], {'indexing': '"""ij"""'}), "(ly, lx, indexing='ij')\n", (4168, 4191), True, 'import numpy as np\n'), ((4271, 4297), 'numpy.sqrt', 'np.sqrt', (['(KX ** 2 + KY ** 2)'], {}), '(KX ** 2 + KY ** 2)\n', (4278, 4297), True, 'import numpy as np\n'), ((4349, 4366), 'numpy.where', 'np.where', (['(KK == 0)'], {}), '(KK == 0)\n', (4357, 4366), True, 'import numpy as np\n'), ((4911, 4930), 'numpy.where', 'np.where', (['(Dshp != 0)'], {}), '(Dshp != 0)\n', (4919, 4930), True, 'import numpy as np\n'), ((5260, 5295), 'numpy.meshgrid', 'np.meshgrid', (['z', 'y', 'x'], {'indexing': '"""ij"""'}), "(z, y, x, indexing='ij')\n", (5271, 5295), True, 'import numpy as np\n'), ((5579, 5612), 'numpy.zeros', 'np.zeros', (['KK.shape'], {'dtype': 'complex'}), '(KK.shape, dtype=complex)\n', (5587, 5612), True, 'import numpy as np\n'), ((5625, 5658), 'numpy.zeros', 'np.zeros', (['KK.shape'], {'dtype': 'complex'}), '(KK.shape, dtype=complex)\n', (5633, 5658), True, 'import numpy as np\n'), ((5702, 5713), 'time.time', 'time.time', ([], {}), '()\n', (5711, 5713), False, 'import time\n'), ((8964, 8978), 'numpy.array', 'np.array', (['beam'], {}), '(beam)\n', (8972, 8978), True, 'import numpy as np\n'), ((9291, 9315), 'numpy.zeros', 'np.zeros', (['[bdimy, bdimx]'], {}), '([bdimy, bdimx])\n', (9299, 9315), True, 'import numpy as np\n'), ((9327, 9351), 'numpy.zeros', 'np.zeros', (['[bdimy, bdimx]'], {}), '([bdimy, bdimx])\n', (9335, 9351), True, 'import numpy as np\n'), ((9863, 9888), 'numpy.meshgrid', 'np.meshgrid', (['linex', 'liney'], {}), '(linex, liney)\n', (9874, 9888), True, 'import numpy as np\n'), ((9896, 9922), 'numpy.sqrt', 'np.sqrt', (['(Sx ** 2 + Sy ** 2)'], {}), '(Sx ** 2 + Sy ** 2)\n', (9903, 9922), True, 'import numpy as np\n'), ((9931, 9947), 'numpy.where', 'np.where', (['(S == 0)'], {}), '(S == 0)\n', (9939, 9947), True, 'import numpy as np\n'), ((1504, 1559), 'numpy.exp', 'np.exp', (['(-1.0j * (KY * j_n[z, y, x] + KX * i_n[z, y, x]))'], {}), '(-1.0j * (KY * j_n[z, y, x] + KX * i_n[z, y, x]))\n', (1510, 1559), True, 'import numpy as np\n'), ((4788, 4805), 'numpy.ones', 'np.ones', (['mx.shape'], {}), '(mx.shape)\n', (4795, 4805), True, 'import numpy as np\n'), ((5025, 5044), 'numpy.deg2rad', 'np.deg2rad', (['theta_x'], {}), '(theta_x)\n', (5035, 5044), True, 'import numpy as np\n'), ((5062, 5081), 'numpy.deg2rad', 'np.deg2rad', (['theta_x'], {}), '(theta_x)\n', (5072, 5081), True, 'import numpy as np\n'), ((5099, 5118), 'numpy.deg2rad', 'np.deg2rad', (['theta_y'], {}), '(theta_y)\n', (5109, 5118), True, 'import numpy as np\n'), ((5136, 5155), 'numpy.deg2rad', 'np.deg2rad', (['theta_y'], {}), '(theta_y)\n', (5146, 5155), True, 'import numpy as np\n'), ((5166, 5181), 'numpy.arange', 'np.arange', (['dimx'], {}), '(dimx)\n', (5175, 5181), True, 'import numpy as np\n'), ((5196, 5211), 'numpy.arange', 'np.arange', (['dimy'], {}), '(dimy)\n', (5205, 5211), True, 'import numpy as np\n'), ((5226, 5241), 'numpy.arange', 'np.arange', (['dimz'], {}), '(dimz)\n', (5235, 5241), True, 'import numpy as np\n'), ((5672, 5686), 'numpy.shape', 'np.shape', (['inds'], {}), '(inds)\n', (5680, 5686), True, 'import numpy as np\n'), ((6012, 6023), 'time.time', 'time.time', ([], {}), '()\n', (6021, 6023), False, 'import time\n'), ((9542, 9566), 'numpy.zeros', 'np.zeros', (['[bdimy, bdimx]'], {}), '([bdimy, 
bdimx])\n', (9550, 9566), True, 'import numpy as np\n'), ((10105, 10123), 'numpy.fft.fft2', 'np.fft.fft2', (['bigmx'], {}), '(bigmx)\n', (10116, 10123), True, 'import numpy as np\n'), ((10151, 10169), 'numpy.fft.fft2', 'np.fft.fft2', (['bigmy'], {}), '(bigmy)\n', (10162, 10169), True, 'import numpy as np\n'), ((10858, 10876), 'numpy.where', 'np.where', (['(arg == 0)'], {}), '(arg == 0)\n', (10866, 10876), True, 'import numpy as np\n'), ((1384, 1398), 'numpy.shape', 'np.shape', (['inds'], {}), '(inds)\n', (1392, 1398), True, 'import numpy as np\n'), ((4083, 4098), 'numpy.arange', 'np.arange', (['dimy'], {}), '(dimy)\n', (4092, 4098), True, 'import numpy as np\n'), ((4119, 4134), 'numpy.arange', 'np.arange', (['dimx'], {}), '(dimx)\n', (4128, 4134), True, 'import numpy as np\n'), ((4942, 4965), 'numpy.dstack', 'np.dstack', (['(zz, yy, xx)'], {}), '((zz, yy, xx))\n', (4951, 4965), True, 'import numpy as np\n'), ((6360, 6407), 'numpy.exp', 'np.exp', (['(-1.0j * (KY * j_n[ind] + KX * i_n[ind]))'], {}), '(-1.0j * (KY * j_n[ind] + KX * i_n[ind]))\n', (6366, 6407), True, 'import numpy as np\n'), ((9005, 9022), 'numpy.sum', 'np.sum', (['(beam ** 2)'], {}), '(beam ** 2)\n', (9011, 9022), True, 'import numpy as np\n'), ((9750, 9766), 'numpy.arange', 'np.arange', (['bdimx'], {}), '(bdimx)\n', (9759, 9766), True, 'import numpy as np\n'), ((9819, 9835), 'numpy.arange', 'np.arange', (['bdimy'], {}), '(bdimy)\n', (9828, 9835), True, 'import numpy as np\n'), ((10225, 10243), 'numpy.fft.fft2', 'np.fft.fft2', (['bigmz'], {}), '(bigmz)\n', (10236, 10243), True, 'import numpy as np\n'), ((10342, 10373), 'numpy.array_equal', 'np.array_equal', (['beam', '[0, 0, 1]'], {}), '(beam, [0, 0, 1])\n', (10356, 10373), True, 'import numpy as np\n'), ((11121, 11143), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['fphi'], {}), '(fphi)\n', (11137, 11143), True, 'import numpy as np\n'), ((6276, 6287), 'time.time', 'time.time', ([], {}), '()\n', (6285, 6287), False, 'import time\n'), ((6165, 6176), 'time.time', 'time.time', ([], {}), '()\n', (6174, 6176), False, 'import time\n'), ((6549, 6560), 'time.time', 'time.time', ([], {}), '()\n', (6558, 6560), False, 'import time\n'), ((6789, 6813), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['ephi_k'], {}), '(ephi_k)\n', (6805, 6813), True, 'import numpy as np\n'), ((6870, 6894), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['mphi_k'], {}), '(mphi_k)\n', (6886, 6894), True, 'import numpy as np\n'), ((6579, 6590), 'time.time', 'time.time', ([], {}), '()\n', (6588, 6590), False, 'import time\n'), ((10919, 10930), 'numpy.sin', 'np.sin', (['arg'], {}), '(arg)\n', (10925, 10930), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed June 13 11:55:20 2018
@author: mbgunay1in
"""
# import libraries and modules needed
import os
import numpy as np
from scipy import integrate, linalg
from matplotlib import pyplot
# load geometry from data file
naca_filepath = os.path.join('datfile.dat')
with open(naca_filepath, 'r') as infile:
x, y = np.loadtxt(infile, dtype=float, unpack=True)
# plot geometry
width = 10
pyplot.figure(figsize=(width, width))
pyplot.grid()
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.plot(x, y, color='k', linestyle='-', linewidth=2)
pyplot.axis('scaled', adjustable='box')
pyplot.xlim(-0.1, 1.1)
pyplot.ylim(-0.1, 0.1);
class Panel:
"""
Contains information related to a panel.
"""
def __init__(self, xa, ya, xb, yb):
self.xa, self.ya = xa, ya # panel starting-point
self.xb, self.yb = xb, yb # panel ending-point
self.xc, self.yc = (xa + xb) / 2, (ya + yb) / 2 # panel center
self.length = np.sqrt((xb - xa)**2 + (yb - ya)**2) # panel length
# orientation of panel (angle between x-axis and panel's normal)
if xb - xa <= 0.0:
self.beta = np.arccos((yb - ya) / self.length)
elif xb - xa > 0.0:
self.beta = np.pi + np.arccos(-(yb - ya) / self.length)
# panel location
if self.beta <= np.pi:
self.loc = 'upper' # upper surface
else:
self.loc = 'lower' # lower surface
self.sigma = 0.0 # source strength
self.vt = 0.0 # tangential velocity
self.cp = 0.0 # pressure coefficient
def panel_def(x, y, N=40):
R = (x.max() - x.min()) / 2.0 # circle radius
x_center = (x.max() + x.min()) / 2.0 # x-coordinate of circle center
theta = np.linspace(0.0, 2.0 * np.pi, N + 1) # array of angles
x_circle = x_center + R * np.cos(theta) # x-coordinates of circle
x_ends = np.copy(x_circle) # x-coordinate of panels end-points
y_ends = np.empty_like(x_ends) # y-coordinate of panels end-points
# extend coordinates to consider closed surface
x, y = np.append(x, x[0]), np.append(y, y[0])
# compute y-coordinate of end-points by projection
I = 0
for i in range(N):
while I < len(x) - 1:
if (x[I] <= x_ends[i] <= x[I + 1]) or (x[I + 1] <= x_ends[i] <= x[I]):
break
else:
I += 1
a = (y[I + 1] - y[I]) / (x[I + 1] - x[I])
b = y[I + 1] - a * x[I + 1]
y_ends[i] = a * x_ends[i] + b
y_ends[N] = y_ends[0]
# create panels
panels = np.empty(N, dtype=object)
for i in range(N):
panels[i] = Panel(x_ends[i], y_ends[i], x_ends[i + 1], y_ends[i + 1])
return panels
# discretize geometry into panels
panels = panel_def(x, y, N=40)
# plot discretized geometry
width = 10
pyplot.figure(figsize=(width, width))
pyplot.grid()
pyplot.xlabel('x', fontsize=16)
pyplot.ylabel('y', fontsize=16)
pyplot.plot(x, y, color='k', linestyle='-', linewidth=2)
pyplot.plot(np.append([panel.xa for panel in panels], panels[0].xa),
np.append([panel.ya for panel in panels], panels[0].ya),
linestyle='-', linewidth=1, marker='o', markersize=6, color='#CD2305')
pyplot.axis('scaled', adjustable='box')
pyplot.xlim(-0.1, 1.1)
pyplot.ylim(-0.1, 0.1);
class Freestream:
    """Freestream conditions: speed u_inf and angle of attack alpha (given in degrees, stored in radians)."""
    def __init__(self, u_inf=1.0, alpha=0.0):
        self.u_inf = u_inf
        self.alpha = np.radians(alpha) # degrees to radians
# define freestream conditions
freestream = Freestream(u_inf=1.0, alpha=4.0)
def integral(x, y, panel, dxdk, dydk):
    """Evaluate the geometric influence integral of a panel at point (x, y).

    dxdk and dydk are the x- and y-components of the direction in which the
    velocity contribution is evaluated (normal or tangential).
    """
def integrand(s):
return (((x - (panel.xa - np.sin(panel.beta) * s)) * dxdk +
(y - (panel.ya + np.cos(panel.beta) * s)) * dydk) /
((x - (panel.xa - np.sin(panel.beta) * s))**2 +
(y - (panel.ya + np.cos(panel.beta) * s))**2) )
return integrate.quad(integrand, 0.0, panel.length)[0]
def normal_sourcecont(panels):
    """Build the matrix of source contributions to the normal velocity at each panel center."""
A = np.empty((panels.size, panels.size), dtype=float)
# source contribution on a panel from itself
np.fill_diagonal(A, 0.5)
# source contribution on a panel from others
for i, panel_i in enumerate(panels):
for j, panel_j in enumerate(panels):
if i != j:
A[i, j] = 0.5 / np.pi * integral(panel_i.xc, panel_i.yc,
panel_j,
np.cos(panel_i.beta),
np.sin(panel_i.beta))
return A
def v_c_n(panels):
    """Build the matrix of vortex contributions to the normal velocity at each panel center."""
A = np.empty((panels.size, panels.size), dtype=float)
# vortex contribution on a panel from itself
np.fill_diagonal(A, 0.0)
# vortex contribution on a panel from others
for i, panel_i in enumerate(panels):
for j, panel_j in enumerate(panels):
if i != j:
A[i, j] = -0.5 / np.pi * integral(panel_i.xc, panel_i.yc,
panel_j,
np.sin(panel_i.beta),
-np.cos(panel_i.beta))
return A
A_source = normal_sourcecont(panels)
B_vortex = v_c_n(panels)
def kutta_condition(A_source, B_vortex):
    """Build the Kutta-condition row: the tangential velocities on the two trailing-edge panels must cancel."""
b = np.empty(A_source.shape[0] + 1, dtype=float)
# matrix of source contribution on tangential velocity
# is the same than
# matrix of vortex contribution on normal velocity
b[:-1] = B_vortex[0, :] + B_vortex[-1, :]
# matrix of vortex contribution on tangential velocity
# is the opposite of
# matrix of source contribution on normal velocity
b[-1] = - np.sum(A_source[0, :] + A_source[-1, :])
return b
def sing_matrix(A_source, B_vortex):
    """Assemble the (N+1)x(N+1) singularity matrix: source block, vortex column, and Kutta-condition row."""
A = np.empty((A_source.shape[0] + 1, A_source.shape[1] + 1), dtype=float)
# source contribution matrix
A[:-1, :-1] = A_source
# vortex contribution array
A[:-1, -1] = np.sum(B_vortex, axis=1)
# Kutta condition array
A[-1, :] = kutta_condition(A_source, B_vortex)
return A
def build_freestream_rhs(panels, freestream):
    """Build the right-hand side of the linear system from the freestream contribution."""
b = np.empty(panels.size + 1, dtype=float)
# freestream contribution on each panel
for i, panel in enumerate(panels):
b[i] = -freestream.u_inf * np.cos(freestream.alpha - panel.beta)
# freestream contribution on the Kutta condition
b[-1] = -freestream.u_inf * (np.sin(freestream.alpha - panels[0].beta) +
np.sin(freestream.alpha - panels[-1].beta) )
return b
A = sing_matrix(A_source, B_vortex)
b = build_freestream_rhs(panels, freestream)
# solve for singularity strengths
strengths = np.linalg.solve(A, b)
# store source strength on each panel
for i , panel in enumerate(panels):
panel.sigma = strengths[i]
# store circulation density
gamma = strengths[-1]
def tangvelocity_comp(panels, freestream, gamma, A_source, B_vortex):
    """Compute the tangential velocity at each panel center and store it on the panels."""
A = np.empty((panels.size, panels.size + 1), dtype=float)
# matrix of source contribution on tangential velocity
# is the same than
# matrix of vortex contribution on normal velocity
A[:, :-1] = B_vortex
# matrix of vortex contribution on tangential velocity
# is the opposite of
# matrix of source contribution on normal velocity
A[:, -1] = -np.sum(A_source, axis=1)
# freestream contribution
b = freestream.u_inf * np.sin([freestream.alpha - panel.beta
for panel in panels])
strengths = np.append([panel.sigma for panel in panels], gamma)
tangential_velocities = np.dot(A, strengths) + b
for i, panel in enumerate(panels):
panel.vt = tangential_velocities[i]
# tangential velocity at each panel center.
tangvelocity_comp(panels, freestream, gamma, A_source, B_vortex)
def compute_pressure_coefficient(panels, freestream):
for panel in panels:
panel.cp = 1.0 - (panel.vt / freestream.u_inf)**2
# surface pressure coefficient
compute_pressure_coefficient(panels, freestream)
#surface pressure coefficient
pyplot.figure(figsize=(12, 8))
pyplot.grid()
pyplot.xlabel('$x$', fontsize=16)
pyplot.ylabel('$C_p$', fontsize=16)
pyplot.plot([panel.xc for panel in panels if panel.loc == 'upper'],
[panel.cp for panel in panels if panel.loc == 'upper'],
label='upper surface',
color='r', linestyle='-', linewidth=2, marker='o', markersize=6)
pyplot.plot([panel.xc for panel in panels if panel.loc == 'lower'],
[panel.cp for panel in panels if panel.loc == 'lower'],
label= 'lower surface',
color='b', linestyle='-', linewidth=1, marker='o', markersize=6)
pyplot.legend(loc='best', prop={'size':18})
pyplot.xlim(-0.1, 1.1)
pyplot.ylim(1.0, -2.0)
pyplot.title('Number of panels: {}'.format(panels.size), fontsize=16);
#accuracy
accuracy = sum([panel.sigma * panel.length for panel in panels])
print('sum of singularity strengths: {:0.7f}'.format(accuracy))
#chord and lift coefficient
c = abs(max(panel.xa for panel in panels) -
min(panel.xa for panel in panels))
cl = (gamma * sum(panel.length for panel in panels) /
(0.5 * freestream.u_inf * c))
print('lift coefficient: CL = {:0.4f}'.format(cl))
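# Added cross-check (illustrative): thin-airfoil theory for a flat plate gives
# CL ~ 2*pi*alpha; it ignores camber and thickness, so only rough agreement
# with the panel-method result is expected.
cl_thin = 2.0 * np.pi * freestream.alpha
print('thin-airfoil estimate: CL = {:0.4f}'.format(cl_thin))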
| [
"numpy.sum",
"numpy.empty",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linalg.solve",
"os.path.join",
"numpy.copy",
"numpy.empty_like",
"numpy.append",
"numpy.loadtxt",
"numpy.linspace",
"numpy.arccos",
"numpy.fill_diagonal",
"numpy.radians",
"scipy.integrate.quad",
"matplotlib.py... | [((286, 313), 'os.path.join', 'os.path.join', (['"""datfile.dat"""'], {}), "('datfile.dat')\n", (298, 313), False, 'import os\n'), ((447, 484), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(width, width)'}), '(figsize=(width, width))\n', (460, 484), False, 'from matplotlib import pyplot\n'), ((486, 499), 'matplotlib.pyplot.grid', 'pyplot.grid', ([], {}), '()\n', (497, 499), False, 'from matplotlib import pyplot\n'), ((501, 532), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""x"""'], {'fontsize': '(16)'}), "('x', fontsize=16)\n", (514, 532), False, 'from matplotlib import pyplot\n'), ((534, 565), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""y"""'], {'fontsize': '(16)'}), "('y', fontsize=16)\n", (547, 565), False, 'from matplotlib import pyplot\n'), ((567, 623), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {'color': '"""k"""', 'linestyle': '"""-"""', 'linewidth': '(2)'}), "(x, y, color='k', linestyle='-', linewidth=2)\n", (578, 623), False, 'from matplotlib import pyplot\n'), ((625, 664), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""scaled"""'], {'adjustable': '"""box"""'}), "('scaled', adjustable='box')\n", (636, 664), False, 'from matplotlib import pyplot\n'), ((666, 688), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (677, 688), False, 'from matplotlib import pyplot\n'), ((690, 712), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (701, 712), False, 'from matplotlib import pyplot\n'), ((3062, 3099), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(width, width)'}), '(figsize=(width, width))\n', (3075, 3099), False, 'from matplotlib import pyplot\n'), ((3101, 3114), 'matplotlib.pyplot.grid', 'pyplot.grid', ([], {}), '()\n', (3112, 3114), False, 'from matplotlib import pyplot\n'), ((3116, 3147), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""x"""'], {'fontsize': '(16)'}), "('x', fontsize=16)\n", (3129, 3147), False, 'from matplotlib import pyplot\n'), ((3149, 3180), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""y"""'], {'fontsize': '(16)'}), "('y', fontsize=16)\n", (3162, 3180), False, 'from matplotlib import pyplot\n'), ((3182, 3238), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {'color': '"""k"""', 'linestyle': '"""-"""', 'linewidth': '(2)'}), "(x, y, color='k', linestyle='-', linewidth=2)\n", (3193, 3238), False, 'from matplotlib import pyplot\n'), ((3464, 3503), 'matplotlib.pyplot.axis', 'pyplot.axis', (['"""scaled"""'], {'adjustable': '"""box"""'}), "('scaled', adjustable='box')\n", (3475, 3503), False, 'from matplotlib import pyplot\n'), ((3505, 3527), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (3516, 3527), False, 'from matplotlib import pyplot\n'), ((3529, 3551), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (3540, 3551), False, 'from matplotlib import pyplot\n'), ((7045, 7066), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (7060, 7066), True, 'import numpy as np\n'), ((8525, 8555), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (8538, 8555), False, 'from matplotlib import pyplot\n'), ((8557, 8570), 'matplotlib.pyplot.grid', 'pyplot.grid', ([], {}), '()\n', (8568, 8570), False, 'from matplotlib import pyplot\n'), ((8572, 8605), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""$x$"""'], {'fontsize': '(16)'}), "('$x$', fontsize=16)\n", (8585, 8605), False, 'from 
matplotlib import pyplot\n'), ((8607, 8642), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""$C_p$"""'], {'fontsize': '(16)'}), "('$C_p$', fontsize=16)\n", (8620, 8642), False, 'from matplotlib import pyplot\n'), ((8644, 8864), 'matplotlib.pyplot.plot', 'pyplot.plot', (["[panel.xc for panel in panels if panel.loc == 'upper']", "[panel.cp for panel in panels if panel.loc == 'upper']"], {'label': '"""upper surface"""', 'color': '"""r"""', 'linestyle': '"""-"""', 'linewidth': '(2)', 'marker': '"""o"""', 'markersize': '(6)'}), "([panel.xc for panel in panels if panel.loc == 'upper'], [panel.\n cp for panel in panels if panel.loc == 'upper'], label='upper surface',\n color='r', linestyle='-', linewidth=2, marker='o', markersize=6)\n", (8655, 8864), False, 'from matplotlib import pyplot\n'), ((8896, 9116), 'matplotlib.pyplot.plot', 'pyplot.plot', (["[panel.xc for panel in panels if panel.loc == 'lower']", "[panel.cp for panel in panels if panel.loc == 'lower']"], {'label': '"""lower surface"""', 'color': '"""b"""', 'linestyle': '"""-"""', 'linewidth': '(1)', 'marker': '"""o"""', 'markersize': '(6)'}), "([panel.xc for panel in panels if panel.loc == 'lower'], [panel.\n cp for panel in panels if panel.loc == 'lower'], label='lower surface',\n color='b', linestyle='-', linewidth=1, marker='o', markersize=6)\n", (8907, 9116), False, 'from matplotlib import pyplot\n'), ((9149, 9193), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {'loc': '"""best"""', 'prop': "{'size': 18}"}), "(loc='best', prop={'size': 18})\n", (9162, 9193), False, 'from matplotlib import pyplot\n'), ((9194, 9216), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (9205, 9216), False, 'from matplotlib import pyplot\n'), ((9218, 9240), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(1.0)', '(-2.0)'], {}), '(1.0, -2.0)\n', (9229, 9240), False, 'from matplotlib import pyplot\n'), ((368, 412), 'numpy.loadtxt', 'np.loadtxt', (['infile'], {'dtype': 'float', 'unpack': '(True)'}), '(infile, dtype=float, unpack=True)\n', (378, 412), True, 'import numpy as np\n'), ((1925, 1961), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0 * np.pi)', '(N + 1)'], {}), '(0.0, 2.0 * np.pi, N + 1)\n', (1936, 1961), True, 'import numpy as np\n'), ((2073, 2090), 'numpy.copy', 'np.copy', (['x_circle'], {}), '(x_circle)\n', (2080, 2090), True, 'import numpy as np\n'), ((2142, 2163), 'numpy.empty_like', 'np.empty_like', (['x_ends'], {}), '(x_ends)\n', (2155, 2163), True, 'import numpy as np\n'), ((2784, 2809), 'numpy.empty', 'np.empty', (['N'], {'dtype': 'object'}), '(N, dtype=object)\n', (2792, 2809), True, 'import numpy as np\n'), ((3252, 3307), 'numpy.append', 'np.append', (['[panel.xa for panel in panels]', 'panels[0].xa'], {}), '([panel.xa for panel in panels], panels[0].xa)\n', (3261, 3307), True, 'import numpy as np\n'), ((3322, 3377), 'numpy.append', 'np.append', (['[panel.ya for panel in panels]', 'panels[0].ya'], {}), '([panel.ya for panel in panels], panels[0].ya)\n', (3331, 3377), True, 'import numpy as np\n'), ((4269, 4318), 'numpy.empty', 'np.empty', (['(panels.size, panels.size)'], {'dtype': 'float'}), '((panels.size, panels.size), dtype=float)\n', (4277, 4318), True, 'import numpy as np\n'), ((4374, 4398), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A', '(0.5)'], {}), '(A, 0.5)\n', (4390, 4398), True, 'import numpy as np\n'), ((4898, 4947), 'numpy.empty', 'np.empty', (['(panels.size, panels.size)'], {'dtype': 'float'}), '((panels.size, panels.size), dtype=float)\n', (4906, 4947), True, 'import numpy as 
np\n'), ((5003, 5027), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A', '(0.0)'], {}), '(A, 0.0)\n', (5019, 5027), True, 'import numpy as np\n'), ((5621, 5665), 'numpy.empty', 'np.empty', (['(A_source.shape[0] + 1)'], {'dtype': 'float'}), '(A_source.shape[0] + 1, dtype=float)\n', (5629, 5665), True, 'import numpy as np\n'), ((6118, 6187), 'numpy.empty', 'np.empty', (['(A_source.shape[0] + 1, A_source.shape[1] + 1)'], {'dtype': 'float'}), '((A_source.shape[0] + 1, A_source.shape[1] + 1), dtype=float)\n', (6126, 6187), True, 'import numpy as np\n'), ((6301, 6325), 'numpy.sum', 'np.sum', (['B_vortex'], {'axis': '(1)'}), '(B_vortex, axis=1)\n', (6307, 6325), True, 'import numpy as np\n'), ((6483, 6521), 'numpy.empty', 'np.empty', (['(panels.size + 1)'], {'dtype': 'float'}), '(panels.size + 1, dtype=float)\n', (6491, 6521), True, 'import numpy as np\n'), ((7321, 7374), 'numpy.empty', 'np.empty', (['(panels.size, panels.size + 1)'], {'dtype': 'float'}), '((panels.size, panels.size + 1), dtype=float)\n', (7329, 7374), True, 'import numpy as np\n'), ((7907, 7958), 'numpy.append', 'np.append', (['[panel.sigma for panel in panels]', 'gamma'], {}), '([panel.sigma for panel in panels], gamma)\n', (7916, 7958), True, 'import numpy as np\n'), ((1061, 1101), 'numpy.sqrt', 'np.sqrt', (['((xb - xa) ** 2 + (yb - ya) ** 2)'], {}), '((xb - xa) ** 2 + (yb - ya) ** 2)\n', (1068, 1101), True, 'import numpy as np\n'), ((2272, 2290), 'numpy.append', 'np.append', (['x', 'x[0]'], {}), '(x, x[0])\n', (2281, 2290), True, 'import numpy as np\n'), ((2292, 2310), 'numpy.append', 'np.append', (['y', 'y[0]'], {}), '(y, y[0])\n', (2301, 2310), True, 'import numpy as np\n'), ((3680, 3697), 'numpy.radians', 'np.radians', (['alpha'], {}), '(alpha)\n', (3690, 3697), True, 'import numpy as np\n'), ((4172, 4216), 'scipy.integrate.quad', 'integrate.quad', (['integrand', '(0.0)', 'panel.length'], {}), '(integrand, 0.0, panel.length)\n', (4186, 4216), False, 'from scipy import integrate, linalg\n'), ((6010, 6050), 'numpy.sum', 'np.sum', (['(A_source[0, :] + A_source[-1, :])'], {}), '(A_source[0, :] + A_source[-1, :])\n', (6016, 6050), True, 'import numpy as np\n'), ((7700, 7724), 'numpy.sum', 'np.sum', (['A_source'], {'axis': '(1)'}), '(A_source, axis=1)\n', (7706, 7724), True, 'import numpy as np\n'), ((7784, 7845), 'numpy.sin', 'np.sin', (['[(freestream.alpha - panel.beta) for panel in panels]'], {}), '([(freestream.alpha - panel.beta) for panel in panels])\n', (7790, 7845), True, 'import numpy as np\n'), ((7994, 8014), 'numpy.dot', 'np.dot', (['A', 'strengths'], {}), '(A, strengths)\n', (8000, 8014), True, 'import numpy as np\n'), ((1251, 1285), 'numpy.arccos', 'np.arccos', (['((yb - ya) / self.length)'], {}), '((yb - ya) / self.length)\n', (1260, 1285), True, 'import numpy as np\n'), ((2012, 2025), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2018, 2025), True, 'import numpy as np\n'), ((6643, 6680), 'numpy.cos', 'np.cos', (['(freestream.alpha - panel.beta)'], {}), '(freestream.alpha - panel.beta)\n', (6649, 6680), True, 'import numpy as np\n'), ((6769, 6810), 'numpy.sin', 'np.sin', (['(freestream.alpha - panels[0].beta)'], {}), '(freestream.alpha - panels[0].beta)\n', (6775, 6810), True, 'import numpy as np\n'), ((6847, 6889), 'numpy.sin', 'np.sin', (['(freestream.alpha - panels[-1].beta)'], {}), '(freestream.alpha - panels[-1].beta)\n', (6853, 6889), True, 'import numpy as np\n'), ((1348, 1383), 'numpy.arccos', 'np.arccos', (['(-(yb - ya) / self.length)'], {}), '(-(yb - ya) / self.length)\n', (1357, 1383), True, 
'import numpy as np\n'), ((4751, 4771), 'numpy.cos', 'np.cos', (['panel_i.beta'], {}), '(panel_i.beta)\n', (4757, 4771), True, 'import numpy as np\n'), ((4826, 4846), 'numpy.sin', 'np.sin', (['panel_i.beta'], {}), '(panel_i.beta)\n', (4832, 4846), True, 'import numpy as np\n'), ((5383, 5403), 'numpy.sin', 'np.sin', (['panel_i.beta'], {}), '(panel_i.beta)\n', (5389, 5403), True, 'import numpy as np\n'), ((5460, 5480), 'numpy.cos', 'np.cos', (['panel_i.beta'], {}), '(panel_i.beta)\n', (5466, 5480), True, 'import numpy as np\n'), ((3925, 3943), 'numpy.sin', 'np.sin', (['panel.beta'], {}), '(panel.beta)\n', (3931, 3943), True, 'import numpy as np\n'), ((3994, 4012), 'numpy.cos', 'np.cos', (['panel.beta'], {}), '(panel.beta)\n', (4000, 4012), True, 'import numpy as np\n'), ((4064, 4082), 'numpy.sin', 'np.sin', (['panel.beta'], {}), '(panel.beta)\n', (4070, 4082), True, 'import numpy as np\n'), ((4129, 4147), 'numpy.cos', 'np.cos', (['panel.beta'], {}), '(panel.beta)\n', (4135, 4147), True, 'import numpy as np\n')] |
import numpy as np
def add_noise(batch, mean=0, var=0.1, amount=0.01, mode='pepper'):
    """Add synthetic noise to a batch of images; mode is 'gaussian', 'pepper' or 's&p'."""
original_size = batch.shape
batch = np.squeeze(batch)
batch_noisy = np.zeros(batch.shape)
for ii in range(batch.shape[0]):
image = np.squeeze(batch[ii])
        if mode == 'gaussian':
            # note: `var` is passed to np.random.normal as the scale argument,
            # i.e. it is used directly as the standard deviation
            gauss = np.random.normal(mean, var, image.shape)
            image = image + gauss
        elif mode == 'pepper':
            num_pepper = np.ceil(amount * image.size)
            # index with a tuple of arrays; list-of-arrays indexing is deprecated in NumPy
            coords = tuple(np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape)
            image[coords] = 0
        elif mode == "s&p":
            s_vs_p = 0.5
            # Salt mode
            num_salt = np.ceil(amount * image.size * s_vs_p)
            coords = tuple(np.random.randint(0, i - 1, int(num_salt)) for i in image.shape)
            image[coords] = 1
            # Pepper mode
            num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
            coords = tuple(np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape)
            image[coords] = 0
batch_noisy[ii] = image
return batch_noisy.reshape(original_size)
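# Illustrative usage sketch (not part of the original module; the batch shape
# and the 0.05 amount below are arbitrary example values):
def _demo_add_noise():
    demo = np.random.rand(4, 28, 28, 1)
    noisy = add_noise(demo, amount=0.05, mode='s&p')
    assert noisy.shape == demo.shape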
def write_spec(args):
config_file = open(args.modeldir + args.run_name + '/config.txt', 'w')
config_file.write('model: ' + args.run_name + '\n')
config_file.write('num_cls: ' + str(args.num_cls) + '\n')
config_file.write('optimizer: ' + 'Adam' + '\n')
config_file.write('learning_rate: ' + str(args.init_lr) + ' : ' + str(args.lr_min) + '\n')
config_file.write('loss_type: ' + args.loss_type + '\n')
config_file.write('batch_size: ' + str(args.batch_size) + '\n')
config_file.write('data_augmentation: ' + str(args.data_augment) + '\n')
config_file.write('max_angle: ' + str(args.max_angle) + '\n')
config_file.write('keep_prob: ' + str(args.keep_prob) + '\n')
config_file.write('batch_normalization: ' + str(args.use_BN) + '\n')
config_file.write('kernel_size: ' + str(args.filter_size) + '\n')
config_file.close()
| [
"numpy.squeeze",
"numpy.zeros",
"numpy.ceil",
"numpy.random.normal"
] | [((132, 149), 'numpy.squeeze', 'np.squeeze', (['batch'], {}), '(batch)\n', (142, 149), True, 'import numpy as np\n'), ((168, 189), 'numpy.zeros', 'np.zeros', (['batch.shape'], {}), '(batch.shape)\n', (176, 189), True, 'import numpy as np\n'), ((243, 264), 'numpy.squeeze', 'np.squeeze', (['batch[ii]'], {}), '(batch[ii])\n', (253, 264), True, 'import numpy as np\n'), ((316, 356), 'numpy.random.normal', 'np.random.normal', (['mean', 'var', 'image.shape'], {}), '(mean, var, image.shape)\n', (332, 356), True, 'import numpy as np\n'), ((447, 475), 'numpy.ceil', 'np.ceil', (['(amount * image.size)'], {}), '(amount * image.size)\n', (454, 475), True, 'import numpy as np\n'), ((695, 732), 'numpy.ceil', 'np.ceil', (['(amount * image.size * s_vs_p)'], {}), '(amount * image.size * s_vs_p)\n', (702, 732), True, 'import numpy as np\n'), ((901, 946), 'numpy.ceil', 'np.ceil', (['(amount * image.size * (1.0 - s_vs_p))'], {}), '(amount * image.size * (1.0 - s_vs_p))\n', (908, 946), True, 'import numpy as np\n')] |
"""
Metric for ML
"""
import numpy as np
def gini(y_valid, y_pred):
"""Calculate gini coefficient."""
assert y_valid.shape == y_pred.shape
n_samples = y_valid.shape[0]
# Sort rows on prediction column
# (from largest to smallest)
arr = np.array([y_valid, y_pred]).transpose()
true_order = arr[arr[:, 0].argsort()][::-1, 0]
pred_order = arr[arr[:, 1].argsort()][::-1, 0]
# Get Lorenz curves
l_true = np.cumsum(true_order) / np.sum(true_order)
l_pred = np.cumsum(pred_order) / np.sum(pred_order)
l_ones = np.linspace(1 / n_samples, 1, n_samples)
# Get Gini coefficients (area between curves)
g_true = np.sum(l_ones - l_true)
g_pred = np.sum(l_ones - l_pred)
# Normalize to true Gini coefficient
return g_pred / g_true
def gini_norm(y_valid, y_pred):
"""Calculate normalised gini coefficient."""
return gini(y_valid, y_pred) / gini(y_valid, y_valid)
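# Quick self-check (illustrative sketch, not in the original module): for a
# hypothetical binary target, a perfect prediction yields a normalised gini
# of exactly 1.
def _demo_gini_norm():
    y = np.array([0., 0., 1., 0., 1.])
    assert abs(gini_norm(y, y.copy()) - 1.0) < 1e-12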
| [
"numpy.array",
"numpy.cumsum",
"numpy.sum",
"numpy.linspace"
] | [((556, 596), 'numpy.linspace', 'np.linspace', (['(1 / n_samples)', '(1)', 'n_samples'], {}), '(1 / n_samples, 1, n_samples)\n', (567, 596), True, 'import numpy as np\n'), ((661, 684), 'numpy.sum', 'np.sum', (['(l_ones - l_true)'], {}), '(l_ones - l_true)\n', (667, 684), True, 'import numpy as np\n'), ((698, 721), 'numpy.sum', 'np.sum', (['(l_ones - l_pred)'], {}), '(l_ones - l_pred)\n', (704, 721), True, 'import numpy as np\n'), ((444, 465), 'numpy.cumsum', 'np.cumsum', (['true_order'], {}), '(true_order)\n', (453, 465), True, 'import numpy as np\n'), ((468, 486), 'numpy.sum', 'np.sum', (['true_order'], {}), '(true_order)\n', (474, 486), True, 'import numpy as np\n'), ((500, 521), 'numpy.cumsum', 'np.cumsum', (['pred_order'], {}), '(pred_order)\n', (509, 521), True, 'import numpy as np\n'), ((524, 542), 'numpy.sum', 'np.sum', (['pred_order'], {}), '(pred_order)\n', (530, 542), True, 'import numpy as np\n'), ((264, 291), 'numpy.array', 'np.array', (['[y_valid, y_pred]'], {}), '([y_valid, y_pred])\n', (272, 291), True, 'import numpy as np\n')] |
from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model
from pathlib import Path
import numpy as np
import pyarrow as pa
from utils.functions import get_normalized_areas
from tqdm import tqdm
import pickle
from argparse import ArgumentParser
import ray
import zmq
from itertools import product
import os
from glob import glob
import yaml
parser = ArgumentParser()
parser.add_argument('--nnodes', type=int, default=1, help='number of machines')
parser.add_argument('--ncpus_per_node', type=int, default=6, help='ncpus available on each node')
parser.add_argument('--nodeid', type=int, default=0, help='node id for running on compute canada')
parser.add_argument('--rootdir', type=str, default='results/large_action_space', help='rootdir to store results')
parser.add_argument('--configfile', type=str, default='configs/large_action_space.yaml', help='path to config yaml file')
parser.add_argument('--cluster', type=str, default='niagara', help='niagara or anything else')
parser.add_argument('--default_separating_params_file', type=str, default=None, help='path to default params pkl. if None, uses SCIP defaults')
parser.add_argument('--run_local', action='store_true', help='run on the local machine')
parser.add_argument('--run_node', action='store_true', help='run as a single compute-node worker')
args = parser.parse_args()
np.random.seed(777)
SEEDS = [52, 176, 223] # [46, 72, 101]
SCIP_ADAPTIVE_PARAMS_FILE = f'{args.rootdir}/scip_adaptive_params.pkl'
ROOTDIR = args.rootdir
with open(args.configfile) as f:
action_space = yaml.load(f, Loader=yaml.FullLoader)
if args.default_separating_params_file is not None:
with open(args.default_separating_params_file, 'rb') as f:
TUNED_SEPARATING_PARAMS = pickle.load(f)
else:
TUNED_SEPARATING_PARAMS = None
@ray.remote
def run_worker(data, configs, workerid):
# print(f'[worker {workerid}] connecting to {port}')
# context = zmq.Context()
# send_socket = context.socket(zmq.PUSH)
# send_socket.connect(f'tcp://127.0.0.1:{port}')
baseline = 'adaptive'
assert len(configs) == len(set(configs))
print(f'[worker {workerid}] loading adapted params from: {SCIP_ADAPTIVE_PARAMS_FILE}')
with open(SCIP_ADAPTIVE_PARAMS_FILE, 'rb') as f:
scip_adaptive_params = pickle.load(f)
round_idx = len(list(scip_adaptive_params['mvc'].values())[0][SEEDS[0]])
worker_results_file = f'{ROOTDIR}/scip_adaptive_worker_results_{workerid}_{round_idx}.pkl'
logs = []
best_db_aucs = {p: {gs: {seed: 0 for seed in SEEDS} for gs in gss.keys()} for p, gss in data.items()}
best_configs = {p: {gs: {seed: None for seed in SEEDS} for gs in gss.keys()} for p, gss in data.items()}
for config in tqdm(configs, desc=f'worker {workerid}'):
cfg = {k: v for (k, v) in config}
problem = cfg['problem']
instances = data[problem]
cfg_db_aucs = {problem: {gs: {} for gs in instances.keys()}}
for (graph_size, (g, info)), maxcut_lp_iter_limit in zip(instances.items(), [5000, 7000, 10000]):
for seed in SEEDS:
if problem == 'mvc':
model, _ = mvc_model(g)
lp_iterations_limit = 1500
elif problem == 'maxcut':
model, _, _ = maxcut_mccormic_model(g)
lp_iterations_limit = maxcut_lp_iter_limit
else:
raise ValueError
set_aggresive_separation(model)
sepa_params = {'lp_iterations_limit': lp_iterations_limit,
'policy': 'adaptive',
'reset_maxcuts': 100,
'reset_maxcutsroot': 100,
}
# set the adapted params for the previous rounds and the current cfg for the next round.
adapted_params = scip_adaptive_params[problem][graph_size][seed]
adaptive_cfg = {k: {} for k in action_space.keys()}
# for k in ['objparalfac', 'dircutoffdistfac', 'efficacyfac', 'intsupportfac', 'maxcutsroot', 'minorthoroot']:
# cfg[k] = {} #{idx: v for idx, v in enumerate(vals + [cfg[k]])}
                for past_round, adapted_cfg in enumerate(adapted_params):  # avoid shadowing builtin round
                    adapted_cfg = {prm: val for prm, val in adapted_cfg}
                    for k in adaptive_cfg.keys():
                        adaptive_cfg[k][past_round] = adapted_cfg[k]
# set the current round params:
for k in adaptive_cfg.keys():
adaptive_cfg[k][round_idx] = cfg[k]
sepa_params.update(adaptive_cfg)
# set the best tuned params for using after the adapted ones:
if TUNED_SEPARATING_PARAMS is not None:
sepa_params['default_separating_params'] = TUNED_SEPARATING_PARAMS[problem][graph_size][seed]
sepa = CSBaselineSepa(hparams=sepa_params)
model.includeSepa(sepa, '#CS_baseline', baseline, priority=-100000000, freq=1)
reset_sepa = CSResetSepa(hparams=sepa_params)
model.includeSepa(reset_sepa, '#CS_reset', f'reset maxcuts params', priority=99999999, freq=1)
model.setBoolParam("misc/allowdualreds", 0)
model.setLongintParam('limits/nodes', 1) # solve only at the root node
model.setIntParam('separating/maxstallroundsroot', -1) # add cuts forever
model.setIntParam('branching/random/priority', 10000000)
model.setBoolParam('randomization/permutevars', True)
model.setIntParam('randomization/permutationseed', seed)
model.setIntParam('randomization/randomseedshift', seed)
model.setBoolParam('randomization/permutevars', True)
model.setIntParam('randomization/permutationseed', seed)
model.setIntParam('randomization/randomseedshift', seed)
model.hideOutput(True)
model.optimize()
sepa.update_stats()
stats = sepa.stats
db_auc = sum(get_normalized_areas(t=stats['lp_iterations'], ft=stats['dualbound'],
t_support=lp_iterations_limit, reference=info['optimal_value']))
if db_auc > best_db_aucs[problem][graph_size][seed]:
best_configs[problem][graph_size][seed] = config
best_db_aucs[problem][graph_size][seed] = db_auc
cfg_db_aucs[problem][graph_size][seed] = db_auc
logs.append((config, cfg_db_aucs))
# if len(logs) >= 5:
# # send logs to main process for checkpointing
# msg = (workerid, logs, best_configs, best_db_aucs)
# packet = pa.serialize(msg).to_buffer()
# send_socket.send(packet)
# logs = []
# if len(logs) > 0:
# # send remaining logs to main process for checkpointing
# msg = (workerid, logs, best_configs, best_db_aucs)
# packet = pa.serialize(msg).to_buffer()
# send_socket.send(packet)
with open(worker_results_file, 'wb') as f:
pickle.dump((logs, best_configs, best_db_aucs), f)
assert len(logs) == len(configs) == len(set([l[0] for l in logs]))
print(f'[worker {workerid}] saved results to: {worker_results_file}')
print(f'[worker {workerid}] finished')
def get_data_and_configs():
print(f'loading data from: {args.rootdir}/data.pkl')
with open(f'{args.rootdir}/data.pkl', 'rb') as f:
data = pickle.load(f)
search_space = {**action_space, 'problem': ['mvc', 'maxcut']}
# seeds = [46, 72, 101]
kv_list = []
for k, vals in search_space.items():
kv_list.append([(k, v) for v in vals])
configs = list(product(*kv_list))
return data, configs
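# Illustrative sketch (not in the original script; the toy search space below
# is made up): each config yielded by get_data_and_configs is a tuple of
# (parameter, value) pairs, which run_worker turns back into a dict.
def _demo_config_layout():
    toy_space = {'efficacyfac': [0.5, 1.0], 'problem': ['mvc']}
    kv_list = [[(k, v) for v in vals] for k, vals in toy_space.items()]
    cfgs = list(product(*kv_list))
    assert {k: v for (k, v) in cfgs[0]} == {'efficacyfac': 0.5, 'problem': 'mvc'}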
def run_node(args):
# socket for receiving results from workers
# context = zmq.Context()
# recv_socket = context.socket(zmq.PULL)
# port = recv_socket.bind_to_random_port('tcp://127.0.0.1', min_port=10000, max_port=60000)
# print(f'[node {args.nodeid} connected to port {port}')
# load adapted params:
with open(SCIP_ADAPTIVE_PARAMS_FILE, 'rb') as f:
scip_adaptive_params = pickle.load(f)
round_idx = len(list(scip_adaptive_params['mvc'].values())[0][SEEDS[0]])
# get missing configs:
data, all_configs = get_data_and_configs()
main_results_file = os.path.join(args.rootdir, f'scip_adaptive_round{round_idx}_results.pkl')
with open(main_results_file, 'rb') as f:
main_results = pickle.load(f)
missing_configs = list(set(all_configs) - set(main_results['configs'].keys()))
# # assign configs to current machine
# node_configs = []
# for idx in range(args.nodeid, len(missing_configs), args.nnodes):
# node_configs.append(missing_configs[idx])
# assert len(node_configs) == len(set(node_configs))
with open(f'{ROOTDIR}/node{args.nodeid}_configs.pkl', 'rb') as f:
node_configs = pickle.load(f)
print(f'[node {args.nodeid}] loaded configs from: {ROOTDIR}/node{args.nodeid}_configs.pkl')
# assign configs to workers
nworkers = args.ncpus_per_node-1
ray.init()
worker_handles = []
all_worker_configs = []
for workerid in range(nworkers):
worker_configs = [node_configs[idx] for idx in range(workerid, len(node_configs), nworkers)]
if len(worker_configs) > 0:
assert len(worker_configs) == len(set(worker_configs))
worker_handles.append(run_worker.remote(data, worker_configs, f'{args.nodeid}_{workerid}'))
all_worker_configs += worker_configs
assert len(set(all_worker_configs)) == len(node_configs)
# wait for all workers to finish
ray.get(worker_handles)
print('finished')
# node_results_dir = os.path.join(args.rootdir, f'node{args.nodeid}_results')
# if not os.path.exists(node_results_dir):
# os.makedirs(node_results_dir)
# node_results_file = os.path.join(node_results_dir, f'scip_adaptive_node_results_round{round_idx}.pkl')
# node_results = {'best_db_aucs': {p: {gs: {seed: 0 for seed in SEEDS} for gs in gss.keys()} for p, gss in data.items()},
# 'best_configs': {p: {gs: {seed: None for seed in SEEDS} for gs in gss.keys()} for p, gss in data.items()},
# 'configs': {}}
# # wait for logs
# last_save = 0
# pbar = tqdm(total=len(node_configs), desc='receiving logs')
# while len(node_results['configs']) < len(node_configs):
# msg = recv_socket.recv()
# workerid, logs, worker_best_cfgs, worker_best_db_aucs = pa.deserialize(msg)
# for cfg, cfg_db_aucs in logs:
# node_results['configs'][cfg] = cfg_db_aucs
# for problem, graph_sizes in worker_best_db_aucs.items():
# for graph_size, seeds in graph_sizes.items():
# for seed, db_auc in seeds.items():
# if db_auc > node_results['best_db_aucs'][problem][graph_size][seed]:
# node_results['best_db_aucs'][problem][graph_size][seed] = worker_best_db_aucs[problem][graph_size][seed]
# node_results['best_configs'][problem][graph_size][seed] = worker_best_cfgs[problem][graph_size][seed]
# pbar.update(len(logs))
# # save to node results file every 5 configs
# if len(node_results['configs']) - last_save > 5:
# last_save = len(node_results['configs'])
# with open(node_results_file, 'wb') as f:
# pickle.dump(node_results, f)
# # save results and exit
# with open(node_results_file, 'wb') as f:
# pickle.dump(node_results, f)
# print(f'finished {len(node_results["configs"])}/{len(node_configs)} configs')
# print(f'saved node results to {node_results_file}')
def submit_job(jobname, nnodes, nodeid, time_limit_hours, time_limit_minutes):
# CREATE SBATCH FILE
    job_file = os.path.join(args.rootdir, jobname + '.sh')
    with open(job_file, 'w') as fh:
        fh.write("#!/bin/bash\n")
        fh.write(f'#SBATCH --time={time_limit_hours}:{time_limit_minutes}:00\n')
        fh.write('#SBATCH --account=def-alodi\n')
        fh.write(f'#SBATCH --output={args.rootdir}/{jobname}.out\n')
        fh.write(f'#SBATCH --job-name={jobname}\n')
        fh.write(f'#SBATCH --cpus-per-task={args.ncpus_per_node}\n')
        if args.cluster == 'niagara':
            fh.write('#SBATCH --mem=0\n')
            fh.write('#SBATCH --nodes=1\n')
            fh.write('#SBATCH --ntasks-per-node=1\n')
            fh.write('module load NiaEnv/2018a\n')
            fh.write('module load python\n')
            fh.write('source $HOME/server_bashrc\n')
            fh.write('source $HOME/venv/bin/activate\n')
        else:
            fh.write(f'#SBATCH --mem={int(4 * args.ncpus_per_node)}G\n')
        fh.write(f'srun python run_scip_adaptive.py --configfile {args.configfile} --rootdir {args.rootdir} --nnodes {nnodes} --ncpus_per_node {args.ncpus_per_node} --nodeid {nodeid} --run_node --default_separating_params_file {args.default_separating_params_file}\n')
    os.system("sbatch {}".format(job_file))
def main(args):
data, all_configs = get_data_and_configs()
# load/init prev rounds params
if not os.path.exists(SCIP_ADAPTIVE_PARAMS_FILE):
scip_adaptive_params = {p: {gs: {seed: [] for seed in SEEDS} for gs in insts.keys()} for p, insts in data.items()}
with open(SCIP_ADAPTIVE_PARAMS_FILE, 'wb') as f:
pickle.dump(scip_adaptive_params, f)
else:
with open(SCIP_ADAPTIVE_PARAMS_FILE, 'rb') as f:
scip_adaptive_params = pickle.load(f)
round_idx = len(list(scip_adaptive_params['mvc'].values())[0][SEEDS[0]])
# update main_results
main_results_file = os.path.join(args.rootdir, f'scip_adaptive_round{round_idx}_results.pkl')
if os.path.exists(main_results_file):
with open(main_results_file, 'rb') as f:
main_results = pickle.load(f)
else:
main_results = {'best_db_aucs': {p: {gs: {seed: 0 for seed in SEEDS} for gs in insts.keys()} for p, insts in data.items()},
'best_configs': {p: {gs: {seed: None for seed in SEEDS} for gs in insts.keys()} for p, insts in data.items()},
'configs': {}}
# read all worker results from the previous run, update main results and remove the files.
for worker_file in tqdm(glob(f'{ROOTDIR}/scip_adaptive_worker_results*.pkl'), desc='loading worker results'):
# for path in tqdm(Path(args.rootdir).rglob(f'scip_adaptive_node_results_round{round_idx}.pkl'), desc='Loading node files'):
with open(worker_file, 'rb') as f:
logs, worker_best_configs, worker_best_db_aucs = pickle.load(f)
# todo continue
for cfg, cfg_db_aucs in logs:
main_results['configs'][cfg] = cfg_db_aucs
for problem, graph_sizes in worker_best_db_aucs.items():
for graph_size, seeds in graph_sizes.items():
for seed, db_auc in seeds.items():
if db_auc > main_results['best_db_aucs'][problem][graph_size][seed]:
main_results['best_db_aucs'][problem][graph_size][seed] = worker_best_db_aucs[problem][graph_size][seed]
main_results['best_configs'][problem][graph_size][seed] = worker_best_configs[problem][graph_size][seed]
# main_results['configs'].update(worker_results['configs'])
# for problem, instances in worker_results['best_db_aucs'].items():
# for graph_size, db_aucs in instances.items():
# for seed, db_auc in db_aucs.items():
# if db_auc > main_results['best_db_aucs'][problem][graph_size][seed]:
# main_results['best_db_aucs'][problem][graph_size][seed] = worker_results['best_db_aucs'][problem][graph_size][seed]
# main_results['best_configs'][problem][graph_size][seed] = worker_results['best_configs'][problem][graph_size][seed]
# save updated results to main results file
with open(main_results_file, 'wb') as f:
pickle.dump(main_results, f)
print(f'saved main results to {main_results_file}')
# remove all worker files
os.system(f"find {ROOTDIR} -type f -name 'scip_adaptive_worker_results*' -delete")
# check for missing results:
missing_configs = list(set(all_configs) - set(main_results['configs'].keys()))
print(f'{len(missing_configs)} configs left to execute')
print(f'################### adapting round {round_idx} ###################')
if len(missing_configs) > 0:
# submit jobs or run local
if args.run_local:
run_node(args)
else:
# submit up to nnodes jobs
nnodes = int(min(args.nnodes, np.ceil(len(missing_configs) / (args.ncpus_per_node-1))))
time_limit_minutes = max(int(np.ceil(len(missing_configs) * 40 / nnodes / (args.ncpus_per_node - 1))), 16)
time_limit_hours = int(np.floor(time_limit_minutes / 60))
time_limit_minutes = time_limit_minutes % 60
assert 24 > time_limit_hours >= 0
            assert 60 > time_limit_minutes >= 0
# save node configs to pkls
all_node_configs = []
for nodeid in range(nnodes):
node_configs = []
for idx in range(nodeid, len(missing_configs), nnodes):
node_configs.append(missing_configs[idx])
with open(f'{ROOTDIR}/node{nodeid}_configs.pkl', 'wb') as f:
pickle.dump(node_configs, f)
all_node_configs += node_configs
assert set(all_node_configs) == set(missing_configs)
for nodeid in range(nnodes):
submit_job(f'scip_adapt{nodeid}', nnodes, nodeid, time_limit_hours, time_limit_minutes)
else:
# append best params for round_idx to scip_adaptive_params
for problem, graph_sizes in main_results['best_configs'].items():
for graph_size, seeds in graph_sizes.items():
for seed, cfg in seeds.items():
scip_adaptive_params[problem][graph_size][seed].append(cfg)
# save the new scip adaptive params
with open(SCIP_ADAPTIVE_PARAMS_FILE, 'wb') as f:
pickle.dump(scip_adaptive_params, f)
print(f'saved scip_adaptive_params for rounds 0-{round_idx} to {SCIP_ADAPTIVE_PARAMS_FILE}')
print(f'for adapting scip to lp round {round_idx+1} run the script again')
if __name__ == '__main__':
if args.run_node:
run_node(args)
else:
main(args)
print('finished')
| [
"yaml.load",
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"utils.scip_models.CSResetSepa",
"utils.scip_models.maxcut_mccormic_model",
"numpy.floor",
"pickle.load",
"utils.functions.get_normalized_areas",
"glob.glob",
"os.path.join",
"os.path.exists",
"utils.scip_models.CSBa... | [((409, 425), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (423, 425), False, 'from argparse import ArgumentParser\n'), ((1382, 1401), 'numpy.random.seed', 'np.random.seed', (['(777)'], {}), '(777)\n', (1396, 1401), True, 'import numpy as np\n'), ((1588, 1624), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1597, 1624), False, 'import yaml\n'), ((2752, 2792), 'tqdm.tqdm', 'tqdm', (['configs'], {'desc': 'f"""worker {workerid}"""'}), "(configs, desc=f'worker {workerid}')\n", (2756, 2792), False, 'from tqdm import tqdm\n'), ((8512, 8585), 'os.path.join', 'os.path.join', (['args.rootdir', 'f"""scip_adaptive_round{round_idx}_results.pkl"""'], {}), "(args.rootdir, f'scip_adaptive_round{round_idx}_results.pkl')\n", (8524, 8585), False, 'import os\n'), ((9282, 9292), 'ray.init', 'ray.init', ([], {}), '()\n', (9290, 9292), False, 'import ray\n'), ((9842, 9865), 'ray.get', 'ray.get', (['worker_handles'], {}), '(worker_handles)\n', (9849, 9865), False, 'import ray\n'), ((12083, 12126), 'os.path.join', 'os.path.join', (['args.rootdir', "(jobname + '.sh')"], {}), "(args.rootdir, jobname + '.sh')\n", (12095, 12126), False, 'import os\n'), ((14008, 14081), 'os.path.join', 'os.path.join', (['args.rootdir', 'f"""scip_adaptive_round{round_idx}_results.pkl"""'], {}), "(args.rootdir, f'scip_adaptive_round{round_idx}_results.pkl')\n", (14020, 14081), False, 'import os\n'), ((14089, 14122), 'os.path.exists', 'os.path.exists', (['main_results_file'], {}), '(main_results_file)\n', (14103, 14122), False, 'import os\n'), ((16487, 16574), 'os.system', 'os.system', (['f"""find {ROOTDIR} -type f -name \'scip_adaptive_worker_results*\' -delete"""'], {}), '(\n f"find {ROOTDIR} -type f -name \'scip_adaptive_worker_results*\' -delete")\n', (16496, 16574), False, 'import os\n'), ((1774, 1788), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1785, 1788), False, 'import pickle\n'), ((2316, 2330), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2327, 2330), False, 'import pickle\n'), ((7233, 7283), 'pickle.dump', 'pickle.dump', (['(logs, best_configs, best_db_aucs)', 'f'], {}), '((logs, best_configs, best_db_aucs), f)\n', (7244, 7283), False, 'import pickle\n'), ((7629, 7643), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7640, 7643), False, 'import pickle\n'), ((7864, 7881), 'itertools.product', 'product', (['*kv_list'], {}), '(*kv_list)\n', (7871, 7881), False, 'from itertools import product\n'), ((8321, 8335), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8332, 8335), False, 'import pickle\n'), ((8654, 8668), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8665, 8668), False, 'import pickle\n'), ((9093, 9107), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9104, 9107), False, 'import pickle\n'), ((13491, 13532), 'os.path.exists', 'os.path.exists', (['SCIP_ADAPTIVE_PARAMS_FILE'], {}), '(SCIP_ADAPTIVE_PARAMS_FILE)\n', (13505, 13532), False, 'import os\n'), ((14655, 14707), 'glob.glob', 'glob', (['f"""{ROOTDIR}/scip_adaptive_worker_results*.pkl"""'], {}), "(f'{ROOTDIR}/scip_adaptive_worker_results*.pkl')\n", (14659, 14707), False, 'from glob import glob\n'), ((16364, 16392), 'pickle.dump', 'pickle.dump', (['main_results', 'f'], {}), '(main_results, f)\n', (16375, 16392), False, 'import pickle\n'), ((13726, 13762), 'pickle.dump', 'pickle.dump', (['scip_adaptive_params', 'f'], {}), '(scip_adaptive_params, f)\n', (13737, 13762), False, 'import pickle\n'), ((13865, 13879), 'pickle.load', 
'pickle.load', (['f'], {}), '(f)\n', (13876, 13879), False, 'import pickle\n'), ((14200, 14214), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14211, 14214), False, 'import pickle\n'), ((14974, 14988), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14985, 14988), False, 'import pickle\n'), ((18564, 18600), 'pickle.dump', 'pickle.dump', (['scip_adaptive_params', 'f'], {}), '(scip_adaptive_params, f)\n', (18575, 18600), False, 'import pickle\n'), ((3476, 3507), 'utils.scip_models.set_aggresive_separation', 'set_aggresive_separation', (['model'], {}), '(model)\n', (3500, 3507), False, 'from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model\n'), ((4972, 5007), 'utils.scip_models.CSBaselineSepa', 'CSBaselineSepa', ([], {'hparams': 'sepa_params'}), '(hparams=sepa_params)\n', (4986, 5007), False, 'from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model\n'), ((5132, 5164), 'utils.scip_models.CSResetSepa', 'CSResetSepa', ([], {'hparams': 'sepa_params'}), '(hparams=sepa_params)\n', (5143, 5164), False, 'from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model\n'), ((17259, 17292), 'numpy.floor', 'np.floor', (['(time_limit_minutes / 60)'], {}), '(time_limit_minutes / 60)\n', (17267, 17292), True, 'import numpy as np\n'), ((3177, 3189), 'utils.scip_models.mvc_model', 'mvc_model', (['g'], {}), '(g)\n', (3186, 3189), False, 'from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model\n'), ((6192, 6329), 'utils.functions.get_normalized_areas', 'get_normalized_areas', ([], {'t': "stats['lp_iterations']", 'ft': "stats['dualbound']", 't_support': 'lp_iterations_limit', 'reference': "info['optimal_value']"}), "(t=stats['lp_iterations'], ft=stats['dualbound'],\n t_support=lp_iterations_limit, reference=info['optimal_value'])\n", (6212, 6329), False, 'from utils.functions import get_normalized_areas\n'), ((17824, 17852), 'pickle.dump', 'pickle.dump', (['node_configs', 'f'], {}), '(node_configs, f)\n', (17835, 17852), False, 'import pickle\n'), ((3313, 3337), 'utils.scip_models.maxcut_mccormic_model', 'maxcut_mccormic_model', (['g'], {}), '(g)\n', (3334, 3337), False, 'from utils.scip_models import mvc_model, CSBaselineSepa, set_aggresive_separation, CSResetSepa, maxcut_mccormic_model\n')] |
# Author: <EMAIL> (Any bug report is welcome)
# Time Created: Aug 2016
# Time Last Updated: Nov 2016
# Addr: Shenzhen, China
# Description: define functions and parameters related to input data
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import h5py
import time
import random
import numpy as np
import pandas as pd
from scipy import sparse
vec_len = 9561
data_dir = "../data_files"
h5_dir = os.path.join(data_dir, "h5_files")
def dense_to_one_hot(labels_dense, num_classes=2, dtype=int):
"""Convert class labels from scalars to one-hot vectors.
Args:
labels_dense: <type 'numpy.ndarray'> dense label
num_classes: <type 'int'> the number of classes in one hot label
dtype: <type 'type'> data type
Return:
        labels_one_hot: <type 'numpy.ndarray'> one hot label
"""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel().astype(dtype)] = 1
return labels_one_hot
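# Minimal sanity check (illustrative sketch, not in the original file): dense
# labels [0, 1, 1] map onto rows of the 2-class identity matrix.
def _demo_dense_to_one_hot():
    one_hot = dense_to_one_hot(np.array([0, 1, 1]))
    assert (one_hot == np.array([[1, 0], [0, 1], [0, 1]])).all()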
class Dataset(object):
"""Base dataset class
"""
def __init__(self, size, is_shuffle=False, fold=10):
"""Constructor, create a dataset container.
Args:
size: <type 'int'> the number of samples
is_shuffle: <type 'bool'> whether shuffle samples when the dataset created
fold: <type 'int'> how many folds to split samples
Return:
None
"""
self.size = size
self.perm = np.array(range(self.size))
if is_shuffle:
random.shuffle(self.perm)
self.train_size = int(self.size * (1.0 - 1.0 / fold))
self.train_perm = self.perm[range(self.train_size)]
self.train_begin = 0
self.train_end = 0
self.test_perm = self.perm[range(self.train_size, self.size)]
def generate_perm_for_train_batch(self, batch_size):
"""Create the permutation for a batch of train samples
Args:
batch_size: <type 'int'> the number of samples in the batch
Return:
perm: <type 'numpy.ndarray'> the permutation of samples which form a batch
"""
self.train_begin = self.train_end
self.train_end += batch_size
if self.train_end > self.train_size:
random.shuffle(self.train_perm)
self.train_begin = 0
self.train_end = batch_size
perm = self.train_perm[self.train_begin: self.train_end]
return perm
class PosDataset(Dataset):
"""Positive dataset class
"""
def __init__(self, target, one_hot=True, dtype=np.float32):
"""Create a positive dataset for a protein kinase target.
The data is read from hdf5 files.
Args:
target: <type 'str'> the protein kinase target name, also the name of hdf5 file
one_hot: <type 'bool'> whether to convert labels from dense to one_hot
dtype: <type 'type'> data type of features
Return:
None
"""
# open h5 file
self.h5_fn = os.path.join(h5_dir, target + ".h5")
self.h5 = h5py.File(self.h5_fn, "r")
# read ids
self.ids = self.h5["chembl_id"].value
        # read fingerprint features; only ap is used here (mg/tt stacking is commented out below)
ap = sparse.csr_matrix((self.h5["ap"]["data"], self.h5["ap"]["indices"], self.h5["ap"]["indptr"]), shape=[len(self.h5["ap"]["indptr"]) - 1, vec_len])
#mg = sparse.csr_matrix((self.h5["mg"]["data"], self.h5["mg"]["indices"], self.h5["mg"]["indptr"]), shape=[len(self.h5["mg"]["indptr"]) - 1, vec_len])
#tt = sparse.csr_matrix((self.h5["tt"]["data"], self.h5["tt"]["indices"], self.h5["tt"]["indptr"]), shape=[len(self.h5["tt"]["indptr"]) - 1, vec_len])
#self.features = sparse.hstack([ap, mg, tt]).toarray()
self.features = ap.toarray()
# label
self.labels = self.h5["label"].value
        if one_hot:
self.labels = dense_to_one_hot(self.labels)
# year
if "year" in self.h5.keys():
self.years = self.h5["year"].value
else:
self.years = None
# close h5 file
self.h5.close()
# dtype
self.dtype = dtype
# pre_process
#self.features = np.log10(1.0 + self.features).astype(self.dtype)
self.features = np.clip(self.features, 0, 1).astype(self.dtype)
#
Dataset.__init__(self, self.features.shape[0])
def next_train_batch(self, batch_size):
"""Generate the next batch of samples
Args:
batch_size: <type 'int'> the number of samples in the batch
Return:
A tuple of features and labels of the samples in the batch
"""
perm = self.generate_perm_for_train_batch(batch_size)
return self.features[perm], self.labels[perm]
class NegDataset(Dataset):
"""Negative dataset class
"""
def __init__(self, target_list, one_hot=True, dtype=np.float32):
"""Create a negative dataset for a protein kinase target.
The data is read from a hdf5 file, pubchem_neg_sample.h5.
Note that for each target, these samples has the corresponding labels,
and I use a mask_dict to store these labels, i.e. mask_dict[target] = labels for target
Args:
target_list: <type 'list'> the protein kinase targets' list
one_hot: <type 'bool'> whether to convert labels from dense to one_hot
dtype: <type 'type'> data type of features
Return:
None
"""
# open h5 file
self.h5_fn = os.path.join(h5_dir, "pubchem_neg_sample.h5")
self.h5 = h5py.File(self.h5_fn, "r")
# read ids
self.ids = self.h5["chembl_id"].value
        # read fingerprint features; only ap is used here (mg/tt stacking is commented out below)
ap = sparse.csr_matrix((self.h5["ap"]["data"], self.h5["ap"]["indices"], self.h5["ap"]["indptr"]), shape=[len(self.h5["ap"]["indptr"]) - 1, vec_len])
#mg = sparse.csr_matrix((self.h5["mg"]["data"], self.h5["mg"]["indices"], self.h5["mg"]["indptr"]), shape=[len(self.h5["mg"]["indptr"]) - 1, vec_len])
#tt = sparse.csr_matrix((self.h5["tt"]["data"], self.h5["tt"]["indices"], self.h5["tt"]["indptr"]), shape=[len(self.h5["tt"]["indptr"]) - 1, vec_len])
#self.features = sparse.hstack([ap, mg, tt]).toarray()
self.features = ap.toarray()
# label(mask)
self.mask_dict = {}
for target in target_list:
#mask = self.h5["mask"][target].value
mask = self.h5["cliped_mask"][target].value
            if one_hot:
self.mask_dict[target] = dense_to_one_hot(mask)
else:
self.mask_dict[target] = mask
# close h5 file
self.h5.close()
# dtype
self.dtype = dtype
# pre_process
#self.features = np.log10(1.0 + self.features).astype(self.dtype)
self.features = np.clip(self.features, 0, 1).astype(self.dtype)
#
Dataset.__init__(self, self.features.shape[0])
def next_train_batch(self, target, batch_size):
"""Generate the next batch of samples
Args:
batch_size: <type 'int'> the number of samples in the batch
Return:
A tuple of features and labels of the samples in the batch
"""
perm = self.generate_perm_for_train_batch(batch_size)
return self.features[perm], self.mask_dict[target][perm]
class Datasets(object):
"""dataset class, contains several positive datasets and one negative dataset.
"""
def __init__(self, target_list, one_hot=True):
"""
Args:
target_list: <type 'list'> the protein kinase targets' list
one_hot: <type 'bool'> whether to convert labels from dense to one_hot
return:
None
"""
# read neg dataset
self.neg = NegDataset(target_list, one_hot=one_hot)
# read pos datasets
self.pos = {}
for target in target_list:
self.pos[target] = PosDataset(target, one_hot=one_hot)
def next_train_batch(self, target, pos_batch_size, neg_batch_size):
"""Generate the next batch of samples
Args:
target: <type 'str'> the positive target name
pos_batch_size: <type 'int'> the number of samples in the batch from positive target dataset
neg_batch_size: <type 'int'> the number of samples in the batch from negative target dataset
Return:
A tuple of features and labels of the samples in the batch
"""
pos_feature_batch, pos_label_batch = self.pos[target].next_train_batch(pos_batch_size)
neg_feature_batch, neg_label_batch = self.neg.next_train_batch(target, neg_batch_size)
return np.vstack([pos_feature_batch, neg_feature_batch]), np.vstack([pos_label_batch, neg_label_batch])
def test_dataset():
"""A simple test
"""
target_list = ["cdk2", "egfr_erbB1", "gsk3b", "hgfr", "map_k_p38a", "tpk_lck", "tpk_src", "vegfr2"]
d = Datasets(target_list)
print("test for batching")
print("batch_num target feature_min feature_max label_min label_max")
for step in range(2 * 500):
for target in target_list:
compds_batch, labels_batch = d.next_train_batch(target, 128, 128)
if np.isnan(compds_batch).sum() > 0:
print("warning: nan in feature"),
print("%9d %10s %11.2f %11.2f %9.2f %9.2f" % (step, target, compds_batch.min(), compds_batch.max(), labels_batch.min(), labels_batch.max()))
if (step % 500) == 0:
print("%9d %10s %11.2f %11.2f %9.2f %9.2f" % (step, target, compds_batch.min(), compds_batch.max(), labels_batch.min(), labels_batch.max()))
# from data_files/fp_2_code.py
def read_fp(filename, dtype=int):
""" read fingerprint from file
Args:
filename: <type 'str'>
Return:
chembl_id_list: <type 'list'>, a list of str
fps_list: <type 'list'>, a list of dict.
"""
chembl_id_list = []
fps_list = []
infile = open(filename, "r")
line_num = 0
for line in infile:
line_num += 1
chembl_id = line.split("\t")[0].strip()
fps_str = line.split("\t")[1].strip()
fps = {}
fps_str = fps_str[1:-1].split(",")
for fp in fps_str:
if ":" in fp:
k, v = fp.split(":")
k = dtype(k.strip())
v = dtype(v.strip())
assert k not in fps.keys(), ("error in fp_file %s at line %d: dict's keys duplicated" % (filename, line_num))
fps[k] = v
chembl_id_list.append(chembl_id)
fps_list.append(fps)
infile.close()
return chembl_id_list, fps_list
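# Example of the line format read_fp expects (illustrative; the id and the
# key:count pairs below are made up):
#
#     CHEMBL12345\t{558115:1, 1590461:2}
#
# With the default dtype=int this yields
# (['CHEMBL12345'], [{558115: 1, 1590461: 2}]); Dataset_reg below passes
# dtype=str, so keys and counts stay strings.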
class Dataset_reg(object):
def __init__(self, target, train_year_up_limit = 2013):
"""
"""
fp_dir = "../data_files/fp_files"
# all apfps that were picked out.
apfp_picked_fn = os.path.join(fp_dir, target + "_apfp.picked_all")
self.apfp_picked_all = list(np.genfromtxt(apfp_picked_fn, dtype=str))
self.apfp_picked_all.sort()
# read response
response_df = pd.read_csv(os.path.join(fp_dir, target + ".response"), delimiter="\t", names=["CHEMBL_ID", "YEAR", "LABEL", "TYPE", "RELATION", "VALUE"], index_col=0)
# read apfp as features
apfp_fn = os.path.join(fp_dir, target + ".apfp")
id_list, apfps_list = read_fp(apfp_fn, dtype=str)
features_df = pd.DataFrame(index=id_list, data=apfps_list, columns=self.apfp_picked_all, dtype=float)
# merge response and features
df = pd.concat([response_df, features_df], axis=1)
# pick out records with explicit values
df = df[df["RELATION"] == "="]
df = df[["YEAR", "VALUE"] + self.apfp_picked_all]
# remove duplicates, keep the mean "VALUE" and mean "YEAR".
df.reset_index(drop=False, inplace=True)
df = df.fillna(0).groupby(by=["CHEMBL_ID"]).mean()
# log processing for "VALUE" and features
df["LOG_VALUE"] = np.log(df["VALUE"])
df[self.apfp_picked_all] = np.log(1 + df[self.apfp_picked_all])
self.df = df
# batch related
mask = self.df["YEAR"] <= train_year_up_limit
self.tr_ids = self.df.index[mask].values
self.te_ids = self.df.index[~mask].values
self.tr_size = self.tr_ids.shape[0]
self.tr_begin = 0
self.tr_end = 0
def next_batch(self, batch_size):
"""
"""
self.tr_begin = self.tr_end
self.tr_end += batch_size
if self.tr_end > self.tr_size:
random.shuffle(self.tr_ids)
self.tr_begin = 0
self.tr_end = batch_size
        batch = self.df.ix[self.tr_ids[self.tr_begin: self.tr_end]]  # .ix needs pandas < 1.0; use .loc on newer versions
return batch[self.apfp_picked_all].values, batch["LOG_VALUE"].values
def test_batch(self):
batch = self.df.ix[self.te_ids]
return batch[self.apfp_picked_all].values, batch["LOG_VALUE"].values
def train_batch(self):
batch = self.df.ix[self.tr_ids]
return batch[self.apfp_picked_all].values, batch["LOG_VALUE"].values
if __name__ == "__main__":
target_list = ["cdk2", "egfr_erbB1", "gsk3b", "hgfr", "map_k_p38a", "tpk_lck", "tpk_src", "vegfr2"]
test_dataset()
| [
"pandas.DataFrame",
"h5py.File",
"numpy.log",
"random.shuffle",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.clip",
"numpy.isnan",
"numpy.arange",
"os.path.join",
"pandas.concat",
"numpy.vstack"
] | [((472, 506), 'os.path.join', 'os.path.join', (['data_dir', '"""h5_files"""'], {}), "(data_dir, 'h5_files')\n", (484, 506), False, 'import os\n'), ((980, 1015), 'numpy.zeros', 'np.zeros', (['(num_labels, num_classes)'], {}), '((num_labels, num_classes))\n', (988, 1015), True, 'import numpy as np\n'), ((925, 946), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (934, 946), True, 'import numpy as np\n'), ((2946, 2982), 'os.path.join', 'os.path.join', (['h5_dir', "(target + '.h5')"], {}), "(h5_dir, target + '.h5')\n", (2958, 2982), False, 'import os\n'), ((2997, 3023), 'h5py.File', 'h5py.File', (['self.h5_fn', '"""r"""'], {}), "(self.h5_fn, 'r')\n", (3006, 3023), False, 'import h5py\n'), ((5264, 5309), 'os.path.join', 'os.path.join', (['h5_dir', '"""pubchem_neg_sample.h5"""'], {}), "(h5_dir, 'pubchem_neg_sample.h5')\n", (5276, 5309), False, 'import os\n'), ((5324, 5350), 'h5py.File', 'h5py.File', (['self.h5_fn', '"""r"""'], {}), "(self.h5_fn, 'r')\n", (5333, 5350), False, 'import h5py\n'), ((10198, 10247), 'os.path.join', 'os.path.join', (['fp_dir', "(target + '_apfp.picked_all')"], {}), "(fp_dir, target + '_apfp.picked_all')\n", (10210, 10247), False, 'import os\n'), ((10586, 10624), 'os.path.join', 'os.path.join', (['fp_dir', "(target + '.apfp')"], {}), "(fp_dir, target + '.apfp')\n", (10598, 10624), False, 'import os\n'), ((10697, 10788), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'id_list', 'data': 'apfps_list', 'columns': 'self.apfp_picked_all', 'dtype': 'float'}), '(index=id_list, data=apfps_list, columns=self.apfp_picked_all,\n dtype=float)\n', (10709, 10788), True, 'import pandas as pd\n'), ((10828, 10873), 'pandas.concat', 'pd.concat', (['[response_df, features_df]'], {'axis': '(1)'}), '([response_df, features_df], axis=1)\n', (10837, 10873), True, 'import pandas as pd\n'), ((11240, 11259), 'numpy.log', 'np.log', (["df['VALUE']"], {}), "(df['VALUE'])\n", (11246, 11259), True, 'import numpy as np\n'), ((11291, 11327), 'numpy.log', 'np.log', (['(1 + df[self.apfp_picked_all])'], {}), '(1 + df[self.apfp_picked_all])\n', (11297, 11327), True, 'import numpy as np\n'), ((1592, 1617), 'random.shuffle', 'random.shuffle', (['self.perm'], {}), '(self.perm)\n', (1606, 1617), False, 'import random\n'), ((2259, 2290), 'random.shuffle', 'random.shuffle', (['self.train_perm'], {}), '(self.train_perm)\n', (2273, 2290), False, 'import random\n'), ((8184, 8233), 'numpy.vstack', 'np.vstack', (['[pos_feature_batch, neg_feature_batch]'], {}), '([pos_feature_batch, neg_feature_batch])\n', (8193, 8233), True, 'import numpy as np\n'), ((8235, 8280), 'numpy.vstack', 'np.vstack', (['[pos_label_batch, neg_label_batch]'], {}), '([pos_label_batch, neg_label_batch])\n', (8244, 8280), True, 'import numpy as np\n'), ((10280, 10320), 'numpy.genfromtxt', 'np.genfromtxt', (['apfp_picked_fn'], {'dtype': 'str'}), '(apfp_picked_fn, dtype=str)\n', (10293, 10320), True, 'import numpy as np\n'), ((10404, 10446), 'os.path.join', 'os.path.join', (['fp_dir', "(target + '.response')"], {}), "(fp_dir, target + '.response')\n", (10416, 10446), False, 'import os\n'), ((11753, 11780), 'random.shuffle', 'random.shuffle', (['self.tr_ids'], {}), '(self.tr_ids)\n', (11767, 11780), False, 'import random\n'), ((4106, 4134), 'numpy.clip', 'np.clip', (['self.features', '(0)', '(1)'], {}), '(self.features, 0, 1)\n', (4113, 4134), True, 'import numpy as np\n'), ((6485, 6513), 'numpy.clip', 'np.clip', (['self.features', '(0)', '(1)'], {}), '(self.features, 0, 1)\n', (6492, 6513), True, 'import numpy as np\n'), 
((8709, 8731), 'numpy.isnan', 'np.isnan', (['compds_batch'], {}), '(compds_batch)\n', (8717, 8731), True, 'import numpy as np\n')] |
import numpy as np
from math import atan2, sin, cos, pi, tan
from DR20API import sim
import time
import matplotlib.pyplot as plt
class Controller:
def __init__(self, port = 19997):
"""
Initialize the controller of DR20 robot, and connect and start the simulation in CoppeliaSim.
Arguments:
port -- The port used to connect to coppeliaSim, default 19997.
"""
self.map_size = 500
self.current_map = np.zeros((self.map_size,self.map_size),dtype="uint8")
self.port = port
self.client = self.connect_simulation(self.port)
# Get handles
_ , self.robot = sim.simxGetObjectHandle(self.client,"dr20",sim.simx_opmode_blocking)
_ , self.sensor = sim.simxGetObjectHandle(self.client, "Hokuyo_URG_04LX_UG01", sim.simx_opmode_blocking)
self.handle_left_wheel = sim.simxGetObjectHandle(self.client, "dr20_leftWheelJoint_", sim.simx_opmode_blocking)
self.handle_right_wheel = sim.simxGetObjectHandle(self.client, "dr20_rightWheelJoint_", sim.simx_opmode_blocking)
# Get data from Lidar
sim.simxAddStatusbarMessage(self.client, "python_remote_connected\n", sim.simx_opmode_oneshot)
_ , data = sim.simxGetStringSignal(self.client, 'UG01_distance', sim.simx_opmode_streaming)
_ , left_wheel_pos = sim.simxGetObjectPosition(self.client, self.handle_left_wheel[1], -1, sim.simx_opmode_streaming)
_ , right_wheel_pos = sim.simxGetObjectPosition(self.client, self.handle_right_wheel[1], -1, sim.simx_opmode_streaming)
_, pos = sim.simxGetObjectPosition(self.client, self.robot, -1, sim.simx_opmode_streaming)
_, orientation = sim.simxGetObjectOrientation(self.client, self.robot, -1, sim.simx_opmode_streaming)
_, sensor_pos = sim.simxGetObjectPosition(self.client, self.sensor, -1, sim.simx_opmode_streaming)
_, sensor_orientation = sim.simxGetObjectOrientation(self.client, self.sensor, -1, sim.simx_opmode_streaming)
_, data = sim.simxGetStringSignal(self.client, 'UG01_distance', sim.simx_opmode_streaming)
sim.simxSynchronousTrigger(self.client)
# In CoppeliaSim, you should use simx_opmode_streaming mode to get data first time,
# and then use simx_opmode_blocking mode
_, pos = sim.simxGetObjectPosition(self.client, self.robot, -1, sim.simx_opmode_blocking)
_, orientation = sim.simxGetObjectOrientation(self.client, self.robot, -1, sim.simx_opmode_buffer)
_, left_wheel_pos = sim.simxGetObjectPosition(self.client, self.handle_left_wheel[1], -1,
sim.simx_opmode_blocking)
_, right_wheel_pos = sim.simxGetObjectPosition(self.client, self.handle_right_wheel[1], -1,
sim.simx_opmode_blocking)
self.vehl = np.linalg.norm(np.array(left_wheel_pos)-np.array(right_wheel_pos))
_, sensor_pos = sim.simxGetObjectPosition(self.client, self.sensor, -1, sim.simx_opmode_blocking)
_, sensor_orientation = sim.simxGetObjectOrientation(self.client, self.sensor, -1, sim.simx_opmode_buffer)
_, data = sim.simxGetStringSignal(self.client, 'UG01_distance', sim.simx_opmode_buffer)
sim.simxSynchronousTrigger(self.client)
data = sim.simxUnpackFloats(data)
self.robot_pos = pos[0:-1]
def connect_simulation(self, port):
"""
Connect and start simulation.
Arguments:
port -- The port used to connect to CoppeliaSim, default 19997.
Return:
clientID -- Client ID to communicate with CoppeliaSim.
"""
clientID = sim.simxStart("127.0.0.1", port, True, True, 5000, 5)
sim.simxSynchronous(clientID,True)
sim.simxStartSimulation(clientID, sim.simx_opmode_blocking)
if clientID < 0:
print("Connection failed.")
exit()
else:
print("Connection success.")
return clientID
def stop_simulation(self):
"""
Stop the simulation.
"""
sim.simxStopSimulation(self.client, sim.simx_opmode_blocking)
        time.sleep(0.5)
        print("Stop the simulation.")
        exit(0)
    def get_lidar(self, Q_sim=None):
        """Read one lidar scan; if a noise covariance Q_sim is given, add Gaussian noise."""
        _, data = sim.simxGetStringSignal(self.client, 'UG01_distance', sim.simx_opmode_buffer)
        sim.simxSynchronousTrigger(self.client)
        data = sim.simxUnpackFloats(data)
        data = data[1::3]  # keep one distance value per beam (drop the other two entries of each triple)
        if Q_sim is not None:
            data = data + np.random.randn() * Q_sim[0, 0] ** 0.5  # add measurement noise
        return data
def update_map(self):
"""
Update the map based on the current information of laser scanner. The obstacles are inflated to avoid collision.
Return:
current_map -- where 0 indicating traversable and 1 indicating obstacles.
"""
scale = self.map_size/5
scanningAngle = 240
pts=1024
AtoR = 1.0 / 180.0 * pi
_, pos = sim.simxGetObjectPosition(self.client, self.sensor, -1, sim.simx_opmode_blocking)
_, orientation = sim.simxGetObjectOrientation(self.client, self.robot, -1, sim.simx_opmode_buffer)
print('pos:')
print(pos)
print('ori:')
print(orientation)
data = self.get_lidar()
for i in range(1,pts+1):
absolute_angle = AtoR * (-scanningAngle / 2 + i * scanningAngle / pts) + orientation[2]
lidar_pose_x = pos[0]
lidar_pose_y = pos[1]
            if data[i - 1] > 0:  # get_lidar already returns one distance per beam
                obstacle_x = lidar_pose_x + data[i - 1] * cos(absolute_angle)
                obstacle_y = lidar_pose_y + data[i - 1] * sin(absolute_angle)
                pixel_x = int(scale * -1 * obstacle_y + 2.5 * scale)
                pixel_y = int(scale * 1 * obstacle_x + 2.5 * scale)
                # clamp to the map bounds (also guards against negative indices)
                pixel_x = min(max(pixel_x, 0), self.map_size - 1)
                pixel_y = min(max(pixel_y, 0), self.map_size - 1)
                self.current_map[pixel_x][pixel_y] = 1
time.sleep(0.1)
return self.current_map
def move_robot_vw(self,v,w):
"""
Given linear velocity and angular velocity,
compute the velocity of left wheel and right wheel by the two wheeled differential drive robot.
And control the robot to move.
Argument:
v -- linear velocity.
w -- angular velocity.
"""
wheel_radius = 0.0424
wheel_tread = 0.2541
vLeft = (1.0/wheel_radius)*(v - wheel_tread/2.0*w)
vRight = (1.0/wheel_radius)*(v + wheel_tread/2.0*w)
sim.simxSetJointTargetVelocity(self.client, self.handle_left_wheel[1], vLeft,
sim.simx_opmode_streaming)
sim.simxSetJointTargetVelocity(self.client, self.handle_right_wheel[1], vRight,
sim.simx_opmode_streaming)
# sim.simxSynchronousTrigger(self.client)
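    # Consistency note (illustrative, not in the original file): with wheel
    # radius r = 0.0424 and tread T = 0.2541 the mapping above inverts as
    # v = r * (vRight + vLeft) / 2 and w = r * (vRight - vLeft) / T; e.g. the
    # hypothetical command v = 0.3, w = 0.5 gives vLeft ~ 5.58, vRight ~ 8.57.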
def move_robot(self, path):
"""
Given planned path of the robot,
control the robot track a part of path, with a maximum of 3 meters from the start position.
Arguments:
path -- A N*2 array indicating the planned path.
"""
k1 = 1.5
k2 = 0
v = 6
pre_error = 0
path = np.array(path)/10
for i in range(1,len(path)):
if np.linalg.norm(path[i] - path[0]) >= 3 and np.linalg.norm(path[i-1] - path[0]) <= 3:
path = path[0:i]
break
final_target = np.array(path[-1])
_, pos = sim.simxGetObjectPosition(self.client, self.robot, -1, sim.simx_opmode_blocking)
pos = pos[0:-1]
_, orientation = sim.simxGetObjectOrientation(self.client, self.robot, -1, sim.simx_opmode_buffer)
for i in range(1,len(path)):
target = path[i]
while np.linalg.norm(np.array(target) - np.array(pos)) > 0.1:
move = target - np.array(pos)
theta = orientation[2]
theta_goal = atan2(move[1], move[0])
theta_error = theta - theta_goal
if theta_error < -pi:
theta_error += 2 * pi
elif theta_error > pi:
theta_error -= 2 * pi
u = -(k1 * theta_error + k2 * (pre_error - theta_error))
pre_error = theta_error
if abs(theta_error) < 0.1:
v_r = v + u
v_l = v - u
                else:
v_r = u
v_l = -u
sim.simxSetJointTargetVelocity(self.client, self.handle_left_wheel[1], v_l,
sim.simx_opmode_streaming)
sim.simxSetJointTargetVelocity(self.client, self.handle_right_wheel[1], v_r,
sim.simx_opmode_streaming)
sim.simxSynchronousTrigger(self.client)
_, pos = sim.simxGetObjectPosition(self.client, self.robot, -1, sim.simx_opmode_blocking)
pos = pos[0:-1]
self.robot_pos = pos
_, orientation = sim.simxGetObjectOrientation(self.client, self.robot, -1, sim.simx_opmode_buffer)
def get_robot_pos(self):
"""
Get current position of the robot.
Return:
robot_pos -- A 2D vector indicating the coordinate of robot's current position in the grid map.
"""
_, pos = sim.simxGetObjectPosition(self.client, self.sensor, -1, sim.simx_opmode_blocking)
self.robot_pos = pos[0:-1]
robot_pos = np.array(self.robot_pos)
robot_pos = (robot_pos)
return robot_pos
def get_robot_ori(self):
"""
Get current orientation of the robot.
Return:
robot_ori -- A float number indicating current orientation of the robot in radian.
"""
_, orientation = sim.simxGetObjectOrientation(self.client, self.sensor, -1, sim.simx_opmode_buffer)
self.robot_ori = orientation[2]
robot_ori = self.robot_ori
return robot_ori
def set_robot_pos(self):
        sim.simxSetObjectPosition(self.client, self.robot, -1, [4,2.5,1], sim.simx_opmode_oneshot)
| [
"numpy.random.randn",
"math.atan2",
"numpy.zeros",
"math.sin",
"time.sleep",
"numpy.array",
"numpy.linalg.norm",
"math.cos"
] | [((455, 510), 'numpy.zeros', 'np.zeros', (['(self.map_size, self.map_size)'], {'dtype': '"""uint8"""'}), "((self.map_size, self.map_size), dtype='uint8')\n", (463, 510), True, 'import numpy as np\n'), ((4153, 4168), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4163, 4168), False, 'import time\n'), ((5973, 5988), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5983, 5988), False, 'import time\n'), ((7503, 7521), 'numpy.array', 'np.array', (['path[-1]'], {}), '(path[-1])\n', (7511, 7521), True, 'import numpy as np\n'), ((9621, 9645), 'numpy.array', 'np.array', (['self.robot_pos'], {}), '(self.robot_pos)\n', (9629, 9645), True, 'import numpy as np\n'), ((7269, 7283), 'numpy.array', 'np.array', (['path'], {}), '(path)\n', (7277, 7283), True, 'import numpy as np\n'), ((2871, 2895), 'numpy.array', 'np.array', (['left_wheel_pos'], {}), '(left_wheel_pos)\n', (2879, 2895), True, 'import numpy as np\n'), ((2896, 2921), 'numpy.array', 'np.array', (['right_wheel_pos'], {}), '(right_wheel_pos)\n', (2904, 2921), True, 'import numpy as np\n'), ((4489, 4506), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (4504, 4506), True, 'import numpy as np\n'), ((8009, 8032), 'math.atan2', 'atan2', (['move[1]', 'move[0]'], {}), '(move[1], move[0])\n', (8014, 8032), False, 'from math import atan2, sin, cos, pi, tan\n'), ((7339, 7372), 'numpy.linalg.norm', 'np.linalg.norm', (['(path[i] - path[0])'], {}), '(path[i] - path[0])\n', (7353, 7372), True, 'import numpy as np\n'), ((7382, 7419), 'numpy.linalg.norm', 'np.linalg.norm', (['(path[i - 1] - path[0])'], {}), '(path[i - 1] - path[0])\n', (7396, 7419), True, 'import numpy as np\n'), ((7927, 7940), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (7935, 7940), True, 'import numpy as np\n'), ((5566, 5585), 'math.cos', 'cos', (['absolute_angle'], {}), '(absolute_angle)\n', (5569, 5585), False, 'from math import atan2, sin, cos, pi, tan\n'), ((5646, 5665), 'math.sin', 'sin', (['absolute_angle'], {}), '(absolute_angle)\n', (5649, 5665), False, 'from math import atan2, sin, cos, pi, tan\n'), ((7854, 7870), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (7862, 7870), True, 'import numpy as np\n'), ((7873, 7886), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (7881, 7886), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This module is used for calculations of the boundary wavelets in the frequency
domain.
The boundary_wavelets.py package is licensed under the MIT "Expat" license.
Copyright (c) 2019: <NAME> and <NAME>.
"""
# =============================================================================
# Imports
# =============================================================================
import numpy as np
import boundwave.boundary_wavelets as bw
# =============================================================================
# Functions
# =============================================================================
def rectangle(scheme):
'''
The Fourier transform of a rectangular window function.
INPUT:
scheme : numpy.float64
A numpy array with the frequencies in which to sample.
OUTPUT:
chi : numpy.complex128
A numpy array with the window function sampled in the
            frequency domain.
'''
chi = np.zeros(len(scheme), dtype=np.complex128)
for i in range(len(scheme)):
if scheme[i] == 0:
chi[i] = 1
else:
chi[i] = (1 - np.exp(-2 * np.pi * 1j * scheme[i])) / \
(2 * np.pi * 1j * scheme[i])
return chi
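# Quick self-check (illustrative sketch, not in the original module): chi is
# the Fourier transform of the indicator of [0, 1], so it equals 1 at zero
# frequency and vanishes at every nonzero integer frequency.
def _demo_rectangle():
    chi = rectangle(np.array([0.0, 1.0, 2.0]))
    assert abs(chi[0] - 1) < 1e-12
    assert abs(chi[1]) < 1e-12 and abs(chi[2]) < 1e-12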
def scaling_function_fourier(wavelet_coef, J, k, scheme, win, P=20):
r'''
This function evaluates the Fourier transform of the scaling function,
:math:`\phi_{j,k}`, sampled in scheme.
INPUT:
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to :math:`\sqrt{2}`.
For Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo)`.
J : int
The scale.
k : int
The translation.
scheme : numpy.float64
The points in which to evaluate.
        win : numpy.complex128
The window to use on the boundary functions.
P=20 : int
The number of factors to include in the infinite product
in the Fourier transform of phi.
OUTPUT:
phi : numpy.complex128
:math:`\hat{\phi}_{j,k}`
'''
h = wavelet_coef * np.sqrt(2) / 2
e = (scheme[-1] - scheme[0]) / len(scheme)
phi = np.zeros((len(scheme), P, len(h)), dtype=complex)
for i in range(P):
for l in range(len(h)):
phi[:, i, l] = h[l] * \
np.exp(-2 * np.pi * 1j * l * 2**(-i - J - 1) * scheme)
phi = np.sum(phi, axis=2, dtype=np.complex128)
phi = (2**(-J / 2) * np.exp(-2 * np.pi * 1j * k * 2**(-J) * scheme) *
np.prod(phi, axis=1, dtype=np.complex128))
PhiAstChi = np.convolve(phi, win, mode='same') * e
return PhiAstChi
def fourier_boundary_wavelets(J, scheme, wavelet_coef, AL=None, AR=None,
win=rectangle):
r'''
This function evaluates the Fourier transformed boundary functions
for db2.
INPUT:
J : int
The scale.
scheme : numpy.float64
The sampling scheme in the Fourier domain.
wavelet_coef : numpy.float64
The wavelet coefficients, must sum to :math:`\sqrt{2}`.
            For Daubechies 2 they can be found using
`np.flipud(pywt.Wavelet('db2').dec_lo)`.
AL=None : numpy.float64
The left orthonormalisation matrix, if this is not
supplied the functions will not be orthonormalized. Can be
computed using
:py:func:`boundwave.Orthonormal.ortho_matrix`.
AR=None : numpy.float64
The right orthonormalisation matrix, if this is not
supplied the functions will not be orthonormalized. Can be
computed using
:py:func:`boundwave.Orthonormal.ortho_matrix`.
win= :py:func:`rectangle` : numpy.complex128
The window to use on the boundary functions.
OUTPUT:
x : numpy.complex128
2d numpy array with the boundary functions in the columns;
orthonormalised if `AL` and `AR` given.
'''
a = int(len(wavelet_coef) / 2)
kLeft = np.arange(-2 * a + 2, 1)
kRight = np.arange(2**J - 2 * a + 1, 2**J)
xj = np.zeros((len(scheme), 2 * a), dtype=complex)
Moment = bw.moments(wavelet_coef, a - 1)
FourierPhiLeft = np.zeros((len(kLeft), len(scheme)), dtype=complex)
FourierPhiRight = np.zeros((len(kRight), len(scheme)), dtype=complex)
window = win(scheme)
for i in range(len(kLeft)):
FourierPhiLeft[i] = scaling_function_fourier(wavelet_coef, J,
kLeft[i], scheme, window)
FourierPhiRight[i] = scaling_function_fourier(wavelet_coef, J,
kRight[i], scheme, window)
for b in range(a):
xj[:, b] = np.sum(np.multiply(bw.inner_product_phi_x(
b, J, kLeft, Moment), np.transpose(FourierPhiLeft)), axis=1)
xj[:, b + a] = np.sum(np.multiply(bw.inner_product_phi_x(
b, J, kRight, Moment), np.transpose(FourierPhiRight)), axis=1)
if AL is None or AR is None:
return xj
else:
x = np.zeros(np.shape(xj), dtype=complex)
for i in range(a):
for j in range(a):
x[:, i] += xj[:, j] * AL[i, j]
for i in range(a):
for j in range(a):
x[:, i + a] += xj[:, j + a] * AR[i, j]
return x
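if __name__ == "__main__":
    # Minimal usage sketch (added illustration, not part of the original
    # module). The filter below is the standard Daubechies-2 low-pass filter,
    # already flipped as the docstrings require; treat the exact values and
    # the chosen scheme as assumptions.
    db2 = np.array([0.48296291314469025, 0.836516303737469,
                    0.22414386804185735, -0.12940952255092145])
    scheme = np.linspace(-8.0, 8.0, 129)
    phi_hat = scaling_function_fourier(db2, J=3, k=0, scheme=scheme,
                                       win=rectangle(scheme))
    print(phi_hat.shape, phi_hat.dtype)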
| [
"numpy.sum",
"boundwave.boundary_wavelets.inner_product_phi_x",
"boundwave.boundary_wavelets.moments",
"numpy.transpose",
"numpy.shape",
"numpy.arange",
"numpy.exp",
"numpy.convolve",
"numpy.prod",
"numpy.sqrt"
] | [((2488, 2528), 'numpy.sum', 'np.sum', (['phi'], {'axis': '(2)', 'dtype': 'np.complex128'}), '(phi, axis=2, dtype=np.complex128)\n', (2494, 2528), True, 'import numpy as np\n'), ((4131, 4155), 'numpy.arange', 'np.arange', (['(-2 * a + 2)', '(1)'], {}), '(-2 * a + 2, 1)\n', (4140, 4155), True, 'import numpy as np\n'), ((4169, 4206), 'numpy.arange', 'np.arange', (['(2 ** J - 2 * a + 1)', '(2 ** J)'], {}), '(2 ** J - 2 * a + 1, 2 ** J)\n', (4178, 4206), True, 'import numpy as np\n'), ((4271, 4302), 'boundwave.boundary_wavelets.moments', 'bw.moments', (['wavelet_coef', '(a - 1)'], {}), '(wavelet_coef, a - 1)\n', (4281, 4302), True, 'import boundwave.boundary_wavelets as bw\n'), ((2614, 2655), 'numpy.prod', 'np.prod', (['phi'], {'axis': '(1)', 'dtype': 'np.complex128'}), '(phi, axis=1, dtype=np.complex128)\n', (2621, 2655), True, 'import numpy as np\n'), ((2673, 2707), 'numpy.convolve', 'np.convolve', (['phi', 'win'], {'mode': '"""same"""'}), "(phi, win, mode='same')\n", (2684, 2707), True, 'import numpy as np\n'), ((2194, 2204), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2201, 2204), True, 'import numpy as np\n'), ((2554, 2602), 'numpy.exp', 'np.exp', (['(-2 * np.pi * 1.0j * k * 2 ** -J * scheme)'], {}), '(-2 * np.pi * 1.0j * k * 2 ** -J * scheme)\n', (2560, 2602), True, 'import numpy as np\n'), ((5188, 5200), 'numpy.shape', 'np.shape', (['xj'], {}), '(xj)\n', (5196, 5200), True, 'import numpy as np\n'), ((2423, 2481), 'numpy.exp', 'np.exp', (['(-2 * np.pi * 1.0j * l * 2 ** (-i - J - 1) * scheme)'], {}), '(-2 * np.pi * 1.0j * l * 2 ** (-i - J - 1) * scheme)\n', (2429, 2481), True, 'import numpy as np\n'), ((4868, 4911), 'boundwave.boundary_wavelets.inner_product_phi_x', 'bw.inner_product_phi_x', (['b', 'J', 'kLeft', 'Moment'], {}), '(b, J, kLeft, Moment)\n', (4890, 4911), True, 'import boundwave.boundary_wavelets as bw\n'), ((4926, 4954), 'numpy.transpose', 'np.transpose', (['FourierPhiLeft'], {}), '(FourierPhiLeft)\n', (4938, 4954), True, 'import numpy as np\n'), ((5007, 5051), 'boundwave.boundary_wavelets.inner_product_phi_x', 'bw.inner_product_phi_x', (['b', 'J', 'kRight', 'Moment'], {}), '(b, J, kRight, Moment)\n', (5029, 5051), True, 'import boundwave.boundary_wavelets as bw\n'), ((5066, 5095), 'numpy.transpose', 'np.transpose', (['FourierPhiRight'], {}), '(FourierPhiRight)\n', (5078, 5095), True, 'import numpy as np\n'), ((1165, 1202), 'numpy.exp', 'np.exp', (['(-2 * np.pi * 1.0j * scheme[i])'], {}), '(-2 * np.pi * 1.0j * scheme[i])\n', (1171, 1202), True, 'import numpy as np\n')] |
import rospy
from geometry_msgs.msg import PoseArray, Pose
from tf.transformations import euler_from_quaternion
import time
import math
import matplotlib.pyplot as plt
from nav_msgs.msg import Odometry
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from geometry_msgs.msg import Twist
from math import pow, atan2, sqrt
class MovePID:
def __init__(self):
rospy.init_node('pid_controller_initial', anonymous=True)
self.velocity_publisher = rospy.Publisher('/drone1/cmd_vel', Twist, queue_size=10)
self.pose = Odometry()
self.rate = rospy.Rate(1)
self.tolerancia = 0.25
self.currentPosX, self.currentPosY, self.currentPosZ, self.currentPosYaw = 0, 0, 0, 0
# self.count = 0
# self.unic = 0
# self.pub = rospy.Publisher('/build_map3D', PoseArray, queue_size=1)
# self.all = []
# self.obsX, self.obsY, self.obsZ = [], [], []
# self.t = time.time()
print("Start")
_ = rospy.Subscriber("/pid/cmd_vel", Pose, self.callbackMove)
_ = rospy.Subscriber('/drone1/ground_truth/state', Odometry, self.callbackPosicao)
def callbackPosicao(self, odom):
_, _, yaw = euler_from_quaternion([odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])
self.currentPosX = odom.pose.pose.position.x
self.currentPosY = odom.pose.pose.position.y
self.currentPosZ = odom.pose.pose.position.z
self.currentPosYaw = yaw
# print(self.currentPosX)
# print(self.currentPosY)
def rotationMatrix(self, psi0, x1, y1, z1):
r = [[np.cos(psi0), np.sin(psi0) * -1, 0], [np.sin(psi0), np.cos(psi0), 0], [0, 0, 1]]
pos_local = np.dot(np.transpose(np.asarray(r)), np.asarray([x1, y1, z1]))
return pos_local
def callbackMove(self, data):
print("LISTEN")
self.move2goal(data.position.x, data.position.y)
def euclidean_distance(self, goal_pose):
return sqrt(pow((goal_pose.x - self.currentPosX), 2) + pow((goal_pose.y - self.currentPosY), 2))
def linear_vel(self, goal_pose, constant=1.5):
return constant * self.euclidean_distance(goal_pose)
def steering_angle(self, goal_pose):
return atan2(goal_pose.y - self.currentPosY, goal_pose.x - self.currentPosX)
def angular_vel(self, goal_pose, constant=6):
return constant * (self.steering_angle(goal_pose) - self.currentPosYaw)
def velocity(self, goal_pose, vel=0.5):
x = abs(self.currentPosX - goal_pose.x)
y = abs(self.currentPosY - goal_pose.y)
sinalX, sinalY = 1, 1
if x > y:
percent = x / y
# print("X")
# print(vel/percent)
if self.currentPosX > goal_pose.x:
sinalX = -1
if self.currentPosY > goal_pose.y:
sinalY = -1
return vel*sinalX, vel/percent*sinalY
else:
percent = y / x
# print("Y")
# print(vel/percent)
if self.currentPosX > goal_pose.x:
sinalX = -1
if self.currentPosY > goal_pose.y:
sinalY = -1
return vel/percent*sinalX, vel*sinalY
def move2goal(self, x, y):
goal_pose = Pose().position
# print("Indo para " + str(goal_pose))
goal_pose.x = x
goal_pose.y = y
vel_msg = Twist()
while self.euclidean_distance(goal_pose) >= self.tolerancia:
vel_msg.linear.x = self.linear_vel(goal_pose)/10 # self.velocity(goal_pose)[0] #self.linear_vel(goal_pose)/8
vel_msg.linear.y = 0 #self.velocity(goal_pose)[1]
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = self.angular_vel(goal_pose)/10
self.velocity_publisher.publish(vel_msg)
# self.rate.sleep()
# print("SAIU")
vel_msg.linear.x = 0
vel_msg.linear.y = 0
vel_msg.angular.z = 0
self.velocity_publisher.publish(vel_msg)
# rospy.spin()
def main():
MovePID()
try:
rospy.spin()
except rospy.ROSInterruptException:
pass
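# Example (assumed workflow, not part of the original node): with the
# simulation running, a goal can be published from another terminal, e.g.
#   rostopic pub -1 /pid/cmd_vel geometry_msgs/Pose \
#     '{position: {x: 2.0, y: 3.0, z: 0.0}}'
# which triggers callbackMove -> move2goal.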
if __name__ == '__main__':
main() | [
"rospy.Subscriber",
"nav_msgs.msg.Odometry",
"math.pow",
"math.atan2",
"numpy.asarray",
"rospy.Publisher",
"rospy.Rate",
"geometry_msgs.msg.Twist",
"numpy.sin",
"rospy.init_node",
"numpy.cos",
"tf.transformations.euler_from_quaternion",
"rospy.spin",
"geometry_msgs.msg.Pose"
] | [((415, 472), 'rospy.init_node', 'rospy.init_node', (['"""pid_controller_initial"""'], {'anonymous': '(True)'}), "('pid_controller_initial', anonymous=True)\n", (430, 472), False, 'import rospy\n'), ((508, 564), 'rospy.Publisher', 'rospy.Publisher', (['"""/drone1/cmd_vel"""', 'Twist'], {'queue_size': '(10)'}), "('/drone1/cmd_vel', Twist, queue_size=10)\n", (523, 564), False, 'import rospy\n'), ((587, 597), 'nav_msgs.msg.Odometry', 'Odometry', ([], {}), '()\n', (595, 597), False, 'from nav_msgs.msg import Odometry\n'), ((618, 631), 'rospy.Rate', 'rospy.Rate', (['(1)'], {}), '(1)\n', (628, 631), False, 'import rospy\n'), ((1049, 1106), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/pid/cmd_vel"""', 'Pose', 'self.callbackMove'], {}), "('/pid/cmd_vel', Pose, self.callbackMove)\n", (1065, 1106), False, 'import rospy\n'), ((1119, 1197), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/drone1/ground_truth/state"""', 'Odometry', 'self.callbackPosicao'], {}), "('/drone1/ground_truth/state', Odometry, self.callbackPosicao)\n", (1135, 1197), False, 'import rospy\n'), ((1256, 1404), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[odom.pose.pose.orientation.x, odom.pose.pose.orientation.y, odom.pose.pose\n .orientation.z, odom.pose.pose.orientation.w]'], {}), '([odom.pose.pose.orientation.x, odom.pose.pose.\n orientation.y, odom.pose.pose.orientation.z, odom.pose.pose.orientation.w])\n', (1277, 1404), False, 'from tf.transformations import euler_from_quaternion\n'), ((2372, 2441), 'math.atan2', 'atan2', (['(goal_pose.y - self.currentPosY)', '(goal_pose.x - self.currentPosX)'], {}), '(goal_pose.y - self.currentPosY, goal_pose.x - self.currentPosX)\n', (2377, 2441), False, 'from math import pow, atan2, sqrt\n'), ((3591, 3598), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (3596, 3598), False, 'from geometry_msgs.msg import Twist\n'), ((4351, 4363), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (4361, 4363), False, 'import rospy\n'), ((1878, 1902), 'numpy.asarray', 'np.asarray', (['[x1, y1, z1]'], {}), '([x1, y1, z1])\n', (1888, 1902), True, 'import numpy as np\n'), ((3454, 3460), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (3458, 3460), False, 'from geometry_msgs.msg import PoseArray, Pose\n'), ((1741, 1753), 'numpy.cos', 'np.cos', (['psi0'], {}), '(psi0)\n', (1747, 1753), True, 'import numpy as np\n'), ((1779, 1791), 'numpy.sin', 'np.sin', (['psi0'], {}), '(psi0)\n', (1785, 1791), True, 'import numpy as np\n'), ((1793, 1805), 'numpy.cos', 'np.cos', (['psi0'], {}), '(psi0)\n', (1799, 1805), True, 'import numpy as np\n'), ((1862, 1875), 'numpy.asarray', 'np.asarray', (['r'], {}), '(r)\n', (1872, 1875), True, 'import numpy as np\n'), ((2111, 2149), 'math.pow', 'pow', (['(goal_pose.x - self.currentPosX)', '(2)'], {}), '(goal_pose.x - self.currentPosX, 2)\n', (2114, 2149), False, 'from math import pow, atan2, sqrt\n'), ((2154, 2192), 'math.pow', 'pow', (['(goal_pose.y - self.currentPosY)', '(2)'], {}), '(goal_pose.y - self.currentPosY, 2)\n', (2157, 2192), False, 'from math import pow, atan2, sqrt\n'), ((1755, 1767), 'numpy.sin', 'np.sin', (['psi0'], {}), '(psi0)\n', (1761, 1767), True, 'import numpy as np\n')] |
import torch
import numpy as np
def position_encoding(n_postion, dim_hidden, padding_idx=None):
"""
from paper: Attention is all you need <http://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>
"""
def angle(postion, hidden_idx):
"""
pos/10000^(2i/d_{model})
"""
return postion / np.power(10000, 2 * (hidden_idx // 2) / dim_hidden)
def position_angle_vec(postion):
return [angle(postion, hidden_idx) for hidden_idx in range(dim_hidden)]
sin_table = np.array([position_angle_vec(pos) for pos in range(n_postion)])
sin_table[:, 0::2] = np.sin(sin_table[:, 0::2])
sin_table[:, 1::2] = np.cos(sin_table[:, 1::2])
if padding_idx is not None:
sin_table[padding_idx] = 0.
return torch.FloatTensor(sin_table)
import torch.nn as nn
class PostionEmbedding(nn.Module):
def __init__(self):
super(PostionEmbedding, self).__init__()
def forward(self,):
pass | [
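if __name__ == "__main__":
    # Minimal check (added illustration, not in the original file): build a
    # table for 10 positions and a 16-dimensional model and inspect its shape.
    table = position_encoding(10, 16, padding_idx=0)
    print(table.shape)  # torch.Size([10, 16]); row 0 is all zeros (padding)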
"numpy.power",
"numpy.sin",
"torch.FloatTensor",
"numpy.cos"
] | [((615, 641), 'numpy.sin', 'np.sin', (['sin_table[:, 0::2]'], {}), '(sin_table[:, 0::2])\n', (621, 641), True, 'import numpy as np\n'), ((667, 693), 'numpy.cos', 'np.cos', (['sin_table[:, 1::2]'], {}), '(sin_table[:, 1::2])\n', (673, 693), True, 'import numpy as np\n'), ((776, 804), 'torch.FloatTensor', 'torch.FloatTensor', (['sin_table'], {}), '(sin_table)\n', (793, 804), False, 'import torch\n'), ((340, 391), 'numpy.power', 'np.power', (['(10000)', '(2 * (hidden_idx // 2) / dim_hidden)'], {}), '(10000, 2 * (hidden_idx // 2) / dim_hidden)\n', (348, 391), True, 'import numpy as np\n')] |
from sklearn.externals import joblib
import pandas as pd
import numpy as np
from flask import request
def prediction():
if request.method == 'POST':
model_logreg = joblib.load("models/Liver_prediction_model/logreg.pkl")
age = int(request.form.get('age'))
sex = int(request.form.get('sex'))
Total_Bilirubin = float(request.form.get('Total_Bilirubin'))
Direct_Bilirubin = float(request.form.get('Direct_Bilirubin'))
Alkaline_Phosphotase = float(request.form.get('Alkaline_Phosphotase'))
Alamine_Aminotransferase = float(request.form.get('Alamine_Aminotransferase'))
Aspartate_Aminotransferase = float(request.form.get('Aspartate_Aminotransferase'))
Total_Protiens = float(request.form.get('Total_Protiens'))
Albumin = float(request.form.get('Albumin'))
Albumin_and_Globulin_Ratio = float(request.form.get('Albumin_and_Globulin_Ratio'))
        target = 0
data = [
age,
sex,
Total_Bilirubin,
Direct_Bilirubin,
Alkaline_Phosphotase,
Alamine_Aminotransferase,
Aspartate_Aminotransferase,
Total_Protiens,
Albumin,
Albumin_and_Globulin_Ratio,
target
]
liver_df = pd.read_csv('models/Liver_prediction_model/indian_liver_patient.csv')
gender = {'Male': 1,'Female': 2}
liver_df.Gender = [gender[item] for item in liver_df.Gender]
liver_df.loc[582] = [i for i in data]
X = liver_df.drop('Dataset', axis=1)
X= (X - np.min(X)) / (np.max(X) - np.min(X)).values
finX = X[['Total_Protiens','Albumin', 'Gender']]
data_to_predict = finX.loc[582].tolist()
print(data_to_predict)
        predicted_result = model_logreg.predict([data_to_predict])
        return predicted_result[0]
def predict_liver_stuff():
if request.method == 'POST':
try:
return prediction()
except:
return 0
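# Illustration only (values assumed, not part of the original module): the
# min-max scaling used above maps each column of X into [0, 1] via
#   x_scaled = (x - min(x)) / (max(x) - min(x))
# e.g. Albumin values [2.0, 3.5, 5.0] become [0.0, 0.5, 1.0].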
| [
"flask.request.form.get",
"pandas.read_csv",
"numpy.min",
"numpy.max",
"sklearn.externals.joblib.load"
] | [((177, 232), 'sklearn.externals.joblib.load', 'joblib.load', (['"""models/Liver_prediction_model/logreg.pkl"""'], {}), "('models/Liver_prediction_model/logreg.pkl')\n", (188, 232), False, 'from sklearn.externals import joblib\n'), ((1356, 1425), 'pandas.read_csv', 'pd.read_csv', (['"""models/Liver_prediction_model/indian_liver_patient.csv"""'], {}), "('models/Liver_prediction_model/indian_liver_patient.csv')\n", (1367, 1425), True, 'import pandas as pd\n'), ((252, 275), 'flask.request.form.get', 'request.form.get', (['"""age"""'], {}), "('age')\n", (268, 275), False, 'from flask import request\n'), ((295, 318), 'flask.request.form.get', 'request.form.get', (['"""sex"""'], {}), "('sex')\n", (311, 318), False, 'from flask import request\n'), ((352, 387), 'flask.request.form.get', 'request.form.get', (['"""Total_Bilirubin"""'], {}), "('Total_Bilirubin')\n", (368, 387), False, 'from flask import request\n'), ((422, 458), 'flask.request.form.get', 'request.form.get', (['"""Direct_Bilirubin"""'], {}), "('Direct_Bilirubin')\n", (438, 458), False, 'from flask import request\n'), ((497, 537), 'flask.request.form.get', 'request.form.get', (['"""Alkaline_Phosphotase"""'], {}), "('Alkaline_Phosphotase')\n", (513, 537), False, 'from flask import request\n'), ((580, 624), 'flask.request.form.get', 'request.form.get', (['"""Alamine_Aminotransferase"""'], {}), "('Alamine_Aminotransferase')\n", (596, 624), False, 'from flask import request\n'), ((669, 715), 'flask.request.form.get', 'request.form.get', (['"""Aspartate_Aminotransferase"""'], {}), "('Aspartate_Aminotransferase')\n", (685, 715), False, 'from flask import request\n'), ((748, 782), 'flask.request.form.get', 'request.form.get', (['"""Total_Protiens"""'], {}), "('Total_Protiens')\n", (764, 782), False, 'from flask import request\n'), ((809, 836), 'flask.request.form.get', 'request.form.get', (['"""Albumin"""'], {}), "('Albumin')\n", (825, 836), False, 'from flask import request\n'), ((881, 927), 'flask.request.form.get', 'request.form.get', (['"""Albumin_and_Globulin_Ratio"""'], {}), "('Albumin_and_Globulin_Ratio')\n", (897, 927), False, 'from flask import request\n'), ((1644, 1653), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (1650, 1653), True, 'import numpy as np\n'), ((1658, 1667), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (1664, 1667), True, 'import numpy as np\n'), ((1670, 1679), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (1676, 1679), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import os, os.path
import imageio
from skimage.color import rgb2gray
import matplotlib.image as mpimg
import glob
import cv2
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam
# from sklearn.metrics import accuracy_score, f1_score
test_path="C:\\Users\\Shilpa\\PycharmProjects\\tipra2\\data\\MNIST"
vector = None
output = []
for digit in range(10):
    images = [cv2.imread(file, cv2.IMREAD_GRAYSCALE).ravel()
              for file in glob.glob(test_path + "\\" + str(digit) + "\\*.jpg")]
    images = np.array(images)
    output.extend([digit] * len(images))
    vector = images if vector is None else np.vstack((vector, images))
print("done")
def labeltovector(output):
size = 10
list1=[]
list2=[]
list_zero=[0 for i in range(size)]
for i in range (len(output)):
list1=[0 for i in range(size)]
#print(list1)
#print(output[i])
list1[output[i]]=1
#print(list1)
#break
list2.append(list1)
#print(list2)
#break
return list2
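# Quick sanity check (illustration added for clarity):
#   labeltovector([2]) -> [[0, 0, 1, 0, 0, 0, 0, 0, 0, 0]]
# i.e. each label becomes a one-hot row of length 10.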
# L = 1000
# _1,_2 = list(np.random.random((L,2))), list(np.random.random((L,2)))
# X1,X2 = [],[]
# Y1,Y2 = [],[]
# rad = 0.8
# for i in range(L):
# a,b = _1[i][0],_1[i][1]
# if a**2+b**2<rad**2:
# Y1.append([1,0])
# X1.append(_1[i])
# elif a**2+b**2>=rad**2:
# Y1.append([0,1])
# X1.append(_1[i])
# a,b = _2[i][0],_2[i][1]
# if a**2+b**2<rad**2:
# Y2.append([1,0])
# X2.append(_2[i])
# elif a**2+b**2>=rad**2:
# Y2.append([0,1])
# X2.append(_2[i])
# X1 = np.array(X1)
# X2 = np.array(X2)
# Y1 = np.array(Y1)
# Y2 = np.array(Y2)
# output=[]
# for i in range (10):
# for j in range (100):
# output.append(i)
output=labeltovector(np.array(output))
#print(output)
X1=np.array(vector)
Y1=np.array(output)
indx = np.array(list(range(len(X1))))
np.random.shuffle(indx)
X1 = X1[indx]
Y1 = Y1[indx]
print(X1.shape)
print(Y1.shape)
print(Y1)
model = Sequential()
model.add(Dense(784*2, activation='relu', input_dim=X1.shape[1]))
model.add(Dense(128, activation='relu'))
model.add(Dense(Y1.shape[1], activation='softmax'))
sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
#sgd = Adam(lr=0.001,)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
model.fit(X1, Y1,
epochs=100,
batch_size=100)
#score = model.evaluate(X1, Y1, batch_size=40)
#lr=0.0001
list1=[42.39,45.49,62.12,64.98,68.08,70.78,78.82,88.51,92.95,94.36,96.81,97.09,98.25,98.55,98.65,98.83,98.89,98.99,99.08,99.13,99.18,99.21,99.23,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25,99.25]
epochs=[i for i in range(51)]
#plt.plot(epochs,list1, color="black")
plt.plot(epochs,list1)
plt.xlabel('Epoch')
plt.ylabel('Accuracy with Keras')
plt.legend(['MNIST_dataset'])
#plt.legend()
plt.savefig(os.getcwd()+'/part_1_task_5.png')
plt.show()
plt.clf()
| [
"matplotlib.pyplot.show",
"keras.optimizers.SGD",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"os.getcwd",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"cv2.imread",
"numpy.vstack",
"keras.layers.Dense",
"numpy.array",
"glob.glob",
"keras.models.Sequential",
"matplotlib.p... | [((620, 636), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (628, 636), True, 'import numpy as np\n'), ((807, 823), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (815, 823), True, 'import numpy as np\n'), ((886, 917), 'numpy.vstack', 'np.vstack', (['(images_0, images_1)'], {}), '((images_0, images_1))\n', (895, 917), True, 'import numpy as np\n'), ((1032, 1048), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1040, 1048), True, 'import numpy as np\n'), ((1126, 1155), 'numpy.vstack', 'np.vstack', (['(vector, images_2)'], {}), '((vector, images_2))\n', (1135, 1155), True, 'import numpy as np\n'), ((1270, 1286), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1278, 1286), True, 'import numpy as np\n'), ((1349, 1378), 'numpy.vstack', 'np.vstack', (['(vector, images_3)'], {}), '((vector, images_3))\n', (1358, 1378), True, 'import numpy as np\n'), ((1493, 1509), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1501, 1509), True, 'import numpy as np\n'), ((1572, 1601), 'numpy.vstack', 'np.vstack', (['(vector, images_4)'], {}), '((vector, images_4))\n', (1581, 1601), True, 'import numpy as np\n'), ((1716, 1732), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1724, 1732), True, 'import numpy as np\n'), ((1795, 1824), 'numpy.vstack', 'np.vstack', (['(vector, images_5)'], {}), '((vector, images_5))\n', (1804, 1824), True, 'import numpy as np\n'), ((1939, 1955), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1947, 1955), True, 'import numpy as np\n'), ((2018, 2047), 'numpy.vstack', 'np.vstack', (['(vector, images_6)'], {}), '((vector, images_6))\n', (2027, 2047), True, 'import numpy as np\n'), ((2162, 2178), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2170, 2178), True, 'import numpy as np\n'), ((2256, 2285), 'numpy.vstack', 'np.vstack', (['(vector, images_7)'], {}), '((vector, images_7))\n', (2265, 2285), True, 'import numpy as np\n'), ((2400, 2416), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2408, 2416), True, 'import numpy as np\n'), ((2479, 2508), 'numpy.vstack', 'np.vstack', (['(vector, images_8)'], {}), '((vector, images_8))\n', (2488, 2508), True, 'import numpy as np\n'), ((2623, 2639), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (2631, 2639), True, 'import numpy as np\n'), ((2702, 2731), 'numpy.vstack', 'np.vstack', (['(vector, images_9)'], {}), '((vector, images_9))\n', (2711, 2731), True, 'import numpy as np\n'), ((3872, 3888), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (3880, 3888), True, 'import numpy as np\n'), ((3892, 3908), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (3900, 3908), True, 'import numpy as np\n'), ((3948, 3971), 'numpy.random.shuffle', 'np.random.shuffle', (['indx'], {}), '(indx)\n', (3965, 3971), True, 'import numpy as np\n'), ((4051, 4063), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4061, 4063), False, 'from keras.models import Sequential\n'), ((4230, 4286), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.0001)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.0001, decay=1e-06, momentum=0.9, nesterov=True)\n', (4233, 4286), False, 'from keras.optimizers import SGD, Adam\n'), ((4934, 4957), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'list1'], {}), '(epochs, list1)\n', (4942, 4957), True, 'import matplotlib.pyplot as plt\n'), ((4957, 4976), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4967, 4976), True, 
'import matplotlib.pyplot as plt\n'), ((4977, 5010), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy with Keras"""'], {}), "('Accuracy with Keras')\n", (4987, 5010), True, 'import matplotlib.pyplot as plt\n'), ((5011, 5040), 'matplotlib.pyplot.legend', 'plt.legend', (["['MNIST_dataset']"], {}), "(['MNIST_dataset'])\n", (5021, 5040), True, 'import matplotlib.pyplot as plt\n'), ((5101, 5111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5109, 5111), True, 'import matplotlib.pyplot as plt\n'), ((5113, 5122), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5120, 5122), True, 'import matplotlib.pyplot as plt\n'), ((3834, 3850), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (3842, 3850), True, 'import numpy as np\n'), ((4074, 4130), 'keras.layers.Dense', 'Dense', (['(784 * 2)'], {'activation': '"""relu"""', 'input_dim': 'X1.shape[1]'}), "(784 * 2, activation='relu', input_dim=X1.shape[1])\n", (4079, 4130), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4140, 4169), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (4145, 4169), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4181, 4221), 'keras.layers.Dense', 'Dense', (['Y1.shape[1]'], {'activation': '"""softmax"""'}), "(Y1.shape[1], activation='softmax')\n", (4186, 4221), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((574, 609), 'glob.glob', 'glob.glob', (["(test_path + '\\\\0\\\\*.jpg')"], {}), "(test_path + '\\\\0\\\\*.jpg')\n", (583, 609), False, 'import glob\n'), ((760, 795), 'glob.glob', 'glob.glob', (["(test_path + '\\\\1\\\\*.jpg')"], {}), "(test_path + '\\\\1\\\\*.jpg')\n", (769, 795), False, 'import glob\n'), ((985, 1020), 'glob.glob', 'glob.glob', (["(test_path + '\\\\2\\\\*.jpg')"], {}), "(test_path + '\\\\2\\\\*.jpg')\n", (994, 1020), False, 'import glob\n'), ((1223, 1258), 'glob.glob', 'glob.glob', (["(test_path + '\\\\3\\\\*.jpg')"], {}), "(test_path + '\\\\3\\\\*.jpg')\n", (1232, 1258), False, 'import glob\n'), ((1446, 1481), 'glob.glob', 'glob.glob', (["(test_path + '\\\\4\\\\*.jpg')"], {}), "(test_path + '\\\\4\\\\*.jpg')\n", (1455, 1481), False, 'import glob\n'), ((1669, 1704), 'glob.glob', 'glob.glob', (["(test_path + '\\\\5\\\\*.jpg')"], {}), "(test_path + '\\\\5\\\\*.jpg')\n", (1678, 1704), False, 'import glob\n'), ((1892, 1927), 'glob.glob', 'glob.glob', (["(test_path + '\\\\6\\\\*.jpg')"], {}), "(test_path + '\\\\6\\\\*.jpg')\n", (1901, 1927), False, 'import glob\n'), ((2115, 2150), 'glob.glob', 'glob.glob', (["(test_path + '\\\\7\\\\*.jpg')"], {}), "(test_path + '\\\\7\\\\*.jpg')\n", (2124, 2150), False, 'import glob\n'), ((2353, 2388), 'glob.glob', 'glob.glob', (["(test_path + '\\\\8\\\\*.jpg')"], {}), "(test_path + '\\\\8\\\\*.jpg')\n", (2362, 2388), False, 'import glob\n'), ((2576, 2611), 'glob.glob', 'glob.glob', (["(test_path + '\\\\9\\\\*.jpg')"], {}), "(test_path + '\\\\9\\\\*.jpg')\n", (2585, 2611), False, 'import glob\n'), ((5067, 5078), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5076, 5078), False, 'import os, os.path\n'), ((516, 554), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (526, 554), False, 'import cv2\n'), ((702, 740), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (712, 740), False, 'import cv2\n'), ((927, 965), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (937, 965), False, 'import 
cv2\n'), ((1165, 1203), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (1175, 1203), False, 'import cv2\n'), ((1388, 1426), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (1398, 1426), False, 'import cv2\n'), ((1611, 1649), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (1621, 1649), False, 'import cv2\n'), ((1834, 1872), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (1844, 1872), False, 'import cv2\n'), ((2057, 2095), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (2067, 2095), False, 'import cv2\n'), ((2295, 2333), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (2305, 2333), False, 'import cv2\n'), ((2518, 2556), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (2528, 2556), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
"""
@file
@brief This module defines a segment that sweeps across the image.
Besides being a segment, the class stores the image dimensions
and provides a function that spots, along this segment, the gradients
that are nearly orthogonal to it.
"""
import copy
import numpy
from .geometrie import Segment, Point
class SegmentBord_Commun(Segment):
"""
    Defines a segment going from one border of the image to another;
    the important method is @see me decoupe_gradient.
    dim is the dimension of the image"""

    # see the remark in the Point class about __slots__
__slots__ = ("dim",)
def __init__(self, dim):
"""constructeur, definit la definition de l'image"""
Segment.__init__(self, Point(0, 0), Point(0, 0))
self.dim = dim
def copy(self):
"""
        Copies the instance.
"""
return copy.deepcopy(self)
def __str__(self):
"""permet d'afficher le segment"""
s = Segment.__str__(self)
s += " -- dim -- " + self.dim.__str__()
return s
def decoupe_gradient(self, gradient, cos_angle, ligne_gradient, seuil_norme):
"""
        For a given segment joining two borders of the image,
        this function reads the gradient and builds a list
        containing information for every other pixel of the segment:

        * *norme*: stores the norm of the gradient at this point of the image
        * *pos*: stores the position of the pixel
        * *aligne*: true if the gradient is nearly orthogonal to the segment;
          this result is tied to the parameter *proba_bin*:
          two vectors are close in direction
          if they belong to the angular sector defined by *proba_bin*.

        The walk along the segment starts at its origin ``self.a``,
        and at each iteration twice the direction vector is added
        until the image frame is left;
        the information is stored in ``ligne_gradient``, which holds a list
        of records created once at the start of the program
        in order to save time.
"""
n = self.directeur()
nor = self.normal().as_array()
n.scalairek(2.0)
p = copy.copy(self.a)
a = p.arrondi()
i = 0
while a.x >= 0 and a.y >= 0 and a.x < self.dim.x and a.y < self.dim.y:
            # fetch the record of the line where the information
            # must be stored (ligne_gradient)
t = ligne_gradient.info_ligne[i]
            # fetch the image gradient at pixel a
g = gradient[a.y, a.x]
            # compute its norm
t.norme = (g[0] ** 2 + g[1]**2) ** 0.5
            # store the pixel coordinates in t
t.pos.x = p.x
t.pos.y = p.y
            # if the norm is positive, the gradient has a direction:
            # check whether it lies in the same angular sector (proba_bin)
            # as the vector normal to the segment (nor)
if t.norme > seuil_norme:
t.aligne = numpy.dot(g, nor) > cos_angle * t.norme
else:
t.aligne = False
            # move on to the next pixel
p += n
            a = p.arrondi()  # round to pixel coordinates
i += 1
        # tell ligne_gradient how many pixels were taken into account;
        # later on, we will decide whether this segment really is a segment of the image
ligne_gradient.nb = i
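# Note (added clarification, assuming ``self.normal()`` returns a unit
# vector): the test ``numpy.dot(g, nor) > cos_angle * |g|`` is equivalent to
# ``cos(angle(g, nor)) > cos_angle``, i.e. the gradient lies within the
# angular sector of half-angle arccos(cos_angle) around the segment's normal.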
| [
"numpy.dot",
"copy.deepcopy",
"copy.copy"
] | [((881, 900), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (894, 900), False, 'import copy\n'), ((2228, 2245), 'copy.copy', 'copy.copy', (['self.a'], {}), '(self.a)\n', (2237, 2245), False, 'import copy\n'), ((3082, 3099), 'numpy.dot', 'numpy.dot', (['g', 'nor'], {}), '(g, nor)\n', (3091, 3099), False, 'import numpy\n')] |
##############################
##### Helper functions #####
##############################
# This is a list of helper functions related to
# file read/write and converting pgn to np array, etc.
from PIL import Image
import numpy as np
import os
piece_to_num_dict = {
'P': 1,
'N': 2,
'B': 3,
'R': 4,
'Q': 5,
'K': 6,
'p': -1,
'n': -2,
'b': -3,
'r': -4,
'q': -5,
'k': -6
}
num_dict_to_piece = {
1: 'P',
2: 'N',
3: 'B',
4: 'R',
5: 'Q',
6: 'K',
-1: 'p',
-2: 'n',
-3: 'b',
-4: 'r',
-5: 'q',
-6: 'k',
}
piece_to_list = {
'P': [0]*7 + [1] + [0]*5,
'N': [0]*8 + [1] + [0]*4,
'B': [0]*9 + [1] + [0]*3,
'R': [0]*10 + [1] + [0]*2,
'Q': [0]*11 + [1] + [0]*1,
'K': [0]*12 + [1],
'p': [0]*5 + [1] + [0]*7,
'n': [0]*4 + [1] + [0]*8,
'b': [0]*3 + [1] + [0]*9,
'r': [0]*2 + [1] + [0]*10,
'q': [0]*1 + [1] + [0]*11,
'k': [1] + [0]*12
}
def fpath_to_pgn(fpath):
"""Slices the pgn string from file path.
"""
return fpath.split('/')[-1].split('.jpeg')[0]
def pgn_to_np_array(pgn):
"""Convert pgn string to a 64x64 numpy array.
Dictionary:
Empty: 0
WPawn: 1
WKnight: 2
WBishop: 3
WRook: 4
WQueen: 5
WKing: 6
BPawn: -1
BKnight: -2
BBishop: -3
BRook: -4
BQueen: -5
BKing: -6
"""
board = []
for s in pgn.split('-'):
row = []
for ch in s:
if ch in piece_to_num_dict.keys():
row.append(piece_to_num_dict[ch])
else:
row += [0]*int(ch)
assert len(row) == 8, 'Length of row != 8.'
board.append(row)
return np.array(board)
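# Example (added for illustration): the starting position in this pgn-like
# notation, 'rnbqkbnr-pppppppp-8-8-8-8-PPPPPPPP-RNBQKBNR', maps row by row to
# the signed-integer board, e.g. the first row becomes
# [-4, -2, -3, -5, -6, -3, -2, -4] and each '8' expands to eight zeros.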
def pgn_to_dc_list(pgn):
"""Converts a pgn string to a list of size 64 of list of size 13.
Dictionary:
Empty: 0
WPawn: 1
WKnight: 2
WBishop: 3
WRook: 4
WQueen: 5
WKing: 6
BPawn: -1
BKnight: -2
BBishop: -3
BRook: -4
BQueen: -5
BKing: -6
"""
vector = []
for s in pgn.split('-'):
for ch in s:
if ch in piece_to_num_dict.keys():
vector.append(piece_to_list[ch])
else:
for _ in range(int(ch)):
vector.append([0]*6 + [1] + [0]*6)
return vector
def dc_vector_to_np_array(vector):
"""Converts a dummy-coded vector of size 832 = 64x13
into a numpy array representing the chessboard.
"""
vector = vector.reshape(64, 13)
array = [i-6 for v in vector for i, pos in enumerate(v) if pos]
array = np.array(array).reshape(8, 8)
return array
def get_X(fpaths):
"""Generates a numpy vector of size len(fpaths) x 480_000.
Normalization is done by dividing each value by the maximum, i.e., 255.
"""
x_array = np.array([], dtype=np.float32).reshape((0, 480_000))
for f in fpaths:
im = Image.open(f)
arr = np.asarray(im).reshape(1, 480_000)
x_array = np.append(x_array, arr, axis=0)
return x_array/255.
def get_Y(fpaths):
"""Generates a numpy vector of size len(fpaths) x 832.
"""
y_array = [np.array([], dtype=np.float32).reshape(0, 13)]*64
for f in fpaths:
pgn = fpath_to_pgn(f)
vec = pgn_to_dc_list(pgn)
vec = [np.array(i).reshape(1, 13) for i in vec]
y_array = [np.append(item, vec[i], axis=0) for i, item in enumerate(y_array)]
return y_array | [
"numpy.append",
"numpy.asarray",
"numpy.array",
"PIL.Image.open"
] | [((1772, 1787), 'numpy.array', 'np.array', (['board'], {}), '(board)\n', (1780, 1787), True, 'import numpy as np\n'), ((3032, 3045), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (3042, 3045), False, 'from PIL import Image\n'), ((3113, 3144), 'numpy.append', 'np.append', (['x_array', 'arr'], {'axis': '(0)'}), '(x_array, arr, axis=0)\n', (3122, 3144), True, 'import numpy as np\n'), ((2710, 2725), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (2718, 2725), True, 'import numpy as np\n'), ((2945, 2975), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (2953, 2975), True, 'import numpy as np\n'), ((3482, 3513), 'numpy.append', 'np.append', (['item', 'vec[i]'], {'axis': '(0)'}), '(item, vec[i], axis=0)\n', (3491, 3513), True, 'import numpy as np\n'), ((3060, 3074), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (3070, 3074), True, 'import numpy as np\n'), ((3272, 3302), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (3280, 3302), True, 'import numpy as np\n'), ((3422, 3433), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3430, 3433), True, 'import numpy as np\n')] |
# import modules
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
def compox (v, a):
return v * np.cos(a)
def compoy (v, a):
return v * np.sin(a)
v = 100 #float (input("Velocidad inicial "))
a = 45 #float (input("Angulo: "))
a = np.deg2rad (a)
g = 9.8
xmax = v ** 2.0 * np.sin(2.0 * a) / g
ymax = v ** 2.0 * np.sin(a) ** 2.0 / (2.0 * g )
ttot = 2.0 * v * np.sin(a) / g
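# Sanity check (added): with v = 100 m/s, a = 45 deg and g = 9.8 m/s^2 these
# give xmax = v^2*sin(2a)/g ~ 1020.4 m, ymax = v^2*sin(a)^2/(2g) ~ 255.1 m
# and ttot = 2*v*sin(a)/g ~ 14.43 s.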
margenx = xmax * (0.10)
margeny = ymax * (0.10)
t = np.linspace(0.0, ttot, 100)
y = compoy(v,a) * t - (1.0/2.0)*g * t**2.0
x = compox(v,a) * t
# set up the figure
fig = plt.figure(20)
ax = fig.gca()
def actualiza(i):
    ax.clear()
    plt.ylim(0.0 - margeny, ymax + margeny)
    plt.xlim(0.0 - margenx, xmax + margenx)
ax.plot(x[:i], y[:i])
# animation function
ani = animation.FuncAnimation(fig,actualiza,range(len(x)), interval=5,repeat=True)
plt.show() | [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.deg2rad",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linspace",
"numpy.cos"
] | [((280, 293), 'numpy.deg2rad', 'np.deg2rad', (['a'], {}), '(a)\n', (290, 293), True, 'import numpy as np\n'), ((477, 504), 'numpy.linspace', 'np.linspace', (['(0.0)', 'ttot', '(100)'], {}), '(0.0, ttot, 100)\n', (488, 504), True, 'import numpy as np\n'), ((593, 607), 'matplotlib.pyplot.figure', 'plt.figure', (['(20)'], {}), '(20)\n', (603, 607), True, 'import matplotlib.pyplot as plt\n'), ((880, 890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (888, 890), True, 'import matplotlib.pyplot as plt\n'), ((662, 701), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0 - margeny)', '(ymax + margeny)'], {}), '(0.0 - margeny, ymax + margeny)\n', (670, 701), True, 'import matplotlib.pyplot as plt\n'), ((707, 746), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.0 - margenx)', '(xmax + margenx)'], {}), '(0.0 - margenx, xmax + margenx)\n', (715, 746), True, 'import matplotlib.pyplot as plt\n'), ((141, 150), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (147, 150), True, 'import numpy as np\n'), ((186, 195), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (192, 195), True, 'import numpy as np\n'), ((322, 337), 'numpy.sin', 'np.sin', (['(2.0 * a)'], {}), '(2.0 * a)\n', (328, 337), True, 'import numpy as np\n'), ((409, 418), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (415, 418), True, 'import numpy as np\n'), ((361, 370), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (367, 370), True, 'import numpy as np\n')] |
import os
import sys
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import numpy as np
import codes.graph_utils as gu
from codes.simulator import ByzantineWorker
from codes.utils import filter_entries_from_json
from codes.attacks import get_attackers
from template import MNISTTemplate, MNISTTask
from template import (
define_parser,
DecentralizedTrainer,
check_noniid_hooks,
get_sampler_callback,
SGDMWorker,
AverageEvaluator,
)
LOG_CONSENSUS_DISTANCE_INTERVAL = 10
def get_graph(args):
if args.graph.startswith("tcb"):
        # Pattern: tcb{m},{b},{delta}
m, b, delta = args.graph[len("tcb"):].split(",")
m, b, delta = int(m), int(b), float(delta)
assert args.n == 2 * m + 1 + b
return gu.TwoCliquesWithByzantine(m, b, delta)
return gu.get_graph(args)
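# Example (added illustration): with ``--n 12 --graph tcb5,1,0.5`` the branch
# above yields m=5, b=1, delta=0.5, the assertion 12 == 2*5 + 1 + 1 holds,
# and a two-cliques-with-Byzantine topology is built.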
# ---------------------------------------------------------------------------- #
# Hooks #
# ---------------------------------------------------------------------------- #
def log_global_consensus_distance(trainer, E, B):
"""Log the consensus distance among all good workers."""
if B % LOG_CONSENSUS_DISTANCE_INTERVAL == 0:
lg = trainer.debug_logger
jlg = trainer.json_logger
lg.info(f"\n=== Log global consensus distance @ E{E}B{B} ===")
mean, counter = 0, 0
for w in trainer.workers:
if not isinstance(w, ByzantineWorker):
mean += w.running["aggregated_model"]
counter += 1
mean /= counter
consensus_distance = 0
for w in trainer.workers:
if not isinstance(w, ByzantineWorker):
consensus_distance += (
w.running["aggregated_model"] - mean).norm() ** 2
consensus_distance /= counter
lg.info(f"consensus_distance={consensus_distance:.3f}")
jlg.info(
{
"_meta": {"type": "global_consensus_distance"},
"E": E,
"B": B,
"gcd": consensus_distance.item(),
}
)
lg.info("\n")
def log_clique_consensus_distance(trainer, E, B):
"""Log the consensus distance among all good workers."""
if B % LOG_CONSENSUS_DISTANCE_INTERVAL == 0:
lg = trainer.debug_logger
jlg = trainer.json_logger
lg.info(f"\n=== Log clique consensus distance @ E{E}B{B} ===")
counter = 0
for w in trainer.workers:
if not isinstance(w, ByzantineWorker):
counter += 1
clique_size = (counter - 1) // 2
assert counter == clique_size * 2 + 1, (clique_size, counter)
mean1, mean2, c = 0, 0, 0
for w in trainer.workers:
if not isinstance(w, ByzantineWorker):
if c < clique_size:
mean1 += w.running["aggregated_model"]
elif c < 2 * clique_size:
mean2 += w.running["aggregated_model"]
c += 1
mean1 /= clique_size
mean2 /= clique_size
cd1, cd2, c = 0, 0, 0
for w in trainer.workers:
if not isinstance(w, ByzantineWorker):
if c < clique_size:
cd1 += (w.running["aggregated_model"] - mean1).norm() ** 2
elif c < 2 * clique_size:
cd2 += (w.running["aggregated_model"] - mean1).norm() ** 2
c += 1
cd1 /= clique_size
cd2 /= clique_size
lg.info(f"clique1_consensus_distance={cd1:.3f}")
lg.info(f"clique2_consensus_distance={cd2:.3f}")
jlg.info(
{
"_meta": {"type": "clique_consensus_distance"},
"E": E,
"B": B,
"clique1": cd1.item(),
"clique2": cd2.item(),
}
)
lg.info("\n")
def log_mixing_matrix(trainer, E, B):
"""Log the consensus distance among all good workers."""
if E == 1 and B == 0:
lg = trainer.debug_logger
jlg = trainer.json_logger
lg.info(f"\n=== Log mixing matrix @ E{E}B{B} ===")
with np.printoptions(precision=3, suppress=True):
lg.info(f"{trainer.graph.metropolis_weight}")
lg.info("\n")
class OptimizationDeltaRunner(MNISTTemplate):
EXP_PATTERN = (
"n{n}f{f}ATK{attack}_noniid{noniid}_agg{agg}_lr{lr:.3e}_m{momentum:.3e}_{graph}"
)
LOG_DIR_PATTERN = (
MNISTTemplate.ROOT_DIR +
"outputs/{script}/{exp_id}/" + EXP_PATTERN + "/"
)
DEFAULT_LINE_ARG = """--lr 0.01 --use-cuda --debug -n 12 -f 1 --epochs 30 --momentum 0.0 \
--batch-size 32 --max-batch-size-per-epoch 9999 --graph tcb5,1 --noniid 0 --agg gossip_avg \
--identifier demo --attack BF"""
def __init__(
self,
parser_func=define_parser,
trainer_fn=lambda args, metrics: DecentralizedTrainer(
pre_batch_hooks=[],
post_batch_hooks=[
check_noniid_hooks,
log_global_consensus_distance,
log_clique_consensus_distance,
log_mixing_matrix,
],
max_batches_per_epoch=args.max_batch_size_per_epoch,
log_interval=args.log_interval,
metrics=metrics,
use_cuda=args.use_cuda,
debug=args.debug,
),
sampler_fn=lambda args, rank: get_sampler_callback(
rank, args.n, noniid=args.noniid, longtail=args.longtail
),
lr_scheduler_fn=lambda opt: torch.optim.lr_scheduler.MultiStepLR(
opt, milestones=[], gamma=1.0
),
task=MNISTTask,
worker_fn=lambda args, trainer, rank, model, opt, loss_func, m, loader, device, lr_scheduler: SGDMWorker(
momentum=m,
index=rank,
data_loader=loader,
model=model,
optimizer=opt,
loss_func=loss_func,
device=device,
lr_scheduler=lr_scheduler,
)
if rank < args.n - args.f
else get_attackers(
args, rank, trainer, model, opt, loss_func, loader, device, lr_scheduler
),
evaluators_fn=lambda args, task, trainer, test_loader, device: [
AverageEvaluator(
# NOTE: as there is no Byzantine workers.
models=[
w.model
for w in trainer.workers
if not isinstance(w, ByzantineWorker)
],
data_loader=test_loader,
loss_func=task.loss_func(device),
device=device,
metrics=task.metrics(),
use_cuda=args.use_cuda,
debug=args.debug,
meta={"type": "Global Average Validation Accuracy"},
),
# NOTE: evaluate the average accuracy inside clique 1
AverageEvaluator(
models=[trainer.workers[i].model for i in trainer.graph.clique1()],
data_loader=test_loader,
loss_func=task.loss_func(device),
device=device,
metrics=task.metrics(),
use_cuda=args.use_cuda,
debug=args.debug,
meta={"type": "Clique1 Average Validation Accuracy"},
),
# NOTE: evaluate the average accuracy inside clique 2
AverageEvaluator(
models=[trainer.workers[i].model for i in trainer.graph.clique2()],
data_loader=test_loader,
loss_func=task.loss_func(device),
device=device,
metrics=task.metrics(),
use_cuda=args.use_cuda,
debug=args.debug,
meta={"type": "Clique2 Average Validation Accuracy"},
),
],
):
super().__init__(
parser_func=parser_func,
trainer_fn=trainer_fn,
sampler_fn=sampler_fn,
lr_scheduler_fn=lr_scheduler_fn,
task=task,
worker_fn=worker_fn,
evaluators_fn=evaluators_fn,
get_graph=get_graph,
)
def run(self):
if self.args.analyze:
if self.args.identifier == "exp":
self.generate_analysis()
else:
raise NotImplementedError(self.args.identifier)
else:
self.train()
# ---------------------------------------------------------------------------- #
# Plot for OptimizationDeltaRunner #
# ---------------------------------------------------------------------------- #
def generate_analysis(self):
out_dir = os.path.abspath(os.path.join(self.log_dir, os.pardir))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
mapping_attack = {
"LF": "LF",
"ALIE10": "ALIE",
"IPM": "IPM",
"dissensus1.5": "Dissensus",
"BF": "BF",
}
def loop_files():
b = 1
# for delta in [0, 0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 1]:
for delta in [0, 0.25, 0.5, 0.75, 1]:
# for attack in ["LF", "BF", "ALIE10", "IPM", "dissensus1.5"]:
for attack in ["dissensus1.5"]:
log_dir = self.LOG_DIR_PATTERN.format(
script=sys.argv[0][:-3],
exp_id=self.args.identifier,
n=11 + b,
f=b,
attack=attack,
noniid=1.0,
agg="scp1",
lr=1e-3,
momentum=0.9,
graph=f"tcb5,1,{delta}",
)
path = log_dir + "stats"
yield b, delta, attack, path
# Plot for accuracy
acc_results = []
for b, delta, attack, path in loop_files():
# Add global accuracies
try:
values = filter_entries_from_json(
path, kw="Global Average Validation Accuracy"
)
for v in values:
it = (v["E"] - 1) * self.args.max_batch_size_per_epoch
acc_results.append(
{
"Iterations": it,
"Accuracy (%)": v["top1"],
r"$\delta_{\max}$": str(delta * b / (b + 3)),
"ATK": mapping_attack[attack],
"b": b,
"Group": "All",
}
)
except Exception as e:
raise NotImplementedError(
f"attack={attack} b={b} delta={delta}")
# Extract results for local accuracy
for clique_id, clique_name in [(1, 'A'), (2, 'B')]:
try:
values = filter_entries_from_json(
path, kw=f"Clique{clique_id} Average Validation Accuracy"
)
for v in values:
it = (v["E"] - 1) * self.args.max_batch_size_per_epoch
acc_results.append(
{
"Iterations": it,
"Accuracy (%)": v["top1"],
r"$\delta_{\max}$": str(delta * b / (b + 3)),
"ATK": mapping_attack[attack],
"b": b,
"Group": f"Clique {clique_name}",
}
)
except Exception as e:
raise NotImplementedError(
f"clique_id={clique_id} attack={attack} b={b} delta={delta}"
)
acc_df = pd.DataFrame(acc_results)
acc_df.to_csv(out_dir + "/acc.csv", index=None)
acc_df[r"$\delta_{\max}$ "] = acc_df[r"$\delta_{\max}$"].apply(
lambda x: float(x))
plt.rcParams["font.family"] = "Times New Roman"
sns.set(rc={'figure.figsize': (6, 6.75 / 3)})
sns.set(font_scale=1)
fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True)
g = sns.lineplot(
data=acc_df,
x="Iterations",
y="Accuracy (%)",
hue=r"$\delta_{\max}$",
style="Group",
ax=axes[0],
)
g.set(xlim=(0, 1500))
g.set(xlim=(0, 1500))
g.set_xticks([0, 500, 1000])
g.set_xticklabels([0, 500, 1000])
axes[0].legend(loc="lower left", ncol=6, columnspacing=0.5, handlelength=1,
borderaxespad=0, labelspacing=0.1, bbox_to_anchor=(1, 1.02, 1, 0.2))
handles, labels = axes[0].get_legend_handles_labels()
g.legend().remove()
axes[0].legend(handles[:6], labels[:6], ncol=6, loc='lower center',
columnspacing=0.5, handlelength=1, borderaxespad=0, labelspacing=0.,
bbox_to_anchor=(0.4, 1.02, 1, 0.2), frameon=False)
last_iterate = acc_df[acc_df['Iterations'] == 1470]
g = sns.lineplot(
data=last_iterate,
x=r"$\delta_{\max}$ ",
y="Accuracy (%)",
style="Group",
ax=axes[1],
hue='ATK',
palette=['black']
)
g.set(xlim=(0, 0.25))
axes[1].legend(handles[6:], labels[6:], ncol=1, loc='upper right',
columnspacing=0.5, handlelength=1, borderaxespad=0, labelspacing=0.1,
bbox_to_anchor=(1., 1), frameon=False)
for i in range(2):
axes[i].tick_params(axis='both', which='major', pad=-4)
axes[0].set_ylabel('Accuracy (%)', labelpad=0)
axes[1].set_ylabel('', labelpad=-1)
fig.subplots_adjust(wspace=0.093)
fig.savefig(out_dir + "/acc.pdf",
bbox_inches="tight", dpi=720)
if __name__ == "__main__":
runner = OptimizationDeltaRunner()
runner.run()
| [
"pandas.DataFrame",
"seaborn.lineplot",
"os.path.join",
"numpy.printoptions",
"os.makedirs",
"template.get_sampler_callback",
"os.path.exists",
"matplotlib.pyplot.subplots",
"codes.graph_utils.TwoCliquesWithByzantine",
"codes.utils.filter_entries_from_json",
"codes.graph_utils.get_graph",
"cod... | [((854, 872), 'codes.graph_utils.get_graph', 'gu.get_graph', (['args'], {}), '(args)\n', (866, 872), True, 'import codes.graph_utils as gu\n'), ((802, 841), 'codes.graph_utils.TwoCliquesWithByzantine', 'gu.TwoCliquesWithByzantine', (['m', 'b', 'delta'], {}), '(m, b, delta)\n', (828, 841), True, 'import codes.graph_utils as gu\n'), ((12085, 12110), 'pandas.DataFrame', 'pd.DataFrame', (['acc_results'], {}), '(acc_results)\n', (12097, 12110), True, 'import pandas as pd\n'), ((12337, 12382), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (6, 6.75 / 3)}"}), "(rc={'figure.figsize': (6, 6.75 / 3)})\n", (12344, 12382), True, 'import seaborn as sns\n'), ((12391, 12412), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1)'}), '(font_scale=1)\n', (12398, 12412), True, 'import seaborn as sns\n'), ((12434, 12477), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharey': '(True)'}), '(nrows=1, ncols=2, sharey=True)\n', (12446, 12477), True, 'import matplotlib.pyplot as plt\n'), ((12490, 12606), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'acc_df', 'x': '"""Iterations"""', 'y': '"""Accuracy (%)"""', 'hue': '"""$\\\\delta_{\\\\max}$"""', 'style': '"""Group"""', 'ax': 'axes[0]'}), "(data=acc_df, x='Iterations', y='Accuracy (%)', hue=\n '$\\\\delta_{\\\\max}$', style='Group', ax=axes[0])\n", (12502, 12606), True, 'import seaborn as sns\n'), ((13408, 13542), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'last_iterate', 'x': '"""$\\\\delta_{\\\\max}$ """', 'y': '"""Accuracy (%)"""', 'style': '"""Group"""', 'ax': 'axes[1]', 'hue': '"""ATK"""', 'palette': "['black']"}), "(data=last_iterate, x='$\\\\delta_{\\\\max}$ ', y='Accuracy (%)',\n style='Group', ax=axes[1], hue='ATK', palette=['black'])\n", (13420, 13542), True, 'import seaborn as sns\n'), ((4237, 4280), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (4252, 4280), True, 'import numpy as np\n'), ((4977, 5299), 'template.DecentralizedTrainer', 'DecentralizedTrainer', ([], {'pre_batch_hooks': '[]', 'post_batch_hooks': '[check_noniid_hooks, log_global_consensus_distance,\n log_clique_consensus_distance, log_mixing_matrix]', 'max_batches_per_epoch': 'args.max_batch_size_per_epoch', 'log_interval': 'args.log_interval', 'metrics': 'metrics', 'use_cuda': 'args.use_cuda', 'debug': 'args.debug'}), '(pre_batch_hooks=[], post_batch_hooks=[\n check_noniid_hooks, log_global_consensus_distance,\n log_clique_consensus_distance, log_mixing_matrix],\n max_batches_per_epoch=args.max_batch_size_per_epoch, log_interval=args.\n log_interval, metrics=metrics, use_cuda=args.use_cuda, debug=args.debug)\n', (4997, 5299), False, 'from template import define_parser, DecentralizedTrainer, check_noniid_hooks, get_sampler_callback, SGDMWorker, AverageEvaluator\n'), ((5495, 5573), 'template.get_sampler_callback', 'get_sampler_callback', (['rank', 'args.n'], {'noniid': 'args.noniid', 'longtail': 'args.longtail'}), '(rank, args.n, noniid=args.noniid, longtail=args.longtail)\n', (5515, 5573), False, 'from template import define_parser, DecentralizedTrainer, check_noniid_hooks, get_sampler_callback, SGDMWorker, AverageEvaluator\n'), ((5633, 5700), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['opt'], {'milestones': '[]', 'gamma': '(1.0)'}), '(opt, milestones=[], gamma=1.0)\n', (5669, 5700), False, 'import torch\n'), ((8856, 8893), 'os.path.join', 'os.path.join', (['self.log_dir', 'os.pardir'], {}), 
'(self.log_dir, os.pardir)\n', (8868, 8893), False, 'import os\n'), ((8910, 8933), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (8924, 8933), False, 'import os\n'), ((8947, 8967), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (8958, 8967), False, 'import os\n'), ((5850, 6004), 'template.SGDMWorker', 'SGDMWorker', ([], {'momentum': 'm', 'index': 'rank', 'data_loader': 'loader', 'model': 'model', 'optimizer': 'opt', 'loss_func': 'loss_func', 'device': 'device', 'lr_scheduler': 'lr_scheduler'}), '(momentum=m, index=rank, data_loader=loader, model=model,\n optimizer=opt, loss_func=loss_func, device=device, lr_scheduler=\n lr_scheduler)\n', (5860, 6004), False, 'from template import define_parser, DecentralizedTrainer, check_noniid_hooks, get_sampler_callback, SGDMWorker, AverageEvaluator\n'), ((6150, 6241), 'codes.attacks.get_attackers', 'get_attackers', (['args', 'rank', 'trainer', 'model', 'opt', 'loss_func', 'loader', 'device', 'lr_scheduler'], {}), '(args, rank, trainer, model, opt, loss_func, loader, device,\n lr_scheduler)\n', (6163, 6241), False, 'from codes.attacks import get_attackers\n'), ((10194, 10265), 'codes.utils.filter_entries_from_json', 'filter_entries_from_json', (['path'], {'kw': '"""Global Average Validation Accuracy"""'}), "(path, kw='Global Average Validation Accuracy')\n", (10218, 10265), False, 'from codes.utils import filter_entries_from_json\n'), ((11142, 11230), 'codes.utils.filter_entries_from_json', 'filter_entries_from_json', (['path'], {'kw': 'f"""Clique{clique_id} Average Validation Accuracy"""'}), "(path, kw=\n f'Clique{clique_id} Average Validation Accuracy')\n", (11166, 11230), False, 'from codes.utils import filter_entries_from_json\n')] |
# Law of large numbers
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,10)
from distributionLib import Dist
import os
import imageio
np.random.seed(404)
N = 1000
data = np.random.choice([0, 1], size=(N,1), p=[0.49, 0.51])
E_x = np.sum(data)/N
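# Note (added): E_x is the empirical mean of the full population of N
# Bernoulli(0.51) draws; the dashed red line marks it, and the running sample
# means plotted below should converge towards it as the sample count grows.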
sampleLook = 801
temp = 0
yData = []
xData = []
sample = 100
plt.plot([0,sampleLook],[E_x,E_x],'r--',label='E(x)')
plt.ylim(E_x - E_x/20, E_x +E_x/20)
plt.xlim(0,sampleLook)
plt.title('Law of Large Numbers')
plt.xlabel('Number of samples')
plt.ylabel('Mean of sample Data')
##filenames = []
for ii in range(sampleLook):
tSample = np.random.randint(0, N,size=(sample,1))
temp += np.mean(data[tSample])
if not ii%10:
if not ii:
plt.scatter(ii,temp/(ii+1),color='k',label='Sample Data mean')
plt.legend()
else:
plt.scatter(ii,temp/(ii+1),color='k')
## filename = f'{ii}.png'
## filenames.append(filename)
plt.pause(0.00001)
plt.draw()
## plt.savefig(filename)
##with imageio.get_writer('mygif.gif', mode='I') as writer:
## for filename in filenames:
## image = imageio.imread(filename)
## writer.append_data(image)
##
### Remove files
##for filename in set(filenames):
## os.remove(filename)
##
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.draw",
"numpy.random.randint",
"numpy.mean",
"numpy.random.choice",
"matplotl... | [((173, 192), 'numpy.random.seed', 'np.random.seed', (['(404)'], {}), '(404)\n', (187, 192), True, 'import numpy as np\n'), ((211, 264), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': '(N, 1)', 'p': '[0.49, 0.51]'}), '([0, 1], size=(N, 1), p=[0.49, 0.51])\n', (227, 264), True, 'import numpy as np\n'), ((348, 406), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, sampleLook]', '[E_x, E_x]', '"""r--"""'], {'label': '"""E(x)"""'}), "([0, sampleLook], [E_x, E_x], 'r--', label='E(x)')\n", (356, 406), True, 'import matplotlib.pyplot as plt\n'), ((402, 442), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(E_x - E_x / 20)', '(E_x + E_x / 20)'], {}), '(E_x - E_x / 20, E_x + E_x / 20)\n', (410, 442), True, 'import matplotlib.pyplot as plt\n'), ((438, 461), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'sampleLook'], {}), '(0, sampleLook)\n', (446, 461), True, 'import matplotlib.pyplot as plt\n'), ((461, 494), 'matplotlib.pyplot.title', 'plt.title', (['"""Law of Large Numbers"""'], {}), "('Law of Large Numbers')\n", (470, 494), True, 'import matplotlib.pyplot as plt\n'), ((495, 526), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of samples"""'], {}), "('Number of samples')\n", (505, 526), True, 'import matplotlib.pyplot as plt\n'), ((527, 560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean of sample Data"""'], {}), "('Mean of sample Data')\n", (537, 560), True, 'import matplotlib.pyplot as plt\n'), ((270, 282), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (276, 282), True, 'import numpy as np\n'), ((621, 662), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N'], {'size': '(sample, 1)'}), '(0, N, size=(sample, 1))\n', (638, 662), True, 'import numpy as np\n'), ((673, 695), 'numpy.mean', 'np.mean', (['data[tSample]'], {}), '(data[tSample])\n', (680, 695), True, 'import numpy as np\n'), ((975, 991), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-05)'], {}), '(1e-05)\n', (984, 991), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1012), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1010, 1012), True, 'import matplotlib.pyplot as plt\n'), ((745, 814), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ii', '(temp / (ii + 1))'], {'color': '"""k"""', 'label': '"""Sample Data mean"""'}), "(ii, temp / (ii + 1), color='k', label='Sample Data mean')\n", (756, 814), True, 'import matplotlib.pyplot as plt\n'), ((820, 832), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (830, 832), True, 'import matplotlib.pyplot as plt\n'), ((859, 902), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ii', '(temp / (ii + 1))'], {'color': '"""k"""'}), "(ii, temp / (ii + 1), color='k')\n", (870, 902), True, 'import matplotlib.pyplot as plt\n')] |
from amodem import calib
from amodem import common
from amodem import config
from io import BytesIO
import numpy as np
import random
import pytest
import mock
config = config.fastest()
class ProcessMock:
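    # in-memory stand-in for the audio I/O subprocess used by calib:
    # stdin and stdout are both backed by the same BytesIO buffer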
def __init__(self):
self.buf = BytesIO()
self.stdin = self
self.stdout = self
self.bytes_per_sample = 2
def write(self, data):
assert self.buf.tell() < 10e6
self.buf.write(data)
def read(self, n):
return self.buf.read(n)
def test_success():
p = ProcessMock()
calib.send(config, p, gain=0.5, limit=32)
p.buf.seek(0)
calib.recv(config, p)
def test_too_strong():
p = ProcessMock()
calib.send(config, p, gain=1.001, limit=32)
p.buf.seek(0)
for r in calib.detector(config, src=p):
assert not r['success']
assert r['msg'] == 'too strong signal'
def test_too_weak():
p = ProcessMock()
calib.send(config, p, gain=0.01, limit=32)
p.buf.seek(0)
for r in calib.detector(config, src=p):
assert not r['success']
assert r['msg'] == 'too weak signal'
def test_too_noisy():
r = random.Random(0) # generate random binary signal
signal = np.array([r.choice([-1, 1]) for i in range(int(config.Fs))])
src = BytesIO(common.dumps(signal * 0.5))
for r in calib.detector(config, src=src):
assert not r['success']
assert r['msg'] == 'too noisy signal'
def test_errors():
class WriteError(ProcessMock):
def write(self, data):
raise KeyboardInterrupt()
p = WriteError()
with pytest.raises(KeyboardInterrupt):
calib.send(config, p, limit=32)
assert p.buf.tell() == 0
class ReadError(ProcessMock):
def read(self, n):
raise KeyboardInterrupt()
p = ReadError()
with pytest.raises(KeyboardInterrupt):
calib.recv(config, p, verbose=True)
assert p.buf.tell() == 0
@pytest.fixture(params=[0] + [sign * mag for sign in (+1, -1)
for mag in (0.1, 1, 10, 100, 1e3, 2e3)])
def freq_err(request):
return request.param * 1e-6
def test_drift(freq_err):
    freq = config.Fc * (1 + freq_err)  # freq_err is already a fraction (the fixture multiplies by 1e-6)
t = np.arange(int(1.0 * config.Fs)) * config.Ts
frame_length = 100
rms = 0.5
signal = rms * np.cos(2 * np.pi * freq * t)
src = BytesIO(common.dumps(signal))
iters = 0
for r in calib.detector(config, src, frame_length=frame_length):
assert r['success'] is True
assert abs(r['rms'] - rms) < 1e-3
assert abs(r['total'] - rms) < 1e-3
iters += 1
assert iters > 0
assert iters == config.baud / frame_length
def test_volume():
with mock.patch('subprocess.check_call') as check_call:
ctl = calib.volume_controller('volume-control')
ctl(0.01)
ctl(0.421)
ctl(0.369)
ctl(1)
assert check_call.mock_calls == [
mock.call(shell=True, args='volume-control 1%'),
mock.call(shell=True, args='volume-control 42%'),
mock.call(shell=True, args='volume-control 37%'),
mock.call(shell=True, args='volume-control 100%')
]
with pytest.raises(AssertionError):
ctl(0)
with pytest.raises(AssertionError):
ctl(-0.5)
with pytest.raises(AssertionError):
ctl(12.3)
def test_send_max_volume():
with mock.patch('subprocess.check_call') as check_call:
calib.send(config, dst=BytesIO(), volume_cmd='ctl', limit=1)
assert check_call.mock_calls == [mock.call(shell=True, args='ctl 100%')]
def test_recv_binary_search():
buf = BytesIO()
gains = [0.5, 0.25, 0.38, 0.44, 0.41, 0.39, 0.40, 0.40]
for gain in gains:
calib.send(config, buf, gain=gain, limit=2)
buf.seek(0)
dump = BytesIO()
with mock.patch('subprocess.check_call') as check_call:
calib.recv(config, src=buf, volume_cmd='ctl', dump_audio=dump)
assert dump.getvalue() == buf.getvalue()
gains.append(gains[-1])
fmt = 'ctl {0:.0f}%'
expected = [mock.call(shell=True, args=fmt.format(100 * g)) for g in gains]
assert check_call.mock_calls == expected
def test_recv_freq_change():
p = ProcessMock()
calib.send(config, p, gain=0.5, limit=2)
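    # begin decoding from an offset into the recorded signal so a 'frequency change' shows up mid-stream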
offset = p.buf.tell() // 16
p.buf.seek(offset)
messages = [state['msg'] for state in calib.recv_iter(config, p)]
assert messages == [
'good signal', 'good signal', 'good signal',
'frequency change',
'good signal', 'good signal', 'good signal']
| [
"amodem.calib.detector",
"io.BytesIO",
"amodem.common.dumps",
"amodem.config.fastest",
"amodem.calib.volume_controller",
"mock.call",
"random.Random",
"amodem.calib.recv_iter",
"pytest.fixture",
"mock.patch",
"pytest.raises",
"amodem.calib.recv",
"numpy.cos",
"amodem.calib.send"
] | [((171, 187), 'amodem.config.fastest', 'config.fastest', ([], {}), '()\n', (185, 187), False, 'from amodem import config\n'), ((1924, 2038), 'pytest.fixture', 'pytest.fixture', ([], {'params': '([0] + [(sign * mag) for sign in (+1, -1) for mag in (0.1, 1, 10, 100, \n 1000.0, 2000.0)])'}), '(params=[0] + [(sign * mag) for sign in (+1, -1) for mag in (\n 0.1, 1, 10, 100, 1000.0, 2000.0)])\n', (1938, 2038), False, 'import pytest\n'), ((548, 589), 'amodem.calib.send', 'calib.send', (['config', 'p'], {'gain': '(0.5)', 'limit': '(32)'}), '(config, p, gain=0.5, limit=32)\n', (558, 589), False, 'from amodem import calib\n'), ((612, 633), 'amodem.calib.recv', 'calib.recv', (['config', 'p'], {}), '(config, p)\n', (622, 633), False, 'from amodem import calib\n'), ((685, 728), 'amodem.calib.send', 'calib.send', (['config', 'p'], {'gain': '(1.001)', 'limit': '(32)'}), '(config, p, gain=1.001, limit=32)\n', (695, 728), False, 'from amodem import calib\n'), ((760, 789), 'amodem.calib.detector', 'calib.detector', (['config'], {'src': 'p'}), '(config, src=p)\n', (774, 789), False, 'from amodem import calib\n'), ((919, 961), 'amodem.calib.send', 'calib.send', (['config', 'p'], {'gain': '(0.01)', 'limit': '(32)'}), '(config, p, gain=0.01, limit=32)\n', (929, 961), False, 'from amodem import calib\n'), ((993, 1022), 'amodem.calib.detector', 'calib.detector', (['config'], {'src': 'p'}), '(config, src=p)\n', (1007, 1022), False, 'from amodem import calib\n'), ((1133, 1149), 'random.Random', 'random.Random', (['(0)'], {}), '(0)\n', (1146, 1149), False, 'import random\n'), ((1316, 1347), 'amodem.calib.detector', 'calib.detector', (['config'], {'src': 'src'}), '(config, src=src)\n', (1330, 1347), False, 'from amodem import calib\n'), ((2387, 2441), 'amodem.calib.detector', 'calib.detector', (['config', 'src'], {'frame_length': 'frame_length'}), '(config, src, frame_length=frame_length)\n', (2401, 2441), False, 'from amodem import calib\n'), ((3634, 3643), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3641, 3643), False, 'from io import BytesIO\n'), ((3807, 3816), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3814, 3816), False, 'from io import BytesIO\n'), ((4229, 4269), 'amodem.calib.send', 'calib.send', (['config', 'p'], {'gain': '(0.5)', 'limit': '(2)'}), '(config, p, gain=0.5, limit=2)\n', (4239, 4269), False, 'from amodem import calib\n'), ((252, 261), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (259, 261), False, 'from io import BytesIO\n'), ((1275, 1301), 'amodem.common.dumps', 'common.dumps', (['(signal * 0.5)'], {}), '(signal * 0.5)\n', (1287, 1301), False, 'from amodem import common\n'), ((1582, 1614), 'pytest.raises', 'pytest.raises', (['KeyboardInterrupt'], {}), '(KeyboardInterrupt)\n', (1595, 1614), False, 'import pytest\n'), ((1624, 1655), 'amodem.calib.send', 'calib.send', (['config', 'p'], {'limit': '(32)'}), '(config, p, limit=32)\n', (1634, 1655), False, 'from amodem import calib\n'), ((1814, 1846), 'pytest.raises', 'pytest.raises', (['KeyboardInterrupt'], {}), '(KeyboardInterrupt)\n', (1827, 1846), False, 'import pytest\n'), ((1856, 1891), 'amodem.calib.recv', 'calib.recv', (['config', 'p'], {'verbose': '(True)'}), '(config, p, verbose=True)\n', (1866, 1891), False, 'from amodem import calib\n'), ((2291, 2319), 'numpy.cos', 'np.cos', (['(2 * np.pi * freq * t)'], {}), '(2 * np.pi * freq * t)\n', (2297, 2319), True, 'import numpy as np\n'), ((2338, 2358), 'amodem.common.dumps', 'common.dumps', (['signal'], {}), '(signal)\n', (2350, 2358), False, 'from amodem import common\n'), ((2683, 2718), 
'mock.patch', 'mock.patch', (['"""subprocess.check_call"""'], {}), "('subprocess.check_call')\n", (2693, 2718), False, 'import mock\n'), ((2748, 2789), 'amodem.calib.volume_controller', 'calib.volume_controller', (['"""volume-control"""'], {}), "('volume-control')\n", (2771, 2789), False, 'from amodem import calib\n'), ((3394, 3429), 'mock.patch', 'mock.patch', (['"""subprocess.check_call"""'], {}), "('subprocess.check_call')\n", (3404, 3429), False, 'import mock\n'), ((3735, 3778), 'amodem.calib.send', 'calib.send', (['config', 'buf'], {'gain': 'gain', 'limit': '(2)'}), '(config, buf, gain=gain, limit=2)\n', (3745, 3778), False, 'from amodem import calib\n'), ((3826, 3861), 'mock.patch', 'mock.patch', (['"""subprocess.check_call"""'], {}), "('subprocess.check_call')\n", (3836, 3861), False, 'import mock\n'), ((3885, 3947), 'amodem.calib.recv', 'calib.recv', (['config'], {'src': 'buf', 'volume_cmd': '"""ctl"""', 'dump_audio': 'dump'}), "(config, src=buf, volume_cmd='ctl', dump_audio=dump)\n", (3895, 3947), False, 'from amodem import calib\n'), ((3173, 3202), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3186, 3202), False, 'import pytest\n'), ((3236, 3265), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3249, 3265), False, 'import pytest\n'), ((3302, 3331), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3315, 3331), False, 'import pytest\n'), ((3551, 3589), 'mock.call', 'mock.call', ([], {'shell': '(True)', 'args': '"""ctl 100%"""'}), "(shell=True, args='ctl 100%')\n", (3560, 3589), False, 'import mock\n'), ((4367, 4393), 'amodem.calib.recv_iter', 'calib.recv_iter', (['config', 'p'], {}), '(config, p)\n', (4382, 4393), False, 'from amodem import calib\n'), ((2915, 2962), 'mock.call', 'mock.call', ([], {'shell': '(True)', 'args': '"""volume-control 1%"""'}), "(shell=True, args='volume-control 1%')\n", (2924, 2962), False, 'import mock\n'), ((2976, 3024), 'mock.call', 'mock.call', ([], {'shell': '(True)', 'args': '"""volume-control 42%"""'}), "(shell=True, args='volume-control 42%')\n", (2985, 3024), False, 'import mock\n'), ((3038, 3086), 'mock.call', 'mock.call', ([], {'shell': '(True)', 'args': '"""volume-control 37%"""'}), "(shell=True, args='volume-control 37%')\n", (3047, 3086), False, 'import mock\n'), ((3100, 3149), 'mock.call', 'mock.call', ([], {'shell': '(True)', 'args': '"""volume-control 100%"""'}), "(shell=True, args='volume-control 100%')\n", (3109, 3149), False, 'import mock\n'), ((3476, 3485), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3483, 3485), False, 'from io import BytesIO\n')] |
import os
import glob
from collections import defaultdict
import pickle
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin
from scipy.stats import uniform
import keras.backend as K
import xgboost as XGB
def objective_function(x_i, x_i_prev, x_ineg, capcosts, MODELS, regularize=False, alpha=1.):
x_i = x_i.reshape(-1, len(capcosts)) # investor
x_tot = (x_i + x_ineg).reshape(-1, len(capcosts))
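    # total installed capacity per technology: this investor's decision plus all other agents' decisions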
y = 0.
for ix, model in enumerate(MODELS):
if x_tot[0, ix] == 0.0:
continue
net_rev = model.predict(x_tot).squeeze() * (x_i[0, ix]/x_tot[0, ix])
total_cost = capcosts[ix].squeeze() * x_i[0, ix].squeeze()
y += total_cost - net_rev
if regularize is True:
y += alpha * np.linalg.norm(x_i - x_i_prev)**2
return y
def objective_function_iccn(u, x_ineg, capcosts, datastruct, models, g):
"""
Returns the value of the ICNN and gradient of output w.r.t. input
evaluated at a given input tensor
"""
# Generate output
    y = 0.
x_tot = (u + x_ineg).reshape(1,len(capcosts))
grad = np.zeros((1,len(capcosts)), dtype=float)
for ix, m in enumerate(models):
y += m.predict(x_tot).squeeze()
# Get gradient of output w.r.t. inputs.
sess = K.get_session()
_grad = sess.run(g[ix], feed_dict={m.inputs[0]: x_tot})[0].squeeze()
if (x_tot == np.zeros((1,len(capcosts)))).all():
grad += _grad
else:
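            # quotient rule for d/du of m(x_tot) * u / x_tot, where x_tot = u + x_ineg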
grad += +_grad*(u/x_tot) + ((x_tot-u)/x_tot**2) * m.predict(x_tot).squeeze()
return np.float(y), grad.reshape(len(capcosts))
def de_optimizer(x, i, nodes, capcosts, caplimits, datastruct,
models, iteration_count, action_incr=np.inf, num_x0=5,
regularize=False, alpha=1.):
"""Optimize for agent i"""
# Get total upper and lower bounds
nodes = np.array(nodes)
lower_bound_tot = nodes.min(axis=0)
upper_bound_tot = nodes.max(axis=0)
# Get upper and lower bounds for agent based on other agents' decisions
ineg = [x for x in range(x.shape[0]) if x != i]
x_ineg = x[ineg, :].sum(axis=0)
lower_bound = np.zeros_like(lower_bound_tot)
upper_bound = np.clip(upper_bound_tot - x_ineg, 0, upper_bound_tot)
upper_bound = np.minimum(upper_bound, x[i, :] + action_incr)
upper_bound = np.minimum(upper_bound, caplimits)
bounds = Bounds(lower_bound, upper_bound)
print("i: {}, ineg: {}".format(i, ineg))
print(" lower_bound: {}".format(lower_bound))
print(" upper_bound: {}".format(upper_bound))
# Solve over random starting points
    fs = []; xs = []
for j in range(num_x0):
res = differential_evolution(objective_function,
bounds=bounds,
args=(x[i, :], x_ineg, capcosts, models, regularize, alpha),
popsize=100,
mutation=0.5,
recombination=0.9,
init="latinhypercube")
fs.append(res["fun"])
xs.append(res["x"])
# Find the best solution
fs = -1 * np.array(fs)
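    # objective values were costs, so negate and take the argmax to pick the cheapest run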
max_idx = np.argmax(fs)
xs = np.array(xs)
xopt = xs[max_idx]
fopt = fs[max_idx]
print(" xopt: {}".format(xopt))
print(" fopt: {}".format(fopt))
return xopt, fopt
def brute_force_optimizer(x, i, nodes, capcosts, caplimits, datastruct,
models, iteration_count, action_incr=np.inf, num_x0=10,
regularize=False, alpha=1.):
# Get total upper and lower bounds
nodes = np.array(nodes)
lower_bound_tot = nodes.min(axis=0)
upper_bound_tot = nodes.max(axis=0)
# Get upper and lower bounds for agent based on other agents' decisions
ineg = [x for x in range(x.shape[0]) if x != i]
x_ineg = x[ineg, :].sum(axis=0)
lower_bound = np.zeros_like(lower_bound_tot)
# lower_bound = np.clip(lower_bound_tot, 0, upper_bound_tot)
upper_bound = np.clip(upper_bound_tot, 0, upper_bound_tot)
upper_bound = np.minimum(upper_bound, x[i, :] + action_incr)
upper_bound = np.maximum(upper_bound, caplimits)
bounds = Bounds(lower_bound, upper_bound)
print("i: {}, ineg: {}".format(i, ineg))
print(" lower_bound: {}".format(lower_bound))
print(" upper_bound: {}".format(upper_bound))
print(x[i, :])
split_array = [3, 4, 2, 3, 0.5, 4, 4]
ranges = tuple([slice(lower_bound[i], upper_bound[i], split_array[i]) for i in range(0,7)])
xv, f, _, _ = brute(objective_function, ranges=ranges,
args=(x[i, :], x_ineg, capcosts, models,
regularize, alpha),
finish=None, full_output=True)
print(" BFO xopt: ",(xv))
print(" BFO fopt: ",(f))
return xv, f
def gradient_optimizer(x, i, nodes, capcosts, caplimits, datastruct,
models, iteration_count, action_incr=np.inf, num_x0=3,
regularize=False, alpha=1.):
"""Optimize for agent i"""
# Get total upper and lower bounds
nodes = np.array(nodes)
x_scaler = datastruct.capacity.max().max()
lower_bound_tot = (nodes.min(axis=0)/x_scaler)
upper_bound_tot = (nodes.max(axis=0)/x_scaler)
# Get upper and lower bounds for agent based on other agents' decisions
ineg = [x for x in range(x.shape[0]) if x != i]
x_ineg = x[ineg, :].sum(axis=0)
lower_bound = lower_bound_tot/1.8
lower_bound = np.zeros(lower_bound_tot.shape)
lower_bound = np.clip(lower_bound_tot - x_ineg, 0, lower_bound)
upper_bound = np.clip(upper_bound_tot - x_ineg, 0, upper_bound_tot)
bounds = Bounds(lower_bound, upper_bound)
grads = [K.gradients(model.output, model.inputs) for model in models]
print("i: {}, ineg: {}".format(i, ineg))
print(" lower_bound: {}".format(lower_bound))
print(" upper_bound: {}".format(upper_bound))
# Solve over random starting points
    fs = []; xs = []
for j in range(num_x0):
if all(x[i, :] == 0.0):
int_start = np.random.rand(len(lower_bound))*np.array([1, 1, 1, 1, 1e-5, 1, 1])
else:
int_start = x[i, :] + np.random.rand(len(lower_bound))*np.array([0.1, 0.1, 0.1, 0.1, 0.0, 0.1, 0.1])/iteration_count
res = minimize(objective_function_iccn,
x0=int_start,
bounds=bounds,
args=(x_ineg, capcosts, datastruct, models, grads),
method="trust-constr",
jac=True,
options={ 'disp': False, 'maxiter': 10000}, tol=1e-6)
fs.append(res["fun"])
xs.append(res["x"])
# Find the best solution
fs = np.array(fs)
max_idx = np.argmin(fs)
xs = np.array(xs)
xopt = xs[max_idx]
fopt = fs[max_idx]
print(" xopt: {}".format(xopt))
print(" fopt: {}".format(fopt))
return xopt, fopt | [
"keras.backend.gradients",
"scipy.optimize.minimize",
"numpy.minimum",
"numpy.zeros_like",
"numpy.maximum",
"numpy.argmax",
"keras.backend.get_session",
"numpy.zeros",
"numpy.float",
"numpy.clip",
"numpy.argmin",
"scipy.optimize.differential_evolution",
"scipy.optimize.Bounds",
"numpy.arra... | [((2025, 2040), 'numpy.array', 'np.array', (['nodes'], {}), '(nodes)\n', (2033, 2040), True, 'import numpy as np\n'), ((2308, 2338), 'numpy.zeros_like', 'np.zeros_like', (['lower_bound_tot'], {}), '(lower_bound_tot)\n', (2321, 2338), True, 'import numpy as np\n'), ((2357, 2410), 'numpy.clip', 'np.clip', (['(upper_bound_tot - x_ineg)', '(0)', 'upper_bound_tot'], {}), '(upper_bound_tot - x_ineg, 0, upper_bound_tot)\n', (2364, 2410), True, 'import numpy as np\n'), ((2429, 2475), 'numpy.minimum', 'np.minimum', (['upper_bound', '(x[i, :] + action_incr)'], {}), '(upper_bound, x[i, :] + action_incr)\n', (2439, 2475), True, 'import numpy as np\n'), ((2494, 2528), 'numpy.minimum', 'np.minimum', (['upper_bound', 'caplimits'], {}), '(upper_bound, caplimits)\n', (2504, 2528), True, 'import numpy as np\n'), ((2542, 2574), 'scipy.optimize.Bounds', 'Bounds', (['lower_bound', 'upper_bound'], {}), '(lower_bound, upper_bound)\n', (2548, 2574), False, 'from scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin\n'), ((3372, 3385), 'numpy.argmax', 'np.argmax', (['fs'], {}), '(fs)\n', (3381, 3385), True, 'import numpy as np\n'), ((3396, 3408), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (3404, 3408), True, 'import numpy as np\n'), ((3825, 3840), 'numpy.array', 'np.array', (['nodes'], {}), '(nodes)\n', (3833, 3840), True, 'import numpy as np\n'), ((4108, 4138), 'numpy.zeros_like', 'np.zeros_like', (['lower_bound_tot'], {}), '(lower_bound_tot)\n', (4121, 4138), True, 'import numpy as np\n'), ((4222, 4266), 'numpy.clip', 'np.clip', (['upper_bound_tot', '(0)', 'upper_bound_tot'], {}), '(upper_bound_tot, 0, upper_bound_tot)\n', (4229, 4266), True, 'import numpy as np\n'), ((4285, 4331), 'numpy.minimum', 'np.minimum', (['upper_bound', '(x[i, :] + action_incr)'], {}), '(upper_bound, x[i, :] + action_incr)\n', (4295, 4331), True, 'import numpy as np\n'), ((4350, 4384), 'numpy.maximum', 'np.maximum', (['upper_bound', 'caplimits'], {}), '(upper_bound, caplimits)\n', (4360, 4384), True, 'import numpy as np\n'), ((4398, 4430), 'scipy.optimize.Bounds', 'Bounds', (['lower_bound', 'upper_bound'], {}), '(lower_bound, upper_bound)\n', (4404, 4430), False, 'from scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin\n'), ((4755, 4891), 'scipy.optimize.brute', 'brute', (['objective_function'], {'ranges': 'ranges', 'args': '(x[i, :], x_ineg, capcosts, models, regularize, alpha)', 'finish': 'None', 'full_output': '(True)'}), '(objective_function, ranges=ranges, args=(x[i, :], x_ineg, capcosts,\n models, regularize, alpha), finish=None, full_output=True)\n', (4760, 4891), False, 'from scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin\n'), ((5310, 5325), 'numpy.array', 'np.array', (['nodes'], {}), '(nodes)\n', (5318, 5325), True, 'import numpy as np\n'), ((5696, 5727), 'numpy.zeros', 'np.zeros', (['lower_bound_tot.shape'], {}), '(lower_bound_tot.shape)\n', (5704, 5727), True, 'import numpy as np\n'), ((5746, 5795), 'numpy.clip', 'np.clip', (['(lower_bound_tot - x_ineg)', '(0)', 'lower_bound'], {}), '(lower_bound_tot - x_ineg, 0, lower_bound)\n', (5753, 5795), True, 'import numpy as np\n'), ((5814, 5867), 'numpy.clip', 'np.clip', (['(upper_bound_tot - x_ineg)', '(0)', 'upper_bound_tot'], {}), '(upper_bound_tot - x_ineg, 0, upper_bound_tot)\n', (5821, 5867), True, 'import numpy as np\n'), ((5881, 5913), 'scipy.optimize.Bounds', 'Bounds', (['lower_bound', 'upper_bound'], {}), '(lower_bound, upper_bound)\n', (5887, 5913), False, 'from 
scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin\n'), ((6955, 6967), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (6963, 6967), True, 'import numpy as np\n'), ((6982, 6995), 'numpy.argmin', 'np.argmin', (['fs'], {}), '(fs)\n', (6991, 6995), True, 'import numpy as np\n'), ((7006, 7018), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (7014, 7018), True, 'import numpy as np\n'), ((1423, 1438), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (1436, 1438), True, 'import keras.backend as K\n'), ((1716, 1727), 'numpy.float', 'np.float', (['y'], {}), '(y)\n', (1724, 1727), True, 'import numpy as np\n'), ((2827, 3022), 'scipy.optimize.differential_evolution', 'differential_evolution', (['objective_function'], {'bounds': 'bounds', 'args': '(x[i, :], x_ineg, capcosts, models, regularize, alpha)', 'popsize': '(100)', 'mutation': '(0.5)', 'recombination': '(0.9)', 'init': '"""latinhypercube"""'}), "(objective_function, bounds=bounds, args=(x[i, :],\n x_ineg, capcosts, models, regularize, alpha), popsize=100, mutation=0.5,\n recombination=0.9, init='latinhypercube')\n", (2849, 3022), False, 'from scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin\n'), ((3345, 3357), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (3353, 3357), True, 'import numpy as np\n'), ((5927, 5966), 'keras.backend.gradients', 'K.gradients', (['model.output', 'model.inputs'], {}), '(model.output, model.inputs)\n', (5938, 5966), True, 'import keras.backend as K\n'), ((6506, 6715), 'scipy.optimize.minimize', 'minimize', (['objective_function_iccn'], {'x0': 'int_start', 'bounds': 'bounds', 'args': '(x_ineg, capcosts, datastruct, models, grads)', 'method': '"""trust-constr"""', 'jac': '(True)', 'options': "{'disp': False, 'maxiter': 10000}", 'tol': '(1e-06)'}), "(objective_function_iccn, x0=int_start, bounds=bounds, args=(x_ineg,\n capcosts, datastruct, models, grads), method='trust-constr', jac=True,\n options={'disp': False, 'maxiter': 10000}, tol=1e-06)\n", (6514, 6715), False, 'from scipy.optimize import Bounds, minimize, differential_evolution, brute, fmin\n'), ((901, 931), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_i - x_i_prev)'], {}), '(x_i - x_i_prev)\n', (915, 931), True, 'import numpy as np\n'), ((6314, 6349), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1e-05, 1, 1]'], {}), '([1, 1, 1, 1, 1e-05, 1, 1])\n', (6322, 6349), True, 'import numpy as np\n'), ((6430, 6475), 'numpy.array', 'np.array', (['[0.1, 0.1, 0.1, 0.1, 0.0, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1, 0.1, 0.0, 0.1, 0.1])\n', (6438, 6475), True, 'import numpy as np\n')] |
import math
import pandas as pd
import xlsxwriter
import numpy as np
from scipy.stats.contingency import chi2_contingency
from scipy.stats.contingency import margins
import pyreadstat
def write_long_crosttab_to_xslx(df_name, varx, vary, PONDER = 'weight', file_name = 'ispis_baze.xlsx'):
    ''' Takes an SPSS file and, for each variable in varx, builds a weighted crosstab against each variable in vary, with a post-hoc test based on adjusted standardized residuals
    :param df_name: path to the SPSS (.sav) input file
    :param PONDER: name of the weight variable
    :param file_name: output file (.xlsx)
:return: None
'''
df = pd.read_spss(df_name, convert_categoricals=False)
throwaway, meta = pyreadstat.read_sav(df_name, apply_value_formats=True)
workbook = xlsxwriter.Workbook(file_name) # opens EXCEL file which is named by the file_name argument
# defining the cell formating
cell_format = workbook.add_format() # format regulating method
cell_format.set_font_color('#000000') # color code
cell_format.set_font_size(12) # font size
cell_format.set_align('center') # alignment
# format for p < 0.01 and standardized residual value higher than 2.58
cell_formatv1 = workbook.add_format()
cell_formatv1.set_font_color('#000000') # color code
cell_formatv1.set_font_size(12) # font size
cell_formatv1.set_align('center') # alignment
cell_formatv1.set_bg_color('#00FF80') # bg color code
# format for p < 0.05 and standardized residual value higher than 1.96
cell_formatv2 = workbook.add_format()
cell_formatv2.set_font_color('#000000') # color code
cell_formatv2.set_font_size(12) # font size
cell_formatv2.set_align('center') # alignment
cell_formatv2.set_bg_color('#00FF80') # bg color code
# format for p < 0.05 and standardized residual value lower than -1.96
cell_formatm1 = workbook.add_format()
cell_formatm1.set_font_color('#000000') # color code
cell_formatm1.set_font_size(12) # font size
cell_formatm1.set_align('center') # alignment
cell_formatm1.set_bg_color('#FFCC99') # bg color code
# format for p < 0.01 and standardized residual value lower than -2.58
cell_formatm2 = workbook.add_format()
cell_formatm2.set_font_color('#000000') # color code
cell_formatm2.set_font_size(12) # font size
cell_formatm2.set_align('center') # alignment
cell_formatm2.set_bg_color('#FFCC99') # bg color code
for column in varx:
col = 0 # we start from the 1st column
row = 1 # we start from the second row (because we want to use the 1st row for the "column" variable names)
worksheet = workbook.add_worksheet(column)
worksheet.write(row, col, column, cell_format) # we write the "row" variable name
row += 1
for item in sorted(df[column].unique()):
worksheet.write(row, col, meta.variable_value_labels[column][item], cell_format)
row += 1
col = 1 # we go to the next column
for column2 in vary:
row = 0 # we write the "column" variable name in row 0
worksheet.write(row, col, column2, cell_format)
row += 1
for item in sorted(df[column2].unique()):
worksheet.write(row, col, meta.variable_value_labels[column2][item], cell_format)
col += 1
col -= len(df[column2].unique())
row += 1
if column == column2:
col += len(df[column].unique())
else:
crosstab = pd.crosstab(df[column], df[column2], df[PONDER], aggfunc=sum)
crosstab = crosstab.fillna(0)
crosstab = crosstab.round(0)
print(crosstab)
print(crosstab.columns)
print(crosstab.index)
# posthoc format
residuals_format = chi_square_post_hoc(crosstab)
for i in range(len(residuals_format)):
if residuals_format[i] == 1:
residuals_format[i] = cell_formatv1
elif residuals_format[i] == 2:
residuals_format[i] = cell_formatv2
elif residuals_format[i] == 3:
residuals_format[i] = cell_formatm2
elif residuals_format[i] == 4:
residuals_format[i] = cell_formatm1
else:
residuals_format[i] = cell_format
residuals_format = np.asarray(residuals_format)
residuals_format = np.split(residuals_format, crosstab.shape[0])
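                # convert the weighted counts to column percentages before writing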
crosstab = (100. * crosstab / crosstab.sum()).round(1)
crosstab = crosstab.to_numpy()
for i in range(crosstab.shape[0]):
for m in range(crosstab.shape[1]):
worksheet.write(row, col, f'{crosstab[i][m]}%', residuals_format[i][m])
col += 1
col -= crosstab.shape[1]
row += 1
col += crosstab.shape[1]
workbook.close()
def chi_square_post_hoc(a):
'''
    :param a: crosstab (contingency table) to run the chi-square post-hoc test on
    :return: flat list with one formatting code per cell (row-major); the caller reshapes it to the crosstab's shape
'''
res = []
chi, p, dof, expected = chi2_contingency(a, correction= False)
b = np.asarray(a)
suma = b.sum()
b = b.tolist()
marginss = margins(a)
if p <= 0.05:
for i1, item1 in enumerate(b):
for i2, item2 in enumerate(item1):
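                # adjusted standardized residual: (observed - expected) / sqrt(rowsum * colsum * (1 - rowprop) * (1 - colprop) / N)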
azr = (b[i1][i2] - expected[i1][i2]) / math.sqrt(marginss[0][i1][0] * marginss[1][0][i2] * (1 - marginss[0][i1][0]/suma) * (1 - marginss[1][0][i2]/suma) / suma)
if b[i1][i2] == 0:
res.append(5)
elif azr >= 2.58:
res.append(1)
elif azr >= 1.96:
res.append(2)
elif azr <= -2.58:
res.append(3)
elif azr <= -1.96:
res.append(4)
else:
res.append(5)
else:
for dim in b:
for item in dim:
res.append(5)
return res
if __name__ == '__main__':
# write_long_crosttab_to_xslx('imematrice', [var_row_1, var_row_2, var_row_3], ['var_colum_1', 'var_column_2','var_colum_3'])
pass | [
"pandas.crosstab",
"math.sqrt",
"numpy.asarray",
"xlsxwriter.Workbook",
"scipy.stats.contingency.margins",
"numpy.split",
"pandas.read_spss",
"scipy.stats.contingency.chi2_contingency",
"pyreadstat.read_sav"
] | [((604, 653), 'pandas.read_spss', 'pd.read_spss', (['df_name'], {'convert_categoricals': '(False)'}), '(df_name, convert_categoricals=False)\n', (616, 653), True, 'import pandas as pd\n'), ((677, 731), 'pyreadstat.read_sav', 'pyreadstat.read_sav', (['df_name'], {'apply_value_formats': '(True)'}), '(df_name, apply_value_formats=True)\n', (696, 731), False, 'import pyreadstat\n'), ((750, 780), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['file_name'], {}), '(file_name)\n', (769, 780), False, 'import xlsxwriter\n'), ((5452, 5489), 'scipy.stats.contingency.chi2_contingency', 'chi2_contingency', (['a'], {'correction': '(False)'}), '(a, correction=False)\n', (5468, 5489), False, 'from scipy.stats.contingency import chi2_contingency\n'), ((5500, 5513), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (5510, 5513), True, 'import numpy as np\n'), ((5570, 5580), 'scipy.stats.contingency.margins', 'margins', (['a'], {}), '(a)\n', (5577, 5580), False, 'from scipy.stats.contingency import margins\n'), ((3583, 3644), 'pandas.crosstab', 'pd.crosstab', (['df[column]', 'df[column2]', 'df[PONDER]'], {'aggfunc': 'sum'}), '(df[column], df[column2], df[PONDER], aggfunc=sum)\n', (3594, 3644), True, 'import pandas as pd\n'), ((4585, 4613), 'numpy.asarray', 'np.asarray', (['residuals_format'], {}), '(residuals_format)\n', (4595, 4613), True, 'import numpy as np\n'), ((4650, 4695), 'numpy.split', 'np.split', (['residuals_format', 'crosstab.shape[0]'], {}), '(residuals_format, crosstab.shape[0])\n', (4658, 4695), True, 'import numpy as np\n'), ((5746, 5875), 'math.sqrt', 'math.sqrt', (['(marginss[0][i1][0] * marginss[1][0][i2] * (1 - marginss[0][i1][0] / suma) *\n (1 - marginss[1][0][i2] / suma) / suma)'], {}), '(marginss[0][i1][0] * marginss[1][0][i2] * (1 - marginss[0][i1][0] /\n suma) * (1 - marginss[1][0][i2] / suma) / suma)\n', (5755, 5875), False, 'import math\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
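# drop one dummy column to avoid the dummy variable trap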
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#Part 2 Making of ANN
import keras
from keras.models import Sequential
from keras.layers import Dense
#Initialise ANN
classifier = Sequential()
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
#Adding output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
#Compile the network
classifier.compile(optimizer= 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
#Fit an ANN to Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
#Part 3 Making predictions and evaluating models
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
#Homework
#Test only one data point with this trained model
new_pred = classifier.predict(sc.transform(np.array([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])))
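# a single observation must be passed as a 2-D array and scaled with the scaler fitted on the training set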
new_pred = (new_pred > 0.5)
#Part 4 Evaluation and Tuning
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense
def build_classifier():
classifier = Sequential()
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
#Adding output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
#Compile the network
classifier.compile(optimizer= 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
classifier = KerasClassifier(build_fn = build_classifier, batch_size = 10, epochs = 100)
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10, n_jobs = -1) | [
"sklearn.metrics.confusion_matrix",
"keras.wrappers.scikit_learn.KerasClassifier",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.LabelEncode... | [((106, 140), 'pandas.read_csv', 'pd.read_csv', (['"""Churn_Modelling.csv"""'], {}), "('Churn_Modelling.csv')\n", (117, 140), True, 'import pandas as pd\n'), ((315, 329), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (327, 329), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((399, 413), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (411, 413), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((480, 519), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categorical_features': '[1]'}), '(categorical_features=[1])\n', (493, 519), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((728, 781), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, test_size=0.2, random_state=0)\n', (744, 781), False, 'from sklearn.model_selection import train_test_split\n'), ((859, 875), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (873, 875), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1077, 1089), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1087, 1089), False, 'from keras.models import Sequential\n'), ((1788, 1820), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1804, 1820), False, 'from sklearn.metrics import confusion_matrix\n'), ((2720, 2789), 'keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', ([], {'build_fn': 'build_classifier', 'batch_size': '(10)', 'epochs': '(100)'}), '(build_fn=build_classifier, batch_size=10, epochs=100)\n', (2735, 2789), False, 'from keras.wrappers.scikit_learn import KerasClassifier\n'), ((2809, 2886), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'classifier', 'X': 'X_train', 'y': 'y_train', 'cv': '(10)', 'n_jobs': '(-1)'}), '(estimator=classifier, X=X_train, y=y_train, cv=10, n_jobs=-1)\n', (2824, 2886), False, 'from sklearn.model_selection import cross_val_score\n'), ((1105, 1173), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(6)', 'init': '"""uniform"""', 'activation': '"""relu"""', 'input_dim': '(11)'}), "(output_dim=6, init='uniform', activation='relu', input_dim=11)\n", (1110, 1173), False, 'from keras.layers import Dense\n'), ((1198, 1252), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(6)', 'init': '"""uniform"""', 'activation': '"""relu"""'}), "(output_dim=6, init='uniform', activation='relu')\n", (1203, 1252), False, 'from keras.layers import Dense\n'), ((1297, 1354), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(1)', 'init': '"""uniform"""', 'activation': '"""sigmoid"""'}), "(output_dim=1, init='uniform', activation='sigmoid')\n", (1302, 1354), False, 'from keras.layers import Dense\n'), ((2261, 2273), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2271, 2273), False, 'from keras.models import Sequential\n'), ((1925, 1981), 'numpy.array', 'np.array', (['[[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]]'], {}), '([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])\n', (1933, 1981), True, 'import numpy as np\n'), ((2293, 2361), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(6)', 'init': '"""uniform"""', 'activation': '"""relu"""', 'input_dim': '(11)'}), "(output_dim=6, init='uniform', activation='relu', input_dim=11)\n", (2298, 2361), False, 'from keras.layers import Dense\n'), ((2390, 2444), 
'keras.layers.Dense', 'Dense', ([], {'output_dim': '(6)', 'init': '"""uniform"""', 'activation': '"""relu"""'}), "(output_dim=6, init='uniform', activation='relu')\n", (2395, 2444), False, 'from keras.layers import Dense\n'), ((2497, 2554), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(1)', 'init': '"""uniform"""', 'activation': '"""sigmoid"""'}), "(output_dim=1, init='uniform', activation='sigmoid')\n", (2502, 2554), False, 'from keras.layers import Dense\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Pascal VOC2012 dataset manager
"""
import cv2
from glob import glob
import numpy as np
import os
from PIL import Image
class DBManager():
def __init__(self):
super(DBManager, self).__init__()
def load_data(db_path, train_ratio=0.7, img_is_gray=True, normalize=True):
"""
Method to load the database
        N: number of images
        C: number of image channels
Args:
- (str) database path
- (float) train ratio
            - (bool) specify whether the images have a single channel
Return:
- (numpy array) images (N, C, H, W)
- (numpy array) classes (N)
"""
imgs_path = glob(db_path + "/*")
number_of_imgs = len(imgs_path)
train_number = int(number_of_imgs*train_ratio)
random_idx = np.arange(number_of_imgs)
np.random.seed(42)
np.random.shuffle(random_idx)
train_imgs, train_labels = [], []
test_imgs, test_labels = [], []
for i, idx in enumerate(random_idx):
img_path = imgs_path[idx]
image = np.asarray(Image.open(img_path))
if img_is_gray:
image = np.expand_dims(image, 0)
img_basename = os.path.splitext(os.path.basename(img_path))[0]
if i <= train_number:
train_imgs.append(image)
train_labels.append(int(img_basename.split('_')[2]))
else:
test_imgs.append(image)
test_labels.append(int(img_basename.split('_')[2]))
return np.array(train_imgs)/255., np.array(train_labels),\
np.array(test_imgs)/255., np.array(test_labels)
| [
"numpy.random.seed",
"os.path.basename",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.array",
"numpy.arange",
"glob.glob",
"numpy.random.shuffle"
] | [((739, 759), 'glob.glob', 'glob', (["(db_path + '/*')"], {}), "(db_path + '/*')\n", (743, 759), False, 'from glob import glob\n'), ((876, 901), 'numpy.arange', 'np.arange', (['number_of_imgs'], {}), '(number_of_imgs)\n', (885, 901), True, 'import numpy as np\n'), ((910, 928), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (924, 928), True, 'import numpy as np\n'), ((937, 966), 'numpy.random.shuffle', 'np.random.shuffle', (['random_idx'], {}), '(random_idx)\n', (954, 966), True, 'import numpy as np\n'), ((1653, 1675), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (1661, 1675), True, 'import numpy as np\n'), ((1716, 1737), 'numpy.array', 'np.array', (['test_labels'], {}), '(test_labels)\n', (1724, 1737), True, 'import numpy as np\n'), ((1165, 1185), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1175, 1185), False, 'from PIL import Image\n'), ((1239, 1263), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1253, 1263), True, 'import numpy as np\n'), ((1626, 1646), 'numpy.array', 'np.array', (['train_imgs'], {}), '(train_imgs)\n', (1634, 1646), True, 'import numpy as np\n'), ((1690, 1709), 'numpy.array', 'np.array', (['test_imgs'], {}), '(test_imgs)\n', (1698, 1709), True, 'import numpy as np\n'), ((1308, 1334), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (1324, 1334), False, 'import os\n')] |
import os, glob, torch, time
import numpy as np
from PIL import Image
import torch.nn as nn
def resampling(x, new_size):
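    # bilinearly resample a 2-D (H, W) or 3-D (N, H, W) numpy array to new_size and return a numpy array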
l = len(x.shape)
if l == 2:
x = torch.from_numpy(x).type(torch.FloatTensor).unsqueeze(0).unsqueeze(1)
if l == 3:
x = torch.from_numpy(x).type(torch.FloatTensor).unsqueeze(1)
x = nn.functional.interpolate(
input=x, size=new_size, mode='bilinear', align_corners=True).numpy()
return np.squeeze(x)
def mask_overlap(img, mask):
img = np.concatenate([np.expand_dims(img, 2)]*3, 2)
img[:,:,0] = np.multiply(img[:,:,0], 1 - mask)
imagesc(img)
def to_8bit(x):
if type(x) == torch.Tensor:
x = (x / x.max() * 255).numpy().astype(np.uint8)
else:
x = (x / x.max() * 255).astype(np.uint8)
if len(x.shape) == 2:
x = np.concatenate([np.expand_dims(x, 2)]*3, 2)
return x
def imagesc(x, show=True, save=None):
if isinstance(x, list):
x = [to_8bit(y) for y in x]
x = np.concatenate(x, 1)
x = Image.fromarray(x)
else:
x = x - x.min()
x = Image.fromarray(to_8bit(x))
if show:
x.show()
if save:
x.save(save)
def append_dict(x):
return [j for i in x for j in i]
def resize_and_crop(pilimg, scale):
dx = 32
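    # crop width and height down to multiples of 32 so later downsampling stages divide evenly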
w0 = pilimg.size[0]//dx * dx
h0 = pilimg.size[1]//dx * dx
pilimg = pilimg.crop((0, 0, w0, h0))
w = pilimg.size[0]
h = pilimg.size[1]
newW = int(w * scale)
newH = int(h * scale)
img = pilimg.resize((newW, newH))
return img
class imorphics_masks():
def __init__(self, adapt=None):
self.adapt = adapt
def load_masks(self, id, dir, fmt, scale):
if self.adapt is not None:
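            # translate the slash-separated id into the 'index_slice' naming used on disk, via the adapt lookup list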
id = str(self.adapt.index((int(id.split('/')[1]), id.split('/')[0])) + 1) + '_' + str(int(id.split('/')[2]))
raw_masks = []
for d in dir:
temp = []
for m in d:
x = Image.open(os.path.join(m, id + fmt)) # PIL
x = resize_and_crop(x, scale=scale) # PIL
x = np.array(x) # np.int32
temp.append(x.astype(np.float32)) # np.float32
raw_masks.append(temp)
out = np.expand_dims(self.assemble_masks(raw_masks), 0)
return out
def assemble_masks(self, raw_masks):
converted_masks = np.zeros(raw_masks[0][0].shape, np.long)
for i in range(len(raw_masks)):
for j in range(len(raw_masks[i])):
converted_masks[raw_masks[i][j] == 1] = i + 1
return converted_masks
def load_masks(id, dir, fmt, scale):
raw_masks = []
for d in dir:
temp = []
for m in d:
x = Image.open(os.path.join(m, id + fmt)) # PIL
x = resize_and_crop(x, scale=scale) # PIL
x = np.array(x) # np.int32
temp.append(x.astype(np.float32)) # np.float32
raw_masks.append(temp)
return raw_masks
def assemble_masks(raw_masks):
converted_masks = np.zeros(raw_masks[0][0].shape, np.long)
for i in range(len(raw_masks)):
for j in range(len(raw_masks[i])):
converted_masks[raw_masks[i][j] == 1] = i + 1
return converted_masks
| [
"numpy.multiply",
"numpy.zeros",
"numpy.expand_dims",
"PIL.Image.fromarray",
"numpy.array",
"numpy.squeeze",
"torch.nn.functional.interpolate",
"os.path.join",
"numpy.concatenate",
"torch.from_numpy"
] | [((450, 463), 'numpy.squeeze', 'np.squeeze', (['x'], {}), '(x)\n', (460, 463), True, 'import numpy as np\n'), ((568, 603), 'numpy.multiply', 'np.multiply', (['img[:, :, 0]', '(1 - mask)'], {}), '(img[:, :, 0], 1 - mask)\n', (579, 603), True, 'import numpy as np\n'), ((3024, 3064), 'numpy.zeros', 'np.zeros', (['raw_masks[0][0].shape', 'np.long'], {}), '(raw_masks[0][0].shape, np.long)\n', (3032, 3064), True, 'import numpy as np\n'), ((997, 1017), 'numpy.concatenate', 'np.concatenate', (['x', '(1)'], {}), '(x, 1)\n', (1011, 1017), True, 'import numpy as np\n'), ((1030, 1048), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (1045, 1048), False, 'from PIL import Image\n'), ((2363, 2403), 'numpy.zeros', 'np.zeros', (['raw_masks[0][0].shape', 'np.long'], {}), '(raw_masks[0][0].shape, np.long)\n', (2371, 2403), True, 'import numpy as np\n'), ((334, 424), 'torch.nn.functional.interpolate', 'nn.functional.interpolate', ([], {'input': 'x', 'size': 'new_size', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(input=x, size=new_size, mode='bilinear',\n align_corners=True)\n", (359, 424), True, 'import torch.nn as nn\n'), ((2831, 2842), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2839, 2842), True, 'import numpy as np\n'), ((521, 543), 'numpy.expand_dims', 'np.expand_dims', (['img', '(2)'], {}), '(img, 2)\n', (535, 543), True, 'import numpy as np\n'), ((2087, 2098), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2095, 2098), True, 'import numpy as np\n'), ((2726, 2751), 'os.path.join', 'os.path.join', (['m', '(id + fmt)'], {}), '(m, id + fmt)\n', (2738, 2751), False, 'import os, glob, torch, time\n'), ((840, 860), 'numpy.expand_dims', 'np.expand_dims', (['x', '(2)'], {}), '(x, 2)\n', (854, 860), True, 'import numpy as np\n'), ((1974, 1999), 'os.path.join', 'os.path.join', (['m', '(id + fmt)'], {}), '(m, id + fmt)\n', (1986, 1999), False, 'import os, glob, torch, time\n'), ((268, 287), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (284, 287), False, 'import os, glob, torch, time\n'), ((171, 190), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (187, 190), False, 'import os, glob, torch, time\n')] |
from random import random
import cv2
import numpy as np
import torchvision.transforms.functional as F
from PIL import Image, ImageOps
from albumentations import (
PadIfNeeded,
HorizontalFlip,
VerticalFlip,
CenterCrop,
Compose,
GridDistortion,
OpticalDistortion,
RandomCrop,
OneOf,
CLAHE,
RandomBrightnessContrast,
RandomGamma,
RandomScale
)
__all__ = ['to_tensor', 'random_flip_transform', 'random_crop_transform', 'random_scale_crop',
'medical_transform', 'real_world_transform']
def to_tensor(data):
image, label = data['image'], data['label']
image = F.to_tensor(image)
label = F.to_tensor(label)
return {'image': image, 'label': label}
def to_numpy(data):
image, label = data['image'], data['label']
image = np.array(image)
label = np.array(label)
label = label.reshape((*label.shape, 1))
return {'image': image, 'label': label}
def random_flip_transform(data):
image, label = data['image'], data['label']
# Random horizontal flipping
if random() > 0.5:
image = F.hflip(image)
label = F.hflip(label)
# Random vertical flipping
if random() > 0.5:
image = F.vflip(image)
label = F.vflip(label)
if random() > 0.5:
gamma = random() * 1 + 0.5
image = F.adjust_gamma(image, gamma)
if random() > 0.5:
contrast_factor = random() * 1 + 0.5
image = F.adjust_contrast(image, contrast_factor)
if random() > 0.5:
angle = random() * 20 - 10
translate = (0, 0)
scale = random() * 0.2 + 0.9
shear = 0
image = F.affine(image, angle, translate, scale, shear)
label = F.affine(label, angle, translate, scale, shear)
data = {'image': image, 'label': label}
return data
def random_crop_transform(img, label, transform_params):
width, height = img.size
padh = width - height if width > height else 0
padw = height - width if height > width else 0
img = ImageOps.expand(img, border=(padw // 2, padh // 2, padw // 2, padh // 2), fill=0)
label = ImageOps.expand(label, border=(padw // 2, padh // 2, padw // 2, padh // 2), fill=0)
oh, ow = transform_params
img = img.resize((ow, oh), Image.BILINEAR)
label = label.resize((ow, oh), Image.NEAREST)
    data = random_flip_transform({'image': img, 'label': label})
    img, label = data['image'], data['label']
return img, label
class random_scale_crop:
def __init__(self, output_size, scale_range=0.1, type='train'):
if isinstance(output_size, (tuple, list)):
self.output_size = output_size # (h, w)
else:
self.output_size = (output_size, output_size)
self.scale_range = scale_range
self.type = type
def __call__(self, data):
img, label = data['image'], data['label']
img = np.array(img)
label = np.array(label)
img_size = img.shape[0] if img.shape[0] < img.shape[1] else img.shape[1]
crop_size = self.output_size[0] if self.output_size[0] < self.output_size[1] else self.output_size[1]
scale = crop_size / img_size - 1
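        # RandomScale applies a factor of 1 + sampled scale, so this centers the range on crop_size / img_size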
if scale < 0:
scale_limit = (scale - self.scale_range, scale + self.scale_range)
else:
scale_limit = (-self.scale_range, scale + self.scale_range)
if self.type == 'train':
aug = Compose([
RandomScale(scale_limit=scale_limit, p=1),
PadIfNeeded(min_height=self.output_size[0], min_width=self.output_size[1],
border_mode=cv2.BORDER_CONSTANT, value=[0, 0, 0]),
OneOf([
RandomCrop(height=self.output_size[0], width=self.output_size[1], p=1),
CenterCrop(height=self.output_size[0], width=self.output_size[1], p=1)
], p=1),
])
elif self.type == 'valid':
aug = Compose([
PadIfNeeded(min_height=self.output_size[0], min_width=self.output_size[1],
border_mode=cv2.BORDER_CONSTANT, value=[0, 0, 0]),
CenterCrop(height=self.output_size[0], width=self.output_size[1], p=1)
])
data = aug(image=img, mask=label)
img, label = data['image'], data['mask']
if len(img.shape) == 2:
img = img.reshape((*img.shape, 1))
if len(label.shape) == 2:
label = label.reshape((*label.shape, 1))
data = {'image': img, 'label': label}
return data
class medical_transform:
def __init__(self, output_size, scale_range, type):
if isinstance(output_size, (tuple, list)):
self.size = output_size # (h, w)
else:
self.size = (output_size, output_size)
self.scale_range = scale_range
self.type = type
def __call__(self, data):
aug = random_scale_crop(output_size=self.size, scale_range=self.scale_range, type=self.type)
data = aug(data)
img, label = data['image'], data['label']
img = np.array(img)
label = np.array(label)
if self.type == 'train':
aug = Compose([
VerticalFlip(p=0.5),
HorizontalFlip(p=0.5),
OneOf([
GridDistortion(p=1),
OpticalDistortion(p=1, distort_limit=1, shift_limit=10)
], p=0.5),
RandomBrightnessContrast(p=0.5),
RandomGamma(p=0.5)
])
data = aug(image=img, mask=label)
img, label = data['image'], data['mask']
if len(img.shape) == 2:
img = img.reshape((*img.shape, 1))
if len(label.shape) == 2:
label = label.reshape((*label.shape, 1))
data = {'image': img, 'label': label}
return data
class real_world_transform:
def __init__(self, output_size, scale_range, type):
if isinstance(output_size, (tuple, list)):
self.size = output_size # (h, w)
else:
self.size = (output_size, output_size)
self.scale_range = scale_range
self.type = type
def __call__(self, data):
aug = random_scale_crop(output_size=self.size, scale_range=self.scale_range, type=self.type)
data = aug(data)
img, label = data['image'], data['label']
img = np.array(img)
label = np.array(label)
if self.type == 'train':
aug = Compose([
HorizontalFlip(p=0.25),
CLAHE(p=0.25),
RandomBrightnessContrast(p=0.25),
RandomGamma(p=0.25)
])
data = aug(image=img, mask=label)
img, label = data['image'], data['mask']
if len(img.shape) == 2:
img = img.reshape((*img.shape, 1))
if len(label.shape) == 2:
label = label.reshape((*label.shape, 1))
data = {'image': img, 'label': label}
return data
| [
"torchvision.transforms.functional.to_tensor",
"albumentations.RandomScale",
"torchvision.transforms.functional.affine",
"albumentations.HorizontalFlip",
"torchvision.transforms.functional.hflip",
"albumentations.CenterCrop",
"albumentations.OpticalDistortion",
"torchvision.transforms.functional.vflip... | [((629, 647), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['image'], {}), '(image)\n', (640, 647), True, 'import torchvision.transforms.functional as F\n'), ((660, 678), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['label'], {}), '(label)\n', (671, 678), True, 'import torchvision.transforms.functional as F\n'), ((805, 820), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (813, 820), True, 'import numpy as np\n'), ((833, 848), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (841, 848), True, 'import numpy as np\n'), ((2019, 2104), 'PIL.ImageOps.expand', 'ImageOps.expand', (['img'], {'border': '(padw // 2, padh // 2, padw // 2, padh // 2)', 'fill': '(0)'}), '(img, border=(padw // 2, padh // 2, padw // 2, padh // 2),\n fill=0)\n', (2034, 2104), False, 'from PIL import Image, ImageOps\n'), ((2113, 2200), 'PIL.ImageOps.expand', 'ImageOps.expand', (['label'], {'border': '(padw // 2, padh // 2, padw // 2, padh // 2)', 'fill': '(0)'}), '(label, border=(padw // 2, padh // 2, padw // 2, padh // 2),\n fill=0)\n', (2128, 2200), False, 'from PIL import Image, ImageOps\n'), ((1062, 1070), 'random.random', 'random', ([], {}), '()\n', (1068, 1070), False, 'from random import random\n'), ((1094, 1108), 'torchvision.transforms.functional.hflip', 'F.hflip', (['image'], {}), '(image)\n', (1101, 1108), True, 'import torchvision.transforms.functional as F\n'), ((1125, 1139), 'torchvision.transforms.functional.hflip', 'F.hflip', (['label'], {}), '(label)\n', (1132, 1139), True, 'import torchvision.transforms.functional as F\n'), ((1179, 1187), 'random.random', 'random', ([], {}), '()\n', (1185, 1187), False, 'from random import random\n'), ((1211, 1225), 'torchvision.transforms.functional.vflip', 'F.vflip', (['image'], {}), '(image)\n', (1218, 1225), True, 'import torchvision.transforms.functional as F\n'), ((1242, 1256), 'torchvision.transforms.functional.vflip', 'F.vflip', (['label'], {}), '(label)\n', (1249, 1256), True, 'import torchvision.transforms.functional as F\n'), ((1265, 1273), 'random.random', 'random', ([], {}), '()\n', (1271, 1273), False, 'from random import random\n'), ((1332, 1360), 'torchvision.transforms.functional.adjust_gamma', 'F.adjust_gamma', (['image', 'gamma'], {}), '(image, gamma)\n', (1346, 1360), True, 'import torchvision.transforms.functional as F\n'), ((1369, 1377), 'random.random', 'random', ([], {}), '()\n', (1375, 1377), False, 'from random import random\n'), ((1446, 1487), 'torchvision.transforms.functional.adjust_contrast', 'F.adjust_contrast', (['image', 'contrast_factor'], {}), '(image, contrast_factor)\n', (1463, 1487), True, 'import torchvision.transforms.functional as F\n'), ((1496, 1504), 'random.random', 'random', ([], {}), '()\n', (1502, 1504), False, 'from random import random\n'), ((1645, 1692), 'torchvision.transforms.functional.affine', 'F.affine', (['image', 'angle', 'translate', 'scale', 'shear'], {}), '(image, angle, translate, scale, shear)\n', (1653, 1692), True, 'import torchvision.transforms.functional as F\n'), ((1709, 1756), 'torchvision.transforms.functional.affine', 'F.affine', (['label', 'angle', 'translate', 'scale', 'shear'], {}), '(label, angle, translate, scale, shear)\n', (1717, 1756), True, 'import torchvision.transforms.functional as F\n'), ((2849, 2862), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2857, 2862), True, 'import numpy as np\n'), ((2879, 2894), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2887, 2894), True, 
'import numpy as np\n'), ((5047, 5060), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5055, 5060), True, 'import numpy as np\n'), ((5077, 5092), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (5085, 5092), True, 'import numpy as np\n'), ((6367, 6380), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (6375, 6380), True, 'import numpy as np\n'), ((6397, 6412), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (6405, 6412), True, 'import numpy as np\n'), ((1297, 1305), 'random.random', 'random', ([], {}), '()\n', (1303, 1305), False, 'from random import random\n'), ((1411, 1419), 'random.random', 'random', ([], {}), '()\n', (1417, 1419), False, 'from random import random\n'), ((1528, 1536), 'random.random', 'random', ([], {}), '()\n', (1534, 1536), False, 'from random import random\n'), ((1590, 1598), 'random.random', 'random', ([], {}), '()\n', (1596, 1598), False, 'from random import random\n'), ((3393, 3434), 'albumentations.RandomScale', 'RandomScale', ([], {'scale_limit': 'scale_limit', 'p': '(1)'}), '(scale_limit=scale_limit, p=1)\n', (3404, 3434), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((3452, 3580), 'albumentations.PadIfNeeded', 'PadIfNeeded', ([], {'min_height': 'self.output_size[0]', 'min_width': 'self.output_size[1]', 'border_mode': 'cv2.BORDER_CONSTANT', 'value': '[0, 0, 0]'}), '(min_height=self.output_size[0], min_width=self.output_size[1],\n border_mode=cv2.BORDER_CONSTANT, value=[0, 0, 0])\n', (3463, 3580), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((5171, 5190), 'albumentations.VerticalFlip', 'VerticalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (5183, 5190), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((5208, 5229), 'albumentations.HorizontalFlip', 'HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (5222, 5229), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((5415, 5446), 'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'p': '(0.5)'}), '(p=0.5)\n', (5439, 5446), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((5464, 5482), 'albumentations.RandomGamma', 'RandomGamma', ([], {'p': '(0.5)'}), '(p=0.5)\n', (5475, 5482), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((6491, 6513), 'albumentations.HorizontalFlip', 'HorizontalFlip', ([], {'p': '(0.25)'}), '(p=0.25)\n', (6505, 6513), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((6531, 6544), 
'albumentations.CLAHE', 'CLAHE', ([], {'p': '(0.25)'}), '(p=0.25)\n', (6536, 6544), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((6562, 6594), 'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'p': '(0.25)'}), '(p=0.25)\n', (6586, 6594), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((6612, 6631), 'albumentations.RandomGamma', 'RandomGamma', ([], {'p': '(0.25)'}), '(p=0.25)\n', (6623, 6631), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((3932, 4060), 'albumentations.PadIfNeeded', 'PadIfNeeded', ([], {'min_height': 'self.output_size[0]', 'min_width': 'self.output_size[1]', 'border_mode': 'cv2.BORDER_CONSTANT', 'value': '[0, 0, 0]'}), '(min_height=self.output_size[0], min_width=self.output_size[1],\n border_mode=cv2.BORDER_CONSTANT, value=[0, 0, 0])\n', (3943, 4060), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((4102, 4172), 'albumentations.CenterCrop', 'CenterCrop', ([], {'height': 'self.output_size[0]', 'width': 'self.output_size[1]', 'p': '(1)'}), '(height=self.output_size[0], width=self.output_size[1], p=1)\n', (4112, 4172), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((3650, 3720), 'albumentations.RandomCrop', 'RandomCrop', ([], {'height': 'self.output_size[0]', 'width': 'self.output_size[1]', 'p': '(1)'}), '(height=self.output_size[0], width=self.output_size[1], p=1)\n', (3660, 3720), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((3742, 3812), 'albumentations.CenterCrop', 'CenterCrop', ([], {'height': 'self.output_size[0]', 'width': 'self.output_size[1]', 'p': '(1)'}), '(height=self.output_size[0], width=self.output_size[1], p=1)\n', (3752, 3812), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((5275, 5294), 'albumentations.GridDistortion', 'GridDistortion', ([], {'p': '(1)'}), '(p=1)\n', (5289, 5294), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, RandomBrightnessContrast, RandomGamma, RandomScale\n'), ((5316, 5371), 'albumentations.OpticalDistortion', 'OpticalDistortion', ([], {'p': '(1)', 'distort_limit': '(1)', 'shift_limit': '(10)'}), '(p=1, distort_limit=1, shift_limit=10)\n', (5333, 5371), False, 'from albumentations import PadIfNeeded, HorizontalFlip, VerticalFlip, CenterCrop, Compose, GridDistortion, OpticalDistortion, RandomCrop, OneOf, CLAHE, 
RandomBrightnessContrast, RandomGamma, RandomScale\n')] |
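A self-contained sketch of the scale, pad, and crop albumentations pipeline that the extraction tuples above record; the 256x256 target size, the scale limit, and the dummy input shapes are illustrative assumptions, not values taken from the record.

import cv2
import numpy as np
from albumentations import Compose, RandomScale, PadIfNeeded, RandomCrop

# Scale -> pad -> crop, matching the recorded albumentations calls.
aug = Compose([
    RandomScale(scale_limit=0.1, p=1),  # scale_limit is an assumption
    PadIfNeeded(min_height=256, min_width=256,
                border_mode=cv2.BORDER_CONSTANT, value=[0, 0, 0]),
    RandomCrop(height=256, width=256, p=1),
])
sample = aug(image=np.zeros((200, 300, 3), np.uint8),
             mask=np.zeros((200, 300), np.uint8))
print(sample['image'].shape, sample['mask'].shape)  # (256, 256, 3) (256, 256)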
from fastapi import FastAPI
from pydantic import BaseModel, create_model
import numpy as np
from tensorflow.keras.models import load_model
import json
class FastMlOps(FastAPI):
def helloTao(self):
print("Hello Tao")
def createAPI(self, config):
        method, path, inputModel, responseModel, model = (config.get(key) for key in ['method', 'path', 'inputModel', 'responseModel', 'model'])
inputModel = self.addDefaultValue(inputModel)
inputModel = create_model('BaseModel', **inputModel)
mlModel = load_model(model)
if method == 'GET' :
pass
elif method == 'POST' :
@FastAPI.post(self, path=path)
async def createAPI(data: inputModel):
category, confidence = await self.predict(data.__dict__, mlModel)
res = {'class': category, 'confidence':confidence}
return json.dumps(res)
async def predict(self, inputs, model):
X = np.array([list(inputs.values())])
pred = model.predict(X)
res = np.argmax(pred, axis=1)[0]
confidence = float(pred[0][res])
return int(res) , float(confidence)
def addDefaultValue(self, inputModel):
for key , val in inputModel.items() :
if val == int:
inputModel[key] = 0
elif val == str:
inputModel[key] = ''
elif val == float:
inputModel[key] = 0.0
return inputModel
| [
"tensorflow.keras.models.load_model",
"numpy.argmax",
"fastapi.FastAPI.post",
"json.dumps",
"pydantic.create_model"
] | [((486, 525), 'pydantic.create_model', 'create_model', (['"""BaseModel"""'], {}), "('BaseModel', **inputModel)\n", (498, 525), False, 'from pydantic import BaseModel, create_model\n'), ((544, 561), 'tensorflow.keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (554, 561), False, 'from tensorflow.keras.models import load_model\n'), ((1065, 1088), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (1074, 1088), True, 'import numpy as np\n'), ((653, 682), 'fastapi.FastAPI.post', 'FastAPI.post', (['self'], {'path': 'path'}), '(self, path=path)\n', (665, 682), False, 'from fastapi import FastAPI\n'), ((906, 921), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (916, 921), False, 'import json\n')] |
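A hedged usage sketch for the FastMlOps class above. The module name fast_mlops.py, the feature names, and the Keras model file iris_classifier.h5 are assumptions for illustration only.

import uvicorn
from fast_mlops import FastMlOps  # assumes the class above is saved as fast_mlops.py

app = FastMlOps()
app.createAPI({
    'method': 'POST',
    'path': '/predict',
    'inputModel': {'sepal_length': float, 'sepal_width': float,
                   'petal_length': float, 'petal_width': float},
    'responseModel': None,
    'model': 'iris_classifier.h5',  # hypothetical saved Keras model
})

if __name__ == '__main__':
    uvicorn.run(app, host='127.0.0.1', port=8000)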
import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.signal import convolve
from pylops.waveeqprocessing.marchenko import directwave
# Test data
inputfile2d = 'testdata/marchenko/input.npz'
inputfile3d = 'testdata/marchenko/direct3D.npz'
# Parameters
vel = 2400.0 # velocity
def test_direct2D():
"""Check consistency of analytical 2D Green's function with FD modelling
"""
inputdata = np.load(inputfile2d)
# Receivers
r = inputdata['r']
nr = r.shape[1]
# Virtual points
vs = inputdata['vs']
# Time axis
t = inputdata['t']
dt, nt = t[1] - t[0], len(t)
# FD GF
G0FD = inputdata['G0sub']
wav = inputdata['wav']
wav_c = np.argmax(wav)
G0FD = np.apply_along_axis(convolve, 0, G0FD, wav, mode='full')
G0FD = G0FD[wav_c:][:nt]
# Analytic GF
trav = np.sqrt((vs[0] - r[0]) ** 2 + (vs[1] - r[1]) ** 2) / vel
G0ana = directwave(wav, trav, nt, dt, nfft=nt, derivative=False)
# Differentiate to get same response as in FD modelling
G0ana = np.diff(G0ana, axis=0)
G0ana = np.vstack([G0ana, np.zeros(nr)])
assert_array_almost_equal(G0FD / np.max(np.abs(G0FD)),
G0ana / np.max(np.abs(G0ana)), decimal=1)
def test_direct3D():
"""Check consistency of analytical 3D Green's function with FD modelling
"""
inputdata = np.load(inputfile3d)
# Receivers
r = inputdata['r']
nr = r.shape[0]
# Virtual points
vs = inputdata['vs']
# Time axis
t = inputdata['t']
dt, nt = t[1] - t[0], len(t)
# FD GF
G0FD = inputdata['G0'][:, :nr]
wav = inputdata['wav']
wav_c = np.argmax(wav)
G0FD = np.apply_along_axis(convolve, 0, G0FD, wav, mode='full')
G0FD = G0FD[wav_c:][:nt]
# Analytic GF
dist = np.sqrt((vs[0] - r[:, 0]) ** 2 +
(vs[1] - r[:, 1]) ** 2 +
(vs[2] - r[:, 2]) ** 2)
trav = dist / vel
G0ana = directwave(wav, trav, nt, dt, nfft=nt, dist=dist,
kind='3d', derivative=False)
# Differentiate to get same response as in FD modelling
G0ana = np.diff(G0ana, axis=0)
G0ana = np.vstack([G0ana, np.zeros(nr)])
assert_array_almost_equal(G0FD / np.max(np.abs(G0FD)),
G0ana / np.max(np.abs(G0ana)), decimal=1)
| [
"numpy.load",
"numpy.abs",
"numpy.argmax",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.diff",
"pylops.waveeqprocessing.marchenko.directwave",
"numpy.sqrt"
] | [((432, 452), 'numpy.load', 'np.load', (['inputfile2d'], {}), '(inputfile2d)\n', (439, 452), True, 'import numpy as np\n'), ((715, 729), 'numpy.argmax', 'np.argmax', (['wav'], {}), '(wav)\n', (724, 729), True, 'import numpy as np\n'), ((742, 798), 'numpy.apply_along_axis', 'np.apply_along_axis', (['convolve', '(0)', 'G0FD', 'wav'], {'mode': '"""full"""'}), "(convolve, 0, G0FD, wav, mode='full')\n", (761, 798), True, 'import numpy as np\n'), ((927, 983), 'pylops.waveeqprocessing.marchenko.directwave', 'directwave', (['wav', 'trav', 'nt', 'dt'], {'nfft': 'nt', 'derivative': '(False)'}), '(wav, trav, nt, dt, nfft=nt, derivative=False)\n', (937, 983), False, 'from pylops.waveeqprocessing.marchenko import directwave\n'), ((1057, 1079), 'numpy.diff', 'np.diff', (['G0ana'], {'axis': '(0)'}), '(G0ana, axis=0)\n', (1064, 1079), True, 'import numpy as np\n'), ((1381, 1401), 'numpy.load', 'np.load', (['inputfile3d'], {}), '(inputfile3d)\n', (1388, 1401), True, 'import numpy as np\n'), ((1669, 1683), 'numpy.argmax', 'np.argmax', (['wav'], {}), '(wav)\n', (1678, 1683), True, 'import numpy as np\n'), ((1696, 1752), 'numpy.apply_along_axis', 'np.apply_along_axis', (['convolve', '(0)', 'G0FD', 'wav'], {'mode': '"""full"""'}), "(convolve, 0, G0FD, wav, mode='full')\n", (1715, 1752), True, 'import numpy as np\n'), ((1812, 1897), 'numpy.sqrt', 'np.sqrt', (['((vs[0] - r[:, 0]) ** 2 + (vs[1] - r[:, 1]) ** 2 + (vs[2] - r[:, 2]) ** 2)'], {}), '((vs[0] - r[:, 0]) ** 2 + (vs[1] - r[:, 1]) ** 2 + (vs[2] - r[:, 2]) **\n 2)\n', (1819, 1897), True, 'import numpy as np\n'), ((1966, 2044), 'pylops.waveeqprocessing.marchenko.directwave', 'directwave', (['wav', 'trav', 'nt', 'dt'], {'nfft': 'nt', 'dist': 'dist', 'kind': '"""3d"""', 'derivative': '(False)'}), "(wav, trav, nt, dt, nfft=nt, dist=dist, kind='3d', derivative=False)\n", (1976, 2044), False, 'from pylops.waveeqprocessing.marchenko import directwave\n'), ((2141, 2163), 'numpy.diff', 'np.diff', (['G0ana'], {'axis': '(0)'}), '(G0ana, axis=0)\n', (2148, 2163), True, 'import numpy as np\n'), ((858, 908), 'numpy.sqrt', 'np.sqrt', (['((vs[0] - r[0]) ** 2 + (vs[1] - r[1]) ** 2)'], {}), '((vs[0] - r[0]) ** 2 + (vs[1] - r[1]) ** 2)\n', (865, 908), True, 'import numpy as np\n'), ((1110, 1122), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (1118, 1122), True, 'import numpy as np\n'), ((2194, 2206), 'numpy.zeros', 'np.zeros', (['nr'], {}), '(nr)\n', (2202, 2206), True, 'import numpy as np\n'), ((1170, 1182), 'numpy.abs', 'np.abs', (['G0FD'], {}), '(G0FD)\n', (1176, 1182), True, 'import numpy as np\n'), ((1230, 1243), 'numpy.abs', 'np.abs', (['G0ana'], {}), '(G0ana)\n', (1236, 1243), True, 'import numpy as np\n'), ((2254, 2266), 'numpy.abs', 'np.abs', (['G0FD'], {}), '(G0FD)\n', (2260, 2266), True, 'import numpy as np\n'), ((2314, 2327), 'numpy.abs', 'np.abs', (['G0ana'], {}), '(G0ana)\n', (2320, 2327), True, 'import numpy as np\n')] |
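A hedged standalone sketch of the directwave call the tests above exercise, with a hand-rolled Ricker wavelet; the peak frequency, receiver geometry, and velocity are illustrative assumptions.

import numpy as np
from pylops.waveeqprocessing.marchenko import directwave

nt, dt = 512, 0.004
t_wav = np.arange(-0.1, 0.1, dt)
f0 = 20.0  # assumed peak frequency
wav = (1 - 2 * (np.pi * f0 * t_wav) ** 2) * np.exp(-(np.pi * f0 * t_wav) ** 2)
trav = np.linspace(200.0, 800.0, 64) / 2400.0  # assumed source-receiver distances / velocity
G0 = directwave(wav, trav, nt, dt, nfft=nt, derivative=False)
print(G0.shape)  # (nt, number of receivers)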
from sunpy.map import Map
from sunpy.instr.aia import aiaprep as AP
from skimage.transform import resize as R
import numpy as np
import os
from pandas import read_csv
class sdo_prep:
def __init__(self, resize=False, isize=None, rsun=None):
if resize == True:
            if isize:
                if type(isize) != int:
                    raise TypeError('isize must be an integer')
                elif isize % 2 != 0:
                    raise ValueError('isize must be even')
                else:
                    self.isize = isize
            else:
                raise NotImplementedError('resize=True but no isize was given')
            if rsun:
                if type(rsun) != int:
                    raise TypeError('rsun must be an integer')
                else:
                    self.rsun = rsun
            else:
                raise NotImplementedError('resize=True but no rsun was given')
self.resize = resize
def t_rec_to_date(self, t_rec):
year = t_rec[0:4]
month = t_rec[5:7]
day = t_rec[8:10]
hour = t_rec[11:13]
date = '%s-%s-%s-%s-00-00'%(year, month, day, hour)
return date
def from_sunpy(self, file_):
M = AP(Map(file_))
meta = M.meta
data = M.data
return meta, data
def resize_by_pixel(self, meta, data, pvalue=0):
isize_orig = meta['NAXIS1']
rsun_orig = meta['R_SUN']
ratio = self.rsun/rsun_orig
isize_new = int(isize_orig*ratio)
if isize_new % 2 != 0 :
isize_new += 1
pcsize = (self.isize - isize_new)//2
data_new = R(data, (isize_new, isize_new), order = 1, mode='constant', preserve_range=True)
if pcsize > 0 :
data_new = np.pad(data_new, pcsize, mode='constant', constant_values=pvalue)
        elif pcsize < 0:
            # Center-crop when the rescaled image is larger than the target;
            # -pcsize pixels (a positive count) are trimmed from each side.
            data_new = data_new[-pcsize:pcsize, -pcsize:pcsize]
else :
pass
meta['NAXIS1'] = self.isize
meta['NAXIS2'] = self.isize
meta['LVL_NUM'] = 2.0
meta['CDELT1'] = meta['cdelt1']/ratio
meta['CRPIX1'] = self.isize//2 + 0.5
meta['CDELT2'] = meta['cdelt2']/ratio
meta['CRPIX2'] = self.isize//2 + 0.5
meta['R_SUN'] = self.rsun
return meta, data_new
class hmi_prep(sdo_prep):
def __init__(self, resize=False, isize=None, rsun=None):
super(hmi_prep, self).__init__(resize, isize, rsun)
X = np.arange(4096)[:, None]
Y = np.arange(4096)[None, :]
self.XY = np.sqrt((X-2048.)**2. + (Y-2048.)**2.)
def cut_radius(self, meta, data):
r_sun = meta['R_SUN']
Z = np.where(self.XY > r_sun)
data[Z] = -5000.
meta['LVL_NUM'] = 1.5
return meta, data
def __call__(self, file_):
meta1, data1 = self.from_sunpy(file_)
meta1, data1 = self.cut_radius(meta1, data1)
result = {'lev1.5':{'meta':meta1, 'data':data1}, 'lev2.0':None}
if self.resize == True :
meta2, data2 = self.resize_by_pixel(meta1.copy(), data1.copy(), pvalue=-5000)
result['lev2.0'] = {'meta':meta2, 'data':data2}
return result
class aia_prep(sdo_prep):
def __init__(self, csv_degradation='./aia_degradation_v8.csv', resize=False, isize=None, rsun=None):
super(aia_prep, self).__init__(resize, isize, rsun)
if os.path.exists(csv_degradation):
self.db_degradation = read_csv(csv_degradation)
def degradation(self, meta, data):
wavelnth = meta['WAVELNTH']
if wavelnth in (94, 131, 171 ,193, 211, 304, 335):
t_rec = meta['T_REC']
date = self.t_rec_to_date(t_rec)
w = np.where(self.db_degradation['date'] == date)
dg_factor = self.db_degradation[str(wavelnth)][w[0][0]]
elif wavelnth in (1600, 1700, 4500):
dg_factor = 1.
data = data * dg_factor
return meta, data
def norm_exposure(self, meta, data):
exptime = meta['EXPTIME']
data = data/exptime
meta['PIXLUNIT'] = 'DN/sec'
meta['LVL_NUM'] = 1.5
meta['EXPTIME'] = 1.0
return meta, data
def __call__(self, file_):
meta1, data1 = self.from_sunpy(file_)
meta1, data1 = self.norm_exposure(meta1, data1)
meta1, data1 = self.degradation(meta1, data1)
result = {'lev1.5':{'meta':meta1, 'data':data1}, 'lev2.0':None}
if self.resize == True :
            meta2, data2 = self.resize_by_pixel(meta1.copy(), data1.copy())  # self.resize is a flag; call the resize helper
result['lev2.0'] = {'meta':meta2, 'data':data2}
return result
| [
"numpy.pad",
"sunpy.map.Map",
"pandas.read_csv",
"os.path.exists",
"numpy.where",
"skimage.transform.resize",
"numpy.arange",
"numpy.sqrt"
] | [((1651, 1729), 'skimage.transform.resize', 'R', (['data', '(isize_new, isize_new)'], {'order': '(1)', 'mode': '"""constant"""', 'preserve_range': '(True)'}), "(data, (isize_new, isize_new), order=1, mode='constant', preserve_range=True)\n", (1652, 1729), True, 'from skimage.transform import resize as R\n'), ((2555, 2605), 'numpy.sqrt', 'np.sqrt', (['((X - 2048.0) ** 2.0 + (Y - 2048.0) ** 2.0)'], {}), '((X - 2048.0) ** 2.0 + (Y - 2048.0) ** 2.0)\n', (2562, 2605), True, 'import numpy as np\n'), ((2675, 2700), 'numpy.where', 'np.where', (['(self.XY > r_sun)'], {}), '(self.XY > r_sun)\n', (2683, 2700), True, 'import numpy as np\n'), ((3393, 3424), 'os.path.exists', 'os.path.exists', (['csv_degradation'], {}), '(csv_degradation)\n', (3407, 3424), False, 'import os\n'), ((1244, 1254), 'sunpy.map.Map', 'Map', (['file_'], {}), '(file_)\n', (1247, 1254), False, 'from sunpy.map import Map\n'), ((1779, 1844), 'numpy.pad', 'np.pad', (['data_new', 'pcsize'], {'mode': '"""constant"""', 'constant_values': 'pvalue'}), "(data_new, pcsize, mode='constant', constant_values=pvalue)\n", (1785, 1844), True, 'import numpy as np\n'), ((2475, 2490), 'numpy.arange', 'np.arange', (['(4096)'], {}), '(4096)\n', (2484, 2490), True, 'import numpy as np\n'), ((2512, 2527), 'numpy.arange', 'np.arange', (['(4096)'], {}), '(4096)\n', (2521, 2527), True, 'import numpy as np\n'), ((3460, 3485), 'pandas.read_csv', 'read_csv', (['csv_degradation'], {}), '(csv_degradation)\n', (3468, 3485), False, 'from pandas import read_csv\n'), ((3716, 3761), 'numpy.where', 'np.where', (["(self.db_degradation['date'] == date)"], {}), "(self.db_degradation['date'] == date)\n", (3724, 3761), True, 'import numpy as np\n')] |
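A hedged usage sketch for the preprocessing classes above; the FITS filename is hypothetical and the module name sdo_prep.py is an assumption.

from sdo_prep import aia_prep  # assumes the module above is saved as sdo_prep.py

prep = aia_prep(csv_degradation='./aia_degradation_v8.csv',
                resize=True, isize=1024, rsun=400)
result = prep('aia_171_2014-01-01T00_00_00.fits')  # hypothetical level-1 AIA file
meta, data = result['lev1.5']['meta'], result['lev1.5']['data']
if result['lev2.0'] is not None:
    meta2, data2 = result['lev2.0']['meta'], result['lev2.0']['data']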
import numpy as np
import matplotlib.pyplot as plt
from robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation
from robolearn.old_utils.trajectory_interpolators import spline_interpolation
N = 100
xf = np.array([2, 3, 4, 1])
x0 = np.array([2, 2, 2, 2])
dxf = np.array([0, 0, 0, 0])*N
dx0 = np.array([0, 0, 0, 0])*N
ddxf = np.array([2, 0, 0, 0])*N**2
ddx0 = np.array([0, 0, 0, 0])*N**2
#x, dx, ddx = polynomial5_interpolation(N, xf, x0, dxf, dx0, ddxf, ddx0)
#
#for ii in range(xf.size):
# plt.plot(ddx[:, ii])
#plt.show()
N = 100
time_points = np.array([0, 5, 7, 10])
via_points = np.array([[2, 7, 8, 10],
[7, 1, 3, 2],
[1, 2, 4, 9],
[4, 1, 4, 4]])
x = spline_interpolation(N, time_points, via_points)
for ii in range(via_points.shape[1]):
plt.plot(x[:, ii])
plt.show()
| [
"numpy.array",
"matplotlib.pyplot.plot",
"robolearn.old_utils.trajectory_interpolators.spline_interpolation",
"matplotlib.pyplot.show"
] | [((226, 248), 'numpy.array', 'np.array', (['[2, 3, 4, 1]'], {}), '([2, 3, 4, 1])\n', (234, 248), True, 'import numpy as np\n'), ((254, 276), 'numpy.array', 'np.array', (['[2, 2, 2, 2]'], {}), '([2, 2, 2, 2])\n', (262, 276), True, 'import numpy as np\n'), ((572, 595), 'numpy.array', 'np.array', (['[0, 5, 7, 10]'], {}), '([0, 5, 7, 10])\n', (580, 595), True, 'import numpy as np\n'), ((609, 676), 'numpy.array', 'np.array', (['[[2, 7, 8, 10], [7, 1, 3, 2], [1, 2, 4, 9], [4, 1, 4, 4]]'], {}), '([[2, 7, 8, 10], [7, 1, 3, 2], [1, 2, 4, 9], [4, 1, 4, 4]])\n', (617, 676), True, 'import numpy as np\n'), ((751, 799), 'robolearn.old_utils.trajectory_interpolators.spline_interpolation', 'spline_interpolation', (['N', 'time_points', 'via_points'], {}), '(N, time_points, via_points)\n', (771, 799), False, 'from robolearn.old_utils.trajectory_interpolators import spline_interpolation\n'), ((862, 872), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (870, 872), True, 'import matplotlib.pyplot as plt\n'), ((283, 305), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (291, 305), True, 'import numpy as np\n'), ((314, 336), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (322, 336), True, 'import numpy as np\n'), ((346, 368), 'numpy.array', 'np.array', (['[2, 0, 0, 0]'], {}), '([2, 0, 0, 0])\n', (354, 368), True, 'import numpy as np\n'), ((381, 403), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (389, 403), True, 'import numpy as np\n'), ((843, 861), 'matplotlib.pyplot.plot', 'plt.plot', (['x[:, ii]'], {}), '(x[:, ii])\n', (851, 861), True, 'import matplotlib.pyplot as plt\n')] |
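A hedged sketch of the quintic interpolation that the script above leaves commented out; the call signature and the boundary values are taken from the commented lines themselves.

import numpy as np
import matplotlib.pyplot as plt
from robolearn.old_utils.trajectory_interpolators import polynomial5_interpolation

N = 100
x0 = np.array([2., 2., 2., 2.])
xf = np.array([2., 3., 4., 1.])
dx0 = dxf = np.zeros(4) * N          # zero boundary velocities
ddx0 = np.zeros(4) * N ** 2            # zero initial acceleration
ddxf = np.array([2., 0., 0., 0.]) * N ** 2
x, dx, ddx = polynomial5_interpolation(N, xf, x0, dxf, dx0, ddxf, ddx0)
for ii in range(xf.size):
    plt.plot(ddx[:, ii])
plt.show()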
import os
from numpy.lib.twodim_base import mask_indices
import torch
import numpy as np
import glob
FAR_PLANE = 3
NEAR_PLANE = 0.1
def work(name):
if not os.path.exists(os.path.join(name, 'online_masks')):
os.mkdir(os.path.join(name, 'online_masks'))
data3d = torch.load(os.path.join(name, name + '_instance.pth'))
data3d = data3d['coords']
pose_files = glob.glob(os.path.join(name, 'pose', '*.txt'))
pose_files.sort(key=lambda x: int(x[x.rfind('/')+1:-4]))
all_mask = torch.zeros(data3d[0].shape[0]).byte()
for idx, pose_file in enumerate(pose_files):
if idx % 10 != 0:
continue
        pose = np.loadtxt(pose_file)  # load this iteration's pose file, not the whole list
pose = torch.from_numpy(pose)
intrinsic = torch.tensor([[577.590698, 0, 318.905426],
[0, 578.729797, 242.683609],
[0,0,1]])
R = pose[:3,:3]
t = pose[:3, 3]
points_in_camera = torch.matmul(intrinsic, torch.matmul(R.t(), data3d[0].t() - t.view(-1, 1))).t()
inside_mask = (points_in_camera[:,2] < FAR_PLANE) & (points_in_camera[:,2] > NEAR_PLANE)
points_in_camera = points_in_camera / points_in_camera[:,2].view(-1, 1)
# print(points_in_camera[inside_mask])
inside_mask = inside_mask & (points_in_camera[:,0] < 640) & (points_in_camera[:,0] >= 0) & (points_in_camera[:,1] < 480) & (points_in_camera[:,1] >= 0)
masks = []
all_mask = all_mask | inside_mask
        # Check the highest coverage threshold first; with the original
        # ascending order the 50% and 75% branches were unreachable.
        if torch.sum(all_mask, dim=0) >= all_mask.shape[0] * 0.75:
            mask3 = all_mask.clone()
            print('mask3: ', torch.sum(all_mask, dim=0) / all_mask.shape[0])
        elif torch.sum(all_mask, dim=0) >= all_mask.shape[0] * 0.5:
            mask2 = all_mask.clone()
            print('mask2: ', torch.sum(all_mask, dim=0) / all_mask.shape[0])
        elif torch.sum(all_mask, dim=0) >= all_mask.shape[0] * 0.25:
            mask1 = all_mask.clone()
            print('mask1: ', torch.sum(all_mask, dim=0) / all_mask.shape[0])
mask = torch.stack([mask1, mask2, mask3], dim=0)
torch.save(mask, os.path.join(name, 'm25_50_75.pth'), _use_new_zipfile_serialization=False)
print(name)
work('scene0000_00')
| [
"torch.stack",
"torch.sum",
"numpy.loadtxt",
"torch.zeros",
"os.path.join",
"torch.tensor",
"torch.from_numpy"
] | [((291, 333), 'os.path.join', 'os.path.join', (['name', "(name + '_instance.pth')"], {}), "(name, name + '_instance.pth')\n", (303, 333), False, 'import os\n'), ((392, 427), 'os.path.join', 'os.path.join', (['name', '"""pose"""', '"""*.txt"""'], {}), "(name, 'pose', '*.txt')\n", (404, 427), False, 'import os\n'), ((660, 682), 'numpy.loadtxt', 'np.loadtxt', (['pose_files'], {}), '(pose_files)\n', (670, 682), True, 'import numpy as np\n'), ((698, 720), 'torch.from_numpy', 'torch.from_numpy', (['pose'], {}), '(pose)\n', (714, 720), False, 'import torch\n'), ((741, 828), 'torch.tensor', 'torch.tensor', (['[[577.590698, 0, 318.905426], [0, 578.729797, 242.683609], [0, 0, 1]]'], {}), '([[577.590698, 0, 318.905426], [0, 578.729797, 242.683609], [0,\n 0, 1]])\n', (753, 828), False, 'import torch\n'), ((2049, 2090), 'torch.stack', 'torch.stack', (['[mask1, mask2, mask3]'], {'dim': '(0)'}), '([mask1, mask2, mask3], dim=0)\n', (2060, 2090), False, 'import torch\n'), ((177, 211), 'os.path.join', 'os.path.join', (['name', '"""online_masks"""'], {}), "(name, 'online_masks')\n", (189, 211), False, 'import os\n'), ((231, 265), 'os.path.join', 'os.path.join', (['name', '"""online_masks"""'], {}), "(name, 'online_masks')\n", (243, 265), False, 'import os\n'), ((505, 536), 'torch.zeros', 'torch.zeros', (['data3d[0].shape[0]'], {}), '(data3d[0].shape[0])\n', (516, 536), False, 'import torch\n'), ((1498, 1524), 'torch.sum', 'torch.sum', (['all_mask'], {'dim': '(0)'}), '(all_mask, dim=0)\n', (1507, 1524), False, 'import torch\n'), ((2116, 2151), 'os.path.join', 'os.path.join', (['name', '"""m25_50_75.pth"""'], {}), "(name, 'm25_50_75.pth')\n", (2128, 2151), False, 'import os\n'), ((1681, 1707), 'torch.sum', 'torch.sum', (['all_mask'], {'dim': '(0)'}), '(all_mask, dim=0)\n', (1690, 1707), False, 'import torch\n'), ((1620, 1646), 'torch.sum', 'torch.sum', (['all_mask'], {'dim': '(0)'}), '(all_mask, dim=0)\n', (1629, 1646), False, 'import torch\n'), ((1864, 1890), 'torch.sum', 'torch.sum', (['all_mask'], {'dim': '(0)'}), '(all_mask, dim=0)\n', (1873, 1890), False, 'import torch\n'), ((1803, 1829), 'torch.sum', 'torch.sum', (['all_mask'], {'dim': '(0)'}), '(all_mask, dim=0)\n', (1812, 1829), False, 'import torch\n'), ((1986, 2012), 'torch.sum', 'torch.sum', (['all_mask'], {'dim': '(0)'}), '(all_mask, dim=0)\n', (1995, 2012), False, 'import torch\n')] |
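A hedged sketch that reads back the coverage masks work() above writes; the scene name mirrors the record's own call.

import os
import torch

mask = torch.load(os.path.join('scene0000_00', 'm25_50_75.pth'))
# One row per coverage milestone (25% / 50% / 75% of the scene points).
print(mask.shape, mask.sum(dim=1))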
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
from unittest import TestCase
import scipy.sparse.linalg
import numpy.linalg
import numpy
from amfe.linalg.linearsolvers import *
class TestPardisoSolver(TestCase):
def test_solve(self):
        A = numpy.array([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]], dtype=float)
A = scipy.sparse.csr_matrix(A)
b = numpy.array([1.4, 1.2, 0.8, 1.1])
solver = PardisoLinearSolver()
x1 = solver.solve(A, b)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-13))
class TestScipySparseSolver(TestCase):
def test_solve(self):
        A = numpy.array([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]], dtype=float)
A = scipy.sparse.csr_matrix(A)
b = numpy.array([1.4, 1.2, 0.8, 1.1])
solver = ScipySparseLinearSolver()
x1 = solver.solve(A, b)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-13))
class TestScipyConjugateGradientLinearSolver(TestCase):
def test_solve(self):
        A = numpy.array([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]], dtype=float)
A = scipy.sparse.csr_matrix(A)
b = numpy.array([1.4, 1.2, 0.8, 1.1])
solver = ScipyConjugateGradientLinearSolver()
x1 = solver.solve(A.todense(), b, numpy.array([0, 0, 0, 0]), 1e-5, 4)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-5))
x1 = solver.solve(A.todense(), b, numpy.array([0, 0, 0, 0]), 1e-13, 4)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-13))
x1 = solver.solve(A.todense(), b, numpy.array([0, 0, 0, 0]), 1e-1, 4)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-1))
class TestResidualbasedConjugateGradient(TestCase):
def test_solve(self):
        A = numpy.array([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]], dtype=float)
A = scipy.sparse.csr_matrix(A)
b = numpy.array([1.4, 1.2, 0.8, 1.1])
solver = ResidualbasedConjugateGradient()
def residual(x):
return A.dot(x)-b
x1, _,_ = solver.solve(residual, numpy.array([0.0, 0.0, 0.0, 0.0]), 1e-5, 4)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-5))
x1, _,_ = solver.solve(residual, numpy.array([0.0, 0.0, 0.0, 0.0]), 1e-13, 4)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-13))
x1, _,_ = solver.solve(residual, numpy.array([0.0, 0.0, 0.0, 0.0]), 1e-1, 4)
res = numpy.linalg.norm(A.dot(x1) - b) / scipy.sparse.linalg.norm(A)
self.assertLess(res, 10 ** (-1))
| [
"numpy.array"
] | [((478, 570), 'numpy.array', 'numpy.array', (['[[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]]'], {'dtype': 'float'}), '([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]],\n dtype=float)\n', (489, 570), False, 'import numpy\n'), ((619, 652), 'numpy.array', 'numpy.array', (['[1.4, 1.2, 0.8, 1.1]'], {}), '([1.4, 1.2, 0.8, 1.1])\n', (630, 652), False, 'import numpy\n'), ((931, 1023), 'numpy.array', 'numpy.array', (['[[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]]'], {'dtype': 'float'}), '([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]],\n dtype=float)\n', (942, 1023), False, 'import numpy\n'), ((1074, 1107), 'numpy.array', 'numpy.array', (['[1.4, 1.2, 0.8, 1.1]'], {}), '([1.4, 1.2, 0.8, 1.1])\n', (1085, 1107), False, 'import numpy\n'), ((1406, 1498), 'numpy.array', 'numpy.array', (['[[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]]'], {'dtype': 'float'}), '([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]],\n dtype=float)\n', (1417, 1498), False, 'import numpy\n'), ((1547, 1580), 'numpy.array', 'numpy.array', (['[1.4, 1.2, 0.8, 1.1]'], {}), '([1.4, 1.2, 0.8, 1.1])\n', (1558, 1580), False, 'import numpy\n'), ((2345, 2437), 'numpy.array', 'numpy.array', (['[[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]]'], {'dtype': 'float'}), '([[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]],\n dtype=float)\n', (2356, 2437), False, 'import numpy\n'), ((2486, 2519), 'numpy.array', 'numpy.array', (['[1.4, 1.2, 0.8, 1.1]'], {}), '([1.4, 1.2, 0.8, 1.1])\n', (2497, 2519), False, 'import numpy\n'), ((1677, 1702), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1688, 1702), False, 'import numpy\n'), ((1883, 1908), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1894, 1908), False, 'import numpy\n'), ((2091, 2116), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (2102, 2116), False, 'import numpy\n'), ((2684, 2717), 'numpy.array', 'numpy.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (2695, 2717), False, 'import numpy\n'), ((2897, 2930), 'numpy.array', 'numpy.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (2908, 2930), False, 'import numpy\n'), ((3112, 3145), 'numpy.array', 'numpy.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (3123, 3145), False, 'import numpy\n')] |
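A hedged sketch using one of the solver classes above directly, on the same symmetric positive definite system the tests assemble.

import numpy
import scipy.sparse
from amfe.linalg.linearsolvers import ScipySparseLinearSolver

A = scipy.sparse.csr_matrix(numpy.array(
    [[4, -2, 0, 0], [-2, 4, -2, 0], [0, -2, 4, -1], [0, 0, -1, 1]], dtype=float))
b = numpy.array([1.4, 1.2, 0.8, 1.1])
x = ScipySparseLinearSolver().solve(A, b)
print(numpy.linalg.norm(A.dot(x) - b))  # residual should be near machine precision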
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# Copyright 2019 Université de Liège
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import CVLM
import numpy as np
class VLMDriver(object):
def __init__(self, infile):
self.iteration = 0
self.data = CVLM.VLMData()
CVLM.setup(infile, self.data)
self.m = self.data.wing.nshed+2 # Assumes only wing
self.n = int(self.data.wing.nvert/self.m)
CVLM.geometry_setup(self.data)
CVLM.cycleliftsurf(self.data)
CVLM.memory_setup(self.data)
self.x = np.zeros(self.data.wing.nvert)
self.y = np.zeros(self.data.wing.nvert)
self.z = np.zeros(self.data.wing.nvert)
self.xv = np.zeros(self.data.wing.nvert)
self.yv = np.zeros(self.data.wing.nvert)
self.zv = np.zeros(self.data.wing.nvert)
for ii in range(self.data.wing.nvert):
self.x[ii] = self.getX(ii)
self.y[ii] = self.getY(ii)
self.z[ii] = self.getZ(ii)
self.xv[ii] = self.getXv(ii)
self.yv[ii] = self.getYv(ii)
self.zv[ii] = self.getZv(ii)
self.S = 0. # Surface of the wing, imperfect
for i in range(self.data.wing.nface):
self.S += CVLM.nsurf_getitem(self.data.wing.nsurf, i)
def run(self):
# Run simulation until completion
for i in range(self.data.ntimes-1):
self.iteration = i
CVLM.iteration(self.data, self.iteration)
def __getCoord(self, index, delta):
# Get the x/y/z (delta=0/1/2, respectively) of vertex index
if index>self.data.wing.nvert+self.data.flap.nvert: # If the vertex is on the ailerons
c = CVLM.vertices_getitem(self.data.aileron.vertices,
index-self.data.wing.nvert-self.data.flap.nvert+delta*self.data.aileron.nvert)
elif index>self.data.wing.nvert: # If the vertex is on the flaps
c = CVLM.vertices_getitem(self.data.flap.vertices,
index-self.data.wing.nvert+delta*self.data.flap.nvert)
else: # If the vertex is on the wing
c = CVLM.vertices_getitem(self.data.wing.vertices,
index+delta*self.data.wing.nvert)
return c
def __setCoord(self, index, c, delta):
# Set the x/y/z (delta=0/1/2, respectively) of vertex index
if index>self.data.wing.nvert+self.data.flap.nvert:
CVLM.vertices_setitem(self.data.aileron.vertices,
index-self.data.wing.nvert-self.data.flap.nvert+delta*self.data.aileron.nvert, c)
elif index>self.data.wing.nvert:
CVLM.vertices_setitem(self.data.flap.vertices,
index-self.data.wing.nvert+delta*self.data.flap.nvert, c)
else:
CVLM.vertices_setitem(self.data.wing.vertices,
index+delta*self.data.wing.nvert, c)
def __getVortexCoord(self, index, delta):
return CVLM.vortex_getitem(self.data.wing.vortex, index+delta*self.data.wing.nvert)
def __setVortexCoord(self, index, c, delta):
CVLM.vortex_setitem(self.data.wing.vortex, index+delta*self.data.wing.nvert, c)
def getX(self, index):
return self.__getCoord(index, 0)
def getY(self, index):
return self.__getCoord(index, 1)
def getZ(self, index):
return self.__getCoord(index, 2)
def getXv(self, index):
return self.__getVortexCoord(index, 0)
def getYv(self, index):
return self.__getVortexCoord(index, 1)
def getZv(self, index):
return self.__getVortexCoord(index, 2)
def setX(self, index, x):
self.__setCoord(index, x, 0)
def setY(self, index, y):
self.__setCoord(index, y, 1)
def setZ(self, index, z):
self.__setCoord(index, z, 2)
def dX(self, index, dx):
self.__setCoord(index, self.x[index]+dx, 0)
def dY(self, index, dy):
self.__setCoord(index, self.y[index]+dy, 1)
def dZ(self, index, dz):
self.__setCoord(index, self.z[index]+dz, 2)
# Impose deformation in collocation points
def setXv(self, index, dx):
self.__setVortexCoord(index, self.xv[index]+dx, 0)
def setYv(self, index, dy):
self.__setVortexCoord(index, self.yv[index]+dy, 1)
def setZv(self, index, dz):
self.__setVortexCoord(index, self.zv[index]+dz, 2)
# Modify vortex collocation points
def dXv(self, index, dx):
x = self.getXv(index)
self.__setVortexCoord(index, x+dx, 0)
def dYv(self, index, dy):
y = self.getYv(index)
self.__setVortexCoord(index, y+dy, 1)
def dZv(self, index, dz):
z = self.getZv(index)
self.__setVortexCoord(index, z+dz, 2)
def getVertices(self, panel):
v = [-1,-1,-1,-1]
if panel < 10000: # If the panel is on the wing
for j in range(4):
v[j] = CVLM.faces_getitem(self.data.wing.faces, panel+j*self.data.wing.nface)
return v
def __getLoads(self, delta):
return CVLM.aeroforce_getitem(self.data.wing.aeroforce, delta)
def getQ(self):
Q = 0.5*(CVLM.aeroforce_getitem(self.data.UVW, 0)**2+CVLM.aeroforce_getitem(self.data.UVW, 1)**2+
CVLM.aeroforce_getitem(self.data.UVW, 2)**2)*self.data.rho
return Q
def getCl(self):
x_force = self.__getLoads(0)
z_force = self.__getLoads(2)
lift = z_force*np.cos(self.data.aoa)-x_force*np.sin(self.data.aoa)
return lift/(self.getQ()*self.S)
def getCd(self):
x_force = self.__getLoads(0)
z_force = self.__getLoads(2)
induced_drag = self.__getLoads(3)
drag = z_force*np.sin(self.data.aoa)+x_force*np.cos(self.data.aoa)+induced_drag
return drag/(self.getQ()*self.S)
def getdeltaP(self, panel):
if panel < 10000:
dP = CVLM.Deltap_getitem(self.data.wing.Deltap, panel)
normal = [CVLM.normal_getitem(self.data.wing.normal, panel), CVLM.normal_getitem(self.data.wing.normal, panel+self.data.wing.nface), CVLM.normal_getitem(self.data.wing.normal, panel+2*self.data.wing.nface)]
dPnormal = [comp * dP for comp in normal]
return dPnormal
def getSurface(self, panel):
if panel < 10000: # If the panel is on the wing
return CVLM.nsurf_getitem(self.data.wing.nsurf, panel)
def getForce(self, panel, weight):
if panel < 10000:
dP = CVLM.Deltap_getitem(self.data.wing.Deltap, panel)
normal = [CVLM.normal_getitem(self.data.wing.normal, panel), CVLM.normal_getitem(self.data.wing.normal, panel+self.data.wing.nface), CVLM.normal_getitem(self.data.wing.normal, panel+2*self.data.wing.nface)]
S = self.getSurface(panel)
forceNormal = [comp * dP * S * weight for comp in normal]
return forceNormal
def update(self):
# After each geometry update, prepare the VLM struct for a new run
CVLM.reset_wake(self.data)
CVLM.reset_geometry(self.data)
CVLM.geometry_setup(self.data)
CVLM.cycleliftsurf(self.data)
def save(self):
CVLM.exportTextOutput("outfile.m",self.iteration-1,self.data)
| [
"CVLM.vortex_setitem",
"CVLM.vertices_getitem",
"CVLM.vortex_getitem",
"CVLM.memory_setup",
"CVLM.cycleliftsurf",
"CVLM.VLMData",
"numpy.sin",
"CVLM.vertices_setitem",
"CVLM.aeroforce_getitem",
"CVLM.setup",
"CVLM.nsurf_getitem",
"CVLM.geometry_setup",
"CVLM.reset_wake",
"CVLM.normal_getit... | [((769, 783), 'CVLM.VLMData', 'CVLM.VLMData', ([], {}), '()\n', (781, 783), False, 'import CVLM\n'), ((792, 821), 'CVLM.setup', 'CVLM.setup', (['infile', 'self.data'], {}), '(infile, self.data)\n', (802, 821), False, 'import CVLM\n'), ((940, 970), 'CVLM.geometry_setup', 'CVLM.geometry_setup', (['self.data'], {}), '(self.data)\n', (959, 970), False, 'import CVLM\n'), ((979, 1008), 'CVLM.cycleliftsurf', 'CVLM.cycleliftsurf', (['self.data'], {}), '(self.data)\n', (997, 1008), False, 'import CVLM\n'), ((1017, 1045), 'CVLM.memory_setup', 'CVLM.memory_setup', (['self.data'], {}), '(self.data)\n', (1034, 1045), False, 'import CVLM\n'), ((1063, 1093), 'numpy.zeros', 'np.zeros', (['self.data.wing.nvert'], {}), '(self.data.wing.nvert)\n', (1071, 1093), True, 'import numpy as np\n'), ((1111, 1141), 'numpy.zeros', 'np.zeros', (['self.data.wing.nvert'], {}), '(self.data.wing.nvert)\n', (1119, 1141), True, 'import numpy as np\n'), ((1159, 1189), 'numpy.zeros', 'np.zeros', (['self.data.wing.nvert'], {}), '(self.data.wing.nvert)\n', (1167, 1189), True, 'import numpy as np\n'), ((1208, 1238), 'numpy.zeros', 'np.zeros', (['self.data.wing.nvert'], {}), '(self.data.wing.nvert)\n', (1216, 1238), True, 'import numpy as np\n'), ((1257, 1287), 'numpy.zeros', 'np.zeros', (['self.data.wing.nvert'], {}), '(self.data.wing.nvert)\n', (1265, 1287), True, 'import numpy as np\n'), ((1306, 1336), 'numpy.zeros', 'np.zeros', (['self.data.wing.nvert'], {}), '(self.data.wing.nvert)\n', (1314, 1336), True, 'import numpy as np\n'), ((3417, 3502), 'CVLM.vortex_getitem', 'CVLM.vortex_getitem', (['self.data.wing.vortex', '(index + delta * self.data.wing.nvert)'], {}), '(self.data.wing.vortex, index + delta * self.data.wing.nvert\n )\n', (3436, 3502), False, 'import CVLM\n'), ((3555, 3643), 'CVLM.vortex_setitem', 'CVLM.vortex_setitem', (['self.data.wing.vortex', '(index + delta * self.data.wing.nvert)', 'c'], {}), '(self.data.wing.vortex, index + delta * self.data.wing.\n nvert, c)\n', (3574, 3643), False, 'import CVLM\n'), ((5483, 5538), 'CVLM.aeroforce_getitem', 'CVLM.aeroforce_getitem', (['self.data.wing.aeroforce', 'delta'], {}), '(self.data.wing.aeroforce, delta)\n', (5505, 5538), False, 'import CVLM\n'), ((7404, 7430), 'CVLM.reset_wake', 'CVLM.reset_wake', (['self.data'], {}), '(self.data)\n', (7419, 7430), False, 'import CVLM\n'), ((7439, 7469), 'CVLM.reset_geometry', 'CVLM.reset_geometry', (['self.data'], {}), '(self.data)\n', (7458, 7469), False, 'import CVLM\n'), ((7478, 7508), 'CVLM.geometry_setup', 'CVLM.geometry_setup', (['self.data'], {}), '(self.data)\n', (7497, 7508), False, 'import CVLM\n'), ((7517, 7546), 'CVLM.cycleliftsurf', 'CVLM.cycleliftsurf', (['self.data'], {}), '(self.data)\n', (7535, 7546), False, 'import CVLM\n'), ((7575, 7640), 'CVLM.exportTextOutput', 'CVLM.exportTextOutput', (['"""outfile.m"""', '(self.iteration - 1)', 'self.data'], {}), "('outfile.m', self.iteration - 1, self.data)\n", (7596, 7640), False, 'import CVLM\n'), ((1745, 1788), 'CVLM.nsurf_getitem', 'CVLM.nsurf_getitem', (['self.data.wing.nsurf', 'i'], {}), '(self.data.wing.nsurf, i)\n', (1763, 1788), False, 'import CVLM\n'), ((1937, 1978), 'CVLM.iteration', 'CVLM.iteration', (['self.data', 'self.iteration'], {}), '(self.data, self.iteration)\n', (1951, 1978), False, 'import CVLM\n'), ((2198, 2339), 'CVLM.vertices_getitem', 'CVLM.vertices_getitem', (['self.data.aileron.vertices', '(index - self.data.wing.nvert - self.data.flap.nvert + delta * self.data.\n aileron.nvert)'], {}), '(self.data.aileron.vertices, index - 
self.data.wing.\n nvert - self.data.flap.nvert + delta * self.data.aileron.nvert)\n', (2219, 2339), False, 'import CVLM\n'), ((2908, 3052), 'CVLM.vertices_setitem', 'CVLM.vertices_setitem', (['self.data.aileron.vertices', '(index - self.data.wing.nvert - self.data.flap.nvert + delta * self.data.\n aileron.nvert)', 'c'], {}), '(self.data.aileron.vertices, index - self.data.wing.\n nvert - self.data.flap.nvert + delta * self.data.aileron.nvert, c)\n', (2929, 3052), False, 'import CVLM\n'), ((6309, 6358), 'CVLM.Deltap_getitem', 'CVLM.Deltap_getitem', (['self.data.wing.Deltap', 'panel'], {}), '(self.data.wing.Deltap, panel)\n', (6328, 6358), False, 'import CVLM\n'), ((6764, 6811), 'CVLM.nsurf_getitem', 'CVLM.nsurf_getitem', (['self.data.wing.nsurf', 'panel'], {}), '(self.data.wing.nsurf, panel)\n', (6782, 6811), False, 'import CVLM\n'), ((6894, 6943), 'CVLM.Deltap_getitem', 'CVLM.Deltap_getitem', (['self.data.wing.Deltap', 'panel'], {}), '(self.data.wing.Deltap, panel)\n', (6913, 6943), False, 'import CVLM\n'), ((2432, 2543), 'CVLM.vertices_getitem', 'CVLM.vertices_getitem', (['self.data.flap.vertices', '(index - self.data.wing.nvert + delta * self.data.flap.nvert)'], {}), '(self.data.flap.vertices, index - self.data.wing.nvert +\n delta * self.data.flap.nvert)\n', (2453, 2543), False, 'import CVLM\n'), ((2611, 2700), 'CVLM.vertices_getitem', 'CVLM.vertices_getitem', (['self.data.wing.vertices', '(index + delta * self.data.wing.nvert)'], {}), '(self.data.wing.vertices, index + delta * self.data.\n wing.nvert)\n', (2632, 2700), False, 'import CVLM\n'), ((3109, 3223), 'CVLM.vertices_setitem', 'CVLM.vertices_setitem', (['self.data.flap.vertices', '(index - self.data.wing.nvert + delta * self.data.flap.nvert)', 'c'], {}), '(self.data.flap.vertices, index - self.data.wing.nvert +\n delta * self.data.flap.nvert, c)\n', (3130, 3223), False, 'import CVLM\n'), ((3256, 3348), 'CVLM.vertices_setitem', 'CVLM.vertices_setitem', (['self.data.wing.vertices', '(index + delta * self.data.wing.nvert)', 'c'], {}), '(self.data.wing.vertices, index + delta * self.data.\n wing.nvert, c)\n', (3277, 3348), False, 'import CVLM\n'), ((5347, 5421), 'CVLM.faces_getitem', 'CVLM.faces_getitem', (['self.data.wing.faces', '(panel + j * self.data.wing.nface)'], {}), '(self.data.wing.faces, panel + j * self.data.wing.nface)\n', (5365, 5421), False, 'import CVLM\n'), ((5875, 5896), 'numpy.cos', 'np.cos', (['self.data.aoa'], {}), '(self.data.aoa)\n', (5881, 5896), True, 'import numpy as np\n'), ((5905, 5926), 'numpy.sin', 'np.sin', (['self.data.aoa'], {}), '(self.data.aoa)\n', (5911, 5926), True, 'import numpy as np\n'), ((6381, 6430), 'CVLM.normal_getitem', 'CVLM.normal_getitem', (['self.data.wing.normal', 'panel'], {}), '(self.data.wing.normal, panel)\n', (6400, 6430), False, 'import CVLM\n'), ((6432, 6504), 'CVLM.normal_getitem', 'CVLM.normal_getitem', (['self.data.wing.normal', '(panel + self.data.wing.nface)'], {}), '(self.data.wing.normal, panel + self.data.wing.nface)\n', (6451, 6504), False, 'import CVLM\n'), ((6504, 6580), 'CVLM.normal_getitem', 'CVLM.normal_getitem', (['self.data.wing.normal', '(panel + 2 * self.data.wing.nface)'], {}), '(self.data.wing.normal, panel + 2 * self.data.wing.nface)\n', (6523, 6580), False, 'import CVLM\n'), ((6966, 7015), 'CVLM.normal_getitem', 'CVLM.normal_getitem', (['self.data.wing.normal', 'panel'], {}), '(self.data.wing.normal, panel)\n', (6985, 7015), False, 'import CVLM\n'), ((7017, 7089), 'CVLM.normal_getitem', 'CVLM.normal_getitem', (['self.data.wing.normal', '(panel + 
self.data.wing.nface)'], {}), '(self.data.wing.normal, panel + self.data.wing.nface)\n', (7036, 7089), False, 'import CVLM\n'), ((7089, 7165), 'CVLM.normal_getitem', 'CVLM.normal_getitem', (['self.data.wing.normal', '(panel + 2 * self.data.wing.nface)'], {}), '(self.data.wing.normal, panel + 2 * self.data.wing.nface)\n', (7108, 7165), False, 'import CVLM\n'), ((6128, 6149), 'numpy.sin', 'np.sin', (['self.data.aoa'], {}), '(self.data.aoa)\n', (6134, 6149), True, 'import numpy as np\n'), ((6158, 6179), 'numpy.cos', 'np.cos', (['self.data.aoa'], {}), '(self.data.aoa)\n', (6164, 6179), True, 'import numpy as np\n'), ((5681, 5721), 'CVLM.aeroforce_getitem', 'CVLM.aeroforce_getitem', (['self.data.UVW', '(2)'], {}), '(self.data.UVW, 2)\n', (5703, 5721), False, 'import CVLM\n'), ((5576, 5616), 'CVLM.aeroforce_getitem', 'CVLM.aeroforce_getitem', (['self.data.UVW', '(0)'], {}), '(self.data.UVW, 0)\n', (5598, 5616), False, 'import CVLM\n'), ((5620, 5660), 'CVLM.aeroforce_getitem', 'CVLM.aeroforce_getitem', (['self.data.UVW', '(1)'], {}), '(self.data.UVW, 1)\n', (5642, 5660), False, 'import CVLM\n')] |
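A hedged driver sketch for the VLMDriver class above; the module name vlm_driver.py and the input deck infile.txt are assumptions.

from vlm_driver import VLMDriver  # assumes the class above is saved as vlm_driver.py

driver = VLMDriver('infile.txt')  # hypothetical VLM input file
driver.run()                      # march all time steps
print('CL = %.4f, CD = %.4f' % (driver.getCl(), driver.getCd()))
driver.save()                     # writes outfile.m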
import sys
import os
if float(sys.version[0]) < 3:
    import cPickle as pickle
else:
    import pickle
import datetime
import numpy as np
import netCDF4
import matplotlib.mlab as mp
def get_cstr():
'''Returns case-string'''
path = os.getcwd()
return path.split(os.sep)[-1]
class Filelist():
'''Object linking points in time to files and indices in files (model results)'''
def __init__(self, name='fileList.txt', start_time=None, stop_time=None, time_format='FVCOM'):
'''Load information from file. Crop if start or stop is given.'''
fid=open(name,'r')
self.time=np.empty(0)
self.path=[]
self.index=[]
for line in fid:
self.time=np.append(self.time, float(line.split()[0]))
self.path.append(line.split()[1])
self.index.append(int(line.split()[2]))
fid.close()
if time_format == 'FVCOM':
self.time_units = 'days since 1858-11-17 00:00:00'
elif time_format == 'WRF':
self.time_units = 'days since 1948-01-01 00:00:00'
elif time_format == 'ROMS':
self.time_units = 'seconds since 1970-01-01 00:00:00'
elif time_format == 'WRF_dk':
self.time_units = 'minutes since 1900-01-01 00:00:00'
self.datetime = netCDF4.num2date(self.time, units = self.time_units)
self.crop_list(start_time, stop_time)
def crop_list(self, start_time=None, stop_time=None):
        '''Remove values outside specified time range.'''
if start_time is not None:
year = int(start_time.split('-')[0])
month = int(start_time.split('-')[1])
day = int(start_time.split('-')[2])
hour = int(start_time.split('-')[3])
t1 = datetime.datetime(year, month, day, hour)
ind = np.where(self.datetime >= t1)[0]
self.time = self.time[ind]
self.datetime = self.datetime[ind]
self.path = [self.path[i] for i in list(ind)]
self.index = [self.index[i] for i in list(ind)]
if stop_time is not None:
year = int(stop_time.split('-')[0])
month = int(stop_time.split('-')[1])
day = int(stop_time.split('-')[2])
hour = int(stop_time.split('-')[3])
t1 = datetime.datetime(year, month, day, hour)
ind = np.where(self.datetime <= t1)[0]
self.time = self.time[ind]
self.datetime = self.datetime[ind]
self.path = [self.path[i] for i in list(ind)]
self.index = [self.index[i] for i in list(ind)]
def find_nearest(self, yyyy, mm, dd, HH=0):
'''Find index of nearest fileList entry to given point in time.'''
t = datetime.datetime(yyyy, mm, dd, HH)
        fvcom_time = netCDF4.date2num(t, units=self.time_units)
dt = np.abs(self.time - fvcom_time)
ind = np.argmin(dt)
return ind
def write2file(self, name):
'''Write to file.'''
fid = open(name, 'w')
for t, p, i in zip(self.time, self.path, self.index):
line = str(t) + '\t' + p + '\t' + str(i) + '\n'
fid.write(line)
fid.close()
def unique_files(self):
'''Find unique files (paths) in fileList.'''
unique_files = []
for p in self.path:
if p not in unique_files:
unique_files.append(p)
return unique_files
def load(name):
'''Load object stored as p-file.'''
obj = pickle.load(open(name, 'rb'))
return obj
| [
"numpy.abs",
"os.getcwd",
"numpy.empty",
"netCDF4.date2num",
"numpy.argmin",
"datetime.datetime",
"numpy.where",
"netCDF4.num2date"
] | [((221, 232), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (230, 232), False, 'import os\n'), ((598, 609), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (606, 609), True, 'import numpy as np\n'), ((1303, 1353), 'netCDF4.num2date', 'netCDF4.num2date', (['self.time'], {'units': 'self.time_units'}), '(self.time, units=self.time_units)\n', (1319, 1353), False, 'import netCDF4\n'), ((2793, 2828), 'datetime.datetime', 'datetime.datetime', (['yyyy', 'mm', 'dd', 'HH'], {}), '(yyyy, mm, dd, HH)\n', (2810, 2828), False, 'import datetime\n'), ((2850, 2892), 'netCDF4.date2num', 'netCDF4.date2num', (['t'], {'units': 'self.time_units'}), '(t, units=self.time_units)\n', (2866, 2892), False, 'import netCDF4\n'), ((2923, 2953), 'numpy.abs', 'np.abs', (['(self.time - fvcom_time)'], {}), '(self.time - fvcom_time)\n', (2929, 2953), True, 'import numpy as np\n'), ((2968, 2981), 'numpy.argmin', 'np.argmin', (['dt'], {}), '(dt)\n', (2977, 2981), True, 'import numpy as np\n'), ((1793, 1834), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour'], {}), '(year, month, day, hour)\n', (1810, 1834), False, 'import datetime\n'), ((2335, 2376), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour'], {}), '(year, month, day, hour)\n', (2352, 2376), False, 'import datetime\n'), ((1853, 1882), 'numpy.where', 'np.where', (['(self.datetime >= t1)'], {}), '(self.datetime >= t1)\n', (1861, 1882), True, 'import numpy as np\n'), ((2408, 2437), 'numpy.where', 'np.where', (['(self.datetime <= t1)'], {}), '(self.datetime <= t1)\n', (2416, 2437), True, 'import numpy as np\n')] |
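A hedged usage sketch for the Filelist class above; the module name filelist_utils.py and the time range are assumptions.

from filelist_utils import Filelist  # assumes the module above is saved as filelist_utils.py

fl = Filelist('fileList.txt', start_time='2015-01-01-0',
              stop_time='2015-02-01-0', time_format='FVCOM')
ind = fl.find_nearest(2015, 1, 15, 12)  # nearest entry to 2015-01-15 12:00
print(fl.path[ind], fl.index[ind])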
import os
import random
import numpy as np
from scipy.spatial.distance import cdist
import cv2
import time
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
# import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from torch.optim import Adam
# from torch.utils.tensorboard import SummaryWriter
from scipy.spatial.distance import cdist
from package.model.san import SaN
from package.loss.san_loss import _SaN_loss
from package.dataset.data_san import *
from package.args.san_args import parse_config
from package.dataset.utils import make_logger
from package.model.utils import *
from package.loss.regularization import _Regularization
from sklearn.neighbors import NearestNeighbors as NN
DEBUG = True
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_fn(save_dir, it, pre=0, mAP=0):
return join(mkdir(join(save_dir, 'models')), 'Iter__{}__{}_{}.pkl'.format(it, int(pre * 1000), int(mAP * 1000)))
def _try_load(args, logger, model, optimizer):
if args.start_from is None:
# try to find the latest checkpoint
files = os.listdir(mkdir(join(mkdir(args.save_dir), 'models')))
if len(files) == 0:
logger.info("Cannot find any checkpoint. Start new training.")
return 0
latest = max(files, key=lambda name: int(name.split('\\')[-1].split('/')[-1].split('.')[0].split('__')[1]))
checkpoint = join(args.save_dir, 'models', latest)
else:
try: checkpoint = save_fn(args.save_dir, str(int(args.start_from)))
except: checkpoint = args.start_from
logger.info("Load model from {}".format(checkpoint))
ckpt = torch.load(checkpoint, map_location='cpu')
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
return ckpt['steps']
def _extract_feats(data_test, model, what, skip=1, batch_size=16):
"""
:param data_test: test Dataset
:param model: network model
:param what: SK or IM
    :param skip: skip a certain number of images/sketches to reduce computation
    :return: a two-element tuple (extracted_features, extracted_labels)
"""
    feats = []
    labels = []
    for batch_idx, (xs, id) in \
            enumerate(data_test.traverse(what, skip=skip, batch_size=batch_size)):
        feats.append(model(xs.cuda()).data.cpu().numpy())  # extracted feature vectors
        # print(type(feats[0]), feats[0].shape)  # <class 'numpy.ndarray'> (16, 256)
        # print(type(id), id)  # <class 'torch.Tensor'> tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        labels.append(id.numpy())  # class ids
    return np.concatenate(feats), np.concatenate(labels)
def _get_pre_from_matches(matches):
"""
:param matches: A n-by-m matrix. n is number of test samples, m is the top m elements used for evaluation
:return: precision
"""
return np.mean(matches)
def _map_change(inputArr):
dup = np.copy(inputArr)
for idx in range(inputArr.shape[1]):
if idx != 0:
# dup cannot be bool type
dup[:,idx] = dup[:,idx-1] + dup[:,idx]
return np.multiply(dup, inputArr)
def _get_map_from_matches(matches):
"""
mAP's calculation refers to https://github.com/ShivaKrishnaM/ZS-SBIR/blob/master/trainCVAE_pre.py.
:param matches: A n-by-m matrix. n is number of test samples, m is the top m elements used for evaluation
:return: mAP
"""
temp = [np.arange(matches.shape[1]) for _ in range(matches.shape[0])]
mAP_term = 1.0 / (np.stack(temp, axis=0) + 1.0)
mAP = np.mean(np.multiply(_map_change(matches), mAP_term), axis=1)
return np.mean(mAP)
def _eval(feats_labels_sk, feats_labels_im, n=200):
"""
:param feats_labels_sk: a two-element tuple [features_of_sketches, labels_of_sketches]
labels_of_sketches and labels_of_images are scalars(class id).
:param feats_labels_im: a two-element tuple [features_of_images, labels_of_images]
features_of_images and features_of_sketches are used for distance calculation.
:param n: the top n elements used for evaluation
:return: precision@n, mAP@n
"""
nn = NN(n_neighbors=n, metric='cosine', algorithm='brute').fit(feats_labels_im[0])
_, indices = nn.kneighbors(feats_labels_sk[0])
retrieved_classes = np.array(feats_labels_im[1])[indices]
# astype(np.uint16) is necessary
ranks = np.vstack([(retrieved_classes[i] == feats_labels_sk[1][i])
for i in range(retrieved_classes.shape[0])]).astype(np.uint16)
return _get_pre_from_matches(ranks), _get_map_from_matches(ranks)
def _parse_args_paths(args):
if args.dataset == 'sketchy':
sketch_folder = SKETCH_FOLDER_SKETCHY
image_folder = IMAGE_FOLDER_SKETCHY
train_class = TRAIN_CLASS_SKETCHY
test_class = TEST_CLASS_SKETCHY
elif args.dataset == 'tuberlin':
sketch_folder = SKETCH_FOLDER_TUBERLIN
image_folder = IMAGE_FOLDER_TUBERLIN
train_class = TRAIN_CLASS_TUBERLIN
test_class = TEST_CLASS_TUBERLIN
else: raise Exception("dataset args error!")
if args.sketch_dir != '': sketch_folder = args.sketch_dir
if args.image_dir != '': image_folder = args.image_dir
if args.npy_dir == '0': args.npy_dir = NPY_FOLDER_SKETCHY
elif args.npy_dir == '': args.npy_dir = None
return sketch_folder, image_folder, train_class, test_class
def train(args):
# srun -p gpu --gres=gpu:1 --exclusive --output=san10.out python main_san.py --steps 50000 --print_every 500 --save_every 2000 --batch_size 96 --dataset sketchy --margin 10 --npy_dir 0 --save_dir san_sketchy10
# srun -p gpu --gres=gpu:1 --exclusive --output=san1.out python main_san.py --steps 50000 --print_every 500 --save_every 2000 --batch_size 96 --dataset sketchy --margin 1 --npy_dir 0 --save_dir san_sketchy1
# srun -p gpu --gres=gpu:1 --output=san_sketchy03.out python main_san.py --steps 30000 --print_every 200 --save_every 3000 --batch_size 96 --dataset sketchy --margin 0.3 --npy_dir 0 --save_dir san_sketchy03 --lr 0.0001
sketch_folder, image_folder, train_class, test_class = _parse_args_paths(args)
if DEBUG:
args.print_every = 5
args.save_every = 20
args.steps = 100
args.batch_size = 32
train_class = train_class[:2]
test_class = test_class[:2]
data_train = SaN_dataloader(folder_sk=sketch_folder, clss=train_class, folder_nps=args.npy_dir,
folder_im=image_folder, normalize01=False, doaug=False)
dataloader_train = DataLoader(dataset=data_train, batch_size=args.batch_size, shuffle=False)
data_test = SaN_dataloader(folder_sk=sketch_folder, exp3ch=True, clss=test_class, folder_nps=args.npy_dir,
folder_im=image_folder, normalize01=False, doaug=False)
model = SaN()
model.cuda()
optimizer = Adam(params=model.parameters(), lr=args.lr)
logger = make_logger(join(mkdir(args.save_dir), curr_time_str() + '.log'))
steps = _try_load(args, logger, model, optimizer)
logger.info(str(args))
args.steps += steps
san_loss = _SaN_loss(args.margin)
model.train()
l2_regularization = _Regularization(model, args.l2_reg, p=2, logger=None)
while True:
loss_sum = []
for _, (sketch, positive_image, negative_image, positive_class_id) in enumerate(dataloader_train):
optimizer.zero_grad()
loss = san_loss(model(sketch.cuda()),
model(positive_image.cuda()),
model(negative_image.cuda())) \
+ l2_regularization()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
loss_sum.append(float(loss.item()))
if (steps + 1) % args.save_every == 0:
model.eval()
n = 200; skip = 1
start_cpu_t = time.time()
feats_labels_sk = _extract_feats(data_test, model, SK, skip=skip, batch_size=args.batch_size)
feats_labels_im = _extract_feats(data_test, model, IM, skip=skip, batch_size=args.batch_size)
pre, mAP = _eval(feats_labels_sk, feats_labels_im, n)
logger.info("Precision@{}: {}, mAP@{}: {}".format(n, pre, n, mAP) +
" " + 'step: {}, loss: {}, (eval cpu time: {}s)'.format(steps, np.mean(loss_sum),
time.time() - start_cpu_t))
torch.save({'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'steps': steps,
'args': args},
save_fn(args.save_dir, steps, pre, mAP))
model.train()
if (steps + 1) % args.print_every == 0:
logger.info('step: {}, loss: {}'.format(steps, np.mean(loss_sum)))
loss_sum = []
steps += 1
if steps >= args.steps: break
if steps >= args.steps: break
def gen_args(margin=0.3, dataset='sketchy'):
margins = str(margin).replace('.', '')
return \
"""
###
#!/bin/bash
#SBATCH --job-name=ZXLing
#SBATCH --partition=gpu
#SBATCH --gres=gpu:1
#SBATCH --output=san_%j.out
#SBATCH --time=7-00:00:00
module load gcc/7.3.0 anaconda/3 cuda/9.2 cudnn/7.1.4
source activate lzxtc
python main_san.py --steps 30000 --print_every 500 --npy_dir 0 --save_every 3000 --batch_size 32 --dataset {} --save_dir san_{}_{} --lr 0.0001 --margin {}
sbatch san.slurm
""".format(dataset, dataset, margins, margin)
if __name__ == '__main__':
# print(gen_args(1))
# exit()
args = parse_config()
train(args)
'''
#!/bin/bash
#SBATCH --job-name=ZXLing
#SBATCH --partition=gpu
#SBATCH --gres=gpu:1
#SBATCH --output=san_%j.out
#SBATCH --time=7-00:00:00
module load gcc/7.3.0 anaconda/3 cuda/9.2 cudnn/7.1.4
source activate lzxtc
python main_san.py --steps 30000 --print_every 500 --npy_dir 0 --save_every 3000 --batch_size 32 --dataset sketchy --save_dir san_sketchy_03 --lr 0.0001 --margin 0.3
python main_san.py --steps 30000 --print_every 500 --npy_dir 0 --save_every 3000 --batch_size 32 --dataset sketchy --save_dir san_sketchy_01 --lr 0.0001 --margin 0.1
python main_san.py --steps 30000 --print_every 500 --npy_dir 0 --save_every 3000 --batch_size 32 --dataset sketchy --save_dir san_sketchy_05 --lr 0.0001 --margin 0.5
python main_san.py --steps 30000 --print_every 500 --npy_dir 0 --save_every 3000 --batch_size 32 --dataset sketchy --save_dir san_sketchy_1 --lr 0.0001 --margin 1
sbatch san.slurm
''' | [
"package.loss.regularization._Regularization",
"numpy.stack",
"numpy.multiply",
"numpy.concatenate",
"numpy.copy",
"torch.utils.data.DataLoader",
"torch.load",
"time.time",
"numpy.mean",
"numpy.arange",
"numpy.array",
"sklearn.neighbors.NearestNeighbors",
"torch.nn.kneighbors",
"package.mo... | [((1741, 1783), 'torch.load', 'torch.load', (['checkpoint'], {'map_location': '"""cpu"""'}), "(checkpoint, map_location='cpu')\n", (1751, 1783), False, 'import torch\n'), ((2922, 2938), 'numpy.mean', 'np.mean', (['matches'], {}), '(matches)\n', (2929, 2938), True, 'import numpy as np\n'), ((2978, 2995), 'numpy.copy', 'np.copy', (['inputArr'], {}), '(inputArr)\n', (2985, 2995), True, 'import numpy as np\n'), ((3158, 3184), 'numpy.multiply', 'np.multiply', (['dup', 'inputArr'], {}), '(dup, inputArr)\n', (3169, 3184), True, 'import numpy as np\n'), ((3677, 3689), 'numpy.mean', 'np.mean', (['mAP'], {}), '(mAP)\n', (3684, 3689), True, 'import numpy as np\n'), ((4289, 4322), 'torch.nn.kneighbors', 'nn.kneighbors', (['feats_labels_sk[0]'], {}), '(feats_labels_sk[0])\n', (4302, 4322), True, 'import torch.nn as nn\n'), ((6609, 6682), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data_train', 'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(dataset=data_train, batch_size=args.batch_size, shuffle=False)\n', (6619, 6682), False, 'from torch.utils.data import DataLoader\n'), ((6895, 6900), 'package.model.san.SaN', 'SaN', ([], {}), '()\n', (6898, 6900), False, 'from package.model.san import SaN\n'), ((7177, 7199), 'package.loss.san_loss._SaN_loss', '_SaN_loss', (['args.margin'], {}), '(args.margin)\n', (7186, 7199), False, 'from package.loss.san_loss import _SaN_loss\n'), ((7242, 7295), 'package.loss.regularization._Regularization', '_Regularization', (['model', 'args.l2_reg'], {'p': '(2)', 'logger': 'None'}), '(model, args.l2_reg, p=2, logger=None)\n', (7257, 7295), False, 'from package.loss.regularization import _Regularization\n'), ((9818, 9832), 'package.args.san_args.parse_config', 'parse_config', ([], {}), '()\n', (9830, 9832), False, 'from package.args.san_args import parse_config\n'), ((2678, 2700), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (2692, 2700), True, 'import numpy as np\n'), ((2702, 2723), 'numpy.concatenate', 'np.concatenate', (['feats'], {}), '(feats)\n', (2716, 2723), True, 'import numpy as np\n'), ((3481, 3508), 'numpy.arange', 'np.arange', (['matches.shape[1]'], {}), '(matches.shape[1])\n', (3490, 3508), True, 'import numpy as np\n'), ((4347, 4375), 'numpy.array', 'np.array', (['feats_labels_im[1]'], {}), '(feats_labels_im[1])\n', (4355, 4375), True, 'import numpy as np\n'), ((3565, 3587), 'numpy.stack', 'np.stack', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (3573, 3587), True, 'import numpy as np\n'), ((4194, 4247), 'sklearn.neighbors.NearestNeighbors', 'NN', ([], {'n_neighbors': 'n', 'metric': '"""cosine"""', 'algorithm': '"""brute"""'}), "(n_neighbors=n, metric='cosine', algorithm='brute')\n", (4196, 4247), True, 'from sklearn.neighbors import NearestNeighbors as NN\n'), ((8002, 8013), 'time.time', 'time.time', ([], {}), '()\n', (8011, 8013), False, 'import time\n'), ((9040, 9057), 'numpy.mean', 'np.mean', (['loss_sum'], {}), '(loss_sum)\n', (9047, 9057), True, 'import numpy as np\n'), ((8482, 8499), 'numpy.mean', 'np.mean', (['loss_sum'], {}), '(loss_sum)\n', (8489, 8499), True, 'import numpy as np\n'), ((8588, 8599), 'time.time', 'time.time', ([], {}), '()\n', (8597, 8599), False, 'import time\n')] |
import numpy as np
import scipy.stats as si
def black_scholes_call_div(S, K, T, r, q, sigma):
    # S: spot price
    # K: strike price
    # T: time to maturity
    # r: interest rate
    # q: rate of continuous dividend paying asset
    # sigma: volatility of underlying asset
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    call = (S * np.exp(-q * T) * si.norm.cdf(d1, 0.0, 1.0) -
            K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))
    return call
def black_scholes_put_div(S, K, T, r, q, sigma):
    # S: spot price
    # K: strike price
    # T: time to maturity
    # r: interest rate
    # q: rate of continuous dividend paying asset
    # sigma: volatility of underlying asset
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    put = (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) -
           S * np.exp(-q * T) * si.norm.cdf(-d1, 0.0, 1.0))
    return put
def euro_vanilla_dividend(S, K, T, r, q, sigma, option='call'):
    # S: spot price
    # K: strike price
    # T: time to maturity
    # r: interest rate
    # q: rate of continuous dividend paying asset
    # sigma: volatility of underlying asset
    d1 = (np.log(S / K) + (r - q + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(S / K) + (r - q - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    if option == 'call':
        result = (S * np.exp(-q * T) * si.norm.cdf(d1, 0.0, 1.0) -
                  K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))
    elif option == 'put':
        result = (K * np.exp(-r * T) * si.norm.cdf(-d2, 0.0, 1.0) -
                  S * np.exp(-q * T) * si.norm.cdf(-d1, 0.0, 1.0))
    else:
        raise ValueError("option must be 'call' or 'put'")
    return result
| [
"scipy.stats.norm.cdf",
"numpy.exp",
"numpy.log",
"numpy.sqrt"
] | [((293, 306), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (299, 306), True, 'import numpy as np\n'), ((352, 362), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (359, 362), True, 'import numpy as np\n'), ((374, 387), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (380, 387), True, 'import numpy as np\n'), ((433, 443), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (440, 443), True, 'import numpy as np\n'), ((479, 504), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['d1', '(0.0)', '(1.0)'], {}), '(d1, 0.0, 1.0)\n', (490, 504), True, 'import scipy.stats as si\n'), ((540, 565), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['d2', '(0.0)', '(1.0)'], {}), '(d2, 0.0, 1.0)\n', (551, 565), True, 'import scipy.stats as si\n'), ((832, 845), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (838, 845), True, 'import numpy as np\n'), ((891, 901), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (898, 901), True, 'import numpy as np\n'), ((913, 926), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (919, 926), True, 'import numpy as np\n'), ((972, 982), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (979, 982), True, 'import numpy as np\n'), ((1017, 1043), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['(-d2)', '(0.0)', '(1.0)'], {}), '(-d2, 0.0, 1.0)\n', (1028, 1043), True, 'import scipy.stats as si\n'), ((1078, 1104), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['(-d1)', '(0.0)', '(1.0)'], {}), '(-d1, 0.0, 1.0)\n', (1089, 1104), True, 'import scipy.stats as si\n'), ((1385, 1398), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (1391, 1398), True, 'import numpy as np\n'), ((1444, 1454), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (1451, 1454), True, 'import numpy as np\n'), ((1466, 1479), 'numpy.log', 'np.log', (['(S / K)'], {}), '(S / K)\n', (1472, 1479), True, 'import numpy as np\n'), ((1525, 1535), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (1532, 1535), True, 'import numpy as np\n'), ((462, 476), 'numpy.exp', 'np.exp', (['(-q * T)'], {}), '(-q * T)\n', (468, 476), True, 'import numpy as np\n'), ((523, 537), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (529, 537), True, 'import numpy as np\n'), ((1000, 1014), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (1006, 1014), True, 'import numpy as np\n'), ((1061, 1075), 'numpy.exp', 'np.exp', (['(-q * T)'], {}), '(-q * T)\n', (1067, 1075), True, 'import numpy as np\n'), ((1602, 1627), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['d1', '(0.0)', '(1.0)'], {}), '(d1, 0.0, 1.0)\n', (1613, 1627), True, 'import scipy.stats as si\n'), ((1669, 1694), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['d2', '(0.0)', '(1.0)'], {}), '(d2, 0.0, 1.0)\n', (1680, 1694), True, 'import scipy.stats as si\n'), ((1759, 1785), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['(-d2)', '(0.0)', '(1.0)'], {}), '(-d2, 0.0, 1.0)\n', (1770, 1785), True, 'import scipy.stats as si\n'), ((1827, 1853), 'scipy.stats.norm.cdf', 'si.norm.cdf', (['(-d1)', '(0.0)', '(1.0)'], {}), '(-d1, 0.0, 1.0)\n', (1838, 1853), True, 'import scipy.stats as si\n'), ((1585, 1599), 'numpy.exp', 'np.exp', (['(-q * T)'], {}), '(-q * T)\n', (1591, 1599), True, 'import numpy as np\n'), ((1652, 1666), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (1658, 1666), True, 'import numpy as np\n'), ((1742, 1756), 'numpy.exp', 'np.exp', (['(-r * T)'], {}), '(-r * T)\n', (1748, 1756), True, 'import numpy as np\n'), ((1810, 1824), 'numpy.exp', 'np.exp', (['(-q * T)'], {}), '(-q * T)\n', (1816, 1824), True, 'import numpy as np\n')] |
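As a quick sanity check of the pricing functions above, put-call parity with a continuous dividend yield states C - P = S*exp(-q*T) - K*exp(-r*T). The following minimal check uses arbitrary parameter values; only the parity relation itself matters.

S, K, T, r, q, sigma = 100.0, 95.0, 1.0, 0.05, 0.02, 0.25
call = black_scholes_call_div(S, K, T, r, q, sigma)
put = black_scholes_put_div(S, K, T, r, q, sigma)
lhs = call - put
rhs = S * np.exp(-q * T) - K * np.exp(-r * T)
assert abs(lhs - rhs) < 1e-10  # parity holds up to floating-point error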
import os
import shutil
import numpy as np
import numpy.matlib as matl
import torch
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
from torchvision import transforms
from tqdm import tqdm
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
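# Example usage (hypothetical values): tracking a running loss average.
#   meter = AverageMeter()
#   meter.update(0.8)
#   meter.update(0.6, n=4)
#   meter.avg  # -> (0.8 * 1 + 0.6 * 4) / 5 = 0.64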
def compute_feature(data_loader, model, args):
    """Compute features for every example in a data loader.

    Args:
        data_loader (DataLoader): Loader yielding (input, target) batches.
        model (nn.Module): Feature extractor; called as model(input).
        args: Namespace providing the target device in args.device.

    Returns:
        features: The stacked feature matrix.
        labels: The corresponding label column.
    """
    # switch to evaluate mode
    model.eval()
    features, labels = [], []
    with torch.no_grad():
        for i, (input, target) in enumerate(tqdm(data_loader)):
            # place input tensors on the device
            input = input.to(args.device)
            target = target.to(args.device)
            # compute output
            features.append(model(input).cpu().detach().numpy())
            labels.append(target.cpu().detach().numpy().reshape(-1, 1))
    return np.vstack(features), np.vstack(labels)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    filename = os.path.join('output', filename)
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join('output', 'model_best.pth.tar'))
def get_data_augmentation(img_size, mean, std, ttype):
    """Get data augmentation.

    Args:
        img_size (int): The desired output dim.
        mean: Per-channel mean used for normalization.
        std: Per-channel standard deviation used for normalization.
        ttype (str): The kind of augmentation; 'train' adds random
            crops and horizontal flips.

    Returns:
        Transform: A transform.
    """
    # setup data augmentation
    mean = np.array(mean).astype(float)
    std = np.array(std).astype(float)
    # rescale statistics given in the 0-255 range
    if mean[0] > 1:
        mean = mean / 255.0
        std = std / 255.0
    normalize = transforms.Normalize(mean=mean, std=std)
    if ttype == 'train':
        return transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop((img_size, img_size)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ])
    return transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop((img_size, img_size)),
        transforms.ToTensor(),
        normalize
    ])
def _check_triplets(T, X, y):
    for t in range(T.shape[1]):
        i, j, k = T[:, t]
        assert y[i] == y[j] and y[i] != y[k]

def _generate_triplet(inds, tars, imps):
    k1 = tars.shape[0]
    k2 = imps.shape[0]
    n = inds.shape[0]
    T = np.zeros((3, n * k1 * k2), dtype=int)
    T[0] = matl.repmat(inds.reshape(-1, 1), 1, k1 * k2).flatten()
    T[1] = matl.repmat(tars.T.flatten().reshape(-1, 1), 1, k2).flatten()
    T[2] = matl.repmat(imps.reshape(-1, 1), k1 * n, 1).flatten()
    return T
def build_triplets(X, y, n_target=3):
    """Compute all triplet constraints.

    Args:
        X (np.array, shape = [n_samples, n_features]): The input data.
        y (np.array, shape = (n_samples,) ): The labels.
        n_target (int, optional): Defaults to 3. The number of targets.

    Returns:
        (np.array, shape = [3, n_triplets]): The triplet indices.
    """
    dist = pairwise_distances(X, X)
    np.fill_diagonal(dist, np.inf)
    # list of triplets
    Triplets = list()
    for label in np.unique(y):
        targets = np.where(label == y)[0]
        imposters = np.where(label != y)[0]
        # skip groups of examples with too few targets or no imposters
        if len(targets) > 1 and len(imposters) > 0:
            # compute the targets
            true_n_targets = min(n_target, len(targets) - 1)
            index = np.argsort(dist[targets, :][:, targets], axis=0)[
                0:true_n_targets]
            Triplets.append(_generate_triplet(
                targets, targets[index], imposters))
    # if the set of triplets is not empty
    if len(Triplets) > 0:
        Triplets = np.hstack(Triplets)
    return Triplets
def build_batches(X, y, n_target=3, batch_size=128, n_jobs=-1):
    """Build the batches from training data.

    Args:
        X (np.array, shape = [n_samples, n_features]): The input data.
        y (np.array, shape = (n_samples,) ): The labels.
        n_target (int, optional): Defaults to 3. Number of targets.
        batch_size (int, optional): Defaults to 128.
        n_jobs (int, optional): Defaults to -1. Passed to KMeans.

    Returns:
        List((indices, triplets)): A list of indices and the corresponding
            triplet constraints.
    """
    assert len(X) == len(y)
    # compute the clusters using kmeans
    n_clusters = max(1, X.shape[0] // batch_size)
    model = KMeans(n_clusters=n_clusters, n_jobs=n_jobs).fit(X)
    # generate all triplet constraints
    batches = list()
    for label in np.unique(model.labels_):
        index = np.where(model.labels_ == label)[0]
        # if the cluster holds more examples than needed, subsample
        if len(index) > batch_size:
            index = np.random.choice(index, batch_size, replace=False)
        triplets = build_triplets(X[index], y[index], n_target=n_target)
        if len(triplets) > 0:
            batches.append((index, triplets))
    return batches
| [
"numpy.argsort",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"numpy.unique",
"sklearn.cluster.KMeans",
"numpy.random.choice",
"torchvision.transforms.CenterCrop",
"numpy.fill_diagonal",
"tqdm.tqdm",
"torchvision.transforms.RandomHorizontalFlip",
"sklearn.metrics.pairwi... | [((1525, 1557), 'os.path.join', 'os.path.join', (['"""output"""', 'filename'], {}), "('output', filename)\n", (1537, 1557), False, 'import os\n'), ((1562, 1589), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (1572, 1589), False, 'import torch\n'), ((2205, 2245), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (2225, 2245), False, 'from torchvision import transforms\n'), ((2959, 2999), 'numpy.zeros', 'np.zeros', (['(3, n * k1 * k2)'], {'dtype': 'np.int'}), '((3, n * k1 * k2), dtype=np.int)\n', (2967, 2999), True, 'import numpy as np\n'), ((3600, 3624), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X', 'X'], {}), '(X, X)\n', (3618, 3624), False, 'from sklearn.metrics import pairwise_distances\n'), ((3629, 3659), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dist', 'np.inf'], {}), '(dist, np.inf)\n', (3645, 3659), True, 'import numpy as np\n'), ((3722, 3734), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3731, 3734), True, 'import numpy as np\n'), ((5068, 5092), 'numpy.unique', 'np.unique', (['model.labels_'], {}), '(model.labels_)\n', (5077, 5092), True, 'import numpy as np\n'), ((1006, 1021), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1019, 1021), False, 'import torch\n'), ((1401, 1420), 'numpy.vstack', 'np.vstack', (['features'], {}), '(features)\n', (1410, 1420), True, 'import numpy as np\n'), ((1422, 1439), 'numpy.vstack', 'np.vstack', (['labels'], {}), '(labels)\n', (1431, 1439), True, 'import numpy as np\n'), ((4326, 4345), 'numpy.hstack', 'np.hstack', (['Triplets'], {}), '(Triplets)\n', (4335, 4345), True, 'import numpy as np\n'), ((1067, 1084), 'tqdm.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (1071, 1084), False, 'from tqdm import tqdm\n'), ((1640, 1684), 'os.path.join', 'os.path.join', (['"""output"""', '"""model_best.pth.tar"""'], {}), "('output', 'model_best.pth.tar')\n", (1652, 1684), False, 'import os\n'), ((2020, 2034), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (2028, 2034), True, 'import numpy as np\n'), ((2062, 2075), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (2070, 2075), True, 'import numpy as np\n'), ((2564, 2593), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (2581, 2593), False, 'from torchvision import transforms\n'), ((2603, 2646), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (2624, 2646), False, 'from torchvision import transforms\n'), ((2656, 2677), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2675, 2677), False, 'from torchvision import transforms\n'), ((3754, 3774), 'numpy.where', 'np.where', (['(label == y)'], {}), '(label == y)\n', (3762, 3774), True, 'import numpy as np\n'), ((3798, 3818), 'numpy.where', 'np.where', (['(label != y)'], {}), '(label != y)\n', (3806, 3818), True, 'import numpy as np\n'), ((4938, 4982), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_clusters', 'n_jobs': 'n_jobs'}), '(n_clusters=n_clusters, n_jobs=n_jobs)\n', (4944, 4982), False, 'from sklearn.cluster import KMeans\n'), ((5110, 5142), 'numpy.where', 'np.where', (['(model.labels_ == label)'], {}), '(model.labels_ == label)\n', (5118, 5142), True, 'import numpy as np\n'), ((5262, 5312), 'numpy.random.choice', 'np.random.choice', (['index', 'batch_size'], {'replace': '(False)'}), '(index, batch_size, 
replace=False)\n', (5278, 5312), True, 'import numpy as np\n'), ((2320, 2349), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (2337, 2349), False, 'from torchvision import transforms\n'), ((2363, 2406), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (2384, 2406), False, 'from torchvision import transforms\n'), ((2420, 2453), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2451, 2453), False, 'from torchvision import transforms\n'), ((2467, 2488), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2486, 2488), False, 'from torchvision import transforms\n'), ((4059, 4107), 'numpy.argsort', 'np.argsort', (['dist[targets, :][:, targets]'], {'axis': '(0)'}), '(dist[targets, :][:, targets], axis=0)\n', (4069, 4107), True, 'import numpy as np\n')] |
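A minimal usage sketch for build_batches above. The synthetic data is arbitrary, chosen only to exercise the code path, and the call assumes a scikit-learn version in which KMeans still accepts the n_jobs keyword (it was removed in later releases).

X_demo = np.random.randn(400, 16)           # 400 examples, 16-d features
y_demo = np.random.randint(0, 5, size=400)  # 5 synthetic classes
for indices, triplets in build_batches(X_demo, y_demo, n_target=3, batch_size=128):
    # each triplet column is (anchor, target, imposter), indexed within the batch
    _check_triplets(triplets, X_demo[indices], y_demo[indices])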
import tkinter as tk
import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn import metrics #Import scikit-learn metrics module for accuracy calculation
import csv
import ipython_genutils as ip
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
from collections import Counter
import datetime
import wordcloud
import program as p1
import program2 as p2
PLOT_COLORS = ["#268bd2", "#0052CC", "#FF5722", "#b58900", "#003f5c"]
pd.options.display.float_format = '{:.2f}'.format
sns.set(style="ticks")
plt.rc('figure', figsize=(8, 5), dpi=100)
plt.rc('axes', labelpad=20, facecolor="#ffffff", linewidth=0.4, grid=True, labelsize=14)
plt.rc('patch', linewidth=0)
plt.rc('xtick.major', width=0.2)
plt.rc('ytick.major', width=0.2)
plt.rc('grid', color='#9E9E9E', linewidth=0.4)
plt.rc('font', family='Arial', weight='400', size=10)
plt.rc('text', color='#282828')
plt.rc('savefig', pad_inches=0.3, dpi=300)
# prepare the data for the decision tree: coerce each column to numeric,
# then fill the NaNs produced at each step (note that replace() acts on
# the whole frame, not only the column that was just coerced)
df = pd.read_csv(r"./TrendingJoTrending.csv", header=None)
df[0] = pd.to_numeric(df[0], errors='coerce')
df = df.replace(np.nan, 0, regex=True)
df[1] = pd.to_numeric(df[1], errors='coerce')
df = df.replace(np.nan, 1, regex=True)
df[2] = pd.to_numeric(df[2], errors='coerce')
df = df.replace(np.nan, 2, regex=True)
df[3] = pd.to_numeric(df[3], errors='coerce')
df = df.replace(np.nan, 3, regex=True)
df[4] = pd.to_numeric(df[4], errors='coerce')
df = df.replace(np.nan, 4, regex=True)
df[5] = pd.to_numeric(df[5], errors='coerce')
df = df.replace(np.nan, 5, regex=True)
feature_cols = [0,1,2,3,4,5]
X = df[feature_cols] # Features
y = df[6] # Target variable
class Windows(tk.Tk):
    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        container = tk.Frame(self)
        container.grid()
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        for F in (StartPage, PageOne, PageTwo, PageThree):
            frame = F(container, self)
            self.frames[F] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(StartPage)

    def show_frame(self, cont):
        frame = self.frames[cont]
        frame.tkraise()
class StartPage(tk.Frame):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # self.title("Trending Youtube Videos Statistics")
        # self.geometry('500x400')
        # self.config(bg="lightblue")
        lbl = tk.Label(self, text='Welcome to TYVS', fg='red')
        lbl.place(relx=.5, rely=.4, anchor="center")
        btn = tk.Button(self, text='Start', command=lambda: controller.show_frame(PageOne), fg='blue')
        btn.place(relx=.5, rely=.5, anchor="center")
class PageOne(tk.Frame):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        tk.Label(self, text="Video data").grid(row=1, column=2)
        tk.Label(self, text="Publish date").grid(row=2, column=1)
        tk.Label(self, text="Cat ID").grid(row=3, column=1)
        tk.Label(self, text="Views").grid(row=4, column=1)
        tk.Label(self, text="Likes").grid(row=5, column=1)
        tk.Label(self, text="Dislikes").grid(row=6, column=1)
        tk.Label(self, text="Comments").grid(row=7, column=1)
        e1 = tk.Entry(self)
        e1.grid(row=2, column=2)
        e2 = tk.Entry(self)
        e2.grid(row=3, column=2)
        e3 = tk.Entry(self)
        e3.grid(row=4, column=2)
        e4 = tk.Entry(self)
        e4.grid(row=5, column=2)
        e5 = tk.Entry(self)
        e5.grid(row=6, column=2)
        e6 = tk.Entry(self)
        e6.grid(row=7, column=2)
        lbl5 = tk.Label(self, text="")
        lbl5.grid()

        def insert():
            fields = [e1.get(), e2.get(), e3.get(), e4.get(), e5.get(), e6.get()]
            # newline='' avoids blank rows when csv.writer appends on Windows
            with open(r'./TrendingJoTrending.csv', 'a', newline='') as f:
                writer = csv.writer(f)
                writer.writerow(fields)

        def predict():
            df = pd.read_csv(r"./TrendingJoTrending.csv", header=None)
            df[0] = pd.to_numeric(df[0], errors='coerce')
            df = df.replace(np.nan, 0, regex=True)
            df[1] = pd.to_numeric(df[1], errors='coerce')
            df = df.replace(np.nan, 1, regex=True)
            df[2] = pd.to_numeric(df[2], errors='coerce')
            df = df.replace(np.nan, 2, regex=True)
            df[3] = pd.to_numeric(df[3], errors='coerce')
            df = df.replace(np.nan, 3, regex=True)
            df[4] = pd.to_numeric(df[4], errors='coerce')
            df = df.replace(np.nan, 4, regex=True)
            df[5] = pd.to_numeric(df[5], errors='coerce')
            df = df.replace(np.nan, 5, regex=True)
            feature_cols = [0, 1, 2, 3, 4, 5]
            X = df[feature_cols]  # Features
            y = df[6]
            test_row = df.shape[0] - 1  # predict the last, freshly inserted row
            train_idx = np.arange(X.shape[0]) != test_row
            test_idx = np.arange(X.shape[0]) == test_row
            print(df.shape[0])
            X_train = X[train_idx]
            y_train = y[train_idx]
            X_test = X[test_idx]
            y_test = y[test_idx]
            # Create Decision Tree classifier object
            clf = DecisionTreeClassifier(max_depth=5)
            # Train Decision Tree Classifier
            clf = clf.fit(X_train, y_train)
            # Predict the response for the test dataset
            y_pred = clf.predict(X_test)
            lbl2.config(text=y_pred)

        inBtn = tk.Button(self, text="Insert", command=insert, fg='blue')
        inBtn.grid(row=8, column=1)
        predBtn = tk.Button(self, text="Predict", command=predict, fg='blue')
        predBtn.grid(row=8, column=2)
        deskBtn = tk.Button(self, text="Descript", command=lambda: controller.show_frame(PageTwo), fg='blue')
        deskBtn.grid(row=8, column=3)
        nextBtn = tk.Button(self, text="<NAME>", command=lambda: controller.show_frame(PageThree), fg='blue')
        nextBtn.grid(row=8, column=4)
        lblOut = tk.Label(self, text="Result:")
        lblOut.grid(row=9, column=1)
        lbl2 = tk.Label(self, text="")
        lbl2.grid(row=9, column=2)
        lbl3 = tk.Label(self, text="")
        lbl3.grid(row=9, column=3)
        lbl4 = tk.Label(self, text="")
        lbl4.grid(row=9, column=4)
class PageTwo(tk.Frame):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        backBtn = tk.Button(self, text="Back", command=lambda: controller.show_frame(PageOne), fg='blue')
        backBtn.grid()
        exitBtn = tk.Button(self, text="Exit", command=lambda: controller.show_frame(StartPage), fg='blue')
        exitBtn.grid()
        lbl = tk.Label(self, text="Dataset properties")
        lbl.grid()
        text = tk.Text(self)
        text.config(width=70, height=10)
        text.insert(tk.END, df.describe())
        text.grid()
        lbl = tk.Label(self, text="Correlation between features")
        lbl.grid()
        text2 = tk.Text(self)
        text2.config(width=50, height=7)
        text2.insert(tk.END, df.corr())
        text2.grid()
        lbl = tk.Label(self, text="Correlations")
        lbl.grid()
        corrBtn = tk.Button(self, text="Show", command=lambda: p1.show(), fg='blue')
        corrBtn.grid()
class PageThree(tk.Frame):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        backBtn = tk.Button(self, text="Back", command=lambda: controller.show_frame(PageTwo), fg='blue')
        backBtn.grid()
        exitBtn = tk.Button(self, text="Exit", command=lambda: controller.show_frame(StartPage), fg='blue')
        exitBtn.grid()
        lbl = tk.Label(self, text="Predict based on a row in the dataset:")
        lbl.grid()
        lbl1 = tk.Label(self, text="")
        lbl1.grid()
        e = tk.Entry(self)
        e.grid()
        lblA = tk.Label(self, text="")
        lblA.grid()
        lblM = tk.Label(self, text="")
        lblM.grid()

        def pred():
            test_row = int(e.get())  # index of the row to hold out and predict
            train_idx = np.arange(X.shape[0]) != test_row
            test_idx = np.arange(X.shape[0]) == test_row
            print(df.shape[0])
            X_train = X[train_idx]
            y_train = y[train_idx]
            X_test = X[test_idx]
            y_test = y[test_idx]
            # Create Decision Tree classifier object
            clf = DecisionTreeClassifier(max_depth=5)
            # Train Decision Tree Classifier
            clf = clf.fit(X_train, y_train)
            # Predict the response for the test dataset
            y_pred = clf.predict(X_test)
            lblA.config(text=metrics.accuracy_score(y_test, y_pred))
            if y_pred[0] == np.asarray(y_test).reshape(-1)[0]:
                lblM.config(text="Predicted correctly")
            elif y_pred[0] != np.asarray(y_test).reshape(-1)[0]:
                lblM.config(text="Did not predict correctly")

        predBtn = tk.Button(self, text="Predict", command=pred, fg='blue')
        predBtn.grid()
app = Windows()
app.mainloop()
| [
"tkinter.Label",
"tkinter.Text",
"tkinter.Tk.__init__",
"csv.writer",
"tkinter.Frame.__init__",
"pandas.read_csv",
"tkinter.Button",
"sklearn.metrics.accuracy_score",
"tkinter.Entry",
"numpy.asarray",
"sklearn.tree.DecisionTreeClassifier",
"program.show",
"numpy.arange",
"matplotlib.pyplot... | [((715, 737), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (722, 737), True, 'import seaborn as sns\n'), ((738, 779), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'figsize': '(8, 5)', 'dpi': '(100)'}), "('figure', figsize=(8, 5), dpi=100)\n", (744, 779), True, 'from matplotlib import pyplot as plt\n'), ((780, 872), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelpad': '(20)', 'facecolor': '"""#ffffff"""', 'linewidth': '(0.4)', 'grid': '(True)', 'labelsize': '(14)'}), "('axes', labelpad=20, facecolor='#ffffff', linewidth=0.4, grid=True,\n labelsize=14)\n", (786, 872), True, 'from matplotlib import pyplot as plt\n'), ((869, 897), 'matplotlib.pyplot.rc', 'plt.rc', (['"""patch"""'], {'linewidth': '(0)'}), "('patch', linewidth=0)\n", (875, 897), True, 'from matplotlib import pyplot as plt\n'), ((898, 930), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick.major"""'], {'width': '(0.2)'}), "('xtick.major', width=0.2)\n", (904, 930), True, 'from matplotlib import pyplot as plt\n'), ((931, 963), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick.major"""'], {'width': '(0.2)'}), "('ytick.major', width=0.2)\n", (937, 963), True, 'from matplotlib import pyplot as plt\n'), ((964, 1010), 'matplotlib.pyplot.rc', 'plt.rc', (['"""grid"""'], {'color': '"""#9E9E9E"""', 'linewidth': '(0.4)'}), "('grid', color='#9E9E9E', linewidth=0.4)\n", (970, 1010), True, 'from matplotlib import pyplot as plt\n'), ((1011, 1064), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Arial"""', 'weight': '"""400"""', 'size': '(10)'}), "('font', family='Arial', weight='400', size=10)\n", (1017, 1064), True, 'from matplotlib import pyplot as plt\n'), ((1065, 1096), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'color': '"""#282828"""'}), "('text', color='#282828')\n", (1071, 1096), True, 'from matplotlib import pyplot as plt\n'), ((1097, 1139), 'matplotlib.pyplot.rc', 'plt.rc', (['"""savefig"""'], {'pad_inches': '(0.3)', 'dpi': '(300)'}), "('savefig', pad_inches=0.3, dpi=300)\n", (1103, 1139), True, 'from matplotlib import pyplot as plt\n'), ((1171, 1223), 'pandas.read_csv', 'pd.read_csv', (['"""./TrendingJoTrending.csv"""'], {'header': 'None'}), "('./TrendingJoTrending.csv', header=None)\n", (1182, 1223), True, 'import pandas as pd\n'), ((1233, 1270), 'pandas.to_numeric', 'pd.to_numeric', (['df[0]'], {'errors': '"""coerce"""'}), "(df[0], errors='coerce')\n", (1246, 1270), True, 'import pandas as pd\n'), ((1319, 1356), 'pandas.to_numeric', 'pd.to_numeric', (['df[1]'], {'errors': '"""coerce"""'}), "(df[1], errors='coerce')\n", (1332, 1356), True, 'import pandas as pd\n'), ((1405, 1442), 'pandas.to_numeric', 'pd.to_numeric', (['df[2]'], {'errors': '"""coerce"""'}), "(df[2], errors='coerce')\n", (1418, 1442), True, 'import pandas as pd\n'), ((1491, 1528), 'pandas.to_numeric', 'pd.to_numeric', (['df[3]'], {'errors': '"""coerce"""'}), "(df[3], errors='coerce')\n", (1504, 1528), True, 'import pandas as pd\n'), ((1577, 1614), 'pandas.to_numeric', 'pd.to_numeric', (['df[4]'], {'errors': '"""coerce"""'}), "(df[4], errors='coerce')\n", (1590, 1614), True, 'import pandas as pd\n'), ((1663, 1700), 'pandas.to_numeric', 'pd.to_numeric', (['df[5]'], {'errors': '"""coerce"""'}), "(df[5], errors='coerce')\n", (1676, 1700), True, 'import pandas as pd\n'), ((1901, 1938), 'tkinter.Tk.__init__', 'tk.Tk.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1915, 1938), True, 'import tkinter as tk\n'), ((1955, 1969), 'tkinter.Frame', 'tk.Frame', 
(['self'], {}), '(self)\n', (1963, 1969), True, 'import tkinter as tk\n'), ((2531, 2562), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'parent'], {}), '(self, parent)\n', (2548, 2562), True, 'import tkinter as tk\n'), ((2705, 2753), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Welcome to TYVS"""', 'fg': '"""red"""'}), "(self, text='Welcome to TYVS', fg='red')\n", (2713, 2753), True, 'import tkinter as tk\n'), ((3041, 3072), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'parent'], {}), '(self, parent)\n', (3058, 3072), True, 'import tkinter as tk\n'), ((3539, 3553), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (3547, 3553), True, 'import tkinter as tk\n'), ((3602, 3616), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (3610, 3616), True, 'import tkinter as tk\n'), ((3665, 3679), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (3673, 3679), True, 'import tkinter as tk\n'), ((3728, 3742), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (3736, 3742), True, 'import tkinter as tk\n'), ((3791, 3805), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (3799, 3805), True, 'import tkinter as tk\n'), ((3854, 3868), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (3862, 3868), True, 'import tkinter as tk\n'), ((3934, 3957), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (3942, 3957), True, 'import tkinter as tk\n'), ((5790, 5847), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Insert"""', 'command': 'insert', 'fg': '"""blue"""'}), "(self, text='Insert', command=insert, fg='blue')\n", (5799, 5847), True, 'import tkinter as tk\n'), ((5898, 5957), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Predict"""', 'command': 'predict', 'fg': '"""blue"""'}), "(self, text='Predict', command=predict, fg='blue')\n", (5907, 5957), True, 'import tkinter as tk\n'), ((6293, 6326), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Rezultati:"""'}), "(self, text='Rezultati:')\n", (6301, 6326), True, 'import tkinter as tk\n'), ((6375, 6398), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (6383, 6398), True, 'import tkinter as tk\n'), ((6445, 6468), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (6453, 6468), True, 'import tkinter as tk\n'), ((6515, 6538), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (6523, 6538), True, 'import tkinter as tk\n'), ((6657, 6688), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'parent'], {}), '(self, parent)\n', (6674, 6688), True, 'import tkinter as tk\n'), ((6977, 7018), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Vetite e Datasetit"""'}), "(self, text='Vetite e Datasetit')\n", (6985, 7018), True, 'import tkinter as tk\n'), ((7052, 7065), 'tkinter.Text', 'tk.Text', (['self'], {}), '(self)\n', (7059, 7065), True, 'import tkinter as tk\n'), ((7182, 7231), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Lidhshmeria mes elementeve"""'}), "(self, text='Lidhshmeria mes elementeve')\n", (7190, 7231), True, 'import tkinter as tk\n'), ((7266, 7279), 'tkinter.Text', 'tk.Text', (['self'], {}), '(self)\n', (7273, 7279), True, 'import tkinter as tk\n'), ((7393, 7429), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Korrelacionet"""'}), "(self, text='Korrelacionet')\n", (7401, 7429), True, 'import tkinter as tk\n'), ((7657, 7688), 'tkinter.Frame.__init__', 'tk.Frame.__init__', (['self', 'parent'], {}), '(self, parent)\n', (7674, 7688), 
True, 'import tkinter as tk\n'), ((7968, 8032), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Parashiko ne baze te rreshtit ne dataset:"""'}), "(self, text='Parashiko ne baze te rreshtit ne dataset:')\n", (7976, 8032), True, 'import tkinter as tk\n'), ((8064, 8087), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (8072, 8087), True, 'import tkinter as tk\n'), ((8119, 8133), 'tkinter.Entry', 'tk.Entry', (['self'], {}), '(self)\n', (8127, 8133), True, 'import tkinter as tk\n'), ((8165, 8188), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (8173, 8188), True, 'import tkinter as tk\n'), ((8221, 8244), 'tkinter.Label', 'tk.Label', (['self'], {'text': '""""""'}), "(self, text='')\n", (8229, 8244), True, 'import tkinter as tk\n'), ((9246, 9302), 'tkinter.Button', 'tk.Button', (['self'], {'text': '"""Prediko"""', 'command': 'pred', 'fg': '"""blue"""'}), "(self, text='Prediko', command=pred, fg='blue')\n", (9255, 9302), True, 'import tkinter as tk\n'), ((4262, 4314), 'pandas.read_csv', 'pd.read_csv', (['"""./TrendingJoTrending.csv"""'], {'header': 'None'}), "('./TrendingJoTrending.csv', header=None)\n", (4273, 4314), True, 'import pandas as pd\n'), ((4336, 4373), 'pandas.to_numeric', 'pd.to_numeric', (['df[0]'], {'errors': '"""coerce"""'}), "(df[0], errors='coerce')\n", (4349, 4373), True, 'import pandas as pd\n'), ((4446, 4483), 'pandas.to_numeric', 'pd.to_numeric', (['df[1]'], {'errors': '"""coerce"""'}), "(df[1], errors='coerce')\n", (4459, 4483), True, 'import pandas as pd\n'), ((4556, 4593), 'pandas.to_numeric', 'pd.to_numeric', (['df[2]'], {'errors': '"""coerce"""'}), "(df[2], errors='coerce')\n", (4569, 4593), True, 'import pandas as pd\n'), ((4666, 4703), 'pandas.to_numeric', 'pd.to_numeric', (['df[3]'], {'errors': '"""coerce"""'}), "(df[3], errors='coerce')\n", (4679, 4703), True, 'import pandas as pd\n'), ((4776, 4813), 'pandas.to_numeric', 'pd.to_numeric', (['df[4]'], {'errors': '"""coerce"""'}), "(df[4], errors='coerce')\n", (4789, 4813), True, 'import pandas as pd\n'), ((4886, 4923), 'pandas.to_numeric', 'pd.to_numeric', (['df[5]'], {'errors': '"""coerce"""'}), "(df[5], errors='coerce')\n", (4899, 4923), True, 'import pandas as pd\n'), ((5478, 5513), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(5)'}), '(max_depth=5)\n', (5500, 5513), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((8693, 8728), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(5)'}), '(max_depth=5)\n', (8715, 8728), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3089, 3130), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Te dhenat e videos"""'}), "(self, text='Te dhenat e videos')\n", (3097, 3130), True, 'import tkinter as tk\n'), ((3160, 3199), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Data e publikuar"""'}), "(self, text='Data e publikuar')\n", (3168, 3199), True, 'import tkinter as tk\n'), ((3229, 3258), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Cat ID"""'}), "(self, text='Cat ID')\n", (3237, 3258), True, 'import tkinter as tk\n'), ((3288, 3316), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Views"""'}), "(self, text='Views')\n", (3296, 3316), True, 'import tkinter as tk\n'), ((3346, 3374), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Likes"""'}), "(self, text='Likes')\n", (3354, 3374), True, 'import tkinter as tk\n'), ((3404, 3434), 'tkinter.Label', 'tk.Label', (['self'], {'text': 
'"""Dilikes"""'}), "(self, text='Dilikes')\n", (3412, 3434), True, 'import tkinter as tk\n'), ((3464, 3495), 'tkinter.Label', 'tk.Label', (['self'], {'text': '"""Comments"""'}), "(self, text='Comments')\n", (3472, 3495), True, 'import tkinter as tk\n'), ((4161, 4174), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4171, 4174), False, 'import csv\n'), ((5162, 5183), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (5171, 5183), True, 'import numpy as np\n'), ((5215, 5236), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (5224, 5236), True, 'import numpy as np\n'), ((8376, 8397), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (8385, 8397), True, 'import numpy as np\n'), ((8429, 8450), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (8438, 8450), True, 'import numpy as np\n'), ((7509, 7518), 'program.show', 'p1.show', ([], {}), '()\n', (7516, 7518), True, 'import program as p1\n'), ((8939, 8977), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (8961, 8977), False, 'from sklearn import metrics\n'), ((9005, 9023), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (9015, 9023), True, 'import numpy as np\n'), ((9124, 9142), 'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (9134, 9142), True, 'import numpy as np\n')] |