code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import argparse
import SimpleITK as sitk
import skimage
import nibabel as ni
import numpy as np
import matplotlib.pyplot as plt
import skimage.measure
from pathlib import Path
def isocont(img_arr,
mask_arr,
b_out_labeled_mask=False,
b_class_components=True,
b_use_percentile_threshold=True,
percentile_threshold=25,
maximum_threshold=10,
verbose=True):
"""
Computes a mask based on percentile/relative maximum SUV value thresholds inside all
connected components of the original mask.
Args:
img_arr: array of a PET-SUV image.
mask_arr: array of a tumor mask.
b_out_labeled_mask: Output labeled component mask, no thresholding.
b_class_components: Detected connected components and use component based threshold.
b_use_percentile_threshold: Use percentile based thresholds (otherwise relative maximum value thresholds
are used.
percentile_threshold: Set percentile (SUV value) threshold in percent.
maximum_threshold: Set relative maximum (SUV value) threshold.
verbose:
Returns: array of the new mask.
"""
maximum_threshold = float(maximum_threshold)
# Get numpy array from sitk objects.
# amask = sitk.GetArrayFromImage(mask)
# animg = sitk.GetArrayFromImage(img)
amask = mask_arr
animg = img_arr
# Classify connected image components
if b_class_components:
amask_comp, num_comp = skimage.measure.label(amask,
neighbors=None,
background=None,
return_num=True,
connectivity=None)
else:
amask_comp = amask
num_comp = 1
print(f'Detected {num_comp} connected components.')
# Create new mask based on the selected threshold.
amask_th = np.zeros_like(amask)
# Calculate SUV value thresholds for each connected component.
for comp in range(num_comp):
if verbose:
print(f'Component {comp}')
sel_comp = (amask_comp == (comp + 1))
# Get SUV values inside the selected component.
suv_values = animg[sel_comp]
suv_max = np.max(suv_values)
if verbose:
print(f'#SUV values {suv_values.shape}')
print(f'Max. SUV value: {np.max(suv_values)}')
print(f'{percentile_threshold} percentile SUV value threshold {np.percentile(suv_values, percentile_threshold)}')
print(f'Relative max. SUV value threshold ({maximum_threshold}%): {suv_max * maximum_threshold/100.0}')
if b_use_percentile_threshold:
th = np.percentile(suv_values, percentile_threshold)
else:
th = suv_max * maximum_threshold/100.0
if verbose:
print(f'Used threshold: {th}')
amask_th = amask_th + np.logical_and(np.greater_equal(animg, th), sel_comp)
return amask_th
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--pet', help='pet image .nii file', required=True)
parser.add_argument('-m', '--mask', help='mask .nii file', required=True)
parser.add_argument('-o', '--out', help='output .nii file')
parser.add_argument('-l', '--Praefix', help='praefix for output filename')
parser.add_argument('--ThresholdType', help='threshold type',
choices={'Maximum', 'Percentile'})
parser.add_argument('-p', '--SetPercentileThreshold', help='percentile threshold (percent)', type=int)
parser.add_argument('-q', '--SetMaximumThreshold', help='relative maximum threshold (percent)', type=int)
parser.add_argument('-c', '--OutputCompLabels', help='output component label mask', action='store_true')
parser.add_argument('-g', '--NoComponents', help='do not detect connected components first', action='store_true')
parser.add_argument('-v', '--Verbose', help='verbose', action='store_true')
args = parser.parse_args()
path_pet = Path(args.pet)
path_mask = Path(args.mask)
# todo test if nii file exists
path_mask_out = args.out
if not path_mask_out:
praefix = 'iso_'
if args.Praefix:
praefix = args.Praefix
path_mask_out = path_mask.parent.joinpath(praefix + path_mask.name)
maximum_threshold = 10.0
if args.SetMaximumThreshold:
maximum_threshold = float(args.SetMaximumThreshold)
percentile_threshold = 25
if args.SetPercentileThreshold:
percentile_threshold = args.SetPercentileThreshold
b_use_percentile_threshold = True
if args.ThresholdType:
if args.ThresholdType == 'Maximum':
b_use_percentile_threshold = False
# Read image and mask data.
img = sitk.ReadImage(str(path_pet))
mask = sitk.ReadImage(str(path_mask))
verbose = args.Verbose
b_class_components = not args.NoComponents
b_out_labeled_mask = args.OutputCompLabels
amask_th = isocont(img,
mask,
b_out_labeled_mask=b_out_labeled_mask,
b_class_components=b_class_components,
b_use_percentile_threshold=b_use_percentile_threshold,
percentile_threshold=percentile_threshold,
maximum_threshold=maximum_threshold,
verbose=verbose)
# Create mask_out sitk img. If out_labeled_mask, the output image just
# contains the labeled connected component mask.
mask_out = sitk.GetImageFromArray(amask_th.astype(np.uint8))
if b_out_labeled_mask:
mask_out = sitk.GetImageFromArray(amask_comp.astype(np.uint8))
# Copy MetaData from original mask.
mask_out.SetDirection(mask.GetDirection())
mask_out.SetOrigin(mask.GetOrigin())
mask_out.SetSpacing(mask.GetSpacing())
writer = sitk.ImageFileWriter()
writer.SetFileName(str(path_mask_out))
writer.Execute(mask_out)
if __name__ == '__main__':
main()
| [
"numpy.greater_equal",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.max",
"SimpleITK.ImageFileWriter",
"numpy.percentile",
"numpy.zeros_like",
"skimage.measure.label"
] | [((2034, 2054), 'numpy.zeros_like', 'np.zeros_like', (['amask'], {}), '(amask)\n', (2047, 2054), True, 'import numpy as np\n'), ((3136, 3161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3159, 3161), False, 'import argparse\n'), ((4166, 4180), 'pathlib.Path', 'Path', (['args.pet'], {}), '(args.pet)\n', (4170, 4180), False, 'from pathlib import Path\n'), ((4197, 4212), 'pathlib.Path', 'Path', (['args.mask'], {}), '(args.mask)\n', (4201, 4212), False, 'from pathlib import Path\n'), ((6010, 6032), 'SimpleITK.ImageFileWriter', 'sitk.ImageFileWriter', ([], {}), '()\n', (6030, 6032), True, 'import SimpleITK as sitk\n'), ((1538, 1640), 'skimage.measure.label', 'skimage.measure.label', (['amask'], {'neighbors': 'None', 'background': 'None', 'return_num': '(True)', 'connectivity': 'None'}), '(amask, neighbors=None, background=None, return_num=\n True, connectivity=None)\n', (1559, 1640), False, 'import skimage\n'), ((2375, 2393), 'numpy.max', 'np.max', (['suv_values'], {}), '(suv_values)\n', (2381, 2393), True, 'import numpy as np\n'), ((2826, 2873), 'numpy.percentile', 'np.percentile', (['suv_values', 'percentile_threshold'], {}), '(suv_values, percentile_threshold)\n', (2839, 2873), True, 'import numpy as np\n'), ((3049, 3076), 'numpy.greater_equal', 'np.greater_equal', (['animg', 'th'], {}), '(animg, th)\n', (3065, 3076), True, 'import numpy as np\n'), ((2505, 2523), 'numpy.max', 'np.max', (['suv_values'], {}), '(suv_values)\n', (2511, 2523), True, 'import numpy as np\n'), ((2602, 2649), 'numpy.percentile', 'np.percentile', (['suv_values', 'percentile_threshold'], {}), '(suv_values, percentile_threshold)\n', (2615, 2649), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
BATCH_SIZE = 32
N_CLASSES = 1000
N_PREFETCH = 64
N_VAL = 100000
BRIGHTNESS_DELTA = 0.1
CONTRAST_MIN = 0.8
CONTRAST_MAX = 1.0 / 0.8
HUE_DELTA = 0.05
SATURATION_MIN = 0.8
SATURATION_MAX = 1.0 / 0.8
def imagenet(split, size=(320, 320), augment=False):
"""
Loads a split of the ImageNet dataset.
:param string split: Should be 'train', 'test', or 'val'.
:param tuple size: The height and width (in that order) to which
images should be resized.
:param bool augment: Whether to apply data augmentation.
:return (tensorflow.data.Dataset, int): The dataset split and number
of batches in the split.
"""
def preprocess(item):
# float32 is better behaved during augmentation
x = tf.image.convert_image_dtype(item['image'], tf.float32)
if augment:
x = tf.image.random_flip_left_right(x)
x = tf.image.random_brightness(x, BRIGHTNESS_DELTA)
x = tf.image.random_contrast(x, CONTRAST_MIN, CONTRAST_MAX)
x = tf.image.random_hue(x, HUE_DELTA)
x = tf.image.random_saturation(x, SATURATION_MIN, SATURATION_MAX)
# Brightness and contrast shifts may put pixel values out of
# the range [0, 1]
x = tf.clip_by_value(x, 0.0, 1.0)
# Pad after augmentation so zero padded pixels aren't changed
x = tf.image.resize_with_pad(x, size[0], size[1])
y = tf.one_hot(item['label'], N_CLASSES)
return x, y
def prepare(dataset):
return (
dataset
.map(preprocess)
.repeat()
.prefetch(N_PREFETCH)
.batch(BATCH_SIZE))
if split in ['train', 'val']:
full, info = tfds.load('imagenet2012', split='train', with_info=True)
n_train = info.splits['train'].num_examples - N_VAL
if split == 'train':
return prepare(full.take(n_train)), _n_batches(n_train)
else:
return prepare(full.skip(n_train)), _n_batches(N_VAL)
elif split == 'test':
test, info = tfds.load('imagenet2012', split='val', with_info=True)
n_test = info.splits['val'].num_examples
return prepare(test), _n_batches(n_test)
else:
raise ValueError('split must be "train", "val", or "test"')
def _n_batches(n_items):
return int(np.ceil(n_items / BATCH_SIZE))
| [
"tensorflow.one_hot",
"numpy.ceil",
"tensorflow.image.convert_image_dtype",
"tensorflow.image.random_flip_left_right",
"tensorflow_datasets.load",
"tensorflow.image.resize_with_pad",
"tensorflow.image.random_hue",
"tensorflow.image.random_brightness",
"tensorflow.clip_by_value",
"tensorflow.image.... | [((818, 873), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (["item['image']", 'tf.float32'], {}), "(item['image'], tf.float32)\n", (846, 873), True, 'import tensorflow as tf\n'), ((1444, 1489), 'tensorflow.image.resize_with_pad', 'tf.image.resize_with_pad', (['x', 'size[0]', 'size[1]'], {}), '(x, size[0], size[1])\n', (1468, 1489), True, 'import tensorflow as tf\n'), ((1502, 1538), 'tensorflow.one_hot', 'tf.one_hot', (["item['label']", 'N_CLASSES'], {}), "(item['label'], N_CLASSES)\n", (1512, 1538), True, 'import tensorflow as tf\n'), ((1812, 1868), 'tensorflow_datasets.load', 'tfds.load', (['"""imagenet2012"""'], {'split': '"""train"""', 'with_info': '(True)'}), "('imagenet2012', split='train', with_info=True)\n", (1821, 1868), True, 'import tensorflow_datasets as tfds\n'), ((2426, 2455), 'numpy.ceil', 'np.ceil', (['(n_items / BATCH_SIZE)'], {}), '(n_items / BATCH_SIZE)\n', (2433, 2455), True, 'import numpy as np\n'), ((911, 945), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {}), '(x)\n', (942, 945), True, 'import tensorflow as tf\n'), ((962, 1009), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', 'BRIGHTNESS_DELTA'], {}), '(x, BRIGHTNESS_DELTA)\n', (988, 1009), True, 'import tensorflow as tf\n'), ((1026, 1081), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x', 'CONTRAST_MIN', 'CONTRAST_MAX'], {}), '(x, CONTRAST_MIN, CONTRAST_MAX)\n', (1050, 1081), True, 'import tensorflow as tf\n'), ((1098, 1131), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['x', 'HUE_DELTA'], {}), '(x, HUE_DELTA)\n', (1117, 1131), True, 'import tensorflow as tf\n'), ((1148, 1209), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['x', 'SATURATION_MIN', 'SATURATION_MAX'], {}), '(x, SATURATION_MIN, SATURATION_MAX)\n', (1174, 1209), True, 'import tensorflow as tf\n'), ((1331, 1360), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (1347, 1360), True, 'import tensorflow as tf\n'), ((2153, 2207), 'tensorflow_datasets.load', 'tfds.load', (['"""imagenet2012"""'], {'split': '"""val"""', 'with_info': '(True)'}), "('imagenet2012', split='val', with_info=True)\n", (2162, 2207), True, 'import tensorflow_datasets as tfds\n')] |
# Copyright 2021 Neuron-AI GitHub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from random import normalvariate
from secrets import randbelow
from typing import Tuple
from numpy import array, ndarray
def make_stairs(
classes: int, features: int, samples: int = 1000, sd: float = 0.3, factor: float = 3
) -> Tuple[ndarray, ndarray]:
"""Makes a dataset that contain clusters ascending on the line x=y
Parameters
----------
classes : int
The number of classes
features : int
The number of features
samples : int, optional
The number of samples, by default 1000
sd : float, optional
The standard deviation, by default 0.3
factor : float, optional
How large each cluster should spread, to increase overlapping for noiser data, by default 3
Returns
-------
Tuple[ndarray, ndarray]
A tuple - (X, y): X is the training samples, y are the targets aka. labels
"""
X = []
y = []
for _ in range(samples):
label = randbelow(classes)
to_append = [normalvariate(label, sd) * factor for _ in range(features)]
X.append(to_append)
y.append(label)
return array(X), array(y)
| [
"numpy.array",
"secrets.randbelow",
"random.normalvariate"
] | [((1639, 1657), 'secrets.randbelow', 'randbelow', (['classes'], {}), '(classes)\n', (1648, 1657), False, 'from secrets import randbelow\n'), ((1804, 1812), 'numpy.array', 'array', (['X'], {}), '(X)\n', (1809, 1812), False, 'from numpy import array, ndarray\n'), ((1814, 1822), 'numpy.array', 'array', (['y'], {}), '(y)\n', (1819, 1822), False, 'from numpy import array, ndarray\n'), ((1679, 1703), 'random.normalvariate', 'normalvariate', (['label', 'sd'], {}), '(label, sd)\n', (1692, 1703), False, 'from random import normalvariate\n')] |
import cv2 as cv
import numpy as np
import dlib
import utils.Constants as Constants
from skimage import io
def getFaceFeature(imagePath, rawImagePath):
image = io.imread(imagePath)
predictor_path = Constants.DATA_ROOT_PATH + '/' + Constants.PROGRAM_DATA_PATH + '/' + 'shape_predictor_68_face_landmarks.dat'
face_rec_model_path = Constants.DATA_ROOT_PATH + '/' + Constants.PROGRAM_DATA_PATH + '/' + 'dlib_face_recognition_resnet_model_v1.dat'
# prepare predict
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
detector = dlib.get_frontal_face_detector()
dets = detector(image, 0)
for k, d in enumerate(dets):
shape = sp(image, d)
# 提取特征
face_descriptor = facerec.compute_face_descriptor(image, shape)
v = np.array(face_descriptor)
return v | [
"dlib.face_recognition_model_v1",
"dlib.shape_predictor",
"numpy.array",
"dlib.get_frontal_face_detector",
"skimage.io.imread"
] | [((163, 183), 'skimage.io.imread', 'io.imread', (['imagePath'], {}), '(imagePath)\n', (172, 183), False, 'from skimage import io\n'), ((477, 513), 'dlib.shape_predictor', 'dlib.shape_predictor', (['predictor_path'], {}), '(predictor_path)\n', (497, 513), False, 'import dlib\n'), ((526, 577), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (['face_rec_model_path'], {}), '(face_rec_model_path)\n', (556, 577), False, 'import dlib\n'), ((591, 623), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (621, 623), False, 'import dlib\n'), ((799, 824), 'numpy.array', 'np.array', (['face_descriptor'], {}), '(face_descriptor)\n', (807, 824), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 22:40:02 2015
@author: tsz
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# Table B8.1 (Excel: Tables 1) - MWh
annualHeatingLoad = np.array([4.296, 4.773, 5.709, 5.226, 5.596, 4.882, 4.872, 5.362])
# Table B8.2 (Excel: Tables 1) - MWh
annualCoolingLoad = np.array([6.137, 6.433, 7.079, 7.278, 7.964, 6.492, 6.492, 6.778])
# Table B8.3 (Excel: Tables 2) - kW
peakHeatingLoad = np.array([3.437, 3.940, 4.045, 4.258, 4.037, 3.931, 4.354])
# Table B8.4 (Excel: Tables 2) - kW
peakCoolingLoad = np.array([6.194, 5.965, 6.656, 6.827, 6.286, 6.486, 6.812])
loadsYErr = [[np.mean(annualHeatingLoad) - np.min(annualHeatingLoad),
np.mean(annualCoolingLoad) - np.min(annualCoolingLoad),
np.mean(peakHeatingLoad) - np.min(peakHeatingLoad),
np.mean(peakCoolingLoad) - np.min(peakCoolingLoad)],
[np.max(annualHeatingLoad) - np.mean(annualHeatingLoad),
np.max(annualCoolingLoad) - np.mean(annualCoolingLoad),
np.max(peakHeatingLoad) - np.mean(peakHeatingLoad),
np.max(peakCoolingLoad) - np.mean(peakCoolingLoad)]]
loads = [np.mean(annualHeatingLoad),
np.mean(annualCoolingLoad),
np.mean(peakHeatingLoad),
np.mean(peakCoolingLoad)]
# Table B8.13 (Excel: Tables 6) - kWh/m^2a
solarRadiationNorth = np.array([427, 434, 456, 407, 457, 367, 453])
solarRadiationEast = np.array([959, 1155, 1083, 1217, 1082, 1101, 962])
solarRadiationWest = np.array([1086, 1079, 1003, 857, 1002, 1012, 1090])
solarRadiationSouth = np.array([1456, 1566, 1476, 1468, 1474, 1522, 1468])
solarRadiationHorizontal = np.array([1797, 1831, 1832, 1832, 1832, 1832, 1832])
# Table B8.14 (Excel: Tables 6) - kWh/m^2a
solarRadiationWindowSouth = np.array([946, 1051, 962, 954, 926, 984, 914])
# HOURLY HEATING & COOLING LOAD DATA
# CASE 600 JAN 4
# Data extracted from RESULTS5-2.xls, sheet "data for charts", cells B687-I710
# all values in kWh
ESP_jan4 = [3.25, 3.409, 3.392, 3.381, 3.417, 3.432, 3.421, 3.337, 2.767,
1.497, 0.151, -0.771, -2.66, -3.575, -3.527, -2.435, -0.356,
0.243, 1.53, 2.321, 2.641, 2.899, 3.017, 3.008]
BLAST_jan4 = [3.801823, 3.910936, 3.865797, 3.919602, 3.940134, 3.925815,
3.936957, 3.702264, 2.675222, 1.383322, 0, -1.225471, -2.487117,
-2.957941, -2.631008, -1.34913, 0, 0.9501685, 2.377919,
2.866487, 3.212612, 3.284511, 3.330747, 3.387975]
DOE21D_jan4 = [3.926, 4.035, 4.013, 4.041, 4.045, 4.036, 4.045, 3.857, 2.559,
0.843, 0, -1.552, -2.854, -3.398, -3.116, -1.82, 0, 0.775,
2.232, 2.933, 3.323, 3.487, 3.514, 3.561]
SRES_SUN_jan4 = [4.127, 4.258, 4.229, 4.22, 4.22, 4.221, 4.222, 4.09, 2.902,
1.275, 0, -1.066, -2.586, -3.225, -2.826, -1.552, -0.001,
0.8, 2.34, 2.988, 3.365, 3.532, 3.605, 3.663]
# No data for SRES
S3PAS_jan4 = [3.925, 4.037, 4.003, 4.001, 4.001, 4.001, 4.001, 3.898, 2.706,
1.151, 0, -1.036, -2.498, -3.085, -2.637, -1.345, 0, 0.88,
2.331, 2.949, 3.309, 3.347, 3.494, 3.527]
TRNSYS_jan4 = [3.767, 3.867, 3.903, 3.894, 3.917, 3.931, 3.931, 3.753, 2.423,
0.797, -0.035, -1.435, -2.720, -3.156, -2.844, -1.716, 0,
0.773, 2.301, 2.967, 3.278, 3.461, 3.5, 3.472]
TASE_jan4 = [4.225, 4.354, 4.321, 4.308, 4.303, 4.307, 4.307, 4.167, 2.912,
1.466, 0, -0.424, -2.364, -2.759, -2.431, -1.14, 0, 1.292, 2.445,
2.941, 3.405, 3.594, 3.696, 3.769]
ref_jan4 = np.vstack((ESP_jan4, BLAST_jan4, DOE21D_jan4, SRES_SUN_jan4,
S3PAS_jan4, TRNSYS_jan4, TASE_jan4))
#HOURLY INCIDENT SOLAR RADIATION, CLEAR DAY, JULY 27
#CASE 600
#WEST SURFACE
# Data extracted from RESULTS5-2.xls, sheet "data for charts", cells B483-I506
# all values in Wh/m^2
ESP_jul27_west = [0, 0, 0, 0, 0.4, 17.9, 58.5, 91.8, 113.7, 131.2, 145.7, 153.8,
267.7, 464.8, 635.1, 738.3, 623.9, 296.9, 68.8, 1.6, 0, 0, 0, 0]
# no data for BLAST
DOE21D_jul27_west = [0, 0, 0, 0, 0, 19.96, 65.86, 97.11, 116.89, 128.97, 138.05,
141.34, 243.51, 462.83, 664.62, 786.35, 649.05, 243.11, 43.19,
0, 0, 0, 0, 0]
SRES_SUN_jul27_west = [0, 0, 0, 0, 0.1667, 27.8275, 77.3025, 99.989, 120.051,
134.9631, 149.5847, 153.134, 266.449, 461.2772, 635.5103,
719.323, 502.7889, 141.2425, 25.2472, 0, 0, 0, 0, 0]
SRES_jul27_west = [0, 0, 0, 0, 0.14, 29.94, 89.2, 112.85, 121.41, 123.51, 125.06,
121.07, 117.94, 333.68, 525.35, 634.59, 478.44, 140.3, 21.96,
0, 0, 0, 0, 0]
S3PAS_jul27_west = [0, 0, 0, 0, 0, 28, 80, 104, 125, 140, 154, 157, 270, 463, 635,
715, 497, 139, 24, 0, 0, 0, 0, 0]
TRNSYS_jul27_west = [0, 0, 0, 0, 0.17, 27.01, 63, 71.22, 85.58, 98.03, 109.14,
113.06, 235.17, 453.89, 652.5, 762.78, 568.33, 158, 26.6,
0, 0, 0, 0, 0]
TASE_jul27_west = [0, 0, 0, 0, 0.2, 25.7, 62.1, 72, 92.6, 112.8, 136.75, 150.9,
382.5, 576.81, 744.52, 807.29, 541.68, 145.25, 24.9, 0, 0,
0, 0, 0]
ref_jul27_west = np.vstack((ESP_jul27_west, DOE21D_jul27_west,
SRES_SUN_jul27_west, SRES_jul27_west,
S3PAS_jul27_west, TRNSYS_jul27_west,
TASE_jul27_west))
#HOURLY INCIDENT SOLAR RADIATION, CLEAR DAY, JULY 27
#CASE 600
#SOUTH SURFACE
# Data extracted from RESULTS5-2.xls, sheet "data for charts", cells B442-I465
# all values in Wh/m^2
ESP_jul27_south = [0, 0, 0, 0, 0.5, 17.9, 58.6, 100.4, 205.9, 326, 415.1,
454.8, 455.6, 408.6, 321.2, 200.6, 102.3, 78.8, 37.1,
1.1, 0, 0, 0, 0]
# no data for BLAST
DOE21D_jul27_south = [0, 0, 0, 0, 0, 20.11, 70.22, 108.13, 219.58, 343.67,
435.54, 475.37, 488.49, 443.66, 367.07, 246.71, 119.19,
68.86, 19.75, 0, 0, 0, 0, 0]
SRES_SUN_jul27_south = [0, 0, 0, 0, 0.167, 27.8275, 77.3025, 99.9892, 211.006,
331.006, 418.1717, 454.9942, 464.5689, 413.6364,
334.284, 211.9439, 111.7408, 73.07917, 17.7025,
0, 0, 0, 0, 0]
SRES_jul27_south = [0, 0, 0, 0, 0.14, 29.94, 89.2, 112.85, 164.86, 291.84,
389.26, 437.2, 455.75, 413.67, 341.53, 223.71, 105.72,
68.47, 14.35, 0, 0, 0, 0, 0]
S3PAS_jul27_south = [0, 0, 0, 0, 0, 28, 80, 104, 217, 336, 423, 459, 469,
418, 340, 218, 115, 74, 18, 0, 0, 0, 0, 0]
TRNSYS_jul27_south = [0, 0, 0, 0, 0.17, 27.01, 63, 71.22, 187.72, 314.17,
404.44, 443.61, 452.5, 400.56, 316.94, 188.89, 86.03,
69.78, 17.61, 0, 0, 0, 0, 0]
TASE_jul27_south = [0, 0, 0, 0, 0.2, 25.7, 62.1, 107.47, 232.33, 349.16,
430.22, 459.85, 462.28, 404.57, 319.26, 193.61, 132.3,
76.6, 18.05, 0, 0, 0, 0, 0]
ref_jul27_south = np.vstack((ESP_jul27_south, DOE21D_jul27_south,
SRES_SUN_jul27_south, SRES_jul27_south,
S3PAS_jul27_south, TRNSYS_jul27_south,
TASE_jul27_south))
#HOURLY INCIDENT SOLAR RADIATION CLOUDY DAY, MARCH 5
#CASE 600
#WEST SURFACE
# Data extracted from RESULTS5-2.xls, sheet "data for charts", cells B401-I424
# all values in Wh/m^2
ESP_mar5_west = [0, 0, 0, 0, 0, 0, 1.6, 13.5, 31, 47.1, 59.7, 67.4, 70.1,
67.3, 58.9, 44.9, 27.6, 9, 0, 0, 0, 0, 0, 0]
# no data for BLAST
DOE21D_mar5_west = [0, 0, 0, 0, 0, 0, 1.8, 13.92, 31.75, 45.24, 56.63,
61.58, 63.7, 61.46, 51.67, 37.2, 16.72, 2.52, 0, 0,
0, 0, 0, 0]
SRES_SUN_mar5_west = [0, 0, 0, 0, 0, 0, 2.997, 20.183, 37.955, 53.244, 64.467,
69.982, 70.806, 65.663, 54.921, 39.487, 21.291, 3.27,
0, 0, 0, 0, 0, 0]
SRES_mar5_west = [0, 0, 0, 0, 0, 0, 3, 20.24, 38.01, 53.27, 53.37, 57.91,
58.3, 54.15, 45.38, 32.7, 17.7, 2.73, 0, 0, 0, 0, 0, 0]
S3PAS_mar5_west = [0, 0, 0, 0, 0, 0, 3, 20, 38, 53, 64, 70, 71, 66, 55, 40,
21, 3, 0, 0, 0, 0, 0, 0]
TRNSYS_mar5_west = [0, 0, 0, 0, 0, 0, 2.99, 20.17, 37.92, 53.17, 64.39,
69.89, 70.75, 65.69, 55.03, 39.61, 21.42, 3.28, 0, 0,
0, 0, 0, 0]
TASE_mar5_west = [0, 0, 0, 0, 0, 0, 3, 20.15, 37.9, 53.15, 64.4, 69.95,
71.16, 66.02, 55.16, 39.73, 21.6, 0, 0, 0, 0, 0, 0, 0]
ref_mar5_west = np.vstack((ESP_mar5_west, DOE21D_mar5_west,
SRES_SUN_mar5_west, SRES_mar5_west,
S3PAS_mar5_west, TRNSYS_mar5_west,
TASE_mar5_west))
#HOURLY INCIDENT SOLAR RADIATION CLOUDY DAY, MARCH 5
#CASE 600
#SOUTH SURFACE
# Data extracted from RESULTS5-2.xls, sheet "data for charts", cells B360-I383
# all values in Wh/m^2
ESP_mar5_south = [0, 0, 0, 0, 0, 0, 1.6, 13.8, 31.6, 48.3, 61.6, 69.3, 71.7,
68.1, 58.9, 44.4, 26.9, 8.7, 0, 0, 0, 0, 0, 0]
# no data for BLAST
DOE21D_mar5_south = [0, 0, 0, 0, 0, 0, 1.5, 12.59, 30.01, 46.23, 59.31,
65.05, 66.98, 63.11, 51.79, 37.13, 19.14, 4.62, 0,
0, 0, 0, 0, 0]
SRES_SUN_mar5_south = [0, 0, 0, 0, 0, 0, 3.0447, 20.646, 38.884, 54.566,
65.973, 71.784, 72.2839, 66.4075, 54.8997, 38.8836,
20.4797, 3.0447, 0, 0, 0, 0, 0, 0]
SRES_mar5_south = [0, 0, 0, 0, 0, 0, 3.02, 20.59, 38.83, 54.53, 54.77, 59.65,
60.1, 55.24, 45.68, 32.37, 17.06, 2.54, 0, 0, 0, 0, 0, 0]
S3PAS_mar5_south = [0, 0, 0, 0, 0, 0, 3, 21, 39, 55, 66, 72, 72, 66, 55, 39,
20, 3, 0, 0, 0, 0, 0, 0]
TRNSYS_mar5_south = [0, 0, 0, 0, 0, 0, 3.05, 20.69, 38.94, 54.67, 66.08,
71.92, 72.42, 66.53, 55, 38.94, 20.52, 3.05, 0, 0, 0,
0, 0, 0]
TASE_mar5_south = [0, 0, 0, 0, 0, 0, 3, 20.68, 38.94, 54.56, 65.99, 71.74,
72.3, 66.38, 54.8, 38.84, 20.46, 0, 0, 0, 0, 0, 0, 0]
ref_mar5_south = np.vstack((ESP_mar5_south, DOE21D_mar5_south,
SRES_SUN_mar5_south, SRES_mar5_south,
S3PAS_mar5_south, TRNSYS_mar5_south,
TASE_mar5_south))
def plot_results(results):
"""
"""
# Extract results
heatingLoad = results.heatingLoad
coolingLoad = results.coolingLoad
heatingPeak = results.heatingPeak
coolingPeak = results.coolingPeak
solarNorth = results.solarGainsNorth
solarEast = results.solarGainsEast
solarWest = results.solarGainsWest
solarSouth = results.solarGainsSouth
solarHorizontal = results.solarGainsHorizontal
solarWindowSouth = results.solarGainsWindowSouth
values = [heatingLoad, coolingLoad, heatingPeak, coolingPeak]
fig = plt.figure(figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
plt.plot(range(len(values)), values, marker="o", ms=10, ls="none", color="red", label="Results")
plt.plot(range(len(values)), loads, marker='_', ms=10, ls="none", label="Reference", color="blue")
plt.errorbar(x=range(len(values)),
y=loads,
yerr=loadsYErr,
marker='_', ms=10,
ls='none', mec='blue', capsize=10)
plt.xlim(-1, 4)
plt.legend(numpoints=1)
def plot_west_surface(results):
mar5 = results.solarWestMarch5
jul27 = results.solarWestJuly27
time = np.linspace(1, 24, 24)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(time, mar5, label="result mar5")
ax1.plot(time, np.min(ref_mar5_west, axis=0),
"k--", label="ref_mar5_min")
ax1.plot(time, np.max(ref_mar5_west, axis=0),
"k--", label="ref_mar5_max")
ax1.legend()
plt.show()
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot(time, jul27, label="result jul27")
ax2.plot(time, np.min(ref_jul27_west, axis=0),
"k--", label="ref_jul27_min")
ax2.plot(time, np.max(ref_jul27_west, axis=0),
"k--", label="ref_jul27_max")
ax2.legend()
plt.show()
def plot_south_surface(results):
mar5 = results.solarSouthMarch5
jul27 = results.solarSouthJuly27
time = np.linspace(1, 24, 24)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(time, mar5, label="result mar5")
ax1.plot(time, np.min(ref_mar5_south, axis=0),
"k--", label="ref_mar5_min")
ax1.plot(time, np.max(ref_mar5_south, axis=0),
"k--", label="ref_mar5_max")
ax1.legend()
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.plot(time, jul27, label="result jul27")
ax2.plot(time, np.min(ref_jul27_south, axis=0),
"k--", label="ref_jul27_min")
ax2.plot(time, np.max(ref_jul27_south, axis=0),
"k--", label="ref_jul27_max")
ax2.legend()
def plot_january_4(results):
jan4 = results.january4
time = np.linspace(1, 24, 24)
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(time, jan4, label="result jan4")
ax1.plot(time, np.min(ref_jan4, axis=0),
"k--", label="ref_jan4_min")
ax1.plot(time, np.max(ref_jan4, axis=0),
"k--", label="ref_jan4_max")
ax1.legend(loc=3)
| [
"numpy.mean",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.vstack",
"numpy.min",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((246, 312), 'numpy.array', 'np.array', (['[4.296, 4.773, 5.709, 5.226, 5.596, 4.882, 4.872, 5.362]'], {}), '([4.296, 4.773, 5.709, 5.226, 5.596, 4.882, 4.872, 5.362])\n', (254, 312), True, 'import numpy as np\n'), ((371, 437), 'numpy.array', 'np.array', (['[6.137, 6.433, 7.079, 7.278, 7.964, 6.492, 6.492, 6.778]'], {}), '([6.137, 6.433, 7.079, 7.278, 7.964, 6.492, 6.492, 6.778])\n', (379, 437), True, 'import numpy as np\n'), ((493, 551), 'numpy.array', 'np.array', (['[3.437, 3.94, 4.045, 4.258, 4.037, 3.931, 4.354]'], {}), '([3.437, 3.94, 4.045, 4.258, 4.037, 3.931, 4.354])\n', (501, 551), True, 'import numpy as np\n'), ((608, 667), 'numpy.array', 'np.array', (['[6.194, 5.965, 6.656, 6.827, 6.286, 6.486, 6.812]'], {}), '([6.194, 5.965, 6.656, 6.827, 6.286, 6.486, 6.812])\n', (616, 667), True, 'import numpy as np\n'), ((1434, 1479), 'numpy.array', 'np.array', (['[427, 434, 456, 407, 457, 367, 453]'], {}), '([427, 434, 456, 407, 457, 367, 453])\n', (1442, 1479), True, 'import numpy as np\n'), ((1502, 1552), 'numpy.array', 'np.array', (['[959, 1155, 1083, 1217, 1082, 1101, 962]'], {}), '([959, 1155, 1083, 1217, 1082, 1101, 962])\n', (1510, 1552), True, 'import numpy as np\n'), ((1575, 1626), 'numpy.array', 'np.array', (['[1086, 1079, 1003, 857, 1002, 1012, 1090]'], {}), '([1086, 1079, 1003, 857, 1002, 1012, 1090])\n', (1583, 1626), True, 'import numpy as np\n'), ((1649, 1701), 'numpy.array', 'np.array', (['[1456, 1566, 1476, 1468, 1474, 1522, 1468]'], {}), '([1456, 1566, 1476, 1468, 1474, 1522, 1468])\n', (1657, 1701), True, 'import numpy as np\n'), ((1729, 1781), 'numpy.array', 'np.array', (['[1797, 1831, 1832, 1832, 1832, 1832, 1832]'], {}), '([1797, 1831, 1832, 1832, 1832, 1832, 1832])\n', (1737, 1781), True, 'import numpy as np\n'), ((1854, 1900), 'numpy.array', 'np.array', (['[946, 1051, 962, 954, 926, 984, 914]'], {}), '([946, 1051, 962, 954, 926, 984, 914])\n', (1862, 1900), True, 'import numpy as np\n'), ((3662, 3763), 'numpy.vstack', 'np.vstack', (['(ESP_jan4, BLAST_jan4, DOE21D_jan4, SRES_SUN_jan4, S3PAS_jan4, TRNSYS_jan4,\n TASE_jan4)'], {}), '((ESP_jan4, BLAST_jan4, DOE21D_jan4, SRES_SUN_jan4, S3PAS_jan4,\n TRNSYS_jan4, TASE_jan4))\n', (3671, 3763), True, 'import numpy as np\n'), ((5275, 5417), 'numpy.vstack', 'np.vstack', (['(ESP_jul27_west, DOE21D_jul27_west, SRES_SUN_jul27_west, SRES_jul27_west,\n S3PAS_jul27_west, TRNSYS_jul27_west, TASE_jul27_west)'], {}), '((ESP_jul27_west, DOE21D_jul27_west, SRES_SUN_jul27_west,\n SRES_jul27_west, S3PAS_jul27_west, TRNSYS_jul27_west, TASE_jul27_west))\n', (5284, 5417), True, 'import numpy as np\n'), ((7144, 7293), 'numpy.vstack', 'np.vstack', (['(ESP_jul27_south, DOE21D_jul27_south, SRES_SUN_jul27_south,\n SRES_jul27_south, S3PAS_jul27_south, TRNSYS_jul27_south, TASE_jul27_south)'], {}), '((ESP_jul27_south, DOE21D_jul27_south, SRES_SUN_jul27_south,\n SRES_jul27_south, S3PAS_jul27_south, TRNSYS_jul27_south, TASE_jul27_south))\n', (7153, 7293), True, 'import numpy as np\n'), ((8710, 8845), 'numpy.vstack', 'np.vstack', (['(ESP_mar5_west, DOE21D_mar5_west, SRES_SUN_mar5_west, SRES_mar5_west,\n S3PAS_mar5_west, TRNSYS_mar5_west, TASE_mar5_west)'], {}), '((ESP_mar5_west, DOE21D_mar5_west, SRES_SUN_mar5_west,\n SRES_mar5_west, S3PAS_mar5_west, TRNSYS_mar5_west, TASE_mar5_west))\n', (8719, 8845), True, 'import numpy as np\n'), ((10295, 10437), 'numpy.vstack', 'np.vstack', (['(ESP_mar5_south, DOE21D_mar5_south, SRES_SUN_mar5_south, SRES_mar5_south,\n S3PAS_mar5_south, TRNSYS_mar5_south, TASE_mar5_south)'], {}), '((ESP_mar5_south, DOE21D_mar5_south, SRES_SUN_mar5_south,\n SRES_mar5_south, S3PAS_mar5_south, TRNSYS_mar5_south, TASE_mar5_south))\n', (10304, 10437), True, 'import numpy as np\n'), ((1230, 1256), 'numpy.mean', 'np.mean', (['annualHeatingLoad'], {}), '(annualHeatingLoad)\n', (1237, 1256), True, 'import numpy as np\n'), ((1268, 1294), 'numpy.mean', 'np.mean', (['annualCoolingLoad'], {}), '(annualCoolingLoad)\n', (1275, 1294), True, 'import numpy as np\n'), ((1306, 1330), 'numpy.mean', 'np.mean', (['peakHeatingLoad'], {}), '(peakHeatingLoad)\n', (1313, 1330), True, 'import numpy as np\n'), ((1342, 1366), 'numpy.mean', 'np.mean', (['peakCoolingLoad'], {}), '(peakCoolingLoad)\n', (1349, 1366), True, 'import numpy as np\n'), ((11096, 11122), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (11106, 11122), True, 'import matplotlib.pyplot as plt\n'), ((11615, 11630), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(4)'], {}), '(-1, 4)\n', (11623, 11630), True, 'import matplotlib.pyplot as plt\n'), ((11635, 11658), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'numpoints': '(1)'}), '(numpoints=1)\n', (11645, 11658), True, 'import matplotlib.pyplot as plt\n'), ((11777, 11799), 'numpy.linspace', 'np.linspace', (['(1)', '(24)', '(24)'], {}), '(1, 24, 24)\n', (11788, 11799), True, 'import numpy as np\n'), ((11816, 11828), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11826, 11828), True, 'import matplotlib.pyplot as plt\n'), ((12115, 12125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12123, 12125), True, 'import matplotlib.pyplot as plt\n'), ((12142, 12154), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12152, 12154), True, 'import matplotlib.pyplot as plt\n'), ((12446, 12456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12454, 12456), True, 'import matplotlib.pyplot as plt\n'), ((12578, 12600), 'numpy.linspace', 'np.linspace', (['(1)', '(24)', '(24)'], {}), '(1, 24, 24)\n', (12589, 12600), True, 'import numpy as np\n'), ((12617, 12629), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12627, 12629), True, 'import matplotlib.pyplot as plt\n'), ((12930, 12942), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12940, 12942), True, 'import matplotlib.pyplot as plt\n'), ((13307, 13329), 'numpy.linspace', 'np.linspace', (['(1)', '(24)', '(24)'], {}), '(1, 24, 24)\n', (13318, 13329), True, 'import numpy as np\n'), ((13346, 13358), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13356, 13358), True, 'import matplotlib.pyplot as plt\n'), ((11927, 11956), 'numpy.min', 'np.min', (['ref_mar5_west'], {'axis': '(0)'}), '(ref_mar5_west, axis=0)\n', (11933, 11956), True, 'import numpy as np\n'), ((12020, 12049), 'numpy.max', 'np.max', (['ref_mar5_west'], {'axis': '(0)'}), '(ref_mar5_west, axis=0)\n', (12026, 12049), True, 'import numpy as np\n'), ((12254, 12284), 'numpy.min', 'np.min', (['ref_jul27_west'], {'axis': '(0)'}), '(ref_jul27_west, axis=0)\n', (12260, 12284), True, 'import numpy as np\n'), ((12349, 12379), 'numpy.max', 'np.max', (['ref_jul27_west'], {'axis': '(0)'}), '(ref_jul27_west, axis=0)\n', (12355, 12379), True, 'import numpy as np\n'), ((12728, 12758), 'numpy.min', 'np.min', (['ref_mar5_south'], {'axis': '(0)'}), '(ref_mar5_south, axis=0)\n', (12734, 12758), True, 'import numpy as np\n'), ((12822, 12852), 'numpy.max', 'np.max', (['ref_mar5_south'], {'axis': '(0)'}), '(ref_mar5_south, axis=0)\n', (12828, 12852), True, 'import numpy as np\n'), ((13042, 13073), 'numpy.min', 'np.min', (['ref_jul27_south'], {'axis': '(0)'}), '(ref_jul27_south, axis=0)\n', (13048, 13073), True, 'import numpy as np\n'), ((13138, 13169), 'numpy.max', 'np.max', (['ref_jul27_south'], {'axis': '(0)'}), '(ref_jul27_south, axis=0)\n', (13144, 13169), True, 'import numpy as np\n'), ((13457, 13481), 'numpy.min', 'np.min', (['ref_jan4'], {'axis': '(0)'}), '(ref_jan4, axis=0)\n', (13463, 13481), True, 'import numpy as np\n'), ((13545, 13569), 'numpy.max', 'np.max', (['ref_jan4'], {'axis': '(0)'}), '(ref_jan4, axis=0)\n', (13551, 13569), True, 'import numpy as np\n'), ((683, 709), 'numpy.mean', 'np.mean', (['annualHeatingLoad'], {}), '(annualHeatingLoad)\n', (690, 709), True, 'import numpy as np\n'), ((712, 737), 'numpy.min', 'np.min', (['annualHeatingLoad'], {}), '(annualHeatingLoad)\n', (718, 737), True, 'import numpy as np\n'), ((754, 780), 'numpy.mean', 'np.mean', (['annualCoolingLoad'], {}), '(annualCoolingLoad)\n', (761, 780), True, 'import numpy as np\n'), ((783, 808), 'numpy.min', 'np.min', (['annualCoolingLoad'], {}), '(annualCoolingLoad)\n', (789, 808), True, 'import numpy as np\n'), ((825, 849), 'numpy.mean', 'np.mean', (['peakHeatingLoad'], {}), '(peakHeatingLoad)\n', (832, 849), True, 'import numpy as np\n'), ((852, 875), 'numpy.min', 'np.min', (['peakHeatingLoad'], {}), '(peakHeatingLoad)\n', (858, 875), True, 'import numpy as np\n'), ((892, 916), 'numpy.mean', 'np.mean', (['peakCoolingLoad'], {}), '(peakCoolingLoad)\n', (899, 916), True, 'import numpy as np\n'), ((919, 942), 'numpy.min', 'np.min', (['peakCoolingLoad'], {}), '(peakCoolingLoad)\n', (925, 942), True, 'import numpy as np\n'), ((959, 984), 'numpy.max', 'np.max', (['annualHeatingLoad'], {}), '(annualHeatingLoad)\n', (965, 984), True, 'import numpy as np\n'), ((987, 1013), 'numpy.mean', 'np.mean', (['annualHeatingLoad'], {}), '(annualHeatingLoad)\n', (994, 1013), True, 'import numpy as np\n'), ((1030, 1055), 'numpy.max', 'np.max', (['annualCoolingLoad'], {}), '(annualCoolingLoad)\n', (1036, 1055), True, 'import numpy as np\n'), ((1058, 1084), 'numpy.mean', 'np.mean', (['annualCoolingLoad'], {}), '(annualCoolingLoad)\n', (1065, 1084), True, 'import numpy as np\n'), ((1101, 1124), 'numpy.max', 'np.max', (['peakHeatingLoad'], {}), '(peakHeatingLoad)\n', (1107, 1124), True, 'import numpy as np\n'), ((1127, 1151), 'numpy.mean', 'np.mean', (['peakHeatingLoad'], {}), '(peakHeatingLoad)\n', (1134, 1151), True, 'import numpy as np\n'), ((1168, 1191), 'numpy.max', 'np.max', (['peakCoolingLoad'], {}), '(peakCoolingLoad)\n', (1174, 1191), True, 'import numpy as np\n'), ((1194, 1218), 'numpy.mean', 'np.mean', (['peakCoolingLoad'], {}), '(peakCoolingLoad)\n', (1201, 1218), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as pl
def make_chimeric_enzyme():
AA = '-ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
aa_to_id = {y: x for x, y in enumerate(list(AA))}
id_to_aa = {x: y for x, y in enumerate(list(AA))}
root_dir = '/home/hyang/bio/erf/data/stability/enzyme'
pdb_list = ['6THT_A', '6EQE_A', '5ZOA_A']
for pdb in tqdm(pdb_list):
# read sequences
protein_name = []
seq = []
ins_num = []
with open(f'{root_dir}/{pdb}.fil.a3m', 'rt') as msa_file:
for line in msa_file:
if line[0] == '>':
protein_name.append(line.split(' ')[0][1:])
if line[0] != '>':
s = np.array([aa_to_id[x] for x in line[:-1]])
s2 = s[s <= 26] # remove lower case letter
ins_num.append(len(s[s > 26]))
seq.append(s2)
seq = np.vstack(seq)
ins_num = np.array(ins_num)
# make chimeric seq
ref = seq[0] # the first seq is the PDB seq
chimeric = [''.join([id_to_aa[x] for x in ref])]
hamming_dist = [0]
seq_len = len(ref)
del_frac = [0]
ins_frac = ins_num / seq_len
for s in seq[1:]:
idx = (s == 0) # 0 is the index of '-'
del_frac.append(len(s[idx]) * 1.0 / seq_len)
s[idx] = ref[idx] # replace '-' to AA in reference seq
hamming_dist.append(np.sum(s != ref) * 1.0 / seq_len)
chimeric.append(''.join([id_to_aa[x] for x in s]))
df = pd.DataFrame({'protein_name': protein_name,
'ins_frac': ins_frac, 'del_frac': del_frac,
'hamming_dist': hamming_dist, 'seq': chimeric})
df.to_csv(f'{root_dir}/{pdb}.chimeric', index=False, float_format='%.3f')
def plot_enzyme_score():
root_dir = '/home/hyang/bio/erf/data/stability/enzyme'
pdb_list = ['6THT_A', '6EQE_A', '5ZOA_A']
for pdb in pdb_list:
df = pd.read_csv(f'{root_dir}/{pdb}_chimeric_energy.csv')
energy = df['energy_score'].values
idx = (energy > 0)
energy2 = energy[idx]
print(energy2.min(), energy2.max())
# fig = pl.figure()
# pl.hist(energy2, bins=np.arange(25)*10+910)
# pl.xlabel('energy score')
# pl.ylabel('N')
# pl.title(f'{pdb} chimeric')
# pl.savefig(f'{root_dir}/{pdb}_energy_hist.pdf')
# pl.close()
idx = (energy > 0) & (energy < 970)
protein_name = df['protein_name'].values[idx]
print(protein_name, energy[idx])
def check_uniprot():
root_dir = '/home/plover/study/bio/play/erf/data/stability/enzyme'
pdb_list = ['6THT_A', '6EQE_A', '5ZOA_A']
petase_family = pd.read_csv(f'{root_dir}/PETase_subfamily.csv')['UniProtAcc'].values
protein_name_all = np.array([])
for pdb in pdb_list:
df = pd.read_csv(f'{root_dir}/{pdb}_chimeric_energy.csv')
protein_name = df['protein_name'].apply(lambda x: x[3:].split('|')[0])
protein_name_all = np.append(protein_name_all, protein_name)
p_list = []
for p in petase_family:
if p in protein_name_all:
p_list.append(p)
| [
"pandas.read_csv",
"tqdm.tqdm",
"numpy.append",
"numpy.array",
"numpy.sum",
"numpy.vstack",
"pandas.DataFrame"
] | [((415, 429), 'tqdm.tqdm', 'tqdm', (['pdb_list'], {}), '(pdb_list)\n', (419, 429), False, 'from tqdm import tqdm\n'), ((2927, 2939), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2935, 2939), True, 'import numpy as np\n'), ((985, 999), 'numpy.vstack', 'np.vstack', (['seq'], {}), '(seq)\n', (994, 999), True, 'import numpy as np\n'), ((1018, 1035), 'numpy.array', 'np.array', (['ins_num'], {}), '(ins_num)\n', (1026, 1035), True, 'import numpy as np\n'), ((1633, 1772), 'pandas.DataFrame', 'pd.DataFrame', (["{'protein_name': protein_name, 'ins_frac': ins_frac, 'del_frac': del_frac,\n 'hamming_dist': hamming_dist, 'seq': chimeric}"], {}), "({'protein_name': protein_name, 'ins_frac': ins_frac,\n 'del_frac': del_frac, 'hamming_dist': hamming_dist, 'seq': chimeric})\n", (1645, 1772), True, 'import pandas as pd\n'), ((2076, 2128), 'pandas.read_csv', 'pd.read_csv', (['f"""{root_dir}/{pdb}_chimeric_energy.csv"""'], {}), "(f'{root_dir}/{pdb}_chimeric_energy.csv')\n", (2087, 2128), True, 'import pandas as pd\n'), ((2978, 3030), 'pandas.read_csv', 'pd.read_csv', (['f"""{root_dir}/{pdb}_chimeric_energy.csv"""'], {}), "(f'{root_dir}/{pdb}_chimeric_energy.csv')\n", (2989, 3030), True, 'import pandas as pd\n'), ((3137, 3178), 'numpy.append', 'np.append', (['protein_name_all', 'protein_name'], {}), '(protein_name_all, protein_name)\n', (3146, 3178), True, 'import numpy as np\n'), ((2834, 2881), 'pandas.read_csv', 'pd.read_csv', (['f"""{root_dir}/PETase_subfamily.csv"""'], {}), "(f'{root_dir}/PETase_subfamily.csv')\n", (2845, 2881), True, 'import pandas as pd\n'), ((778, 820), 'numpy.array', 'np.array', (['[aa_to_id[x] for x in line[:-1]]'], {}), '([aa_to_id[x] for x in line[:-1]])\n', (786, 820), True, 'import numpy as np\n'), ((1523, 1539), 'numpy.sum', 'np.sum', (['(s != ref)'], {}), '(s != ref)\n', (1529, 1539), True, 'import numpy as np\n')] |
""" dict_sbmat module
Helper functions to deal with sparse block matrices using a dictionary
with index tuples as keys. This is beneficial e.g. in assembling coupled
circuit/FEM systems in the matrix level.
E.g. if you have sparse matrices A B C D and you want to create a sparse
block matrix like
[[A, 0, 0],
[0, B, C],
[0,-D, 0]]
you can do the following:
> sm = {}
> sm[(0,0)] = A
> sm[(1,1)] = B
> sm[(1,2)] = C
> sm[(2,1)] = -D
Inspect the block structure with print_blocks
> dict_tools.print_blocks(sm)
Create a scipy bmat with tolist
> S = scipy.sparse.bmat(dict_tools.tolist(sm))
Pick subblocks corresponding to the block indices of the resulting sparse
matrix with 'submat' and 'mk_selector_builder'.
To e.g. pick blocks
S11 = [[A]]
S12 = [[0,0]]
S21 = [[0],
[0]]
S22 = [[B,C],
[-D,0]]
use
> builder = mk_selector_builder(sm)
> P11,Q11 = builder([0],[0])
> S11 = P11*S*Q11
> P12,Q12 = builder([0], [1,2])
> S12 = P12*S*P12
> P21,Q21 = builder([1,2], [0])
> S21 = P21*S*Q21
> P22,Q22 = builder([1,2], [1,2])
> S22 = P22*S*Q22
At first this seems terribly inefficient, but it really isn't. Using the
sparse linear algebra * to pick rows and columnt sacrifices some memory but
is extremely simple to use and e.g. utilizes the sparsity patterns of all
matrices efficiently. """
import numpy as np
import scipy.sparse as sps
from itertools import product
def tolist(dmat):
""" Convert dmat to a list format [[A,None,...], ...] where
empty blocks are filled with None. This can be given as an input to
scipy.sparse.bmat """
inds = np.array(list(dmat.keys()))
nrows = np.max(inds[:,0])+1
ncols = np.max(inds[:,1])+1
return [[dmat.get((row,col),None)
for col in range(0,ncols)]
for row in range(0,nrows)]
def print_blocks(dmat):
inds = np.array(list(dmat.keys()))
xdim = np.max(inds[:,0])+1
ydim = np.max(inds[:,1])+1
strs = []
for i in range(0, xdim):
rowstr = []
for j in range(0, ydim):
m = dmat.get((i,j), None)
if m is None:
rowstr.append("{:10s}".format(" "))
else:
try:
s = m.shape
except AttributeError:
s = "(s)"
rowstr.append("{:10s}".format(s))
strs.append(rowstr)
print ("\n".join([ "|" + ", ".join(ss) + "|" for ss in strs]))
def submat(X,ii,jj):
""" Pick a block submatrix from block matrix X by rows ii and cols jj """
return {(ix,jx): X.get((i,j), None)
for (ix,i),(jx,j) in product(enumerate(ii),
enumerate(jj))}
def mk_selector_builder(dmat):
""" Builds a function which generated pairs of matrices (Pr, Pc) which
can be used to choose subblocks of dmat. Dmat needs to be in a dict
block form. The matrices generated by the returned function act in the
resulting sparse matrix
Input: dmat - a dictionary {(row, col): sparse matrix / None} representing
a sparse block matrix
Output: fun: (rows: [Int], cols: [Int]) -> (Pr, Pc) , where Pr picks
the rows and Pc picks the columns of a sparse matrix
"""
# find out how many block rows and columns we have
inds = np.array(list(dmat.keys()))
nrows = np.max(inds[:,0])+1
ncols = np.max(inds[:,1])+1
rowshapes = [None]*nrows
colshapes = [None]*ncols
# find out the shapes of rows and columns
for i in range(0, nrows):
for j in range(0, ncols):
m = dmat.get((i,j), None)
if m is None:
# an empty block doesn't give us any information
continue
else:
shp = m.shape
# double check if we already have a dimension that it matches
# the discovered one
# If we don't have a dimension, add it to the lists
existing_rows = rowshapes[i]
existing_cols = colshapes[j]
if existing_rows is not None:
assert shp[0] == existing_rows, "Shapes don't match"
else:
rowshapes[i] = shp[0]
if existing_cols is not None:
assert shp[1] == existing_cols, "Shapes don't match"
else:
colshapes[j] = shp[1]
rows_in_matrix = sum(rowshapes)
cols_in_matrix = sum(colshapes)
rowcsum = np.cumsum([0]+rowshapes)
colcsum = np.cumsum([0]+colshapes)
def fun(rows, cols):
""" Rows, cols are lists of indices """
try:
# get the amount of rows in each block in the slice
nrows = [rowshapes[row] for row in rows]
except TypeError:
# If rows is not a list, try to convert it into a singleton
nrows = [rowshapes[rows]]
rows = [rows]
try:
ncols = [colshapes[col] for col in cols]
except TypeError:
ncols = [colshapes[cols]]
cols = [cols]
# total number of rows to be in the slice
rows_in_slice = np.sum(nrows)
cols_in_slice = np.sum(ncols)
# the ones to be inserted into Pr and Pc
rowi = np.ones((rows_in_slice,))
coli = np.ones((cols_in_slice,))
# The indices from where the blocks in the slice start
rowstarts = [rowcsum[row] for row in rows]
colstarts = [colcsum[col] for col in cols]
# the row indices from the whole slice
rowvector = np.concatenate([np.arange(rowstart, rowstart+rows)
for rows, rowstart in zip(nrows, rowstarts)])
# row permutation matrix is (rows, totalrows)
Pr = sps.csc_matrix((rowi, (np.arange(0,rows_in_slice),
rowvector)),
shape=(rows_in_slice, rows_in_matrix))
colvector = np.concatenate([np.arange(colstart, colstart+cols)
for cols, colstart in zip(ncols, colstarts)])
# col permutation matrix
Pc = sps.csc_matrix((coli, (colvector,
np.arange(0,cols_in_slice))),
shape=(cols_in_matrix, cols_in_slice))
return (Pr, Pc)
return fun
if __name__ == "__main__":
# import scipy.sparse as sps
A = dict()
a = np.array([[1,2,3], [4,5,6]])
b = np.array([[1,2], [3,4]])
c = np.ones((2,3))
A[(0,0)] = b
A[(1,2)] = a
A[(2,1)] = 10*a
A[(2,0)] = 10*b
A[(2,2)] = c
spmat = sps.bmat(tolist(A)).tocsc()
f = mk_selector_builder(A)
def match(a1, a2):
return np.all(a1 == a2)
# Small and cute unit test suite
Pr2, Pc1 = f(2,1)
assert match(10*a, (Pr2 @ spmat @ Pc1).toarray()), "Block (2,1)"
Pr0, Pc0 = f(0,0)
assert match(b, (Pr0 @ spmat @ Pc0).toarray()), "Block (0,0)"
Pr1, Pc2 = f(1,2)
assert match(a, (Pr1 @ spmat @ Pc2).toarray()), "Block (1,2)"
assert match(10*b, (Pr2 @ spmat @ Pc0).toarray()), "Block (2,1)"
assert np.all(np.zeros((2,3)) == (Pr1 @ spmat @ Pc1).toarray()), "Block(1,1) is zero"
Pr12, Pc12 = f([1,2], [1,2])
m = sps.bmat([[None, a],
[10*a, c]]).toarray()
assert np.all(m == (Pr12 @ spmat @ Pc12).toarray()), "Block([1,2], [1,2])"
Pr02,Pc02 = f([0,2], [0,2])
m = sps.bmat([[b, None],
[10*b, c]]).toarray()
assert np.all(m == (Pr02 @ spmat @ Pc02).toarray()), "Block([0,2], [0,2])"
| [
"scipy.sparse.bmat",
"numpy.ones",
"numpy.max",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"numpy.cumsum",
"numpy.all",
"numpy.arange"
] | [((4548, 4574), 'numpy.cumsum', 'np.cumsum', (['([0] + rowshapes)'], {}), '([0] + rowshapes)\n', (4557, 4574), True, 'import numpy as np\n'), ((4587, 4613), 'numpy.cumsum', 'np.cumsum', (['([0] + colshapes)'], {}), '([0] + colshapes)\n', (4596, 4613), True, 'import numpy as np\n'), ((6561, 6593), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (6569, 6593), True, 'import numpy as np\n'), ((6598, 6624), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (6606, 6624), True, 'import numpy as np\n'), ((6631, 6646), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (6638, 6646), True, 'import numpy as np\n'), ((1637, 1655), 'numpy.max', 'np.max', (['inds[:, 0]'], {}), '(inds[:, 0])\n', (1643, 1655), True, 'import numpy as np\n'), ((1669, 1687), 'numpy.max', 'np.max', (['inds[:, 1]'], {}), '(inds[:, 1])\n', (1675, 1687), True, 'import numpy as np\n'), ((1897, 1915), 'numpy.max', 'np.max', (['inds[:, 0]'], {}), '(inds[:, 0])\n', (1903, 1915), True, 'import numpy as np\n'), ((1928, 1946), 'numpy.max', 'np.max', (['inds[:, 1]'], {}), '(inds[:, 1])\n', (1934, 1946), True, 'import numpy as np\n'), ((3389, 3407), 'numpy.max', 'np.max', (['inds[:, 0]'], {}), '(inds[:, 0])\n', (3395, 3407), True, 'import numpy as np\n'), ((3421, 3439), 'numpy.max', 'np.max', (['inds[:, 1]'], {}), '(inds[:, 1])\n', (3427, 3439), True, 'import numpy as np\n'), ((5236, 5249), 'numpy.sum', 'np.sum', (['nrows'], {}), '(nrows)\n', (5242, 5249), True, 'import numpy as np\n'), ((5274, 5287), 'numpy.sum', 'np.sum', (['ncols'], {}), '(ncols)\n', (5280, 5287), True, 'import numpy as np\n'), ((5352, 5377), 'numpy.ones', 'np.ones', (['(rows_in_slice,)'], {}), '((rows_in_slice,))\n', (5359, 5377), True, 'import numpy as np\n'), ((5393, 5418), 'numpy.ones', 'np.ones', (['(cols_in_slice,)'], {}), '((cols_in_slice,))\n', (5400, 5418), True, 'import numpy as np\n'), ((6867, 6883), 'numpy.all', 'np.all', (['(a1 == a2)'], {}), '(a1 == a2)\n', (6873, 6883), True, 'import numpy as np\n'), ((7294, 7310), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (7302, 7310), True, 'import numpy as np\n'), ((7412, 7446), 'scipy.sparse.bmat', 'sps.bmat', (['[[None, a], [10 * a, c]]'], {}), '([[None, a], [10 * a, c]])\n', (7420, 7446), True, 'import scipy.sparse as sps\n'), ((7600, 7634), 'scipy.sparse.bmat', 'sps.bmat', (['[[b, None], [10 * b, c]]'], {}), '([[b, None], [10 * b, c]])\n', (7608, 7634), True, 'import scipy.sparse as sps\n'), ((5668, 5704), 'numpy.arange', 'np.arange', (['rowstart', '(rowstart + rows)'], {}), '(rowstart, rowstart + rows)\n', (5677, 5704), True, 'import numpy as np\n'), ((6082, 6118), 'numpy.arange', 'np.arange', (['colstart', '(colstart + cols)'], {}), '(colstart, colstart + cols)\n', (6091, 6118), True, 'import numpy as np\n'), ((5892, 5919), 'numpy.arange', 'np.arange', (['(0)', 'rows_in_slice'], {}), '(0, rows_in_slice)\n', (5901, 5919), True, 'import numpy as np\n'), ((6315, 6342), 'numpy.arange', 'np.arange', (['(0)', 'cols_in_slice'], {}), '(0, cols_in_slice)\n', (6324, 6342), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import random
import time
import numpy as np
from collections import defaultdict, deque
from quoridor import Quoridor
from policy_value_net import PolicyValueNet
from mcts import MCTSPlayer
from torch.utils.tensorboard import SummaryWriter
from constant import BOARD_SIZE, WALL_NUM
writer = SummaryWriter()
class TrainPipeline(object):
def __init__(self, init_model=None):
# 棋盘参数
self.game = Quoridor()
# 训练参数
self.learn_rate = 2e-3
self.lr_multiplier = 1.0 # 适应性调节学习速率
self.temp = 1.0
self.n_playout = 200
self.c_puct = 5
self.buffer_size = 10000
self.batch_size = 32 # 取1 测试ing
self.data_buffer = deque(maxlen=self.buffer_size)
self.play_batch_size = 1
self.epochs = 50
self.kl_targ = 0.02
self.check_freq = 5
self.game_batch_num = 1000
self.game_batch_num = 200
self.best_win_ratio = 0.0
self.pure_mcts_playout_num = 1000
if init_model:
self.policy_value_net = PolicyValueNet(model_file=init_model)
else:
self.policy_value_net = PolicyValueNet()
# 设置电脑玩家信息
self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn, c_puct=self.c_puct,
n_playout=self.n_playout, is_selfplay=1)
# def get_equi_data(self, play_data):
# """
# 数据集增强,获取旋转后的数据,因为五子棋也是对称的
# play_data: [(state, mcts_prob, winner_z), ..., ...]"""
# extend_data = []
# for state, mcts_porb, winner in play_data:
# equi_state = np.array([np.rot90(s,2) for s in state])
# equi_mcts_prob = np.rot90(np.flipud(mcts_porb.reshape(9, 9)), 2)
# extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))
# # flip horizontally
# equi_state = np.array([np.fliplr(s) for s in equi_state])
# equi_mcts_prob = np.fliplr(equi_mcts_prob)
# extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))
# return extend_data
def collect_selfplay_data(self, n_games=1):
"""收集训练数据"""
for i in range(n_games):
winner, play_data = self.game.start_self_play(self.mcts_player, temp=self.temp) # 进行自博弈
play_data = list(play_data)[:]
self.episode_len = len(play_data)
# 数据增强
# play_data = self.get_equi_data(play_data)
self.data_buffer.extend(play_data)
def policy_update(self):
"""训练策略价值网络"""
mini_batch = random.sample(self.data_buffer, self.batch_size) # 获取mini-batch
state_batch = [data[0] for data in mini_batch] # 提取第一位的状态
mcts_probs_batch = [data[1] for data in mini_batch] # 提取第二位的概率
winner_batch = [data[2] for data in mini_batch] # 提取第三位的胜负情况
old_probs, old_v = self.policy_value_net.policy_value(state_batch) # 输入网络计算旧的概率和胜负价值,这里为什么要计算旧的数据是因为需要计算
# 新旧之间的KL散度来控制学习速率的退火
# 开始训练epochs个轮次
for i in range(self.epochs):
valloss, polloss, entropy = self.policy_value_net.train_step(state_batch, mcts_probs_batch, winner_batch,
self.learn_rate * self.lr_multiplier)
new_probs, new_v = self.policy_value_net.policy_value(state_batch) # 计算新的概率和价值
kl = np.mean(np.sum(old_probs * (np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)), axis=1))
if kl > self.kl_targ * 4: # 如果KL散度发散的很不好,就提前结束训练
break
# 根据KL散度,适应性调节学习速率
if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
self.lr_multiplier /= 1.5
elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
self.lr_multiplier *= 1.5
explained_var_old = 1 - np.var(np.array(winner_batch) - old_v.flatten()) / np.var(np.array(winner_batch))
explained_var_new = 1 - np.var(np.array(winner_batch) - new_v.flatten()) / np.var(np.array(winner_batch))
print(
"kl:{:.5f},lr_multiplier:{:.3f},value loss:{},policy loss:[],entropy:{},explained_var_old:{:.3f},explained_var_new:{:.3f}".format(
kl, self.lr_multiplier, valloss, polloss, entropy, explained_var_old, explained_var_new))
return valloss, polloss, entropy
def run(self):
try:
self.collect_selfplay_data(50)
count = 0
for i in range(self.game_batch_num):
self.collect_selfplay_data(self.play_batch_size) # collect_s
print("batch i:{}, episode_len:{}".format(i + 1, self.episode_len))
if len(self.data_buffer) > self.batch_size:
valloss, polloss, entropy = self.policy_update()
print("VALUE LOSS: %0.3f " % valloss.item(), "POLICY LOSS: %0.3f " % polloss.item())
print("ENTROPY:",entropy)
# 保存loss
writer.add_scalar("Val Loss/train", valloss.item(), i)
writer.add_scalar("Policy Loss/train", polloss.item(), i)
writer.add_scalar("Entropy/train", entropy, i)
if (i + 1) % self.check_freq == 0:
count += 1
print("current self-play batch: {}".format(i + 1))
# win_ratio = self.policy_evaluate()
# Add generation to filename
self.policy_value_net.save_model('cp_gen_3_' + str(count) + '_' + str("%0.3f_" % valloss.item()) + str(time.strftime('%Y-%m-%d', time.localtime(time.time())))) # 保存模型
except KeyboardInterrupt:
print('\n\rquit')
# Start
if __name__ == '__main__':
training_pipeline = TrainPipeline(init_model=None)
training_pipeline.run()
| [
"torch.utils.tensorboard.SummaryWriter",
"random.sample",
"collections.deque",
"quoridor.Quoridor",
"numpy.log",
"numpy.array",
"policy_value_net.PolicyValueNet",
"mcts.MCTSPlayer",
"time.time"
] | [((378, 393), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (391, 393), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((507, 517), 'quoridor.Quoridor', 'Quoridor', ([], {}), '()\n', (515, 517), False, 'from quoridor import Quoridor\n'), ((797, 827), 'collections.deque', 'deque', ([], {'maxlen': 'self.buffer_size'}), '(maxlen=self.buffer_size)\n', (802, 827), False, 'from collections import defaultdict, deque\n'), ((1321, 1435), 'mcts.MCTSPlayer', 'MCTSPlayer', (['self.policy_value_net.policy_value_fn'], {'c_puct': 'self.c_puct', 'n_playout': 'self.n_playout', 'is_selfplay': '(1)'}), '(self.policy_value_net.policy_value_fn, c_puct=self.c_puct,\n n_playout=self.n_playout, is_selfplay=1)\n', (1331, 1435), False, 'from mcts import MCTSPlayer\n'), ((2750, 2798), 'random.sample', 'random.sample', (['self.data_buffer', 'self.batch_size'], {}), '(self.data_buffer, self.batch_size)\n', (2763, 2798), False, 'import random\n'), ((1166, 1203), 'policy_value_net.PolicyValueNet', 'PolicyValueNet', ([], {'model_file': 'init_model'}), '(model_file=init_model)\n', (1180, 1203), False, 'from policy_value_net import PolicyValueNet\n'), ((1256, 1272), 'policy_value_net.PolicyValueNet', 'PolicyValueNet', ([], {}), '()\n', (1270, 1272), False, 'from policy_value_net import PolicyValueNet\n'), ((4141, 4163), 'numpy.array', 'np.array', (['winner_batch'], {}), '(winner_batch)\n', (4149, 4163), True, 'import numpy as np\n'), ((4256, 4278), 'numpy.array', 'np.array', (['winner_batch'], {}), '(winner_batch)\n', (4264, 4278), True, 'import numpy as np\n'), ((4090, 4112), 'numpy.array', 'np.array', (['winner_batch'], {}), '(winner_batch)\n', (4098, 4112), True, 'import numpy as np\n'), ((4205, 4227), 'numpy.array', 'np.array', (['winner_batch'], {}), '(winner_batch)\n', (4213, 4227), True, 'import numpy as np\n'), ((3662, 3687), 'numpy.log', 'np.log', (['(old_probs + 1e-10)'], {}), '(old_probs + 1e-10)\n', (3668, 3687), True, 'import numpy as np\n'), ((3690, 3715), 'numpy.log', 'np.log', (['(new_probs + 1e-10)'], {}), '(new_probs + 1e-10)\n', (3696, 3715), True, 'import numpy as np\n'), ((5902, 5913), 'time.time', 'time.time', ([], {}), '()\n', (5911, 5913), False, 'import time\n')] |
# Credit to @danijar and @alexlee-gk on github
# https://github.com/tensorflow/tensorboard/issues/39#issuecomment-568917607
import numpy as np
import tensorflow as tf
def video_summary(name, video, step=None, fps=20):
name = tf.constant(name).numpy().decode('utf-8')
video = np.array(video)
if video.dtype in (np.float32, np.float64):
video = np.clip(255 * video, 0, 255).astype(np.uint8)
B, T, H, W, C = video.shape
try:
frames = video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C))
summary = tf.compat.v1.Summary()
image = tf.compat.v1.Summary.Image(height=B * H, width=T * W, colorspace=C)
image.encoded_image_string = encode_gif(frames, fps)
summary.value.add(tag=name + '/gif', image=image)
tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
except (IOError, OSError) as e:
print('GIF summaries require ffmpeg in $PATH.', e)
frames = video.transpose((0, 2, 1, 3, 4)).reshape((1, B * H, T * W, C))
tf.summary.image(name + '/grid', frames, step)
def encode_gif(frames, fps):
from subprocess import Popen, PIPE
h, w, c = frames[0].shape
pxfmt = {1: 'gray', 3: 'rgb24'}[c]
cmd = ' '.join([
f'ffmpeg -y -f rawvideo -vcodec rawvideo',
f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
f'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
f'-r {fps:.02f} -f gif -'
])
proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in frames:
proc.stdin.write(image.tostring())
out, err = proc.communicate()
if proc.returncode:
raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')]))
del proc
return out | [
"numpy.clip",
"tensorflow.compat.v1.Summary.Image",
"numpy.array",
"tensorflow.compat.v1.Summary",
"tensorflow.constant",
"tensorflow.summary.image"
] | [((282, 297), 'numpy.array', 'np.array', (['video'], {}), '(video)\n', (290, 297), True, 'import numpy as np\n'), ((525, 547), 'tensorflow.compat.v1.Summary', 'tf.compat.v1.Summary', ([], {}), '()\n', (545, 547), True, 'import tensorflow as tf\n'), ((560, 627), 'tensorflow.compat.v1.Summary.Image', 'tf.compat.v1.Summary.Image', ([], {'height': '(B * H)', 'width': '(T * W)', 'colorspace': 'C'}), '(height=B * H, width=T * W, colorspace=C)\n', (586, 627), True, 'import tensorflow as tf\n'), ((984, 1030), 'tensorflow.summary.image', 'tf.summary.image', (["(name + '/grid')", 'frames', 'step'], {}), "(name + '/grid', frames, step)\n", (1000, 1030), True, 'import tensorflow as tf\n'), ((356, 384), 'numpy.clip', 'np.clip', (['(255 * video)', '(0)', '(255)'], {}), '(255 * video, 0, 255)\n', (363, 384), True, 'import numpy as np\n'), ((230, 247), 'tensorflow.constant', 'tf.constant', (['name'], {}), '(name)\n', (241, 247), True, 'import tensorflow as tf\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestAucOp(OpTest):
def setUp(self):
self.op_type = "auc"
pred = np.random.random((128, 2)).astype("float32")
indices = np.random.randint(0, 2, (128, 2))
labels = np.random.randint(0, 2, (128, 1))
num_thresholds = 200
self.inputs = {'Out': pred, 'Indices': indices, 'Label': labels}
self.attrs = {'curve': 'ROC', 'num_thresholds': num_thresholds}
# NOTE: sklearn use a different way to generate thresholds
# which will cause the result differs slightly:
# from sklearn.metrics import roc_curve, auc
# fpr, tpr, thresholds = roc_curve(labels, pred)
# auc_value = auc(fpr, tpr)
# we caculate AUC again using numpy for testing
kepsilon = 1e-7 # to account for floating point imprecisions
thresholds = [(i + 1) * 1.0 / (num_thresholds - 1)
for i in range(num_thresholds - 2)]
thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon]
# caculate TP, FN, TN, FP count
tp_list = np.ndarray((num_thresholds, ))
fn_list = np.ndarray((num_thresholds, ))
tn_list = np.ndarray((num_thresholds, ))
fp_list = np.ndarray((num_thresholds, ))
for idx_thresh, thresh in enumerate(thresholds):
tp, fn, tn, fp = 0, 0, 0, 0
for i, lbl in enumerate(labels):
if lbl:
if pred[i, 0] >= thresh:
tp += 1
else:
fn += 1
else:
if pred[i, 0] >= thresh:
fp += 1
else:
tn += 1
tp_list[idx_thresh] = tp
fn_list[idx_thresh] = fn
tn_list[idx_thresh] = tn
fp_list[idx_thresh] = fp
epsilon = 1e-6
tpr = (tp_list.astype("float32") + epsilon) / (
tp_list + fn_list + epsilon)
fpr = fp_list.astype("float32") / (fp_list + tn_list + epsilon)
rec = (tp_list.astype("float32") + epsilon) / (
tp_list + fp_list + epsilon)
x = fpr[:num_thresholds - 1] - fpr[1:]
y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0
auc_value = np.sum(x * y)
self.outputs = {'AUC': auc_value}
def test_check_output(self):
self.check_output()
if __name__ == "__main__":
unittest.main()
| [
"numpy.random.random",
"numpy.sum",
"numpy.random.randint",
"numpy.ndarray",
"unittest.main"
] | [((3084, 3099), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3097, 3099), False, 'import unittest\n'), ((829, 862), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(128, 2)'], {}), '(0, 2, (128, 2))\n', (846, 862), True, 'import numpy as np\n'), ((880, 913), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(128, 1)'], {}), '(0, 2, (128, 1))\n', (897, 913), True, 'import numpy as np\n'), ((1735, 1764), 'numpy.ndarray', 'np.ndarray', (['(num_thresholds,)'], {}), '((num_thresholds,))\n', (1745, 1764), True, 'import numpy as np\n'), ((1784, 1813), 'numpy.ndarray', 'np.ndarray', (['(num_thresholds,)'], {}), '((num_thresholds,))\n', (1794, 1813), True, 'import numpy as np\n'), ((1833, 1862), 'numpy.ndarray', 'np.ndarray', (['(num_thresholds,)'], {}), '((num_thresholds,))\n', (1843, 1862), True, 'import numpy as np\n'), ((1882, 1911), 'numpy.ndarray', 'np.ndarray', (['(num_thresholds,)'], {}), '((num_thresholds,))\n', (1892, 1911), True, 'import numpy as np\n'), ((2932, 2945), 'numpy.sum', 'np.sum', (['(x * y)'], {}), '(x * y)\n', (2938, 2945), True, 'import numpy as np\n'), ((766, 792), 'numpy.random.random', 'np.random.random', (['(128, 2)'], {}), '((128, 2))\n', (782, 792), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from config.config import *
from utils.common_util import *
def sort_targets(x):
if x is None or x == '':
return x
x = x.split(' ')
x = np.array(x, dtype=int)
x = np.sort(x)
x = x.astype(str).tolist()
x = ' '.join(x)
return x
def fill_targets(row):
row.Predicted = np.array(row.Predicted.split(" ")).astype(np.int)
for num in row.Predicted:
name = LABEL_NAMES[int(num)]
row.loc[name] = 1
return row
def show_modify_info(submission_df1, submission_df2):
submission_df1 = submission_df1.copy()
submission_df2 = submission_df2.copy()
for label in LABEL_NAME_LIST:
submission_df1[label] = 0
submission_df1 = submission_df1.apply(fill_targets, axis=1)
ts1 = submission_df1[LABEL_NAME_LIST].sum()
for label in LABEL_NAME_LIST:
submission_df2[label] = 0
submission_df2 = submission_df2.apply(fill_targets, axis=1)
ts2 = submission_df2[LABEL_NAME_LIST].sum()
assert np.all(submission_df1[ID].values == submission_df2[ID].values)
df = submission_df2[LABEL_NAME_LIST] - submission_df1[LABEL_NAME_LIST]
ts3 = (df == 1).sum()
ts4 = -(df == -1).sum()
ts = pd.concat((ts1, ts2, ts3, ts4), axis=1)
ts.columns = ['before', 'after', 'increase', 'decrease']
assert np.all((ts['after'] - ts['before']).values == (ts['increase'] + ts['decrease']).values)
ts['modify'] = ts['after'] - ts['before']
print(ts)
def modify_submit(submission_df, show_info=False):
submission_df[PREDICTED] = submission_df[PREDICTED].apply(lambda x: sort_targets(x))
submission_df_cp = submission_df.copy()
# replace leak result
kernal_leak_df = pd.read_csv(opj(DATA_DIR, 'meta', 'test_leak_meta.csv'))
for idx in range(len(kernal_leak_df)):
id_value, target = kernal_leak_df.iloc[idx][[ID, TARGET]].values
submission_df.loc[submission_df[ID] == id_value, PREDICTED] = target
submission_df[PREDICTED] = submission_df[PREDICTED].apply(lambda x: sort_targets(x))
assert np.all(submission_df_cp[ID].values == submission_df[ID].values)
print('modify num: %d' % (submission_df_cp[PREDICTED].values != submission_df[PREDICTED].values).sum())
if show_info:
show_modify_info(submission_df_cp, submission_df)
assert submission_df.shape[-1] == 2
return submission_df
| [
"numpy.array",
"numpy.sort",
"pandas.concat",
"numpy.all"
] | [((197, 219), 'numpy.array', 'np.array', (['x'], {'dtype': 'int'}), '(x, dtype=int)\n', (205, 219), True, 'import numpy as np\n'), ((228, 238), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (235, 238), True, 'import numpy as np\n'), ((1020, 1082), 'numpy.all', 'np.all', (['(submission_df1[ID].values == submission_df2[ID].values)'], {}), '(submission_df1[ID].values == submission_df2[ID].values)\n', (1026, 1082), True, 'import numpy as np\n'), ((1222, 1261), 'pandas.concat', 'pd.concat', (['(ts1, ts2, ts3, ts4)'], {'axis': '(1)'}), '((ts1, ts2, ts3, ts4), axis=1)\n', (1231, 1261), True, 'import pandas as pd\n'), ((1334, 1426), 'numpy.all', 'np.all', (["((ts['after'] - ts['before']).values == (ts['increase'] + ts['decrease']).\n values)"], {}), "((ts['after'] - ts['before']).values == (ts['increase'] + ts[\n 'decrease']).values)\n", (1340, 1426), True, 'import numpy as np\n'), ((2067, 2130), 'numpy.all', 'np.all', (['(submission_df_cp[ID].values == submission_df[ID].values)'], {}), '(submission_df_cp[ID].values == submission_df[ID].values)\n', (2073, 2130), True, 'import numpy as np\n')] |
import numpy as np
class ReplayBuffer:
def __init__(self, max_size, input_shape, n_actions):
"""
ReplayBuffer를 선언 해줌.
:param max_size: int
:param input_shape: 튜플 형태로 입력 받음. ex. (1, 3)
:param n_actions: int 형식
"""
self.mem_size = max_size
self.mem_cntr = 0
self.state_memory = np.zeros((self.mem_size, *input_shape)) # Shape: (mem_size, *input_shape)
self.new_state_memory = np.zeros((self.mem_size, *input_shape)) # Shape: (mem_size, *input_shape)
self.action_memory = np.zeros((self.mem_size, n_actions)) # Shape: (mem_size, n_action)
self.reward_memory = np.zeros(self.mem_size) # Shape: (mem_size)
self.terminal_memory = np.zeros(self.mem_size, dtype=np.bool) # Shape: (mem_size)
def store_transition(self, state, action, reward, state_, done):
# 현재 count 를 최대 mem_size 로 나누어 한정된 메모리 주소에서 순환하도록 설계함.
index = self.mem_cntr % self.mem_size
# 데이터를 저장
self.state_memory[index] = state
self.new_state_memory[index] = state_
self.action_memory[index] = action
self.reward_memory[index] = reward
self.terminal_memory[index] = done
self.mem_cntr += 1
def sample_buffer(self, batch_size):
# 1] 앞에서 메모리가 초기화 된 상태이기 때문에, 초기화된 값을 가져오지 않도록 현재 카운터보다 작은 범위에서 순환
max_mem = min(self.mem_cntr, self.mem_size) # int
# 2] 0 ~ max_mem 범위 내에서 batch_size 의 갯수 만큼 index 값을 생성함.
batch = np.random.choice(max_mem, batch_size)
# 3] 랜덤으로 생성된 index 에 따라서 샘플링
states = self.state_memory[batch]
states_ = self.new_state_memory[batch]
actions = self.action_memory[batch]
rewards = self.reward_memory[batch]
dones = self.terminal_memory[batch]
return states, actions, rewards, states_, dones
if __name__ == '__main__':
a = ReplayBuffer(max_size=3, input_shape=(3, 1), n_actions=4)
a.store_transition(1, 1, 1, 1, 1)
a.store_transition(1, 1, 1, 1, 1)
a.store_transition(1, 1, 1, 1, 1)
a.store_transition(1, 1, 1, 1, 1)
a.store_transition(1, 1, 1, 1, 1)
a.store_transition(1, 1, 1, 1, 1) | [
"numpy.random.choice",
"numpy.zeros"
] | [((354, 393), 'numpy.zeros', 'np.zeros', (['(self.mem_size, *input_shape)'], {}), '((self.mem_size, *input_shape))\n', (362, 393), True, 'import numpy as np\n'), ((468, 507), 'numpy.zeros', 'np.zeros', (['(self.mem_size, *input_shape)'], {}), '((self.mem_size, *input_shape))\n', (476, 507), True, 'import numpy as np\n'), ((575, 611), 'numpy.zeros', 'np.zeros', (['(self.mem_size, n_actions)'], {}), '((self.mem_size, n_actions))\n', (583, 611), True, 'import numpy as np\n'), ((681, 704), 'numpy.zeros', 'np.zeros', (['self.mem_size'], {}), '(self.mem_size)\n', (689, 704), True, 'import numpy as np\n'), ((779, 817), 'numpy.zeros', 'np.zeros', (['self.mem_size'], {'dtype': 'np.bool'}), '(self.mem_size, dtype=np.bool)\n', (787, 817), True, 'import numpy as np\n'), ((1547, 1584), 'numpy.random.choice', 'np.random.choice', (['max_mem', 'batch_size'], {}), '(max_mem, batch_size)\n', (1563, 1584), True, 'import numpy as np\n')] |
import utility.ascii
import tdtsk
import h5py
import numpy as np
import xarray as xr
def get_key_string_list(h5_file_content, dimension_name):
ascii_arr = h5_file_content[dimension_name][()]
string_list = []
for current_ascii in ascii_arr.tolist():
string_list.append(utility.ascii.ascii_to_string(current_ascii))
return string_list
def get_key_ascii_array_list(tsk, dimension_name):
ascii_solid_len = np.nan
if dimension_name == "KEY":
ascii_solid_len = 128
if dimension_name == "SYMBOL":
ascii_solid_len = 20
if dimension_name == "TIME":
ascii_solid_len = 8
key_string_list = tsk.coords[dimension_name].values
key_ascii_array_list = utility.ascii.parse_ascii_array_list_from_string_list(\
key_string_list, ascii_solid_len)
return key_ascii_array_list
def load(h5_file_path, kline_type, local_symbol_list):
'''
kline_type is formatted as {int}_{time_unit}
local symbol list is read from 2 local files,
time/symbol/key list is read from h5 files.
'''
# Load h5 file
h5_file_content = h5py.File(h5_file_path, 'r')
# Load dimension names from h5_file content
time_list = get_key_string_list(h5_file_content, \
tdtsk.get_kline_type_labal(kline_type))
symbol_list = get_key_string_list(h5_file_content, "SYMBOL")
key_list = get_key_string_list(h5_file_content, "KEY")
# Load raw data from h5_file content
labal = tdtsk.get_kline_type_labal(kline_type) + '_DTSK'
dataset = h5_file_content[labal]
tsk_ndarray = np.asarray(dataset).reshape(len(time_list), len(symbol_list), len(key_list))
tsk_xarray = xr.DataArray(tsk_ndarray, \
coords=[time_list, symbol_list, key_list],\
dims=['TIME', 'SYMBOL', 'KEY'])
# Convert raw data to real dtsk that fit local_symbol_list
symbol_set = set(symbol_list)
tsk_ndarray_symbol_nan = np.ndarray(shape=(len(time_list), 1, len(key_list))) * np.nan
tsk_select_ndarray = np.ndarray(shape=\
(len(time_list), len(local_symbol_list), len(key_list)))
for index, sym in enumerate(local_symbol_list):
if sym in symbol_set:
tsk_select_ndarray[:, index:index + 1, :] = \
tsk_xarray.loc[:, sym, :].values.reshape(len(time_list), 1, len(key_list))
else:
tsk_select_ndarray[:, index:index + 1, :] = tsk_ndarray_symbol_nan
eps = 1e-5
tsk_select_ndarray[abs(tsk_select_ndarray + 2e10) <= eps] = np.nan
tsk_select_xarray = xr.DataArray(tsk_select_ndarray, \
coords=[time_list, local_symbol_list, key_list], \
dims=['TIME', 'SYMBOL', 'KEY'])
return tsk_select_xarray
def save(h5_file_path, tsk, kline_type):
''' Not sure whether the data is correct '''
time_list = get_key_ascii_array_list(tsk, 'TIME')
symbol_list = get_key_ascii_array_list(tsk, "SYMBOL")
key_list = get_key_ascii_array_list(tsk, "KEY")
file = h5py.File(h5_file_path, 'w')
file[tdtsk.get_kline_type_labal(kline_type)] = time_list
file["SYMBOL"] = symbol_list
file["KEY"] = key_list
file[tdtsk.get_kline_type_labal(kline_type)+'_DTSK'] = tsk
''' Not sure whether the tsk is correct '''
file.close()
| [
"tdtsk.get_kline_type_labal",
"numpy.asarray",
"xarray.DataArray",
"h5py.File"
] | [((1057, 1085), 'h5py.File', 'h5py.File', (['h5_file_path', '"""r"""'], {}), "(h5_file_path, 'r')\n", (1066, 1085), False, 'import h5py\n'), ((1611, 1716), 'xarray.DataArray', 'xr.DataArray', (['tsk_ndarray'], {'coords': '[time_list, symbol_list, key_list]', 'dims': "['TIME', 'SYMBOL', 'KEY']"}), "(tsk_ndarray, coords=[time_list, symbol_list, key_list], dims=[\n 'TIME', 'SYMBOL', 'KEY'])\n", (1623, 1716), True, 'import xarray as xr\n'), ((2465, 2582), 'xarray.DataArray', 'xr.DataArray', (['tsk_select_ndarray'], {'coords': '[time_list, local_symbol_list, key_list]', 'dims': "['TIME', 'SYMBOL', 'KEY']"}), "(tsk_select_ndarray, coords=[time_list, local_symbol_list,\n key_list], dims=['TIME', 'SYMBOL', 'KEY'])\n", (2477, 2582), True, 'import xarray as xr\n'), ((2883, 2911), 'h5py.File', 'h5py.File', (['h5_file_path', '"""w"""'], {}), "(h5_file_path, 'w')\n", (2892, 2911), False, 'import h5py\n'), ((1195, 1233), 'tdtsk.get_kline_type_labal', 'tdtsk.get_kline_type_labal', (['kline_type'], {}), '(kline_type)\n', (1221, 1233), False, 'import tdtsk\n'), ((1413, 1451), 'tdtsk.get_kline_type_labal', 'tdtsk.get_kline_type_labal', (['kline_type'], {}), '(kline_type)\n', (1439, 1451), False, 'import tdtsk\n'), ((2918, 2956), 'tdtsk.get_kline_type_labal', 'tdtsk.get_kline_type_labal', (['kline_type'], {}), '(kline_type)\n', (2944, 2956), False, 'import tdtsk\n'), ((1517, 1536), 'numpy.asarray', 'np.asarray', (['dataset'], {}), '(dataset)\n', (1527, 1536), True, 'import numpy as np\n'), ((3030, 3068), 'tdtsk.get_kline_type_labal', 'tdtsk.get_kline_type_labal', (['kline_type'], {}), '(kline_type)\n', (3056, 3068), False, 'import tdtsk\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from os.path import join
import random
import pathlib
import tensorflow as tf
#tf.enable_eager_execution()
#tf.executing_eagerly()
import IPython.display as display
from sklearn.model_selection import train_test_split
from keras_applications import resnet
#tf.random.set_random_seed(42)
np.random.seed(42)
basedir = 'spoon-vs-fork\spoon-vs-fork'
fork_dir = join(basedir, 'fork')
spoon_dir = join(basedir, 'spoon')
spoon_paths = [join(spoon_dir, img_path) for img_path in os.listdir(spoon_dir)]
fork_paths = [join(fork_dir, img_path) for img_path in os.listdir(fork_dir)]
img_paths = spoon_paths + fork_paths
print(len(img_paths))
def load_data(basedir):
folders = os.listdir(basedir)
print(folders)
result = pd.DataFrame(columns=['filename', 'class'])
for folder in folders:
files = [join(basedir, folder, file) for file in os.listdir(join(basedir, folder))]
df = pd.DataFrame({'filename': files, 'class': folder})
result = pd.concat([result, df])
return result
image_df = load_data(basedir)
def validate_data(image_df):
result = image_df.copy()
allowed_extensions = ['jpg', 'jpeg', 'png', 'gif']
for img in image_df.filename:
extension = str.lower(os.path.splitext(img)[1])[1:]
if extension not in allowed_extensions:
result = result[result.filename != img]
print("Removed file with extension '{}'".format(extension))
return result
image_df = validate_data(image_df)
X_train, X_test, y_train, y_test = train_test_split(image_df.filename, image_df['class'], test_size=0.2, random_state=42)
# 8 training
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
resnet = resnet(include_top=False, pooling='avg', weights=resnet_weights_path)
| [
"os.listdir",
"keras_applications.resnet",
"sklearn.model_selection.train_test_split",
"os.path.join",
"os.path.splitext",
"numpy.random.seed",
"pandas.DataFrame",
"pandas.concat"
] | [((388, 406), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (402, 406), True, 'import numpy as np\n'), ((462, 483), 'os.path.join', 'join', (['basedir', '"""fork"""'], {}), "(basedir, 'fork')\n", (466, 483), False, 'from os.path import join\n'), ((497, 519), 'os.path.join', 'join', (['basedir', '"""spoon"""'], {}), "(basedir, 'spoon')\n", (501, 519), False, 'from os.path import join\n'), ((1668, 1758), 'sklearn.model_selection.train_test_split', 'train_test_split', (['image_df.filename', "image_df['class']"], {'test_size': '(0.2)', 'random_state': '(42)'}), "(image_df.filename, image_df['class'], test_size=0.2,\n random_state=42)\n", (1684, 1758), False, 'from sklearn.model_selection import train_test_split\n'), ((1883, 1952), 'keras_applications.resnet', 'resnet', ([], {'include_top': '(False)', 'pooling': '"""avg"""', 'weights': 'resnet_weights_path'}), "(include_top=False, pooling='avg', weights=resnet_weights_path)\n", (1889, 1952), False, 'from keras_applications import resnet\n'), ((536, 561), 'os.path.join', 'join', (['spoon_dir', 'img_path'], {}), '(spoon_dir, img_path)\n', (540, 561), False, 'from os.path import join\n'), ((616, 640), 'os.path.join', 'join', (['fork_dir', 'img_path'], {}), '(fork_dir, img_path)\n', (620, 640), False, 'from os.path import join\n'), ((784, 803), 'os.listdir', 'os.listdir', (['basedir'], {}), '(basedir)\n', (794, 803), False, 'import os\n'), ((838, 881), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['filename', 'class']"}), "(columns=['filename', 'class'])\n", (850, 881), True, 'import pandas as pd\n'), ((578, 599), 'os.listdir', 'os.listdir', (['spoon_dir'], {}), '(spoon_dir)\n', (588, 599), False, 'import os\n'), ((657, 677), 'os.listdir', 'os.listdir', (['fork_dir'], {}), '(fork_dir)\n', (667, 677), False, 'import os\n'), ((1017, 1067), 'pandas.DataFrame', 'pd.DataFrame', (["{'filename': files, 'class': folder}"], {}), "({'filename': files, 'class': folder})\n", (1029, 1067), True, 'import pandas as pd\n'), ((1086, 1109), 'pandas.concat', 'pd.concat', (['[result, df]'], {}), '([result, df])\n', (1095, 1109), True, 'import pandas as pd\n'), ((928, 955), 'os.path.join', 'join', (['basedir', 'folder', 'file'], {}), '(basedir, folder, file)\n', (932, 955), False, 'from os.path import join\n'), ((979, 1000), 'os.path.join', 'join', (['basedir', 'folder'], {}), '(basedir, folder)\n', (983, 1000), False, 'from os.path import join\n'), ((1348, 1369), 'os.path.splitext', 'os.path.splitext', (['img'], {}), '(img)\n', (1364, 1369), False, 'import os\n')] |
# Copyright 2018 Alibaba Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import xdl
import unittest
import numpy as np
from xdl.python.lib.datatype import *
idx = np.array([2,1,3,0,1,4,0,3,5,2], dtype=np.int32)
values = np.array([1,2,3,4,5,6,7,8,9,10],dtype=np.float)
segs = np.array([3,4,6,6,10],dtype=np.int32)
grps = np.array([],dtype=np.int32)
embeds = np.array([[0.1],[0.2],[0.3],[0.4],[0.5],[0.6]],dtype=np.float)
length = 3
class TestTile(unittest.TestCase):
def test_cpu_tile_empty_value(self):
empty_values = np.array([], dtype=np.float)
res = xdl.tile(embeds, idx, empty_values, segs, grps,
length=length, reverse=False)
res = xdl.execute(res)
res_tile = np.array([[0.3,0.2,0.4],[0.1,0.0,0.0],[0.2,0.5,0.0],
[0.0,0.0,0.0],[0.1,0.4,0.6]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_cpu_tile_empty_value_reverse(self):
empty_values = np.array([], dtype=np.float)
res = xdl.tile(embeds, idx, empty_values, segs, grps,
length=length, reverse=True)
res = xdl.execute(res)
res_tile = np.array([[0.4,0.2,0.3],[0.1,0.0,0.0],[0.5,0.2,0.0],
[0.0,0.0,0.0],[0.3,0.6,0.4]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_cpu_tile(self):
res = xdl.tile(embeds, idx, values, segs, grps,
length=length, reverse=False)
res = xdl.execute(res)
res_tile = np.array([[0.3,0.4,1.2],[0.4,0.0,0.0],[1.0,3.0,0.0],
[0.0,0.0,0.0],[0.7,3.2,5.4]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_cpu_tile_reverse(self):
res = xdl.tile(embeds, idx, values, segs, grps,
length=length, reverse=True)
res = xdl.execute(res)
res_tile = np.array([[1.2,0.4,0.3],[0.4,0.0,0.0],[3.0,1.0,0.0],
[0.0,0.0,0.0],[3.0,5.4,3.2]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_gpu_tile_empty_value(self):
with xdl.device("GPU"):
empty_values = np.array([], dtype=np.float)
res = xdl.tile(embeds, idx, empty_values, segs, grps,
length=length, reverse=False)
res = xdl.execute(res)
res_tile = np.array([[0.3,0.2,0.4],[0.1,0.0,0.0],[0.2,0.5,0.0],
[0.0,0.0,0.0],[0.1,0.4,0.6]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_gpu_tile_empty_value_reverse(self):
with xdl.device("GPU"):
empty_values = np.array([], dtype=np.float)
res = xdl.tile(embeds, idx, empty_values, segs, grps,
length=length, reverse=True)
res = xdl.execute(res)
res_tile = np.array([[0.4,0.2,0.3],[0.1,0.0,0.0],[0.5,0.2,0.0],
[0.0,0.0,0.0],[0.3,0.6,0.4]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_gpu_tile(self):
with xdl.device("GPU"):
res = xdl.tile(embeds, idx, values, segs, grps,
length=length, reverse=False)
res = xdl.execute(res)
res_tile = np.array([[0.3,0.4,1.2],[0.4,0.0,0.0],[1.0,3.0,0.0],
[0.0,0.0,0.0],[0.7,3.2,5.4]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def test_gpu_tile_reverse(self):
with xdl.device("GPU"):
res = xdl.tile(embeds, idx, values, segs, grps,
length=length, reverse=True)
res = xdl.execute(res)
res_tile = np.array([[1.2,0.4,0.3],[0.4,0.0,0.0],[3.0,1.0,0.0],
[0.0,0.0,0.0],[3.0,5.4,3.2]], dtype=np.float)
self.assertTrue(np.allclose(res, res_tile))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestTile)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| [
"xdl.execute",
"numpy.allclose",
"numpy.array",
"xdl.device",
"xdl.tile",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((772, 828), 'numpy.array', 'np.array', (['[2, 1, 3, 0, 1, 4, 0, 3, 5, 2]'], {'dtype': 'np.int32'}), '([2, 1, 3, 0, 1, 4, 0, 3, 5, 2], dtype=np.int32)\n', (780, 828), True, 'import numpy as np\n'), ((829, 886), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {'dtype': 'np.float'}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=np.float)\n', (837, 886), True, 'import numpy as np\n'), ((884, 926), 'numpy.array', 'np.array', (['[3, 4, 6, 6, 10]'], {'dtype': 'np.int32'}), '([3, 4, 6, 6, 10], dtype=np.int32)\n', (892, 926), True, 'import numpy as np\n'), ((929, 957), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (937, 957), True, 'import numpy as np\n'), ((966, 1034), 'numpy.array', 'np.array', (['[[0.1], [0.2], [0.3], [0.4], [0.5], [0.6]]'], {'dtype': 'np.float'}), '([[0.1], [0.2], [0.3], [0.4], [0.5], [0.6]], dtype=np.float)\n', (974, 1034), True, 'import numpy as np\n'), ((1140, 1168), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (1148, 1168), True, 'import numpy as np\n'), ((1183, 1260), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'empty_values', 'segs', 'grps'], {'length': 'length', 'reverse': '(False)'}), '(embeds, idx, empty_values, segs, grps, length=length, reverse=False)\n', (1191, 1260), False, 'import xdl\n'), ((1298, 1314), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (1309, 1314), False, 'import xdl\n'), ((1334, 1450), 'numpy.array', 'np.array', (['[[0.3, 0.2, 0.4], [0.1, 0.0, 0.0], [0.2, 0.5, 0.0], [0.0, 0.0, 0.0], [0.1, \n 0.4, 0.6]]'], {'dtype': 'np.float'}), '([[0.3, 0.2, 0.4], [0.1, 0.0, 0.0], [0.2, 0.5, 0.0], [0.0, 0.0, 0.0\n ], [0.1, 0.4, 0.6]], dtype=np.float)\n', (1342, 1450), True, 'import numpy as np\n'), ((1587, 1615), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (1595, 1615), True, 'import numpy as np\n'), ((1630, 1706), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'empty_values', 'segs', 'grps'], {'length': 'length', 'reverse': '(True)'}), '(embeds, idx, empty_values, segs, grps, length=length, reverse=True)\n', (1638, 1706), False, 'import xdl\n'), ((1744, 1760), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (1755, 1760), False, 'import xdl\n'), ((1780, 1896), 'numpy.array', 'np.array', (['[[0.4, 0.2, 0.3], [0.1, 0.0, 0.0], [0.5, 0.2, 0.0], [0.0, 0.0, 0.0], [0.3, \n 0.6, 0.4]]'], {'dtype': 'np.float'}), '([[0.4, 0.2, 0.3], [0.1, 0.0, 0.0], [0.5, 0.2, 0.0], [0.0, 0.0, 0.0\n ], [0.3, 0.6, 0.4]], dtype=np.float)\n', (1788, 1896), True, 'import numpy as np\n'), ((2004, 2075), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'values', 'segs', 'grps'], {'length': 'length', 'reverse': '(False)'}), '(embeds, idx, values, segs, grps, length=length, reverse=False)\n', (2012, 2075), False, 'import xdl\n'), ((2113, 2129), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (2124, 2129), False, 'import xdl\n'), ((2149, 2265), 'numpy.array', 'np.array', (['[[0.3, 0.4, 1.2], [0.4, 0.0, 0.0], [1.0, 3.0, 0.0], [0.0, 0.0, 0.0], [0.7, \n 3.2, 5.4]]'], {'dtype': 'np.float'}), '([[0.3, 0.4, 1.2], [0.4, 0.0, 0.0], [1.0, 3.0, 0.0], [0.0, 0.0, 0.0\n ], [0.7, 3.2, 5.4]], dtype=np.float)\n', (2157, 2265), True, 'import numpy as np\n'), ((2381, 2451), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'values', 'segs', 'grps'], {'length': 'length', 'reverse': '(True)'}), '(embeds, idx, values, segs, grps, length=length, reverse=True)\n', (2389, 2451), False, 'import xdl\n'), ((2489, 2505), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (2500, 2505), False, 'import xdl\n'), ((2525, 2641), 'numpy.array', 'np.array', (['[[1.2, 0.4, 0.3], [0.4, 0.0, 0.0], [3.0, 1.0, 0.0], [0.0, 0.0, 0.0], [3.0, \n 5.4, 3.2]]'], {'dtype': 'np.float'}), '([[1.2, 0.4, 0.3], [0.4, 0.0, 0.0], [3.0, 1.0, 0.0], [0.0, 0.0, 0.0\n ], [3.0, 5.4, 3.2]], dtype=np.float)\n', (2533, 2641), True, 'import numpy as np\n'), ((1486, 1512), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (1497, 1512), True, 'import numpy as np\n'), ((1932, 1958), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (1943, 1958), True, 'import numpy as np\n'), ((2301, 2327), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (2312, 2327), True, 'import numpy as np\n'), ((2677, 2703), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (2688, 2703), True, 'import numpy as np\n'), ((2760, 2777), 'xdl.device', 'xdl.device', (['"""GPU"""'], {}), "('GPU')\n", (2770, 2777), False, 'import xdl\n'), ((2806, 2834), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (2814, 2834), True, 'import numpy as np\n'), ((2853, 2930), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'empty_values', 'segs', 'grps'], {'length': 'length', 'reverse': '(False)'}), '(embeds, idx, empty_values, segs, grps, length=length, reverse=False)\n', (2861, 2930), False, 'import xdl\n'), ((2969, 2985), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (2980, 2985), False, 'import xdl\n'), ((3009, 3125), 'numpy.array', 'np.array', (['[[0.3, 0.2, 0.4], [0.1, 0.0, 0.0], [0.2, 0.5, 0.0], [0.0, 0.0, 0.0], [0.1, \n 0.4, 0.6]]'], {'dtype': 'np.float'}), '([[0.3, 0.2, 0.4], [0.1, 0.0, 0.0], [0.2, 0.5, 0.0], [0.0, 0.0, 0.0\n ], [0.1, 0.4, 0.6]], dtype=np.float)\n', (3017, 3125), True, 'import numpy as np\n'), ((3260, 3277), 'xdl.device', 'xdl.device', (['"""GPU"""'], {}), "('GPU')\n", (3270, 3277), False, 'import xdl\n'), ((3306, 3334), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (3314, 3334), True, 'import numpy as np\n'), ((3353, 3429), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'empty_values', 'segs', 'grps'], {'length': 'length', 'reverse': '(True)'}), '(embeds, idx, empty_values, segs, grps, length=length, reverse=True)\n', (3361, 3429), False, 'import xdl\n'), ((3475, 3491), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (3486, 3491), False, 'import xdl\n'), ((3515, 3631), 'numpy.array', 'np.array', (['[[0.4, 0.2, 0.3], [0.1, 0.0, 0.0], [0.5, 0.2, 0.0], [0.0, 0.0, 0.0], [0.3, \n 0.6, 0.4]]'], {'dtype': 'np.float'}), '([[0.4, 0.2, 0.3], [0.1, 0.0, 0.0], [0.5, 0.2, 0.0], [0.0, 0.0, 0.0\n ], [0.3, 0.6, 0.4]], dtype=np.float)\n', (3523, 3631), True, 'import numpy as np\n'), ((3746, 3763), 'xdl.device', 'xdl.device', (['"""GPU"""'], {}), "('GPU')\n", (3756, 3763), False, 'import xdl\n'), ((3783, 3854), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'values', 'segs', 'grps'], {'length': 'length', 'reverse': '(False)'}), '(embeds, idx, values, segs, grps, length=length, reverse=False)\n', (3791, 3854), False, 'import xdl\n'), ((3900, 3916), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (3911, 3916), False, 'import xdl\n'), ((3940, 4056), 'numpy.array', 'np.array', (['[[0.3, 0.4, 1.2], [0.4, 0.0, 0.0], [1.0, 3.0, 0.0], [0.0, 0.0, 0.0], [0.7, \n 3.2, 5.4]]'], {'dtype': 'np.float'}), '([[0.3, 0.4, 1.2], [0.4, 0.0, 0.0], [1.0, 3.0, 0.0], [0.0, 0.0, 0.0\n ], [0.7, 3.2, 5.4]], dtype=np.float)\n', (3948, 4056), True, 'import numpy as np\n'), ((4179, 4196), 'xdl.device', 'xdl.device', (['"""GPU"""'], {}), "('GPU')\n", (4189, 4196), False, 'import xdl\n'), ((4216, 4286), 'xdl.tile', 'xdl.tile', (['embeds', 'idx', 'values', 'segs', 'grps'], {'length': 'length', 'reverse': '(True)'}), '(embeds, idx, values, segs, grps, length=length, reverse=True)\n', (4224, 4286), False, 'import xdl\n'), ((4332, 4348), 'xdl.execute', 'xdl.execute', (['res'], {}), '(res)\n', (4343, 4348), False, 'import xdl\n'), ((4372, 4488), 'numpy.array', 'np.array', (['[[1.2, 0.4, 0.3], [0.4, 0.0, 0.0], [3.0, 1.0, 0.0], [0.0, 0.0, 0.0], [3.0, \n 5.4, 3.2]]'], {'dtype': 'np.float'}), '([[1.2, 0.4, 0.3], [0.4, 0.0, 0.0], [3.0, 1.0, 0.0], [0.0, 0.0, 0.0\n ], [3.0, 5.4, 3.2]], dtype=np.float)\n', (4380, 4488), True, 'import numpy as np\n'), ((4585, 4606), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4604, 4606), False, 'import unittest\n'), ((4671, 4696), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (4694, 4696), False, 'import unittest\n'), ((3169, 3195), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (3180, 3195), True, 'import numpy as np\n'), ((3675, 3701), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (3686, 3701), True, 'import numpy as np\n'), ((4100, 4126), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (4111, 4126), True, 'import numpy as np\n'), ((4532, 4558), 'numpy.allclose', 'np.allclose', (['res', 'res_tile'], {}), '(res, res_tile)\n', (4543, 4558), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import pearsonr
# Correlation Pearson test for whole sample. Outputs are:
# the Pearson statistic rho
# the p-value pval
def test_sample(sample):
# Local variables
var = sample.shape[1]
rho = np.zeros((var, var))
pval = np.zeros((var, var))
# Pearson test results
for i in range(var):
for v in np.arange(i+1, var):
[rho[i, v], pval[i, v]] = pearsonr(sample[:, i], sample[:, v])
[rho[v, i], pval[v, i]] = [rho[i, v], pval[i, v]]
return [rho, pval]
| [
"numpy.zeros",
"numpy.arange",
"scipy.stats.pearsonr"
] | [((242, 262), 'numpy.zeros', 'np.zeros', (['(var, var)'], {}), '((var, var))\n', (250, 262), True, 'import numpy as np\n'), ((274, 294), 'numpy.zeros', 'np.zeros', (['(var, var)'], {}), '((var, var))\n', (282, 294), True, 'import numpy as np\n'), ((365, 386), 'numpy.arange', 'np.arange', (['(i + 1)', 'var'], {}), '(i + 1, var)\n', (374, 386), True, 'import numpy as np\n'), ((424, 460), 'scipy.stats.pearsonr', 'pearsonr', (['sample[:, i]', 'sample[:, v]'], {}), '(sample[:, i], sample[:, v])\n', (432, 460), False, 'from scipy.stats import pearsonr\n')] |
import inspect
import numpy as np
import pytest
from napari._tests.utils import are_objects_equal, layer_test_data
@pytest.mark.parametrize('Layer, data, ndim', layer_test_data)
def test_attrs_arrays(Layer, data, ndim):
"""Test layer attributes and arrays."""
np.random.seed(0)
layer = Layer(data)
# Check layer has been correctly created
assert layer.ndim == ndim
properties = layer._get_state()
# Check every property is in call signature
signature = inspect.signature(Layer)
for prop in properties.keys():
assert prop in signature.parameters
# Check number of properties is same as number in signature
# excluding affine transform and `cache` which is not yet in `_get_state`
assert len(properties) == len(signature.parameters) - 2
# Check new layer can be created
new_layer = Layer(**properties)
# Check that new layer matches old on all properties:
for prop in properties.keys():
assert are_objects_equal(
getattr(layer, prop), getattr(new_layer, prop)
)
@pytest.mark.parametrize('Layer, data, ndim', layer_test_data)
def test_no_callbacks(Layer, data, ndim):
"""Test no internal callbacks for layer emmitters."""
layer = Layer(data)
# Check layer has been correctly created
assert layer.ndim == ndim
# Check that no internal callbacks have been registered
len(layer.events.callbacks) == 0
for em in layer.events.emitters.values():
assert len(em.callbacks) == 0
| [
"pytest.mark.parametrize",
"inspect.signature",
"numpy.random.seed"
] | [((120, 181), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Layer, data, ndim"""', 'layer_test_data'], {}), "('Layer, data, ndim', layer_test_data)\n", (143, 181), False, 'import pytest\n'), ((1073, 1134), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Layer, data, ndim"""', 'layer_test_data'], {}), "('Layer, data, ndim', layer_test_data)\n", (1096, 1134), False, 'import pytest\n'), ((272, 289), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (286, 289), True, 'import numpy as np\n'), ((491, 515), 'inspect.signature', 'inspect.signature', (['Layer'], {}), '(Layer)\n', (508, 515), False, 'import inspect\n')] |
import numpy as np
def diamondarray(dimension=1,fill=1,unfill=0):
""" Create a diamond array using a square dimension.
Fill and unfill values can be integer or float. """
nullresult=np.zeros(1)
#// verify inputs
try:
if not isinstance(dimension, (int, np.integer)):
dimesion=int(dimension)
if not isinstance(fill, (int, float, np.integer)):
fill=int(fill)
if not isinstance(unfill, (int, float, np.integer)):
unfill=int(unfill)
except:
return nullresult
#// check if odd
return nullresult
#// initialize 2d array
a=np.zeros((dimension,dimension))
for row in range(dimension):
for col in range(dimension):
a[row,col]=unfill
#// find the middle of the array
midpoint=(dimension-1)/2
#// initialize an offset
offset=-1
offsetstep=1
#// loop through rows and columns
for row in range(dimension):
if dimension%2 == 0 and row == np.ceil(midpoint):
#// repeat offset for second midpoint row
offset=offset
else:
if row <= np.ceil(midpoint):
#// increase offset for each row for top
offset=offset+offsetstep
else:
#// decrease offset for each row for bottom
offset=offset-offsetstep
for col in range(dimension):
#// set value to one
if dimension%2 == 0:
if col <= np.floor(midpoint):
if col == np.floor(midpoint)-offset:
a[row,col]=fill
if col >= np.ceil(midpoint):
if col == int(midpoint)+offset+1:
a[row,col]=fill
else:
if col == midpoint+offset or col == midpoint-offset:
pass
a[row,col]=fill
return a
def bisectorarray(dimension=1,vertical=True,horizontal=True,fill=1,unfill=0):
""" Create an array using square dimension with the midpoint column
filled. Fill and unfill values can be integer or float. """
nullresult=np.zeros(1)
#// verify inputs
try:
if not isinstance(dimension, (int, np.integer)):
dimesion=int(dimension)
if not isinstance(fill, (int, float, np.integer)):
fill=int(fill)
if not isinstance(unfill, (int, float, np.integer)):
unfill=int(unfill)
except:
return nullresult
#// initialize 2d array
a=np.zeros((dimension,dimension))
for row in range(dimension):
for col in range(dimension):
a[row,col]=unfill
#// find the middle of the array
midpoint=(dimension-1)/2
#// loop through rows and columns
for row in range(dimension):
for col in range(dimension):
#// set value to one
if (col == np.floor(midpoint) or col == np.ceil(midpoint)) and vertical==True:
a[row,col]=fill
if (row == np.floor(midpoint) or row == np.ceil(midpoint)) and horizontal==True:
a[row,col]=fill
return a
| [
"numpy.ceil",
"numpy.zeros",
"numpy.floor"
] | [((195, 206), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (203, 206), True, 'import numpy as np\n'), ((632, 664), 'numpy.zeros', 'np.zeros', (['(dimension, dimension)'], {}), '((dimension, dimension))\n', (640, 664), True, 'import numpy as np\n'), ((2153, 2164), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2161, 2164), True, 'import numpy as np\n'), ((2541, 2573), 'numpy.zeros', 'np.zeros', (['(dimension, dimension)'], {}), '((dimension, dimension))\n', (2549, 2573), True, 'import numpy as np\n'), ((1004, 1021), 'numpy.ceil', 'np.ceil', (['midpoint'], {}), '(midpoint)\n', (1011, 1021), True, 'import numpy as np\n'), ((1139, 1156), 'numpy.ceil', 'np.ceil', (['midpoint'], {}), '(midpoint)\n', (1146, 1156), True, 'import numpy as np\n'), ((1505, 1523), 'numpy.floor', 'np.floor', (['midpoint'], {}), '(midpoint)\n', (1513, 1523), True, 'import numpy as np\n'), ((1648, 1665), 'numpy.ceil', 'np.ceil', (['midpoint'], {}), '(midpoint)\n', (1655, 1665), True, 'import numpy as np\n'), ((2906, 2924), 'numpy.floor', 'np.floor', (['midpoint'], {}), '(midpoint)\n', (2914, 2924), True, 'import numpy as np\n'), ((2935, 2952), 'numpy.ceil', 'np.ceil', (['midpoint'], {}), '(midpoint)\n', (2942, 2952), True, 'import numpy as np\n'), ((3029, 3047), 'numpy.floor', 'np.floor', (['midpoint'], {}), '(midpoint)\n', (3037, 3047), True, 'import numpy as np\n'), ((3058, 3075), 'numpy.ceil', 'np.ceil', (['midpoint'], {}), '(midpoint)\n', (3065, 3075), True, 'import numpy as np\n'), ((1555, 1573), 'numpy.floor', 'np.floor', (['midpoint'], {}), '(midpoint)\n', (1563, 1573), True, 'import numpy as np\n')] |
import numpy as np
import json
def unjson(file):
with open(file, 'r') as fo:
dict = json.load(fo)
return dict
# r is noise rate
r = 0.2
count = 0
p_a = ''
p_g = ''
a = unjson(p_a)
for i in range(len(a['annotations'])):
if np.random.random() < r:
a['annotations'][i]['category_id'] = np.random.randint(1, 20)
count += 1
with open(p_g, 'w') as file:
json.dump(a, file)
print(count)
| [
"json.load",
"numpy.random.randint",
"numpy.random.random",
"json.dump"
] | [((394, 412), 'json.dump', 'json.dump', (['a', 'file'], {}), '(a, file)\n', (403, 412), False, 'import json\n'), ((98, 111), 'json.load', 'json.load', (['fo'], {}), '(fo)\n', (107, 111), False, 'import json\n'), ((248, 266), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (264, 266), True, 'import numpy as np\n'), ((317, 341), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (334, 341), True, 'import numpy as np\n')] |
# coding = utf-8
import pypuf.simulation, pypuf.io
import pypuf.attack
import pypuf.metrics
import numpy as np
import matplotlib.pyplot as plt
import math
import sys, os, random
from util import *
def select_odd_responses(responses, num_crps):
odd_responses = np.empty((num_crps,13,1), float)
for k in range(num_crps):
for i in range(responses.shape[1]):
if(i%2 == 1):
odd_responses[k][int(i/2)] = responses[k][i].copy()
return odd_responses
def instance_one_opuf_crps(puf, num_crps):
seed_instance = int.from_bytes(os.urandom(4), "big")
crps = pypuf.io.ChallengeResponseSet.from_simulation(puf, N=num_crps, seed=seed_instance)
# threshold = lambda r: np.sign(r - np.quantile(r.flatten(), .5))
# crps.responses = threshold(crps.responses)
return crps
def instance_one_opuf_attack(puf, num_crps):
seed_instance = int.from_bytes(os.urandom(4), "big")
crps = pypuf.io.ChallengeResponseSet.from_simulation(puf, N=num_crps, seed=seed_instance)
threshold = lambda r: np.sign(r - np.quantile(r.flatten(), .5))
crps.responses = threshold(crps.responses)
crps.responses = select_odd_responses(crps.responses, num_crps)
seed_instance_train = int.from_bytes(os.urandom(4), "big")
feature_map = pypuf.attack.LeastSquaresRegression.feature_map_optical_pufs_reloaded_improved
attack = pypuf.attack.LeastSquaresRegression(crps, feature_map=feature_map)
model = attack.fit()
seed_instance_test = int.from_bytes(os.urandom(4), "big")
crps_test = pypuf.io.ChallengeResponseSet.from_simulation(puf_opt, N=1000, seed=seed_instance_test)
crps_test.responses = select_odd_responses(crps_test.responses, 1000)
# accuracy = pypuf.metrics.correlation(model, crps_test).mean()
# With post-processing
accuracy = pypuf.metrics.correlation(model, crps_test, postprocessing=threshold).mean()
return accuracy
def instance_one_opuf_attack_n(puf, crps, repeat_experiment, steps=10):
accuracy_opuf = np.array([])
for i in range(steps):
print(i)
instance_accuracy_opuf_repeat = np.zeros(repeat_experiment)
N = int(crps[i])
for j in range(repeat_experiment):
instance_accuracy_opuf_repeat[j] = instance_one_opuf_attack(puf, N)
instance_accuracy_opuf = np.mean(instance_accuracy_opuf_repeat)
accuracy_opuf = np.append(accuracy_opuf, instance_accuracy_opuf)
return accuracy_opuf
def instance_one_hybrid_opuf_attack(puf, num_crps):
seed_instance = int.from_bytes(os.urandom(4), "big")
crps = pypuf.io.ChallengeResponseSet.from_simulation(puf, N=num_crps, seed=seed_instance)
threshold = lambda r: np.sign(r - np.quantile(r.flatten(), .5))
crps.responses = threshold(crps.responses)
# print(crps.responses[0][0])
res_cpy = np.copy(crps.responses)
seed_instance_train = int.from_bytes(os.urandom(4), "big")
for k in range(num_crps):
for i in range(crps.responses.shape[1]):
if i%2 == 0:
crps.responses[k][i][0] = hybrid_flipping(crps.responses[k][i][0], 0.5)
elif i%2 == 1 and crps.responses[k][i-1][0] != res_cpy[k][i-1][0]:
crps.responses[k][i][0] = hybrid_flipping(crps.responses[k][i][0], 0.5)
else:
pass
feature_map = pypuf.attack.LeastSquaresRegression.feature_map_optical_pufs_reloaded_improved
attack = pypuf.attack.LeastSquaresRegression(crps, feature_map=feature_map)
model = attack.fit()
seed_instance_test = int.from_bytes(os.urandom(4), "big")
crps_test = pypuf.io.ChallengeResponseSet.from_simulation(puf, N=1000, seed=seed_instance_test)
# accuracy = pypuf.metrics.correlation(model, crps_test).mean()
# With post-processing
accuracy = pypuf.metrics.correlation(model, crps_test, postprocessing=threshold).mean()
return accuracy
def instance_one_hybrid_opuf_bit_attack(puf, num_crps):
# bias_basis = puf_bias(puf_basis)
# bias_bit = puf_bias(puf_bit)
seed_instance = int.from_bytes(os.urandom(4), "big")
crps = pypuf.io.ChallengeResponseSet.from_simulation(puf, N=num_crps, seed=seed_instance)
threshold = lambda r: np.sign(r - np.quantile(r.flatten(), .5))
crps.responses = threshold(crps.responses)
# print(crps.responses[0][0])
res_cpy = np.copy(crps.responses)
seed_instance_train = int.from_bytes(os.urandom(4), "big")
crps.responses = select_odd_responses(crps.responses, num_crps)
p_guess = 0.5*(1+np.sqrt(0.5))
for k in range(num_crps):
for i in range(crps.responses.shape[1]):
crps.responses[k][i][0] = hybrid_flipping(crps.responses[k][i][0], p_guess) #Flip the response with probability 1 - P(guessing basis)
feature_map = pypuf.attack.LeastSquaresRegression.feature_map_optical_pufs_reloaded_improved
attack = pypuf.attack.LeastSquaresRegression(crps, feature_map=feature_map)
model = attack.fit()
seed_instance_test = int.from_bytes(os.urandom(4), "big")
crps_test = pypuf.io.ChallengeResponseSet.from_simulation(puf, N=1000, seed=seed_instance_test)
crps_test.responses = select_odd_responses(crps_test.responses, 1000)
# accuracy = pypuf.metrics.correlation(model, crps_test).mean()
# With post-processing
accuracy = pypuf.metrics.correlation(model, crps_test, postprocessing=threshold).mean()
return accuracy
def instance_one_hybrid_opuf_attack_n(puf, crps,repeat_experiment, attack = "both", steps=10):
accuracy_hpuf = np.array([])
for i in range(steps):
print(i)
instance_accuracy_hpuf_repeat = np.zeros(repeat_experiment)
N = int(crps[i])
for j in range(repeat_experiment):
if(attack == "both"):
instance_accuracy_hpuf_repeat[j] = instance_one_hybrid_opuf_attack(puf, N)
elif(attack == "bit"):
instance_accuracy_hpuf_repeat[j] = instance_one_hybrid_opuf_bit_attack(puf, N)
instance_accuracy_hpuf = np.mean(instance_accuracy_hpuf_repeat)
accuracy_hpuf = np.append(accuracy_hpuf, instance_accuracy_hpuf)
return accuracy_hpuf
def crp_opuf(n, steps=10):
crps = np.array([])
N = 1e2
step = 0
if n == 32:
step = 1e4
elif n == 64:
step = 20e3
elif n == 128:
step = 5e2
for i in range(steps):
crps = np.append(crps, N)
N = N + step
return crps
if __name__ == '__main__':
n_size = 32
m_size = 1
seed_instance = int.from_bytes(os.urandom(4), "big")
puf_opt = pypuf.simulation.IntegratedOpticalPUF(n=n_size,m=m_size, seed=seed_instance)
num_crps = 10000
crps = instance_one_opuf_crps(puf_opt, num_crps)
'''
Description: It shows that the distribution intensity of electronmagnetic
field is not uniform, which leads to a diffculty of MUB encoding with higher dimension.
'''
val = 0. # the data to appear on the y-axis(start point).
ar = crps.responses.flatten()
# print(ar)
plt.plot(ar, np.zeros_like(ar) + val, 'x')
plt.show()
'''
template of usage
'''
'''
if __name__ == '__main__':
n_size = 32
m_size = 26
seed_instance = int.from_bytes(os.urandom(4), "big")
puf_opt = pypuf.simulation.IntegratedOpticalPUF(n=n_size,m=m_size, seed=seed_instance)
repeat_experiment = 1
num_crps = crp_opuf(n_size, steps=10)
# accuracy_c = instance_one_opuf_attack(puf_opt, N_sample)
# accuracy_h = instance_one_hybrid_opuf_attack(puf_opt, N_sample)
accuracy_c = instance_one_opuf_attack_n(puf_opt, num_crps, repeat_experiment)
print(accuracy_c)
#accuracy_h1 = instance_one_hybrid_opuf_attack_n(puf_opt, num_crps, repeat_experiment, "both")
#print(accuracy_h1)
accuracy_h2 = instance_one_hybrid_opuf_attack_n(puf_opt, num_crps, repeat_experiment, "bit")
print(accuracy_h2)
np.save('./data/crps_opuf_'+str(n_size)+'_'+str(m_size)+'.npy', num_crps)
np.save('./data/classical_opuf_accuracy'+str(n_size)+'_'+str(m_size)+'.npy', accuracy_c)
#np.save('./data/hybrid_opuf_accuracy'+str(n)+'_'+str(m)+'.npy', accuracy_h2)
np.save('./data/hybrid_opuf_odd_accuracy'+str(n_size)+'_'+str(m_size)+'.npy', accuracy_h2)
plt.title('Optical PUF with Classical/Hybrid Construction')
plt.plot(num_crps, accuracy_c, label='cpuf')
plt.plot(num_crps, accuracy_h2, label='hpuf_odd')
plt.xlabel('Number of CRPs')
plt.ylabel('Accuracy (x100%)')
plt.legend()
plt.show()
'''
| [
"numpy.copy",
"numpy.mean",
"numpy.sqrt",
"os.urandom",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.zeros_like",
"matplotlib.pyplot.show"
] | [((269, 303), 'numpy.empty', 'np.empty', (['(num_crps, 13, 1)', 'float'], {}), '((num_crps, 13, 1), float)\n', (277, 303), True, 'import numpy as np\n'), ((1956, 1968), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1964, 1968), True, 'import numpy as np\n'), ((2706, 2729), 'numpy.copy', 'np.copy', (['crps.responses'], {}), '(crps.responses)\n', (2713, 2729), True, 'import numpy as np\n'), ((4096, 4119), 'numpy.copy', 'np.copy', (['crps.responses'], {}), '(crps.responses)\n', (4103, 4119), True, 'import numpy as np\n'), ((5223, 5235), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5231, 5235), True, 'import numpy as np\n'), ((5798, 5810), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5806, 5810), True, 'import numpy as np\n'), ((6588, 6598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6596, 6598), True, 'import matplotlib.pyplot as plt\n'), ((571, 584), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (581, 584), False, 'import sys, os, random\n'), ((889, 902), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (899, 902), False, 'import sys, os, random\n'), ((1216, 1229), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (1226, 1229), False, 'import sys, os, random\n'), ((1470, 1483), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (1480, 1483), False, 'import sys, os, random\n'), ((2038, 2065), 'numpy.zeros', 'np.zeros', (['repeat_experiment'], {}), '(repeat_experiment)\n', (2046, 2065), True, 'import numpy as np\n'), ((2222, 2260), 'numpy.mean', 'np.mean', (['instance_accuracy_opuf_repeat'], {}), '(instance_accuracy_opuf_repeat)\n', (2229, 2260), True, 'import numpy as np\n'), ((2282, 2330), 'numpy.append', 'np.append', (['accuracy_opuf', 'instance_accuracy_opuf'], {}), '(accuracy_opuf, instance_accuracy_opuf)\n', (2291, 2330), True, 'import numpy as np\n'), ((2439, 2452), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (2449, 2452), False, 'import sys, os, random\n'), ((2768, 2781), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (2778, 2781), False, 'import sys, os, random\n'), ((3354, 3367), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (3364, 3367), False, 'import sys, os, random\n'), ((3830, 3843), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (3840, 3843), False, 'import sys, os, random\n'), ((4158, 4171), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (4168, 4171), False, 'import sys, os, random\n'), ((4720, 4733), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (4730, 4733), False, 'import sys, os, random\n'), ((5305, 5332), 'numpy.zeros', 'np.zeros', (['repeat_experiment'], {}), '(repeat_experiment)\n', (5313, 5332), True, 'import numpy as np\n'), ((5630, 5668), 'numpy.mean', 'np.mean', (['instance_accuracy_hpuf_repeat'], {}), '(instance_accuracy_hpuf_repeat)\n', (5637, 5668), True, 'import numpy as np\n'), ((5690, 5738), 'numpy.append', 'np.append', (['accuracy_hpuf', 'instance_accuracy_hpuf'], {}), '(accuracy_hpuf, instance_accuracy_hpuf)\n', (5699, 5738), True, 'import numpy as np\n'), ((5948, 5966), 'numpy.append', 'np.append', (['crps', 'N'], {}), '(crps, N)\n', (5957, 5966), True, 'import numpy as np\n'), ((6082, 6095), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (6092, 6095), False, 'import sys, os, random\n'), ((4264, 4276), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (4271, 4276), True, 'import numpy as np\n'), ((6557, 6574), 'numpy.zeros_like', 'np.zeros_like', (['ar'], {}), '(ar)\n', (6570, 6574), True, 'import numpy as np\n')] |
"""
Make latex table of fit summary using arviz and pandas to_latex method.
"""
import pandas as pd
import numpy as np
import arviz as az
import click
import kinesis as kn
kn.set_mpl_style()
df = kn.data.load_hyades_dataset()
b0 = np.array([17.15474298, 41.28962638, 13.69105771])
# map arviz summary index -> latex column name
_column_name_dict = {
"f_mem": r"\fmem",
"v0[0]": "$v_{0,x}$ (ICRS)",
"v0[1]": "$v_{0,y}$ (ICRS)",
"v0[2]": "$v_{0,z}$ (ICRS)",
"sigv[0]": "$\sigma_{x}$ (ICRS)",
"sigv[1]": "$\sigma_{y}$ (ICRS)",
"sigv[2]": "$\sigma_{z}$ (ICRS)",
"sigv_gal[0]": "$\sigma_{x}$",
"sigv_gal[1]": "$\sigma_{y}$",
"sigv_gal[2]": "$\sigma_{z}$",
"Sigma_gal[0,0]": "$\Sigma_{xx}$",
"Sigma_gal[0,1]": "$\Sigma_{xy}$",
"Sigma_gal[0,2]": "$\Sigma_{xz}$",
"Sigma_gal[1,1]": "$\Sigma_{yy}$",
"Sigma_gal[1,2]": "$\Sigma_{yz}$",
"Sigma_gal[2,2]": "$\Sigma_{zz}$",
"Omega_gal[0,1]": "$\Omega_{xy}$",
"Omega_gal[0,2]": "$\Omega_{xz}$",
"Omega_gal[1,2]": "$\Omega_{yz}$",
"w1_gal": "$w_1$",
"w2_gal": "$w_2$",
"w3_gal": "$w_3$",
"w4_gal": "$w_4$",
"w5_gal": "$w_5$",
"kappa": "$\kappa$",
"omegax_gal": "$\omega_x$",
"omegay_gal": "$\omega_y$",
"omegaz_gal": "$\omega_z$",
"kappa_gal": "$\kappa$",
"v0_bg[0]": "$v_{\rm{bg},x}$ (ICRS)",
"v0_bg[1]": "$v_{\rm{bg},y}$ (ICRS)",
"v0_bg[2]": "$v_{\rm{bg},z}$ (ICRS)",
"sigv_bg": "$\sigma_{\rm bg}$",
}
_columns_to_remove = [
"Sigma_gal[1,0]",
"Sigma_gal[2,0]",
"Sigma_gal[2,1]",
# lower triangle of Omega
"Omega[0,0]",
"Omega[1,1]",
"Omega[2,2]",
"Omega[1,0]",
"Omega[2,0]",
"Omega[2,1]",
"Omega_gal[0,0]",
"Omega_gal[1,1]",
"Omega_gal[2,2]",
"Omega_gal[1,0]",
"Omega_gal[2,0]",
"Omega_gal[2,1]",
]
def make_summary_table(azfit):
pars = [
"f_mem",
"v0",
"sigv_gal",
"Omega_gal",
"omegax_gal",
"omegay_gal",
"omegaz_gal",
"w1_gal",
"w2_gal",
"w3_gal",
"w4_gal",
"w5_gal",
"kappa_gal",
"v0_bg",
"sigv_bg",
]
azfit = kn.add_transformed_posterior(azfit)
# To report everything else except v0/v0_bg in Galactic,
# convert covariance to sigv and correlation matrix from Sigma_gal
Sigma = azfit.posterior["Sigma_gal"].values
diag = np.sqrt(Sigma[:, :, [0, 1, 2], [0, 1, 2]])
# ij component is sigma_i * sigma_j
denom = np.einsum("mnij,mnjk->mnik", diag[:, :, :, None], diag[:, :, None, :])
Omega = Sigma / denom
azfit.posterior["Omega_gal"] = (
("chain", "draw", "Omega_dim_0", "Omega_dim_1"),
Omega,
)
azfit.posterior["sigv_gal"] = (("chain", "draw", "sigv_dim"), diag)
# az.summary does not allow missing pars
pars_for_az = []
pars_missing = []
for p in pars:
if p in azfit.posterior:
pars_for_az.append(p)
else:
pars_missing.append(p)
summary_table = (
az.summary(azfit, pars_for_az).drop(_columns_to_remove, errors="ignore")
# .rename(index=_column_name_dict)
)[["mean", "sd", "hpd_3%", "hpd_97%"]]
return summary_table
@click.command()
@click.option(
"--fit", "-f", type=(str, str), multiple=True, help="(key, path/to/pickle)"
)
@click.option("--latex", is_flag=True)
def main(fit, latex):
frames = [make_summary_table(az.from_pystan(kn.load_stanfit(v))) for k, v in fit]
keys = [k for k, v in fit]
if latex:
merged = (
pd.concat(frames, keys=keys, axis=1, sort=True)
.reindex(frames[0].index)
.rename(
index=_column_name_dict,
columns={"hpd_3%": "hpd 3\%", "hpd_97%": "hpd 97\%"},
)
)
output_file = "../report/fit-summary.tex"
merged.to_latex(output_file, na_rep="", escape=False, multicolumn_format="c")
click.echo("output written to {}".format(output_file))
else:
merged = pd.concat(frames, keys=keys, axis=1, sort=True).reindex(frames[0].index)
click.echo(merged)
if __name__ == "__main__":
main()
| [
"arviz.summary",
"numpy.sqrt",
"click.option",
"kinesis.load_stanfit",
"numpy.array",
"click.echo",
"kinesis.set_mpl_style",
"numpy.einsum",
"pandas.concat",
"click.command",
"kinesis.data.load_hyades_dataset",
"kinesis.add_transformed_posterior"
] | [((174, 192), 'kinesis.set_mpl_style', 'kn.set_mpl_style', ([], {}), '()\n', (190, 192), True, 'import kinesis as kn\n'), ((199, 228), 'kinesis.data.load_hyades_dataset', 'kn.data.load_hyades_dataset', ([], {}), '()\n', (226, 228), True, 'import kinesis as kn\n'), ((234, 283), 'numpy.array', 'np.array', (['[17.15474298, 41.28962638, 13.69105771]'], {}), '([17.15474298, 41.28962638, 13.69105771])\n', (242, 283), True, 'import numpy as np\n'), ((3234, 3249), 'click.command', 'click.command', ([], {}), '()\n', (3247, 3249), False, 'import click\n'), ((3251, 3345), 'click.option', 'click.option', (['"""--fit"""', '"""-f"""'], {'type': '(str, str)', 'multiple': '(True)', 'help': '"""(key, path/to/pickle)"""'}), "('--fit', '-f', type=(str, str), multiple=True, help=\n '(key, path/to/pickle)')\n", (3263, 3345), False, 'import click\n'), ((3348, 3385), 'click.option', 'click.option', (['"""--latex"""'], {'is_flag': '(True)'}), "('--latex', is_flag=True)\n", (3360, 3385), False, 'import click\n'), ((2187, 2222), 'kinesis.add_transformed_posterior', 'kn.add_transformed_posterior', (['azfit'], {}), '(azfit)\n', (2215, 2222), True, 'import kinesis as kn\n'), ((2414, 2456), 'numpy.sqrt', 'np.sqrt', (['Sigma[:, :, [0, 1, 2], [0, 1, 2]]'], {}), '(Sigma[:, :, [0, 1, 2], [0, 1, 2]])\n', (2421, 2456), True, 'import numpy as np\n'), ((2509, 2579), 'numpy.einsum', 'np.einsum', (['"""mnij,mnjk->mnik"""', 'diag[:, :, :, None]', 'diag[:, :, None, :]'], {}), "('mnij,mnjk->mnik', diag[:, :, :, None], diag[:, :, None, :])\n", (2518, 2579), True, 'import numpy as np\n'), ((4119, 4137), 'click.echo', 'click.echo', (['merged'], {}), '(merged)\n', (4129, 4137), False, 'import click\n'), ((3047, 3077), 'arviz.summary', 'az.summary', (['azfit', 'pars_for_az'], {}), '(azfit, pars_for_az)\n', (3057, 3077), True, 'import arviz as az\n'), ((3456, 3474), 'kinesis.load_stanfit', 'kn.load_stanfit', (['v'], {}), '(v)\n', (3471, 3474), True, 'import kinesis as kn\n'), ((4038, 4085), 'pandas.concat', 'pd.concat', (['frames'], {'keys': 'keys', 'axis': '(1)', 'sort': '(True)'}), '(frames, keys=keys, axis=1, sort=True)\n', (4047, 4085), True, 'import pandas as pd\n'), ((3570, 3617), 'pandas.concat', 'pd.concat', (['frames'], {'keys': 'keys', 'axis': '(1)', 'sort': '(True)'}), '(frames, keys=keys, axis=1, sort=True)\n', (3579, 3617), True, 'import pandas as pd\n')] |
"""
matplotlib gives you 5 ways to specify colors,
1) as a single letter string, ala MATLAB
2) as an html style hex string or html color name
3) as an R,G,B tuple, where R,G,B, range from 0-1
4) as a string representing a floating point number
from 0 to 1, corresponding to shades of gray.
5) as a special color "Cn", where n is a number 0-9 specifying the
nth color in the currently active color cycle.
See help(colors) for more info.
"""
import matplotlib.pyplot as plt
import numpy as np
plt.subplot(111, facecolor='darkslategray')
#subplot(111, facecolor='#ababab')
t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2*np.pi*t)
plt.plot(t, s, 'C1')
plt.xlabel('time (s)', color='C1')
plt.ylabel('voltage (mV)', color='0.5') # grayscale color
plt.title('About as silly as it gets, folks', color='#afeeee')
plt.show()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((532, 575), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'facecolor': '"""darkslategray"""'}), "(111, facecolor='darkslategray')\n", (543, 575), True, 'import matplotlib.pyplot as plt\n'), ((615, 640), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)', '(0.01)'], {}), '(0.0, 2.0, 0.01)\n', (624, 640), True, 'import numpy as np\n'), ((645, 666), 'numpy.sin', 'np.sin', (['(2 * np.pi * t)'], {}), '(2 * np.pi * t)\n', (651, 666), True, 'import numpy as np\n'), ((663, 683), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 's', '"""C1"""'], {}), "(t, s, 'C1')\n", (671, 683), True, 'import matplotlib.pyplot as plt\n'), ((684, 718), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (s)"""'], {'color': '"""C1"""'}), "('time (s)', color='C1')\n", (694, 718), True, 'import matplotlib.pyplot as plt\n'), ((719, 758), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""voltage (mV)"""'], {'color': '"""0.5"""'}), "('voltage (mV)', color='0.5')\n", (729, 758), True, 'import matplotlib.pyplot as plt\n'), ((778, 840), 'matplotlib.pyplot.title', 'plt.title', (['"""About as silly as it gets, folks"""'], {'color': '"""#afeeee"""'}), "('About as silly as it gets, folks', color='#afeeee')\n", (787, 840), True, 'import matplotlib.pyplot as plt\n'), ((841, 851), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (849, 851), True, 'import matplotlib.pyplot as plt\n')] |
from sklearn.preprocessing import *
import time
import datetime
import numpy as np
def formatStr(x):
label_encoder = LabelEncoder()
x = label_encoder.fit_transform(x)
x = x.astype(np.float64)
def formatTime(data):
data = data.astype('str')
label_encoder = LabelEncoder()
for col in range(data.shape[1]):
temp = data[:, col]
data[:, col] = label_encoder.fit_transform(temp)
data = formatStructuredData(data)
return data
# d = []
# for col in range(data.shape[1]):
# for row in range(data.shape[0]):
# time=data[row,col]
# index=d.index(time)
def date_compare(time1, time2):
if type(time1) == type(datetime.date(1995, 10, 11)):
t1 = time.mktime(time1)
t2 = time.mktime(time2)
else:
t1 = time1
t2 = time2
if t1 < t2:
return -1
elif t1 > t2:
return 1
else:
return 0
def formatStructuredData(data):
"""
:param data: int
:return: formatData
"""
X_scaled = scale(data)
return X_scaled
def formatTarget(data):
"""
:param data: int
"""
good = 0
bad = 0
avg = np.mean(data)
for index in range(data.shape[0]):
val = data[index]
if val < avg:
data[index] = 0
good += 1
else:
data[index] = 1
bad += 1
# print(good)
# print(bad)
return data.astype('int')
| [
"numpy.mean",
"time.mktime",
"datetime.date"
] | [((1173, 1186), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (1180, 1186), True, 'import numpy as np\n'), ((737, 755), 'time.mktime', 'time.mktime', (['time1'], {}), '(time1)\n', (748, 755), False, 'import time\n'), ((769, 787), 'time.mktime', 'time.mktime', (['time2'], {}), '(time2)\n', (780, 787), False, 'import time\n'), ((694, 721), 'datetime.date', 'datetime.date', (['(1995)', '(10)', '(11)'], {}), '(1995, 10, 11)\n', (707, 721), False, 'import datetime\n')] |
import numpy as np
import tensorflow as tf
class GatedCNN(nn.Module):
def __init__(self, conf):
tf.reset_default_graph()
self.X = tf.placeholder(shape=[conf.batch_size, conf.context_size-1], dtype=tf.int32, name="X")
self.y = tf.placeholder(shape=[conf.batch_size, conf.context_size-1], dtype=tf.int32, name="y")
embed = self.create_embeddings(self.X, conf)
h, res_input = embed, embed
for i in range(conf.num_layers):
fanin_depth = h.get_shape()[-1]
#last layer should have filter size of 1
filter_size = conf.filter_size if i < conf.num_layers-1 else 1
shape = (conf.filter_h, conf.filter_w, fanin_depth, filter_size)
with tf.variable_scope("layer_%d"%i):
conv_w = self.conv_op(h, shape, "linear")
conv_v = self.conv_op(h, shape, "gated")
h = conv_w * tf.sigmoid(conv_v)
if i % conf.block_size == 0:
h += res_input
res_input = h
h = tf.reshape(h, (-1, conf.embedding_size))
y_shape = self.y.get_shape().as_list()
self.y = tf.reshape(self.y, (y_shape[0] * y_shape[1], 1))
softmax_w = tf.get_variable("softmax_w", [conf.vocab_size, conf.embedding_size], tf.float32,
tf.random_normal_initializer(0.0, 0.1))
softmax_b = tf.get_variable("softmax_b", [conf.vocab_size], tf.float32, tf.constant_initializer(1.0))
#PROBLEM
#Preferance: NCE Loss, heirarchial softmax, adaptive softmax
self.loss = tf.reduce_mean(tf.nn.nce_loss(softmax_w, softmax_b, h, self.y, conf.num_sampled, conf.vocab_size))
trainer = tf.train.MomentumOptimizer(conf.learning_rate, conf.momentum)
gradients = trainer.compute_gradients(self.loss)
clipped_gradients = [(tf.clip_by_value(_[0], -conf.grad_clip, conf.grad_clip), _[1]) for _ in gradients]
self.optimizer = trainer.apply_gradients(clipped_gradients)
self.perplexity = tf.exp(self.loss)
self.create_summaries()
def create_embeddings(self, X, conf):
#No getters in pytorch
embeddings = tf.get_variable("embeds",(conf.vocab_size, conf.embedding_size), tf.float32, tf.random_uniform_initializer(-1.0,1.0))
#embeddings for words sentences in particular batch
embed = tf.nn.embedding_lookup(embeddings, X)
mask_layer = np.ones((conf.batch_size, conf.context_size-1, conf.embedding_size))
mask_layer[:,0:conf.filter_h/2,:] = 0
embed *= mask_layer
embed_shape = embed.get_shape().as_list()
embed = tf.reshape(embed, (embed_shape[0], embed_shape[1], embed_shape[2], 1))
return embed
def conv_op(self, fan_in, shape, name):
W = tf.get_variable("%s_W"%name, shape, tf.float32, tf.random_normal_initializer(0.0, 0.1))
b = tf.get_variable("%s_b"%name, shape[-1], tf.float32, tf.constant_initializer(1.0))
return tf.add(tf.nn.conv2d(fan_in, W, strides=[1,1,1,1], padding='SAME'), b)
def create_summaries(self):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("perplexity", self.perplexity)
self.merged_summary_op = tf.summary.merge_all()
'''
variables_dict = {
"conv1_weights": tf.Variable(tf.random_normal([5, 5, 32, 32]),
name="conv1_weights")
"conv1_biases": tf.Variable(tf.zeros([32]), name="conv1_biases")
... etc. ...
}
def my_image_filter(input_images, variables_dict):
conv1 = tf.nn.conv2d(input_images, variables_dict["conv1_weights"],
strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(conv1 + variables_dict["conv1_biases"])
conv2 = tf.nn.conv2d(relu1, variables_dict["conv2_weights"],
strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(conv2 + variables_dict["conv2_biases"])
# The 2 calls to my_image_filter() now use the same variables
result1 = my_image_filter(image1, variables_dict)
result2 = my_image_filter(image2, variables_dict)
def conv_relu(input, kernel_shape, bias_shape):
# Create variable named "weights".
weights = tf.get_variable("weights", kernel_shape,
initializer=tf.random_normal_initializer())
# Create variable named "biases".
biases = tf.get_variable("biases", bias_shape,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(input, weights,
strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(conv + biases)
''' | [
"tensorflow.nn.conv2d",
"tensorflow.nn.embedding_lookup",
"tensorflow.summary.merge_all",
"tensorflow.reset_default_graph",
"numpy.ones",
"tensorflow.variable_scope",
"tensorflow.train.MomentumOptimizer",
"tensorflow.nn.nce_loss",
"tensorflow.placeholder",
"tensorflow.random_normal_initializer",
... | [((110, 134), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (132, 134), True, 'import tensorflow as tf\n'), ((165, 258), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[conf.batch_size, conf.context_size - 1]', 'dtype': 'tf.int32', 'name': '"""X"""'}), "(shape=[conf.batch_size, conf.context_size - 1], dtype=tf.\n int32, name='X')\n", (179, 258), True, 'import tensorflow as tf\n'), ((269, 362), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[conf.batch_size, conf.context_size - 1]', 'dtype': 'tf.int32', 'name': '"""y"""'}), "(shape=[conf.batch_size, conf.context_size - 1], dtype=tf.\n int32, name='y')\n", (283, 362), True, 'import tensorflow as tf\n'), ((1089, 1129), 'tensorflow.reshape', 'tf.reshape', (['h', '(-1, conf.embedding_size)'], {}), '(h, (-1, conf.embedding_size))\n', (1099, 1129), True, 'import tensorflow as tf\n'), ((1194, 1242), 'tensorflow.reshape', 'tf.reshape', (['self.y', '(y_shape[0] * y_shape[1], 1)'], {}), '(self.y, (y_shape[0] * y_shape[1], 1))\n', (1204, 1242), True, 'import tensorflow as tf\n'), ((1756, 1817), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['conf.learning_rate', 'conf.momentum'], {}), '(conf.learning_rate, conf.momentum)\n', (1782, 1817), True, 'import tensorflow as tf\n'), ((2082, 2099), 'tensorflow.exp', 'tf.exp', (['self.loss'], {}), '(self.loss)\n', (2088, 2099), True, 'import tensorflow as tf\n'), ((2422, 2459), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'X'], {}), '(embeddings, X)\n', (2444, 2459), True, 'import tensorflow as tf\n'), ((2481, 2551), 'numpy.ones', 'np.ones', (['(conf.batch_size, conf.context_size - 1, conf.embedding_size)'], {}), '((conf.batch_size, conf.context_size - 1, conf.embedding_size))\n', (2488, 2551), True, 'import numpy as np\n'), ((2699, 2769), 'tensorflow.reshape', 'tf.reshape', (['embed', '(embed_shape[0], embed_shape[1], embed_shape[2], 1)'], {}), '(embed, (embed_shape[0], embed_shape[1], embed_shape[2], 1))\n', (2709, 2769), True, 'import tensorflow as tf\n'), ((3161, 3197), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (3178, 3197), True, 'import tensorflow as tf\n'), ((3206, 3254), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""perplexity"""', 'self.perplexity'], {}), "('perplexity', self.perplexity)\n", (3223, 3254), True, 'import tensorflow as tf\n'), ((3288, 3310), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (3308, 3310), True, 'import tensorflow as tf\n'), ((1382, 1420), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (1410, 1420), True, 'import tensorflow as tf\n'), ((1502, 1530), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (1525, 1530), True, 'import tensorflow as tf\n'), ((1653, 1740), 'tensorflow.nn.nce_loss', 'tf.nn.nce_loss', (['softmax_w', 'softmax_b', 'h', 'self.y', 'conf.num_sampled', 'conf.vocab_size'], {}), '(softmax_w, softmax_b, h, self.y, conf.num_sampled, conf.\n vocab_size)\n', (1667, 1740), True, 'import tensorflow as tf\n'), ((2305, 2345), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (2334, 2345), True, 'import tensorflow as tf\n'), ((2897, 2935), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2925, 2935), True, 'import tensorflow as tf\n'), ((3001, 3029), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (3024, 3029), True, 'import tensorflow as tf\n'), ((3053, 3114), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['fan_in', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(fan_in, W, strides=[1, 1, 1, 1], padding='SAME')\n", (3065, 3114), True, 'import tensorflow as tf\n'), ((767, 800), 'tensorflow.variable_scope', 'tf.variable_scope', (["('layer_%d' % i)"], {}), "('layer_%d' % i)\n", (784, 800), True, 'import tensorflow as tf\n'), ((1905, 1960), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['_[0]', '(-conf.grad_clip)', 'conf.grad_clip'], {}), '(_[0], -conf.grad_clip, conf.grad_clip)\n', (1921, 1960), True, 'import tensorflow as tf\n'), ((944, 962), 'tensorflow.sigmoid', 'tf.sigmoid', (['conv_v'], {}), '(conv_v)\n', (954, 962), True, 'import tensorflow as tf\n')] |
#!usr/bin/env python
# -*- coding:utf-8 -*-
import os
import random
import logging
import argparse
import importlib
import platform
from pprint import pformat
import numpy as np
import torch
from agents.utils import *
# torch.backends.cudnn.enabled = True
# torch.backends.cudnn.benchmark = True
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') # - %(name)s
logger = logging.getLogger(__file__)
device = torch.device('cuda' if torch.cuda.is_available() and platform.system() != 'Windows' else 'cpu')
logger.info("Device: {}".format(device))
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
setup_seed(42)
parser = argparse.ArgumentParser()
# agent
parser.add_argument("--agent", type=str, required=True,
help="Agent name")
parser.add_argument("--task", type=str, required=True,
help="Agent name")
# data
parser.add_argument("--dataset_path", type=str, default="data/catslu/hyps/map/",
help="Path or url of the dataset. If empty download accroding to dataset.")
parser.add_argument("--save_dir", type=str, default="checkpoint/")
parser.add_argument('--save_name', type=str, default="")
# training
parser.add_argument('--epochs', type=int, required=True)
parser.add_argument('--early_stop', default=-1, type=int)
parser.add_argument('--mode', type=str, default="train")
parser.add_argument('--lr_reduce_patience', default=-1, type=int)
parser.add_argument('--lr_decay', type=float, default=0.5)
# infer
parser.add_argument('--result_path', type=str, default="")
parser.add_argument('--infer_data', type=str, default="test")
def get_agent_task(opt):
agent_name = opt.get('agent')
task_name = opt.get('task')
# "agents.bert_agents.sequence_labeling"
trainer_module = importlib.import_module("agents." + agent_name + ".trainer")
trainer_class = getattr(trainer_module, "Trainer")
data_module = importlib.import_module("tasks." + task_name)
getdata_class = getattr(data_module, "get_datasets")
builddata_class = getattr(data_module, "build_dataset")
return trainer_class, getdata_class, builddata_class
parsed = vars(parser.parse_known_args()[0])
# trainer_class, getdata_class = AGENT_CLASSES[parsed.get('agent')]
trainer_class, getdata_class, builddata_class = get_agent_task(parsed)
trainer_class.add_cmdline_args(parser)
opt = parser.parse_args()
def main():
# my_module = importlib.import_module(module_name)
# model_class = getattr(my_module, class_name)
if not os.path.exists(opt.save_dir):
os.mkdir(opt.save_dir)
opt.best_checkpoint_path = opt.save_dir + opt.save_name + "_" + parsed.get('task') + "_" + parsed.get(
'agent') + '_best_model'
logger.info("Arguments: %s", pformat(opt))
trainer = trainer_class(opt, device)
datasets = getdata_class(opt.dataset_path)
for k, v in datasets.items():
trainer.load_data(k, v, builddata_class, infer=opt.mode == "infer")
if opt.mode == "train":
trainer.set_optim_schedule()
if opt.mode == "infer":
if os.path.exists(opt.best_checkpoint_path):
opt.checkpoint = opt.best_checkpoint_path
logger.info("load checkpoint from {} ".format(opt.checkpoint))
trainer.load(opt.checkpoint)
if opt.infer_data not in trainer.dataset:
raise Exception("%s does not exists in datasets" % opt.infer_data)
result = trainer.infer(opt.infer_data)
if opt.result_path:
save_json(result, opt.result_path)
else:
for e in range(opt.epochs):
trainer.train_epoch(e)
if trainer.patience >= opt.early_stop > 0:
break
trainer.evaluate(e, "valid")
if trainer.patience >= opt.early_stop > 0:
break
logger.info('Test performance {}'.format(trainer.test_performance))
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"logging.getLogger",
"os.path.exists",
"importlib.import_module",
"argparse.ArgumentParser",
"random.seed",
"pprint.pformat",
"platform.system",
"torch.cuda.is_available",
"numpy.random.seed",
"os.mkdir"
] | [((300, 396), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(levelname)s - %(message)s')\n", (319, 396), False, 'import logging\n'), ((415, 442), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (432, 442), False, 'import logging\n'), ((799, 824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (822, 824), False, 'import argparse\n'), ((618, 641), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (635, 641), False, 'import torch\n'), ((646, 678), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (672, 678), False, 'import torch\n'), ((683, 703), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (697, 703), True, 'import numpy as np\n'), ((708, 725), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (719, 725), False, 'import random\n'), ((1929, 1989), 'importlib.import_module', 'importlib.import_module', (["('agents.' + agent_name + '.trainer')"], {}), "('agents.' + agent_name + '.trainer')\n", (1952, 1989), False, 'import importlib\n'), ((2063, 2108), 'importlib.import_module', 'importlib.import_module', (["('tasks.' + task_name)"], {}), "('tasks.' + task_name)\n", (2086, 2108), False, 'import importlib\n'), ((2664, 2692), 'os.path.exists', 'os.path.exists', (['opt.save_dir'], {}), '(opt.save_dir)\n', (2678, 2692), False, 'import os\n'), ((2702, 2724), 'os.mkdir', 'os.mkdir', (['opt.save_dir'], {}), '(opt.save_dir)\n', (2710, 2724), False, 'import os\n'), ((2899, 2911), 'pprint.pformat', 'pformat', (['opt'], {}), '(opt)\n', (2906, 2911), False, 'from pprint import pformat\n'), ((3218, 3258), 'os.path.exists', 'os.path.exists', (['opt.best_checkpoint_path'], {}), '(opt.best_checkpoint_path)\n', (3232, 3258), False, 'import os\n'), ((476, 501), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (499, 501), False, 'import torch\n'), ((506, 523), 'platform.system', 'platform.system', ([], {}), '()\n', (521, 523), False, 'import platform\n')] |
import numpy as np
def is_numeric(u):
return isinstance(u, (int, float)) \
or np.isscalar(u)
| [
"numpy.isscalar"
] | [((95, 109), 'numpy.isscalar', 'np.isscalar', (['u'], {}), '(u)\n', (106, 109), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import os
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from keras.utils import to_categorical
import rupunktor.model_zoo as models
# N_HIDDEN = 128
# VOCABULARY_SIZE = 50002
# TAGS_SPACE_SIZE = 18
def _create_model():
return models.cut_augmented_gru(
hidden_units=128,
words_vocabulary_size=50002,
tags_vocabulary_size=702
)
DATA_DIR = './news_data/'
MODELS_DIR = './models/'
CHECKPOINTS_DIR = './weights/'
COMPILE_OPTS = dict(optimizer='adagrad', loss='categorical_crossentropy', metrics=['categorical_accuracy'])
def main(args):
if not os.path.exists(MODELS_DIR):
os.makedirs(MODELS_DIR)
if not os.path.exists(CHECKPOINTS_DIR):
os.makedirs(CHECKPOINTS_DIR)
data_file = os.path.join(args.data_dir, args.data_file)
tags_file = os.path.join(args.data_dir, 'morph_tags.npy')
xy_all = np.load(data_file)
x1_all = xy_all[:, 0, :]
y_all = to_categorical(xy_all[:, 1, :])
# Drop last y mark, because we cannot predict signs after </s> tag
y_all = y_all[:, :-1, :]
x2_all = None
if os.path.isfile(tags_file) and args.use_tags:
x2_all = np.load(tags_file)
if args.no_embedding:
# Reshape data for rnn input
x1_all = x1_all.reshape(x1_all.shape[0], x1_all.shape[1], 1)
if args.model:
print('Load model from file {}'.format(args.model))
model = load_model(args.model)
model.name = os.path.splitext(args.model)[0]
else:
model = _create_model()
model.compile(**COMPILE_OPTS)
if args.weights:
print('Use weights from file {}'.format(args.weights))
model.load_weights(args.weights)
save_fname = os.path.join(MODELS_DIR, model.name + '.hdf5')
print('Will save trained model to {}'.format(save_fname))
checkpoint = ModelCheckpoint(
filepath=CHECKPOINTS_DIR + model.name + '.w.{epoch:02d}-{val_categorical_accuracy:.5f}.hdf5',
monitor='val_categorical_accuracy', save_weights_only=True, period=1, mode='max',
)
opts = dict(batch_size=128, epochs=40, verbose=1, validation_split=0.2, callbacks=[checkpoint])
if x2_all is not None:
model.fit([x1_all, x2_all], y_all, **opts)
else:
model.fit(x1_all, y_all, **opts)
model.save(save_fname)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', metavar='DIR', default=DATA_DIR)
parser.add_argument('--model', metavar='PATH', help='Load pretrained model from file specified')
parser.add_argument('--weights', metavar='PATH', help='Load pretrained model weights')
parser.add_argument('--no_embedding', action='store_true')
parser.add_argument('--use_tags', action='store_true', help='Use morphological tags file morph_tags.npy')
parser.add_argument('--data_file', metavar='NAME', default='data.npy')
args = parser.parse_args()
main(args)
| [
"os.path.exists",
"keras.models.load_model",
"keras.callbacks.ModelCheckpoint",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.splitext",
"keras.utils.to_categorical",
"rupunktor.model_zoo.cut_augmented_gru",
"os.path.isfile",
"numpy.load"
] | [((305, 406), 'rupunktor.model_zoo.cut_augmented_gru', 'models.cut_augmented_gru', ([], {'hidden_units': '(128)', 'words_vocabulary_size': '(50002)', 'tags_vocabulary_size': '(702)'}), '(hidden_units=128, words_vocabulary_size=50002,\n tags_vocabulary_size=702)\n', (329, 406), True, 'import rupunktor.model_zoo as models\n'), ((814, 857), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.data_file'], {}), '(args.data_dir, args.data_file)\n', (826, 857), False, 'import os\n'), ((874, 919), 'os.path.join', 'os.path.join', (['args.data_dir', '"""morph_tags.npy"""'], {}), "(args.data_dir, 'morph_tags.npy')\n", (886, 919), False, 'import os\n'), ((934, 952), 'numpy.load', 'np.load', (['data_file'], {}), '(data_file)\n', (941, 952), True, 'import numpy as np\n'), ((994, 1025), 'keras.utils.to_categorical', 'to_categorical', (['xy_all[:, 1, :]'], {}), '(xy_all[:, 1, :])\n', (1008, 1025), False, 'from keras.utils import to_categorical\n'), ((1762, 1808), 'os.path.join', 'os.path.join', (['MODELS_DIR', "(model.name + '.hdf5')"], {}), "(MODELS_DIR, model.name + '.hdf5')\n", (1774, 1808), False, 'import os\n'), ((1890, 2090), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(CHECKPOINTS_DIR + model.name +\n '.w.{epoch:02d}-{val_categorical_accuracy:.5f}.hdf5')", 'monitor': '"""val_categorical_accuracy"""', 'save_weights_only': '(True)', 'period': '(1)', 'mode': '"""max"""'}), "(filepath=CHECKPOINTS_DIR + model.name +\n '.w.{epoch:02d}-{val_categorical_accuracy:.5f}.hdf5', monitor=\n 'val_categorical_accuracy', save_weights_only=True, period=1, mode='max')\n", (1905, 2090), False, 'from keras.callbacks import ModelCheckpoint\n'), ((2403, 2428), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2426, 2428), False, 'import argparse\n'), ((655, 681), 'os.path.exists', 'os.path.exists', (['MODELS_DIR'], {}), '(MODELS_DIR)\n', (669, 681), False, 'import os\n'), ((691, 714), 'os.makedirs', 'os.makedirs', (['MODELS_DIR'], {}), '(MODELS_DIR)\n', (702, 714), False, 'import os\n'), ((727, 758), 'os.path.exists', 'os.path.exists', (['CHECKPOINTS_DIR'], {}), '(CHECKPOINTS_DIR)\n', (741, 758), False, 'import os\n'), ((768, 796), 'os.makedirs', 'os.makedirs', (['CHECKPOINTS_DIR'], {}), '(CHECKPOINTS_DIR)\n', (779, 796), False, 'import os\n'), ((1152, 1177), 'os.path.isfile', 'os.path.isfile', (['tags_file'], {}), '(tags_file)\n', (1166, 1177), False, 'import os\n'), ((1214, 1232), 'numpy.load', 'np.load', (['tags_file'], {}), '(tags_file)\n', (1221, 1232), True, 'import numpy as np\n'), ((1462, 1484), 'keras.models.load_model', 'load_model', (['args.model'], {}), '(args.model)\n', (1472, 1484), False, 'from keras.models import load_model\n'), ((1506, 1534), 'os.path.splitext', 'os.path.splitext', (['args.model'], {}), '(args.model)\n', (1522, 1534), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""linear.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1shB6vbT83PlKWLx9jUu9cP4W_SWy-cKn
An example of linear model using PyTorch in Google Colab
"""
import torch
from torch.autograd import Variable
class linearRegression(torch.nn.Module):
def __init__(self, inputSize, outputSize):
super(linearRegression, self).__init__()
self.linear = torch.nn.Linear(inputSize, outputSize)
def forward(self, x):
out = self.linear(x)
return out
import random
x_values = [i + (random.random() - random.random()) for i in range(11)]
print (x_values)
y_values = [(6 * i + (random.random() - random.random()) - 35) for i in range(11)]
print (y_values)
import matplotlib.pyplot as plt
plt.plot(x_values, y_values, 'o')
plt.show()
import numpy as np
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1)
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
inputDim = 1 # takes variable 'x'
outputDim = 1 # takes variable 'y'
learningRate = 0.01
epochs = 1000
model = linearRegression(inputDim, outputDim)
model.cuda()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learningRate)
for epoch in range(epochs):
inputs = Variable(torch.from_numpy(x_train).cuda())
labels = Variable(torch.from_numpy(y_train).cuda())
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
print(loss)
loss.backward()
optimizer.step()
print('epoch {}, loss {}'.format(epoch, loss.item()))
with torch.no_grad(): # we don't need gradients in the testing phase
predicted = model(Variable(torch.from_numpy(x_train).cuda())).cpu().data.numpy()
print(predicted)
plt.clf()
plt.plot(x_train, y_train, 'go', label='Input data')
plt.plot(x_train, predicted, '--', label='Predictions')
plt.legend(loc='best')
plt.show()
print (model)
print(model.state_dict()) | [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"torch.from_numpy",
"numpy.array",
"torch.nn.MSELoss",
"torch.nn.Linear",
"torch.no_grad",
"random.random",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((812, 845), 'matplotlib.pyplot.plot', 'plt.plot', (['x_values', 'y_values', '"""o"""'], {}), "(x_values, y_values, 'o')\n", (820, 845), True, 'import matplotlib.pyplot as plt\n'), ((846, 856), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (854, 856), True, 'import matplotlib.pyplot as plt\n'), ((887, 923), 'numpy.array', 'np.array', (['x_values'], {'dtype': 'np.float32'}), '(x_values, dtype=np.float32)\n', (895, 923), True, 'import numpy as np\n'), ((967, 1003), 'numpy.array', 'np.array', (['y_values'], {'dtype': 'np.float32'}), '(y_values, dtype=np.float32)\n', (975, 1003), True, 'import numpy as np\n'), ((1228, 1246), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1244, 1246), False, 'import torch\n'), ((1836, 1845), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1843, 1845), True, 'import matplotlib.pyplot as plt\n'), ((1846, 1898), 'matplotlib.pyplot.plot', 'plt.plot', (['x_train', 'y_train', '"""go"""'], {'label': '"""Input data"""'}), "(x_train, y_train, 'go', label='Input data')\n", (1854, 1898), True, 'import matplotlib.pyplot as plt\n'), ((1899, 1954), 'matplotlib.pyplot.plot', 'plt.plot', (['x_train', 'predicted', '"""--"""'], {'label': '"""Predictions"""'}), "(x_train, predicted, '--', label='Predictions')\n", (1907, 1954), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1977), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1965, 1977), True, 'import matplotlib.pyplot as plt\n'), ((1978, 1988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1986, 1988), True, 'import matplotlib.pyplot as plt\n'), ((1666, 1681), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1679, 1681), False, 'import torch\n'), ((460, 498), 'torch.nn.Linear', 'torch.nn.Linear', (['inputSize', 'outputSize'], {}), '(inputSize, outputSize)\n', (475, 498), False, 'import torch\n'), ((606, 621), 'random.random', 'random.random', ([], {}), '()\n', (619, 621), False, 'import random\n'), ((624, 639), 'random.random', 'random.random', ([], {}), '()\n', (637, 639), False, 'import random\n'), ((701, 716), 'random.random', 'random.random', ([], {}), '()\n', (714, 716), False, 'import random\n'), ((719, 734), 'random.random', 'random.random', ([], {}), '()\n', (732, 734), False, 'import random\n'), ((1363, 1388), 'torch.from_numpy', 'torch.from_numpy', (['x_train'], {}), '(x_train)\n', (1379, 1388), False, 'import torch\n'), ((1419, 1444), 'torch.from_numpy', 'torch.from_numpy', (['y_train'], {}), '(y_train)\n', (1435, 1444), False, 'import torch\n'), ((1761, 1786), 'torch.from_numpy', 'torch.from_numpy', (['x_train'], {}), '(x_train)\n', (1777, 1786), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = '<NAME>'
__date__ = '2021-11-04'
__copyright__ = '(C) 2021, <NAME>'
from qgis.core import *
from qgis.gui import *
from lftools.geocapt.topogeo import str2HTML
from lftools.geocapt.imgs import Imgs
from qgis.core import *
from qgis.gui import *
import numpy as np
from numpy.linalg import norm, det, inv, solve
# Tradução
LOC = QgsApplication.locale()[:2]
def tr(*string):
# Traduzir para o portugês: arg[0] - english (translate), arg[1] - português
if LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return string[0]
else:
return string[0]
# Validação dos Pontos Homólogos
def ValidacaoVetores(vetores, metodo):
# número de feições por modelo matemático
cont = vetores.featureCount()
sinal = True
if metodo == 0:
if cont < 1:
raise QgsProcessingException(tr('It takes 1 or more vectors to perform this transformation!', 'É necessario 1 ou mais vetores para realizar essa transformação!'))
elif metodo == 1:
if cont < 2:
raise QgsProcessingException(tr('It takes 2 or more vectors to perform this transformation!', 'É necessario 2 ou mais vetores para realizar essa transformação!'))
elif metodo == 2:
if cont < 3:
raise QgsProcessingException(tr('It takes 3 or more vectors to perform this transformation!', 'É necessario 3 ou mais vetores para realizar essa transformação!'))
# cada feição (vetor) deve ter 2 dois vértices distintos
for feat in vetores.getFeatures():
geom = feat.geometry()
coord = geom.asPolyline()
if len(coord) != 2:
raise QgsProcessingException(tr('The vector lines must be created with exactly two points!', 'As linhas de vetores devem ter exatamente 2 vértices!'))
return sinal
# Transformação de Coordenadas de Geometrias a partir de uma função de transformação
def transformGeom2D(geom, CoordTransf):
if geom.type() == 0: #Point
if geom.isMultipart():
pnts = geom.asMultiPoint()
newPnts = []
for pnt in pnts:
x, y = CoordTransf(pnt)
newPnts += [QgsPointXY(x,y)]
newGeom = QgsGeometry.fromMultiPointXY(newPnts)
return newGeom
else:
pnt = geom.asPoint()
newPnt = QgsPointXY(x,y)
newGeom = QgsGeometry.fromPointXY(newPnt)
return newGeom
elif geom.type() == 1: #Line
if geom.isMultipart():
linhas = geom.asMultiPolyline()
newLines = []
for linha in linhas:
newLine =[]
for pnt in linha:
x, y = CoordTransf(pnt)
newLine += [QgsPointXY(x,y)]
newLines += [newLine]
newGeom = QgsGeometry.fromMultiPolylineXY(newLines)
return newGeom
else:
linha = geom.asPolyline()
newLine =[]
for pnt in linha:
x, y = CoordTransf(pnt)
newLine += [QgsPointXY(x,y)]
newGeom = QgsGeometry.fromPolylineXY(newLine)
return newGeom
elif geom.type() == 2: #Polygon
if geom.isMultipart():
poligonos = geom.asMultiPolygon()
newPolygons = []
for pol in poligonos:
newPol = []
for anel in pol:
newAnel = []
for pnt in anel:
x, y = CoordTransf(pnt)
newAnel += [QgsPointXY(x,y)]
newPol += [newAnel]
newPolygons += [newPol]
newGeom = QgsGeometry.fromMultiPolygonXY(newPolygons)
return newGeom
else:
pol = geom.asPolygon()
newPol = []
for anel in pol:
newAnel = []
for pnt in anel:
x, y = CoordTransf(pnt)
newAnel += [QgsPointXY(x,y)]
newPol += [newAnel]
newGeom = QgsGeometry.fromPolygonXY(newPol)
return newGeom
else:
return None
# Ajustamento 2D
def Ajust2D(vetores, metodo):
# Métodos:
# 0 - translação, 1 - Helmert 2D, 2 - Afim
# numero de pontos homologos
n_pnts_homo = vetores.featureCount()
# numero minimo de pontos homologos por metodo
if metodo == 0: # 0 - translação
min_pnts_homo = n_pnts_homo == 1
elif metodo == 1: # 1 - Helmert 2D
min_pnts_homo = n_pnts_homo == 2
elif metodo == 2: # 2 - Afim
min_pnts_homo = n_pnts_homo == 3
A = [] # Matriz Design
L = [] # Coordenadas Finais
Lo = [] # Coordenadas Iniciais
for feat in vetores.getFeatures():
geom = feat.geometry()
coord = geom.asPolyline()
xa = coord[0].x()
ya = coord[0].y()
xb = coord[1].x()
yb = coord[1].y()
if metodo == 0:
A += [[1, 0], [0, 1]]
elif metodo == 1:
A += [[xa, -ya, 1, 0], [ya, xa, 0, 1]]
elif metodo == 2:
A += [[xa, ya, 1, 0, 0, 0], [0, 0, 0, xa, ya, 1]]
L +=[[xb], [yb]]
Lo +=[[xa], [ya]]
A = np.matrix(A)
L = np.matrix(L)
Lo = np.matrix(Lo)
msg_erro = tr('Georeferencing vectors should not be aligned!', 'Os vetores de georreferenciamento não podem ter a mesma direção (alinhados)!')
if metodo == 0:
if min_pnts_homo:
X = L - Lo
else:
M = A.T*A
if det(M):
X = solve(M, A.T*(L - Lo))
else:
raise QgsProcessingException(msg_erro)
else:
if min_pnts_homo:
if det(A):
X = solve(A, L)
else:
raise QgsProcessingException(msg_erro)
else: # asjustamento
M = A.T*A
if det(M):
X = solve(M, A.T*L)
else:
raise QgsProcessingException(msg_erro)
# Parametros e Função da Transformação
if metodo == 0:
a = X[0,0]
b = X[1,0]
def CoordTransf(pnt, a = a, b = b): # Translacao
X, Y = pnt.x(), pnt.y()
Xt = X + a
Yt = Y + b
return (Xt, Yt)
def CoordInvTransf(pnt, a = a, b = b): # Translacao (Inversa)
X, Y = pnt.x(), pnt.y()
Xit = X - a
Yit = Y - b
return (Xit, Yit)
elif metodo == 1:
a = X[0,0]
b = X[1,0]
c = X[2,0]
d = X[3,0]
def CoordTransf(pnt, a = a, b = b, c = c, d = d): # Transformação Conforme - Helmert 2D
'''
Xt = X*a - Y*b + c
Yt = X*b + Y*a + d
a = S*cos(alfa)
b = S*sin(alfa)
'''
X, Y = pnt.x(), pnt.y()
Xt = X*a - Y*b + c
Yt = X*b + Y*a + d
return (Xt, Yt)
def CoordInvTransf(pnt, a = a, b = b, c = c, d = d): # Transformação de Helmert 2D (Inversa)
X, Y = pnt.x(), pnt.y()
A = np.matrix([[a,-b],[b,a]])
B = np.matrix([[X-c],[Y-d]])
sol = solve(A,B)
Xit = sol[0,0]
Yit = sol[1,0]
return (Xit, Yit)
elif metodo == 2:
a = X[0,0]
b = X[1,0]
c = X[2,0]
d = X[3,0]
e = X[4,0]
f = X[5,0]
def CoordTransf(pnt, a = a, b = b, c = c, d = d, e = e, f = f): # Transformação Afim
X, Y = pnt.x(), pnt.y()
Xt = X*a + Y*b + c
Yt = X*d + Y*e + f
return (Xt, Yt)
def CoordInvTransf(pnt, a = a, b = b, c = c, d = d, e = e, f = f): # Transformação Afim (Inversa)
X, Y = pnt.x(), pnt.y()
A = np.matrix([[a,b],[d,e]])
B = np.matrix([[X-c],[Y-f]])
sol = solve(A,B)
Xit = sol[0,0]
Yit = sol[1,0]
return (Xit, Yit)
# Cálculo do Resíduos
transf = []
for feat in vetores.getFeatures():
geom = feat.geometry()
coord = geom.asPolyline()
Xt, Yt = CoordTransf(coord[0])
transf += [[Xt],[Yt]]
X, Y = coord[-1].x(), coord[-1].y()
Vx = X - Xt
Vy = Y - Yt
# MVC dos Parametros e das coordenadas Ajustadas
n = np.shape(A)[0] # número de observações
u = np.shape(A)[1] # número de parâmetros
if not min_pnts_homo:
# Residuos
V = L - np.matrix(transf)
# Sigma posteriori
sigma2 = V.T*V/(n-u)
# Precisão dos Pontos Ajustados
# MVC de Xa
SigmaXa = sigma2[0,0]*inv(A.T*A)
SigmaXa = np.matrix(SigmaXa).astype(float)
# MVC de La
SigmaLa = A*SigmaXa*A.T
SigmaLa = np.matrix(SigmaLa).astype(float)
# RMSE
RMSE = np.sqrt((V.T*V)[0,0]/n_pnts_homo)
else:
sigma2 = 0
RMSE = 0
# Lista de Coordenadas Ajustadas, Precisões e Resíduos
COORD = []
PREC = []
DELTA = []
for index, feat in enumerate(vetores.getFeatures()):
X = transf[2*index][0]
Y = transf[2*index+1][0]
COORD += [QgsPointXY(X, Y)]
if not min_pnts_homo:
s_X = float(np.sqrt(SigmaLa[2*index,2*index]))
s_Y = float(np.sqrt(SigmaLa[2*index+1,2*index+1]))
PREC += [(s_X, s_Y)]
d_X = float(V[2*index][0])
d_Y = float(V[2*index+1][0])
DELTA += [(d_X, d_Y)]
else:
PREC += [(0, 0)]
DELTA += [(0, 0)]
if metodo == 0:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">'''+ tr('Translation',str2HTML('Translação')) + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style=""></span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">X
= </span></i><i><span style="">x
</span></i><i><span style="">+</span></i><i><span
style=""> a</span></i><i><span style="">
+</span></i><i><span style=""> Vx<o:p></o:p></span></i></p>
<div style="text-align: center;"><i><span style="">Y =
</span></i><i><span style="">y
</span></i><i><span style="">+</span></i><i><span
style=""> b</span></i><i><span style="">
+</span></i><i><span style=""> </span></i><i><span
style=""></span></i><i><span style="">Vy</span></i></div>
'''
elif metodo == 1:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">'''+ tr('Helmert 2D (Conformal)',str2HTML('Helmert 2D (Conforme)')) + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">X = </span></i><i><span
style="">ax
</span></i><i><span style="">-</span></i><i><span
style=""> by </span></i><i><span
style=""></span></i><i><span style="">+
c +</span></i><i><span style=""> Vx<o:p></o:p></span></i></p>
<div style="text-align: center;"><i><span style="">Y =
</span></i><i><span style="">bx
</span></i><i><span style="">+</span></i><i><span
style=""> ay </span></i><i><span
style="">+ d +</span></i><i><span
style=""> </span></i><i><span style=""></span></i><i><span
style="">Vy</span></i></div>
'''
elif metodo == 2:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">'''+ tr('Affine Transform',str2HTML('Transformação Afim')) + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">X = </span></i><i><span
style="">ax </span></i><i><span
style="">+</span></i><i><span style="">
by </span></i><i><span style=""></span></i><i><span
style="">+ c +</span></i><i><span
style=""> Vx<o:p></o:p></span></i></p>
<div style="text-align: center;"><i><span style="">Y =
</span></i><i><span style="">dx
</span></i><i><span style="">+</span></i><i><span
style=""> ey </span></i><i><span
style="">+ f +</span></i><i><span
style=""> </span></i><i><span style=""></span></i><i><span
style="">Vy</span></i></div>
'''
parametros = ''
for k in range(u): # para cada parâmetro
letra = chr(k+97)
parametros += '''<p class="MsoNormal" style="text-align: center;"
align="center"><span style="">letrax
</span><span style="">=</span><span
style=""> [x]<o:p></o:p></span></p>
'''.replace('letrax', letra).replace('[x]', str(eval(letra)))
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
<title>'''+ tr('Coordinate Transformation',str2HTML('Transformação de Coordenadas')) + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftoos.png?raw=true" type = "image/x-icon">
</head>
<body
style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);"
alink="#000099" link="#000099" vlink="#990099">
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span
style="font-size: 12pt; line-height: 107%;"><o:p></o:p></span></b><span
style="font-weight: bold; text-decoration: underline;">'''+ tr('COORDINATE TRANSFORMATION',str2HTML('TRANSFORMAÇÃO DE COORDENDAS')) + ''' (2D)</span></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><span style="font-style: italic;">''' + tr('Mathematical Formulation',str2HTML('Formulação Matemática')) + '''
</span></p>''' + formula + '''<p style="text-align: center;" class="MsoNormal"><b><span
style="">''' + tr('Residual Errors of Control Points',str2HTML('Erro residual dos Pontos de Controle')) + '''<o:p></o:p></span></b></p>
<table
style="border: medium none ; border-collapse: collapse; text-align: left; margin-left: auto; margin-right: auto;"
class="MsoTableGrid" border="0" cellpadding="0"
cellspacing="0">
<tbody>
<tr style="">
<td style="padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i><span style="">''' + tr('Point',str2HTML('Ponto')) + '''<o:p></o:p></span></i></p>
</td>
<td style="padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i><span style="">Vx<o:p></o:p></span></i></p>
</td>
<td style="padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i><span style="">Vy<o:p></o:p></span></i></p>
</td>
</tr>
[TABLE]
</tbody>
</table>
<br>
<div>
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span style="">''' + tr('Transformation Parameters:',str2HTML('Parâmetros de Transformação:')) + '''<o:p></o:p></span></b></p>
''' + parametros + '''
<div style="text-align: center;"><b><span style=""
>''' + tr('Adjustment’s Reference Variance',str2HTML('Variância a posteriori')) + '''</span></b><span style=""
> <span style=""> </span>=
</span><span style="">''' + str(round(sigma2[0,0] if not min_pnts_homo else 0, 4)) + '''</span></div>
<br>
<div style="text-align: center;"><b><span style=""
>''' + tr('Root Mean Square Error (RMSE)',str2HTML('Raiz do Erro Médio Quadrático (REMQ)')) + '''</span></b><span style=""
> <span style=""> </span>=
</span><span style="">''' + str(round(RMSE,4)) + '''</span></div>
</div>
<footer">
<p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: right;" align="right"><b>''' + tr('<NAME>', str2HTML('<NAME>')) + '''
</br>''' + tr('Cartographic Engineer', 'Eng. Cartógrafo') + '''<o:p></o:p></b></p>
</br>
<div align="right">'''+ Imgs().social_table_color + '''
</div>
<o:p></o:p></b></p>
</footer>
</body>
</html>
'''
table_row = '''<tr style="">
<td style="padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="">[ID]<o:p></o:p></span></p>
</td>
<td style="padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="">[Vx]<o:p></o:p></span></p>
</td>
<td style="padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="">[Vy]</span></p>
</td>
</tr>'''
tabela = ''
# Tabela de Residuos
for ind, delta in enumerate(DELTA):
tableRowN = table_row
itens = {'[ID]' : str(ind+1),
'[Vx]' : '{:.4f}'.format(float(delta[0])),
'[Vy]' : '{:.4f}'.format(float(delta[1])),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
tabela += tableRowN
texto = texto.replace('[TABLE]', tabela)
return COORD, PREC, CoordTransf, texto, CoordInvTransf
# Validação dos Pontos de Controle (GCP)
def ValidacaoGCP(lista, metodo):
# número de feições por modelo matemático
cont = len(lista)
if metodo == 0:
if cont < 1:
raise QgsProcessingException(tr('It takes 1 or more GPC to perform this adjustment!', 'É necessario 1 ou mais GCP para realizar esse ajustamento!'))
elif metodo == 1:
if cont < 3:
raise QgsProcessingException(tr('It takes 3 or more GPC to perform this adjustment!', 'É necessario 3 ou mais GCP para realizar esse ajustamento!'))
# Ajustamento Vertical
def AjustVertical(lista, metodo):
# lista: [(Xa, Ya, Za), Zb] - ponto A - GCP, Zb - cota do MDE
# Métodos:
# 0 - constante, 1 - plano
# numero de pontos homologos
n_pnts_ctrl = len(lista)
# numero minimo de pontos de controle por metodo
if metodo == 0: # 0 - constante
min_pnts_ctrl = n_pnts_ctrl == 1
elif metodo == 1: # 1 - plano
min_pnts_ctrl = n_pnts_ctrl == 3
A = [] # Matriz Design
L = [] # Coordenadas Finais
Lo = [] # Coordenadas Iniciais
for item in lista:
xa = item[0][0]
ya = item[0][1]
za = item[1]
zb = item[0][2]
if metodo == 0:
A += [[1]]
elif metodo == 1:
A += [[xa, ya, 1]]
L +=[[zb]]
Lo +=[[za]]
A = np.matrix(A)
L = np.matrix(L)
Lo = np.matrix(Lo)
msg_erro = tr('Inconsistent values, check your control points!', 'Valores inconsistentes, verifique seus pontos de controle!')
if metodo == 0:
X = (L - Lo).mean()
elif metodo == 1:
if min_pnts_ctrl:
if det(A):
X = solve(A, L - Lo)
else:
raise QgsProcessingException(msg_erro)
else: # asjustamento
M = A.T*A
if det(M):
X = solve(M, A.T*(L-Lo))
else:
raise QgsProcessingException(msg_erro)
# Parametros e Função da Transformação
if metodo == 0:
a = X
def CoordTransf(X, Y, a = a): # Transformação dz Plano
'''
dz = a
'''
dz = a
return dz
elif metodo == 1:
a = X[0,0]
b = X[1,0]
c = X[2,0]
def CoordTransf(X, Y, a = a, b = b, c = c): # Transformação dz Plano
'''
dz = X*a + Y*b + c
'''
dz = X*a + Y*b + c
return dz
# Cálculo do Resíduos
V = []
COTAS = []
DELTA = []
for item in lista:
X = item[0][0]
Y = item[0][1]
Z = item[1]
dz = CoordTransf(X, Y)
Zaj = Z + dz
COTAS += [Zaj]
difer = Zaj - item[0][2]
DELTA += [difer]
V += [[difer]]
# MVC dos Parametros e das coordenadas Ajustadas
n = np.shape(A)[0] # número de observações
u = np.shape(A)[1] # número de parâmetros
V = np.matrix(V)
if not min_pnts_ctrl:
# Sigma posteriori
sigma2 = V.T*V/(n-u)
# Precisão dos Pontos Ajustados
# MVC de Xa
SigmaXa = sigma2[0,0]*inv(A.T*A)
SigmaXa = np.matrix(SigmaXa).astype(float)
# MVC de La
SigmaLa = A*SigmaXa*A.T
SigmaLa = np.matrix(SigmaLa).astype(float)
# RMSE
RMSE = np.sqrt((V.T*V)[0,0]/n_pnts_ctrl)
else:
sigma2 = 0
RMSE = 0
# Lista Precisões das Cotas Ajustadas
PREC = (np.array(SigmaLa.diagonal())).tolist()
if metodo == 0:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">''' + tr('Constant (XY Parallel Plane)','Constante (Plano Paralelo ao XY)') + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style=""></span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">Z = Zo</span></i><i><span
style=""></span></i><i><span style="">
+ a +</span></i><i><span style=""> Vz<o:p></o:p></span></i></p>
'''
elif metodo == 1:
formula = '''<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">''' + tr('Plan as a function of X and Y',str2HTML('Plano em função de X e Y')) + '''</span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style=""></span></i></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><i><span style="">Z = Zo</span></i><i><span
style=""></span></i><i><span style="">
+aX + bY
+ c +</span></i><i><span style=""> Vz<o:p></o:p></span></i></p>
'''
parametros = ''
for k in range(u): # para cada parâmetro
letra = chr(k+97)
parametros += '''<p class="MsoNormal" style="text-align: center;"
align="center"><span style="">letrax
</span><span style="">=</span><span
style=""> [x]<o:p></o:p></span></p>
'''.replace('letrax', letra).replace('[x]', str(eval(letra)))
texto = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta content="text/html; charset=ISO-8859-1"
http-equiv="content-type">
<title>'''+ tr('Vertical Adjustment',str2HTML('Ajuste Vertical')) + '''</title>
<link rel = "icon" href = "https://github.com/LEOXINGU/lftools/blob/main/images/lftoos.png?raw=true" type = "image/x-icon">
</head>
<body
style="color: rgb(0, 0, 0); background-color: rgb(255, 255, 204);"
alink="#000099" link="#000099" vlink="#990099">
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span
style="font-size: 12pt; line-height: 107%;"><o:p></o:p></span></b><span
style="font-weight: bold; text-decoration: underline;">'''+ tr('VERTICAL ADJUSTMENT',str2HTML('AJUSTE VERTICAL')) + ''' </span></p>
<p class="MsoNormal" style="text-align: center;"
align="center"><span style="font-style: italic;">''' + tr('Mathematical Formulation',str2HTML('Formulação Matemática')) + '''
</span></p>''' + formula + '''<p style="text-align: center;" class="MsoNormal"><b><span
style="">''' + tr('Residual Errors of Control Points',str2HTML('Erro residual dos Pontos de Controle')) + '''<o:p></o:p></span></b></p>
<table
style="border: medium none ; border-collapse: collapse; text-align: left; margin-left: auto; margin-right: auto;"
class="MsoTableGrid" border="0" cellpadding="0"
cellspacing="0">
<tbody>
<tr style="">
<td style="padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i><span style="">''' + tr('Point',str2HTML('Ponto')) + '''<o:p></o:p></span></i></p>
</td>
<td style="padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><i><span style="">Vz<o:p></o:p></span></i></p>
</td>
</tr>
[TABLE]
</tbody>
</table>
<br>
<div>
<p class="MsoNormal" style="text-align: center;"
align="center"><b><span style="">''' + tr('Transformation Parameters:',str2HTML('Parâmetros de Transformação:')) + '''<o:p></o:p></span></b></p>
''' + parametros + '''
<div style="text-align: center;"><b><span style=""
>''' + tr('Adjustment’s Reference Variance',str2HTML('Variância a posteriori')) + '''</span></b><span style=""
> <span style=""> </span>=
</span><span style="">''' + str(round(sigma2[0,0],4)) + '''</span></div>
<br>
<div style="text-align: center;"><b><span style=""
>''' + tr('Root Mean Square Error (RMSE)',str2HTML('Raiz do Erro Médio Quadrático (REMQ)')) + '''</span></b><span style=""
> <span style=""> </span>=
</span><span style="">''' + str(round(RMSE,4)) + '''</span></div>
</div>
<footer">
<p class="MsoNormal" style="margin-bottom: 0.0001pt; text-align: right;" align="right"><b>''' + tr('<NAME>', str2HTML('<NAME>')) + '''
</br>''' + tr('Cartographic Engineer', 'Eng. Cartógrafo') + '''<o:p></o:p></b></p>
</br>
<div align="right">'''+ Imgs().social_table_color + '''
</div>
<o:p></o:p></b></p>
</footer>
</body>
</html>
'''
table_row = '''<tr style="">
<td style="padding: 0cm 5.4pt; width: 84.9pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="">[ID]<o:p></o:p></span></p>
</td>
<td style="padding: 0cm 5.4pt; width: 84.95pt;"
valign="top" width="113">
<p class="MsoNormal"
style="margin-bottom: 0.0001pt; text-align: center; line-height: normal;"
align="center"><span style="">[Vz]<o:p></o:p></span></p>
</td>
</tr>'''
tabela = ''
# Tabela de Residuos
for ind, delta in enumerate(DELTA):
tableRowN = table_row
itens = {'[ID]' : str(ind+1),
'[Vz]' : '{:.4f}'.format(float(delta)),
}
for item in itens:
tableRowN = tableRowN.replace(item, itens[item])
tabela += tableRowN
texto = texto.replace('[TABLE]', tabela)
return COTAS, PREC[0], DELTA, CoordTransf, texto
| [
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.linalg.det",
"numpy.linalg.inv",
"lftools.geocapt.imgs.Imgs",
"numpy.shape",
"numpy.matrix",
"lftools.geocapt.topogeo.str2HTML"
] | [((5851, 5863), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (5860, 5863), True, 'import numpy as np\n'), ((5872, 5884), 'numpy.matrix', 'np.matrix', (['L'], {}), '(L)\n', (5881, 5884), True, 'import numpy as np\n'), ((5894, 5907), 'numpy.matrix', 'np.matrix', (['Lo'], {}), '(Lo)\n', (5903, 5907), True, 'import numpy as np\n'), ((19094, 19106), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (19103, 19106), True, 'import numpy as np\n'), ((19115, 19127), 'numpy.matrix', 'np.matrix', (['L'], {}), '(L)\n', (19124, 19127), True, 'import numpy as np\n'), ((19137, 19150), 'numpy.matrix', 'np.matrix', (['Lo'], {}), '(Lo)\n', (19146, 19150), True, 'import numpy as np\n'), ((20674, 20686), 'numpy.matrix', 'np.matrix', (['V'], {}), '(V)\n', (20683, 20686), True, 'import numpy as np\n'), ((8959, 8970), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (8967, 8970), True, 'import numpy as np\n'), ((9006, 9017), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (9014, 9017), True, 'import numpy as np\n'), ((9466, 9504), 'numpy.sqrt', 'np.sqrt', (['((V.T * V)[0, 0] / n_pnts_homo)'], {}), '((V.T * V)[0, 0] / n_pnts_homo)\n', (9473, 9504), True, 'import numpy as np\n'), ((20581, 20592), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (20589, 20592), True, 'import numpy as np\n'), ((20628, 20639), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (20636, 20639), True, 'import numpy as np\n'), ((21056, 21094), 'numpy.sqrt', 'np.sqrt', (['((V.T * V)[0, 0] / n_pnts_ctrl)'], {}), '((V.T * V)[0, 0] / n_pnts_ctrl)\n', (21063, 21094), True, 'import numpy as np\n'), ((6176, 6182), 'numpy.linalg.det', 'det', (['M'], {}), '(M)\n', (6179, 6182), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((6351, 6357), 'numpy.linalg.det', 'det', (['A'], {}), '(A)\n', (6354, 6357), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((6530, 6536), 'numpy.linalg.det', 'det', (['M'], {}), '(M)\n', (6533, 6536), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((9105, 9122), 'numpy.matrix', 'np.matrix', (['transf'], {}), '(transf)\n', (9114, 9122), True, 'import numpy as np\n'), ((9269, 9281), 'numpy.linalg.inv', 'inv', (['(A.T * A)'], {}), '(A.T * A)\n', (9272, 9281), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((20859, 20871), 'numpy.linalg.inv', 'inv', (['(A.T * A)'], {}), '(A.T * A)\n', (20862, 20871), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((6204, 6228), 'numpy.linalg.solve', 'solve', (['M', '(A.T * (L - Lo))'], {}), '(M, A.T * (L - Lo))\n', (6209, 6228), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((6379, 6390), 'numpy.linalg.solve', 'solve', (['A', 'L'], {}), '(A, L)\n', (6384, 6390), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((6558, 6575), 'numpy.linalg.solve', 'solve', (['M', '(A.T * L)'], {}), '(M, A.T * L)\n', (6563, 6575), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((7724, 7752), 'numpy.matrix', 'np.matrix', (['[[a, -b], [b, a]]'], {}), '([[a, -b], [b, a]])\n', (7733, 7752), True, 'import numpy as np\n'), ((7766, 7795), 'numpy.matrix', 'np.matrix', (['[[X - c], [Y - d]]'], {}), '([[X - c], [Y - d]])\n', (7775, 7795), True, 'import numpy as np\n'), ((7809, 7820), 'numpy.linalg.solve', 'solve', (['A', 'B'], {}), '(A, B)\n', (7814, 7820), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((9299, 9317), 'numpy.matrix', 'np.matrix', (['SigmaXa'], {}), '(SigmaXa)\n', (9308, 9317), True, 'import numpy as np\n'), ((9403, 9421), 'numpy.matrix', 'np.matrix', (['SigmaLa'], {}), '(SigmaLa)\n', (9412, 9421), True, 'import numpy as np\n'), ((9861, 9899), 'numpy.sqrt', 'np.sqrt', (['SigmaLa[2 * index, 2 * index]'], {}), '(SigmaLa[2 * index, 2 * index])\n', (9868, 9899), True, 'import numpy as np\n'), ((9920, 9966), 'numpy.sqrt', 'np.sqrt', (['SigmaLa[2 * index + 1, 2 * index + 1]'], {}), '(SigmaLa[2 * index + 1, 2 * index + 1])\n', (9927, 9966), True, 'import numpy as np\n'), ((16331, 16337), 'lftools.geocapt.imgs.Imgs', 'Imgs', ([], {}), '()\n', (16335, 16337), False, 'from lftools.geocapt.imgs import Imgs\n'), ((19394, 19400), 'numpy.linalg.det', 'det', (['A'], {}), '(A)\n', (19397, 19400), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((19578, 19584), 'numpy.linalg.det', 'det', (['M'], {}), '(M)\n', (19581, 19584), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((20889, 20907), 'numpy.matrix', 'np.matrix', (['SigmaXa'], {}), '(SigmaXa)\n', (20898, 20907), True, 'import numpy as np\n'), ((20993, 21011), 'numpy.matrix', 'np.matrix', (['SigmaLa'], {}), '(SigmaLa)\n', (21002, 21011), True, 'import numpy as np\n'), ((25648, 25654), 'lftools.geocapt.imgs.Imgs', 'Imgs', ([], {}), '()\n', (25652, 25654), False, 'from lftools.geocapt.imgs import Imgs\n'), ((8418, 8445), 'numpy.matrix', 'np.matrix', (['[[a, b], [d, e]]'], {}), '([[a, b], [d, e]])\n', (8427, 8445), True, 'import numpy as np\n'), ((8459, 8488), 'numpy.matrix', 'np.matrix', (['[[X - c], [Y - f]]'], {}), '([[X - c], [Y - f]])\n', (8468, 8488), True, 'import numpy as np\n'), ((8502, 8513), 'numpy.linalg.solve', 'solve', (['A', 'B'], {}), '(A, B)\n', (8507, 8513), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((10327, 10349), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Translação"""'], {}), "('Translação')\n", (10335, 10349), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((19422, 19438), 'numpy.linalg.solve', 'solve', (['A', '(L - Lo)'], {}), '(A, L - Lo)\n', (19427, 19438), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((19606, 19630), 'numpy.linalg.solve', 'solve', (['M', '(A.T * (L - Lo))'], {}), '(M, A.T * (L - Lo))\n', (19611, 19630), False, 'from numpy.linalg import norm, det, inv, solve\n'), ((11202, 11235), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Helmert 2D (Conforme)"""'], {}), "('Helmert 2D (Conforme)')\n", (11210, 11235), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((21935, 21971), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Plano em função de X e Y"""'], {}), "('Plano em função de X e Y')\n", (21943, 21971), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((12023, 12053), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Transformação Afim"""'], {}), "('Transformação Afim')\n", (12031, 12053), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((16185, 16203), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""<NAME>"""'], {}), "('<NAME>')\n", (16193, 16203), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((25502, 25520), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""<NAME>"""'], {}), "('<NAME>')\n", (25510, 25520), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((15879, 15927), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Raiz do Erro Médio Quadrático (REMQ)"""'], {}), "('Raiz do Erro Médio Quadrático (REMQ)')\n", (15887, 15927), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((25196, 25244), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Raiz do Erro Médio Quadrático (REMQ)"""'], {}), "('Raiz do Erro Médio Quadrático (REMQ)')\n", (25204, 25244), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((15580, 15614), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Variância a posteriori"""'], {}), "('Variância a posteriori')\n", (15588, 15614), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((24926, 24960), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Variância a posteriori"""'], {}), "('Variância a posteriori')\n", (24934, 24960), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((15382, 15422), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Parâmetros de Transformação:"""'], {}), "('Parâmetros de Transformação:')\n", (15390, 15422), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((24728, 24768), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Parâmetros de Transformação:"""'], {}), "('Parâmetros de Transformação:')\n", (24736, 24768), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((14674, 14691), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Ponto"""'], {}), "('Ponto')\n", (14682, 14691), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((24263, 24280), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Ponto"""'], {}), "('Ponto')\n", (24271, 24280), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((14162, 14210), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Erro residual dos Pontos de Controle"""'], {}), "('Erro residual dos Pontos de Controle')\n", (14170, 14210), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((23751, 23799), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Erro residual dos Pontos de Controle"""'], {}), "('Erro residual dos Pontos de Controle')\n", (23759, 23799), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((13979, 14012), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Formulação Matemática"""'], {}), "('Formulação Matemática')\n", (13987, 14012), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((23568, 23601), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Formulação Matemática"""'], {}), "('Formulação Matemática')\n", (23576, 23601), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((13782, 13821), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""TRANSFORMAÇÃO DE COORDENDAS"""'], {}), "('TRANSFORMAÇÃO DE COORDENDAS')\n", (13790, 13821), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((23387, 23414), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""AJUSTE VERTICAL"""'], {}), "('AJUSTE VERTICAL')\n", (23395, 23414), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((13237, 13277), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Transformação de Coordenadas"""'], {}), "('Transformação de Coordenadas')\n", (13245, 13277), False, 'from lftools.geocapt.topogeo import str2HTML\n'), ((22861, 22888), 'lftools.geocapt.topogeo.str2HTML', 'str2HTML', (['"""Ajuste Vertical"""'], {}), "('Ajuste Vertical')\n", (22869, 22888), False, 'from lftools.geocapt.topogeo import str2HTML\n')] |
# prints a speedup graph for multiple benchmark applications, across different test sizes
# the data for each benchmark should be in a separate file, with the following format:
#
# Takes data in the following format:
# aiifft01
# trans-inputs trans-results exec-kernel speedup
# 0.10 0.027648 19.37056 1.84
# 0.28 0.052352 22.72736 3.08
# 0.68 0.1056 22.966848 6.08
# 2.10 0.511552 23.735808 11.78
# 3.17 0.751936 26.812896 20.84
# 8.14 1.593152 58.503872 19.18
# 17.10 3.500096 94.251552 23.66
# 33.55 6.08112 166.381984 26.48
# 72.83 14.454784 311.900768 28.20
# 143.89 28.00176 618.686016 28.10
import numpy
import argparse
import itertools
from utils import getbmrkname, getdatafiles
import matplotlib as mplt
mplt.rcParams['ps.useafm'] = True
mplt.rcParams['pdf.use14corefonts'] = True
mplt.rcParams['text.usetex'] = True
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('datafiledir', help='speedup data for applications')
args = parser.parse_args()
#find all data files in the directory
directory = args.datafiledir
datafiles = getdatafiles(directory)
testsizes = [256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072]
#create handle combinations
linestyles = ['-', ':', '-.', '--']
markers = ['x', '^', 'o', '*']
handlestyles = itertools.product(linestyles, markers)
#used to sort the legend based on the highest speedup
sortingpoints = []
labels = []
handles = []
rowidxmaxtestsuite = 9
colidxkernelwithoverlap = 3
colidxnooverlap = 4
fig, ax = plt.subplots()
ax.set_xscale('log', basex=2)
#plot the data
for datafile in datafiles:
print(datafile)
label = getbmrkname(datafile)
data = numpy.loadtxt(datafile, skiprows=2)
handlestyle = next(handlestyles)
plotdata = data[:,colidxnooverlap]
handle, = plt.plot(testsizes, plotdata, linestyle=handlestyle[0], lw = 2, label=label, marker=handlestyle[1], markersize=20, linewidth=4)
#store legend data
sortingpoints.append(plotdata[rowidxmaxtestsuite])
labels.append(label)
handles.append(handle)
#draw line at 1
plt.axhline(y=1, c='k', linewidth=1)
#change tick frequency of ticks of the y axis
start, end = ax.get_ylim()
plt.yticks(list(plt.yticks()[0])+[1], fontsize=15)
ax.yaxis.grid(True, which='1')
#plt.title('GPU Speedup', fontsize ='large')
plt.xlabel('Number of tests (log base 2)', fontsize=40)
plt.ylabel('GPU speedup when compared to a single CPU', fontsize =40)
plt.yticks(fontsize=30)
plt.xticks(testsizes,fontsize=30)
#sort the labels/handles by the sorting points
sortingpoints, labels, handles = zip(*sorted(zip(sortingpoints, labels, handles), key=lambda t: t[0], reverse=True))
#set the legend
plt.legend(loc = 2, fontsize =35, labels=labels, handles=handles)
fig=plt.figure()
plt.show()
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"itertools.product",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.figure",
"utils.getbmrkname",
"matplotlib.pyplot.yticks",
"n... | [((872, 897), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (895, 897), False, 'import argparse\n'), ((1078, 1101), 'utils.getdatafiles', 'getdatafiles', (['directory'], {}), '(directory)\n', (1090, 1101), False, 'from utils import getbmrkname, getdatafiles\n'), ((1290, 1328), 'itertools.product', 'itertools.product', (['linestyles', 'markers'], {}), '(linestyles, markers)\n', (1307, 1328), False, 'import itertools\n'), ((1511, 1525), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1523, 1525), True, 'import matplotlib.pyplot as plt\n'), ((2067, 2103), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(1)', 'c': '"""k"""', 'linewidth': '(1)'}), "(y=1, c='k', linewidth=1)\n", (2078, 2103), True, 'import matplotlib.pyplot as plt\n'), ((2306, 2361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of tests (log base 2)"""'], {'fontsize': '(40)'}), "('Number of tests (log base 2)', fontsize=40)\n", (2316, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2363, 2431), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GPU speedup when compared to a single CPU"""'], {'fontsize': '(40)'}), "('GPU speedup when compared to a single CPU', fontsize=40)\n", (2373, 2431), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2456), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(30)'}), '(fontsize=30)\n', (2443, 2456), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2491), 'matplotlib.pyplot.xticks', 'plt.xticks', (['testsizes'], {'fontsize': '(30)'}), '(testsizes, fontsize=30)\n', (2467, 2491), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2734), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)', 'fontsize': '(35)', 'labels': 'labels', 'handles': 'handles'}), '(loc=2, fontsize=35, labels=labels, handles=handles)\n', (2682, 2734), True, 'import matplotlib.pyplot as plt\n'), ((2743, 2755), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2753, 2755), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2765, 2767), True, 'import matplotlib.pyplot as plt\n'), ((1631, 1652), 'utils.getbmrkname', 'getbmrkname', (['datafile'], {}), '(datafile)\n', (1642, 1652), False, 'from utils import getbmrkname, getdatafiles\n'), ((1664, 1699), 'numpy.loadtxt', 'numpy.loadtxt', (['datafile'], {'skiprows': '(2)'}), '(datafile, skiprows=2)\n', (1677, 1699), False, 'import numpy\n'), ((1791, 1920), 'matplotlib.pyplot.plot', 'plt.plot', (['testsizes', 'plotdata'], {'linestyle': 'handlestyle[0]', 'lw': '(2)', 'label': 'label', 'marker': 'handlestyle[1]', 'markersize': '(20)', 'linewidth': '(4)'}), '(testsizes, plotdata, linestyle=handlestyle[0], lw=2, label=label,\n marker=handlestyle[1], markersize=20, linewidth=4)\n', (1799, 1920), True, 'import matplotlib.pyplot as plt\n'), ((2194, 2206), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (2204, 2206), True, 'import matplotlib.pyplot as plt\n')] |
import os
import random
import sys
import time
import numpy as np
import torch
from tensorboardX import SummaryWriter
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def set_cuda(config):
use_cuda = torch.cuda.is_available()
assert config.use_cuda == use_cuda
if use_cuda:
torch.cuda.manual_seed(config.seed)
torch.backends.cudnn.deterministic = True
device = (
torch.device("cuda:{}".format(config.gpu))
if use_cuda
else torch.device("cpu")
)
devices_id = config.gpu
return device, devices_id
def set_tensorboard(config):
summary_dir = os.path.join(config.logdir, config.expname)
if not os.path.exists(summary_dir):
os.makedirs(summary_dir)
for file_name in os.listdir(summary_dir):
if file_name.startswith("events.out.tfevents"):
print(f"Event file {file_name} already exists")
if input("Remove this file? (y/n) ") == "y":
os.remove(os.path.join(summary_dir, file_name))
print(f"Event file {file_name} removed")
return SummaryWriter(summary_dir)
def set_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
| [
"torch.manual_seed",
"os.path.exists",
"os.listdir",
"tensorboardX.SummaryWriter",
"os.makedirs",
"os.path.join",
"random.seed",
"torch.cuda.is_available",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.device"
] | [((308, 333), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (331, 333), False, 'import torch\n'), ((716, 759), 'os.path.join', 'os.path.join', (['config.logdir', 'config.expname'], {}), '(config.logdir, config.expname)\n', (728, 759), False, 'import os\n'), ((854, 877), 'os.listdir', 'os.listdir', (['summary_dir'], {}), '(summary_dir)\n', (864, 877), False, 'import os\n'), ((1184, 1210), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['summary_dir'], {}), '(summary_dir)\n', (1197, 1210), False, 'from tensorboardX import SummaryWriter\n'), ((1237, 1260), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1254, 1260), False, 'import torch\n'), ((1265, 1282), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1276, 1282), False, 'import random\n'), ((1287, 1307), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1301, 1307), True, 'import numpy as np\n'), ((398, 433), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['config.seed'], {}), '(config.seed)\n', (420, 433), False, 'import torch\n'), ((583, 602), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (595, 602), False, 'import torch\n'), ((771, 798), 'os.path.exists', 'os.path.exists', (['summary_dir'], {}), '(summary_dir)\n', (785, 798), False, 'import os\n'), ((808, 832), 'os.makedirs', 'os.makedirs', (['summary_dir'], {}), '(summary_dir)\n', (819, 832), False, 'import os\n'), ((1078, 1114), 'os.path.join', 'os.path.join', (['summary_dir', 'file_name'], {}), '(summary_dir, file_name)\n', (1090, 1114), False, 'import os\n')] |
from __future__ import division
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
from pycuda.tools import DeviceMemoryPool, PageLockedMemoryPool
from pycuda.sparse.packeted import PacketedSpMV
from pycuda.sparse.operator import DiagonalPreconditioner
from pycuda.sparse.cg import solve_pkt_with_cg
import numpy as np
def _solve_cuda(lap_sparse, B, return_full_prob=False, maxiter=100, tol=5e-5):
"""
solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method. For each pixel, the label i corresponding to the
maximal X_i is returned.
"""
print("using gpu mode")
dev_pool = DeviceMemoryPool()
pagelocked_pool = PageLockedMemoryPool()
csr_mat = lap_sparse
csr_mat = csr_mat.astype(np.float32)
inv_mat_diag = 1 / csr_mat.diagonal()
spmv = PacketedSpMV(csr_mat, True, csr_mat.dtype)
X = []
for i in range(len(B)):
rhs = -B[i].astype(spmv.dtype)
if True:
precon = DiagonalPreconditioner(spmv.permute(gpuarray.to_gpu(inv_mat_diag,
allocator=dev_pool.allocate)))
else:
precon = None
print("start solve")
start = drv.Event()
stop = drv.Event()
start.record()
rhs_gpu = gpuarray.to_gpu(rhs, dev_pool.allocate)
tol = 1e-7 if spmv.dtype == np.float64 else tol
res_gpu, it_count, res_count = solve_pkt_with_cg(spmv, rhs_gpu,
precon, tol=tol,
pagelocked_allocator=pagelocked_pool.allocate)
res = res_gpu.get()
stop.record()
stop.synchronize()
elapsed = stop.time_since(start) * 1e-3
est_flops = (csr_mat.nnz * 2 * (it_count + res_count)
+ csr_mat.shape[0] * (2 + 2 + 2 + 2 + 2) * it_count)
if precon is not None:
est_flops += csr_mat.shape[0] * it_count
print("size: %d, elapsed: %g s, %d it, %d residual, it/second: %g, "
"%g gflops/s" % (
csr_mat.shape[0],
elapsed, it_count, res_count, it_count / elapsed,
est_flops / elapsed / 1e9))
x0 = res[0]
X.append(x0)
pagelocked_pool.stop_holding()
dev_pool.stop_holding()
if not return_full_prob:
X = np.array(X)
X = np.argmax(X, axis=0)
return X
| [
"pycuda.tools.DeviceMemoryPool",
"pycuda.sparse.packeted.PacketedSpMV",
"numpy.argmax",
"pycuda.tools.PageLockedMemoryPool",
"pycuda.driver.Event",
"pycuda.sparse.cg.solve_pkt_with_cg",
"numpy.array",
"pycuda.gpuarray.to_gpu"
] | [((663, 681), 'pycuda.tools.DeviceMemoryPool', 'DeviceMemoryPool', ([], {}), '()\n', (679, 681), False, 'from pycuda.tools import DeviceMemoryPool, PageLockedMemoryPool\n'), ((704, 726), 'pycuda.tools.PageLockedMemoryPool', 'PageLockedMemoryPool', ([], {}), '()\n', (724, 726), False, 'from pycuda.tools import DeviceMemoryPool, PageLockedMemoryPool\n'), ((846, 888), 'pycuda.sparse.packeted.PacketedSpMV', 'PacketedSpMV', (['csr_mat', '(True)', 'csr_mat.dtype'], {}), '(csr_mat, True, csr_mat.dtype)\n', (858, 888), False, 'from pycuda.sparse.packeted import PacketedSpMV\n'), ((1260, 1271), 'pycuda.driver.Event', 'drv.Event', ([], {}), '()\n', (1269, 1271), True, 'import pycuda.driver as drv\n'), ((1287, 1298), 'pycuda.driver.Event', 'drv.Event', ([], {}), '()\n', (1296, 1298), True, 'import pycuda.driver as drv\n'), ((1340, 1379), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['rhs', 'dev_pool.allocate'], {}), '(rhs, dev_pool.allocate)\n', (1355, 1379), True, 'import pycuda.gpuarray as gpuarray\n'), ((1475, 1576), 'pycuda.sparse.cg.solve_pkt_with_cg', 'solve_pkt_with_cg', (['spmv', 'rhs_gpu', 'precon'], {'tol': 'tol', 'pagelocked_allocator': 'pagelocked_pool.allocate'}), '(spmv, rhs_gpu, precon, tol=tol, pagelocked_allocator=\n pagelocked_pool.allocate)\n', (1492, 1576), False, 'from pycuda.sparse.cg import solve_pkt_with_cg\n'), ((2435, 2446), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2443, 2446), True, 'import numpy as np\n'), ((2459, 2479), 'numpy.argmax', 'np.argmax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2468, 2479), True, 'import numpy as np\n'), ((1041, 1099), 'pycuda.gpuarray.to_gpu', 'gpuarray.to_gpu', (['inv_mat_diag'], {'allocator': 'dev_pool.allocate'}), '(inv_mat_diag, allocator=dev_pool.allocate)\n', (1056, 1099), True, 'import pycuda.gpuarray as gpuarray\n')] |
import pytest
import numpy as np
from gibbs.minimization import PygmoSelfAdaptiveDESettings, OptimizationProblem
from gibbs.minimization import OptimizationMethod, ScipyDifferentialEvolutionSettings
seed = 123
def f_rosenbrock(x):
"""
Define the benchmark Rosenbrock function.
:param numpy.ndarray x:
The function's argument array.
:return:
The evaluated function at the given input array.
:rtype: numpy.float64
"""
dim = len(x)
f = 0.0
for i in range(dim-1):
left_term = 100. * (x[i + 1] - x[i] * x[i]) * (x[i + 1] - x[i] * x[i])
right_term = (1. - x[i]) * (1. - x[i])
f += left_term + right_term
return f
@pytest.mark.parametrize("problem_dimension", range(2, 5))
def test_pygmo_sade_rosenbrock_minimization(problem_dimension):
bounds = problem_dimension * [[-6, 6]]
solver_settings = PygmoSelfAdaptiveDESettings(
gen=1000,
popsize=60,
seed=seed
)
problem = OptimizationProblem(
objective_function=f_rosenbrock,
bounds=bounds,
optimization_method=OptimizationMethod.PYGMO_DE1220,
solver_args=solver_settings
)
solution = problem.solve_minimization()
assert pytest.approx(np.ones(problem_dimension), rel=1e-3) == solution.x
@pytest.mark.parametrize("problem_dimension", range(2, 5))
def test_scipy_de_rosenbrock_minimization(problem_dimension):
bounds = problem_dimension * [[-6, 6]]
solver_settings = ScipyDifferentialEvolutionSettings(
number_of_decision_variables=problem_dimension,
seed=seed
)
problem = OptimizationProblem(
objective_function=f_rosenbrock,
bounds=bounds,
optimization_method=OptimizationMethod.SCIPY_DE,
solver_args=solver_settings
)
solution = problem.solve_minimization()
assert pytest.approx(np.ones(problem_dimension), rel=1e-3) == solution.x
| [
"gibbs.minimization.OptimizationProblem",
"gibbs.minimization.PygmoSelfAdaptiveDESettings",
"gibbs.minimization.ScipyDifferentialEvolutionSettings",
"numpy.ones"
] | [((881, 941), 'gibbs.minimization.PygmoSelfAdaptiveDESettings', 'PygmoSelfAdaptiveDESettings', ([], {'gen': '(1000)', 'popsize': '(60)', 'seed': 'seed'}), '(gen=1000, popsize=60, seed=seed)\n', (908, 941), False, 'from gibbs.minimization import PygmoSelfAdaptiveDESettings, OptimizationProblem\n'), ((987, 1145), 'gibbs.minimization.OptimizationProblem', 'OptimizationProblem', ([], {'objective_function': 'f_rosenbrock', 'bounds': 'bounds', 'optimization_method': 'OptimizationMethod.PYGMO_DE1220', 'solver_args': 'solver_settings'}), '(objective_function=f_rosenbrock, bounds=bounds,\n optimization_method=OptimizationMethod.PYGMO_DE1220, solver_args=\n solver_settings)\n', (1006, 1145), False, 'from gibbs.minimization import PygmoSelfAdaptiveDESettings, OptimizationProblem\n'), ((1486, 1584), 'gibbs.minimization.ScipyDifferentialEvolutionSettings', 'ScipyDifferentialEvolutionSettings', ([], {'number_of_decision_variables': 'problem_dimension', 'seed': 'seed'}), '(number_of_decision_variables=\n problem_dimension, seed=seed)\n', (1520, 1584), False, 'from gibbs.minimization import OptimizationMethod, ScipyDifferentialEvolutionSettings\n'), ((1617, 1771), 'gibbs.minimization.OptimizationProblem', 'OptimizationProblem', ([], {'objective_function': 'f_rosenbrock', 'bounds': 'bounds', 'optimization_method': 'OptimizationMethod.SCIPY_DE', 'solver_args': 'solver_settings'}), '(objective_function=f_rosenbrock, bounds=bounds,\n optimization_method=OptimizationMethod.SCIPY_DE, solver_args=\n solver_settings)\n', (1636, 1771), False, 'from gibbs.minimization import PygmoSelfAdaptiveDESettings, OptimizationProblem\n'), ((1246, 1272), 'numpy.ones', 'np.ones', (['problem_dimension'], {}), '(problem_dimension)\n', (1253, 1272), True, 'import numpy as np\n'), ((1872, 1898), 'numpy.ones', 'np.ones', (['problem_dimension'], {}), '(problem_dimension)\n', (1879, 1898), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
if __name__ == "__main__":
input_xs = np.random.rand(1000)
input_ys = 3 * input_xs + 0.217 + np.random.randn(*input_xs.shape) * 0.1
print(input_xs[:3], input_ys[:3])
weight = tf.Variable(1., dtype=tf.float32, name='weight')
bias = tf.Variable(1., dtype=tf.float32, name='bias')
opt = tf.optimizers.Adam(1e-1)
def model(xs):
logits = tf.multiply(xs, weight) + bias
return logits
for xs, ys in zip(input_xs, input_ys):
xs = np.reshape(xs, [1])
ys = np.reshape(ys, [1])
with tf.GradientTape() as tape:
_loss = tf.reduce_mean(tf.pow(model(xs) - ys, 2)) / (2 * 1000)
grads = tape.gradient(_loss, [weight, bias])
opt.apply_gradients(zip(grads, [weight, bias]))
print('Training loss is:', _loss.numpy())
print(weight.numpy(), bias.numpy())
| [
"numpy.reshape",
"numpy.random.rand",
"tensorflow.Variable",
"tensorflow.multiply",
"tensorflow.GradientTape",
"tensorflow.optimizers.Adam",
"numpy.random.randn"
] | [((84, 104), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (98, 104), True, 'import numpy as np\n'), ((229, 278), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'dtype': 'tf.float32', 'name': '"""weight"""'}), "(1.0, dtype=tf.float32, name='weight')\n", (240, 278), True, 'import tensorflow as tf\n'), ((287, 334), 'tensorflow.Variable', 'tf.Variable', (['(1.0)'], {'dtype': 'tf.float32', 'name': '"""bias"""'}), "(1.0, dtype=tf.float32, name='bias')\n", (298, 334), True, 'import tensorflow as tf\n'), ((343, 366), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['(0.1)'], {}), '(0.1)\n', (361, 366), True, 'import tensorflow as tf\n'), ((502, 521), 'numpy.reshape', 'np.reshape', (['xs', '[1]'], {}), '(xs, [1])\n', (512, 521), True, 'import numpy as np\n'), ((531, 550), 'numpy.reshape', 'np.reshape', (['ys', '[1]'], {}), '(ys, [1])\n', (541, 550), True, 'import numpy as np\n'), ((141, 173), 'numpy.random.randn', 'np.random.randn', (['*input_xs.shape'], {}), '(*input_xs.shape)\n', (156, 173), True, 'import numpy as np\n'), ((399, 422), 'tensorflow.multiply', 'tf.multiply', (['xs', 'weight'], {}), '(xs, weight)\n', (410, 422), True, 'import tensorflow as tf\n'), ((561, 578), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (576, 578), True, 'import tensorflow as tf\n')] |
from pandas import DataFrame, Series
import numpy as np
import create_data_frames as cdf
def points():
'''Imagine a point system in which each country is awarded 4 points for each
gold medal, 2 points for each silver medal, and one point for each
bronze medal.
Using the numpy.dot function, create a new dataframe called
'olympic_points_df' that includes:
a) a column called 'country_name' with the country name
b) a column called 'points' with the total number of points the country
earned at the Sochi olympics.
'''
olympic_medal_count_df = cdf.create_dataframe()
points = np.dot(olympic_medal_count_df[['gold', 'silver', 'bronze']], [2,4,1])
return DataFrame({'country_name': olympic_medal_count_df['country_name'],
'points': Series(points)
})
if __name__ == '__main__':
points_df = points()
print(points_df)
| [
"pandas.Series",
"numpy.dot",
"create_data_frames.create_dataframe"
] | [((601, 623), 'create_data_frames.create_dataframe', 'cdf.create_dataframe', ([], {}), '()\n', (621, 623), True, 'import create_data_frames as cdf\n'), ((638, 709), 'numpy.dot', 'np.dot', (["olympic_medal_count_df[['gold', 'silver', 'bronze']]", '[2, 4, 1]'], {}), "(olympic_medal_count_df[['gold', 'silver', 'bronze']], [2, 4, 1])\n", (644, 709), True, 'import numpy as np\n'), ((818, 832), 'pandas.Series', 'Series', (['points'], {}), '(points)\n', (824, 832), False, 'from pandas import DataFrame, Series\n')] |
# Module: internal.ensemble
# Provides an Ensemble Forecaster supporting voting, mean and median methods.
# This is a reimplementation from Sktime original EnsembleForecaster.
# This Ensemble is only to be used internally.
import pandas as pd
import numpy as np
import warnings
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._meta import _HeterogenousEnsembleForecaster
_ENSEMBLE_METHODS = ["voting", "mean", "median"]
class _EnsembleForecasterWithVoting(_HeterogenousEnsembleForecaster):
"""
Ensemble of forecasters.
Parameters
----------
forecasters : list of (str, estimator) tuples
method : {'mean', 'median', 'voting'}, default='mean'
Specifies the ensemble method type to be used.
It must be one of 'mean', 'median', or 'voting.
If none is given, 'mean' will be used.
weights : array-like of shape (n_estimators,), default=None
A sequence of weights (`float` or `int`) to weight the occurrences of
predicted values before averaging. This parameter is only valid for
'voting' method, uses uniform weights for 'voting' method if None.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for fit. None means 1 unless
in a joblib.parallel_backend context.
-1 means using all processors.
"""
_required_parameters = ["forecasters"]
_not_required_weights = ["mean", "median"]
_required_weights = ["voting", "mean"]
_available_methods = ["voting", "mean", "median"]
def __init__(self, forecasters, method="mean", weights=None, n_jobs=None):
self.forecasters = forecasters
self.method = method
self.weights = weights
super(_EnsembleForecasterWithVoting, self).__init__(
forecasters=self.forecasters, n_jobs=n_jobs
)
@property
def weights(self):
return self._weights
@weights.setter
def weights(self, value):
self._weights = value
def _check_method(self):
if self.method == "voting" and self.weights is None:
warnings.warn("Missing 'weights' argument, setting uniform weights.")
self.weights = np.ones(len(self.forecasters))
elif self.method in self._not_required_weights and self.weights:
warnings.warn(
"Unused 'weights' argument. When method='mean' or method='median', 'weights' argument is not provided. Setting weights to `None`"
)
self.weights = None
elif self.method not in self._available_methods:
raise ValueError(
f"Method {self.method} is not supported. Available methods are {', '.join(self._available_methods)}"
)
def _check_weights(self):
if self.weights is not None and len(self.weights) != len(self.forecasters):
raise ValueError(
f"Number of forecasters and weights must be equal, got {len(self.weights)} weights and {len(self.estimators)} estimators"
)
def _fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecasters horizon with the steps ahead to to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
names, forecasters = self._check_forecasters()
self._fit_forecasters(forecasters, y, X, fh)
return self
def update(self, y, X=None, update_params=True):
"""Update fitted parameters
Parameters
----------
y : pd.Series
X : pd.DataFrame
update_params : bool, optional (default=True)
Returns
-------
self : an instance of self
"""
for forecaster in self.forecasters_:
forecaster.update(y, X, update_params=update_params)
return self
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
if return_pred_int:
raise NotImplementedError()
self._check_method()
pred_forecasters = pd.concat(self._predict_forecasters(fh, X), axis=1)
if self.method == "median":
return pd.Series(pred_forecasters.median(axis=1))
elif self.method in self._required_weights:
self._check_weights()
pred_w = np.average(pred_forecasters, axis=1, weights=self.weights)
return pd.Series(pred_w, index=pred_forecasters.index)
| [
"warnings.warn",
"pandas.Series",
"numpy.average"
] | [((2126, 2195), 'warnings.warn', 'warnings.warn', (['"""Missing \'weights\' argument, setting uniform weights."""'], {}), '("Missing \'weights\' argument, setting uniform weights.")\n', (2139, 2195), False, 'import warnings\n'), ((2339, 2493), 'warnings.warn', 'warnings.warn', (['"""Unused \'weights\' argument. When method=\'mean\' or method=\'median\', \'weights\' argument is not provided. Setting weights to `None`"""'], {}), '(\n "Unused \'weights\' argument. When method=\'mean\' or method=\'median\', \'weights\' argument is not provided. Setting weights to `None`"\n )\n', (2352, 2493), False, 'import warnings\n'), ((4603, 4661), 'numpy.average', 'np.average', (['pred_forecasters'], {'axis': '(1)', 'weights': 'self.weights'}), '(pred_forecasters, axis=1, weights=self.weights)\n', (4613, 4661), True, 'import numpy as np\n'), ((4681, 4728), 'pandas.Series', 'pd.Series', (['pred_w'], {'index': 'pred_forecasters.index'}), '(pred_w, index=pred_forecasters.index)\n', (4690, 4728), True, 'import pandas as pd\n')] |
import os
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
import time
from model_structure import create_model, load_model
from data_feeder import TestSlidingWindowGenerator
from appliance_data import appliance_data, mains_data
import matplotlib.pyplot as plt
import nilm_metric as nm
class Tester():
""" Used to test and evaluate a pre-trained seq2point model with or without pruning applied.
Parameters:
__appliance (string): The target appliance.
__algorithm (string): The (pruning) algorithm the model was trained with.
__network_type (string): The architecture of the model.
__crop (int): The maximum number of rows of data to evaluate the model with.
__batch_size (int): The number of rows per testing batch.
__window_size (int): The size of eaech sliding window
__window_offset (int): The offset of the inferred value from the sliding window.
__test_directory (string): The directory of the test file for the model.
"""
def __init__(self, appliance, algorithm, crop, batch_size, network_type,
test_directory, saved_model_dir, log_file_dir, fig_path,
input_window_length):
self.__appliance = appliance
self.__algorithm = algorithm
self.__network_type = network_type
self.__crop = crop
self.__batch_size = batch_size
self._input_window_length = input_window_length
self.__window_size = self._input_window_length + 2
self.__window_offset = int(0.5 * self.__window_size - 1)
# self.__number_of_windows = 100
self.__number_of_windows = batch_size
# self.__number_of_windows = -1
self.__test_directory = test_directory
self.__saved_model_dir = saved_model_dir
self.__log_file = log_file_dir
self.__fig_path = fig_path
logging.basicConfig(filename=self.__log_file,level=logging.INFO)
def test_model(self):
""" Tests a fully-trained model using a sliding window generator as an input. Measures inference time, gathers, and
plots evaluationg metrics. """
test_input, test_target = self.load_dataset(self.__test_directory)
model = create_model(self.__network_type, self._input_window_length)
model = load_model(model, self.__network_type, self.__algorithm,
self.__appliance, self.__saved_model_dir)
model.summary()
test_generator = TestSlidingWindowGenerator(number_of_windows=self.__number_of_windows, inputs=test_input, targets=test_target, offset=self.__window_offset)
# Calculate the optimum steps per epoch.
steps_per_test_epoch = np.round(int(test_generator.total_size / self.__batch_size), decimals=0)
# steps_per_test_epoch = 190
# Test the model.
start_time = time.time()
testing_history = model.predict(x=test_generator.load_dataset(), steps=steps_per_test_epoch, verbose=2)
end_time = time.time()
test_time = end_time - start_time
evaluation_metrics = model.evaluate(x=test_generator.load_dataset(), steps=steps_per_test_epoch)
self.log_results(model, test_time, evaluation_metrics)
self.plot_results(testing_history, test_input, test_target)
def load_dataset(self, directory):
"""Loads the testing dataset from the location specified by file_name.
Parameters:
directory (string): The location at which the dataset is stored, concatenated with the file name.
Returns:
test_input (numpy.array): The first n (crop) features of the test dataset.
test_target (numpy.array): The first n (crop) targets of the test dataset.
"""
data_frame = pd.read_csv(directory, nrows=self.__crop, skiprows=0, header=0)
test_input = np.round(np.array(data_frame.iloc[:, 0], float), 6)
test_target = np.round(np.array(data_frame.iloc[:,1], float), 6)
#由于data_feeder.py中181行,TestSlidingWindowGenerator.load_dataset()中已经offset,此处不必重复
# test_target = np.round(np.array(data_frame.iloc[self.__window_offset: -self.__window_offset, 1], float), 6)
del data_frame
return test_input, test_target
def log_results(self, model, test_time, evaluation_metrics):
"""Logs the inference time, MAE and MSE of an evaluated model.
Parameters:
model (tf.keras.Model): The evaluated model.
test_time (float): The time taken by the model to infer all required values.
evaluation metrics (list): The MSE, MAE, and various compression ratios of the model.
"""
logging.info(time.strftime("%Y%m%d-%H:%M:%S"))
logging.info(self.__saved_model_dir)
logging.info(self.__appliance)
inference_log = "Inference Time: " + str(test_time)
logging.info(inference_log)
metric_string = "MSE: ", str(evaluation_metrics[0]), " MAE: ", str(evaluation_metrics[3])
logging.info(metric_string)
# if self.__algorithm == 'seq2point':
# self.count_pruned_weights(model)
def count_pruned_weights(self, model):
""" Counts the total number of weights, pruned weights, and weights in convolutional
layers. Calculates the sparsity ratio of different layer types and logs these values.
Parameters:
model (tf.keras.Model): The evaluated model.
"""
num_total_zeros = 0
num_dense_zeros = 0
num_dense_weights = 0
num_conv_zeros = 0
num_conv_weights = 0
for layer in model.layers:
if np.shape(layer.get_weights())[0] != 0:
layer_weights = layer.get_weights()[0].flatten()
if "conv" in layer.name:
num_conv_weights += np.size(layer_weights)
num_conv_zeros += np.count_nonzero(layer_weights==0)
num_total_zeros += np.size(layer_weights)
else:
num_dense_weights += np.size(layer_weights)
num_dense_zeros += np.count_nonzero(layer_weights==0)
conv_zeros_string = "CONV. ZEROS: " + str(num_conv_zeros)
conv_weights_string = "CONV. WEIGHTS: " + str(num_conv_weights)
conv_sparsity_ratio = "CONV. RATIO: " + str(num_conv_zeros / num_conv_weights)
dense_weights_string = "DENSE WEIGHTS: " + str(num_dense_weights)
dense_zeros_string = "DENSE ZEROS: " + str(num_dense_zeros)
dense_sparsity_ratio = "DENSE RATIO: " + str(num_dense_zeros / num_dense_weights)
total_zeros_string = "TOTAL ZEROS: " + str(num_total_zeros)
total_weights_string = "TOTAL WEIGHTS: " + str(model.count_params())
total_sparsity_ratio = "TOTAL RATIO: " + str(num_total_zeros / model.count_params())
print("LOGGING PATH: ", self.__log_file)
logging.info(conv_zeros_string)
logging.info(conv_weights_string)
logging.info(conv_sparsity_ratio)
logging.info("")
logging.info(dense_zeros_string)
logging.info(dense_weights_string)
logging.info(dense_sparsity_ratio)
logging.info("")
logging.info(total_zeros_string)
logging.info(total_weights_string)
logging.info(total_sparsity_ratio)
def plot_results(self, testing_history, test_input, test_target):
""" Generates and saves a plot of the testing history of the model against the (actual)
aggregate energy values and the true appliance values.
Parameters:
testing_history (numpy.ndarray): The series of values inferred by the model.
test_input (numpy.ndarray): The aggregate energy data.
test_target (numpy.ndarray): The true energy values of the appliance.
"""
testing_history = ((testing_history * appliance_data[self.__appliance]["std"]) + appliance_data[self.__appliance]["mean"])
test_target = test_target[self.__window_offset: -self.__window_offset] #get midpoint
test_target = ((test_target * appliance_data[self.__appliance]["std"]) + appliance_data[self.__appliance]["mean"])
test_agg = (test_input.flatten() * mains_data["std"]) + mains_data["mean"]
test_agg = test_agg[:testing_history.size]
# Can't have negative energy readings - set any results below 0 to 0.
test_target[test_target < 0] = 0
testing_history[testing_history < 0] = 0
test_input[test_input < 0] = 0
test_target = test_target[:testing_history.size]
pd.DataFrame({'predict':testing_history.flatten(), 'truth':test_target.flatten()}, index=None).to_csv(self.__fig_path + '{}_{}.csv'.format(self.__appliance, self.__algorithm))
# mae = tf.keras.metrics.mean_absolute_error(testing_history.flatten(),test_target.flatten()[:testing_history.size])
# logging.info('real mae:' + str(mae))
logging.info('\nMAE: {:}'.format(nm.get_abs_error(test_target.flatten(), testing_history.flatten())))
logging.info('SAE: {:}\n'.format(nm.get_sae(test_target.flatten(), testing_history.flatten(), 8)))
# Plot testing outcomes against ground truth.
plt.figure(1)
# plt.plot(test_agg[self.__window_offset: -self.__window_offset], label="Aggregate")
plt.plot(test_target[:test_agg.size - (2 * self.__window_offset)], label="Ground Truth")
plt.plot(testing_history[:test_agg.size - (2 * self.__window_offset)], label="Predicted")
plt.title(self.__appliance + " " + self.__network_type + "(" + self.__algorithm + ")")
plt.ylabel("Power Value (Watts)")
plt.xlabel("Testing Window")
plt.legend()
# file_path = "./" + self.__appliance + "/saved_models/" + self.__appliance + "_" + self.__algorithm + "_" + self.__network_type + "_test_figure.png"
file_path = self.__fig_path + self.__appliance + "_" + self.__algorithm + "_" + self.__network_type + "_test_figure.png"
plt.savefig(fname=file_path)
# plt.show() | [
"logging.basicConfig",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"model_structure.load_model",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"time.strftime",
"logging.info",
"numpy.size",
"numpy.count_nonzero",
"numpy.array",
"matplotlib.pyplot.figur... | [((1878, 1943), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'self.__log_file', 'level': 'logging.INFO'}), '(filename=self.__log_file, level=logging.INFO)\n', (1897, 1943), False, 'import logging\n'), ((2227, 2287), 'model_structure.create_model', 'create_model', (['self.__network_type', 'self._input_window_length'], {}), '(self.__network_type, self._input_window_length)\n', (2239, 2287), False, 'from model_structure import create_model, load_model\n'), ((2304, 2406), 'model_structure.load_model', 'load_model', (['model', 'self.__network_type', 'self.__algorithm', 'self.__appliance', 'self.__saved_model_dir'], {}), '(model, self.__network_type, self.__algorithm, self.__appliance,\n self.__saved_model_dir)\n', (2314, 2406), False, 'from model_structure import create_model, load_model\n'), ((2481, 2624), 'data_feeder.TestSlidingWindowGenerator', 'TestSlidingWindowGenerator', ([], {'number_of_windows': 'self.__number_of_windows', 'inputs': 'test_input', 'targets': 'test_target', 'offset': 'self.__window_offset'}), '(number_of_windows=self.__number_of_windows,\n inputs=test_input, targets=test_target, offset=self.__window_offset)\n', (2507, 2624), False, 'from data_feeder import TestSlidingWindowGenerator\n'), ((2860, 2871), 'time.time', 'time.time', ([], {}), '()\n', (2869, 2871), False, 'import time\n'), ((3003, 3014), 'time.time', 'time.time', ([], {}), '()\n', (3012, 3014), False, 'import time\n'), ((3761, 3824), 'pandas.read_csv', 'pd.read_csv', (['directory'], {'nrows': 'self.__crop', 'skiprows': '(0)', 'header': '(0)'}), '(directory, nrows=self.__crop, skiprows=0, header=0)\n', (3772, 3824), True, 'import pandas as pd\n'), ((4716, 4752), 'logging.info', 'logging.info', (['self.__saved_model_dir'], {}), '(self.__saved_model_dir)\n', (4728, 4752), False, 'import logging\n'), ((4761, 4791), 'logging.info', 'logging.info', (['self.__appliance'], {}), '(self.__appliance)\n', (4773, 4791), False, 'import logging\n'), ((4860, 4887), 'logging.info', 'logging.info', (['inference_log'], {}), '(inference_log)\n', (4872, 4887), False, 'import logging\n'), ((4995, 5022), 'logging.info', 'logging.info', (['metric_string'], {}), '(metric_string)\n', (5007, 5022), False, 'import logging\n'), ((6893, 6924), 'logging.info', 'logging.info', (['conv_zeros_string'], {}), '(conv_zeros_string)\n', (6905, 6924), False, 'import logging\n'), ((6933, 6966), 'logging.info', 'logging.info', (['conv_weights_string'], {}), '(conv_weights_string)\n', (6945, 6966), False, 'import logging\n'), ((6975, 7008), 'logging.info', 'logging.info', (['conv_sparsity_ratio'], {}), '(conv_sparsity_ratio)\n', (6987, 7008), False, 'import logging\n'), ((7017, 7033), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (7029, 7033), False, 'import logging\n'), ((7042, 7074), 'logging.info', 'logging.info', (['dense_zeros_string'], {}), '(dense_zeros_string)\n', (7054, 7074), False, 'import logging\n'), ((7083, 7117), 'logging.info', 'logging.info', (['dense_weights_string'], {}), '(dense_weights_string)\n', (7095, 7117), False, 'import logging\n'), ((7126, 7160), 'logging.info', 'logging.info', (['dense_sparsity_ratio'], {}), '(dense_sparsity_ratio)\n', (7138, 7160), False, 'import logging\n'), ((7169, 7185), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (7181, 7185), False, 'import logging\n'), ((7194, 7226), 'logging.info', 'logging.info', (['total_zeros_string'], {}), '(total_zeros_string)\n', (7206, 7226), False, 'import logging\n'), ((7235, 7269), 'logging.info', 'logging.info', (['total_weights_string'], {}), '(total_weights_string)\n', (7247, 7269), False, 'import logging\n'), ((7278, 7312), 'logging.info', 'logging.info', (['total_sparsity_ratio'], {}), '(total_sparsity_ratio)\n', (7290, 7312), False, 'import logging\n'), ((9192, 9205), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (9202, 9205), True, 'import matplotlib.pyplot as plt\n'), ((9307, 9398), 'matplotlib.pyplot.plot', 'plt.plot', (['test_target[:test_agg.size - 2 * self.__window_offset]'], {'label': '"""Ground Truth"""'}), "(test_target[:test_agg.size - 2 * self.__window_offset], label=\n 'Ground Truth')\n", (9315, 9398), True, 'import matplotlib.pyplot as plt\n'), ((9404, 9496), 'matplotlib.pyplot.plot', 'plt.plot', (['testing_history[:test_agg.size - 2 * self.__window_offset]'], {'label': '"""Predicted"""'}), "(testing_history[:test_agg.size - 2 * self.__window_offset], label=\n 'Predicted')\n", (9412, 9496), True, 'import matplotlib.pyplot as plt\n'), ((9502, 9593), 'matplotlib.pyplot.title', 'plt.title', (["(self.__appliance + ' ' + self.__network_type + '(' + self.__algorithm + ')')"], {}), "(self.__appliance + ' ' + self.__network_type + '(' + self.\n __algorithm + ')')\n", (9511, 9593), True, 'import matplotlib.pyplot as plt\n'), ((9597, 9630), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power Value (Watts)"""'], {}), "('Power Value (Watts)')\n", (9607, 9630), True, 'import matplotlib.pyplot as plt\n'), ((9639, 9667), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Testing Window"""'], {}), "('Testing Window')\n", (9649, 9667), True, 'import matplotlib.pyplot as plt\n'), ((9676, 9688), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9686, 9688), True, 'import matplotlib.pyplot as plt\n'), ((9985, 10013), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'file_path'}), '(fname=file_path)\n', (9996, 10013), True, 'import matplotlib.pyplot as plt\n'), ((3855, 3893), 'numpy.array', 'np.array', (['data_frame.iloc[:, 0]', 'float'], {}), '(data_frame.iloc[:, 0], float)\n', (3863, 3893), True, 'import numpy as np\n'), ((3929, 3967), 'numpy.array', 'np.array', (['data_frame.iloc[:, 1]', 'float'], {}), '(data_frame.iloc[:, 1], float)\n', (3937, 3967), True, 'import numpy as np\n'), ((4674, 4706), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H:%M:%S"""'], {}), "('%Y%m%d-%H:%M:%S')\n", (4687, 4706), False, 'import time\n'), ((5817, 5839), 'numpy.size', 'np.size', (['layer_weights'], {}), '(layer_weights)\n', (5824, 5839), True, 'import numpy as np\n'), ((5878, 5914), 'numpy.count_nonzero', 'np.count_nonzero', (['(layer_weights == 0)'], {}), '(layer_weights == 0)\n', (5894, 5914), True, 'import numpy as np\n'), ((5953, 5975), 'numpy.size', 'np.size', (['layer_weights'], {}), '(layer_weights)\n', (5960, 5975), True, 'import numpy as np\n'), ((6039, 6061), 'numpy.size', 'np.size', (['layer_weights'], {}), '(layer_weights)\n', (6046, 6061), True, 'import numpy as np\n'), ((6101, 6137), 'numpy.count_nonzero', 'np.count_nonzero', (['(layer_weights == 0)'], {}), '(layer_weights == 0)\n', (6117, 6137), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Simple heat pump model with CoolProp (high-level interface).
Only work with subcritical cycles and single substances
Ther thermodynamic properties are called via CoolProp and the property plots are generated with the dedicated CoolProp functions
Cycles conditions are imposed and the secondary fluid temperature profiles are recalculated.
@author: <NAME>
"""
# clear variables:
from IPython import get_ipython
get_ipython().magic('reset -sf')
"Imports"
import CoolProp.CoolProp as CP
import numpy as np
from misc.utils import NoStdStreams
from components.heat_exchangers import hx
p = np.zeros(8, dtype=float)
v = np.zeros(8, dtype=float)
T = np.zeros(8, dtype=float)
x = np.zeros(8, dtype=float)
h = np.zeros(8, dtype=float)
s = np.zeros(8, dtype=float)
T_hw = np.zeros(8, dtype=float) # hot water
T_cw = np.zeros(8, dtype=float) # cold water
# Cycle parameters:
fluid ='R245fa'
T_ev = 273.15 + 80
T_cd = 273.15 + 145
DELTAT_sh = 5
DELTAT_sc = 5
epsilon_s = 0.6
# Heat sink parameters:
T_w_su_cd =273.15 + 120 # cooling water inlet temperature /K
DELTAT_cd=10 # pinch point temperature of cooling water /K
Q_dot_cd=10000 # heat capacity flowrates of cooling water kW/K
# Heat source parameters:
T_w_su_ev =273.15 + 95 # cooling water inlet temperature /K
DELTAT_ev=10 # pinch point temperature of cooling water /K
# get fluid properties:
#T_crit = CP.PropsSI("Tcrit",fluid)
#p_crit = CP.PropsSI("Pcrit",fluid)
p_low = CP.PropsSI('P','Q', 0.5, 'T', T_ev, fluid)
p_high = CP.PropsSI('P','Q', 0.5, 'T', T_cd, fluid)
#h_crit = CP.PropsSI('H','T', T_crit, 'P', p_crit, fluid)
#s_crit = CP.PropsSI('S','T', T_crit, 'P', p_crit, fluid)
#Evaporator outlet:
p[0] = p_low
s[0] = CP.PropsSI('S','Q', 1, 'P', p_low, fluid)
T[0] = CP.PropsSI('T','Q', 1, 'P', p_low, fluid)
h[0] = CP.PropsSI('H','Q', 1, 'P', p_low, fluid)
# Compressor inlet:
p_su_cp = p_low
T_su_cp = CP.PropsSI('T','Q', 1, 'P', p_low, fluid) + DELTAT_sh
h_su_cp = CP.PropsSI('H','T', T_su_cp, 'P', p_su_cp, fluid)
s_su_cp = CP.PropsSI('S','T', T_su_cp, 'P', p_su_cp, fluid)
s[1] = s_su_cp
T[1] = T_su_cp
h[1] = h_su_cp
p[1] = p_su_cp
#Compressor outlet:
p_ex_cp = p_high
h_ex_cp_s = CP.PropsSI('H','S', s_su_cp, 'P', p_ex_cp, fluid)
h_ex_cp = h_su_cp - (h_su_cp - h_ex_cp_s)/epsilon_s
T_ex_cp = CP.PropsSI('T','H', h_ex_cp, 'P', p_ex_cp, fluid)
s_ex_cp = CP.PropsSI('S','H', h_ex_cp, 'P', p_ex_cp, fluid)
p[2] = p_high
s[2] = s_ex_cp
T[2] = T_ex_cp
h[2] = h_ex_cp
#Saturated vapor in the condenser:
p[3] = p_high
s[3] = CP.PropsSI('S','Q', 1, 'P', p_high, fluid)
T[3] = CP.PropsSI('T','Q', 1, 'P', p_high, fluid)
h[3] = CP.PropsSI('H','Q', 1, 'P', p_high, fluid)
#Saturated liquid in the condenser:
p[4] = p_high
s[4] = CP.PropsSI('S','Q', 0, 'P', p_high, fluid)
T[4] = CP.PropsSI('T','Q', 0, 'P', p_high, fluid)
h[4] = CP.PropsSI('H','Q', 0, 'P', p_high, fluid)
T[5] = T[4] - DELTAT_sc
p[5] = p_high
h[5] = CP.PropsSI('H','T', T[5], 'P', p[5], fluid)
s[5] = CP.PropsSI('S','T', T[5], 'P', p[5], fluid)
#Inlet of the evaporator:
h[6] = h[5]
p[6] = p_low
T[6] = CP.PropsSI('T','H', h[5], 'P', p[6], fluid)
s[6] = CP.PropsSI('S','H', h[5], 'P', p[6], fluid)
h[7] = h[0]
p[7] = p[0]
T[7] = T[0]
s[7] = s[0]
print("The temperature of each state")
print(T)
print("The pressure of each state (Pa) ")
print(p)
print("The enthalpy of each state (J/kg)")
print(h)
print("The entropy of each state (J/kg/K)")
print(s)
# heat sink:
cp_w = 4800
fluid2 ='water'
M_dot = Q_dot_cd / (h[2] - h[5])
T_hw[5] = T_w_su_cd
T_hw[3] = T[3] - DELTAT_cd
M_dot_hw = M_dot * (h[3] - h[5])/(cp_w*(T_hw[3] - T_hw[5]))
T_hw[4] = T_hw[5] + M_dot * (h[4] - h[5]) / (cp_w * M_dot_hw)
T_hw[2] = T_hw[5] + M_dot * (h[2] - h[5]) / (cp_w * M_dot_hw)
print ("The mass flow rate of working fluid is %.2f kg/s" %(M_dot))
print("The mass flowrate of hot water is %.2f kg/s" %(M_dot_hw))
# Heat source:
T_cw[1] = T_w_su_ev
T_cw[6] = T[6] + DELTAT_ev
M_dot_cw = M_dot * (h[1] - h[6])/(cp_w*(T_cw[1] - T_cw[6]))
T_cw[0] = T_cw[6] + M_dot * (h[0] - h[6]) / (cp_w * M_dot_cw)
# Temperature profile in the condenser:
s_cd,T_cd,T_hf,pinch_cd = hx(fluid,10,M_dot,h[2],h[5],p_high,p_high,T_w_su_cd,M_dot_hw*cp_w)
# Temperature profile in the evaporator:
s_ev,T_ev,T_cf,pinch_ev = hx(fluid,10,M_dot,h[6],h[1],p_low,p_low,T_w_su_ev,M_dot_cw*cp_w)
#%%
from CoolProp.Plots import PropertyPlot
from CoolProp.Plots.SimpleCycles import StateContainer
import pickle
import os
cache_plot = False
if cache_plot:
filename = fluid + '.p'
if os.path.isfile(filename): # Load previously saved plot
pp = pickle.load(open( filename, "rb" ) )
else:
states = StateContainer()
states_hf = StateContainer()
states_cf = StateContainer()
pp = PropertyPlot('HEOS::'+fluid, 'TS')
with NoStdStreams():
pp.calc_isolines()
with open(filename, 'wb') as f:
pickle.dump(pp, f)
else:
states = StateContainer()
states_hf = StateContainer()
states_cf = StateContainer()
pp = PropertyPlot('HEOS::'+fluid, 'TS')
with NoStdStreams():
pp.calc_isolines()
for i in range(3):
states[i,'T'] = T[i]
states[i,"S"] = s[i]
for i,Tx in enumerate(T_cd):
states.append({'T':Tx,'S':s_cd[i]})
for i in range(4,len(T)):
states.append({'T':T[i],'S':s[i]})
states.append({'T':T[1],'S':s[1]}) # for some reasons, the second point needs to be repeated to close the cycle
for i,Tx in enumerate(T_hf):
states_hf.append({'T':Tx,'S':s_cd[i]})
for i,Tx in enumerate(T_cf):
states_cf.append({'T':Tx,'S':s_ev[i]})
with NoStdStreams():
pp.draw_process(states,line_opts={'color':'green'})
pp.draw_process(states_hf,line_opts={'color':'red', 'linestyle':'dashed'})
pp.draw_process(states_cf,line_opts={'color':'blue', 'linestyle':'dashed'})
pp.show()
| [
"IPython.get_ipython",
"CoolProp.CoolProp.PropsSI",
"CoolProp.Plots.PropertyPlot",
"misc.utils.NoStdStreams",
"pickle.dump",
"CoolProp.Plots.SimpleCycles.StateContainer",
"components.heat_exchangers.hx",
"os.path.isfile",
"numpy.zeros"
] | [((633, 657), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (641, 657), True, 'import numpy as np\n'), ((662, 686), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (670, 686), True, 'import numpy as np\n'), ((691, 715), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (699, 715), True, 'import numpy as np\n'), ((720, 744), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (728, 744), True, 'import numpy as np\n'), ((749, 773), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (757, 773), True, 'import numpy as np\n'), ((778, 802), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (786, 802), True, 'import numpy as np\n'), ((811, 835), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (819, 835), True, 'import numpy as np\n'), ((859, 883), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'float'}), '(8, dtype=float)\n', (867, 883), True, 'import numpy as np\n'), ((1550, 1593), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""P"""', '"""Q"""', '(0.5)', '"""T"""', 'T_ev', 'fluid'], {}), "('P', 'Q', 0.5, 'T', T_ev, fluid)\n", (1560, 1593), True, 'import CoolProp.CoolProp as CP\n'), ((1602, 1645), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""P"""', '"""Q"""', '(0.5)', '"""T"""', 'T_cd', 'fluid'], {}), "('P', 'Q', 0.5, 'T', T_cd, fluid)\n", (1612, 1645), True, 'import CoolProp.CoolProp as CP\n'), ((1802, 1844), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""Q"""', '(1)', '"""P"""', 'p_low', 'fluid'], {}), "('S', 'Q', 1, 'P', p_low, fluid)\n", (1812, 1844), True, 'import CoolProp.CoolProp as CP\n'), ((1851, 1893), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""T"""', '"""Q"""', '(1)', '"""P"""', 'p_low', 'fluid'], {}), "('T', 'Q', 1, 'P', p_low, fluid)\n", (1861, 1893), True, 'import CoolProp.CoolProp as CP\n'), ((1900, 1942), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""H"""', '"""Q"""', '(1)', '"""P"""', 'p_low', 'fluid'], {}), "('H', 'Q', 1, 'P', p_low, fluid)\n", (1910, 1942), True, 'import CoolProp.CoolProp as CP\n'), ((2053, 2103), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""H"""', '"""T"""', 'T_su_cp', '"""P"""', 'p_su_cp', 'fluid'], {}), "('H', 'T', T_su_cp, 'P', p_su_cp, fluid)\n", (2063, 2103), True, 'import CoolProp.CoolProp as CP\n'), ((2113, 2163), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""T"""', 'T_su_cp', '"""P"""', 'p_su_cp', 'fluid'], {}), "('S', 'T', T_su_cp, 'P', p_su_cp, fluid)\n", (2123, 2163), True, 'import CoolProp.CoolProp as CP\n'), ((2273, 2323), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""H"""', '"""S"""', 's_su_cp', '"""P"""', 'p_ex_cp', 'fluid'], {}), "('H', 'S', s_su_cp, 'P', p_ex_cp, fluid)\n", (2283, 2323), True, 'import CoolProp.CoolProp as CP\n'), ((2385, 2435), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""T"""', '"""H"""', 'h_ex_cp', '"""P"""', 'p_ex_cp', 'fluid'], {}), "('T', 'H', h_ex_cp, 'P', p_ex_cp, fluid)\n", (2395, 2435), True, 'import CoolProp.CoolProp as CP\n'), ((2445, 2495), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""H"""', 'h_ex_cp', '"""P"""', 'p_ex_cp', 'fluid'], {}), "('S', 'H', h_ex_cp, 'P', p_ex_cp, fluid)\n", (2455, 2495), True, 'import CoolProp.CoolProp as CP\n'), ((2611, 2654), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""Q"""', '(1)', '"""P"""', 'p_high', 'fluid'], {}), "('S', 'Q', 1, 'P', p_high, fluid)\n", (2621, 2654), True, 'import CoolProp.CoolProp as CP\n'), ((2661, 2704), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""T"""', '"""Q"""', '(1)', '"""P"""', 'p_high', 'fluid'], {}), "('T', 'Q', 1, 'P', p_high, fluid)\n", (2671, 2704), True, 'import CoolProp.CoolProp as CP\n'), ((2711, 2754), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""H"""', '"""Q"""', '(1)', '"""P"""', 'p_high', 'fluid'], {}), "('H', 'Q', 1, 'P', p_high, fluid)\n", (2721, 2754), True, 'import CoolProp.CoolProp as CP\n'), ((2812, 2855), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""Q"""', '(0)', '"""P"""', 'p_high', 'fluid'], {}), "('S', 'Q', 0, 'P', p_high, fluid)\n", (2822, 2855), True, 'import CoolProp.CoolProp as CP\n'), ((2862, 2905), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""T"""', '"""Q"""', '(0)', '"""P"""', 'p_high', 'fluid'], {}), "('T', 'Q', 0, 'P', p_high, fluid)\n", (2872, 2905), True, 'import CoolProp.CoolProp as CP\n'), ((2912, 2955), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""H"""', '"""Q"""', '(0)', '"""P"""', 'p_high', 'fluid'], {}), "('H', 'Q', 0, 'P', p_high, fluid)\n", (2922, 2955), True, 'import CoolProp.CoolProp as CP\n'), ((3004, 3048), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""H"""', '"""T"""', 'T[5]', '"""P"""', 'p[5]', 'fluid'], {}), "('H', 'T', T[5], 'P', p[5], fluid)\n", (3014, 3048), True, 'import CoolProp.CoolProp as CP\n'), ((3056, 3100), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""T"""', 'T[5]', '"""P"""', 'p[5]', 'fluid'], {}), "('S', 'T', T[5], 'P', p[5], fluid)\n", (3066, 3100), True, 'import CoolProp.CoolProp as CP\n'), ((3161, 3205), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""T"""', '"""H"""', 'h[5]', '"""P"""', 'p[6]', 'fluid'], {}), "('T', 'H', h[5], 'P', p[6], fluid)\n", (3171, 3205), True, 'import CoolProp.CoolProp as CP\n'), ((3212, 3256), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""S"""', '"""H"""', 'h[5]', '"""P"""', 'p[6]', 'fluid'], {}), "('S', 'H', h[5], 'P', p[6], fluid)\n", (3222, 3256), True, 'import CoolProp.CoolProp as CP\n'), ((4206, 4282), 'components.heat_exchangers.hx', 'hx', (['fluid', '(10)', 'M_dot', 'h[2]', 'h[5]', 'p_high', 'p_high', 'T_w_su_cd', '(M_dot_hw * cp_w)'], {}), '(fluid, 10, M_dot, h[2], h[5], p_high, p_high, T_w_su_cd, M_dot_hw * cp_w)\n', (4208, 4282), False, 'from components.heat_exchangers import hx\n'), ((4341, 4415), 'components.heat_exchangers.hx', 'hx', (['fluid', '(10)', 'M_dot', 'h[6]', 'h[1]', 'p_low', 'p_low', 'T_w_su_ev', '(M_dot_cw * cp_w)'], {}), '(fluid, 10, M_dot, h[6], h[1], p_low, p_low, T_w_su_ev, M_dot_cw * cp_w)\n', (4343, 4415), False, 'from components.heat_exchangers import hx\n'), ((1989, 2031), 'CoolProp.CoolProp.PropsSI', 'CP.PropsSI', (['"""T"""', '"""Q"""', '(1)', '"""P"""', 'p_low', 'fluid'], {}), "('T', 'Q', 1, 'P', p_low, fluid)\n", (1999, 2031), True, 'import CoolProp.CoolProp as CP\n'), ((4601, 4625), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (4615, 4625), False, 'import os\n'), ((5026, 5042), 'CoolProp.Plots.SimpleCycles.StateContainer', 'StateContainer', ([], {}), '()\n', (5040, 5042), False, 'from CoolProp.Plots.SimpleCycles import StateContainer\n'), ((5059, 5075), 'CoolProp.Plots.SimpleCycles.StateContainer', 'StateContainer', ([], {}), '()\n', (5073, 5075), False, 'from CoolProp.Plots.SimpleCycles import StateContainer\n'), ((5092, 5108), 'CoolProp.Plots.SimpleCycles.StateContainer', 'StateContainer', ([], {}), '()\n', (5106, 5108), False, 'from CoolProp.Plots.SimpleCycles import StateContainer\n'), ((5118, 5154), 'CoolProp.Plots.PropertyPlot', 'PropertyPlot', (["('HEOS::' + fluid)", '"""TS"""'], {}), "('HEOS::' + fluid, 'TS')\n", (5130, 5154), False, 'from CoolProp.Plots import PropertyPlot\n'), ((5705, 5719), 'misc.utils.NoStdStreams', 'NoStdStreams', ([], {}), '()\n', (5717, 5719), False, 'from misc.utils import NoStdStreams\n'), ((446, 459), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (457, 459), False, 'from IPython import get_ipython\n'), ((4735, 4751), 'CoolProp.Plots.SimpleCycles.StateContainer', 'StateContainer', ([], {}), '()\n', (4749, 4751), False, 'from CoolProp.Plots.SimpleCycles import StateContainer\n'), ((4772, 4788), 'CoolProp.Plots.SimpleCycles.StateContainer', 'StateContainer', ([], {}), '()\n', (4786, 4788), False, 'from CoolProp.Plots.SimpleCycles import StateContainer\n'), ((4809, 4825), 'CoolProp.Plots.SimpleCycles.StateContainer', 'StateContainer', ([], {}), '()\n', (4823, 4825), False, 'from CoolProp.Plots.SimpleCycles import StateContainer\n'), ((4839, 4875), 'CoolProp.Plots.PropertyPlot', 'PropertyPlot', (["('HEOS::' + fluid)", '"""TS"""'], {}), "('HEOS::' + fluid, 'TS')\n", (4851, 4875), False, 'from CoolProp.Plots import PropertyPlot\n'), ((5162, 5176), 'misc.utils.NoStdStreams', 'NoStdStreams', ([], {}), '()\n', (5174, 5176), False, 'from misc.utils import NoStdStreams\n'), ((4887, 4901), 'misc.utils.NoStdStreams', 'NoStdStreams', ([], {}), '()\n', (4899, 4901), False, 'from misc.utils import NoStdStreams\n'), ((4987, 5005), 'pickle.dump', 'pickle.dump', (['pp', 'f'], {}), '(pp, f)\n', (4998, 5005), False, 'import pickle\n')] |
import glob
import os
import sys
import h5py
import numpy as np
import torch
output = open('deeprank_predictions.txt', 'w')
output.write('pdb class p_crystal p_bio \n')
for hdf5 in glob.glob('*/'):
name=hdf5.split('/')[0]
if os.path.exists('{}test_data.hdf5'.format(hdf5)):
f = h5py.File('{}test_data.hdf5'.format(hdf5),'r')
results = f['epoch_0000']['test']['outputs'][()]
res = results[0]
softmax = torch.nn.Softmax(dim=0)
res_softmax = softmax(torch.Tensor(res)).numpy()
bin_class_res = np.argmax(res)
if bin_class_res1 == 0 :
pred = 'crystal'
else :
pred = 'biological'
output.write(name+" "+pred+" "+str(bin_class_res)+" "+str(round(res_softmax[0],3))+" "+str(round(res_softmax[1],3))+"\n")
else:
output.write(name+" None None None None \n")
output.close()
| [
"torch.nn.Softmax",
"torch.Tensor",
"numpy.argmax",
"glob.glob"
] | [((187, 202), 'glob.glob', 'glob.glob', (['"""*/"""'], {}), "('*/')\n", (196, 202), False, 'import glob\n'), ((474, 497), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(0)'}), '(dim=0)\n', (490, 497), False, 'import torch\n'), ((588, 602), 'numpy.argmax', 'np.argmax', (['res'], {}), '(res)\n', (597, 602), True, 'import numpy as np\n'), ((528, 545), 'torch.Tensor', 'torch.Tensor', (['res'], {}), '(res)\n', (540, 545), False, 'import torch\n')] |
import cv2
import numpy as np
import os
dataset_dir_path = '/Users/baulhoa/Documents/PythonProjects/datasets/vggface2/train'
new_dataset_dir_path = '/Users/baulhoa/Documents/PythonProjects/datasets/vggface2/train_refined_resized'
anno_file_path = 'anno/vggface2_refined_anno.txt'
ishape = [142, 128, 3]
anno_file = open(anno_file_path, 'r')
lines = anno_file.readlines()
total_images = len(lines)
print('Total images: {}'.format(total_images))
for i in range(total_images):
line = lines[i][:-1]
anno = line.split(' ')
id_folder, file_name = anno[0].split('/')
image_file_path = dataset_dir_path + '/' + id_folder + '/' + file_name
x = cv2.imread(image_file_path)
x = cv2.resize(x, dsize=(ishape[1], ishape[0]), interpolation=cv2.INTER_CUBIC)
x = np.clip(x, 0, 255)
id_folder_path = new_dataset_dir_path + '/' + id_folder
if not os.path.exists(id_folder_path):
os.mkdir(id_folder_path)
cv2.imwrite(id_folder_path + '/' + file_name, x)
if i%100 == 99:
print('-', end='')
if i%10000 == 9999:
print(round(i*100/total_images, 2), end='%\n') | [
"numpy.clip",
"cv2.imwrite",
"os.path.exists",
"os.mkdir",
"cv2.resize",
"cv2.imread"
] | [((645, 672), 'cv2.imread', 'cv2.imread', (['image_file_path'], {}), '(image_file_path)\n', (655, 672), False, 'import cv2\n'), ((678, 752), 'cv2.resize', 'cv2.resize', (['x'], {'dsize': '(ishape[1], ishape[0])', 'interpolation': 'cv2.INTER_CUBIC'}), '(x, dsize=(ishape[1], ishape[0]), interpolation=cv2.INTER_CUBIC)\n', (688, 752), False, 'import cv2\n'), ((758, 776), 'numpy.clip', 'np.clip', (['x', '(0)', '(255)'], {}), '(x, 0, 255)\n', (765, 776), True, 'import numpy as np\n'), ((904, 952), 'cv2.imwrite', 'cv2.imwrite', (["(id_folder_path + '/' + file_name)", 'x'], {}), "(id_folder_path + '/' + file_name, x)\n", (915, 952), False, 'import cv2\n'), ((843, 873), 'os.path.exists', 'os.path.exists', (['id_folder_path'], {}), '(id_folder_path)\n', (857, 873), False, 'import os\n'), ((877, 901), 'os.mkdir', 'os.mkdir', (['id_folder_path'], {}), '(id_folder_path)\n', (885, 901), False, 'import os\n')] |
import numpy as np
from pandas import DataFrame
class procrustes_test(object):
"""
Docstring for function ecopy.procrustes_test
====================
Conducts permutation procrustes test of relationship
between two non-diagonal (raw) matrices
Use
----
procrustes_test(mat1, mat2, nperm)
Returns an object of class procrustes_test
Parameters
----------
mat1: A raw site x species matrix (or any object x descriptor)
mat2: A raw site x descriptor matrix (or any object x descriptor)
nperm: Number of permutations
Attributes (see online documentation for descriptions)
---------
m12_obs: Observed test statistic, m12**2
pval: p-value
perm: Number of permutations
Methods
--------
summary(): provides a summary of test results
Example
--------
import ecopy as ep
d1 = ep.load_data('varespec')
d2 = ep.load_data('varechem')
d = ep.procrustes_test(d1, d2)
print(d.summary())
"""
def __init__(self, mat1, mat2, nperm=999):
if isinstance(mat1, DataFrame):
X = np.array(mat1).astype('float')
else:
X = mat1.astype('float')
if isinstance(mat2, DataFrame):
Y = np.array(mat2).astype('float')
else:
Y = mat2.astype('float')
if X.shape[0] != Y.shape[0]:
msg = 'Matrices must have the same number of rows'
raise ValueError(msg)
X_cent = np.apply_along_axis(lambda x: x - x.mean(), 0, X)
Y_cent = np.apply_along_axis(lambda y: y - y.mean(), 0, Y)
X_cent = X_cent / np.sqrt(np.sum(X_cent**2))
Y_cent = Y_cent / np.sqrt(np.sum(Y_cent**2))
W = np.sum(np.linalg.svd(X_cent.T.dot(Y_cent), compute_uv=0))
self.m12_obs = 1 - W**2
m12_perm = np.zeros(nperm)
i = 0
while i < nperm:
idx = np.random.permutation(range(X_cent.shape[0]))
X_perm = X_cent[idx,:]
W_perm = np.sum(np.linalg.svd(X_perm.T.dot(Y_cent), compute_uv=0))
m12_perm[i] = 1 - W_perm**2
i += 1
self.pval = np.mean(m12_perm < self.m12_obs)
self.perm = nperm
def summary(self):
summ = '\nm12 squared = {0:.3}\np = {1:.3}\npermutations = {2}'.format(self.m12_obs, self.pval, self.perm)
return summ
| [
"numpy.array",
"numpy.mean",
"numpy.zeros",
"numpy.sum"
] | [((1824, 1839), 'numpy.zeros', 'np.zeros', (['nperm'], {}), '(nperm)\n', (1832, 1839), True, 'import numpy as np\n'), ((2136, 2168), 'numpy.mean', 'np.mean', (['(m12_perm < self.m12_obs)'], {}), '(m12_perm < self.m12_obs)\n', (2143, 2168), True, 'import numpy as np\n'), ((1631, 1650), 'numpy.sum', 'np.sum', (['(X_cent ** 2)'], {}), '(X_cent ** 2)\n', (1637, 1650), True, 'import numpy as np\n'), ((1684, 1703), 'numpy.sum', 'np.sum', (['(Y_cent ** 2)'], {}), '(Y_cent ** 2)\n', (1690, 1703), True, 'import numpy as np\n'), ((1109, 1123), 'numpy.array', 'np.array', (['mat1'], {}), '(mat1)\n', (1117, 1123), True, 'import numpy as np\n'), ((1247, 1261), 'numpy.array', 'np.array', (['mat2'], {}), '(mat2)\n', (1255, 1261), True, 'import numpy as np\n')] |
# encoding: utf-8
# Author: <NAME>
# Created: 2021/9/28
import logging
from typing import Dict, Union
import numpy as np
from src.utils.libcoord.coord_transform import normalize_pc, invert_normalize_pc
class PointCloudSubsampler(object):
""" Point cloud subsampling transformation class.
A transformer to subsample the point cloud data.
Args:
N (int): number of points in output point cloud
allow_repeat (bool): if size of input point cloud < N, allow to use repeat number
"""
def __init__(self, N: int, allow_repeat=False):
self.N = N
self._allow_repeat = allow_repeat
def __call__(self, data: Union[Dict, np.ndarray]):
""" Calls the transformation.
Args:
data (dict or array)
Returns: same format as data
"""
# check arrays have same dim-0 length if it's Dict
data_num: int = -1
if isinstance(data, dict):
for key, arr in data.items():
if data_num < 0:
data_num = arr.shape[0] # init value
assert arr.shape[0] == data_num, f"Size not consistent in data: {arr.shape[0]} != {data_num}"
elif isinstance(data, np.ndarray):
data_num = data.shape[0]
else:
raise AssertionError("Unknown data type. Should be array or Dict")
if data_num < self.N:
logging.warning(f"data_num({data_num}) < self.N ({self.N}):")
if self._allow_repeat:
random_inx = np.random.randint(0, data_num, self.N)
else:
# if not allow repeat, no subsample
n_selected = min(data_num, self.N)
random_inx = np.random.choice(data_num, n_selected, replace=False) # select without repeat
else:
random_inx = np.random.choice(data_num, self.N, replace=False) # select without repeat
output = data.copy()
if isinstance(output, dict):
for key, arr in output.items():
output[key] = arr[random_inx]
elif isinstance(output, np.ndarray):
output = output[random_inx]
return output
# class PointCloudScaler(object):
# """
# Scaling (normalizing) point cloud.
# data * scale + shift
# """
#
# def __init__(self, scale_factor_3d: np.ndarray, shift_3d: np.ndarray = np.array([0, 0, 0])):
# assert 3 == len(scale_factor_3d.reshape(-1)), "Wrong dimension for scale factors"
# self.scale_factor_3d = scale_factor_3d.reshape(3)
# self.shift_3d = shift_3d.reshape(3)
#
# def __call__(self, data: Union[Dict, np.ndarray]):
# if isinstance(data, Dict):
# out = {}
# for key, value in data.items():
# out[key] = value * self.scale_factor_3d + self.shift_3d
# elif isinstance(data, np.ndarray):
# out = data * self.scale_factor_3d + self.shift_3d
# else:
# raise TypeError("Unknown data type")
# return out
#
# def inverse(self, data: Union[Dict, np.ndarray]):
# if isinstance(data, Dict):
# out = {}
# for key, value in data.items():
# out[key] = (value - self.shift_3d) / self.scale_factor_3d
# elif isinstance(data, np.ndarray):
# out = (data - self.shift_3d) / self.scale_factor_3d
# else:
# raise TypeError("Unknown data type")
# return out
class PointCloudNormalizer(object):
def __init__(self, scales, center_shift):
self.scales = scales
self.center_shift = center_shift
def __call__(self, points):
return normalize_pc(points, self.scales, self.center_shift)
def inverse(self, points):
return invert_normalize_pc(points, self.scales, self.center_shift)
class ShiftPoints(object):
def __init__(self, shift_3d: np.ndarray):
self.shift_3d = np.array(shift_3d).reshape(3)
def __call__(self, points, plane='xy'):
if 'xy' == plane:
xy = points[:, :, [0, 1]]
xy[:, :, 0] = xy[:, :, 0] + self.shift_3d[0]
xy[:, :, 1] = xy[:, :, 1] + self.shift_3d[1]
return xy
# # f there are outliers out of the range
# if xy_new.max() >= 1:
# xy_new[xy_new >= 1] = 1 - 10e-6
# if xy_new.min() < 0:
# xy_new[xy_new < 0] = 0.0
# return xy_new
# if isinstance(points, np.ndarray):
# return points + self.shift_3d
# elif isinstance(points, torch.Tensor):
# return points + torch.from_numpy(self.shift_3d)
# else:
# raise TypeError
def inverse(self, points, plane='xy'):
if 'xy' == plane:
xy = points[:, :, [0, 1]]
xy[:, :, 0] = xy[:, :, 0] - self.shift_3d[0]
xy[:, :, 1] = xy[:, :, 1] - self.shift_3d[1]
return xy
# if isinstance(points, np.ndarray):
# return points - self.shift_3d
# elif isinstance(points, torch.Tensor):
# return points - torch.from_numpy(self.shift_3d)
# else:
# raise TypeError
if __name__ == '__main__':
# test subsample
sampler = PointCloudSubsampler(5)
dummy_array = np.random.randint(0, 50, 20)
print(f"dummy_array: {dummy_array}")
sub_arr = sampler(dummy_array)
print(f"sub_arr: {sub_arr}")
print(f"dummy_array: {dummy_array}")
dummy_dic = {'1': dummy_array, 'None': dummy_array}
sub_dic = sampler(dummy_dic)
print(f"dummy_dic: {dummy_dic}")
print(f"sub_dic: {sub_dic}")
| [
"numpy.random.choice",
"logging.warning",
"src.utils.libcoord.coord_transform.invert_normalize_pc",
"numpy.array",
"numpy.random.randint",
"src.utils.libcoord.coord_transform.normalize_pc"
] | [((5281, 5309), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)', '(20)'], {}), '(0, 50, 20)\n', (5298, 5309), True, 'import numpy as np\n'), ((3681, 3733), 'src.utils.libcoord.coord_transform.normalize_pc', 'normalize_pc', (['points', 'self.scales', 'self.center_shift'], {}), '(points, self.scales, self.center_shift)\n', (3693, 3733), False, 'from src.utils.libcoord.coord_transform import normalize_pc, invert_normalize_pc\n'), ((3781, 3840), 'src.utils.libcoord.coord_transform.invert_normalize_pc', 'invert_normalize_pc', (['points', 'self.scales', 'self.center_shift'], {}), '(points, self.scales, self.center_shift)\n', (3800, 3840), False, 'from src.utils.libcoord.coord_transform import normalize_pc, invert_normalize_pc\n'), ((1401, 1462), 'logging.warning', 'logging.warning', (['f"""data_num({data_num}) < self.N ({self.N}):"""'], {}), "(f'data_num({data_num}) < self.N ({self.N}):')\n", (1416, 1462), False, 'import logging\n'), ((1834, 1883), 'numpy.random.choice', 'np.random.choice', (['data_num', 'self.N'], {'replace': '(False)'}), '(data_num, self.N, replace=False)\n', (1850, 1883), True, 'import numpy as np\n'), ((1527, 1565), 'numpy.random.randint', 'np.random.randint', (['(0)', 'data_num', 'self.N'], {}), '(0, data_num, self.N)\n', (1544, 1565), True, 'import numpy as np\n'), ((1716, 1769), 'numpy.random.choice', 'np.random.choice', (['data_num', 'n_selected'], {'replace': '(False)'}), '(data_num, n_selected, replace=False)\n', (1732, 1769), True, 'import numpy as np\n'), ((3940, 3958), 'numpy.array', 'np.array', (['shift_3d'], {}), '(shift_3d)\n', (3948, 3958), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.utils
import torch.utils.data
import numpy as np
from torch.autograd import Variable
class VRNN(nn.Module):
def __init__(self, x_dim, h_dim, z_dim, n_layers, writer, bias=False):
super(VRNN, self).__init__()
self.x_dim = x_dim
self.h_dim = h_dim
self.z_dim = z_dim
self.n_layers = n_layers
self.writer = writer
# feature-extracting transformations
self.phi_x = nn.Sequential(
nn.Linear(x_dim, h_dim),
nn.LeakyReLU(),
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.phi_z = nn.Sequential(
nn.Linear(z_dim, h_dim),
nn.LeakyReLU())
# encoder
self.enc = nn.Sequential(
nn.Linear(h_dim + h_dim, h_dim),
nn.LeakyReLU(),
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.enc_mean = nn.Linear(h_dim, z_dim)
self.enc_logvar = nn.Linear(h_dim, z_dim) # nn.Softplus())
# prior
self.prior = nn.Sequential(
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.prior_mean = nn.Linear(h_dim, z_dim)
self.prior_logvar = nn.Linear(h_dim, z_dim) # nn.Softplus()
# decoder
self.dec = nn.Sequential(
nn.Linear(h_dim + h_dim, h_dim),
nn.LeakyReLU(),
nn.Linear(h_dim, h_dim),
nn.LeakyReLU())
self.dec_logvar = nn.Linear(h_dim, x_dim) # nn.Softplus()
self.dec_mean = nn.Sequential(nn.Linear(self.h_dim, self.x_dim), nn.Hardtanh(min_val=-10, max_val=10)) # nn.Sigmoid()
# recurrence
self.rnn = nn.GRU(h_dim + h_dim, h_dim, n_layers, bias)
#self.l_abs = nn.Linear(self.x_dim, self.h_dim)
def _encoder(self, phi_x_t, h):
enc_t = self.enc(torch.cat([phi_x_t, h[-1]], 1))
enc_mean_t = self.enc_mean(enc_t)
enc_logvar_t = self.enc_logvar(enc_t)
return enc_mean_t, enc_logvar_t
def _prior(self, h):
prior_t = self.prior(h[-1])
prior_mean_t = self.prior_mean(prior_t)
prior_logvar_t = self.prior_logvar(prior_t)
return prior_mean_t, prior_logvar_t
def _decoder(self, phi_z_t, h):
dec_t = self.dec(torch.cat([phi_z_t, h[-1]], 1))
dec_mean_t = self.dec_mean(dec_t)
dec_logvar_t = self.dec_logvar(dec_t)
return dec_mean_t, dec_logvar_t
def forward(self, x, obs_traj_in):
"""
Inputs:
- x: Tensor of shape (obs_len, batch, 2)
Output:
- final_h: Tensor of shape (self.num_layers, batch, self.h_dim)
"""
kld_loss, nll_loss = 0, 0
x_list, mean_list = [torch.zeros(2)], [torch.zeros(2)]
h = Variable(torch.zeros(self.n_layers, x.size(1), self.h_dim), requires_grad=True).cuda()
#h = self.l_abs(obs_traj_in.cuda()).unsqueeze(0)
for t in range(1, x.size(0)):
phi_x_t = self.phi_x(x[t])
# encoder mean and logvar
enc_mean_t, enc_logvar_t = self._encoder(phi_x_t, h)
# prior mean and logvar
prior_mean_t, prior_logvar_t = self._prior(h)
# sampling and reparameterization
z_t = self._reparameterized_sample(enc_mean_t, enc_logvar_t)
phi_z_t = self.phi_z(z_t.cuda())
# decoder
dec_mean_t, dec_logvar_t = self._decoder(phi_z_t, h)
# recurrence
_, h = self.rnn(torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
# computing losses
kld_loss += self._kld_gauss(enc_mean_t, enc_logvar_t, prior_mean_t, prior_logvar_t)
nll_loss += self._nll_gauss(dec_mean_t, dec_logvar_t, x[t])
"""
self.writer.add_histogram('input_trajectory', x[t], t)
self.writer.add_histogram('decoder_mean', dec_mean_t, t)
"""
x_list.append(x[t][0])
mean_list.append(dec_mean_t[0])
return kld_loss, nll_loss, (x_list, mean_list), h
def _generate_sample(self, h):
# prior mean and logvar
prior_mean_t, prior_logvar_t = self._prior(h)
# sampling and reparameterization
z_t = self._reparameterized_sample(prior_mean_t, prior_logvar_t)
phi_z_t = self.phi_z(z_t.cuda())
# decoder
dec_mean_t, dec_logvar_t = self._decoder(phi_z_t, h)
#sample_t = self._reparameterized_sample(dec_mean_t, dec_logvar_t)
return dec_mean_t, phi_z_t
def sample(self, seq_len, batch_dim, h_prec=None):
with torch.no_grad():
if h_prec is None:
h = Variable(torch.zeros(self.n_layers, 1, self.h_dim)).cuda()
sample = torch.zeros(seq_len, self.x_dim)
for t in range(seq_len):
sample_t, phi_z_t = self._generate_sample(h)
phi_x_t = self.phi_x(sample_t.view(1, -1).cuda())
sample[t] = sample_t.data
# recurrence
_, h = self.rnn(torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
else:
h = h_prec
sample = torch.zeros(seq_len, batch_dim, self.x_dim)
for t in range(seq_len):
sample_t, phi_z_t = self._generate_sample(h)
phi_x_t = self.phi_x(sample_t.cuda())
sample[t] = sample_t.data
# recurrence
_, h = self.rnn(torch.cat([phi_x_t, phi_z_t], 1).unsqueeze(0), h)
return sample
def reset_parameters(self, stdv=1e-1):
for weight in self.parameters():
weight.data.normal_(0, stdv)
def _init_weights(self, stdv):
pass
def _reparameterized_sample(self, mean, logvar):
"""Using std to sample"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std).cuda()
return mean + eps * std
def _kld_gauss(self, mean_enc, logvar_enc, mean_prior, logvar_prior):
"""Using std to compute KLD"""
x1 = torch.sum((logvar_prior - logvar_enc), dim=1)
x2 = torch.sum(torch.exp(logvar_enc - logvar_prior), dim=1)
x3 = torch.sum((mean_enc - mean_prior).pow(2) / (torch.exp(logvar_prior)), dim=1)
kld_element = x1 - mean_enc.size(1) + x2 + x3
return torch.mean(0.5 * kld_element)
def _nll_gauss(self, mean, logvar, x):
x1 = torch.sum(((x - mean).pow(2)) / torch.exp(logvar), dim=1)
x2 = x.size(1) * np.log(2 * np.pi)
x3 = torch.sum(logvar, dim=1)
nll = torch.mean(0.5 * (x1 + x2 + x3))
return nll | [
"torch.nn.Hardtanh",
"torch.nn.LeakyReLU",
"torch.mean",
"numpy.log",
"torch.exp",
"torch.randn_like",
"torch.sum",
"torch.nn.Linear",
"torch.no_grad",
"torch.zeros",
"torch.cat",
"torch.nn.GRU"
] | [((945, 968), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (954, 968), True, 'import torch.nn as nn\n'), ((996, 1019), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (1005, 1019), True, 'import torch.nn as nn\n'), ((1183, 1206), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (1192, 1206), True, 'import torch.nn as nn\n'), ((1236, 1259), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'z_dim'], {}), '(h_dim, z_dim)\n', (1245, 1259), True, 'import torch.nn as nn\n'), ((1495, 1518), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'x_dim'], {}), '(h_dim, x_dim)\n', (1504, 1518), True, 'import torch.nn as nn\n'), ((1705, 1749), 'torch.nn.GRU', 'nn.GRU', (['(h_dim + h_dim)', 'h_dim', 'n_layers', 'bias'], {}), '(h_dim + h_dim, h_dim, n_layers, bias)\n', (1711, 1749), True, 'import torch.nn as nn\n'), ((5882, 5905), 'torch.exp', 'torch.exp', (['(0.5 * logvar)'], {}), '(0.5 * logvar)\n', (5891, 5905), False, 'import torch\n'), ((6108, 6151), 'torch.sum', 'torch.sum', (['(logvar_prior - logvar_enc)'], {'dim': '(1)'}), '(logvar_prior - logvar_enc, dim=1)\n', (6117, 6151), False, 'import torch\n'), ((6381, 6410), 'torch.mean', 'torch.mean', (['(0.5 * kld_element)'], {}), '(0.5 * kld_element)\n', (6391, 6410), False, 'import torch\n'), ((6582, 6606), 'torch.sum', 'torch.sum', (['logvar'], {'dim': '(1)'}), '(logvar, dim=1)\n', (6591, 6606), False, 'import torch\n'), ((6621, 6653), 'torch.mean', 'torch.mean', (['(0.5 * (x1 + x2 + x3))'], {}), '(0.5 * (x1 + x2 + x3))\n', (6631, 6653), False, 'import torch\n'), ((509, 532), 'torch.nn.Linear', 'nn.Linear', (['x_dim', 'h_dim'], {}), '(x_dim, h_dim)\n', (518, 532), True, 'import torch.nn as nn\n'), ((546, 560), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (558, 560), True, 'import torch.nn as nn\n'), ((574, 597), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'h_dim'], {}), '(h_dim, h_dim)\n', (583, 597), True, 'import torch.nn as nn\n'), ((611, 625), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (623, 625), True, 'import torch.nn as nn\n'), ((676, 699), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'h_dim'], {}), '(z_dim, h_dim)\n', (685, 699), True, 'import torch.nn as nn\n'), ((713, 727), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (725, 727), True, 'import torch.nn as nn\n'), ((794, 825), 'torch.nn.Linear', 'nn.Linear', (['(h_dim + h_dim)', 'h_dim'], {}), '(h_dim + h_dim, h_dim)\n', (803, 825), True, 'import torch.nn as nn\n'), ((839, 853), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (851, 853), True, 'import torch.nn as nn\n'), ((867, 890), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'h_dim'], {}), '(h_dim, h_dim)\n', (876, 890), True, 'import torch.nn as nn\n'), ((904, 918), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (916, 918), True, 'import torch.nn as nn\n'), ((1103, 1126), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'h_dim'], {}), '(h_dim, h_dim)\n', (1112, 1126), True, 'import torch.nn as nn\n'), ((1140, 1154), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1152, 1154), True, 'import torch.nn as nn\n'), ((1342, 1373), 'torch.nn.Linear', 'nn.Linear', (['(h_dim + h_dim)', 'h_dim'], {}), '(h_dim + h_dim, h_dim)\n', (1351, 1373), True, 'import torch.nn as nn\n'), ((1387, 1401), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1399, 1401), True, 'import torch.nn as nn\n'), ((1415, 1438), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'h_dim'], {}), '(h_dim, h_dim)\n', (1424, 1438), True, 'import torch.nn as nn\n'), ((1452, 1466), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (1464, 1466), True, 'import torch.nn as nn\n'), ((1575, 1608), 'torch.nn.Linear', 'nn.Linear', (['self.h_dim', 'self.x_dim'], {}), '(self.h_dim, self.x_dim)\n', (1584, 1608), True, 'import torch.nn as nn\n'), ((1610, 1646), 'torch.nn.Hardtanh', 'nn.Hardtanh', ([], {'min_val': '(-10)', 'max_val': '(10)'}), '(min_val=-10, max_val=10)\n', (1621, 1646), True, 'import torch.nn as nn\n'), ((1869, 1899), 'torch.cat', 'torch.cat', (['[phi_x_t, h[-1]]', '(1)'], {}), '([phi_x_t, h[-1]], 1)\n', (1878, 1899), False, 'import torch\n'), ((2297, 2327), 'torch.cat', 'torch.cat', (['[phi_z_t, h[-1]]', '(1)'], {}), '([phi_z_t, h[-1]], 1)\n', (2306, 2327), False, 'import torch\n'), ((4611, 4626), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4624, 4626), False, 'import torch\n'), ((6177, 6213), 'torch.exp', 'torch.exp', (['(logvar_enc - logvar_prior)'], {}), '(logvar_enc - logvar_prior)\n', (6186, 6213), False, 'import torch\n'), ((6551, 6568), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6557, 6568), True, 'import numpy as np\n'), ((2738, 2752), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (2749, 2752), False, 'import torch\n'), ((2756, 2770), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (2767, 2770), False, 'import torch\n'), ((4763, 4795), 'torch.zeros', 'torch.zeros', (['seq_len', 'self.x_dim'], {}), '(seq_len, self.x_dim)\n', (4774, 4795), False, 'import torch\n'), ((5208, 5251), 'torch.zeros', 'torch.zeros', (['seq_len', 'batch_dim', 'self.x_dim'], {}), '(seq_len, batch_dim, self.x_dim)\n', (5219, 5251), False, 'import torch\n'), ((5920, 5941), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (5936, 5941), False, 'import torch\n'), ((6279, 6302), 'torch.exp', 'torch.exp', (['logvar_prior'], {}), '(logvar_prior)\n', (6288, 6302), False, 'import torch\n'), ((6500, 6517), 'torch.exp', 'torch.exp', (['logvar'], {}), '(logvar)\n', (6509, 6517), False, 'import torch\n'), ((3513, 3545), 'torch.cat', 'torch.cat', (['[phi_x_t, phi_z_t]', '(1)'], {}), '([phi_x_t, phi_z_t], 1)\n', (3522, 3545), False, 'import torch\n'), ((4688, 4729), 'torch.zeros', 'torch.zeros', (['self.n_layers', '(1)', 'self.h_dim'], {}), '(self.n_layers, 1, self.h_dim)\n', (4699, 4729), False, 'import torch\n'), ((5088, 5120), 'torch.cat', 'torch.cat', (['[phi_x_t, phi_z_t]', '(1)'], {}), '([phi_x_t, phi_z_t], 1)\n', (5097, 5120), False, 'import torch\n'), ((5532, 5564), 'torch.cat', 'torch.cat', (['[phi_x_t, phi_z_t]', '(1)'], {}), '([phi_x_t, phi_z_t], 1)\n', (5541, 5564), False, 'import torch\n')] |
import calendar
from collections import defaultdict
from datetime import datetime
from itertools import cycle, islice
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(20, 5))
ax = fig.add_subplot(1, 1, 1)
major_ticks = np.arange(0, 24 * 7, 24)
minor_ticks = np.arange(0, 24 * 7, 6)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_xticklabels([])
ax.set_xticklabels(islice(cycle([0, 6, 12, 18]), 4 * 7), minor=True, alpha=0.5)
ax.set_yticklabels([])
ax.set_yticks([])
ax.set_axisbelow(True)
# f = sys.stdin
f = open('timestamps.log')
events = defaultdict(lambda: defaultdict(int))
for datetime in (datetime.utcfromtimestamp(int(line.strip())) for line in f):
events[datetime.weekday()][datetime.hour] += 1
xs = range(24 * 7)
ys = []
for i, day in enumerate(calendar.day_name):
plt.text(1 / 14 + i * 1 / 7,
1,
day,
alpha=0.4,
horizontalalignment='center',
verticalalignment='bottom',
transform=ax.transAxes)
ys.extend(events[i][x] for x in range(24))
ax.grid(which='minor', alpha=0.2, axis='x')
ax.grid(which='major', alpha=1, axis='x')
plt.bar(xs, ys, align='edge', width=1, linewidth=1, alpha=0.5, edgecolor='black', facecolor='gray')
plt.margins(0, 0)
plt.savefig('schedule.png')
| [
"matplotlib.pyplot.text",
"itertools.cycle",
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"collections.defaultdict",
"datetime.datetime.weekday",
"matplotlib.pyplot.margins"
] | [((177, 204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (187, 204), True, 'import matplotlib.pyplot as plt\n'), ((250, 274), 'numpy.arange', 'np.arange', (['(0)', '(24 * 7)', '(24)'], {}), '(0, 24 * 7, 24)\n', (259, 274), True, 'import numpy as np\n'), ((289, 312), 'numpy.arange', 'np.arange', (['(0)', '(24 * 7)', '(6)'], {}), '(0, 24 * 7, 6)\n', (298, 312), True, 'import numpy as np\n'), ((1186, 1290), 'matplotlib.pyplot.bar', 'plt.bar', (['xs', 'ys'], {'align': '"""edge"""', 'width': '(1)', 'linewidth': '(1)', 'alpha': '(0.5)', 'edgecolor': '"""black"""', 'facecolor': '"""gray"""'}), "(xs, ys, align='edge', width=1, linewidth=1, alpha=0.5, edgecolor=\n 'black', facecolor='gray')\n", (1193, 1290), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1303), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 0)\n', (1297, 1303), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1331), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""schedule.png"""'], {}), "('schedule.png')\n", (1315, 1331), True, 'import matplotlib.pyplot as plt\n'), ((844, 978), 'matplotlib.pyplot.text', 'plt.text', (['(1 / 14 + i * 1 / 7)', '(1)', 'day'], {'alpha': '(0.4)', 'horizontalalignment': '"""center"""', 'verticalalignment': '"""bottom"""', 'transform': 'ax.transAxes'}), "(1 / 14 + i * 1 / 7, 1, day, alpha=0.4, horizontalalignment=\n 'center', verticalalignment='bottom', transform=ax.transAxes)\n", (852, 978), True, 'import matplotlib.pyplot as plt\n'), ((429, 450), 'itertools.cycle', 'cycle', (['[0, 6, 12, 18]'], {}), '([0, 6, 12, 18])\n', (434, 450), False, 'from itertools import cycle, islice\n'), ((621, 637), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (632, 637), False, 'from collections import defaultdict\n'), ((728, 746), 'datetime.datetime.weekday', 'datetime.weekday', ([], {}), '()\n', (744, 746), False, 'from datetime import datetime\n')] |
import cv2
import numpy as np
import tensorflow as tf
from numpy.linalg import inv
from scipy import sparse
def getLaplacian1(I,consts,win_size = 1,eps = 1e-5):
neb_size = (win_size*2+1)*(win_size*2+1)
h,w,c = I.shape
n = h
m = w
img_size = w*h
consts = cv2.erode(consts,-1,element = np.ones(win_size*2+1))
indsM = np.arange(1,img_size).reshape(h,w,order='F');
tlen = int((-consts[win_size+1:win_size,win_size+1:win_size]+1).sum()*(neb_size**2))
row_inds = np.zeros(tlen)
col_inds = np.zeros(tlen)
vals = np.zeros(tlen)
len = 0;
for j in range(1+win_size,w-win_size):
for i in range(win_size+1,h-win_size):
if consts[i,j] == 1:
continue
win_inds = indsM[i - win_size : i+win_size,j-win_size:j+win_size]
win_inds = win_inds.ravel(order='F')
winI = I[i - win_size:i+win_size,j - win_size:j+win_size]
winI = winI.reshape(neb_size, c)
win_mu = np.mean(winI,axis=0).reshape(c,1)
win_var = np.linalg.inv(winI*winI/neb_size - win_mu*win_mu.T+eps/neb_size*np.identity(c))
winII = winI -np.repeat(win_mu.transpose(),neb_size,0)
tvals = (1+winII*win_var*winII.T)/neb_size
ind_mat = np.broadcast_to(win_inds,(neb_size,neb_size))
row_inds[len:neb_size**2+len] = ind_mat.ravel(order='C')
col_inds[len:neb_size**2+len] = ind_mat.ravel(order='F')
vals[1+len:neb_size**2,len]=tvals.ravel(order='F')
len = len+neb_size**2
vals = vals.ravel(order='F')[0:len]
row_inds = row_inds.ravel(order='F')[0:len]
col_inds = col_inds.ravel(order='F')[0:len]
A = sparse.csr_matrix((vals,(row_inds,col_inds)),shape=(img_size,img_size))
sumA = np.sum(A,1).T.tolist()[0]
A = sparse.diags([sumA],[0],shape = (img_size,img_size)) - A
return A
def getLaplacian(img):
h,w,_ = img.shape
coo = getLaplacian1(img,np.zeros(shape=(h,w))).tocoo()
idx = np.mat([coo.row,coo.col]).transpose()
return tf.SparseTensor(idx,coo.data,coo.shape) | [
"numpy.identity",
"numpy.mat",
"numpy.mean",
"numpy.ones",
"tensorflow.SparseTensor",
"numpy.sum",
"numpy.zeros",
"scipy.sparse.diags",
"scipy.sparse.csr_matrix",
"numpy.broadcast_to",
"numpy.arange"
] | [((497, 511), 'numpy.zeros', 'np.zeros', (['tlen'], {}), '(tlen)\n', (505, 511), True, 'import numpy as np\n'), ((527, 541), 'numpy.zeros', 'np.zeros', (['tlen'], {}), '(tlen)\n', (535, 541), True, 'import numpy as np\n'), ((553, 567), 'numpy.zeros', 'np.zeros', (['tlen'], {}), '(tlen)\n', (561, 567), True, 'import numpy as np\n'), ((1702, 1777), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['(vals, (row_inds, col_inds))'], {'shape': '(img_size, img_size)'}), '((vals, (row_inds, col_inds)), shape=(img_size, img_size))\n', (1719, 1777), False, 'from scipy import sparse\n'), ((2053, 2094), 'tensorflow.SparseTensor', 'tf.SparseTensor', (['idx', 'coo.data', 'coo.shape'], {}), '(idx, coo.data, coo.shape)\n', (2068, 2094), True, 'import tensorflow as tf\n'), ((1820, 1873), 'scipy.sparse.diags', 'sparse.diags', (['[sumA]', '[0]'], {'shape': '(img_size, img_size)'}), '([sumA], [0], shape=(img_size, img_size))\n', (1832, 1873), False, 'from scipy import sparse\n'), ((310, 335), 'numpy.ones', 'np.ones', (['(win_size * 2 + 1)'], {}), '(win_size * 2 + 1)\n', (317, 335), True, 'import numpy as np\n'), ((346, 368), 'numpy.arange', 'np.arange', (['(1)', 'img_size'], {}), '(1, img_size)\n', (355, 368), True, 'import numpy as np\n'), ((1274, 1321), 'numpy.broadcast_to', 'np.broadcast_to', (['win_inds', '(neb_size, neb_size)'], {}), '(win_inds, (neb_size, neb_size))\n', (1289, 1321), True, 'import numpy as np\n'), ((2004, 2030), 'numpy.mat', 'np.mat', (['[coo.row, coo.col]'], {}), '([coo.row, coo.col])\n', (2010, 2030), True, 'import numpy as np\n'), ((1963, 1985), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h, w)'}), '(shape=(h, w))\n', (1971, 1985), True, 'import numpy as np\n'), ((992, 1013), 'numpy.mean', 'np.mean', (['winI'], {'axis': '(0)'}), '(winI, axis=0)\n', (999, 1013), True, 'import numpy as np\n'), ((1786, 1798), 'numpy.sum', 'np.sum', (['A', '(1)'], {}), '(A, 1)\n', (1792, 1798), True, 'import numpy as np\n'), ((1112, 1126), 'numpy.identity', 'np.identity', (['c'], {}), '(c)\n', (1123, 1126), True, 'import numpy as np\n')] |
"""
This example code illustrates how to access and reproject a TerraFusion
Advanced Fusion file in Python.
Usage: save this script and run
$python modis2ug.rn.py
The HDF file must be in your current working directory.
Tested under: Python 3.6.6 :: Anaconda custom (64-bit)
Last updated: 2019-04-05
"""
import h5py
import pytaf
import numpy as np
# Open AF file.
file_name = 'misr_on_modis_SrcLowAnAfBlueGreen_Trg1KM8_9_69365.h5'
# Generate 1-d lat/lon.
cellSize = 0.05
x0, xinc, y0, yinc = (-180, cellSize, 90, -cellSize)
nx, ny = (360*20, 180*20)
x = np.linspace(x0, x0 + xinc*nx, nx)
y = np.linspace(y0, y0 + yinc*ny, ny)
with h5py.File(file_name, 'r') as f:
# Read MODIS Radiance dataset.
modis_dset = f['/Target/Data_Fields/MODIS_Radiance']
modis_data = modis_dset[0,:,:].astype(np.float64)
print(modis_data[0,0:10])
# Read source lat/lon dataset.
modis_ds_lat = f['/Geolocation/Latitude']
modis_lat = modis_ds_lat[:,:].astype(np.float64)
modis_ds_lon = f['/Geolocation/Longitude']
modis_lon = modis_ds_lon[:,:].astype(np.float64)
f.close()
# Set max radius.
M_PI=3.14159265358979323846
earthRadius = 6367444
max_r = earthRadius * cellSize * M_PI / 180
index = np.arange(nx*ny, dtype=np.int32)
distance = np.arange(nx*ny, dtype=np.float64).reshape((ny,nx))
# Kent: try nnInterploate first.
# In the summaryInterpolate, tarSD and nSouPixels are also output parameters.
n_src = modis_lat.size;
print(n_src)
n_trg = nx * ny;
print(n_trg)
# Find indexes of nearest neighbor point.
trg_data = pytaf.resample_n_g(modis_lat, modis_lon,
x, y, modis_data, max_r)
print(trg_data)
print('Finished retrieving data with index.')
# Open file for writing.
f2 = h5py.File('modis2ug.rn.h5', 'w')
dset = f2.create_dataset('/UG_Radiance', data=trg_data)
dset_lat = f2.create_dataset('/Latitude', data=y)
dset_lon = f2.create_dataset('/Longitude', data=x)
# TODO: Add CF attributes on dataset.
f2.close()
| [
"numpy.linspace",
"h5py.File",
"numpy.arange",
"pytaf.resample_n_g"
] | [((564, 599), 'numpy.linspace', 'np.linspace', (['x0', '(x0 + xinc * nx)', 'nx'], {}), '(x0, x0 + xinc * nx, nx)\n', (575, 599), True, 'import numpy as np\n'), ((602, 637), 'numpy.linspace', 'np.linspace', (['y0', '(y0 + yinc * ny)', 'ny'], {}), '(y0, y0 + yinc * ny, ny)\n', (613, 637), True, 'import numpy as np\n'), ((1216, 1250), 'numpy.arange', 'np.arange', (['(nx * ny)'], {'dtype': 'np.int32'}), '(nx * ny, dtype=np.int32)\n', (1225, 1250), True, 'import numpy as np\n'), ((1545, 1610), 'pytaf.resample_n_g', 'pytaf.resample_n_g', (['modis_lat', 'modis_lon', 'x', 'y', 'modis_data', 'max_r'], {}), '(modis_lat, modis_lon, x, y, modis_data, max_r)\n', (1563, 1610), False, 'import pytaf\n'), ((1735, 1767), 'h5py.File', 'h5py.File', (['"""modis2ug.rn.h5"""', '"""w"""'], {}), "('modis2ug.rn.h5', 'w')\n", (1744, 1767), False, 'import h5py\n'), ((642, 667), 'h5py.File', 'h5py.File', (['file_name', '"""r"""'], {}), "(file_name, 'r')\n", (651, 667), False, 'import h5py\n'), ((1260, 1296), 'numpy.arange', 'np.arange', (['(nx * ny)'], {'dtype': 'np.float64'}), '(nx * ny, dtype=np.float64)\n', (1269, 1296), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import dolfin as df
import numpy as np
import sys
try:
solver_type = sys.argv[1]
except IndexError:
print("Usage: time_solver_types.py SOLVER_TYPE [N]")
sys.exit()
try:
N = int(sys.argv[2])
except IndexError:
N = 3
from finmag.example import barmini, bar
from finmag.example.normal_modes import disk
timings = []
for i in xrange(N):
print("[DDD] Starting run #{}".format(i))
#sim = bar(demag_solver_type=solver_type)
sim = disk(d=100, h=10, maxh=3.0, relaxed=False, demag_solver_type=solver_type)
df.tic()
sim.relax()
timings.append(df.toc())
print("Latest run took {:.2f} seconds.".format(timings[-1]))
print("Timings (in seconds): {}".format(['{:.2f}'.format(t) for t in timings]))
print("Mean: {:.2f}".format(np.mean(timings)))
print("Median: {:.2f}".format(np.median(timings)))
print("Fastest: {:.2f}".format(np.min(timings)))
| [
"numpy.mean",
"numpy.median",
"dolfin.tic",
"dolfin.toc",
"numpy.min",
"sys.exit",
"finmag.example.normal_modes.disk"
] | [((482, 555), 'finmag.example.normal_modes.disk', 'disk', ([], {'d': '(100)', 'h': '(10)', 'maxh': '(3.0)', 'relaxed': '(False)', 'demag_solver_type': 'solver_type'}), '(d=100, h=10, maxh=3.0, relaxed=False, demag_solver_type=solver_type)\n', (486, 555), False, 'from finmag.example.normal_modes import disk\n'), ((560, 568), 'dolfin.tic', 'df.tic', ([], {}), '()\n', (566, 568), True, 'import dolfin as df\n'), ((189, 199), 'sys.exit', 'sys.exit', ([], {}), '()\n', (197, 199), False, 'import sys\n'), ((604, 612), 'dolfin.toc', 'df.toc', ([], {}), '()\n', (610, 612), True, 'import dolfin as df\n'), ((787, 803), 'numpy.mean', 'np.mean', (['timings'], {}), '(timings)\n', (794, 803), True, 'import numpy as np\n'), ((836, 854), 'numpy.median', 'np.median', (['timings'], {}), '(timings)\n', (845, 854), True, 'import numpy as np\n'), ((888, 903), 'numpy.min', 'np.min', (['timings'], {}), '(timings)\n', (894, 903), True, 'import numpy as np\n')] |
import os
import sys
import logging
import multiprocessing
import time
from flask import Flask, request, jsonify
from flask_cors import CORS
import io
from io import BytesIO
from PIL import Image
import cv2
import numpy as np
from worker import get_model_api
# define the app
app = Flask(__name__)
CORS(app) # needed for cross-domain requests, allow everything by default
UPLOAD_FOLDER = os.path.basename('uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# global
#global timeout_secs
#timeout_secs = 3
# load the model
model_api = get_model_api()
# API route
@app.route('/answer/blank', methods=['POST'])
def mathreco():
"""API function
All model-specific logic to be defined in the get_model_api()
function
"""
try:
request_id=request.form['id']
except:
request_id=888
try:
color_space=request.form['color']
except:
color_space='sRGB'
file = request.files['image']
filename = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(filename)
app.logger.debug("api_input: " + filename)
if color_space != 'sRGB':
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img2 = np.zeros_like(img)
img2[:,:,0] = gray
img2[:,:,1] = gray
img2[:,:,2] = gray
cv2.imwrite(filename, img2)
app.logger.debug("color space converted : " + filename)
manager = multiprocessing.Manager()
return_list = manager.list()
p = multiprocessing.Process(target=model_api, args=(request_id, filename,return_list))
p.start()
for _ in range(20*timeout_secs):
# check worker every 50 ms
time.sleep(0.05)
if len(return_list) > 0 :
output_data =return_list[0]
#app.logger.debug("api_output: " + str(output_data))
# Cleanup
p.terminate()
p.join()
response = jsonify(output_data)
return response
#output_data = model_api(input_data, return_dict)
# Terminate worker after timeout
app.logger.debug("Timeout")
p.terminate()
output_data={}
output_data['status']='Timeout'
output_data['info']=1000*timeout_secs
response = jsonify(output_data)
return response
@app.route('/')
def index():
return "Index API"
# HTTP Errors handlers
@app.errorhandler(404)
def url_error(e):
return """
Wrong URL!
<pre>{}</pre>""".format(e), 404
@app.errorhandler(500)
def server_error(e):
return """
An internal error occurred: <pre>{}</pre>
See logs for full stacktrace.
""".format(e), 500
if __name__ == '__main__':
global timeout_secs
try:
timeout_secs = int(os.environ["TIMEOUT_SECS"])
app.logger.info("timout "+str(timeout_secs))
except:
timeout_secs=3
app.logger.info("default timout "+str(timeout_secs))
try:
mathreco_port=int(os.environ["MATHRECO_PORT"])
print ("port: ", mathreco_port)
app.logger.info("port "+str(mathreco_port))
except:
mathreco_port=8686;
app.logger.info("default port 8080")
# This is used when running locally.
app.run(host='0.0.0.0',port=mathreco_port, debug=True)
| [
"cv2.imwrite",
"flask_cors.CORS",
"flask.Flask",
"worker.get_model_api",
"multiprocessing.Process",
"os.path.join",
"numpy.zeros_like",
"time.sleep",
"os.path.basename",
"cv2.cvtColor",
"multiprocessing.Manager",
"cv2.imread",
"flask.jsonify"
] | [((287, 302), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (292, 302), False, 'from flask import Flask, request, jsonify\n'), ((303, 312), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (307, 312), False, 'from flask_cors import CORS\n'), ((395, 422), 'os.path.basename', 'os.path.basename', (['"""uploads"""'], {}), "('uploads')\n", (411, 422), False, 'import os\n'), ((546, 561), 'worker.get_model_api', 'get_model_api', ([], {}), '()\n', (559, 561), False, 'from worker import get_model_api\n'), ((974, 1030), 'os.path.join', 'os.path.join', (["app.config['UPLOAD_FOLDER']", 'file.filename'], {}), "(app.config['UPLOAD_FOLDER'], file.filename)\n", (986, 1030), False, 'import os\n'), ((1452, 1477), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (1475, 1477), False, 'import multiprocessing\n'), ((1519, 1606), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'model_api', 'args': '(request_id, filename, return_list)'}), '(target=model_api, args=(request_id, filename,\n return_list))\n', (1542, 1606), False, 'import multiprocessing\n'), ((2246, 2266), 'flask.jsonify', 'jsonify', (['output_data'], {}), '(output_data)\n', (2253, 2266), False, 'from flask import Flask, request, jsonify\n'), ((1148, 1168), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1158, 1168), False, 'import cv2\n'), ((1184, 1221), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1196, 1221), False, 'import cv2\n'), ((1237, 1255), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (1250, 1255), True, 'import numpy as np\n'), ((1345, 1372), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'img2'], {}), '(filename, img2)\n', (1356, 1372), False, 'import cv2\n'), ((1694, 1710), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (1704, 1710), False, 'import time\n'), ((1943, 1963), 'flask.jsonify', 'jsonify', (['output_data'], {}), '(output_data)\n', (1950, 1963), False, 'from flask import Flask, request, jsonify\n')] |
"""
"Gradien-free methods, such as Nelder-Mead simplex algorithm, is used to
minimize a non-linear function containing two variables. This method,
particularly, is fairly robust and works even if very little is known about
the objective function. However, in many situations, we do know more about
the objective function, and this fact allows us to devise faster and more
efficient algorithms for minimizing the function. We can do this by making
use of properties such as the gradient of the function.
The gradient of a function of more than one variable describes the rate of
change of the function in each of its component directions. This is a vector
of the partial derivatives of the function with respect to each of the
variables. From this gradient vector, we can deduce the direction in which
the function is increasing most rapidly and, conversely, the direction in which
the function is decreasing most rapidly from any given position. This gives us
the basis for gradient descent methods for minimizing a function. The algorithm
is very simple: given a starting position, x, we compute the gradient at this x
and the corresponding direction in which the gradient is most rapidly decreasing,
then make a small step in that direction. After a few iterations, this will move
from the starting position to the minimum of the function.
This module illustrates how to implement an algorithm based on the steepest
descent algorithm to minimize an objective function within a bounded region.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def descend(func, x0, grad, bounds, tol=1e-8, max_iter=100):
xn = x0
xnm1 = np.inf
grad_xn = grad(x0)
for i in range(max_iter):
if np.linalg.norm(xn - xnm1) < tol:
break
direction = -grad_xn
xnm1 = xn
xn = xn + 0.2 * direction
grad_xn = grad(xn)
yield i, xn, func(xn), grad_xn
def func(x):
return ((x[0] - 0.5)**2 + (x[1] + 0.5)**2)*np.cos(0.5*x[0]*x[1])
def grad(x):
c1 = x[0]**2 - x[0] + x[1]**2 + 0.5
cos_t = np.cos(0.5*x[0]*x[1])
sin_t = np.sin(0.5*x[0]*x[1])
return np.array([
(2*x[0]-1)*cos_t - 0.5*x[1]*c1*sin_t,
(2*x[1]+1)*cos_t - 0.5*x[0]*c1*sin_t])
x_r = np.linspace(-1, 1)
y_r = np.linspace(-2, 2)
x, y = np.meshgrid(x_r, y_r)
z = func([x, y])
surf_fig = plt.figure(tight_layout=True)
surf_ax = surf_fig.add_subplot(projection="3d")
surf_ax.tick_params(axis="both", which="major", labelsize=9)
surf_ax.set(xlabel="x", ylabel="y", zlabel="z")
surf_ax.set_title("Objective function")
surf_ax.plot_surface(x, y, z, alpha=0.7)
x0 = np.array([-0.8, 1.3])
surf_ax.plot([x0[0]], [x0[1]], func(x0), "r*")
cont_fig, cont_ax = plt.subplots()
cont_ax.set(xlabel="x", ylabel="y")
cont_ax.set_title("Contour plot with iterates")
cont_ax.contour(x, y, z, levels=30)
bounds = ((-1, 1), (-2, 2))
xnm1 = x0
for i, xn, fxn, grad_xn in descend(func, x0, grad, bounds):
cont_ax.plot([xnm1[0], xn[0]], [xnm1[1], xn[1]], "k*--")
xnm1, grad_xnm1 = xn, grad_xn
print(f"iterations={i}")
print(f"min val at {xn}")
print(f"min func value = {fxn}")
surf_ax.plot([xn[0]], [xn[1]], func(xn), "r*")
plt.show()
| [
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2280, 2298), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2291, 2298), True, 'import numpy as np\n'), ((2305, 2323), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)'], {}), '(-2, 2)\n', (2316, 2323), True, 'import numpy as np\n'), ((2331, 2352), 'numpy.meshgrid', 'np.meshgrid', (['x_r', 'y_r'], {}), '(x_r, y_r)\n', (2342, 2352), True, 'import numpy as np\n'), ((2382, 2411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'tight_layout': '(True)'}), '(tight_layout=True)\n', (2392, 2411), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2677), 'numpy.array', 'np.array', (['[-0.8, 1.3]'], {}), '([-0.8, 1.3])\n', (2664, 2677), True, 'import numpy as np\n'), ((2746, 2760), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2758, 2760), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3220), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3218, 3220), True, 'import matplotlib.pyplot as plt\n'), ((2101, 2126), 'numpy.cos', 'np.cos', (['(0.5 * x[0] * x[1])'], {}), '(0.5 * x[0] * x[1])\n', (2107, 2126), True, 'import numpy as np\n'), ((2135, 2160), 'numpy.sin', 'np.sin', (['(0.5 * x[0] * x[1])'], {}), '(0.5 * x[0] * x[1])\n', (2141, 2160), True, 'import numpy as np\n'), ((2169, 2283), 'numpy.array', 'np.array', (['[(2 * x[0] - 1) * cos_t - 0.5 * x[1] * c1 * sin_t, (2 * x[1] + 1) * cos_t -\n 0.5 * x[0] * c1 * sin_t]'], {}), '([(2 * x[0] - 1) * cos_t - 0.5 * x[1] * c1 * sin_t, (2 * x[1] + 1) *\n cos_t - 0.5 * x[0] * c1 * sin_t])\n', (2177, 2283), True, 'import numpy as np\n'), ((2013, 2038), 'numpy.cos', 'np.cos', (['(0.5 * x[0] * x[1])'], {}), '(0.5 * x[0] * x[1])\n', (2019, 2038), True, 'import numpy as np\n'), ((1752, 1777), 'numpy.linalg.norm', 'np.linalg.norm', (['(xn - xnm1)'], {}), '(xn - xnm1)\n', (1766, 1777), True, 'import numpy as np\n')] |
"""
Correlation Map
===============================================================================
>>> from techminer2 import *
>>> directory = "data/"
>>> file_name = "sphinx/images/auto_corr_map.png"
>>> matrix = auto_corr_matrix('authors', min_occ=2, directory=directory)
>>> correlation_map(matrix).savefig(file_name)
.. image:: images/auto_corr_map.png
:width: 700px
:align: center
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import networkx as nx
# pyltin: disable=c0103
# pylint: disable=too-many-arguments
# pylint: disable=invalid-name
def _get_edges(matrix):
# build network matrices for plotting with networkx
matrix = matrix.copy()
# diag sup = 0
n_cols = len(matrix.columns)
for i in range(n_cols):
for j in range(i, n_cols):
matrix.iloc[i, j] = 0.0
# selects lower diagonal of the matrix
edges = pd.melt(
matrix,
var_name="target",
value_name="value",
ignore_index=False,
)
edges = edges.reset_index()
edges = edges.rename(columns={edges.columns[0]: "source"})
edges = edges[edges.value > 0]
# line width
edges = edges.assign(weight=edges.value.map(lambda x: 2 if x > 0.75 else 1))
# line style
edges = edges.assign(
style=edges.value.map(
lambda x: "-" if x >= 0.50 else ("--" if x > 0.25 else ":")
)
)
return edges
def _get_nodes(matrix):
nodes = [(a, b, c) for a, b, c in matrix.columns]
nodes = pd.DataFrame(nodes, columns=["name", "num_documents", "global_citations"])
# node sizes
nodes = nodes.assign(node_size=nodes.num_documents / nodes.num_documents.max())
nodes = nodes.assign(node_size=100 + 900 * nodes.node_size)
# node colors
nodes = nodes.assign(alpha=nodes.global_citations / nodes.global_citations.max())
nodes = nodes.assign(alpha=0.2 + 0.6 * nodes.alpha)
return nodes
def correlation_map(
correlation_matrix,
cmap="Greys",
nx_iterations=200,
nx_k=1e-3,
nx_scale=1.0,
nx_random_state=None,
figsize=(6, 6),
):
# computos
matrix = correlation_matrix.copy()
nodes = _get_nodes(matrix)
edges = _get_edges(matrix)
# Networkx
fig = plt.Figure(figsize=figsize)
cmap = plt.cm.get_cmap(cmap)
ax = fig.subplots()
G = nx.Graph(ax=ax)
G.clear()
# add nodes
for _, row in nodes.iterrows():
G.add_node(
row["name"],
node_size=row["node_size"],
)
# add edges
for _, row in edges.iterrows():
G.add_edge(
row["source"],
row["target"],
weight=row["weight"],
style=row["style"],
)
# plot ---------------
node_sizes = list(nx.get_node_attributes(G, "node_size").values())
edge_styles = nx.get_edge_attributes(G, "style").values()
edge_weights = list(nx.get_edge_attributes(G, "weight").values())
## node positions
pos = nx.spring_layout(
G,
iterations=nx_iterations,
k=nx_k,
scale=nx_scale,
seed=nx_random_state,
)
# draws the network
nx.draw(
G,
with_labels=False,
font_size=7,
font_weight="regular",
node_color="k",
width=edge_weights,
# node_color=colors,
node_size=node_sizes,
edge_color="grey",
style=edge_styles,
alpha=0.6,
pos=pos,
ax=ax,
)
ax.collections[0].set_edgecolor("k")
# plot centers as black dots
x_points = [value[0] for value in pos.values()]
y_points = [value[1] for value in pos.values()]
ax.scatter(
x_points,
y_points,
marker="o",
s=30,
c="k",
alpha=1.0,
zorder=10,
)
# Center of the plot
x_mean = sum(x_points) / len(x_points)
y_mean = sum(y_points) / len(y_points)
xlim = ax.get_xlim()
ylim = ax.get_ylim()
factor = 0.05
rx = factor * (xlim[1] - xlim[0])
ry = factor * (ylim[1] - ylim[0])
radious = np.sqrt(rx**2 + ry**2)
for label in nodes.name:
x_point, y_point = pos[label]
x_c = x_point - x_mean
y_c = y_point - y_mean
angle = np.arctan(np.abs(y_c / x_c))
x_label = x_point + np.copysign(radious * np.cos(angle), x_c)
y_label = y_point + np.copysign(radious * np.sin(angle), y_c)
ha = "left" if x_point > x_mean else "right"
va = "center"
ax.text(
x_label,
y_label,
s=label,
fontsize=7,
bbox=dict(
facecolor="w",
alpha=1.0,
edgecolor="gray",
boxstyle="round,pad=0.5",
),
horizontalalignment=ha,
verticalalignment=va,
alpha=0.9,
zorder=13,
)
ax.plot(
[x_point, x_label],
[y_point, y_label],
lw=1,
ls="-",
c="k",
zorder=13,
)
fig.set_tight_layout(True)
return fig
| [
"numpy.abs",
"matplotlib.pyplot.cm.get_cmap",
"numpy.sqrt",
"numpy.sin",
"networkx.get_edge_attributes",
"matplotlib.pyplot.Figure",
"networkx.spring_layout",
"networkx.Graph",
"networkx.get_node_attributes",
"numpy.cos",
"pandas.DataFrame",
"pandas.melt",
"networkx.draw"
] | [((909, 983), 'pandas.melt', 'pd.melt', (['matrix'], {'var_name': '"""target"""', 'value_name': '"""value"""', 'ignore_index': '(False)'}), "(matrix, var_name='target', value_name='value', ignore_index=False)\n", (916, 983), True, 'import pandas as pd\n'), ((1526, 1600), 'pandas.DataFrame', 'pd.DataFrame', (['nodes'], {'columns': "['name', 'num_documents', 'global_citations']"}), "(nodes, columns=['name', 'num_documents', 'global_citations'])\n", (1538, 1600), True, 'import pandas as pd\n'), ((2260, 2287), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2270, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2299, 2320), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (2314, 2320), True, 'import matplotlib.pyplot as plt\n'), ((2353, 2368), 'networkx.Graph', 'nx.Graph', ([], {'ax': 'ax'}), '(ax=ax)\n', (2361, 2368), True, 'import networkx as nx\n'), ((3000, 3096), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {'iterations': 'nx_iterations', 'k': 'nx_k', 'scale': 'nx_scale', 'seed': 'nx_random_state'}), '(G, iterations=nx_iterations, k=nx_k, scale=nx_scale, seed=\n nx_random_state)\n', (3016, 3096), True, 'import networkx as nx\n'), ((3168, 3365), 'networkx.draw', 'nx.draw', (['G'], {'with_labels': '(False)', 'font_size': '(7)', 'font_weight': '"""regular"""', 'node_color': '"""k"""', 'width': 'edge_weights', 'node_size': 'node_sizes', 'edge_color': '"""grey"""', 'style': 'edge_styles', 'alpha': '(0.6)', 'pos': 'pos', 'ax': 'ax'}), "(G, with_labels=False, font_size=7, font_weight='regular',\n node_color='k', width=edge_weights, node_size=node_sizes, edge_color=\n 'grey', style=edge_styles, alpha=0.6, pos=pos, ax=ax)\n", (3175, 3365), True, 'import networkx as nx\n'), ((4087, 4113), 'numpy.sqrt', 'np.sqrt', (['(rx ** 2 + ry ** 2)'], {}), '(rx ** 2 + ry ** 2)\n', (4094, 4113), True, 'import numpy as np\n'), ((2853, 2887), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['G', '"""style"""'], {}), "(G, 'style')\n", (2875, 2887), True, 'import networkx as nx\n'), ((4268, 4285), 'numpy.abs', 'np.abs', (['(y_c / x_c)'], {}), '(y_c / x_c)\n', (4274, 4285), True, 'import numpy as np\n'), ((2785, 2823), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""node_size"""'], {}), "(G, 'node_size')\n", (2807, 2823), True, 'import networkx as nx\n'), ((2921, 2956), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['G', '"""weight"""'], {}), "(G, 'weight')\n", (2943, 2956), True, 'import networkx as nx\n'), ((4337, 4350), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4343, 4350), True, 'import numpy as np\n'), ((4407, 4420), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4413, 4420), True, 'import numpy as np\n')] |
import numpy as np
def rotate_pos(pos, angle):
""" Transformation the coordinate in the angle
Args:
pos (numpy.ndarray): local state, shape(data_size, 2)
angle (float): rotate angle, in radians
Returns:
rotated_pos (numpy.ndarray): shape(data_size, 2)
"""
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return np.dot(pos, rot_mat.T)
def fit_angle_in_range(angles, min_angle=-np.pi, max_angle=np.pi):
""" Check angle range and correct the range
Args:
angle (numpy.ndarray): in radians
min_angle (float): maximum of range in radians, default -pi
max_angle (float): minimum of range in radians, default pi
Returns:
fitted_angle (numpy.ndarray): range angle in radians
"""
if max_angle < min_angle:
raise ValueError("max angle must be greater than min angle")
if (max_angle - min_angle) < 2.0 * np.pi:
raise ValueError("difference between max_angle \
and min_angle must be greater than 2.0 * pi")
output = np.array(angles)
output_shape = output.shape
output = output.flatten()
output -= min_angle
output %= 2 * np.pi
output += 2 * np.pi
output %= 2 * np.pi
output += min_angle
output = np.minimum(max_angle, np.maximum(min_angle, output))
return output.reshape(output_shape) | [
"numpy.array",
"numpy.dot",
"numpy.cos",
"numpy.sin",
"numpy.maximum"
] | [((429, 451), 'numpy.dot', 'np.dot', (['pos', 'rot_mat.T'], {}), '(pos, rot_mat.T)\n', (435, 451), True, 'import numpy as np\n'), ((1135, 1151), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (1143, 1151), True, 'import numpy as np\n'), ((1371, 1400), 'numpy.maximum', 'np.maximum', (['min_angle', 'output'], {}), '(min_angle, output)\n', (1381, 1400), True, 'import numpy as np\n'), ((328, 341), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (334, 341), True, 'import numpy as np\n'), ((385, 398), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (391, 398), True, 'import numpy as np\n'), ((400, 413), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (406, 413), True, 'import numpy as np\n'), ((344, 357), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (350, 357), True, 'import numpy as np\n')] |
import asyncio
import struct
import sys
import time
import datetime
import atexit
import time
import numpy as np
from bleak import BleakClient
import matplotlib.pyplot as plt
from bleak import exc
import pandas as pd
import atexit
# Nordic NUS characteristic for RX, which should be writable`
UART_RX_UUID = "6e400002-b5a3-f393-e0a9-e50e24dcca9e"
# Nordic NUS characteristic for TX, which should be readable
UART_TX_UUID = "6e400003-b5a3-f393-e0a9-e50e24dcca9e"
sensors = np.zeros((9, 3))
result = []
name = ['Time Stamp', 'Sensor 1', 'Sensor 2', 'Sensor 3',
'Sensor 4', 'Sensor 5', 'Sensor 6', 'Sensor 7', 'Sensor 8']
@atexit.register
def clean():
print("Output csv")
test = pd.DataFrame(columns=name, data=result)
test.to_csv("file_name.csv")
print("Exited")
def notification_handler(sender, data):
"""Simple notification handler which prints the data received."""
num = 8
global sensors
global result
current = [datetime.datetime.now()]
for i in range(num):
sensors[i, 0] = struct.unpack('f', data[12 * i: 12 * i + 4])[0]
sensors[i, 1] = struct.unpack('f', data[12 * i + 4: 12 * i + 8])[0]
sensors[i, 2] = struct.unpack('f', data[12 * i + 8: 12 * i + 12])[0]
print("Sensor " + str(i+1)+": " +
str(sensors[i, 0]) + ", " + str(sensors[i, 1]) + ", " + str(sensors[i, 2]))
current.append(
"("+str(sensors[i, 0]) + ", " + str(sensors[i, 1]) + ", " + str(sensors[i, 2])+")")
#battery_voltage = struct.unpack('f', data[12 * num: 12 * num + 4])[0]
#print("Battery voltage: " + str(battery_voltage))
print("############")
result.append(current)
async def run(address, loop):
async with BleakClient(address, loop=loop) as client:
# wait for BLE client to be connected
x = await client.is_connected()
print("Connected: {0}".format(x))
print("Press Enter to quit...")
# wait for data to be sent from client
await client.start_notify(UART_TX_UUID, notification_handler)
while True:
await asyncio.sleep(0.01)
address = ("2A59A2D4-BCD8-4AF7-B750-E51195C1CA13")
loop = asyncio.get_event_loop()
loop.run_until_complete(run(address, loop))
| [
"asyncio.sleep",
"datetime.datetime.now",
"numpy.zeros",
"struct.unpack",
"bleak.BleakClient",
"pandas.DataFrame",
"asyncio.get_event_loop"
] | [((475, 491), 'numpy.zeros', 'np.zeros', (['(9, 3)'], {}), '((9, 3))\n', (483, 491), True, 'import numpy as np\n'), ((2168, 2192), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2190, 2192), False, 'import asyncio\n'), ((697, 736), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'name', 'data': 'result'}), '(columns=name, data=result)\n', (709, 736), True, 'import pandas as pd\n'), ((966, 989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (987, 989), False, 'import datetime\n'), ((1723, 1754), 'bleak.BleakClient', 'BleakClient', (['address'], {'loop': 'loop'}), '(address, loop=loop)\n', (1734, 1754), False, 'from bleak import BleakClient\n'), ((1040, 1083), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[12 * i:12 * i + 4]'], {}), "('f', data[12 * i:12 * i + 4])\n", (1053, 1083), False, 'import struct\n'), ((1112, 1159), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[12 * i + 4:12 * i + 8]'], {}), "('f', data[12 * i + 4:12 * i + 8])\n", (1125, 1159), False, 'import struct\n'), ((1188, 1236), 'struct.unpack', 'struct.unpack', (['"""f"""', 'data[12 * i + 8:12 * i + 12]'], {}), "('f', data[12 * i + 8:12 * i + 12])\n", (1201, 1236), False, 'import struct\n'), ((2089, 2108), 'asyncio.sleep', 'asyncio.sleep', (['(0.01)'], {}), '(0.01)\n', (2102, 2108), False, 'import asyncio\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import imutils
import time
import os
arguments = {"input" : "videos/trim_version.mp4",
"output" : "output/output.mp4",
"yolo": "yolo-coco/",
"confidence" : 0.5,
"threshold" : 0.3
}
labels_path = os.path.sep.join([arguments["yolo"], "coco.names"])
labels = open(labels_path).read().strip().split("\n")
np.random.seed(42)
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")
weights_path = os.path.sep.join([arguments["yolo"], "yolov3.weights"])
config_path = os.path.sep.join([arguments["yolo"], "yolov3.cfg"])
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0]-1] for i in net.getUnconnectedOutLayers()]
#new_layer_names = []
#for i in net.getUnconnectedOutLayers():
# new_layer_names.append(layer_names[i[0]-1])
video_stream = cv2.VideoCapture(arguments["input"])
writer = None
while True:
(grabbed, frame) = video_stream.read()
#check if we have reach the end frame
if not grabbed:
break
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416,416),
swapRB=True, crop=False)
net.setInput(blob)
start_time = time.time()
print("start time : ", start_time)
layer_outputs = net.forward(layer_names)
end_time = time.time()
print("Time taken : ", end_time - start_time)
boxes = []
confidences = []
class_ids = []
for output in layer_outputs:
for detection in output:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > arguments["confidence"]:
box = detection[0:4] * np.array([w, h, w, h])
(center_x, center_y, width, height) = box.astype("int")
x = int(center_x - (width/2))
y = int(center_y - (height/2))
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
class_ids.append(class_id)
nms_boxes = cv2.dnn.NMSBoxes(boxes, confidences, arguments["confidence"],
arguments["threshold"])
if len(nms_boxes) > 0:
for i in nms_boxes.flatten():
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
color = [int(c) for c in colors[class_ids[i]]]
cv2.rectangle(frame, (x,y), (x+w, y+h), color, 2)
text = "{}: {:.4f}".format(labels[class_ids[i]], confidences[i])
cv2.putText(frame, text, (x, y-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
color, 2)
if writer is None:
fourcc = cv2.VideoWriter_fourcc(*"MPEG")
writer = cv2.VideoWriter(arguments["output"], fourcc, 30,
(frame.shape[1], frame.shape[0]), True)
writer.write(frame)
writer.release()
video_stream.release()
| [
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"numpy.argmax",
"cv2.VideoWriter",
"cv2.putText",
"numpy.array",
"os.path.sep.join",
"numpy.random.seed",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.dnn.NMSBoxes",
"time.time",
"cv2.dnn.readNetFromDarknet"
] | [((317, 368), 'os.path.sep.join', 'os.path.sep.join', (["[arguments['yolo'], 'coco.names']"], {}), "([arguments['yolo'], 'coco.names'])\n", (333, 368), False, 'import os\n'), ((424, 442), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (438, 442), True, 'import numpy as np\n'), ((533, 588), 'os.path.sep.join', 'os.path.sep.join', (["[arguments['yolo'], 'yolov3.weights']"], {}), "([arguments['yolo'], 'yolov3.weights'])\n", (549, 588), False, 'import os\n'), ((603, 654), 'os.path.sep.join', 'os.path.sep.join', (["[arguments['yolo'], 'yolov3.cfg']"], {}), "([arguments['yolo'], 'yolov3.cfg'])\n", (619, 654), False, 'import os\n'), ((662, 715), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['config_path', 'weights_path'], {}), '(config_path, weights_path)\n', (688, 715), False, 'import cv2\n'), ((954, 990), 'cv2.VideoCapture', 'cv2.VideoCapture', (["arguments['input']"], {}), "(arguments['input'])\n", (970, 990), False, 'import cv2\n'), ((1193, 1269), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (1214, 1269), False, 'import cv2\n'), ((1346, 1357), 'time.time', 'time.time', ([], {}), '()\n', (1355, 1357), False, 'import time\n'), ((1462, 1473), 'time.time', 'time.time', ([], {}), '()\n', (1471, 1473), False, 'import time\n'), ((2233, 2323), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', "arguments['confidence']", "arguments['threshold']"], {}), "(boxes, confidences, arguments['confidence'], arguments[\n 'threshold'])\n", (2249, 2323), False, 'import cv2\n'), ((2894, 2925), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MPEG'"], {}), "(*'MPEG')\n", (2916, 2925), False, 'import cv2\n'), ((2943, 3036), 'cv2.VideoWriter', 'cv2.VideoWriter', (["arguments['output']", 'fourcc', '(30)', '(frame.shape[1], frame.shape[0])', '(True)'], {}), "(arguments['output'], fourcc, 30, (frame.shape[1], frame.\n shape[0]), True)\n", (2958, 3036), False, 'import cv2\n'), ((1713, 1730), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1722, 1730), True, 'import numpy as np\n'), ((2602, 2656), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(frame, (x, y), (x + w, y + h), color, 2)\n', (2615, 2656), False, 'import cv2\n'), ((2741, 2818), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(x, y - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'color', '(2)'], {}), '(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n', (2752, 2818), False, 'import cv2\n'), ((1865, 1887), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1873, 1887), True, 'import numpy as np\n')] |
from flatland.evaluators.client import FlatlandRemoteClient
from flatland.envs.observations import GlobalObsForRailEnv, TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
import numpy as np
remote_client = FlatlandRemoteClient()
def my_controller(obs, _env):
_action = {}
for _idx, _ in enumerate(_env.agents):
_action[_idx] = np.random.randint(0, 5)
return _action
# my_observation_builder = TreeObsForRailEnv(
# max_depth=3, predictor=ShortestPathPredictorForRailEnv())
my_observation_builder = GlobalObsForRailEnv()
episode = 0
while True:
print("==============")
episode += 1
print("[INFO] EPISODE_START : {}".format(episode))
# NO WAY TO CHECK service/self.evaluation_done in client
obs, info = remote_client.env_create(obs_builder_object=my_observation_builder)
if not obs:
"""
The remote env returns False as the first obs
when it is done evaluating all the individual episodes
"""
print("[INFO] DONE ALL, BREAKING")
break
while True:
action = my_controller(obs, remote_client.env)
try:
observation, all_rewards, done, info = remote_client.env_step(
action)
except:
print("[ERR] DONE BUT step() CALLED")
if (True): # debug
print("-----")
# print(done)
print("[DEBUG] REW: ", all_rewards)
# break
if done['__all__']:
print("[INFO] EPISODE_DONE : ", episode)
print("[INFO] TOTAL_REW: ", sum(list(all_rewards.values())))
break
print("Evaluation Complete...")
print(remote_client.submit())
| [
"flatland.envs.observations.GlobalObsForRailEnv",
"numpy.random.randint",
"flatland.evaluators.client.FlatlandRemoteClient"
] | [((245, 267), 'flatland.evaluators.client.FlatlandRemoteClient', 'FlatlandRemoteClient', ([], {}), '()\n', (265, 267), False, 'from flatland.evaluators.client import FlatlandRemoteClient\n'), ((564, 585), 'flatland.envs.observations.GlobalObsForRailEnv', 'GlobalObsForRailEnv', ([], {}), '()\n', (583, 585), False, 'from flatland.envs.observations import GlobalObsForRailEnv, TreeObsForRailEnv\n'), ((384, 407), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (401, 407), True, 'import numpy as np\n')] |
import os
import boto3
from botocore.config import Config
import datetime as dt
import json
import csv
import numpy as np
import zipfile
from boto3.dynamodb.conditions import Attr
import pickle
# mitigation of potential API rate restrictions (esp for Batch API)
retry_config = Config(retries={"max_attempts": 5})
client = boto3.client("s3", config=retry_config)
dynamodb = boto3.resource("dynamodb", config=retry_config, region_name="us-east-1")
""" ----- FILE I/O OPS ----- """
def get_paths(timestamp):
if timestamp == "now":
train_time = dt.datetime.now()
elif isinstance(timestamp, str):
train_time = dt.datetime.fromisoformat(timestamp)
elif isinstance(timestamp, int) or isinstance(timestamp, float):
train_time = dt.datetime.fromtimestamp(timestamp)
else:
print(
f"Timestamp type must be a string (datetime, isoformat) or int/float (timestamp). You passed {type(timestamp)}."
)
raise ValueError
t0 = train_time.timestamp()
data_path = f"{dt.date.fromtimestamp(t0).isoformat()}-{str(int(t0))}"
return data_path
def proc_time(start, end):
duration = np.round((end - start), 2)
proc_time = np.round((duration / 60), 2)
if duration > 3600:
return f"{proc_time} hours."
elif duration > 60:
return f"{proc_time} minutes."
else:
return f"{duration} seconds."
def get_keys(items):
keys = set([])
for item in items:
keys = keys.union(set(item.keys()))
return keys
def make_fxp(attr):
"""
Generates filter expression based on attributes dict to retrieve a subset of the database using conditional operators and keyword pairs. Returns dict containing filter expression which can be passed into the dynamodb table.scan() method.
Args:
`name` : one of db column names ('timestamp', 'mem_bin', etc.)
`method`: begins_with, between, eq, gt, gte, lt, lte
`value`: str, int, float or low/high list of values if using 'between' method
Ex: to retrieve a subset of data with 'timestamp' col greater than 1620740441:
setting attr={'name':'timestamp', 'method': 'gt', 'value': 1620740441}
returns dict: {'FilterExpression': Attr('timestamp').gt(0)}
"""
# table.scan(FilterExpression=Attr('mem_bin').gt(2))
n = attr['name']
m = attr['method']
v = attr['value']
if m == 'eq':
fxp = Attr(n).eq(v)
elif m == 'gt':
fxp = Attr(n).gt(v)
elif m == 'gte':
fxp = Attr(n).gte(v)
elif m == 'lt':
fxp = Attr(n).lt(v)
elif m == 'lte':
fxp = Attr(n).lte(v)
elif m == 'begins_with':
fxp = Attr(n).begins_with(v)
elif m == 'between':
if isinstance(v, 'list'):
fxp = Attr(n).between(v.min(), v.max())
return {'FilterExpression': fxp}
def ddb_download(table_name, p_key="ipst", attr=None):
"""retrieves data from dynamodb
Args:
table_name: dynamodb table name
p_key: (default is 'ipst') primary key in dynamodb table
attr: (optional) retrieve a subset using an attribute dictionary
If attr is none, returns all items in database.
"""
table = dynamodb.Table(table_name)
key_set = [p_key] # "ipst"
if attr is None or "None":
raw_data = table.scan()
else:
scan_kwargs = make_fxp(attr)
raw_data = table.scan(**scan_kwargs)
if raw_data is None:
return None
items = raw_data["Items"]
fieldnames = set([]).union(get_keys(items))
while raw_data.get("LastEvaluatedKey"):
print("Downloading ", end="")
raw_data = table.scan(ExclusiveStartKey=raw_data["LastEvaluatedKey"])
items.extend(raw_data["Items"])
fieldnames - fieldnames.union(get_keys(items))
print("\nTotal downloaded records: {}".format(len(items)))
for f in fieldnames:
if f not in key_set:
key_set.append(f)
ddb_data = {"items": items, "keys": key_set}
return ddb_data
def write_to_csv(ddb_data, filename=None):
if filename is None:
filename = "batch.csv"
with open(filename, "w") as csvfile:
writer = csv.DictWriter(csvfile, delimiter=",", fieldnames=ddb_data["keys"], quotechar='"')
writer.writeheader()
writer.writerows(ddb_data["items"])
print(f"DDB data saved to: {filename}")
def save_to_file(data_dict):
keys = []
for filename, data in data_dict.items():
key = f"{filename}.txt"
keys.append(key)
with open(f"{key}", "w") as f:
for item in data:
f.writelines(f"{item}\n")
print(f"Saved file keys:\n {keys}")
return keys
def save_dict(data_dict, df_key=None):
keys = []
for key, data in data_dict.items():
filename = f"{key}.txt"
with open(filename, "w") as f:
try:
json.dump(data, f)
except Exception as e:
print(e)
f.writelines(data)
keys.append(filename)
if df_key is not None:
keys.append(df_key)
print(f"File keys:\n {keys}")
return keys
def save_dataframe(df, df_key):
df["ipst"] = df.index
df.to_csv(df_key, index=False)
print(f"Dataframe saved as: {df_key}")
df.set_index("ipst", drop=True, inplace=True)
def save_to_pickle(data_dict, target_col=None, df_key=None):
keys = []
for k, v in data_dict.items():
if target_col is not None:
os.makedirs(f'./{target_col}', exist_ok=True)
key = f"./{target_col}/{k}"
else:
key = k
with open(key, "wb") as file_pi:
pickle.dump(v, file_pi)
print(f"{k} saved as {key}")
keys.append(key)
if df_key is not None:
keys.append(df_key)
print(f"File keys:\n {keys}")
return keys
def s3_upload(keys, bucket_name, prefix):
err = None
for key in keys:
obj = f"{prefix}/{key}" # training/date-timestamp/filename
try:
with open(f"{key}", "rb") as f:
client.upload_fileobj(f, bucket_name, obj)
print(f"Uploaded: {obj}")
except Exception as e:
err = e
continue
if err is not None:
print(err)
def s3_download(keys, bucket_name, prefix):
err = None
for key in keys:
obj = f"{prefix}/{key}" # latest/master.csv
print("s3 key: ", obj)
try:
with open(f"{key}", "wb") as f:
client.download_fileobj(bucket_name, obj, f)
except Exception as e:
err = e
continue
if err is not None:
print(err)
def zip_models(path_to_models, zipname="models.zip"):
file_paths = []
for root, _, files in os.walk(path_to_models):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
print("Zipping model files:")
with zipfile.ZipFile(zipname, "w") as zip_ref:
for file in file_paths:
zip_ref.write(file)
print(file)
| [
"csv.DictWriter",
"datetime.datetime.fromtimestamp",
"boto3.client",
"pickle.dump",
"zipfile.ZipFile",
"os.makedirs",
"botocore.config.Config",
"numpy.round",
"json.dump",
"os.path.join",
"datetime.datetime.now",
"boto3.resource",
"datetime.date.fromtimestamp",
"datetime.datetime.fromisofo... | [((278, 313), 'botocore.config.Config', 'Config', ([], {'retries': "{'max_attempts': 5}"}), "(retries={'max_attempts': 5})\n", (284, 313), False, 'from botocore.config import Config\n'), ((323, 362), 'boto3.client', 'boto3.client', (['"""s3"""'], {'config': 'retry_config'}), "('s3', config=retry_config)\n", (335, 362), False, 'import boto3\n'), ((374, 446), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {'config': 'retry_config', 'region_name': '"""us-east-1"""'}), "('dynamodb', config=retry_config, region_name='us-east-1')\n", (388, 446), False, 'import boto3\n'), ((1153, 1177), 'numpy.round', 'np.round', (['(end - start)', '(2)'], {}), '(end - start, 2)\n', (1161, 1177), True, 'import numpy as np\n'), ((1196, 1222), 'numpy.round', 'np.round', (['(duration / 60)', '(2)'], {}), '(duration / 60, 2)\n', (1204, 1222), True, 'import numpy as np\n'), ((6732, 6755), 'os.walk', 'os.walk', (['path_to_models'], {}), '(path_to_models)\n', (6739, 6755), False, 'import os\n'), ((557, 574), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (572, 574), True, 'import datetime as dt\n'), ((4129, 4215), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'delimiter': '""","""', 'fieldnames': "ddb_data['keys']", 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', fieldnames=ddb_data[\'keys\'],\n quotechar=\'"\')\n', (4143, 4215), False, 'import csv\n'), ((6923, 6952), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipname', '"""w"""'], {}), "(zipname, 'w')\n", (6938, 6952), False, 'import zipfile\n'), ((633, 669), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', (['timestamp'], {}), '(timestamp)\n', (658, 669), True, 'import datetime as dt\n'), ((5438, 5483), 'os.makedirs', 'os.makedirs', (['f"""./{target_col}"""'], {'exist_ok': '(True)'}), "(f'./{target_col}', exist_ok=True)\n", (5449, 5483), False, 'import os\n'), ((5611, 5634), 'pickle.dump', 'pickle.dump', (['v', 'file_pi'], {}), '(v, file_pi)\n', (5622, 5634), False, 'import pickle\n'), ((6811, 6839), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (6823, 6839), False, 'import os\n'), ((760, 796), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (785, 796), True, 'import datetime as dt\n'), ((2396, 2403), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2400, 2403), False, 'from boto3.dynamodb.conditions import Attr\n'), ((4842, 4860), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (4851, 4860), False, 'import json\n'), ((1033, 1058), 'datetime.date.fromtimestamp', 'dt.date.fromtimestamp', (['t0'], {}), '(t0)\n', (1054, 1058), True, 'import datetime as dt\n'), ((2444, 2451), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2448, 2451), False, 'from boto3.dynamodb.conditions import Attr\n'), ((2493, 2500), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2497, 2500), False, 'from boto3.dynamodb.conditions import Attr\n'), ((2542, 2549), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2546, 2549), False, 'from boto3.dynamodb.conditions import Attr\n'), ((2591, 2598), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2595, 2598), False, 'from boto3.dynamodb.conditions import Attr\n'), ((2649, 2656), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2653, 2656), False, 'from boto3.dynamodb.conditions import Attr\n'), ((2749, 2756), 'boto3.dynamodb.conditions.Attr', 'Attr', (['n'], {}), '(n)\n', (2753, 2756), False, 'from boto3.dynamodb.conditions import Attr\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
import keras.backend as K
from keras import activations, initializers, regularizers
from keras.engine import Layer, InputSpec
from keras.layers.recurrent import Recurrent, time_distributed_dense
from keras.layers.core import Flatten
class DecoderVaeLSTM(Recurrent):
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializers.get(init)
self.inner_init = initializers.get(inner_init)
self.forget_bias_init = initializers.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W, self.dropout_U = dropout_W, dropout_U
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(DecoderVaeLSTM, self).__init__(**kwargs)
def get_initial_states(self, x):
print("initial state building")
# build an all-zero tensor of shape (samples, output_dim)
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_state = K.tile(initial_state, [1, self.input_dim])
initial_states = [initial_state for _ in range(len(self.states))]
return initial_states
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
self.input_dim = input_shape[2]
self.W = self.init((self.output_dim, 4 * self.input_dim),
name='{}_W'.format(self.name))
self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
name='{}_U'.format(self.name))
self.b = K.variable(np.hstack((np.zeros(self.input_dim),
K.get_value(self.forget_bias_init((self.input_dim,))),
np.zeros(self.input_dim),
np.zeros(self.input_dim))),
name='{}_b'.format(self.name))
self.A = self.init((self.input_dim, self.output_dim),
name='{}_A'.format(self.name))
self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))
self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
x_t = self.activation(K.dot(h_tm1, self.A) + self.ba)
z = K.dot(x_t, self.W) + K.dot(h_tm1, self.U) + self.b
z0 = z[:, :self.input_dim]
z1 = z[:, self.input_dim: 2 * self.input_dim]
z2 = z[:, 2 * self.input_dim: 3 * self.input_dim]
z3 = z[:, 3 * self.input_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
h = o * self.activation(c)
return x_t, [h, c]
def call(self, x, mask=None):
input_shape = self.input_spec[0].shape
# state format: [h(t-1), c(t-1), y(t-1)]
#h_0 = K.zeros_like(x[:, 0, :])
#c_0 = K.zeros_like(x[:, 0, :])
h_0 = K.reshape(x, (-1, self.input_dim))
c_0 = K.reshape(x, (-1, self.input_dim))
initial_states = [h_0, c_0]
#self.states = [None, None]
#initial_states = self.get_initial_states(x)
last_output, outputs, states = K.rnn(step_function=self.step,
inputs=x,
initial_states=initial_states,
go_backwards=self.go_backwards,
mask=mask,
constants=None,
unroll=self.unroll,
input_length=input_shape[1])
if self.return_sequences:
return outputs
else:
return last_output
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'out_activation': self.out_activation.__name__,
'inner_activation': self.inner_activation.__name__}
base_config = super(DecoderVaeLSTM, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class QRNN(Recurrent):
def __init__(self, output_dim,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh', inner_activation='hard_sigmoid',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
dropout_W=0., dropout_U=0., **kwargs):
self.output_dim = output_dim
self.init = initializers.get(init)
self.inner_init = initializers.get(inner_init)
self.forget_bias_init = initializers.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
self.W_regularizer = regularizers.get(W_regularizer)
self.U_regularizer = regularizers.get(U_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.dropout_W = dropout_W
self.dropout_U = dropout_U
self.stateful = False
if self.dropout_W or self.dropout_U:
self.uses_learning_phase = True
super(QRNN, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
input_dim = input_shape[2]
self.input_dim = input_dim
if self.stateful:
self.reset_states()
else:
self.states = [None, None]
self.states_dim = [self.input_dim, self.output_dim]
self.weight_size = self.output_dim * 4
self.W = self.add_weight((input_dim, self.weight_size),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer)
self.U = self.add_weight((input_dim, self.weight_size),
initializer=self.inner_init,
name='{}_U'.format(self.name),
regularizer=self.U_regularizer)
def b_reg(shape, name=None):
return K.variable(np.hstack((np.zeros(self.output_dim),
K.get_value(self.forget_bias_init((self.output_dim,))),
np.zeros(self.output_dim),
np.zeros(self.output_dim))),
name='{}_b'.format(self.name))
self.b = self.add_weight((self.weight_size,),
initializer=b_reg,
name='{}_b'.format(self.name),
regularizer=self.b_regularizer)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def reset_states(self):
assert self.stateful, 'Layer must be stateful.'
input_shape = self.input_spec[0].shape
if not input_shape[0]:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the time dimension by passing a '
'`batch_shape` argument to your Input layer.')
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], self.input_dim)))
K.set_value(self.states[1],
np.zeros((input_shape[0], self.output_dim)))
else:
self.states = [K.zeros((input_shape[0], self.input_dim)),
K.zeros((input_shape[0], self.output_dim))]
def get_initial_states(self, x):
initial_state = K.zeros_like(x) # (samples, timesteps, input_dim)
initial_state = K.sum(initial_state, axis=(1, 2)) # (samples,)
initial_state = K.expand_dims(initial_state) # (samples, 1)
initial_states=[]
for dim in self.states_dim:
initial_states.append(K.tile(initial_state, [1, dim]))
return initial_states
def preprocess_input(self, x):
return x
def step(self, x, states):
_previous = states[0]
_p_c = states[1]
#B_U = states[2]
#B_W = states[3]
_current = K.dot(x, self.W)
_p = K.dot(_previous, self.U) + self.b
_weighted = _current + _p
z0 = _weighted[:, :self.output_dim]
z1 = _weighted[:, self.output_dim: 2 * self.output_dim]
#z2 = _weighted[:, 2 * self.output_dim:]
z2 = _weighted[:, 2 * self.output_dim:3 * self.output_dim]
z3 = _weighted[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
z = self.activation(z2)
o = self.inner_activation(z3)
#f = self.inner_activation(z0)
#z = self.activation(z1)
#o = self.inner_activation(z2)
c = f * _p_c + i * z
#c = f * _p_c + (1 - f) * z
h = self.activation(c) * o # h is size vector
return h, [x, c]
def get_constants(self, x):
constants = []
if 0 < self.dropout_U < 1:
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, self.input_dim))
B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
constants.append(B_U)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.dropout_W < 1:
input_shape = K.int_shape(x)
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.tile(ones, (1, int(input_dim)))
B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
constants.append(B_W)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def get_config(self):
config = {'output_dim': self.output_dim,
'init': self.init.__name__,
'inner_init': self.inner_init.__name__,
'forget_bias_init': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'inner_activation': self.inner_activation.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'dropout_W': self.dropout_W,
'dropout_U': self.dropout_U}
base_config = super(QRNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| [
"keras.engine.InputSpec",
"keras.backend.zeros",
"keras.backend.sum",
"keras.regularizers.get",
"keras.backend.reshape",
"keras.backend.cast_to_floatx",
"keras.activations.get",
"keras.backend.int_shape",
"numpy.zeros",
"keras.backend.dot",
"keras.backend.zeros_like",
"keras.backend.rnn",
"k... | [((755, 777), 'keras.initializers.get', 'initializers.get', (['init'], {}), '(init)\n', (771, 777), False, 'from keras import activations, initializers, regularizers\n'), ((799, 827), 'keras.initializers.get', 'initializers.get', (['inner_init'], {}), '(inner_init)\n', (815, 827), False, 'from keras import activations, initializers, regularizers\n'), ((855, 889), 'keras.initializers.get', 'initializers.get', (['forget_bias_init'], {}), '(forget_bias_init)\n', (871, 889), False, 'from keras import activations, initializers, regularizers\n'), ((911, 938), 'keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (926, 938), False, 'from keras import activations, initializers, regularizers\n'), ((966, 999), 'keras.activations.get', 'activations.get', (['inner_activation'], {}), '(inner_activation)\n', (981, 999), False, 'from keras import activations, initializers, regularizers\n'), ((1024, 1055), 'keras.regularizers.get', 'regularizers.get', (['W_regularizer'], {}), '(W_regularizer)\n', (1040, 1055), False, 'from keras import activations, initializers, regularizers\n'), ((1080, 1111), 'keras.regularizers.get', 'regularizers.get', (['U_regularizer'], {}), '(U_regularizer)\n', (1096, 1111), False, 'from keras import activations, initializers, regularizers\n'), ((1136, 1167), 'keras.regularizers.get', 'regularizers.get', (['b_regularizer'], {}), '(b_regularizer)\n', (1152, 1167), False, 'from keras import activations, initializers, regularizers\n'), ((1505, 1520), 'keras.backend.zeros_like', 'K.zeros_like', (['x'], {}), '(x)\n', (1517, 1520), True, 'import keras.backend as K\n'), ((1575, 1608), 'keras.backend.sum', 'K.sum', (['initial_state'], {'axis': '(1, 2)'}), '(initial_state, axis=(1, 2))\n', (1580, 1608), True, 'import keras.backend as K\n'), ((1642, 1670), 'keras.backend.expand_dims', 'K.expand_dims', (['initial_state'], {}), '(initial_state)\n', (1655, 1670), True, 'import keras.backend as K\n'), ((1706, 1748), 'keras.backend.tile', 'K.tile', (['initial_state', '[1, self.input_dim]'], {}), '(initial_state, [1, self.input_dim])\n', (1712, 1748), True, 'import keras.backend as K\n'), ((3677, 3711), 'keras.backend.reshape', 'K.reshape', (['x', '(-1, self.input_dim)'], {}), '(x, (-1, self.input_dim))\n', (3686, 3711), True, 'import keras.backend as K\n'), ((3721, 3755), 'keras.backend.reshape', 'K.reshape', (['x', '(-1, self.input_dim)'], {}), '(x, (-1, self.input_dim))\n', (3730, 3755), True, 'import keras.backend as K\n'), ((3904, 4092), 'keras.backend.rnn', 'K.rnn', ([], {'step_function': 'self.step', 'inputs': 'x', 'initial_states': 'initial_states', 'go_backwards': 'self.go_backwards', 'mask': 'mask', 'constants': 'None', 'unroll': 'self.unroll', 'input_length': 'input_shape[1]'}), '(step_function=self.step, inputs=x, initial_states=initial_states,\n go_backwards=self.go_backwards, mask=mask, constants=None, unroll=self.\n unroll, input_length=input_shape[1])\n', (3909, 4092), True, 'import keras.backend as K\n'), ((5435, 5457), 'keras.initializers.get', 'initializers.get', (['init'], {}), '(init)\n', (5451, 5457), False, 'from keras import activations, initializers, regularizers\n'), ((5479, 5507), 'keras.initializers.get', 'initializers.get', (['inner_init'], {}), '(inner_init)\n', (5495, 5507), False, 'from keras import activations, initializers, regularizers\n'), ((5535, 5569), 'keras.initializers.get', 'initializers.get', (['forget_bias_init'], {}), '(forget_bias_init)\n', (5551, 5569), False, 'from keras import activations, initializers, regularizers\n'), ((5591, 5618), 'keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (5606, 5618), False, 'from keras import activations, initializers, regularizers\n'), ((5646, 5679), 'keras.activations.get', 'activations.get', (['inner_activation'], {}), '(inner_activation)\n', (5661, 5679), False, 'from keras import activations, initializers, regularizers\n'), ((5704, 5735), 'keras.regularizers.get', 'regularizers.get', (['W_regularizer'], {}), '(W_regularizer)\n', (5720, 5735), False, 'from keras import activations, initializers, regularizers\n'), ((5760, 5791), 'keras.regularizers.get', 'regularizers.get', (['U_regularizer'], {}), '(U_regularizer)\n', (5776, 5791), False, 'from keras import activations, initializers, regularizers\n'), ((5816, 5847), 'keras.regularizers.get', 'regularizers.get', (['b_regularizer'], {}), '(b_regularizer)\n', (5832, 5847), False, 'from keras import activations, initializers, regularizers\n'), ((8671, 8686), 'keras.backend.zeros_like', 'K.zeros_like', (['x'], {}), '(x)\n', (8683, 8686), True, 'import keras.backend as K\n'), ((8741, 8774), 'keras.backend.sum', 'K.sum', (['initial_state'], {'axis': '(1, 2)'}), '(initial_state, axis=(1, 2))\n', (8746, 8774), True, 'import keras.backend as K\n'), ((8808, 8836), 'keras.backend.expand_dims', 'K.expand_dims', (['initial_state'], {}), '(initial_state)\n', (8821, 8836), True, 'import keras.backend as K\n'), ((9168, 9184), 'keras.backend.dot', 'K.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (9173, 9184), True, 'import keras.backend as K\n'), ((1903, 1931), 'keras.engine.InputSpec', 'InputSpec', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1912, 1931), False, 'from keras.engine import Layer, InputSpec\n'), ((6107, 6135), 'keras.engine.InputSpec', 'InputSpec', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (6116, 6135), False, 'from keras.engine import Layer, InputSpec\n'), ((9193, 9217), 'keras.backend.dot', 'K.dot', (['_previous', 'self.U'], {}), '(_previous, self.U)\n', (9198, 9217), True, 'import keras.backend as K\n'), ((10006, 10039), 'keras.backend.tile', 'K.tile', (['ones', '(1, self.input_dim)'], {}), '(ones, (1, self.input_dim))\n', (10012, 10039), True, 'import keras.backend as K\n'), ((10274, 10288), 'keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (10285, 10288), True, 'import keras.backend as K\n'), ((3010, 3030), 'keras.backend.dot', 'K.dot', (['h_tm1', 'self.A'], {}), '(h_tm1, self.A)\n', (3015, 3030), True, 'import keras.backend as K\n'), ((3049, 3067), 'keras.backend.dot', 'K.dot', (['x_t', 'self.W'], {}), '(x_t, self.W)\n', (3054, 3067), True, 'import keras.backend as K\n'), ((3070, 3090), 'keras.backend.dot', 'K.dot', (['h_tm1', 'self.U'], {}), '(h_tm1, self.U)\n', (3075, 3090), True, 'import keras.backend as K\n'), ((8355, 8397), 'numpy.zeros', 'np.zeros', (['(input_shape[0], self.input_dim)'], {}), '((input_shape[0], self.input_dim))\n', (8363, 8397), True, 'import numpy as np\n'), ((8447, 8490), 'numpy.zeros', 'np.zeros', (['(input_shape[0], self.output_dim)'], {}), '((input_shape[0], self.output_dim))\n', (8455, 8490), True, 'import numpy as np\n'), ((8520, 8561), 'keras.backend.zeros', 'K.zeros', (['(input_shape[0], self.input_dim)'], {}), '((input_shape[0], self.input_dim))\n', (8527, 8561), True, 'import keras.backend as K\n'), ((8571, 8613), 'keras.backend.zeros', 'K.zeros', (['(input_shape[0], self.output_dim)'], {}), '((input_shape[0], self.output_dim))\n', (8578, 8613), True, 'import keras.backend as K\n'), ((8931, 8962), 'keras.backend.tile', 'K.tile', (['initial_state', '[1, dim]'], {}), '(initial_state, [1, dim])\n', (8937, 8962), True, 'import keras.backend as K\n'), ((9963, 9993), 'keras.backend.reshape', 'K.reshape', (['x[:, 0, 0]', '(-1, 1)'], {}), '(x[:, 0, 0], (-1, 1))\n', (9972, 9993), True, 'import keras.backend as K\n'), ((10344, 10374), 'keras.backend.reshape', 'K.reshape', (['x[:, 0, 0]', '(-1, 1)'], {}), '(x[:, 0, 0], (-1, 1))\n', (10353, 10374), True, 'import keras.backend as K\n'), ((2243, 2267), 'numpy.zeros', 'np.zeros', (['self.input_dim'], {}), '(self.input_dim)\n', (2251, 2267), True, 'import numpy as np\n'), ((2392, 2416), 'numpy.zeros', 'np.zeros', (['self.input_dim'], {}), '(self.input_dim)\n', (2400, 2416), True, 'import numpy as np\n'), ((2452, 2476), 'numpy.zeros', 'np.zeros', (['self.input_dim'], {}), '(self.input_dim)\n', (2460, 2476), True, 'import numpy as np\n'), ((10068, 10099), 'keras.backend.dropout', 'K.dropout', (['ones', 'self.dropout_U'], {}), '(ones, self.dropout_U)\n', (10077, 10099), True, 'import keras.backend as K\n'), ((10183, 10204), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['(1.0)'], {}), '(1.0)\n', (10199, 10204), True, 'import keras.backend as K\n'), ((10449, 10480), 'keras.backend.dropout', 'K.dropout', (['ones', 'self.dropout_W'], {}), '(ones, self.dropout_W)\n', (10458, 10480), True, 'import keras.backend as K\n'), ((10564, 10585), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['(1.0)'], {}), '(1.0)\n', (10580, 10585), True, 'import keras.backend as K\n'), ((6955, 6980), 'numpy.zeros', 'np.zeros', (['self.output_dim'], {}), '(self.output_dim)\n', (6963, 6980), True, 'import numpy as np\n'), ((7060, 7085), 'numpy.zeros', 'np.zeros', (['self.output_dim'], {}), '(self.output_dim)\n', (7068, 7085), True, 'import numpy as np\n'), ((7098, 7123), 'numpy.zeros', 'np.zeros', (['self.output_dim'], {}), '(self.output_dim)\n', (7106, 7123), True, 'import numpy as np\n')] |
"""Utilities for creating point cloud input representation."""
from dataclasses import dataclass
from typing import List, Tuple
import numpy as np
from constants import DO_CLASSES, DYNAMIC_OBJECTS, SO_CLASSES, STATIC_OBJECTS
from plot_objects_annot_on_image import ObjectAnnotationHandler, get_annotations_files, read_anno_content
AVAILABLE_PROJECTS = [DYNAMIC_OBJECTS, STATIC_OBJECTS]
@dataclass
class BEVSettings:
"""All relevant information about the input."""
# pylint: disable=too-many-instance-attributes
# General settings
grid_min: np.ndarray = np.array([-50.0, 0.0])
grid_max: np.ndarray = np.array([50.0, 100.0])
grid_cell_size: float = 0.1 # Default in PIXOR: 0.1
# Pixor settings
pixor_z_min: float = -2.4
pixor_z_max: float = 1.0
# === Not configurable (automatically calculated) ===
# General
grid_channels: int = 0
grid_res: Tuple[int, int] = (0, 0)
grid_shape: Tuple[int, int, int] = (0, 0, 0)
classes: Tuple[str] = DO_CLASSES + SO_CLASSES
def get_class_name(self, idx: int) -> str:
"""Retrieve class name from index."""
return self.classes[idx]
def encode_classes(self, classes_to_encode: List[str]) -> List[int]:
"""Retrieve classes indexes for a list of class names."""
return [self.classes.index(entry) for entry in classes_to_encode]
def __post_init__(self):
"""Post initialize fields."""
# BEV Grid
grid_res = (self.grid_max - self.grid_min) / self.grid_cell_size
self.grid_res = tuple(grid_res.astype(int))
self.grid_channels = int((self.pixor_z_max - self.pixor_z_min) / self.grid_cell_size) + 3
self.grid_shape = (self.grid_channels,) + self.grid_res
def create_pointcloud_input(points: np.ndarray, settings: BEVSettings) -> np.ndarray:
"""Create input representation from raw data.
Args:
points: Point cloud [N, 4] containing ['x', 'y', 'z', 'intensity'].
settings: Settings defining the input format.
Returns:
input_: An instance of the encoded input point cloud.
"""
# Truncate points according to BEV ranges
mask = get_grid_mask(points, settings)
points = points[mask]
point_indices_xy = get_grid_indices_xy(points[:, :3], settings)
input_ = _create_pointcloud_input_pixor(points, point_indices_xy, settings)
return input_
def _create_pointcloud_input_pixor(
points: np.ndarray, point_indices_xy: np.ndarray, settings: BEVSettings
) -> np.ndarray:
"""Create PIXOR-style input representation.
Args:
points: Point cloud [N, 4] containing ['x', 'y', 'z', 'intensity'].
point_indices_xy: The corresponding xy-indices for the points.
settings: Settings defining the input format.
Returns:
A PIXOR style BEV projection of the input point cloud.
"""
point_indices_c = np.cast["int32"](
(points[:, 2] - settings.pixor_z_min) / settings.grid_cell_size
)
point_indices_c = 1 + np.clip(
point_indices_c,
a_min=-1,
a_max=settings.grid_channels - 3,
)
point_indices_cxy = tuple(
np.transpose(
np.concatenate([np.expand_dims(point_indices_c, axis=-1), point_indices_xy], axis=-1)
).reshape(3, -1)
)
n_points = points.shape[0]
point_indices_intensity_c = np.repeat(settings.grid_channels - 1, n_points)
point_indices_intensity_cxy = tuple(
np.transpose(
np.concatenate(
[np.expand_dims(point_indices_intensity_c, axis=-1), point_indices_xy], axis=-1
)
).reshape(3, -1)
)
# Define the update per index (currently just occupancy)
updates = np.ones((n_points,))
updates_intensity = points[:, 3]
# Create occupancy grid (with intensity)
input_ = np.zeros(settings.grid_shape, dtype=np.float32)
input_[point_indices_cxy] = updates # Occupancy.
input_[point_indices_intensity_cxy] = updates_intensity # Intensity.
return input_
def get_grid_mask(cloud: np.ndarray, settings: BEVSettings) -> np.ndarray:
"""Get the boolean mask to filter out points within the grid.
Args:
cloud: The input point cloud [N, 3]
settings: The input definition.
Returns:
mask: Boolean mask containing true for points within grid.
"""
scaled_xy = cloud[:, :2] - settings.grid_min
bev_max = settings.grid_max - settings.grid_min
mask = np.all(((scaled_xy >= 0) & (scaled_xy < bev_max)), axis=-1)
return mask
def get_grid_indices_xy(cloud: np.ndarray, settings: BEVSettings) -> np.ndarray:
"""Get grid indices from point cloud.
Args:
cloud: Point cloud of x,y,z coordinates [N, 3].
settings: Settings defining the grid size and resolution.
Returns:
indices_xy: The xy grid indices of each point [N, 2].
"""
# Convert points to indices
indices_xy = np.cast["int32"]((cloud[:, :2] - settings.grid_min) / settings.grid_cell_size)
return indices_xy
def filter_point_cloud(cloud: np.ndarray, angle: np.ndarray, cam_pos: np.ndarray) -> np.ndarray:
"""Filter out points outside of camera-centered frustum."""
cloud_xy = cloud[:, :2] - cam_pos[:2]
point_angles = np.arctan2(cloud_xy[:, 1], cloud_xy[:, 0])
mask = np.logical_or(point_angles < angle[0], point_angles > angle[1])
return cloud[mask]
def get_objects_for_bev(
seq_folder: str,
annotation_projects: List[str],
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Get annotation objects for bird eye view visualization.
Args:
seq_folder: path to the sequence folder.
annotation_projects: list of projects you want to visualize.
Possible projects: DYNAMIC_OBJECTS, STATIC_OBJECTS.
Returns:
extracted_anno_objects: positions, dimensions, rotations and classes of bounding boxes
to be visualized.
"""
anno_project_files = get_annotations_files(seq_folder)
anno_objects = []
for proj in annotation_projects:
if proj in AVAILABLE_PROJECTS:
anno_file = anno_project_files[proj]
anno_content = read_anno_content(anno_file)
anno_objects.extend(list(ObjectAnnotationHandler.from_annotations(anno_content)))
else:
raise Exception(
f"Project {proj} is not available to plot. "
f"Available projects: {*AVAILABLE_PROJECTS,}."
)
positions = [obj[2].marking3d.get("Location")[:2] for obj in anno_objects if obj[2].marking3d]
dimensions = [obj[2].marking3d.get("Size")[:2] for obj in anno_objects if obj[2].marking3d]
rotations = [obj[2].marking3d.get("Rotation") for obj in anno_objects if obj[2].marking3d]
classes = [[obj[0]] for obj in anno_objects if obj[2].marking3d]
extracted_anno_objects = (
np.array(classes),
np.array(positions),
np.array(dimensions),
np.array(rotations),
)
return extracted_anno_objects
| [
"numpy.clip",
"numpy.repeat",
"numpy.ones",
"plot_objects_annot_on_image.get_annotations_files",
"plot_objects_annot_on_image.read_anno_content",
"plot_objects_annot_on_image.ObjectAnnotationHandler.from_annotations",
"numpy.logical_or",
"numpy.array",
"numpy.zeros",
"numpy.arctan2",
"numpy.expa... | [((575, 597), 'numpy.array', 'np.array', (['[-50.0, 0.0]'], {}), '([-50.0, 0.0])\n', (583, 597), True, 'import numpy as np\n'), ((625, 648), 'numpy.array', 'np.array', (['[50.0, 100.0]'], {}), '([50.0, 100.0])\n', (633, 648), True, 'import numpy as np\n'), ((3357, 3404), 'numpy.repeat', 'np.repeat', (['(settings.grid_channels - 1)', 'n_points'], {}), '(settings.grid_channels - 1, n_points)\n', (3366, 3404), True, 'import numpy as np\n'), ((3713, 3733), 'numpy.ones', 'np.ones', (['(n_points,)'], {}), '((n_points,))\n', (3720, 3733), True, 'import numpy as np\n'), ((3830, 3877), 'numpy.zeros', 'np.zeros', (['settings.grid_shape'], {'dtype': 'np.float32'}), '(settings.grid_shape, dtype=np.float32)\n', (3838, 3877), True, 'import numpy as np\n'), ((4465, 4522), 'numpy.all', 'np.all', (['((scaled_xy >= 0) & (scaled_xy < bev_max))'], {'axis': '(-1)'}), '((scaled_xy >= 0) & (scaled_xy < bev_max), axis=-1)\n', (4471, 4522), True, 'import numpy as np\n'), ((5258, 5300), 'numpy.arctan2', 'np.arctan2', (['cloud_xy[:, 1]', 'cloud_xy[:, 0]'], {}), '(cloud_xy[:, 1], cloud_xy[:, 0])\n', (5268, 5300), True, 'import numpy as np\n'), ((5312, 5375), 'numpy.logical_or', 'np.logical_or', (['(point_angles < angle[0])', '(point_angles > angle[1])'], {}), '(point_angles < angle[0], point_angles > angle[1])\n', (5325, 5375), True, 'import numpy as np\n'), ((6009, 6042), 'plot_objects_annot_on_image.get_annotations_files', 'get_annotations_files', (['seq_folder'], {}), '(seq_folder)\n', (6030, 6042), False, 'from plot_objects_annot_on_image import ObjectAnnotationHandler, get_annotations_files, read_anno_content\n'), ((3010, 3078), 'numpy.clip', 'np.clip', (['point_indices_c'], {'a_min': '(-1)', 'a_max': '(settings.grid_channels - 3)'}), '(point_indices_c, a_min=-1, a_max=settings.grid_channels - 3)\n', (3017, 3078), True, 'import numpy as np\n'), ((6919, 6936), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (6927, 6936), True, 'import numpy as np\n'), ((6946, 6965), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (6954, 6965), True, 'import numpy as np\n'), ((6975, 6995), 'numpy.array', 'np.array', (['dimensions'], {}), '(dimensions)\n', (6983, 6995), True, 'import numpy as np\n'), ((7005, 7024), 'numpy.array', 'np.array', (['rotations'], {}), '(rotations)\n', (7013, 7024), True, 'import numpy as np\n'), ((6217, 6245), 'plot_objects_annot_on_image.read_anno_content', 'read_anno_content', (['anno_file'], {}), '(anno_file)\n', (6234, 6245), False, 'from plot_objects_annot_on_image import ObjectAnnotationHandler, get_annotations_files, read_anno_content\n'), ((6283, 6337), 'plot_objects_annot_on_image.ObjectAnnotationHandler.from_annotations', 'ObjectAnnotationHandler.from_annotations', (['anno_content'], {}), '(anno_content)\n', (6323, 6337), False, 'from plot_objects_annot_on_image import ObjectAnnotationHandler, get_annotations_files, read_anno_content\n'), ((3191, 3231), 'numpy.expand_dims', 'np.expand_dims', (['point_indices_c'], {'axis': '(-1)'}), '(point_indices_c, axis=-1)\n', (3205, 3231), True, 'import numpy as np\n'), ((3513, 3563), 'numpy.expand_dims', 'np.expand_dims', (['point_indices_intensity_c'], {'axis': '(-1)'}), '(point_indices_intensity_c, axis=-1)\n', (3527, 3563), True, 'import numpy as np\n')] |
"""Some prebuilt visualization"""
import logging
import matplotlib.pyplot as plt
import pandas as pd
def peak_plot(peak, sample_table=None, max_dist=None, norm_on_center=True, log_y=True,
marker_list=None, color_list=None, guidelines=None, guideline_colors=None,
legend_off=False, legend_col=2, ax=None, figsize=None, save_fig_to=None):
"""Plot the distribution of spike_in peak
Plot a scatter-line plot of [adjusted] number of sequences with i edit distance from center sequence (spike-in seq)
Args:
peak (Peak): a Peak instance
sample_table (pd.DataFrame): abundance of sequence in samples. With samples as columns. If None, try `peak.seqs`
max_dist (int): maximum distance to survey. If None, try `peak.radius`
norm_on_center (bool): if the counts/abundance are normalized to the peak center
log_y (bool): if set the y scale as log
marker_list (list of str): overwrite default marker scheme if not `None`, same length and order as
samples in sample_table
color_list (list of str): overwrite default color scheme if not `None`, same length and order as
samples in sample_table
guidelines (list of float): add a series of guidelines of the peak shape with certain mutation rates, optional
guideline_colors (list of color): the color of guidelines, same shape as guidelines
legend_off (bool): do not show the legend if True
legend_col (int): number of col for legend if show
ax (matplotlib.Axis): if use external ax object to plot. Create a new figure if None
figsize (2-tuple): size of the figure
save_fig_to (str): save the figure to file if not None
Returns:
ax for plotted figure
"""
import numpy as np
if sample_table is None:
if isinstance(peak.seqs, pd.DataFrame):
sample_table = peak.seqs
else:
logging.error('Please indicate sample_table')
raise ValueError('Please indicate sample_table')
if max_dist is None:
if peak.radius is None:
logging.error('Please indicate the maximum distance to survey')
raise ValueError('Please indicate the maximum distance to survey')
else:
max_dist = peak.radius
if marker_list is None:
marker_list = Presets.markers(num=sample_table.shape[1], with_line=True)
elif len(marker_list) != sample_table.shape[1]:
logging.error('Error: length of marker_list does not align with the number of valid samples to plot')
raise Exception('Error: length of marker_list does not align with the number of valid samples to plot')
if color_list is None:
color_list = Presets.color_tab10(num=sample_table.shape[1])
elif len(color_list) != sample_table.shape[1]:
logging.error('Error: length of color_list does not align with the number of valid samples to plot')
raise Exception('Error: length of color_list does not align with the number of valid samples to plot')
if ax is None:
if figsize is None:
figsize = (max_dist / 2, 6) if legend_off else (max_dist / 2 + 5, 6)
fig, ax = plt.subplots(1, 1, figsize=figsize)
rel_abun, _ = peak.peak_abun(max_radius=max_dist, table=sample_table, use_relative=norm_on_center)
for sample, color, marker in zip(sample_table.columns, color_list, marker_list):
ax.plot(rel_abun.index, rel_abun[sample], marker, color=color, label=sample,
ls='-', alpha=0.5, markeredgewidth=2)
if log_y:
ax.set_yscale('log')
ylim = ax.get_ylim()
# add guide line if applicable
if guidelines is not None:
if not norm_on_center:
logging.warning('Can only add guidelines if peaks are normed on center, skip guidelines')
else:
# assuming a fix error rate per nt, iid on binom
from scipy.stats import binom
if isinstance(guidelines, (float, int)):
err_guild_lines = [guidelines]
if guideline_colors is None:
guideline_colors = Presets.color_tab10(num=len(guidelines))
dist_series = np.arange(max_dist + 1)
for ix, (p, color) in enumerate(zip(guidelines, guideline_colors)):
rv = binom(len(peak.center_seq), p)
pmfs = np.array([rv.pmf(x) for x in dist_series])
pmfs_normed = pmfs / pmfs[0]
ax.plot(dist_series, pmfs_normed,
color=color, ls='--', alpha=(ix + 1) / len(guidelines), label=f'p = {p}')
ax.set_ylim(ylim)
y_label = ''
if norm_on_center:
y_label += ' normed'
y_label += ' counts'
ax.set_ylabel(y_label.title(), fontsize=14)
ax.set_xlabel('Distance to peak center', fontsize=14)
if not legend_off:
ax.legend(loc=[1.02, 0], fontsize=9, frameon=False, ncol=legend_col)
plt.tight_layout()
if save_fig_to:
fig = plt.gcf()
fig.patch.set_facecolor('none')
fig.patch.set_alpha(0)
plt.savefig(save_fig_to, bbox_inches='tight', dpi=300)
return ax
class Presets:
"""Collection of preset colors/markers"""
@staticmethod
def _cycle_list(num, prop_list):
"""Generate a list of properties, cycle if num > len(prop_list)"""
return [prop_list[i % len(prop_list)] for i in range(num)]
@staticmethod
def from_list(prop_list):
from functools import partial
return partial(Presets._cycle_list, prop_list=prop_list)
@staticmethod
def color_cat10(num=5):
colors = [
'#1F77B4',
'#FF7F0E',
'#2CA02C',
'#D62728',
'#9467BD',
'#8C564B',
'#E377C2',
'#7F7F7F',
'#BCBD22',
'#17BECF'
]
return Presets._cycle_list(num, colors)
@staticmethod
def color_tab10(num=5):
colors = [
'#4C78A8',
'#F58518',
'#E45756',
'#72B7B2',
'#54A24B',
'#EECA3B',
'#B279A2',
'#FF9DA6',
'#9D755D',
'#BAB0AC'
]
return Presets._cycle_list(num, colors)
@staticmethod
def color_pastel1(num=5):
colors = [
"#FBB5AE",
"#B3CDE3",
"#CCEBC5",
"#DECBE4",
"#FED9A6",
"#FFFFCC",
"#E5D8BD",
"#FDDAEC",
"#F2F2F2"
]
return Presets._cycle_list(num, colors)
@staticmethod
def markers(num=5, with_line=False):
from math import ceil
full_marker_list = ['o', '^', 's', '+', 'x', 'D', 'v', '1', 'p', 'H']
marker_list = []
for i in range(ceil(num/10)):
marker_list += full_marker_list
if with_line:
return ['-' + marker for marker in marker_list[:num]]
else:
return marker_list[:num]
| [
"matplotlib.pyplot.savefig",
"math.ceil",
"matplotlib.pyplot.gcf",
"logging.warning",
"functools.partial",
"matplotlib.pyplot.tight_layout",
"logging.error",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((4947, 4965), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4963, 4965), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3247), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': 'figsize'}), '(1, 1, figsize=figsize)\n', (3224, 3247), True, 'import matplotlib.pyplot as plt\n'), ((5001, 5010), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5008, 5010), True, 'import matplotlib.pyplot as plt\n'), ((5090, 5144), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_fig_to'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(save_fig_to, bbox_inches='tight', dpi=300)\n", (5101, 5144), True, 'import matplotlib.pyplot as plt\n'), ((5522, 5571), 'functools.partial', 'partial', (['Presets._cycle_list'], {'prop_list': 'prop_list'}), '(Presets._cycle_list, prop_list=prop_list)\n', (5529, 5571), False, 'from functools import partial\n'), ((1946, 1991), 'logging.error', 'logging.error', (['"""Please indicate sample_table"""'], {}), "('Please indicate sample_table')\n", (1959, 1991), False, 'import logging\n'), ((2123, 2186), 'logging.error', 'logging.error', (['"""Please indicate the maximum distance to survey"""'], {}), "('Please indicate the maximum distance to survey')\n", (2136, 2186), False, 'import logging\n'), ((2485, 2596), 'logging.error', 'logging.error', (['"""Error: length of marker_list does not align with the number of valid samples to plot"""'], {}), "(\n 'Error: length of marker_list does not align with the number of valid samples to plot'\n )\n", (2498, 2596), False, 'import logging\n'), ((2853, 2963), 'logging.error', 'logging.error', (['"""Error: length of color_list does not align with the number of valid samples to plot"""'], {}), "(\n 'Error: length of color_list does not align with the number of valid samples to plot'\n )\n", (2866, 2963), False, 'import logging\n'), ((3755, 3849), 'logging.warning', 'logging.warning', (['"""Can only add guidelines if peaks are normed on center, skip guidelines"""'], {}), "(\n 'Can only add guidelines if peaks are normed on center, skip guidelines')\n", (3770, 3849), False, 'import logging\n'), ((4206, 4229), 'numpy.arange', 'np.arange', (['(max_dist + 1)'], {}), '(max_dist + 1)\n', (4215, 4229), True, 'import numpy as np\n'), ((6827, 6841), 'math.ceil', 'ceil', (['(num / 10)'], {}), '(num / 10)\n', (6831, 6841), False, 'from math import ceil\n')] |
import sys
import pdb
import pickle as pkl
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.data
import numpy as np
import networkx as nx
import scipy
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from ogb.nodeproppred import DglNodePropPredDataset
def _load_data(dataset_str):
"""Load data."""
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file(
"data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(
min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(ally) )
idx_val = range(len(ally)-0, len(ally))
# idx_train = range(len(ally)-500)
# idx_val = range(len(ally)-500, len(ally))
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return (adj, features, y_train, y_val, y_test,
train_mask, val_mask, test_mask)
def nontuple_preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
ep = 1e-10
r_inv = np.power(rowsum + ep, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return features
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def nontuple_preprocess_adj(adj):
adj_normalized = normalize_adj(sp.eye(adj.shape[0]) + adj)
# adj_normalized = sp.eye(adj.shape[0]) + normalize_adj(adj)
return adj_normalized.tocsr()
def load_data(dataset, args):
if dataset == "reddit":
return load_reddit(args)
if dataset == "ogbn_arxiv":
return load_ogbn_arxiv(args)
# train_mask, val_mask, test_mask: np.ndarray, [True/False] * node_number
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = \
_load_data(dataset)
# pdb.set_trace()
train_index = np.where(train_mask)[0]
adj_train = adj[train_index, :][:, train_index]
y_train = y_train[train_index]
val_index = np.where(val_mask)[0]
y_val = y_val[val_index]
test_index = np.where(test_mask)[0]
y_test = y_test[test_index]
num_train = adj_train.shape[0]
features = nontuple_preprocess_features(features).todense()
train_features = features[train_index]
norm_adj_train = nontuple_preprocess_adj(adj_train)
norm_adj = nontuple_preprocess_adj(adj)
if dataset == 'pubmed':
norm_adj = 1*sp.diags(np.ones(norm_adj.shape[0])) + norm_adj
norm_adj_train = 1*sp.diags(np.ones(num_train)) + norm_adj_train
return (norm_adj, features, norm_adj_train, train_features,
y_train, y_test, test_index, adj_train)
def load_reddit(args):
dataset = dgl.data.RedditDataset()
g = dataset[0]
node = g.ndata
edge = g.edges()
num_node = len(node['train_mask'])
num_train = torch.sum(node['train_mask'])
feats = node['feat'].detach().numpy()
labels = node['label']
num_labels = len(torch.unique(num_labels))
labels = F.one_hot(labels, num_classes=num_labels).detach().numpy()
""" Get adjacency matrix from edge node pairs"""
row = edge[1].detach().numpy()
col = edge[0].detach().numpy()
dat = np.ones((len(row)))
print("========= Generating adjacency matrix ===========")
adj = csr_matrix((dat, (row, col)), shape=(num_node, num_node))
""" Get training and testing samples index """
train_index = np.arange(num_node)[node['train_mask']]
test_index = np.arange(num_node)[node['test_mask']]
adj_train = adj[train_index, :][:, train_index]
y_train = labels[train_index]
y_test = labels[test_index]
train_features = feats[train_index]
""" Normalize the adjacency matrix """
norm_adj_train = nontuple_preprocess_adj(adj_train)
norm_adj = nontuple_preprocess_adj(adj)
norm_adj = 1*sp.diags(np.ones(norm_adj.shape[0])) + norm_adj
norm_adj_train = 1*sp.diags(np.ones(num_train)) + norm_adj_train
return (norm_adj, feats, norm_adj_train, train_features, y_train,
y_test, test_index, adj_train)
def load_ogbn_arxiv(args):
dataset = DglNodePropPredDataset('ogbn-arxiv')
g, node_labels = dataset[0]
g = dgl.add_reverse_edges(g)
g.ndata['label'] = node_labels[:, 0]
node = g.ndata
edge = g.edges()
feats = g.ndata['feat']
idx_split = dataset.get_idx_split()
train_index = idx_split['train']
valid_index = idx_split['valid']
test_index = idx_split['test']
num_node = feats.shape[0]
num_train = len(train_index)
feats = node['feat'].detach().numpy()
labels = node['label']
labels = F.one_hot(labels, num_classes=40).detach().numpy()
""" Get adjacency matrix from edge node pairs"""
row = edge[1].detach().numpy()
col = edge[0].detach().numpy()
dat = np.ones((len(row)))
print("========= Generating adjacency matrix ===========")
adj = csr_matrix((dat, (row, col)), shape=(num_node, num_node))
if args.remove_degree_one:
adj, sample_ind, row_s, col_s = remove_degree_one(adj, row, col)
dat = np.ones((len(row_s)))
print("========= Generating pruned adjacency matrix ===========")
adj = csr_matrix((dat, (row_s, col_s)), shape=(num_node, num_node))
adj_train = adj[train_index, :][:, train_index]
y_train = labels[train_index]
y_test = labels[test_index]
train_features = feats[train_index]
""" Normalize the adjacency matrix """
norm_adj_train = nontuple_preprocess_adj(adj_train)
norm_adj = nontuple_preprocess_adj(adj)
norm_adj = 1*sp.diags(np.ones(norm_adj.shape[0])) + norm_adj
norm_adj_train = 1*sp.diags(np.ones(num_train)) + norm_adj_train
return (norm_adj, feats, norm_adj_train, train_features, y_train,
y_test, test_index, adj_train)
def remove_degree_one(adj, row, col):
num_remove = 1
ind = np.arange(adj.shape[0])
while num_remove > 0:
dim1 = adj.shape[0]
col_sum = scipy.sparse.csr_matrix.sum(adj, axis = 0)
col_sum = np.array(col_sum)[0]
non_one_ix = np.nonzero(col_sum - 1)[0]
one_ind = np.delete(ind, non_one_ix)
ind = ind[non_one_ix]
adj = adj[non_one_ix,: ]
adj = adj[:, non_one_ix]
num_remove = dim1 - adj.shape[0]
index_remove = np.intersect1d(row, one_ind, return_indices = True)
print(len(index_remove[1]), len(one_ind))
row = np.delete(row, index_remove[1])
col = np.delete(col, index_remove[1])
index_remove = np.intersect1d(col, one_ind, return_indices = True)
row = np.delete(row, index_remove[1])
col = np.delete(col, index_remove[1])
return adj, ind, row, col
def get_batches(train_ind, train_labels, batch_size=64, shuffle=True):
"""
Inputs:
train_ind: np.array
"""
nums = train_ind.shape[0]
if shuffle:
np.random.shuffle(train_ind)
i = 0
while i < nums:
cur_ind = train_ind[i:i + batch_size]
cur_labels = train_labels[cur_ind]
yield cur_ind, cur_labels
i += batch_size
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
class HLoss(nn.Module):
def __init__(self):
super(HLoss, self).__init__()
def forward(self, x):
b = torch.exp(x) * x
b = -1.0 * b.sum(dim=1)
return b
class Entropy_loss(nn.Module):
def __init__(self):
super(Entropy_loss, self).__init__()
def forward(self, x):
probs = F.softmax(x, dim=1)
b = torch.log(probs) * probs
b = -1.0 * b.sum(dim=1)
return b
if __name__ == '__main__':
pdb.set_trace()
adj, features, adj_train, train_features, y_train, y_test, test_index = \
load_data('cora')
pdb.set_trace()
| [
"torch.from_numpy",
"torch.exp",
"numpy.array",
"torch.sum",
"dgl.data.RedditDataset",
"torch.nn.functional.softmax",
"numpy.arange",
"networkx.from_dict_of_lists",
"torch.unique",
"scipy.sparse.eye",
"numpy.where",
"numpy.delete",
"numpy.sort",
"numpy.vstack",
"scipy.sparse.diags",
"s... | [((1223, 1248), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (1230, 1248), True, 'import numpy as np\n'), ((2043, 2064), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (2052, 2064), True, 'import numpy as np\n'), ((2511, 2533), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (2519, 2533), True, 'import numpy as np\n'), ((2548, 2570), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (2556, 2570), True, 'import numpy as np\n'), ((2585, 2607), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (2593, 2607), True, 'import numpy as np\n'), ((3126, 3141), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (3134, 3141), True, 'import scipy.sparse as sp\n'), ((3289, 3307), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (3302, 3307), True, 'import scipy.sparse as sp\n'), ((3455, 3475), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (3463, 3475), True, 'import scipy.sparse as sp\n'), ((4959, 4983), 'dgl.data.RedditDataset', 'dgl.data.RedditDataset', ([], {}), '()\n', (4981, 4983), False, 'import dgl\n'), ((5098, 5127), 'torch.sum', 'torch.sum', (["node['train_mask']"], {}), "(node['train_mask'])\n", (5107, 5127), False, 'import torch\n'), ((5543, 5600), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(dat, (row, col))'], {'shape': '(num_node, num_node)'}), '((dat, (row, col)), shape=(num_node, num_node))\n', (5553, 5600), False, 'from scipy.sparse import csr_matrix\n'), ((6367, 6403), 'ogb.nodeproppred.DglNodePropPredDataset', 'DglNodePropPredDataset', (['"""ogbn-arxiv"""'], {}), "('ogbn-arxiv')\n", (6389, 6403), False, 'from ogb.nodeproppred import DglNodePropPredDataset\n'), ((6444, 6468), 'dgl.add_reverse_edges', 'dgl.add_reverse_edges', (['g'], {}), '(g)\n', (6465, 6468), False, 'import dgl\n'), ((7156, 7213), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(dat, (row, col))'], {'shape': '(num_node, num_node)'}), '((dat, (row, col)), shape=(num_node, num_node))\n', (7166, 7213), False, 'from scipy.sparse import csr_matrix\n'), ((8145, 8168), 'numpy.arange', 'np.arange', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (8154, 8168), True, 'import numpy as np\n'), ((9836, 9868), 'torch.from_numpy', 'torch.from_numpy', (['sparse_mx.data'], {}), '(sparse_mx.data)\n', (9852, 9868), False, 'import torch\n'), ((9881, 9908), 'torch.Size', 'torch.Size', (['sparse_mx.shape'], {}), '(sparse_mx.shape)\n', (9891, 9908), False, 'import torch\n'), ((9920, 9968), 'torch.sparse.FloatTensor', 'torch.sparse.FloatTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (9944, 9968), False, 'import torch\n'), ((10447, 10462), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10460, 10462), False, 'import pdb\n'), ((10571, 10586), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10584, 10586), False, 'import pdb\n'), ((619, 630), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (627, 630), True, 'import numpy as np\n'), ((668, 697), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (676, 697), True, 'import numpy as np\n'), ((1999, 2027), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (2020, 2027), True, 'import networkx as nx\n'), ((3088, 3103), 'numpy.isinf', 'np.isinf', (['r_inv'], {}), '(r_inv)\n', (3096, 3103), True, 'import numpy as np\n'), ((3407, 3427), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (3415, 3427), True, 'import numpy as np\n'), ((4136, 4156), 'numpy.where', 'np.where', (['train_mask'], {}), '(train_mask)\n', (4144, 4156), True, 'import numpy as np\n'), ((4263, 4281), 'numpy.where', 'np.where', (['val_mask'], {}), '(val_mask)\n', (4271, 4281), True, 'import numpy as np\n'), ((4331, 4350), 'numpy.where', 'np.where', (['test_mask'], {}), '(test_mask)\n', (4339, 4350), True, 'import numpy as np\n'), ((5219, 5243), 'torch.unique', 'torch.unique', (['num_labels'], {}), '(num_labels)\n', (5231, 5243), False, 'import torch\n'), ((5670, 5689), 'numpy.arange', 'np.arange', (['num_node'], {}), '(num_node)\n', (5679, 5689), True, 'import numpy as np\n'), ((5727, 5746), 'numpy.arange', 'np.arange', (['num_node'], {}), '(num_node)\n', (5736, 5746), True, 'import numpy as np\n'), ((7443, 7504), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(dat, (row_s, col_s))'], {'shape': '(num_node, num_node)'}), '((dat, (row_s, col_s)), shape=(num_node, num_node))\n', (7453, 7504), False, 'from scipy.sparse import csr_matrix\n'), ((8241, 8281), 'scipy.sparse.csr_matrix.sum', 'scipy.sparse.csr_matrix.sum', (['adj'], {'axis': '(0)'}), '(adj, axis=0)\n', (8268, 8281), False, 'import scipy\n'), ((8389, 8415), 'numpy.delete', 'np.delete', (['ind', 'non_one_ix'], {}), '(ind, non_one_ix)\n', (8398, 8415), True, 'import numpy as np\n'), ((8585, 8634), 'numpy.intersect1d', 'np.intersect1d', (['row', 'one_ind'], {'return_indices': '(True)'}), '(row, one_ind, return_indices=True)\n', (8599, 8634), True, 'import numpy as np\n'), ((8701, 8732), 'numpy.delete', 'np.delete', (['row', 'index_remove[1]'], {}), '(row, index_remove[1])\n', (8710, 8732), True, 'import numpy as np\n'), ((8747, 8778), 'numpy.delete', 'np.delete', (['col', 'index_remove[1]'], {}), '(col, index_remove[1])\n', (8756, 8778), True, 'import numpy as np\n'), ((8811, 8860), 'numpy.intersect1d', 'np.intersect1d', (['col', 'one_ind'], {'return_indices': '(True)'}), '(col, one_ind, return_indices=True)\n', (8825, 8860), True, 'import numpy as np\n'), ((8877, 8908), 'numpy.delete', 'np.delete', (['row', 'index_remove[1]'], {}), '(row, index_remove[1])\n', (8886, 8908), True, 'import numpy as np\n'), ((8923, 8954), 'numpy.delete', 'np.delete', (['col', 'index_remove[1]'], {}), '(col, index_remove[1])\n', (8932, 8954), True, 'import numpy as np\n'), ((9169, 9197), 'numpy.random.shuffle', 'np.random.shuffle', (['train_ind'], {}), '(train_ind)\n', (9186, 9197), True, 'import numpy as np\n'), ((10307, 10326), 'torch.nn.functional.softmax', 'F.softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (10316, 10326), True, 'import torch.nn.functional as F\n'), ((1875, 1896), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (1884, 1896), True, 'import scipy.sparse as sp\n'), ((3042, 3067), 'numpy.power', 'np.power', (['(rowsum + ep)', '(-1)'], {}), '(rowsum + ep, -1)\n', (3050, 3067), True, 'import numpy as np\n'), ((3359, 3381), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (3367, 3381), True, 'import numpy as np\n'), ((3622, 3642), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (3628, 3642), True, 'import scipy.sparse as sp\n'), ((8302, 8319), 'numpy.array', 'np.array', (['col_sum'], {}), '(col_sum)\n', (8310, 8319), True, 'import numpy as np\n'), ((8344, 8367), 'numpy.nonzero', 'np.nonzero', (['(col_sum - 1)'], {}), '(col_sum - 1)\n', (8354, 8367), True, 'import numpy as np\n'), ((10096, 10108), 'torch.exp', 'torch.exp', (['x'], {}), '(x)\n', (10105, 10108), False, 'import torch\n'), ((10339, 10355), 'torch.log', 'torch.log', (['probs'], {}), '(probs)\n', (10348, 10355), False, 'import torch\n'), ((6102, 6128), 'numpy.ones', 'np.ones', (['norm_adj.shape[0]'], {}), '(norm_adj.shape[0])\n', (6109, 6128), True, 'import numpy as np\n'), ((6173, 6191), 'numpy.ones', 'np.ones', (['num_train'], {}), '(num_train)\n', (6180, 6191), True, 'import numpy as np\n'), ((7853, 7879), 'numpy.ones', 'np.ones', (['norm_adj.shape[0]'], {}), '(norm_adj.shape[0])\n', (7860, 7879), True, 'import numpy as np\n'), ((7924, 7942), 'numpy.ones', 'np.ones', (['num_train'], {}), '(num_train)\n', (7931, 7942), True, 'import numpy as np\n'), ((9763, 9804), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (9772, 9804), True, 'import numpy as np\n'), ((959, 989), 'pickle.load', 'pkl.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (967, 989), True, 'import pickle as pkl\n'), ((1040, 1051), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1048, 1051), True, 'import pickle as pkl\n'), ((4690, 4716), 'numpy.ones', 'np.ones', (['norm_adj.shape[0]'], {}), '(norm_adj.shape[0])\n', (4697, 4716), True, 'import numpy as np\n'), ((4765, 4783), 'numpy.ones', 'np.ones', (['num_train'], {}), '(num_train)\n', (4772, 4783), True, 'import numpy as np\n'), ((5258, 5299), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels'], {'num_classes': 'num_labels'}), '(labels, num_classes=num_labels)\n', (5267, 5299), True, 'import torch.nn.functional as F\n'), ((6879, 6912), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels'], {'num_classes': '(40)'}), '(labels, num_classes=40)\n', (6888, 6912), True, 'import torch.nn.functional as F\n')] |
import cv2
import numpy as np
import keras.models
import glob
from datetime import datetime
from keras.models import model_from_json
'''
@author:<NAME>
# of classes Based on "Object dataset" in Modules
0: Ball
1: Cube
2: Cylinder
3: Hollow Cube
4: Cross
5: Triangle
6: Star
'''
PATH_TEST = "../image_dataset_keras_shape/"
VIDEO_INFERENCE = 1
IMG_INFERNECE = 0
std = 58.363087
mean = 85.69786
#model_shape = keras.models.load_model('saved_models/keras_RAS_model_shape_4.h5')
# Load trained CNN model
json_file = open('saved_models/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model_shape = model_from_json(loaded_model_json)
model_shape.load_weights('saved_models/model.h5')
#model_shape = keras.models.load_model('saved_models/model.h5')
shape_class = ['Ball', 'Cube', 'Cylinder', 'Hollow Cube', 'Cross', 'Triangle', 'Star' ]
if VIDEO_INFERENCE:
cap = cv2.VideoCapture('../ras_labeling/vid2.mp4')
#cap = cv2.VideoCapture(0)
while cap.isOpened():
a = datetime.now()
ret, image = cap.read()
if ret:
input_img = []
input_img.append(cv2.resize(image, (32,32)))
input_img_clone = cv2.resize(image, (32,32))
input_img = np.array(input_img)
input_img = input_img.astype('float32')
input_img = (input_img-mean)/(std+1e-7)
prediction = model_shape.predict(input_img)
print(shape_class[np.argmax(prediction)])
cv2.imshow('image', image)
cv2.waitKey(10)
b = datetime.now()
c = b - a
fps = 1.0/(c.total_seconds())
print('## FPS: ' + str(fps))
print('')
elif IMG_INFERNECE:
try:
# while True:
# label = np.random.randint(0,7)
# if label == 0:
# dirname = 'Ball'
# elif label == 1:
# dirname = 'Cube'
# elif label == 2:
# dirname = 'Cylinder'
# elif label == 3:
# dirname = 'Hollow Cube'
# elif label == 4:
# dirname = 'Cross'
# elif label == 5:
# dirname = 'Triangle'
# elif label == 6:
# dirname = 'Star'
for file in glob.glob('../CROPPED_DATASET'+ "/*.jpg"):
image = cv2.imread(file)
input_img = []
input_img.append(cv2.resize(image, (32,32)))
input_img_clone = cv2.resize(image, (32,32))
input_img = np.array(input_img)
input_img = input_img.astype('float32')
input_img = (input_img-mean)/(std+1e-7)
#input_img = (input_img-mean)/(std+1e-7)
prediction = model_shape.predict(input_img)
#print('Actual: ' + str(dirname) + ' detected: ' + shape_class[np.argmax(prediction)])
print('detected: ' + shape_class[np.argmax(prediction)])
cv2.imshow('image', cv2.resize(input_img_clone, (100,100)))
cv2.waitKey(3000)
#cv2.imshow('image', image)
#cv2.waitKey(0)
except KeyboardInterrupt:
pass
| [
"numpy.argmax",
"cv2.imshow",
"keras.models.model_from_json",
"datetime.datetime.now",
"numpy.array",
"cv2.VideoCapture",
"cv2.resize",
"cv2.waitKey",
"glob.glob",
"cv2.imread"
] | [((631, 665), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (646, 665), False, 'from keras.models import model_from_json\n'), ((904, 948), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../ras_labeling/vid2.mp4"""'], {}), "('../ras_labeling/vid2.mp4')\n", (920, 948), False, 'import cv2\n'), ((1019, 1033), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1031, 1033), False, 'from datetime import datetime\n'), ((1198, 1225), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (1208, 1225), False, 'import cv2\n'), ((1262, 1281), 'numpy.array', 'np.array', (['input_img'], {}), '(input_img)\n', (1270, 1281), True, 'import numpy as np\n'), ((1523, 1549), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (1533, 1549), False, 'import cv2\n'), ((1562, 1577), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1573, 1577), False, 'import cv2\n'), ((1595, 1609), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1607, 1609), False, 'from datetime import datetime\n'), ((2339, 2381), 'glob.glob', 'glob.glob', (["('../CROPPED_DATASET' + '/*.jpg')"], {}), "('../CROPPED_DATASET' + '/*.jpg')\n", (2348, 2381), False, 'import glob\n'), ((1140, 1167), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (1150, 1167), False, 'import cv2\n'), ((2402, 2418), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (2412, 2418), False, 'import cv2\n'), ((2535, 2562), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (2545, 2562), False, 'import cv2\n'), ((2586, 2605), 'numpy.array', 'np.array', (['input_img'], {}), '(input_img)\n', (2594, 2605), True, 'import numpy as np\n'), ((3091, 3108), 'cv2.waitKey', 'cv2.waitKey', (['(3000)'], {}), '(3000)\n', (3102, 3108), False, 'import cv2\n'), ((1486, 1507), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1495, 1507), True, 'import numpy as np\n'), ((2476, 2503), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (2486, 2503), False, 'import cv2\n'), ((3039, 3078), 'cv2.resize', 'cv2.resize', (['input_img_clone', '(100, 100)'], {}), '(input_img_clone, (100, 100))\n', (3049, 3078), False, 'import cv2\n'), ((2983, 3004), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (2992, 3004), True, 'import numpy as np\n')] |
from keras.callbacks import ModelCheckpoint
# 引入Tensorboard
from keras.callbacks import TensorBoard
from keras.models import Model, load_model, Sequential
from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D
from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape
from keras.optimizers import Adam
from audio_data import graph_spectrogram
import numpy as np
Y = np.load('train_dir/Y.npy')
Y_test = np.load('train_dir/Y_test.npy')
# number of frequncies in fourier decomposition
freq_n = 101
# number of samples in the audio clip
sample_n = 1998
Y_FIX = Y.swapaxes(1,2)
Y_test_FIX = Y_test.swapaxes(1,2)
train_dir = "train_dir"
# Y = Y.reshape(1000,1375,4)
# Y_test = Y_test.reshape(100,1375,4)
def load_training_data(train_dir,num_train=1000,num_test=100):
X = np.zeros((num_train,freq_n,sample_n))
X_test = np.zeros((num_test,freq_n,sample_n))
for i in range(num_train):
X[i,:,:] = graph_spectrogram(train_dir + "/train" + str(i) + ".wav")
for i in range(num_test):
X_test[i,:,:] = graph_spectrogram(train_dir + "/traintest" + str(i) + ".wav")
return X.reshape(num_train,sample_n,freq_n), X_test.reshape(num_test,sample_n,freq_n)
X, X_test = load_training_data(train_dir)
#
X = np.load('train_dir/X.npy')
X_test = np.load('train_dir/X_test.npy')
Ty = 1375 # The number of time steps in the output of the model
def model(input_shape):
"""
Function creating the model's graph in Keras.
Argument:
input_shape -- shape of the model's input data (using Keras conventions)
Returns:
model -- Keras model instance
"""
X_input = Input(shape = input_shape)
# Step 1: CONV layer (≈4 lines) flavor: kernel_size = 15 in coursera
X = Conv1D(196, kernel_size=624, strides=1)(X_input)
X = BatchNormalization()(X)
X = Activation('relu')(X)
X = Dropout(0.8)(X)
# Step 2: First GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X)
X = Dropout(0.8)(X)
X = BatchNormalization()(X)
# Step 3: Second GRU Layer (≈4 lines)
X = GRU(units = 128, return_sequences = True)(X)
X = Dropout(0.8)(X)
X = BatchNormalization()(X)
X = Dropout(0.8)(X)
# Step 4: Time-distributed dense layer (≈1 line)
X = TimeDistributed(Dense(4, activation = "sigmoid"))(X) # time distributed (sigmoid)
model = Model(inputs = X_input, outputs = X)
return model
# num_train = 1000
# Y_NEW = np.zeros((num_train,Ty,4))
# for i in range(num_train):
# spec = Y[i, :, :]
# Y_NEW[i, :, :] = spec.reshape(Ty, 4, order='F')
model = model(input_shape = (sample_n, freq_n))
model.summary()
opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=["accuracy"])
# model.fit(X, Y, batch_size = 5, epochs=10)
model.fit(X, Y_FIX, batch_size = 5, epochs=10)
model.save("audio_wake_model.h5")
# loss, acc = model.evaluate(X_test, Y_test)
# print("Dev set accuracy = ", acc)
#
#
# def detect_triggerword(filename):
# plt.subplot(2, 1, 1)
#
# x = graph_spectrogram(filename)
# # the spectogram outputs (freqs, Tx) and we want (Tx, freqs) to input into the model
# x = x.swapaxes(0,1)
# x = np.expand_dims(x, axis=0)
# predictions = model.predict(x)
#
# plt.subplot(2, 1, 2)
# plt.plot(predictions[0,:,0])
# plt.ylabel('probability')
# plt.show()
# return predictions
| [
"keras.optimizers.Adam",
"numpy.zeros",
"keras.layers.Input",
"keras.models.Model",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.layers.GRU",
"keras.layers.BatchNormalization",
"numpy.load",
"keras.layers.Dropout",
"keras.layers.Conv1D"
] | [((430, 456), 'numpy.load', 'np.load', (['"""train_dir/Y.npy"""'], {}), "('train_dir/Y.npy')\n", (437, 456), True, 'import numpy as np\n'), ((466, 497), 'numpy.load', 'np.load', (['"""train_dir/Y_test.npy"""'], {}), "('train_dir/Y_test.npy')\n", (473, 497), True, 'import numpy as np\n'), ((1298, 1324), 'numpy.load', 'np.load', (['"""train_dir/X.npy"""'], {}), "('train_dir/X.npy')\n", (1305, 1324), True, 'import numpy as np\n'), ((1335, 1366), 'numpy.load', 'np.load', (['"""train_dir/X_test.npy"""'], {}), "('train_dir/X_test.npy')\n", (1342, 1366), True, 'import numpy as np\n'), ((2699, 2752), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'decay': '(0.01)'}), '(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)\n', (2703, 2752), False, 'from keras.optimizers import Adam\n'), ((837, 876), 'numpy.zeros', 'np.zeros', (['(num_train, freq_n, sample_n)'], {}), '((num_train, freq_n, sample_n))\n', (845, 876), True, 'import numpy as np\n'), ((888, 926), 'numpy.zeros', 'np.zeros', (['(num_test, freq_n, sample_n)'], {}), '((num_test, freq_n, sample_n))\n', (896, 926), True, 'import numpy as np\n'), ((1676, 1700), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1681, 1700), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((2407, 2439), 'keras.models.Model', 'Model', ([], {'inputs': 'X_input', 'outputs': 'X'}), '(inputs=X_input, outputs=X)\n', (2412, 2439), False, 'from keras.models import Model, load_model, Sequential\n'), ((1787, 1826), 'keras.layers.Conv1D', 'Conv1D', (['(196)'], {'kernel_size': '(624)', 'strides': '(1)'}), '(196, kernel_size=624, strides=1)\n', (1793, 1826), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((1844, 1864), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1862, 1864), False, 'from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\n'), ((1876, 1894), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1886, 1894), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((1906, 1918), 'keras.layers.Dropout', 'Dropout', (['(0.8)'], {}), '(0.8)\n', (1913, 1918), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((1972, 2009), 'keras.layers.GRU', 'GRU', ([], {'units': '(128)', 'return_sequences': '(True)'}), '(units=128, return_sequences=True)\n', (1975, 2009), False, 'from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\n'), ((2025, 2037), 'keras.layers.Dropout', 'Dropout', (['(0.8)'], {}), '(0.8)\n', (2032, 2037), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((2049, 2069), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2067, 2069), False, 'from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\n'), ((2124, 2161), 'keras.layers.GRU', 'GRU', ([], {'units': '(128)', 'return_sequences': '(True)'}), '(units=128, return_sequences=True)\n', (2127, 2161), False, 'from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\n'), ((2177, 2189), 'keras.layers.Dropout', 'Dropout', (['(0.8)'], {}), '(0.8)\n', (2184, 2189), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((2201, 2221), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2219, 2221), False, 'from keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\n'), ((2233, 2245), 'keras.layers.Dropout', 'Dropout', (['(0.8)'], {}), '(0.8)\n', (2240, 2245), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n'), ((2327, 2357), 'keras.layers.Dense', 'Dense', (['(4)'], {'activation': '"""sigmoid"""'}), "(4, activation='sigmoid')\n", (2332, 2357), False, 'from keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\n')] |
##
## Import libraries, use opencv (cv2) version 3.4.2.16 so you don't run into
## licencing issues
##
import sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
from os import listdir
import cv2
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
#########################################################################
## The script works for rotated test images very well, still has some
## problem with our test images, since the glitter in the plastic part
## reflects the light very different when scanned from another angle...
## Hence some points that used to be black (in the database image) are
## now white in the test image.
## This problem should be solved when using a scanner that measures the
## pervasive light instead of the reflected
#########################################################################
## Code is basically a modification of https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_matcher/py_matcher.html
## and https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.html
# Get the names of all the images in the specified path (database images)
# Change the path so it works for you (Images in google Folder
# Sampledata/PreprocessedScans_SampleData)
# Specify the feature creation algorithm of the opencv library we want to use
# brisk and akaze seem to work nice for our data. I don't know how they work
# yet
########
## TODO: Research the different algorithm and choose a good one
########
alg="surf"
if alg=="sift":
ftAlg = cv2.xfeatures2d.SIFT_create()
if alg=="surf":
ftAlg = cv2.xfeatures2d.SURF_create()
if alg=="brisk":
ftAlg = cv2.BRISK_create()
if alg=="akaze":
ftAlg = cv2.AKAZE_create()
if alg=="kaze":
ftAlg = cv2.KAZE_create()
data_path=r"C:\Users\tobias.grab\IWK_data\test"
# data_path=r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\database'
files=listdir(data_path)
nrOfFiles=len(files)
# imgToMatch = cv2.imread(r'C:\Users\tobias.grab\switchdrive\Schule\datax\projekt\database_test\Nr384_2.jpg',0)
imgToMatch = cv2.imread(r"C:\Users\tobias.grab\IWK_data\test\org_Nr384_2.jpg",0)
(kps1, descs1) = ftAlg.detectAndCompute(imgToMatch, None)
nrOfGoodPerImage=np.zeros([nrOfFiles,1])
DistPerImage=np.zeros([nrOfFiles,1])
bauteilnr=0
#For all images in the database...
for file in files:
img_from_database = cv2.imread(data_path+'\\'+file,0)
(kps2, descs2) = ftAlg.detectAndCompute(img_from_database, None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(descs1,descs2,k=2)
matchesMask = [[0,0] for i in range(len(matches))]
for i,(m,n) in enumerate(matches):
if m.distance < 0.75*n.distance:
matchesMask[i]=[1,0]
nrOfGoodPerImage[bauteilnr]=np.sum(matchesMask[:])
bauteilnr=bauteilnr+1
#Get the best match and display it
print("The best match in the database is", files[np.argmax(nrOfGoodPerImage)])
bestMatch=cv2.imread(data_path+"\\"+files[np.argmax(nrOfGoodPerImage)],0)
fig,(ax0,ax1)=plt.subplots(ncols=1, nrows=2, figsize=(15,8))
ax0.imshow(imgToMatch,cmap='gray')
ax1.imshow(bestMatch,cmap='gray')
| [
"cv2.BFMatcher",
"os.listdir",
"cv2.KAZE_create",
"cv2.xfeatures2d.SURF_create",
"cv2.AKAZE_create",
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"cv2.xfeatures2d.SIFT_create",
"cv2.BRISK_create",
"sys.path.append",
"matplotlib.pyplot.subplots",
"cv2.imread"
] | [((119, 176), 'sys.path.append', 'sys.path.append', (['"""/usr/local/lib/python2.7/site-packages"""'], {}), "('/usr/local/lib/python2.7/site-packages')\n", (134, 176), False, 'import sys\n'), ((2214, 2232), 'os.listdir', 'listdir', (['data_path'], {}), '(data_path)\n', (2221, 2232), False, 'from os import listdir\n'), ((2402, 2474), 'cv2.imread', 'cv2.imread', (['"""C:\\\\Users\\\\tobias.grab\\\\IWK_data\\\\test\\\\org_Nr384_2.jpg"""', '(0)'], {}), "('C:\\\\Users\\\\tobias.grab\\\\IWK_data\\\\test\\\\org_Nr384_2.jpg', 0)\n", (2412, 2474), False, 'import cv2\n'), ((2561, 2585), 'numpy.zeros', 'np.zeros', (['[nrOfFiles, 1]'], {}), '([nrOfFiles, 1])\n', (2569, 2585), True, 'import numpy as np\n'), ((2603, 2627), 'numpy.zeros', 'np.zeros', (['[nrOfFiles, 1]'], {}), '([nrOfFiles, 1])\n', (2611, 2627), True, 'import numpy as np\n'), ((3469, 3516), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(2)', 'figsize': '(15, 8)'}), '(ncols=1, nrows=2, figsize=(15, 8))\n', (3481, 3516), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1780), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (1778, 1780), False, 'import cv2\n'), ((1829, 1858), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', ([], {}), '()\n', (1856, 1858), False, 'import cv2\n'), ((1908, 1926), 'cv2.BRISK_create', 'cv2.BRISK_create', ([], {}), '()\n', (1924, 1926), False, 'import cv2\n'), ((1976, 1994), 'cv2.AKAZE_create', 'cv2.AKAZE_create', ([], {}), '()\n', (1992, 1994), False, 'import cv2\n'), ((2043, 2060), 'cv2.KAZE_create', 'cv2.KAZE_create', ([], {}), '()\n', (2058, 2060), False, 'import cv2\n'), ((2739, 2777), 'cv2.imread', 'cv2.imread', (["(data_path + '\\\\' + file)", '(0)'], {}), "(data_path + '\\\\' + file, 0)\n", (2749, 2777), False, 'import cv2\n'), ((2871, 2886), 'cv2.BFMatcher', 'cv2.BFMatcher', ([], {}), '()\n', (2884, 2886), False, 'import cv2\n'), ((3179, 3201), 'numpy.sum', 'np.sum', (['matchesMask[:]'], {}), '(matchesMask[:])\n', (3185, 3201), True, 'import numpy as np\n'), ((3341, 3368), 'numpy.argmax', 'np.argmax', (['nrOfGoodPerImage'], {}), '(nrOfGoodPerImage)\n', (3350, 3368), True, 'import numpy as np\n'), ((3418, 3445), 'numpy.argmax', 'np.argmax', (['nrOfGoodPerImage'], {}), '(nrOfGoodPerImage)\n', (3427, 3445), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
##Load Training set
xtrain_tacc = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/total_acc_x_train.txt',header = None, delim_whitespace=True))
ytrain_tacc = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/total_acc_y_train.txt',header = None, delim_whitespace=True))
ztrain_tacc = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/total_acc_z_train.txt',header = None, delim_whitespace=True))
xtrain_bacc = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/body_acc_x_train.txt',header = None, delim_whitespace=True))
ytrain_bacc = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/body_acc_y_train.txt',header = None, delim_whitespace=True))
ztrain_bacc = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/body_acc_z_train.txt',header = None, delim_whitespace=True))
xtrain_gyro = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/body_gyro_x_train.txt',header = None, delim_whitespace=True))
ytrain_gyro = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/body_gyro_y_train.txt',header = None, delim_whitespace=True))
ztrain_gyro = np.array(pd.read_csv('UCI HAR Dataset/train/Inertial Signals/body_gyro_z_train.txt',header = None, delim_whitespace=True))
y_train = pd.read_csv('UCI HAR Dataset/train/y_train.txt',header = None, delim_whitespace=True)
#Load test data
xtest_tacc = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/total_acc_x_test.txt',header = None, delim_whitespace=True))
ytest_tacc = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/total_acc_y_test.txt',header = None, delim_whitespace=True))
ztest_tacc = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/total_acc_z_test.txt',header = None, delim_whitespace=True))
xtest_bacc = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/body_acc_x_test.txt',header = None, delim_whitespace=True))
ytest_bacc = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/body_acc_y_test.txt',header = None, delim_whitespace=True))
ztest_bacc = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/body_acc_z_test.txt',header = None, delim_whitespace=True))
xtest_gyro = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/body_gyro_x_test.txt',header = None, delim_whitespace=True))
ytest_gyro = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/body_gyro_y_test.txt',header = None, delim_whitespace=True))
ztest_gyro = np.array(pd.read_csv('UCI HAR Dataset/test/Inertial Signals/body_gyro_z_test.txt',header = None, delim_whitespace=True))
#Test Labels
y_test = pd.read_csv('UCI HAR Dataset/test/y_test.txt',header = None, delim_whitespace=True)
def scaling(train, test):
scaler = MinMaxScaler(feature_range=(-1,1))
scaler = scaler.fit(train.reshape(-1,1))
trainN = scaler.transform(train.reshape(-1,1)).reshape(train.shape)
testN = scaler.transform(test.reshape(-1,1)).reshape(test.shape)
return trainN, testN
#Scale signals
xtrain_taccN, xtest_taccN = scaling(xtrain_tacc, xtest_tacc)
xtrain_baccN, xtest_baccN = scaling(xtrain_bacc, xtest_bacc)
xtrain_gyroN, xtest_gyroN = scaling(xtrain_gyro, xtest_gyro)
ytrain_taccN, ytest_taccN = scaling(ytrain_tacc, ytest_tacc)
ytrain_baccN, ytest_baccN = scaling(ytrain_bacc, ytest_bacc)
ytrain_gyroN, ytest_gyroN = scaling(ytrain_gyro, ytest_gyro)
ztrain_taccN, ztest_taccN = scaling(ztrain_tacc, ztest_tacc)
ztrain_baccN, ztest_baccN = scaling(ztrain_bacc, ztest_bacc)
ztrain_gyroN, ztest_gyroN = scaling(ztrain_gyro, ztest_gyro)
#Combine 9 channels together
x_train = [xtrain_taccN, ytrain_taccN, ztrain_taccN,
xtrain_baccN, ytrain_baccN, ztrain_baccN,
xtrain_gyroN, ytrain_gyroN, ztrain_gyroN]
x_test = [xtest_taccN, ytest_taccN, ztest_taccN,
xtest_baccN, ytest_baccN, ztest_baccN,
xtest_gyroN, ytest_gyroN, ztest_gyroN]
x_train = np.array(np.dstack(x_train),dtype=np.float32)
x_test = np.array(np.dstack(x_test),dtype = np.float32)
#make the label's index zero
y_train = y_train-1
y_test = y_test-1
#TFlite Interpreter for Inference
modelname = input("model name?: ")
modelpath=modelname+'_model.tflite'
interpreter = tf.lite.Interpreter(model_path=modelpath)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
#print(input_details)
x_test=x_test.reshape(2947,128,9,1)
#Run Interpreter in for loop for every test data entry
accurate_count = 0
for i in range(0,2947):
input_shape = input_details[0]['shape']
interpreter.set_tensor(input_details[0]['index'], x_test[i:i+1,:,:])
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
predict_label = np.argmax(output_data) #predicted label from interpreter
#check if prediction is correct
accurate_count += (predict_label == y_test.iloc[i][0])
#Overall accuracy for entire test
accuracy = accurate_count * 1.0 / 2947
print('TensorFlow Lite Float32 model accuracy = %.4f' % accuracy)
| [
"tensorflow.lite.Interpreter",
"numpy.dstack",
"pandas.read_csv",
"numpy.argmax",
"sklearn.preprocessing.MinMaxScaler"
] | [((1473, 1561), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/y_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/y_train.txt', header=None,\n delim_whitespace=True)\n", (1484, 1561), True, 'import pandas as pd\n'), ((2805, 2891), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/y_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/y_test.txt', header=None,\n delim_whitespace=True)\n", (2816, 2891), True, 'import pandas as pd\n'), ((4382, 4423), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'modelpath'}), '(model_path=modelpath)\n', (4401, 4423), True, 'import tensorflow as tf\n'), ((255, 370), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/total_acc_x_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/total_acc_x_train.txt',\n header=None, delim_whitespace=True)\n", (266, 370), True, 'import pandas as pd\n'), ((392, 507), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/total_acc_y_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/total_acc_y_train.txt',\n header=None, delim_whitespace=True)\n", (403, 507), True, 'import pandas as pd\n'), ((529, 644), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/total_acc_z_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/total_acc_z_train.txt',\n header=None, delim_whitespace=True)\n", (540, 644), True, 'import pandas as pd\n'), ((666, 780), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/body_acc_x_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/body_acc_x_train.txt',\n header=None, delim_whitespace=True)\n", (677, 780), True, 'import pandas as pd\n'), ((802, 916), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/body_acc_y_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/body_acc_y_train.txt',\n header=None, delim_whitespace=True)\n", (813, 916), True, 'import pandas as pd\n'), ((938, 1052), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/body_acc_z_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/body_acc_z_train.txt',\n header=None, delim_whitespace=True)\n", (949, 1052), True, 'import pandas as pd\n'), ((1074, 1189), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/body_gyro_x_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/body_gyro_x_train.txt',\n header=None, delim_whitespace=True)\n", (1085, 1189), True, 'import pandas as pd\n'), ((1211, 1326), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/body_gyro_y_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/body_gyro_y_train.txt',\n header=None, delim_whitespace=True)\n", (1222, 1326), True, 'import pandas as pd\n'), ((1348, 1463), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/train/Inertial Signals/body_gyro_z_train.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/train/Inertial Signals/body_gyro_z_train.txt',\n header=None, delim_whitespace=True)\n", (1359, 1463), True, 'import pandas as pd\n'), ((1600, 1713), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/total_acc_x_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/total_acc_x_test.txt',\n header=None, delim_whitespace=True)\n", (1611, 1713), True, 'import pandas as pd\n'), ((1734, 1847), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/total_acc_y_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/total_acc_y_test.txt',\n header=None, delim_whitespace=True)\n", (1745, 1847), True, 'import pandas as pd\n'), ((1868, 1981), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/total_acc_z_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/total_acc_z_test.txt',\n header=None, delim_whitespace=True)\n", (1879, 1981), True, 'import pandas as pd\n'), ((2002, 2114), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/body_acc_x_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/body_acc_x_test.txt',\n header=None, delim_whitespace=True)\n", (2013, 2114), True, 'import pandas as pd\n'), ((2135, 2247), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/body_acc_y_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/body_acc_y_test.txt',\n header=None, delim_whitespace=True)\n", (2146, 2247), True, 'import pandas as pd\n'), ((2268, 2380), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/body_acc_z_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/body_acc_z_test.txt',\n header=None, delim_whitespace=True)\n", (2279, 2380), True, 'import pandas as pd\n'), ((2401, 2514), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/body_gyro_x_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/body_gyro_x_test.txt',\n header=None, delim_whitespace=True)\n", (2412, 2514), True, 'import pandas as pd\n'), ((2535, 2648), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/body_gyro_y_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/body_gyro_y_test.txt',\n header=None, delim_whitespace=True)\n", (2546, 2648), True, 'import pandas as pd\n'), ((2670, 2783), 'pandas.read_csv', 'pd.read_csv', (['"""UCI HAR Dataset/test/Inertial Signals/body_gyro_z_test.txt"""'], {'header': 'None', 'delim_whitespace': '(True)'}), "('UCI HAR Dataset/test/Inertial Signals/body_gyro_z_test.txt',\n header=None, delim_whitespace=True)\n", (2681, 2783), True, 'import pandas as pd\n'), ((2929, 2964), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (2941, 2964), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4099, 4117), 'numpy.dstack', 'np.dstack', (['x_train'], {}), '(x_train)\n', (4108, 4117), True, 'import numpy as np\n'), ((4154, 4171), 'numpy.dstack', 'np.dstack', (['x_test'], {}), '(x_test)\n', (4163, 4171), True, 'import numpy as np\n'), ((4978, 5000), 'numpy.argmax', 'np.argmax', (['output_data'], {}), '(output_data)\n', (4987, 5000), True, 'import numpy as np\n')] |
"""Tests of the core package."""
import numpy as np
import pandas as pd
import pint
import pytest
import xarray as xr
from weldx.constants import Q_, U_
from weldx.core import MathematicalExpression, TimeSeries
from weldx.tests._helpers import get_test_name
from weldx.time import Time
# --------------------------------------------------------------------------------------
# MathematicalExpression
# --------------------------------------------------------------------------------------
class TestMathematicalExpression:
"""Tests the mathematical expression class."""
# Fixtures, aliases and shared variables -------------------------------------------
ME = MathematicalExpression
# unfortunately, fixtures can not be used in a parametrize section
expr_def = "(a + b)**2 + c - d"
params_def = {"a": 2, "c": 3.5}
@staticmethod
@pytest.fixture()
def ma_def() -> MathematicalExpression:
"""Get a default instance for tests."""
return MathematicalExpression(
TestMathematicalExpression.expr_def,
TestMathematicalExpression.params_def,
)
# Helper functions -----------------------------------------------------------------
@staticmethod
def _check_params_and_vars(expression, exp_params, exp_vars):
"""Check parameters and variables of an MathematicalExpression."""
assert expression.num_parameters == len(exp_params)
assert expression.parameters == exp_params
assert expression.num_variables == len(exp_vars)
for variable in exp_vars:
assert variable in expression.get_variable_names()
# test_construction ----------------------------------------------------------------
@pytest.mark.parametrize(
"expression, parameters, exp_vars",
[
("a*b + c/d - e", {"d": 1, "e": 2}, ["a", "b", "c"]),
("a*b + c/d - e", {}, ["a", "b", "c", "d", "e"]),
("a**2 + b - c", {"a": 1, "c": 2}, ["b"]),
],
)
def test_construction(self, expression, parameters, exp_vars):
"""Test the construction."""
expr = MathematicalExpression(expression=expression, parameters=parameters)
self._check_params_and_vars(expr, parameters, exp_vars)
# test_construction_exceptions -----------------------------------------------------
@staticmethod
@pytest.mark.parametrize(
"expression, parameters, exception_type, name",
[
("a*b + c/d - e", {"f": 1}, ValueError, "# parameter not in expression"),
("a*b + c/d - e", 1, ValueError, "# invalid parameter type"),
("a + $b#!==3", {"a": 1}, Exception, "# invalid expression"),
],
ids=get_test_name,
)
def test_construction_exceptions(expression, parameters, exception_type, name):
"""Test the exceptions of the '__init__' method."""
with pytest.raises(exception_type):
MathematicalExpression(expression=expression, parameters=parameters)
# test_set_parameter ---------------------------------------------------------------
def test_set_parameter(self):
"""Test the set_parameter function of the mathematical expression."""
expr = MathematicalExpression("a*b + c/d - e")
# check initial configuration
self._check_params_and_vars(expr, {}, ["a", "b", "c", "d", "e"])
# set first parameters
expr.set_parameter("d", 1)
expr.set_parameter("e", 2)
self._check_params_and_vars(expr, {"d": 1, "e": 2}, ["a", "b", "c"])
# set another parameter and overwrite others
expr.set_parameter("a", 5)
expr.set_parameter("d", 7)
expr.set_parameter("e", -1)
self._check_params_and_vars(expr, {"a": 5, "d": 7, "e": -1}, ["b", "c"])
# test_set_parameter_exceptions ----------------------------------------------------
@staticmethod
@pytest.mark.parametrize(
"name, value, exception_type, test_name",
[
("k", 1, ValueError, "# parameter not in expression"),
(33, 1, ValueError, "# wrong type as name #1"),
({"a": 1}, 1, TypeError, "# wrong type as name #2"),
],
ids=get_test_name,
)
def test_set_parameter_exceptions(ma_def, name, value, exception_type, test_name):
"""Test the exceptions of the 'set_parameter' method."""
with pytest.raises(exception_type):
ma_def.set_parameter(name, value)
# test_comparison ------------------------------------------------------------------
expr_mat_identical = "a**2 + 2*a*b + b**2 + c - d"
expr_different = "a*b + c*d"
params_too_many = {"a": 2, "c": 3.5, "d": 4}
params_wrong_value = {"a": 2, "c": 1.5}
@staticmethod
@pytest.mark.parametrize(
"other, equal, equal_no_params, mat_equal, mat_equal_no_params",
[
(ME(expr_def, params_def), True, True, True, True),
(ME(expr_mat_identical, params_def), False, False, True, True),
(ME(expr_different, params_def), False, False, False, False),
(ME(expr_def, params_too_many), False, True, False, True),
(ME(expr_mat_identical, params_too_many), False, False, False, True),
(ME(expr_different, params_too_many), False, False, False, False),
(ME(expr_def, params_wrong_value), False, True, False, True),
(ME(expr_mat_identical, params_wrong_value), False, False, False, True),
(ME(expr_different, params_wrong_value), False, False, False, False),
(1, False, False, False, False),
("I am not a MathematicalExpression", False, False, False, False),
],
)
def test_comparison(
ma_def,
other,
equal,
equal_no_params,
mat_equal,
mat_equal_no_params,
):
"""Test if another object is equal to the default instance."""
assert (ma_def == other) is equal
assert (ma_def != other) is not equal
assert ma_def.equals(other, False, True) is equal_no_params
assert ma_def.equals(other) is mat_equal
assert ma_def.equals(other, False, False) is mat_equal_no_params
# -----------------------------------------------------
# TODO: Add tests for quantities
@staticmethod
@pytest.mark.parametrize(
"expression, parameters, variables, exp_result",
[
("a*b + c/d - e", {"d": 1, "e": 2}, {"a": 1, "b": 2, "c": 3}, 3),
("(a + b)**2 + c - d", {"a": 3, "d": 2}, {"b": 2, "c": 4}, 27),
(
"a + b",
{"a": np.array([1, 2])},
{"b": np.array([2, 4])},
[3, 6],
),
],
)
def test_evaluation(expression, parameters, variables, exp_result):
"""Test the evaluation of the mathematical function."""
expr = MathematicalExpression(expression=expression, parameters=parameters)
assert np.all(expr.evaluate(**variables) == exp_result)
# test_evaluate_exceptions ---------------------------------------------------------
@staticmethod
@pytest.mark.parametrize(
"variables, exception_type, test_name",
[
({"b": 1, "c": 2, "d": 3}, ValueError, "# input is expression parameter"),
({"b": 1}, Exception, "# not enough values provided"),
],
ids=get_test_name,
)
def test_evaluate_exceptions(ma_def, variables, exception_type, test_name):
"""Test the exceptions of the 'set_parameter' method."""
with pytest.raises(exception_type):
ma_def.evaluate(**variables)
# --------------------------------------------------------------------------------------
# TimeSeries
# --------------------------------------------------------------------------------------
class TestTimeSeries:
"""Tests for the TimeSeries class."""
# Fixtures, aliases and shared variables -------------------------------------------
ME = MathematicalExpression
DTI = pd.DatetimeIndex
TDI = pd.TimedeltaIndex
TS = TimeSeries
time_discrete = pd.TimedeltaIndex([0, 1, 2, 3, 4], unit="s")
value_constant = Q_(1, "m")
values_discrete = Q_(np.array([10, 11, 12, 14, 16]), "mm")
me_expr_str = "a*t + b"
me_params = {"a": Q_(2, "m/s"), "b": Q_(-2, "m")}
me_params_vec = {"a": Q_([2, 0, 1], "m/s"), "b": Q_([-2, 3, 0], "m")}
ts_constant = TimeSeries(value_constant)
ts_disc_step = TimeSeries(values_discrete, time_discrete, "step")
ts_disc_linear = TimeSeries(values_discrete, time_discrete, "linear")
ts_expr = TimeSeries(ME(me_expr_str, me_params))
ts_expr_vec = TimeSeries(ME(me_expr_str, me_params_vec))
# test_construction_discrete -------------------------------------------------------
@staticmethod
@pytest.mark.parametrize(
"data, time, interpolation, shape_exp",
[
(Q_(1, "m"), None, None, (1,)),
(Q_([3, 7, 1], "m"), TDI([0, 1, 2], unit="s"), "step", (3,)),
(Q_([3, 7, 1], ""), Q_([0, 1, 2], "s"), "step", (3,)),
],
)
def test_construction_discrete(data: pint.Quantity, time, interpolation, shape_exp):
"""Test the construction of the TimeSeries class."""
# set expected values
time_exp = time
if isinstance(time_exp, pint.Quantity):
time_exp = pd.TimedeltaIndex(time_exp.m, unit="s")
exp_interpolation = interpolation
if len(data.shape) == 0 and interpolation is None:
exp_interpolation = "step"
# create instance
ts = TimeSeries(data=data, time=time, interpolation=interpolation)
# check
assert np.all(ts.data == data)
assert np.all(ts.time == time_exp)
assert ts.interpolation == exp_interpolation
assert ts.shape == shape_exp
assert data.is_compatible_with(ts.units)
assert np.all(ts.data_array.data == data)
assert ts.data_array.attrs["interpolation"] == exp_interpolation
if time_exp is None:
assert "time" not in ts.data_array
else:
assert np.all(ts.data_array.time == time_exp)
# test_construction_expression -----------------------------------------------------
params_scalar = {"a": Q_(2, "1/s"), "b": Q_(-2, "")}
params_vec = {"a": Q_([[2, 3, 4]], "m/s"), "b": Q_([[-2, 3, 1]], "m")}
@staticmethod
@pytest.mark.parametrize(
"data, shape_exp, unit_exp",
[
(ME("a*t + b", params_scalar), (1,), ""),
(ME("a*t + b", params_vec), (1, 3), "m"),
],
)
def test_construction_expression(data, shape_exp, unit_exp):
"""Test the construction of the TimeSeries class."""
ts = TimeSeries(data=data)
# check
assert ts.data == data
assert ts.time is None
assert ts.interpolation is None
assert ts.shape == shape_exp
assert ts.data_array is None
assert U_(unit_exp).is_compatible_with(ts.units)
# test_init_data_array -------------------------------------------------------------
@staticmethod
@pytest.mark.parametrize(
"data, dims, coords, exception_type",
[
(Q_([1, 2, 3], "m"), "time", dict(time=TDI([1, 2, 3])), None),
(Q_([1, 2, 3], "m"), "a", dict(a=TDI([1, 2, 3])), KeyError),
(Q_([[1, 2]], "m"), ("a", "time"), dict(a=[2], time=TDI([1, 2])), None),
(Q_([1, 2, 3], "m"), "time", None, KeyError),
(Q_([1, 2, 3], "m"), "time", dict(time=[1, 2, 3]), TypeError),
([1, 2, 3], "time", dict(time=TDI([1, 2, 3])), TypeError),
],
)
def test_init_data_array(data, dims, coords, exception_type):
"""Test the `__init__` method with an xarray as data parameter."""
da = xr.DataArray(data=data, dims=dims, coords=coords)
if exception_type is not None:
with pytest.raises(exception_type):
TimeSeries(da)
else:
ts = TimeSeries(da)
assert ts.data_array.dims[0] == "time"
# test_construction_exceptions -----------------------------------------------------
values_def = Q_([5, 7, 3, 6, 8], "m")
time_def = Q_([0, 1, 2, 3, 4], "s")
me_too_many_vars = ME("a*t + b", {})
me_param_units = ME("a*t + b", {"a": Q_(2, "1/s"), "b": Q_(-2, "m")})
@staticmethod
@pytest.mark.parametrize(
"data, time, interpolation, exception_type, test_name",
[
(values_def, time_def, "int", ValueError, "# unknown interpolation"),
(values_def, time_def.magnitude, "step", TypeError, "# invalid time type"),
(me_too_many_vars, None, None, Exception, "# too many free variables"),
(me_param_units, None, None, Exception, "# incompatible parameter units"),
("a string", None, None, TypeError, "# wrong data type"),
],
ids=get_test_name,
)
def test_construction_exceptions(
data, time, interpolation, exception_type, test_name
):
"""Test the exceptions of the 'set_parameter' method."""
with pytest.raises(exception_type):
TimeSeries(data=data, time=time, interpolation=interpolation)
# test_comparison -------------------------------------
time_wrong_values = TDI([0, 1, 2, 3, 5], unit="s")
values_discrete_wrong = Q_(np.array([10, 11, 12, 15, 16]), "mm")
values_unit_wrong = Q_(np.array([10, 11, 12, 14, 16]), "s")
values_unit_prefix_wrong = Q_(np.array([10, 11, 12, 14, 16]), "m")
params_wrong_values = {"a": Q_(2, "1/s"), "b": Q_(-1, "")}
params_wrong_unit = {"a": Q_(2, "g/s"), "b": Q_(-2, "g")}
params_wrong_unit_prefix = {"a": Q_(2, "m/ms"), "b": Q_(-2, "m")}
@staticmethod
@pytest.mark.parametrize(
"ts, ts_other, result_exp",
[
(ts_constant, TS(value_constant), True),
(ts_disc_step, TS(values_discrete, time_discrete, "step"), True),
(ts_expr, TS(ME(me_expr_str, me_params)), True),
(ts_constant, ts_disc_step, False),
(ts_constant, ts_expr, False),
(ts_disc_step, ts_expr, False),
(ts_constant, 1, False),
(ts_disc_step, 1, False),
(ts_expr, 1, False),
(ts_constant, "wrong", False),
(ts_disc_step, "wrong", False),
(ts_expr, "wrong", False),
(ts_constant, TS(Q_(1337, "m")), False),
(ts_constant, TS(Q_(1, "mm")), False),
(ts_constant, TS(Q_(1, "s")), False),
(ts_disc_step, TS(values_discrete, time_wrong_values, "step"), False),
(ts_disc_step, TS(values_discrete_wrong, time_discrete, "step"), False),
(ts_disc_step, TS(values_unit_prefix_wrong, time_discrete, "step"), False),
(ts_disc_step, TS(values_discrete, time_discrete, "linear"), False),
(ts_expr, TS(ME("a*t + 2*b", me_params)), False),
(ts_expr, TS(ME(me_expr_str, params_wrong_values)), False),
(ts_expr, TS(ME(me_expr_str, params_wrong_unit)), False),
(ts_expr, TS(ME(me_expr_str, params_wrong_unit_prefix)), False),
],
)
def test_comparison(ts, ts_other, result_exp):
"""Test the TimeSeries comparison methods."""
assert (ts == ts_other) is result_exp
assert (ts != ts_other) is not result_exp
# test_interp_time -----------------------------------------------------------------
time_single = pd.TimedeltaIndex([2.1], "s")
time_single_q = Q_(2.1, "s")
time_mul = pd.TimedeltaIndex([-3, 0.7, 1.1, 1.9, 2.5, 3, 4, 7], "s")
time_mul_q = Q_([-3, 0.7, 1.1, 1.9, 2.5, 3, 4, 7], "s")
results_exp_vec = [
[-8, 3, -3],
[-0.6, 3, 0.7],
[0.2, 3, 1.1],
[1.8, 3, 1.9],
[3, 3, 2.5],
[4, 3, 3],
[6, 3, 4],
[12, 3, 7],
]
@staticmethod
@pytest.mark.parametrize(
"ts, time, magnitude_exp, unit_exp",
[
(ts_constant, time_single, 1, "m"),
(ts_constant, time_single_q, 1, "m"),
(ts_constant, time_mul, [1, 1, 1, 1, 1, 1, 1, 1], "m"),
(
ts_constant,
time_mul + pd.Timestamp("2020"),
[1, 1, 1, 1, 1, 1, 1, 1],
"m",
),
(ts_constant, time_mul_q, [1, 1, 1, 1, 1, 1, 1, 1], "m"),
(ts_disc_step, time_single, 12, "mm"),
(ts_disc_step, time_single_q, 12, "mm"),
(ts_disc_step, time_mul, [10, 10, 11, 11, 12, 14, 16, 16], "mm"),
(ts_disc_step, time_mul_q, [10, 10, 11, 11, 12, 14, 16, 16], "mm"),
(ts_disc_linear, time_single, 12.2, "mm"),
(ts_disc_linear, time_single_q, 12.2, "mm"),
(ts_disc_linear, time_mul, [10, 10.7, 11.1, 11.9, 13, 14, 16, 16], "mm"),
(ts_disc_linear, time_mul_q, [10, 10.7, 11.1, 11.9, 13, 14, 16, 16], "mm"),
(ts_expr, time_single, 2.2, "m"),
(ts_expr, time_single_q, 2.2, "m"),
(ts_expr, time_mul, [-8, -0.6, 0.2, 1.8, 3, 4, 6, 12], "m"),
(ts_expr, time_mul_q, [-8, -0.6, 0.2, 1.8, 3, 4, 6, 12], "m"),
(ts_expr_vec, time_single, [[2.2, 3, 2.1]], "m"),
(ts_expr_vec, time_single_q, [[2.2, 3, 2.1]], "m"),
(ts_expr_vec, time_mul, results_exp_vec, "m"),
],
)
def test_interp_time(ts, time, magnitude_exp, unit_exp):
"""Test the interp_time function."""
result = ts.interp_time(time)
assert np.all(np.isclose(result.data.magnitude, magnitude_exp))
assert result.units == U_(unit_exp)
time = Time(time)
if len(time) == 1:
assert result.time is None
else:
assert np.all(Time(result.time, result._reference_time) == time)
# test_interp_time_warning ---------------------------------------------------------
@staticmethod
def test_interp_time_warning():
"""Test if a warning is emitted when interpolating already interpolated data."""
ts = TimeSeries(data=Q_([1, 2, 3], "m"), time=Q_([0, 1, 2], "s"))
with pytest.warns(None) as recorded_warnings:
ts_interp = ts.interp_time(Q_([0.25, 0.5, 0.75, 1], "s"))
assert len(recorded_warnings) == 0
with pytest.warns(UserWarning):
ts_interp.interp_time(Q_([0.4, 0.6], "s"))
# test_interp_time_exceptions ------------------------------------------------------
@staticmethod
@pytest.mark.parametrize("ts", [ts_constant, ts_disc_step, ts_disc_linear, ts_expr])
@pytest.mark.parametrize(
"time, exception_type, test_name",
[
# (DTI(["2010-10-10"]), ValueError, "# wrong type #1"),
("a string", TypeError, "# wrong type #2"),
([1, 2, 3], TypeError, "# wrong type #3"),
(1, TypeError, "# wrong type #4"),
(Q_(2, "s/m"), Exception, "# wrong type #5"),
],
ids=get_test_name,
)
def test_interp_time_exceptions(ts, time, exception_type, test_name):
"""Test the exceptions of the 'set_parameter' method."""
with pytest.raises(exception_type):
ts.interp_time(time)
| [
"weldx.core.TimeSeries",
"numpy.all",
"weldx.constants.U_",
"numpy.isclose",
"weldx.time.Time",
"pandas.Timestamp",
"weldx.core.MathematicalExpression",
"pytest.mark.parametrize",
"numpy.array",
"pytest.raises",
"xarray.DataArray",
"pytest.fixture",
"pandas.TimedeltaIndex",
"weldx.constant... | [((870, 886), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (884, 886), False, 'import pytest\n'), ((1740, 1958), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expression, parameters, exp_vars"""', "[('a*b + c/d - e', {'d': 1, 'e': 2}, ['a', 'b', 'c']), ('a*b + c/d - e', {},\n ['a', 'b', 'c', 'd', 'e']), ('a**2 + b - c', {'a': 1, 'c': 2}, ['b'])]"], {}), "('expression, parameters, exp_vars', [(\n 'a*b + c/d - e', {'d': 1, 'e': 2}, ['a', 'b', 'c']), ('a*b + c/d - e',\n {}, ['a', 'b', 'c', 'd', 'e']), ('a**2 + b - c', {'a': 1, 'c': 2}, ['b'])])\n", (1763, 1958), False, 'import pytest\n'), ((2387, 2696), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expression, parameters, exception_type, name"""', "[('a*b + c/d - e', {'f': 1}, ValueError, '# parameter not in expression'),\n ('a*b + c/d - e', 1, ValueError, '# invalid parameter type'), (\n 'a + $b#!==3', {'a': 1}, Exception, '# invalid expression')]"], {'ids': 'get_test_name'}), "('expression, parameters, exception_type, name', [(\n 'a*b + c/d - e', {'f': 1}, ValueError, '# parameter not in expression'),\n ('a*b + c/d - e', 1, ValueError, '# invalid parameter type'), (\n 'a + $b#!==3', {'a': 1}, Exception, '# invalid expression')], ids=\n get_test_name)\n", (2410, 2696), False, 'import pytest\n'), ((3931, 4185), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name, value, exception_type, test_name"""', "[('k', 1, ValueError, '# parameter not in expression'), (33, 1, ValueError,\n '# wrong type as name #1'), ({'a': 1}, 1, TypeError,\n '# wrong type as name #2')]"], {'ids': 'get_test_name'}), "('name, value, exception_type, test_name', [('k', 1,\n ValueError, '# parameter not in expression'), (33, 1, ValueError,\n '# wrong type as name #1'), ({'a': 1}, 1, TypeError,\n '# wrong type as name #2')], ids=get_test_name)\n", (3954, 4185), False, 'import pytest\n'), ((7170, 7392), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""variables, exception_type, test_name"""', "[({'b': 1, 'c': 2, 'd': 3}, ValueError, '# input is expression parameter'),\n ({'b': 1}, Exception, '# not enough values provided')]"], {'ids': 'get_test_name'}), "('variables, exception_type, test_name', [({'b': 1,\n 'c': 2, 'd': 3}, ValueError, '# input is expression parameter'), ({'b':\n 1}, Exception, '# not enough values provided')], ids=get_test_name)\n", (7193, 7392), False, 'import pytest\n'), ((8159, 8203), 'pandas.TimedeltaIndex', 'pd.TimedeltaIndex', (['[0, 1, 2, 3, 4]'], {'unit': '"""s"""'}), "([0, 1, 2, 3, 4], unit='s')\n", (8176, 8203), True, 'import pandas as pd\n'), ((8225, 8235), 'weldx.constants.Q_', 'Q_', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (8227, 8235), False, 'from weldx.constants import Q_, U_\n'), ((8475, 8501), 'weldx.core.TimeSeries', 'TimeSeries', (['value_constant'], {}), '(value_constant)\n', (8485, 8501), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((8521, 8571), 'weldx.core.TimeSeries', 'TimeSeries', (['values_discrete', 'time_discrete', '"""step"""'], {}), "(values_discrete, time_discrete, 'step')\n", (8531, 8571), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((8593, 8645), 'weldx.core.TimeSeries', 'TimeSeries', (['values_discrete', 'time_discrete', '"""linear"""'], {}), "(values_discrete, time_discrete, 'linear')\n", (8603, 8645), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((12259, 12283), 'weldx.constants.Q_', 'Q_', (['[5, 7, 3, 6, 8]', '"""m"""'], {}), "([5, 7, 3, 6, 8], 'm')\n", (12261, 12283), False, 'from weldx.constants import Q_, U_\n'), ((12299, 12323), 'weldx.constants.Q_', 'Q_', (['[0, 1, 2, 3, 4]', '"""s"""'], {}), "([0, 1, 2, 3, 4], 's')\n", (12301, 12323), False, 'from weldx.constants import Q_, U_\n'), ((12463, 12938), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data, time, interpolation, exception_type, test_name"""', "[(values_def, time_def, 'int', ValueError, '# unknown interpolation'), (\n values_def, time_def.magnitude, 'step', TypeError,\n '# invalid time type'), (me_too_many_vars, None, None, Exception,\n '# too many free variables'), (me_param_units, None, None, Exception,\n '# incompatible parameter units'), ('a string', None, None, TypeError,\n '# wrong data type')]"], {'ids': 'get_test_name'}), "('data, time, interpolation, exception_type, test_name',\n [(values_def, time_def, 'int', ValueError, '# unknown interpolation'),\n (values_def, time_def.magnitude, 'step', TypeError,\n '# invalid time type'), (me_too_many_vars, None, None, Exception,\n '# too many free variables'), (me_param_units, None, None, Exception,\n '# incompatible parameter units'), ('a string', None, None, TypeError,\n '# wrong data type')], ids=get_test_name)\n", (12486, 12938), False, 'import pytest\n'), ((15577, 15606), 'pandas.TimedeltaIndex', 'pd.TimedeltaIndex', (['[2.1]', '"""s"""'], {}), "([2.1], 's')\n", (15594, 15606), True, 'import pandas as pd\n'), ((15627, 15639), 'weldx.constants.Q_', 'Q_', (['(2.1)', '"""s"""'], {}), "(2.1, 's')\n", (15629, 15639), False, 'from weldx.constants import Q_, U_\n'), ((15655, 15712), 'pandas.TimedeltaIndex', 'pd.TimedeltaIndex', (['[-3, 0.7, 1.1, 1.9, 2.5, 3, 4, 7]', '"""s"""'], {}), "([-3, 0.7, 1.1, 1.9, 2.5, 3, 4, 7], 's')\n", (15672, 15712), True, 'import pandas as pd\n'), ((15730, 15772), 'weldx.constants.Q_', 'Q_', (['[-3, 0.7, 1.1, 1.9, 2.5, 3, 4, 7]', '"""s"""'], {}), "([-3, 0.7, 1.1, 1.9, 2.5, 3, 4, 7], 's')\n", (15732, 15772), False, 'from weldx.constants import Q_, U_\n'), ((18605, 18692), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ts"""', '[ts_constant, ts_disc_step, ts_disc_linear, ts_expr]'], {}), "('ts', [ts_constant, ts_disc_step, ts_disc_linear,\n ts_expr])\n", (18628, 18692), False, 'import pytest\n'), ((994, 1096), 'weldx.core.MathematicalExpression', 'MathematicalExpression', (['TestMathematicalExpression.expr_def', 'TestMathematicalExpression.params_def'], {}), '(TestMathematicalExpression.expr_def,\n TestMathematicalExpression.params_def)\n', (1016, 1096), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((2139, 2207), 'weldx.core.MathematicalExpression', 'MathematicalExpression', ([], {'expression': 'expression', 'parameters': 'parameters'}), '(expression=expression, parameters=parameters)\n', (2161, 2207), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((3243, 3282), 'weldx.core.MathematicalExpression', 'MathematicalExpression', (['"""a*b + c/d - e"""'], {}), "('a*b + c/d - e')\n", (3265, 3282), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((6922, 6990), 'weldx.core.MathematicalExpression', 'MathematicalExpression', ([], {'expression': 'expression', 'parameters': 'parameters'}), '(expression=expression, parameters=parameters)\n', (6944, 6990), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((8261, 8291), 'numpy.array', 'np.array', (['[10, 11, 12, 14, 16]'], {}), '([10, 11, 12, 14, 16])\n', (8269, 8291), True, 'import numpy as np\n'), ((8349, 8361), 'weldx.constants.Q_', 'Q_', (['(2)', '"""m/s"""'], {}), "(2, 'm/s')\n", (8351, 8361), False, 'from weldx.constants import Q_, U_\n'), ((8368, 8379), 'weldx.constants.Q_', 'Q_', (['(-2)', '"""m"""'], {}), "(-2, 'm')\n", (8370, 8379), False, 'from weldx.constants import Q_, U_\n'), ((8408, 8428), 'weldx.constants.Q_', 'Q_', (['[2, 0, 1]', '"""m/s"""'], {}), "([2, 0, 1], 'm/s')\n", (8410, 8428), False, 'from weldx.constants import Q_, U_\n'), ((8435, 8454), 'weldx.constants.Q_', 'Q_', (['[-2, 3, 0]', '"""m"""'], {}), "([-2, 3, 0], 'm')\n", (8437, 8454), False, 'from weldx.constants import Q_, U_\n'), ((9655, 9716), 'weldx.core.TimeSeries', 'TimeSeries', ([], {'data': 'data', 'time': 'time', 'interpolation': 'interpolation'}), '(data=data, time=time, interpolation=interpolation)\n', (9665, 9716), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((9749, 9772), 'numpy.all', 'np.all', (['(ts.data == data)'], {}), '(ts.data == data)\n', (9755, 9772), True, 'import numpy as np\n'), ((9788, 9815), 'numpy.all', 'np.all', (['(ts.time == time_exp)'], {}), '(ts.time == time_exp)\n', (9794, 9815), True, 'import numpy as np\n'), ((9971, 10005), 'numpy.all', 'np.all', (['(ts.data_array.data == data)'], {}), '(ts.data_array.data == data)\n', (9977, 10005), True, 'import numpy as np\n'), ((10344, 10356), 'weldx.constants.Q_', 'Q_', (['(2)', '"""1/s"""'], {}), "(2, '1/s')\n", (10346, 10356), False, 'from weldx.constants import Q_, U_\n'), ((10363, 10373), 'weldx.constants.Q_', 'Q_', (['(-2)', '""""""'], {}), "(-2, '')\n", (10365, 10373), False, 'from weldx.constants import Q_, U_\n'), ((10398, 10420), 'weldx.constants.Q_', 'Q_', (['[[2, 3, 4]]', '"""m/s"""'], {}), "([[2, 3, 4]], 'm/s')\n", (10400, 10420), False, 'from weldx.constants import Q_, U_\n'), ((10427, 10448), 'weldx.constants.Q_', 'Q_', (['[[-2, 3, 1]]', '"""m"""'], {}), "([[-2, 3, 1]], 'm')\n", (10429, 10448), False, 'from weldx.constants import Q_, U_\n'), ((10811, 10832), 'weldx.core.TimeSeries', 'TimeSeries', ([], {'data': 'data'}), '(data=data)\n', (10821, 10832), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((11886, 11935), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'data', 'dims': 'dims', 'coords': 'coords'}), '(data=data, dims=dims, coords=coords)\n', (11898, 11935), True, 'import xarray as xr\n'), ((13454, 13484), 'numpy.array', 'np.array', (['[10, 11, 12, 15, 16]'], {}), '([10, 11, 12, 15, 16])\n', (13462, 13484), True, 'import numpy as np\n'), ((13519, 13549), 'numpy.array', 'np.array', (['[10, 11, 12, 14, 16]'], {}), '([10, 11, 12, 14, 16])\n', (13527, 13549), True, 'import numpy as np\n'), ((13590, 13620), 'numpy.array', 'np.array', (['[10, 11, 12, 14, 16]'], {}), '([10, 11, 12, 14, 16])\n', (13598, 13620), True, 'import numpy as np\n'), ((13659, 13671), 'weldx.constants.Q_', 'Q_', (['(2)', '"""1/s"""'], {}), "(2, '1/s')\n", (13661, 13671), False, 'from weldx.constants import Q_, U_\n'), ((13678, 13688), 'weldx.constants.Q_', 'Q_', (['(-1)', '""""""'], {}), "(-1, '')\n", (13680, 13688), False, 'from weldx.constants import Q_, U_\n'), ((13720, 13732), 'weldx.constants.Q_', 'Q_', (['(2)', '"""g/s"""'], {}), "(2, 'g/s')\n", (13722, 13732), False, 'from weldx.constants import Q_, U_\n'), ((13739, 13750), 'weldx.constants.Q_', 'Q_', (['(-2)', '"""g"""'], {}), "(-2, 'g')\n", (13741, 13750), False, 'from weldx.constants import Q_, U_\n'), ((13789, 13802), 'weldx.constants.Q_', 'Q_', (['(2)', '"""m/ms"""'], {}), "(2, 'm/ms')\n", (13791, 13802), False, 'from weldx.constants import Q_, U_\n'), ((13809, 13820), 'weldx.constants.Q_', 'Q_', (['(-2)', '"""m"""'], {}), "(-2, 'm')\n", (13811, 13820), False, 'from weldx.constants import Q_, U_\n'), ((17752, 17762), 'weldx.time.Time', 'Time', (['time'], {}), '(time)\n', (17756, 17762), False, 'from weldx.time import Time\n'), ((2913, 2942), 'pytest.raises', 'pytest.raises', (['exception_type'], {}), '(exception_type)\n', (2926, 2942), False, 'import pytest\n'), ((2956, 3024), 'weldx.core.MathematicalExpression', 'MathematicalExpression', ([], {'expression': 'expression', 'parameters': 'parameters'}), '(expression=expression, parameters=parameters)\n', (2978, 3024), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((4417, 4446), 'pytest.raises', 'pytest.raises', (['exception_type'], {}), '(exception_type)\n', (4430, 4446), False, 'import pytest\n'), ((7609, 7638), 'pytest.raises', 'pytest.raises', (['exception_type'], {}), '(exception_type)\n', (7622, 7638), False, 'import pytest\n'), ((9434, 9473), 'pandas.TimedeltaIndex', 'pd.TimedeltaIndex', (['time_exp.m'], {'unit': '"""s"""'}), "(time_exp.m, unit='s')\n", (9451, 9473), True, 'import pandas as pd\n'), ((10188, 10226), 'numpy.all', 'np.all', (['(ts.data_array.time == time_exp)'], {}), '(ts.data_array.time == time_exp)\n', (10194, 10226), True, 'import numpy as np\n'), ((12085, 12099), 'weldx.core.TimeSeries', 'TimeSeries', (['da'], {}), '(da)\n', (12095, 12099), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((12406, 12418), 'weldx.constants.Q_', 'Q_', (['(2)', '"""1/s"""'], {}), "(2, '1/s')\n", (12408, 12418), False, 'from weldx.constants import Q_, U_\n'), ((12425, 12436), 'weldx.constants.Q_', 'Q_', (['(-2)', '"""m"""'], {}), "(-2, 'm')\n", (12427, 12436), False, 'from weldx.constants import Q_, U_\n'), ((13201, 13230), 'pytest.raises', 'pytest.raises', (['exception_type'], {}), '(exception_type)\n', (13214, 13230), False, 'import pytest\n'), ((13244, 13305), 'weldx.core.TimeSeries', 'TimeSeries', ([], {'data': 'data', 'time': 'time', 'interpolation': 'interpolation'}), '(data=data, time=time, interpolation=interpolation)\n', (13254, 13305), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((17642, 17690), 'numpy.isclose', 'np.isclose', (['result.data.magnitude', 'magnitude_exp'], {}), '(result.data.magnitude, magnitude_exp)\n', (17652, 17690), True, 'import numpy as np\n'), ((17723, 17735), 'weldx.constants.U_', 'U_', (['unit_exp'], {}), '(unit_exp)\n', (17725, 17735), False, 'from weldx.constants import Q_, U_\n'), ((18241, 18259), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (18253, 18259), False, 'import pytest\n'), ((18409, 18434), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (18421, 18434), False, 'import pytest\n'), ((19253, 19282), 'pytest.raises', 'pytest.raises', (['exception_type'], {}), '(exception_type)\n', (19266, 19282), False, 'import pytest\n'), ((8970, 8980), 'weldx.constants.Q_', 'Q_', (['(1)', '"""m"""'], {}), "(1, 'm')\n", (8972, 8980), False, 'from weldx.constants import Q_, U_\n'), ((9014, 9032), 'weldx.constants.Q_', 'Q_', (['[3, 7, 1]', '"""m"""'], {}), "([3, 7, 1], 'm')\n", (9016, 9032), False, 'from weldx.constants import Q_, U_\n'), ((9088, 9105), 'weldx.constants.Q_', 'Q_', (['[3, 7, 1]', '""""""'], {}), "([3, 7, 1], '')\n", (9090, 9105), False, 'from weldx.constants import Q_, U_\n'), ((9107, 9125), 'weldx.constants.Q_', 'Q_', (['[0, 1, 2]', '"""s"""'], {}), "([0, 1, 2], 's')\n", (9109, 9125), False, 'from weldx.constants import Q_, U_\n'), ((11041, 11053), 'weldx.constants.U_', 'U_', (['unit_exp'], {}), '(unit_exp)\n', (11043, 11053), False, 'from weldx.constants import Q_, U_\n'), ((11992, 12021), 'pytest.raises', 'pytest.raises', (['exception_type'], {}), '(exception_type)\n', (12005, 12021), False, 'import pytest\n'), ((12039, 12053), 'weldx.core.TimeSeries', 'TimeSeries', (['da'], {}), '(da)\n', (12049, 12053), False, 'from weldx.core import MathematicalExpression, TimeSeries\n'), ((11291, 11309), 'weldx.constants.Q_', 'Q_', (['[1, 2, 3]', '"""m"""'], {}), "([1, 2, 3], 'm')\n", (11293, 11309), False, 'from weldx.constants import Q_, U_\n'), ((11366, 11384), 'weldx.constants.Q_', 'Q_', (['[1, 2, 3]', '"""m"""'], {}), "([1, 2, 3], 'm')\n", (11368, 11384), False, 'from weldx.constants import Q_, U_\n'), ((11439, 11456), 'weldx.constants.Q_', 'Q_', (['[[1, 2]]', '"""m"""'], {}), "([[1, 2]], 'm')\n", (11441, 11456), False, 'from weldx.constants import Q_, U_\n'), ((11524, 11542), 'weldx.constants.Q_', 'Q_', (['[1, 2, 3]', '"""m"""'], {}), "([1, 2, 3], 'm')\n", (11526, 11542), False, 'from weldx.constants import Q_, U_\n'), ((11582, 11600), 'weldx.constants.Q_', 'Q_', (['[1, 2, 3]', '"""m"""'], {}), "([1, 2, 3], 'm')\n", (11584, 11600), False, 'from weldx.constants import Q_, U_\n'), ((18183, 18201), 'weldx.constants.Q_', 'Q_', (['[1, 2, 3]', '"""m"""'], {}), "([1, 2, 3], 'm')\n", (18185, 18201), False, 'from weldx.constants import Q_, U_\n'), ((18208, 18226), 'weldx.constants.Q_', 'Q_', (['[0, 1, 2]', '"""s"""'], {}), "([0, 1, 2], 's')\n", (18210, 18226), False, 'from weldx.constants import Q_, U_\n'), ((18321, 18350), 'weldx.constants.Q_', 'Q_', (['[0.25, 0.5, 0.75, 1]', '"""s"""'], {}), "([0.25, 0.5, 0.75, 1], 's')\n", (18323, 18350), False, 'from weldx.constants import Q_, U_\n'), ((18470, 18489), 'weldx.constants.Q_', 'Q_', (['[0.4, 0.6]', '"""s"""'], {}), "([0.4, 0.6], 's')\n", (18472, 18489), False, 'from weldx.constants import Q_, U_\n'), ((19012, 19024), 'weldx.constants.Q_', 'Q_', (['(2)', '"""s/m"""'], {}), "(2, 's/m')\n", (19014, 19024), False, 'from weldx.constants import Q_, U_\n'), ((6655, 6671), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (6663, 6671), True, 'import numpy as np\n'), ((6696, 6712), 'numpy.array', 'np.array', (['[2, 4]'], {}), '([2, 4])\n', (6704, 6712), True, 'import numpy as np\n'), ((14507, 14520), 'weldx.constants.Q_', 'Q_', (['(1337)', '"""m"""'], {}), "(1337, 'm')\n", (14509, 14520), False, 'from weldx.constants import Q_, U_\n'), ((14560, 14571), 'weldx.constants.Q_', 'Q_', (['(1)', '"""mm"""'], {}), "(1, 'mm')\n", (14562, 14571), False, 'from weldx.constants import Q_, U_\n'), ((14611, 14621), 'weldx.constants.Q_', 'Q_', (['(1)', '"""s"""'], {}), "(1, 's')\n", (14613, 14621), False, 'from weldx.constants import Q_, U_\n'), ((17869, 17910), 'weldx.time.Time', 'Time', (['result.time', 'result._reference_time'], {}), '(result.time, result._reference_time)\n', (17873, 17910), False, 'from weldx.time import Time\n'), ((16313, 16333), 'pandas.Timestamp', 'pd.Timestamp', (['"""2020"""'], {}), "('2020')\n", (16325, 16333), True, 'import pandas as pd\n')] |
"""
This module contains a class for discrete
1-dimensional exponential families. The main
uses for this class are exact (post-selection)
hypothesis tests and confidence intervals.
"""
import numpy as np
import warnings
from ..truncated import find_root
def crit_func(test_statistic, left_cut, right_cut):
"""
A generic critical function for an interval,
with weights at the endpoints.
((test_statistic < CL) + (test_statistic > CR) +
gammaL * (test_statistic == CL) +
gammaR * (test_statistic == CR))
where (CL, gammaL) = left_cut, (CR, gammaR) = right_cut.
Parameters
----------
test_statistic : np.float
Observed value of test statistic.
left_cut : (float, float)
(CL, gammaL): left endpoint and value at exactly the left endpoint (should be in [0,1]).
right_cut : (float, float)
(CR, gammaR): right endpoint and value at exactly the right endpoint (should be in [0,1]).
Returns
-------
decision : np.float
"""
CL, gammaL = left_cut
CR, gammaR = right_cut
value = ((test_statistic < CL) + (test_statistic > CR)) * 1.
if gammaL != 0:
value += gammaL * (test_statistic == CL)
if gammaR != 0:
value += gammaR * (test_statistic == CR)
return value
class discrete_family(object):
def __init__(self, sufficient_stat, weights):
r"""
A discrete 1-dimensional
exponential family with reference measure $\sum_j w_j \delta_{X_j}$
and sufficient statistic `sufficient_stat`. For any $\theta$, the distribution
is
.. math::
P_{\theta} = \sum_{j} e^{\theta X_j - \Lambda(\theta)} w_j \delta_{X_j}
where
.. math::
\Lambda(\theta) = \log \left(\sum_j w_j e^{\theta X_j} \right).
Parameters
----------
sufficient_stat : `np.float((n))`
weights : `np.float(n)`
Notes
-----
The weights are normalized to sum to 1.
"""
xw = np.array(sorted(zip(sufficient_stat, weights)))
self._x = xw[:,0]
self._w = xw[:,1]
self._lw = np.log(xw[:,1])
self._w /= self._w.sum() # make sure they are a pmf
self.n = len(xw)
self._theta = np.nan
@property
def theta(self):
"""
The natural parameter of the family.
"""
return self._theta
@theta.setter
def theta(self, _theta):
if _theta != self._theta:
_thetaX = _theta * self.sufficient_stat + self._lw
_largest = _thetaX.max() - 5 # try to avoid over/under flow, 5 seems arbitrary
_exp_thetaX = np.exp(_thetaX - _largest)
_prod = _exp_thetaX
self._partition = np.sum(_prod)
self._pdf = _prod / self._partition
self._partition *= np.exp(_largest)
self._theta = _theta
@property
def partition(self):
r"""
Partition function at `self.theta`:
.. math::
\sum_j e^{\theta X_j} w_j
"""
if hasattr(self, "_partition"):
return self._partition
@property
def sufficient_stat(self):
"""
Sufficient statistics of the exponential family.
"""
return self._x
@property
def weights(self):
"""
Weights of the exponential family.
"""
return self._w
def pdf(self, theta):
r"""
Density of $P_{\theta}$ with respect to $P_0$.
Parameters
----------
theta : float
Natural parameter.
Returns
-------
pdf : np.float
"""
self.theta = theta # compute partition if necessary
return self._pdf
def cdf(self, theta, x=None, gamma=1):
r"""
The cumulative distribution function of $P_{\theta}$ with
weight `gamma` at `x`
.. math::
P_{\theta}(X < x) + \gamma * P_{\theta}(X = x)
Parameters
----------
theta : float
Natural parameter.
x : float (optional)
Where to evaluate CDF.
gamma : float(optional)
Weight given at `x`.
Returns
-------
cdf : np.float
"""
pdf = self.pdf(theta)
if x is None:
return np.cumsum(pdf) - pdf * (1 - gamma)
else:
tr = np.sum(pdf * (self.sufficient_stat < x))
if x in self.sufficient_stat:
tr += gamma * np.sum(pdf[np.where(self.sufficient_stat == x)])
return tr
def ccdf(self, theta, x=None, gamma=0, return_unnorm=False):
r"""
The complementary cumulative distribution function
(i.e. survival function) of $P_{\theta}$ with
weight `gamma` at `x`
.. math::
P_{\theta}(X > x) + \gamma * P_{\theta}(X = x)
Parameters
----------
theta : float
Natural parameter.
x : float (optional)
Where to evaluate CCDF.
gamma : float(optional)
Weight given at `x`.
Returns
-------
ccdf : np.float
"""
pdf = self.pdf(theta)
if x is None:
return np.cumsum(pdf[::-1])[::-1] - pdf * (1 - gamma)
else:
tr = np.sum(pdf * (self.sufficient_stat > x))
if x in self.sufficient_stat:
tr += gamma * np.sum(pdf[np.where(self.sufficient_stat == x)])
return tr
def E(self, theta, func):
r"""
Expectation of `func` under $P_{\theta}$
Parameters
----------
theta : float
Natural parameter.
func : callable
Assumed to be vectorized.
gamma : float(optional)
Weight given at `x`.
Returns
-------
E : np.float
"""
return (func(self.sufficient_stat) * self.pdf(theta)).sum()
def Var(self, theta, func):
r"""
Variance of `func` under $P_{\theta}$
Parameters
----------
theta : float
Natural parameter.
func : callable
Assumed to be vectorized.
Returns
-------
var : np.float
"""
mu = self.E(theta, func)
return self.E(theta, lambda x: (func(x)-mu)**2)
def Cov(self, theta, func1, func2):
r"""
Covariance of `func1` and `func2` under $P_{\theta}$
Parameters
----------
theta : float
Natural parameter.
func1, func2 : callable
Assumed to be vectorized.
Returns
-------
cov : np.float
"""
mu1 = self.E(theta, func1)
mu2 = self.E(theta, func2)
return self.E(theta, lambda x: (func1(x)-mu1)*(func2(x)-mu2))
def two_sided_acceptance(self, theta, alpha=0.05, tol=1e-6):
r"""
Compute cutoffs of UMPU two-sided test.
Parameters
----------
theta : float
Natural parameter.
alpha : float (optional)
Size of two-sided test.
tol : float
Tolerance for root-finding.
Returns
-------
left_cut : (float, float)
Boundary and randomization weight for left endpoint.
right_cut : (float, float)
Boundary and randomization weight for right endpoint.
"""
if theta != self._theta:
CL = np.max([x for x in self.sufficient_stat if self._critCovFromLeft(theta, (x, 0), alpha) >= 0])
gammaL = find_root(lambda x: self._critCovFromLeft(theta, (CL, x), alpha), 0., 0., 1., tol)
CR, gammaR = self._rightCutFromLeft(theta, (CL, gammaL), alpha)
self._left_cut, self._right_cut = (CL, gammaL), (CR, gammaR)
return self._left_cut, self._right_cut
def two_sided_test(self, theta0, observed, alpha=0.05, randomize=True, auxVar=None):
r"""
Perform UMPU two-sided test.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
decision : np.bool
Is the null hypothesis $H_0:\theta=\theta_0$ rejected?
Notes
-----
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger." It can be passed in,
or chosen at random. If randomize=False, we get a conservative test.
"""
if randomize:
if auxVar is None:
auxVar = np.random.random()
rejLeft = self._test2RejectsLeft(theta0, observed, alpha, auxVar)
rejRight = self._test2RejectsRight(theta0, observed, alpha, auxVar)
else:
rejLeft = self._test2RejectsLeft(theta0, observed, alpha)
rejRight = self._test2RejectsRight(theta0, observed, alpha)
return rejLeft or rejRight
def one_sided_test(self, theta0, observed, alternative='greater', alpha=0.05, randomize=True, auxVar=None):
r"""
Perform UMPU one-sided test.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
observed : float
Observed sufficient statistic.
alternative : str
One of ['greater', 'less']
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
decision : np.bool
Is the null hypothesis $H_0:\theta=\theta_0$ rejected?
Notes
-----
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger." It can be passed in,
or chosen at random. If randomize=False, we get a conservative test.
"""
if alternative not in ['greater', 'less']:
raise ValueError('alternative must be one of ["greater", "less"]')
self.theta = theta0
if randomize:
if auxVar is None:
auxVar = np.random.random()
if alternative == 'greater':
return self.ccdf(theta0, observed, gamma=auxVar) < alpha
else:
return self.cdf(theta0, observed, gamma=auxVar) < alpha
else:
if alternative == 'greater':
return self.ccdf(theta0, observed) < alpha
else:
return self.cdf(theta0, observed) < alpha
def interval(self, observed, alpha=0.05, randomize=True, auxVar=None, tol=1e-6):
"""
Form UMAU confidence interval.
Parameters
----------
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
lower, upper : float
Limits of confidence interval.
"""
if randomize:
if auxVar is None:
auxVar = np.random.random()
upper = self._inter2Upper(observed, auxVar, alpha, tol)
lower = self._inter2Lower(observed, auxVar, alpha, tol)
else:
upper = self._inter2Upper(observed, 1., alpha, tol)
lower = self._inter2Lower(observed, 0., alpha, tol)
return lower, upper
def equal_tailed_interval(self, observed, alpha=0.05, randomize=True, auxVar=None, tol=1e-6):
"""
Form interval by inverting
equal-tailed test with $\alpha/2$ in each tail.
Parameters
----------
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
lower, upper : float
Limits of confidence interval.
"""
mu = self.E(self.theta, lambda x: x)
sigma = np.sqrt(self.Var(self.theta, lambda x: x))
lb = mu - 20 * sigma
ub = mu + 20 * sigma
F = lambda th : self.cdf(th, observed)
L = find_root(F, 1.0 - 0.5 * alpha, lb, ub)
U = find_root(F, 0.5 * alpha, lb, ub)
return L, U
def equal_tailed_test(self, theta0, observed, alpha=0.05):
r"""
Perform UMPU two-sided test.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
observed : float
Observed sufficient statistic.
alpha : float (optional)
Size of two-sided test.
randomize : bool
Perform the randomized test (or conservative test).
auxVar : [None, float]
If randomizing and not None, use this
as the random uniform variate.
Returns
-------
decision : np.bool
Is the null hypothesis $H_0:\theta=\theta_0$ rejected?
Notes
-----
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger." It can be passed in,
or chosen at random. If randomize=False, we get a conservative test.
"""
pval = self.cdf(theta0, observed, gamma=0.5)
return min(pval, 1-pval) < alpha
def one_sided_acceptance(self, theta,
alpha=0.05,
alternative='greater',
tol=1e-6):
r"""
Compute the acceptance region cutoffs of UMPU one-sided test.
TODO: Include randomization?
Parameters
----------
theta : float
Natural parameter.
alpha : float (optional)
Size of two-sided test.
alternative : str
One of ['greater', 'less'].
tol : float
Tolerance for root-finding.
Returns
-------
left_cut : (float, float)
Boundary and randomization weight for left endpoint.
right_cut : (float, float)
Boundary and randomization weight for right endpoint.
"""
if alternative == 'greater':
F = self.ccdf(theta, gamma=0.5)
cutoff = np.min(self.sufficient_stat[F <= alpha])
acceptance = (-np.inf, cutoff)
elif alternative == 'less':
F = self.ccdf(theta, gamma=0.5)
cutoff = np.max(self.sufficient_stat[F <= alpha])
acceptance = (cutoff, np.inf)
else:
raise ValueError("alternative should be one of ['greater', 'less']")
return acceptance
def equal_tailed_acceptance(self, theta0, alpha=0.05):
r"""
Compute the acceptance region cutoffs of
equal-tailed test (without randomization).
Therefore, size may not be exactly $\alpha$.
Parameters
----------
theta0 : float
Natural parameter under null hypothesis.
alpha : float (optional)
Size of two-sided test.
Returns
-------
left_cut : (float, float)
Boundary and randomization weight for left endpoint.
right_cut : (float, float)
Boundary and randomization weight for right endpoint.
"""
F = self.cdf(theta0, gamma=0.5)
Lcutoff = np.max(self.sufficient_stat[F <= 0.5 * alpha])
Rcutoff = np.min(self.sufficient_stat[F >= 1 - 0.5*alpha])
return Lcutoff, Rcutoff
# Private methods
def _rightCutFromLeft(self, theta, leftCut, alpha=0.05):
"""
Given C1, gamma1, choose C2, gamma2 to make E(phi(X)) = alpha
"""
C1, gamma1 = leftCut
alpha1 = self.cdf(theta, C1, gamma1)
if alpha1 >= alpha:
return (np.inf, 1)
else:
alpha2 = alpha - alpha1
P = self.ccdf(theta, gamma=0)
idx = np.nonzero(P < alpha2)[0].min()
cut = self.sufficient_stat[idx]
pdf_term = np.exp(theta * cut) / self.partition * self.weights[idx]
ccdf_term = P[idx]
gamma2 = (alpha2 - ccdf_term) / pdf_term
return (cut, gamma2)
def _leftCutFromRight(self, theta, rightCut, alpha=0.05):
"""
Given C2, gamma2, choose C1, gamma1 to make E(phi(X)) = alpha
"""
C2, gamma2 = rightCut
alpha2 = self.ccdf(theta, C2, gamma2)
if alpha2 >= alpha:
return (-np.inf, 1)
else:
alpha1 = alpha - alpha2
P = self.cdf(theta, gamma=0)
idx = np.nonzero(P < alpha1)[0].max()
cut = self.sufficient_stat[idx]
cdf_term = P[idx]
pdf_term = np.exp(theta * cut) / self.partition * self.weights[idx]
gamma1 = (alpha1 - cdf_term) / pdf_term
return (cut, gamma1)
def _critCovFromLeft(self, theta, leftCut, alpha=0.05):
"""
Covariance of X with phi(X) where phi(X) is the level-alpha test with left cutoff C1, gamma1
"""
C1, gamma1 = leftCut
C2, gamma2 = self._rightCutFromLeft(theta, leftCut, alpha)
if C2 == np.inf:
return -np.inf
else:
return self.Cov(theta, lambda x: x, lambda x: crit_func(x, (C1, gamma1), (C2, gamma2)))
def _critCovFromRight(self, theta, rightCut, alpha=0.05):
"""
Covariance of X with phi(X) where phi(X) is the level-alpha test with right cutoff C2, gamma2
"""
C2, gamma2 = rightCut
C1, gamma1 = self._leftCutFromRight(theta, rightCut, alpha)
if C1 == -np.inf:
return np.inf
else:
return self.Cov(theta, lambda x: x, lambda x: crit_func(x, (C1, gamma1), (C2, gamma2)))
def _test2RejectsLeft(self, theta, observed, alpha=0.05, auxVar=1.):
"""
Returns 1 if x in left lobe of umpu two-sided rejection region
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to "larger" x, so LESS likely to reject
auxVar = 1 is conservative
"""
return self._critCovFromLeft(theta, (observed, auxVar), alpha) > 0
def _test2RejectsRight(self, theta, observed, alpha=0.05, auxVar=0.):
"""
Returns 1 if x in right lobe of umpu two-sided rejection region
We need an auxiliary uniform variable to carry out the randomized test.
Larger auxVar corresponds to x being slightly "larger," so MORE likely to reject.
auxVar = 0 is conservative.
"""
return self._critCovFromRight(theta, (observed, 1.-auxVar), alpha) < 0
def _inter2Upper(self, observed, auxVar, alpha=0.05, tol=1e-6):
"""
upper bound of two-sided umpu interval
"""
if observed < self.sufficient_stat[0] or (observed == self.sufficient_stat[0] and auxVar <= alpha):
return -np.inf # observed, auxVar too small, every test rejects left
if observed > self.sufficient_stat[self.n - 2] or (observed == self.sufficient_stat[self.n - 2] and auxVar == 1.):
return np.inf # observed, auxVar too large, no test rejects left
return find_root(lambda theta: -1*self._test2RejectsLeft(theta, observed, alpha, auxVar), -0.5, -1., 1., tol)
def _inter2Lower(self, observed, auxVar, alpha=0.05, tol=1e-6):
"""
lower bound of two-sided umpu interval
"""
if observed > self.sufficient_stat[self.n-1] or (observed == self.sufficient_stat[self.n-1] and auxVar >= 1.-alpha):
return np.inf # observed, auxVar too large, every test rejects right
if observed < self.sufficient_stat[1] or (observed == self.sufficient_stat[1] and auxVar == 0.):
return -np.inf # observed, auxVar too small, no test rejects right
return find_root(lambda theta: 1.*self._test2RejectsRight(theta, observed, alpha, auxVar), 0.5, -1., 1., tol)
| [
"numpy.random.random",
"numpy.where",
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.nonzero",
"numpy.min",
"numpy.cumsum"
] | [((2242, 2258), 'numpy.log', 'np.log', (['xw[:, 1]'], {}), '(xw[:, 1])\n', (2248, 2258), True, 'import numpy as np\n'), ((17157, 17203), 'numpy.max', 'np.max', (['self.sufficient_stat[F <= 0.5 * alpha]'], {}), '(self.sufficient_stat[F <= 0.5 * alpha])\n', (17163, 17203), True, 'import numpy as np\n'), ((17223, 17273), 'numpy.min', 'np.min', (['self.sufficient_stat[F >= 1 - 0.5 * alpha]'], {}), '(self.sufficient_stat[F >= 1 - 0.5 * alpha])\n', (17229, 17273), True, 'import numpy as np\n'), ((2783, 2809), 'numpy.exp', 'np.exp', (['(_thetaX - _largest)'], {}), '(_thetaX - _largest)\n', (2789, 2809), True, 'import numpy as np\n'), ((2874, 2887), 'numpy.sum', 'np.sum', (['_prod'], {}), '(_prod)\n', (2880, 2887), True, 'import numpy as np\n'), ((2969, 2985), 'numpy.exp', 'np.exp', (['_largest'], {}), '(_largest)\n', (2975, 2985), True, 'import numpy as np\n'), ((4633, 4673), 'numpy.sum', 'np.sum', (['(pdf * (self.sufficient_stat < x))'], {}), '(pdf * (self.sufficient_stat < x))\n', (4639, 4673), True, 'import numpy as np\n'), ((5605, 5645), 'numpy.sum', 'np.sum', (['(pdf * (self.sufficient_stat > x))'], {}), '(pdf * (self.sufficient_stat > x))\n', (5611, 5645), True, 'import numpy as np\n'), ((16004, 16044), 'numpy.min', 'np.min', (['self.sufficient_stat[F <= alpha]'], {}), '(self.sufficient_stat[F <= alpha])\n', (16010, 16044), True, 'import numpy as np\n'), ((4565, 4579), 'numpy.cumsum', 'np.cumsum', (['pdf'], {}), '(pdf)\n', (4574, 4579), True, 'import numpy as np\n'), ((9464, 9482), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (9480, 9482), True, 'import numpy as np\n'), ((11262, 11280), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (11278, 11280), True, 'import numpy as np\n'), ((12448, 12466), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (12464, 12466), True, 'import numpy as np\n'), ((16193, 16233), 'numpy.max', 'np.max', (['self.sufficient_stat[F <= alpha]'], {}), '(self.sufficient_stat[F <= alpha])\n', (16199, 16233), True, 'import numpy as np\n'), ((5525, 5545), 'numpy.cumsum', 'np.cumsum', (['pdf[::-1]'], {}), '(pdf[::-1])\n', (5534, 5545), True, 'import numpy as np\n'), ((17843, 17862), 'numpy.exp', 'np.exp', (['(theta * cut)'], {}), '(theta * cut)\n', (17849, 17862), True, 'import numpy as np\n'), ((18567, 18586), 'numpy.exp', 'np.exp', (['(theta * cut)'], {}), '(theta * cut)\n', (18573, 18586), True, 'import numpy as np\n'), ((17742, 17764), 'numpy.nonzero', 'np.nonzero', (['(P < alpha2)'], {}), '(P < alpha2)\n', (17752, 17764), True, 'import numpy as np\n'), ((18435, 18457), 'numpy.nonzero', 'np.nonzero', (['(P < alpha1)'], {}), '(P < alpha1)\n', (18445, 18457), True, 'import numpy as np\n'), ((4760, 4795), 'numpy.where', 'np.where', (['(self.sufficient_stat == x)'], {}), '(self.sufficient_stat == x)\n', (4768, 4795), True, 'import numpy as np\n'), ((5732, 5767), 'numpy.where', 'np.where', (['(self.sufficient_stat == x)'], {}), '(self.sufficient_stat == x)\n', (5740, 5767), True, 'import numpy as np\n')] |
"""*****************************************************************************************
MIT License
Copyright (c) 2020 <NAME>, <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*****************************************************************************************"""
"""
This module is for generating a coreset given the sensitivities
Author: <NAME>
"""
import numpy as np
import time
import Utils
import copy
import PointSet
class Coreset(object):
"""
################## Coreset ####################
Functions:
- __init__ : instructor
- computeCoreset
- mergeCoreset
"""
def __init__(self, prob_dep_vars=None, is_uniform=False):
"""
:param prob_dep_vars: Problem dependant variables (not used)
:param is_uniform: A boolean variable stating whether uniform or importance sampling, i.e., using sensitivity
(Default value: False)
"""
self.weights = []
self.S = []
self.probability = []
self.is_uniform = is_uniform
self.prob_dep_vars = prob_dep_vars
def computeCoreset(self, P, sensitivity, sampleSize, weights=None, core_seed=1.0):
"""
:param P: A list of point sets.
:param sensitivity: A vector of n entries (number of point sets in P) which describes the sensitivity of
each point set.
:param sampleSize: An integer describing the size of the coreset.
:param weights: A weight vector of the data points (Default value: None)
:return: A subset of P (the datapoints alongside their respected labels), weights for that subset and the
time needed for generating the coreset.
"""
startTime = time.time()
if weights is None:
weights = np.ones((len(P), 1)).flatten()
# Compute the sum of sensitivities.
t = np.sum(sensitivity)
# The probability of a point prob(p_i) = s(p_i) / t
self.probability = sensitivity.flatten() / t
# The number of points is equivalent to the number of rows in P.
n = len(P)
# initialize new seed
np.random.seed()
# Multinomial distribution.
indxs = np.random.choice(n, sampleSize, p=self.probability.flatten())
"""countcorset = 0; importantidxes = 218
for idx in indxs:
if idx <= importantidxes:
countcorset+=1
print ("choosen {} points from state".format(countcorset))"""
# Compute the frequencies of each sampled item.
hist = np.histogram(indxs, bins=range(n))[0].flatten()
indxs = copy.deepcopy(np.nonzero(hist)[0])
# Select the indices.
# S = P[indxs]
S = copy.deepcopy([P[i] for i in indxs])
# Compute the weights of each point: w_i = (number of times i is sampled) / (sampleSize * prob(p_i))
weights = np.asarray(np.multiply(weights[indxs], hist[indxs]), dtype=float).flatten()
# Compute the weights of the coreset
weights = np.multiply(weights, 1.0 / (self.probability[indxs]*sampleSize))
timeTaken = time.time() - startTime
for idx, p_set in enumerate(S):
p_set[0].updateWeight(weights[idx])
return S, timeTaken
| [
"numpy.multiply",
"numpy.sum",
"numpy.random.seed",
"numpy.nonzero",
"copy.deepcopy",
"time.time"
] | [((2798, 2809), 'time.time', 'time.time', ([], {}), '()\n', (2807, 2809), False, 'import time\n'), ((2953, 2972), 'numpy.sum', 'np.sum', (['sensitivity'], {}), '(sensitivity)\n', (2959, 2972), True, 'import numpy as np\n'), ((3228, 3244), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3242, 3244), True, 'import numpy as np\n'), ((3827, 3863), 'copy.deepcopy', 'copy.deepcopy', (['[P[i] for i in indxs]'], {}), '([P[i] for i in indxs])\n', (3840, 3863), False, 'import copy\n'), ((4140, 4206), 'numpy.multiply', 'np.multiply', (['weights', '(1.0 / (self.probability[indxs] * sampleSize))'], {}), '(weights, 1.0 / (self.probability[indxs] * sampleSize))\n', (4151, 4206), True, 'import numpy as np\n'), ((4226, 4237), 'time.time', 'time.time', ([], {}), '()\n', (4235, 4237), False, 'import time\n'), ((3736, 3752), 'numpy.nonzero', 'np.nonzero', (['hist'], {}), '(hist)\n', (3746, 3752), True, 'import numpy as np\n'), ((4008, 4048), 'numpy.multiply', 'np.multiply', (['weights[indxs]', 'hist[indxs]'], {}), '(weights[indxs], hist[indxs])\n', (4019, 4048), True, 'import numpy as np\n')] |
import numpy as np
def vf(f, i=None, j=None):
if i is None:
return "f%03d" % f
elif j is None:
return "f%03d_i%03d" % (f, i)
else:
return "f%03d_i%03d_j%03d" % (f, i, j)
def to_purepy(embG, solved=False):
if solved:
access = lambda obj: obj.x
else:
access = lambda obj: obj.start
tmp = np.copy(embG)
it = np.nditer(tmp, flags=['multi_index', 'refs_ok'])
while not it.finished:
c, i, j = it.multi_index
if not isinstance(embG[c, i, j], int):
tmp[c, i, j] = access(embG[c, i, j])
it.iternext()
# Convert to float array
tmp = tmp.astype(np.float)
return tmp
def sp_init_embG(G, Gnpy, flows):
import networkx as nx
import random
init_embG = np.zeros((len(flows), *Gnpy.shape))
#
# Set the path to shortest path by default
#
for f, (from_id, to_id) in enumerate(flows):
path = random.choice(list(nx.shortest_paths.all_shortest_paths(G, source=from_id, target=to_id)))
for li_idx, node in enumerate(path):
if node == to_id:
break
init_embG[f, node, path[(li_idx+1)]] = 1
return init_embG
def grb_ndarr_py(grb_ndarr):
if type(grb_ndarr) is list:
grb_ndarr = np.array(grb_ndarr)
ndarr = np.zeros(grb_ndarr.shape)
it = np.nditer(grb_ndarr, flags=['multi_index', 'refs_ok'])
while not it.finished:
if isinstance(grb_ndarr[it.multi_index], int) or \
isinstance(grb_ndarr[it.multi_index], float) or \
isinstance(grb_ndarr[it.multi_index], np.int64) or \
isinstance(grb_ndarr[it.multi_index], np.float64):
ndarr[it.multi_index] = grb_ndarr[it.multi_index]
else:
try:
ndarr[it.multi_index] = grb_ndarr[it.multi_index].x
except:
ndarr[it.multi_index] = np.nan
it.iternext()
return ndarr.astype(np.float)
def convert(embG, b_MOS_a, FlowDelay, Delay):
py_embG = to_purepy(embG, solved=True)
py_FlowDelay = np.zeros(FlowDelay.shape)
for i in range(FlowDelay.shape[0]):
py_FlowDelay[i] = FlowDelay[i].x
py_Delay = np.zeros(Delay.shape)
for i in range(Delay.shape[0]):
for j in range(Delay.shape[1]):
try:
py_Delay[i,j] = Delay[i,j].x
except:
py_Delay[i,j] = np.nan
#py_A_UTIL = np.zeros(A_UTIL.shape)
#
# for i in range(A_UTIL.shape[0]):
# for j in range(A_UTIL.shape[1]):
# try:
# py_A_UTIL[i,j] = A_UTIL[i,j].x
# except:
# py_A_UTIL[i,j] = np.nan
MOS_a = []
for a in b_MOS_a:
mos = [np.float(x.x) for mos_idx, x in enumerate(a)]
mos_idx = np.argmax(mos)
MOS_a.append(mos_idx)
return py_embG, np.array(MOS_a), py_FlowDelay, py_Delay
| [
"numpy.copy",
"numpy.float",
"networkx.shortest_paths.all_shortest_paths",
"numpy.nditer",
"numpy.argmax",
"numpy.array",
"numpy.zeros"
] | [((355, 368), 'numpy.copy', 'np.copy', (['embG'], {}), '(embG)\n', (362, 368), True, 'import numpy as np\n'), ((379, 427), 'numpy.nditer', 'np.nditer', (['tmp'], {'flags': "['multi_index', 'refs_ok']"}), "(tmp, flags=['multi_index', 'refs_ok'])\n", (388, 427), True, 'import numpy as np\n'), ((1324, 1349), 'numpy.zeros', 'np.zeros', (['grb_ndarr.shape'], {}), '(grb_ndarr.shape)\n', (1332, 1349), True, 'import numpy as np\n'), ((1360, 1414), 'numpy.nditer', 'np.nditer', (['grb_ndarr'], {'flags': "['multi_index', 'refs_ok']"}), "(grb_ndarr, flags=['multi_index', 'refs_ok'])\n", (1369, 1414), True, 'import numpy as np\n'), ((2087, 2112), 'numpy.zeros', 'np.zeros', (['FlowDelay.shape'], {}), '(FlowDelay.shape)\n', (2095, 2112), True, 'import numpy as np\n'), ((2211, 2232), 'numpy.zeros', 'np.zeros', (['Delay.shape'], {}), '(Delay.shape)\n', (2219, 2232), True, 'import numpy as np\n'), ((1291, 1310), 'numpy.array', 'np.array', (['grb_ndarr'], {}), '(grb_ndarr)\n', (1299, 1310), True, 'import numpy as np\n'), ((2799, 2813), 'numpy.argmax', 'np.argmax', (['mos'], {}), '(mos)\n', (2808, 2813), True, 'import numpy as np\n'), ((2865, 2880), 'numpy.array', 'np.array', (['MOS_a'], {}), '(MOS_a)\n', (2873, 2880), True, 'import numpy as np\n'), ((2735, 2748), 'numpy.float', 'np.float', (['x.x'], {}), '(x.x)\n', (2743, 2748), True, 'import numpy as np\n'), ((961, 1030), 'networkx.shortest_paths.all_shortest_paths', 'nx.shortest_paths.all_shortest_paths', (['G'], {'source': 'from_id', 'target': 'to_id'}), '(G, source=from_id, target=to_id)\n', (997, 1030), True, 'import networkx as nx\n')] |
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
import copy
class CDC:
def __init__(self,
actor,
critic,
p_lr = None,
q_lr = None,
gamma =0.99,
ptau = 0.005,
batch_size = 100,
max_action = None,
num_samples = 15,
eta_coef = 1.0,
action_dims = None,
nu = 1.0,
max_timesteps = 1e6,
lambda_coef = 1.0,
device = 'cpu',
):
'''
actor: actor network
critic: critic network
p_lr: policy learning rate
q_lr: Q learning rate
gamma: reward discounting parameter
ptau: Interpolation factor in polyak averaging
num_samples: number of actions samples
'''
# networks
self.actor = actor
self.critic = critic
#####
# params
#####
self.gamma = gamma
self.ptau = ptau
self.max_action = max_action
self.batch_size = batch_size
self.device = device
self.nu = nu
self.num_samples = num_samples
self.eta_coef = eta_coef
self.action_dims = np.prod(action_dims)
self.total_grad_steps = 0
self.lambda_coef = lambda_coef
#####
# load target models.
#####
self.critic_target = copy.deepcopy(self.critic)
self.number_qs = self.critic.number_of_qs
# Freeze target networks with respect to optimizers (only update via polyak averaging)
for p in self.critic_target.parameters():
p.requires_grad = False
#####
# Optims
#####
self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=p_lr)
self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=q_lr)
print('-----------------------------')
print('Optim Params')
print("Actor:\n ", self.actor_optimizer)
print("Critic:\n ", self.critic_optimizer )
print('-----------------------------')
print('Hyper-parameters and flags details')
print('batch_size: ', self.batch_size)
print('nu: ', self.nu)
print('action_dims: ', self.action_dims)
print('num_samples: ', self.num_samples)
print('number_qs: ', self.number_qs)
print('lambda_coef: ', self.lambda_coef)
print('eta_coef: ', self.eta_coef)
print('-----------------------------')
def select_action(self, obs, deterministic=False):
'''
return action
'''
# since there is a dropout in critic, make sure it is off during eval whenever drop > 0
self.critic.eval()
with torch.no_grad():
# obs [D] ==> [1, D] ==> [num_samples, D]
state = torch.FloatTensor(obs.reshape(1, -1)).repeat(self.num_samples, 1).to(self.device)
# action [num_samples, action_dim]
action = self.actor(state)
# qis [num_qs, B, 1]
qis = self.critic(state, action)
# qis [num_qs, B, 1] ==> q [B, 1]
q = self.nu * qis.min(0)[0] + (1. - self.nu) * qis.max(0)[0]
# now find max value.
ind = q.argmax(0)
return action[ind].cpu().data.numpy().flatten()
def lower_bound_Q_max(self, obs, next_obs, cQs_current=None, ct_loss=None):
'''
This function return loss between Q_pi(s, a~pi) without/or/and Q_b(s, a~b)
cQs_current: [num_qs, B, 1]
obs: [B, D]
next_obs: [B, D]
'''
bsize = obs.shape[0]
a_dim = self.action_dims
# obs_rep [B, D] ==> view [B * num_samples, D]
# obs_rep = torch.repeat_interleave(obs, self.num_samples, 0)
obs_rep = obs.unsqueeze(1).repeat(1, self.num_samples, 1).view(bsize * self.num_samples, -1)
with torch.no_grad():
# get actions from current policy (pi)
curr_actions = self.actor(obs_rep)
# curr_actions [B * num_samples, action_dim]
# obs_rep [B * num_samples, obs_dim]
# qXs: [number_qs, B * num_samples, 1]
# cQs_current: [num_qs, B, 1]
qXs = self.critic(obs_rep, curr_actions)
# qXs: [number_qs, B * num_samples, 1] ==> [number_qs, B , num_samples] ==> max [number_qs, B] ==> [number_qs, B, 1]
max_qXs = qXs.reshape(self.number_qs, self.batch_size, self.num_samples).max(2)[0].reshape(self.number_qs, self.batch_size, 1)
all_losses = []
for k in range(self.number_qs):
for i in range(self.number_qs):
tm_qs = (torch.relu(max_qXs[i] - cQs_current[k])**2).mean()
all_losses.append(tm_qs)
mloss = sum(all_losses)/len(all_losses)
return mloss
def compute_critic_loss(self, obs, next_obs, action, reward, mask):
'''
Compute loss Q:
t = r + gamma * mask *( min_{1,2} Q_target(s', a') - alpha * log pi(s', a'))
'''
with torch.no_grad():
########
# Target update
########
# next_action [B, num_actions]
bsize = next_obs.shape[0]
next_obs_rep = torch.repeat_interleave(next_obs, self.num_samples, 0)
# get next actions using current policy
next_action_rep = self.actor(next_obs_rep)
every_target_Qs = self.critic_target(next_obs_rep, next_action_rep)
# we want to do max_a (min_i Q_i(a))
# every_target_Qs [number_qs, B * samples , 1] ==> [B * samples , 1]
min_target_qs = self.nu * every_target_Qs.min(0)[0] + (1. - self.nu) * every_target_Qs.max(0)[0]
# [B * samples , 1] ==> reshape [B, samples] ==> max [B] ==> reshape [B, 1]
target_Q = min_target_qs.reshape(self.batch_size, -1).max(1)[0].reshape(-1, 1)
# backup: [B, 1]
backup = reward + self.gamma * mask * target_Q
# 2. Get current Q estimates
cQs = self.critic(obs, action)
# 3. Compute critic loss
# even we picked min Q, we still need to backprob to both Qs
all_critic_losses = []
for ci in cQs:
all_critic_losses.append(F.mse_loss(ci, backup))
critic_loss = sum(all_critic_losses)
return critic_loss, cQs
def compute_loss_pi(self, obs, actions):
'''
Compute pi loss
loss = alpha * log_pi - min_{1,2} Q(s, pi)
'''
pi_action, lopprobs = self.actor(obs, gt_actions=actions, with_log_mle=True)
qs_pi = self.critic(obs, pi_action)
# q_pi = qs_pi.min(0)[0]
q_pi = self.nu * qs_pi.min(0)[0] + (1. - self.nu) * qs_pi.max(0)[0]
loss_pi = (- q_pi - self.lambda_coef * lopprobs ).mean()
return loss_pi, lopprobs
def train(self, replay_buffer=None,
iterations=None,
burn_in_passed=False):
'''
inputs:
replay_buffer
outputs:
'''
actor_loss_out = 0.0
critic_loss_out = 0.0
qs_loss_out = 0
lgprob_out = 0
# since there is a dropout in critic and
# make sure it is on whenever drop > 0
self.critic.train()
for it in range(iterations):
########
# Sample replay buffer
########
x, y, u, r, d = replay_buffer.sample(self.batch_size)
obs = torch.FloatTensor(x).to(self.device)
next_obs = torch.FloatTensor(y).to(self.device)
action = torch.FloatTensor(u).to(self.device)
reward = torch.FloatTensor(r).to(self.device)
mask = torch.FloatTensor(1. - d).to(self.device)
########
# policy updates
########
# Freeze Q-networks so you don't waste computational effort
# computing gradients for them during the policy learning step.
for p in self.critic.parameters():
p.requires_grad = False
actor_loss, lgprp = self.compute_loss_pi(obs, action)
actor_loss_out += actor_loss.item()
lgprob_out += lgprp.mean().item()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Unfreeze Q-networks so you can optimize it at next DDPG step.
for p in self.critic.parameters():
p.requires_grad = True
########
# critic updates
########
# 1. Compute critic loss
# even we picked min Q, we still need to backprob to both Qs
critic_loss, ccQs = self.compute_critic_loss(obs, next_obs, action, reward, mask)
critic_loss_out += critic_loss.item()
# 2. now get difference between Qs
qs_loss = self.lower_bound_Q_max(obs, next_obs, cQs_current=ccQs, ct_loss=critic_loss_out)
qs_loss_out += qs_loss.item()
crt_losses = critic_loss + self.eta_coef * qs_loss
# 3. Optimize the critic
self.critic_optimizer.zero_grad()
crt_losses.backward()
self.critic_optimizer.step()
########
# target updates
########
with torch.no_grad():
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
# Use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
target_param.data.mul_((1 - self.ptau))
target_param.data.add_(self.ptau * param.data)
self.total_grad_steps += 1
out = {}
out['lg_loss'] = -lgprob_out/iterations
out['critic_loss'] = critic_loss_out/iterations
out['actor_loss'] = actor_loss_out/iterations
out['qs_loss'] = qs_loss_out/iterations
return out | [
"numpy.prod",
"torch.nn.functional.mse_loss",
"torch.relu",
"copy.deepcopy",
"torch.repeat_interleave",
"torch.no_grad",
"torch.FloatTensor"
] | [((1421, 1441), 'numpy.prod', 'np.prod', (['action_dims'], {}), '(action_dims)\n', (1428, 1441), True, 'import numpy as np\n'), ((1605, 1631), 'copy.deepcopy', 'copy.deepcopy', (['self.critic'], {}), '(self.critic)\n', (1618, 1631), False, 'import copy\n'), ((2947, 2962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2960, 2962), False, 'import torch\n'), ((4116, 4131), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4129, 4131), False, 'import torch\n'), ((5252, 5267), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5265, 5267), False, 'import torch\n'), ((5472, 5526), 'torch.repeat_interleave', 'torch.repeat_interleave', (['next_obs', 'self.num_samples', '(0)'], {}), '(next_obs, self.num_samples, 0)\n', (5495, 5526), False, 'import torch\n'), ((6537, 6559), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['ci', 'backup'], {}), '(ci, backup)\n', (6547, 6559), True, 'import torch.nn.functional as F\n'), ((9660, 9675), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9673, 9675), False, 'import torch\n'), ((7802, 7822), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), '(x)\n', (7819, 7822), False, 'import torch\n'), ((7862, 7882), 'torch.FloatTensor', 'torch.FloatTensor', (['y'], {}), '(y)\n', (7879, 7882), False, 'import torch\n'), ((7920, 7940), 'torch.FloatTensor', 'torch.FloatTensor', (['u'], {}), '(u)\n', (7937, 7940), False, 'import torch\n'), ((7978, 7998), 'torch.FloatTensor', 'torch.FloatTensor', (['r'], {}), '(r)\n', (7995, 7998), False, 'import torch\n'), ((8034, 8060), 'torch.FloatTensor', 'torch.FloatTensor', (['(1.0 - d)'], {}), '(1.0 - d)\n', (8051, 8060), False, 'import torch\n'), ((4861, 4900), 'torch.relu', 'torch.relu', (['(max_qXs[i] - cQs_current[k])'], {}), '(max_qXs[i] - cQs_current[k])\n', (4871, 4900), False, 'import torch\n')] |
def nmf_evaluation(spectrogramSignal, spectrogramData, nsweeplabels, reference_sweeps):
'''
Purpose : evaluate NMF performance in terms of correlation coefficients and RMSE
Introduction
------------
- corr is the Pearson correlation coefficient between spectrogramSignal and spectrogramData
- RMS (root-mean-square) error is the RMS value of the differences between spectrogramSignal and spectrogramData
- NOTE: When estimating Correlation and RMSEnd , computations are based on "straightened/flattened spectrogram data". That is, 2D spectrograms are reshaped into vectors, prior to computing differnces and correlations.
Parameters
----------
spectrogramSignal : numpy array (3D)
- spectrogramSignal contains the spectrograms to be processed. It is a 3 dimensional matrix: number of frequencies x number of time frames x number of spectrograms (e.g., 1000 x 201 x 11)
spectrogramData : numpy array (3D)
- spectrogramData contains the spectrograms that will be used as the reference point. It is a 3 dimensional matrix: number of frequencies x number of time frames x number of spectrograms (e.g., 1000 x 201 x 11)
nsweeplabels : numpy array (1D)
- nsweeplabels are the nSweeps labels that corresponds to all the spectrograms saved in spectrogramData (and should be the same to those in spectrogramSignal as well)
reference_sweeps : integer
- reference_sweeps is the number of sweeps you want to use for the reference in performance evaluation. For example, reference_sweeps = 8000
- For example, when nFrequencies = 1000, nFrames = 201, nSweeps = [100 250 500 1000:1000:8000] and npermutation = 1, the input parameters can be:
- spectrogramSignal is a 3D matrix [1000 x 201 x 11]
- spectrogramData is a 3D matrix [1000 x 201 x 11]
- nsweeplabels is [100 100 100 100 100 250 250 250 250 250 ... 8000 8000 8000 8000 8000]
- reference_sweeps can be say 8000
Returns
-------
CORR : dictionary
- CORR is the correlation coefficient between spectrogramSignal and spectrogramData
'results' : numpy array (2D)
- correlation coefficients. It has a shape of (npermutation, nsweepCondition).
'mean' : numpy array (2D)
- mean value across npermutation. It has a shape of (1, nsweepCondition).
'std' : numpy array (2D)
- standard deviation across npermutation. It has a shape of (1, nsweepCondition).
'se' : numpy array (2D)
- standard error across npermutation. It has a shape of (1, nsweepCondition).
RMSE : dictionary
- RMSE is the RMS (root-mean-square) error between spectrogramSignal and spectrogramData
'results' : numpy array (2D)
- rmse raw data. It has a shape of (npermutation, nsweepCondition).
'mean' : numpy array (2D)
- mean value across npermutation. It has a shape of (1, nsweepCondition).
'std' : numpy array (2D)
- standard deviation across npermutation. It has a shape of (1, nsweepCondition).
'se' : numpy array (2D)
- standard error across npermutation. It has a shape of (1, nsweepCondition).
nsweeplabels_unique : numpy array (1D)
- an array of unique nsweep labels
Diary
-----
2019-09-09 (v01)
- Fuh-Cherng (Fuh) Jeng borrowed this script, originally written in Matlab, from Tzu-Hao (<NAME>
2021-01-24 (v02)
- Fuh rewrote this script in Python and made some minor adjustments
'''
import numpy as np
from ssnmftools import corr_columnwise
# reshape each spectrogram to a 1D array (i.e., flatten each spectrogram)
signal = spectrogramSignal.reshape(-1, spectrogramSignal.shape[2], order='F')
# compute npermutation, nsweeplabels_unique, and nsweepCondition
npermutation = np.sum(nsweeplabels == reference_sweeps)
nsweeplabels_unique = nsweeplabels[npermutation-1::npermutation]
nsweepCondition = len(nsweeplabels_unique)
# create reference data
ref_data = spectrogramData[:, :, nsweeplabels==reference_sweeps].reshape(-1, npermutation, order='F')
ref_data = np.mean(ref_data, axis=1).reshape(ref_data.shape[0],1)
data = np.tile(ref_data, spectrogramData.shape[2])
# compute corr
corr = corr_columnwise(signal, data)
# compute rmse
rmse = np.sqrt(np.mean((signal-data)**2, axis=0))
# reshape rmse and corr into 2D matrices (npermutation x nsweepCondition)
corr = corr.reshape((npermutation, nsweepCondition), order='F')
rmse = rmse.reshape((npermutation, nsweepCondition), order='F')
# save results in RMSE and CORR dictionaries
CORR = {
'results': corr,
'mean': np.mean(corr, axis=0),
'std': np.std(corr, axis=0),
'se': np.std(corr, axis=0) / np.sqrt(npermutation)
}
RMSE = {
'results': rmse,
'mean': np.mean(rmse, axis=0),
'std': np.std(rmse, axis=0),
'se': np.std(rmse, axis=0) / np.sqrt(npermutation)
}
return (CORR, RMSE, nsweeplabels_unique)
| [
"numpy.mean",
"numpy.tile",
"numpy.sqrt",
"ssnmftools.corr_columnwise",
"numpy.sum",
"numpy.std"
] | [((3905, 3945), 'numpy.sum', 'np.sum', (['(nsweeplabels == reference_sweeps)'], {}), '(nsweeplabels == reference_sweeps)\n', (3911, 3945), True, 'import numpy as np\n'), ((4353, 4396), 'numpy.tile', 'np.tile', (['ref_data', 'spectrogramData.shape[2]'], {}), '(ref_data, spectrogramData.shape[2])\n', (4360, 4396), True, 'import numpy as np\n'), ((4434, 4463), 'ssnmftools.corr_columnwise', 'corr_columnwise', (['signal', 'data'], {}), '(signal, data)\n', (4449, 4463), False, 'from ssnmftools import corr_columnwise\n'), ((4503, 4540), 'numpy.mean', 'np.mean', (['((signal - data) ** 2)'], {'axis': '(0)'}), '((signal - data) ** 2, axis=0)\n', (4510, 4540), True, 'import numpy as np\n'), ((4869, 4890), 'numpy.mean', 'np.mean', (['corr'], {'axis': '(0)'}), '(corr, axis=0)\n', (4876, 4890), True, 'import numpy as np\n'), ((4911, 4931), 'numpy.std', 'np.std', (['corr'], {'axis': '(0)'}), '(corr, axis=0)\n', (4917, 4931), True, 'import numpy as np\n'), ((5071, 5092), 'numpy.mean', 'np.mean', (['rmse'], {'axis': '(0)'}), '(rmse, axis=0)\n', (5078, 5092), True, 'import numpy as np\n'), ((5112, 5132), 'numpy.std', 'np.std', (['rmse'], {'axis': '(0)'}), '(rmse, axis=0)\n', (5118, 5132), True, 'import numpy as np\n'), ((4262, 4287), 'numpy.mean', 'np.mean', (['ref_data'], {'axis': '(1)'}), '(ref_data, axis=1)\n', (4269, 4287), True, 'import numpy as np\n'), ((4952, 4972), 'numpy.std', 'np.std', (['corr'], {'axis': '(0)'}), '(corr, axis=0)\n', (4958, 4972), True, 'import numpy as np\n'), ((4975, 4996), 'numpy.sqrt', 'np.sqrt', (['npermutation'], {}), '(npermutation)\n', (4982, 4996), True, 'import numpy as np\n'), ((5153, 5173), 'numpy.std', 'np.std', (['rmse'], {'axis': '(0)'}), '(rmse, axis=0)\n', (5159, 5173), True, 'import numpy as np\n'), ((5176, 5197), 'numpy.sqrt', 'np.sqrt', (['npermutation'], {}), '(npermutation)\n', (5183, 5197), True, 'import numpy as np\n')] |
import colorsys
import numpy as np
from PIL import Image
from sklearn.metrics import precision_recall_curve
from cutils.viz.vizutils import figure2image
class PRCurves:
"""
Generates the plot for the precision recall curve
"""
def __init__(self):
self.data = []
def add(self, gt, pred, legend):
precision, recall, thresholds = precision_recall_curve(gt, pred)
self.data.append([precision, recall, thresholds, legend])
def generate(self):
import matplotlib.pyplot as plt
plt.clf()
lw = 2
N_class = len(self.data)
half = int(np.ceil(N_class / 2.0))
colors_half = (
'b', 'g', 'r', 'c', 'm', 'y', 'k') # [colorsys.hsv_to_rgb(x * 1.0 / half, 0.6, 1) for x in range(half)]
colors = []
colors.extend(colors_half)
colors.extend(colors_half)
lst = []
lst_half1 = ['-' for c in colors_half]
lst_half2 = [':' for c in colors_half]
lst.extend(lst_half1)
lst.extend(lst_half2)
cc = 0
plt.figure(figsize=(6, 6))
l2d = None
for pre, rec, th, legend in self.data:
# roc_auc = auc(fpr, tpr)
legend_disp = legend # + ' ' + str(np.round(roc_auc, 2))
l2d = plt.plot(pre, rec, lw=lw, color=colors[cc], linestyle=lst[cc],
label=legend_disp)
cc += 1
plt.xlabel('recall')
plt.ylabel('precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
# plt.title('ROC curve')
plt.legend(loc="lower right")
im = figure2image(l2d[0].figure)
print ('Size of roc image ' + str(np.shape(im)))
return Image.fromarray(im)
| [
"numpy.shape",
"PIL.Image.fromarray",
"numpy.ceil",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"sklearn.metrics.precision_recall_curve",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.p... | [((369, 401), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['gt', 'pred'], {}), '(gt, pred)\n', (391, 401), False, 'from sklearn.metrics import precision_recall_curve\n'), ((541, 550), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (548, 550), True, 'import matplotlib.pyplot as plt\n'), ((1071, 1097), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1081, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1448), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recall"""'], {}), "('recall')\n", (1438, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1480), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (1467, 1480), True, 'import matplotlib.pyplot as plt\n'), ((1489, 1510), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (1497, 1510), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1539), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (1527, 1539), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1610), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1591, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1625, 1652), 'cutils.viz.vizutils.figure2image', 'figure2image', (['l2d[0].figure'], {}), '(l2d[0].figure)\n', (1637, 1652), False, 'from cutils.viz.vizutils import figure2image\n'), ((1725, 1744), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (1740, 1744), False, 'from PIL import Image\n'), ((619, 641), 'numpy.ceil', 'np.ceil', (['(N_class / 2.0)'], {}), '(N_class / 2.0)\n', (626, 641), True, 'import numpy as np\n'), ((1290, 1376), 'matplotlib.pyplot.plot', 'plt.plot', (['pre', 'rec'], {'lw': 'lw', 'color': 'colors[cc]', 'linestyle': 'lst[cc]', 'label': 'legend_disp'}), '(pre, rec, lw=lw, color=colors[cc], linestyle=lst[cc], label=\n legend_disp)\n', (1298, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1707), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (1703, 1707), True, 'import numpy as np\n')] |
'''
Created on Feb 13, 2012
@author: fmertens
'''
import numpy as np
import nputils
import imgutils
import wavelets
def get_wavelet_obj(w):
if isinstance(w, str):
return wavelets.get_wavelet(w)
if isinstance(w, wavelets.WaveletBase):
return w
raise ValueError("w is not a correct wavelet")
def dwt(signal, wavelet, boundary, level=None, initial_signal=None, axis=None):
'''
Perform a one level discrete wavelet transform.
Result is len k + l + 1 if len(s) = 2k and len(hkd) = 2l
it is len k + l if len(s) = 2k + 1
'''
hkd = get_wavelet_obj(wavelet).get_dec_hk()
gkd = get_wavelet_obj(wavelet).get_dec_gk()
a_conv = nputils.convolve(signal, hkd, boundary, axis=axis)
d_conv = nputils.convolve(signal, gkd, boundary, axis=axis)
a = nputils.downsample(a_conv, 2, oddeven=1, axis=axis)
d = nputils.downsample(d_conv, 2, oddeven=1, axis=axis)
return (a, d)
def dwt_inv(a, d, wavelet, boundary, level=None, axis=None):
'''
Perform a one level inverse discrete wavelet transform.
Result len is always 2 * len(a) - len(hkr) + 1
Warning: if len(s) = 2k + 1, then idwt(dwt(s)) will give one element
too much which will be zero. There is no way to know the
parity of the original signal. It can be safely removed.
For this reason, if len(a) is bigger than len(d) to 1, we strip
this last element
'''
if len(a) == len(d) + 1:
a = a[:-1]
hkr = get_wavelet_obj(wavelet).get_rec_hk()
gkr = get_wavelet_obj(wavelet).get_rec_gk()
a_upsample = nputils.upsample(a, 2, oddeven=1, lastzero=True, axis=axis)
d_upsample = nputils.upsample(d, 2, oddeven=1, lastzero=True, axis=axis)
c1 = nputils.convolve(a_upsample, hkr, boundary, axis=axis, mode='valid')
c2 = nputils.convolve(d_upsample, gkr, boundary, axis=axis, mode='valid')
return c1 + c2
def uwt(signal, wavelet, boundary, level, initial_signal=None, axis=None):
hkd = nputils.atrou(get_wavelet_obj(wavelet).get_dec_hk(), pow(2, level))
gkd = nputils.atrou(get_wavelet_obj(wavelet).get_dec_gk(), pow(2, level))
a = nputils.convolve(signal, hkd, boundary, axis=axis)
d = nputils.convolve(signal, gkd, boundary, axis=axis)
return (a, d)
def uwt_inv(a, d, wavelet, boundary, level, initial_signal=None, axis=None):
hkr = nputils.atrou(get_wavelet_obj(wavelet).get_rec_hk(), pow(2, level))
gkr = nputils.atrou(get_wavelet_obj(wavelet).get_rec_gk(), pow(2, level))
c1 = nputils.convolve(a, hkr, boundary, axis=axis, mode="valid")
c2 = nputils.convolve(d, gkr, boundary, axis=axis, mode="valid")
return 1 / 2. * (c1 + c2)
def uiwt(signal, wavelet, boundary, level, initial_signal=None, axis=None):
hkd = nputils.atrou(get_wavelet_obj(wavelet).get_dec_hk(), pow(2, level))
a = nputils.convolve(signal, hkd, boundary, axis=axis, mode='same')
d = signal - a
return (a, d)
def uimwt(signal, wavelet, boundary, level, initial_signal=None, axis=None):
hkd = nputils.atrou(get_wavelet_obj(wavelet).get_dec_hk(), pow(2, level))
a = nputils.convolve(signal, hkd, boundary, axis=axis, mode='same')
a2 = nputils.convolve(a, hkd, boundary, axis=axis, mode='same')
d = signal - a2
return (a, d)
def uiwt_inv(a, d, wavelet, boundary, level, axis=None):
return a + d
def wavedec(signal, wavelet, level, boundary="symm",
dec=dwt, axis=None, thread=None):
# max_level = get_wavelet_obj(wavelet).get_max_level(signal)
# if level > max_level:
# raise ValueError("Level should be < %s" % max_level)
res = []
a = signal
for j in range(int(level)):
if thread and not thread.is_alive():
return None
a, d = dec(a, wavelet, boundary, j, initial_signal=signal, axis=axis)
res.append(d)
res.append(a)
return res
def dogdec(signal, widths=None, angle=0, ellipticity=1, boundary="symm"):
if widths is None:
widths = np.arange(1, min(signal.shape) / 4)
beams = [imgutils.GaussianBeam(ellipticity * w, w, bpa=angle) for w in widths]
filtered = [b.convolve(signal, boundary=boundary) for b in beams]
res = [(el[0] - el[-1]) for el in nputils.nwise(filtered, 2)]
for s in res:
s[s <= 0] = 0
res = [s - b2.convolve(s, boundary=boundary) for (s, (b1, b2)) in zip(res, nputils.nwise(beams, 2))]
# res = [b1.convolve(s, boundary=boundary) - b2.convolve(s, boundary=boundary) for (s, (b1, b2)) in zip(res, nputils.nwise(beams, 2))]
return res
def pyramiddec(signal, widths=None, angle=0, ellipticity=1, boundary="symm"):
if widths is None:
widths = np.arange(1, min(signal.shape) / 4)
beams = [imgutils.GaussianBeam(ellipticity * w, w, angle=angle) for w in widths]
min_scale = beams[0].convolve(signal, boundary=boundary) - beams[1].convolve(signal, boundary=boundary)
filtered_min = [b.convolve(min_scale, boundary=boundary) for b in beams]
filtered_all = [b.convolve(signal, boundary=boundary) for b in beams]
dog = [(el[0] - el[-1]) for el in nputils.nwise(filtered_all, 2)]
return [v - k for k, v in zip(filtered_min, dog)]
def waverec(coefs, wavelet, boundary="symm", rec=dwt_inv,
axis=None, shape=None, thread=None):
a = coefs[-1]
for j in range(len(coefs) - 2, -1, -1):
if thread and not thread.is_alive():
return None
a = rec(a, coefs[j], wavelet, boundary, j, axis=axis)
if shape and shape != a.shape:
# See idwt() for an explaination
a = nputils.index(a, np.s_[:-1], axis)
return a
def dec2d(img, wavelet, boundary, dec, level):
rows_a, rows_d = dec(img, wavelet, boundary, level, axis=0)
a, d1 = dec(rows_a, wavelet, boundary, level, axis=1)
d2, d3 = dec(rows_d, wavelet, boundary, level, axis=1)
return (a, d1, d2, d3)
def wavedec2d(img, wavelet, level, boundary="symm", dec=dwt, thread=None):
a = img
res = []
for j in range(int(level)):
if thread and not thread.is_alive():
return None
a, d1, d2, d3 = dec2d(a, wavelet, boundary, dec, j)
res.append([d1, d2, d3])
res.append(a)
return res
def rec2d(a, d, wavelet, boundary, rec, level):
d1, d2, d3 = d
if a.shape != d1.shape:
a = nputils.index(a, np.s_[:-1])
temp_a = rec(a, d1, wavelet, boundary, level, axis=1)
temp_d = rec(d2, d3, wavelet, boundary, level, axis=1)
img = rec(temp_a, temp_d, wavelet, boundary, level, axis=0)
return img
def waverec2d(coefs, wavelet, boundary="symm", rec=dwt_inv, shape=None, thread=None):
a = coefs[-1]
for j in range(len(coefs) - 2, -1, -1):
if thread and not thread.is_alive():
return None
a = rec2d(a, coefs[j], wavelet, boundary, rec, j)
if shape and shape != a.shape:
a = nputils.index(a, np.s_[:-1])
return a
def dyadic_image(coeffs, shape=None, normalize=True):
def normalize(a):
return (a - a.min()) / float(a.max() - a.min())
if shape:
d = [nputils.resize(coeffs[0], [k / pow(2., len(coeffs) - 1)
for k in shape])]
for l in range(1, len(coeffs)):
s = [k / pow(2., len(coeffs) - l) for k in shape]
d.append(map(nputils.resize, coeffs[l], [s] * 3))
else:
shape = coeffs[-1].shape
d = coeffs
for coef in coeffs[0:-1]:
shape = shape + np.array(coef[0].shape)
if normalize:
# normalize aproximation
d[-1] = normalize(d[-1])
# normalize details
for l in range(0, len(coeffs) - 1):
d[l] = map(normalize, d[l])
res = np.ones(shape)
nputils.fill_at(res, (0, 0), d[-1])
(x, y) = d[-1].shape
for l in range(len(d) - 2, -1, -1):
nputils.fill_at(res, (0, y), d[l][0])
nputils.fill_at(res, (x, 0), d[l][1])
nputils.fill_at(res, (x, y), d[l][2])
(x, y) = d[l][0].shape + np.array((x, y))
return res
def get_noise_factor_from_background(wavelet, level, dec, background):
scales = wavedec(background, wavelet, level, dec=dec)
return [scale.std() for scale in scales[:-1]]
def get_noise_factor_from_data(wavelet, level, dec, data):
scales = wavedec(data, wavelet, level, dec=dec)
return [nputils.k_sigma_noise_estimation(scale) for scale in scales[:-1]]
def get_noise_factor(wavelet, level, dec, beam=None):
# n = (250000)
n = (200, 200)
background = nputils.gaussian_noise(n, 0, 1)
if beam is not None:
background = beam.convolve(background)
return get_noise_factor_from_background(wavelet, level, dec, background)
def wave_noise_factor(bg, wavelet, level, dec, beam=None):
if isinstance(bg, np.ndarray):
scales_noise = get_noise_factor_from_background(wavelet, level, dec, bg)
else:
scales_noise = bg * np.array(get_noise_factor(wavelet, level, dec, beam=beam))
return scales_noise
def dec_noise_factor(dec, bg, beam=None, **kargs):
if not isinstance(bg, np.ndarray):
n = (200, 200)
bg = nputils.gaussian_noise(n, 0, bg)
if beam is not None:
bg = beam.convolve(bg)
scales = dec(bg, **kargs)
return [scale.std() for scale in scales[:-1]]
def dog_noise_factor(bg, widths=None, angle=0, ellipticity=1, beam=None):
return dec_noise_factor(dogdec, bg, beam=beam, widths=widths, angle=angle, ellipticity=ellipticity)
| [
"nputils.convolve",
"nputils.nwise",
"numpy.ones",
"nputils.index",
"nputils.fill_at",
"wavelets.get_wavelet",
"nputils.upsample",
"numpy.array",
"imgutils.GaussianBeam",
"nputils.k_sigma_noise_estimation",
"nputils.downsample",
"nputils.gaussian_noise"
] | [((690, 740), 'nputils.convolve', 'nputils.convolve', (['signal', 'hkd', 'boundary'], {'axis': 'axis'}), '(signal, hkd, boundary, axis=axis)\n', (706, 740), False, 'import nputils\n'), ((754, 804), 'nputils.convolve', 'nputils.convolve', (['signal', 'gkd', 'boundary'], {'axis': 'axis'}), '(signal, gkd, boundary, axis=axis)\n', (770, 804), False, 'import nputils\n'), ((814, 865), 'nputils.downsample', 'nputils.downsample', (['a_conv', '(2)'], {'oddeven': '(1)', 'axis': 'axis'}), '(a_conv, 2, oddeven=1, axis=axis)\n', (832, 865), False, 'import nputils\n'), ((874, 925), 'nputils.downsample', 'nputils.downsample', (['d_conv', '(2)'], {'oddeven': '(1)', 'axis': 'axis'}), '(d_conv, 2, oddeven=1, axis=axis)\n', (892, 925), False, 'import nputils\n'), ((1621, 1680), 'nputils.upsample', 'nputils.upsample', (['a', '(2)'], {'oddeven': '(1)', 'lastzero': '(True)', 'axis': 'axis'}), '(a, 2, oddeven=1, lastzero=True, axis=axis)\n', (1637, 1680), False, 'import nputils\n'), ((1698, 1757), 'nputils.upsample', 'nputils.upsample', (['d', '(2)'], {'oddeven': '(1)', 'lastzero': '(True)', 'axis': 'axis'}), '(d, 2, oddeven=1, lastzero=True, axis=axis)\n', (1714, 1757), False, 'import nputils\n'), ((1768, 1836), 'nputils.convolve', 'nputils.convolve', (['a_upsample', 'hkr', 'boundary'], {'axis': 'axis', 'mode': '"""valid"""'}), "(a_upsample, hkr, boundary, axis=axis, mode='valid')\n", (1784, 1836), False, 'import nputils\n'), ((1846, 1914), 'nputils.convolve', 'nputils.convolve', (['d_upsample', 'gkr', 'boundary'], {'axis': 'axis', 'mode': '"""valid"""'}), "(d_upsample, gkr, boundary, axis=axis, mode='valid')\n", (1862, 1914), False, 'import nputils\n'), ((2177, 2227), 'nputils.convolve', 'nputils.convolve', (['signal', 'hkd', 'boundary'], {'axis': 'axis'}), '(signal, hkd, boundary, axis=axis)\n', (2193, 2227), False, 'import nputils\n'), ((2236, 2286), 'nputils.convolve', 'nputils.convolve', (['signal', 'gkd', 'boundary'], {'axis': 'axis'}), '(signal, gkd, boundary, axis=axis)\n', (2252, 2286), False, 'import nputils\n'), ((2551, 2610), 'nputils.convolve', 'nputils.convolve', (['a', 'hkr', 'boundary'], {'axis': 'axis', 'mode': '"""valid"""'}), "(a, hkr, boundary, axis=axis, mode='valid')\n", (2567, 2610), False, 'import nputils\n'), ((2620, 2679), 'nputils.convolve', 'nputils.convolve', (['d', 'gkr', 'boundary'], {'axis': 'axis', 'mode': '"""valid"""'}), "(d, gkr, boundary, axis=axis, mode='valid')\n", (2636, 2679), False, 'import nputils\n'), ((2876, 2939), 'nputils.convolve', 'nputils.convolve', (['signal', 'hkd', 'boundary'], {'axis': 'axis', 'mode': '"""same"""'}), "(signal, hkd, boundary, axis=axis, mode='same')\n", (2892, 2939), False, 'import nputils\n'), ((3145, 3208), 'nputils.convolve', 'nputils.convolve', (['signal', 'hkd', 'boundary'], {'axis': 'axis', 'mode': '"""same"""'}), "(signal, hkd, boundary, axis=axis, mode='same')\n", (3161, 3208), False, 'import nputils\n'), ((3218, 3276), 'nputils.convolve', 'nputils.convolve', (['a', 'hkd', 'boundary'], {'axis': 'axis', 'mode': '"""same"""'}), "(a, hkd, boundary, axis=axis, mode='same')\n", (3234, 3276), False, 'import nputils\n'), ((7731, 7745), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (7738, 7745), True, 'import numpy as np\n'), ((7751, 7786), 'nputils.fill_at', 'nputils.fill_at', (['res', '(0, 0)', 'd[-1]'], {}), '(res, (0, 0), d[-1])\n', (7766, 7786), False, 'import nputils\n'), ((8538, 8569), 'nputils.gaussian_noise', 'nputils.gaussian_noise', (['n', '(0)', '(1)'], {}), '(n, 0, 1)\n', (8560, 8569), False, 'import nputils\n'), ((187, 210), 'wavelets.get_wavelet', 'wavelets.get_wavelet', (['w'], {}), '(w)\n', (207, 210), False, 'import wavelets\n'), ((4078, 4130), 'imgutils.GaussianBeam', 'imgutils.GaussianBeam', (['(ellipticity * w)', 'w'], {'bpa': 'angle'}), '(ellipticity * w, w, bpa=angle)\n', (4099, 4130), False, 'import imgutils\n'), ((4753, 4807), 'imgutils.GaussianBeam', 'imgutils.GaussianBeam', (['(ellipticity * w)', 'w'], {'angle': 'angle'}), '(ellipticity * w, w, angle=angle)\n', (4774, 4807), False, 'import imgutils\n'), ((5599, 5633), 'nputils.index', 'nputils.index', (['a', 'np.s_[:-1]', 'axis'], {}), '(a, np.s_[:-1], axis)\n', (5612, 5633), False, 'import nputils\n'), ((6343, 6371), 'nputils.index', 'nputils.index', (['a', 'np.s_[:-1]'], {}), '(a, np.s_[:-1])\n', (6356, 6371), False, 'import nputils\n'), ((6892, 6920), 'nputils.index', 'nputils.index', (['a', 'np.s_[:-1]'], {}), '(a, np.s_[:-1])\n', (6905, 6920), False, 'import nputils\n'), ((7860, 7897), 'nputils.fill_at', 'nputils.fill_at', (['res', '(0, y)', 'd[l][0]'], {}), '(res, (0, y), d[l][0])\n', (7875, 7897), False, 'import nputils\n'), ((7906, 7943), 'nputils.fill_at', 'nputils.fill_at', (['res', '(x, 0)', 'd[l][1]'], {}), '(res, (x, 0), d[l][1])\n', (7921, 7943), False, 'import nputils\n'), ((7952, 7989), 'nputils.fill_at', 'nputils.fill_at', (['res', '(x, y)', 'd[l][2]'], {}), '(res, (x, y), d[l][2])\n', (7967, 7989), False, 'import nputils\n'), ((8361, 8400), 'nputils.k_sigma_noise_estimation', 'nputils.k_sigma_noise_estimation', (['scale'], {}), '(scale)\n', (8393, 8400), False, 'import nputils\n'), ((9145, 9177), 'nputils.gaussian_noise', 'nputils.gaussian_noise', (['n', '(0)', 'bg'], {}), '(n, 0, bg)\n', (9167, 9177), False, 'import nputils\n'), ((4256, 4282), 'nputils.nwise', 'nputils.nwise', (['filtered', '(2)'], {}), '(filtered, 2)\n', (4269, 4282), False, 'import nputils\n'), ((5122, 5152), 'nputils.nwise', 'nputils.nwise', (['filtered_all', '(2)'], {}), '(filtered_all, 2)\n', (5135, 5152), False, 'import nputils\n'), ((8023, 8039), 'numpy.array', 'np.array', (['(x, y)'], {}), '((x, y))\n', (8031, 8039), True, 'import numpy as np\n'), ((4403, 4426), 'nputils.nwise', 'nputils.nwise', (['beams', '(2)'], {}), '(beams, 2)\n', (4416, 4426), False, 'import nputils\n'), ((7499, 7522), 'numpy.array', 'np.array', (['coef[0].shape'], {}), '(coef[0].shape)\n', (7507, 7522), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2022 by <NAME>
import os
import sys
import glob
import torch
import random
import timeit
import configs
import logging
import numpy as np
import pandas as pd
import transformers
from tqdm import tqdm, trange
from adapt_mrc_model import AdaptMRCModel
from data_generator import QADataset, qa_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
squad_evaluate,
get_raw_scores,
)
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor
from transformers.trainer_utils import is_main_process
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # specify which GPU(s) to be used
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def set_seed(configs):
random.seed(configs.seed)
np.random.seed(configs.seed)
torch.manual_seed(configs.seed)
if configs.n_gpu > 0:
torch.cuda.manual_seed_all(configs.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
def train(model, tokenizer):
"""Train the model"""
set_seed(configs)
if configs.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
configs.train_batch_size = configs.per_gpu_train_batch_size * max(1, configs.n_gpu)
train_dataset = QADataset()
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=configs.train_batch_size,
shuffle=False,
num_workers=configs.num_workers,
collate_fn=qa_collate
)
if configs.max_steps > 0:
t_total = configs.max_steps
configs.num_train_epochs = configs.max_steps // (
len(train_dataloader) // configs.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // configs.gradient_accumulation_steps * configs.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
# check the parameters
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": configs.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=configs.learning_rate,
eps=configs.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=configs.warmup_steps,
num_training_steps=t_total * configs.lr_multiplier
)
# Check if saved optimizer or scheduler states exist
if (os.path.isfile(os.path.join(configs.pretrained_model_name_or_path, "optimizer.pt"))
and os.path.isfile(os.path.join(configs.pretrained_model_name_or_path, "scheduler.pt"))):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(configs.pretrained_model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(configs.pretrained_model_name_or_path, "scheduler.pt")))
if configs.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=configs.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if configs.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if configs.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[configs.local_rank], output_device=configs.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", configs.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", configs.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
configs.train_batch_size
* configs.gradient_accumulation_steps
* (torch.distributed.get_world_size() if configs.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", configs.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(configs.pretrained_model_name_or_path):
try:
# set global_step to gobal_step of last saved checkpoint from model path
checkpoint_suffix = configs.pretrained_model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // configs.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // configs.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(configs.num_train_epochs), desc="Epoch", disable=configs.local_rank not in [-1, 0]
)
# Added here for reproductibility
set_seed(configs)
ite = 0
patience = configs.patience_threshold
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=configs.local_rank not in [-1, 0])
local_step = 0
mean_loss = []
for step, batch in enumerate(epoch_iterator):
local_step += 1
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
outputs = model(batch)
encodings, factoid_qa_outputs, aux_qa_outputs, \
adv_loss, aux_qa_loss, original_qa_loss, loss = outputs
if configs.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if configs.gradient_accumulation_steps > 1:
loss = loss / configs.gradient_accumulation_steps
if configs.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
mean_loss.append(loss)
if (step + 1) % configs.gradient_accumulation_steps == 0:
if configs.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), configs.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), configs.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
# Log metrics
if configs.local_rank in [-1, 0] and configs.logging_steps > 0 and global_step % configs.logging_steps == 0:
# Only evaluate when single GPU otherwise metrics may not average well
if configs.local_rank == -1 and configs.evaluate_during_training:
results = evaluate(model, tokenizer, None, in_domain=None, out_domain=None, evaluate_all=False,
evaluate_domain_0=False)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / configs.logging_steps, global_step)
logging_loss = tr_loss
# Save model checkpoint
if configs.local_rank in [-1, 0] and configs.save_steps > 0 and global_step % configs.save_steps == 0:
output_dir = os.path.join(configs.output_model_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir) and configs.local_rank in [-1, 0]:
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, 'module') else model
torch.save(model_to_save.state_dict(), f'{output_dir}/model.pt')
tokenizer.save_pretrained(output_dir)
# torch.save(configs, os.path.join(output_dir, "training_configs.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if configs.max_steps > 0 and global_step > configs.max_steps:
epoch_iterator.close()
break
ite += 1
if (ite % 10 == 0):
if (configs.reverse_layer_lambda < 0.04):
configs.reverse_layer_lambda = configs.reverse_layer_lambda + configs.lambda_delta
if configs.max_steps > 0 and global_step > configs.max_steps:
train_iterator.close()
break
if configs.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def main():
set_seed(configs)
if configs.doc_stride >= configs.max_seq_length - configs.max_query_length:
logger.warning(
"WARNING - You've set a doc stride which may be superior to the document length in some "
"examples. This could result in errors when building features from the examples. Please reduce the doc "
"stride or increase the maximum length to ensure the features are correctly built."
)
if (
os.path.exists(configs.output_dir)
and os.listdir(configs.output_dir)
and configs.do_train
and not configs.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
configs.output_dir
)
)
# Setup distant debugging if needed
if configs.server_ip and configs.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(configs.server_ip, configs.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if configs.local_rank == -1 or configs.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not configs.no_cuda else "cpu")
configs.n_gpu = 0 if configs.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.cuda.set_device(configs.local_rank)
device = torch.device("cuda", configs.local_rank)
torch.distributed.init_process_group(backend="nccl")
configs.n_gpu = 1
configs.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if configs.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
configs.local_rank,
device,
configs.n_gpu,
bool(configs.local_rank != -1),
configs.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(configs.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seed
set_seed(configs)
# Load pretrained model and tokenizer
if configs.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
configs.model_type = configs.model_type.lower()
tokenizer = AutoTokenizer.from_pretrained(
configs.tokenizer_name if configs.tokenizer_name else configs.pretrained_model_name_or_path,
do_lower_case=configs.do_lower_case,
cache_dir=configs.cache_dir if configs.cache_dir else None,
use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling
)
if configs.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model = AdaptMRCModel()
model.to(configs.device)
logger.info("Training/evaluation parameters %s", configs)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if configs.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if configs.fp16:
try:
import apex
apex.amp.register_half_function(torch, "einsum")
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if configs.do_train:
global_step, tr_loss = train(model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if configs.do_train and (configs.local_rank == -1 or torch.distributed.get_rank() == 0):
if not os.path.exists(configs.output_model_dir):
os.makedirs(configs.output_model_dir)
logger.info("Saving model checkpoint to %s", configs.output_model_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, 'module') else model
# TODO: Save model to
save_folder_dir = f'{configs.output_model_dir}/adapt-mrc-mbbank'
print(f"Save model to {save_folder_dir}")
if os.path.exists(save_folder_dir):
os.makedirs(save_folder_dir)
model_to_save.pretrained_model.save_pretrained(save_folder_dir)
tokenizer.save_pretrained(save_folder_dir)
# torch.save(model_to_save.state_dict(), f'{configs.output_model_dir}model.pt')
# tokenizer.save_pretrained(configs.output_model_dir)
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"apex.amp.scale_loss",
"torch.cuda.device_count",
"adapt_mrc_model.AdaptMRCModel",
"apex.amp.initialize",
"torch.cuda.is_available",
"transformers.AutoTokenizer.from_pretrained",
"torch.distributed.get_rank",
"configs.model_type.lower",
"torch.distributed.barrier",
"os.path.... | [((1168, 1195), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1185, 1195), False, 'import logging\n'), ((1302, 1345), 'transformers.MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys', ([], {}), '()\n', (1343, 1345), False, 'from transformers import MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features\n'), ((1446, 1471), 'random.seed', 'random.seed', (['configs.seed'], {}), '(configs.seed)\n', (1457, 1471), False, 'import random\n'), ((1476, 1504), 'numpy.random.seed', 'np.random.seed', (['configs.seed'], {}), '(configs.seed)\n', (1490, 1504), True, 'import numpy as np\n'), ((1509, 1540), 'torch.manual_seed', 'torch.manual_seed', (['configs.seed'], {}), '(configs.seed)\n', (1526, 1540), False, 'import torch\n'), ((1946, 1957), 'data_generator.QADataset', 'QADataset', ([], {}), '()\n', (1955, 1957), False, 'from data_generator import QADataset, qa_collate\n'), ((1981, 2140), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'configs.train_batch_size', 'shuffle': '(False)', 'num_workers': 'configs.num_workers', 'collate_fn': 'qa_collate'}), '(train_dataset, batch_size=configs.\n train_batch_size, shuffle=False, num_workers=configs.num_workers,\n collate_fn=qa_collate)\n', (2008, 2140), False, 'import torch\n'), ((3022, 3114), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'configs.learning_rate', 'eps': 'configs.adam_epsilon'}), '(optimizer_grouped_parameters, lr=configs.learning_rate, eps=configs.\n adam_epsilon)\n', (3027, 3114), False, 'from transformers import MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features\n'), ((3156, 3294), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'configs.warmup_steps', 'num_training_steps': '(t_total * configs.lr_multiplier)'}), '(optimizer, num_warmup_steps=configs.\n warmup_steps, num_training_steps=t_total * configs.lr_multiplier)\n', (3187, 3294), False, 'from transformers import MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features\n'), ((5425, 5478), 'os.path.exists', 'os.path.exists', (['configs.pretrained_model_name_or_path'], {}), '(configs.pretrained_model_name_or_path)\n', (5439, 5478), False, 'import os\n'), ((12950, 13147), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': '(logging.INFO if configs.local_rank in [-1, 0] else logging.WARN)'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO if configs.local_rank in [-1, 0\n ] else logging.WARN)\n", (12969, 13147), False, 'import logging\n'), ((13508, 13543), 'transformers.trainer_utils.is_main_process', 'is_main_process', (['configs.local_rank'], {}), '(configs.local_rank)\n', (13523, 13543), False, 'from transformers.trainer_utils import is_main_process\n'), ((14001, 14027), 'configs.model_type.lower', 'configs.model_type.lower', ([], {}), '()\n', (14025, 14027), False, 'import configs\n'), ((14045, 14293), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['(configs.tokenizer_name if configs.tokenizer_name else configs.\n pretrained_model_name_or_path)'], {'do_lower_case': 'configs.do_lower_case', 'cache_dir': '(configs.cache_dir if configs.cache_dir else None)', 'use_fast': '(False)'}), '(configs.tokenizer_name if configs.\n tokenizer_name else configs.pretrained_model_name_or_path,\n do_lower_case=configs.do_lower_case, cache_dir=configs.cache_dir if\n configs.cache_dir else None, use_fast=False)\n', (14074, 14293), False, 'from transformers import MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features\n'), ((14592, 14607), 'adapt_mrc_model.AdaptMRCModel', 'AdaptMRCModel', ([], {}), '()\n', (14605, 14607), False, 'from adapt_mrc_model import AdaptMRCModel\n'), ((1575, 1615), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['configs.seed'], {}), '(configs.seed)\n', (1601, 1615), False, 'import torch\n'), ((1820, 1835), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1833, 1835), False, 'from tensorboardX import SummaryWriter\n'), ((4091, 4157), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': 'configs.fp16_opt_level'}), '(model, optimizer, opt_level=configs.fp16_opt_level)\n', (4105, 4157), False, 'from apex import amp\n'), ((4269, 4297), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (4290, 4297), False, 'import torch\n'), ((4418, 4567), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[configs.local_rank]', 'output_device': 'configs.local_rank', 'find_unused_parameters': '(True)'}), '(model, device_ids=[configs.\n local_rank], output_device=configs.local_rank, find_unused_parameters=True)\n', (4459, 4567), False, 'import torch\n'), ((6797, 6884), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""', 'disable': '(configs.local_rank not in [-1, 0])'}), "(train_dataloader, desc='Iteration', disable=configs.local_rank not in\n [-1, 0])\n", (6801, 6884), False, 'from tqdm import tqdm, trange\n'), ((11553, 11587), 'os.path.exists', 'os.path.exists', (['configs.output_dir'], {}), '(configs.output_dir)\n', (11567, 11587), False, 'import os\n'), ((11604, 11634), 'os.listdir', 'os.listdir', (['configs.output_dir'], {}), '(configs.output_dir)\n', (11614, 11634), False, 'import os\n'), ((12204, 12299), 'ptvsd.enable_attach', 'ptvsd.enable_attach', ([], {'address': '(configs.server_ip, configs.server_port)', 'redirect_output': '(True)'}), '(address=(configs.server_ip, configs.server_port),\n redirect_output=True)\n', (12223, 12299), False, 'import ptvsd\n'), ((12304, 12327), 'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (12325, 12327), False, 'import ptvsd\n'), ((12709, 12750), 'torch.cuda.set_device', 'torch.cuda.set_device', (['configs.local_rank'], {}), '(configs.local_rank)\n', (12730, 12750), False, 'import torch\n'), ((12768, 12808), 'torch.device', 'torch.device', (['"""cuda"""', 'configs.local_rank'], {}), "('cuda', configs.local_rank)\n", (12780, 12808), False, 'import torch\n'), ((12817, 12869), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (12853, 12869), False, 'import torch\n'), ((13553, 13600), 'transformers.utils.logging.set_verbosity_info', 'transformers.utils.logging.set_verbosity_info', ([], {}), '()\n', (13598, 13600), False, 'import transformers\n'), ((13609, 13660), 'transformers.utils.logging.enable_default_handler', 'transformers.utils.logging.enable_default_handler', ([], {}), '()\n', (13658, 13660), False, 'import transformers\n'), ((13669, 13720), 'transformers.utils.logging.enable_explicit_format', 'transformers.utils.logging.enable_explicit_format', ([], {}), '()\n', (13718, 13720), False, 'import transformers\n'), ((13947, 13974), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (13972, 13974), False, 'import torch\n'), ((14551, 14578), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (14576, 14578), False, 'import torch\n'), ((16227, 16258), 'os.path.exists', 'os.path.exists', (['save_folder_dir'], {}), '(save_folder_dir)\n', (16241, 16258), False, 'import os\n'), ((3401, 3468), 'os.path.join', 'os.path.join', (['configs.pretrained_model_name_or_path', '"""optimizer.pt"""'], {}), "(configs.pretrained_model_name_or_path, 'optimizer.pt')\n", (3413, 3468), False, 'import os\n'), ((3501, 3568), 'os.path.join', 'os.path.join', (['configs.pretrained_model_name_or_path', '"""scheduler.pt"""'], {}), "(configs.pretrained_model_name_or_path, 'scheduler.pt')\n", (3513, 3568), False, 'import os\n'), ((12578, 12603), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (12601, 12603), False, 'import torch\n'), ((15085, 15133), 'apex.amp.register_half_function', 'apex.amp.register_half_function', (['torch', '"""einsum"""'], {}), "(torch, 'einsum')\n", (15116, 15133), False, 'import apex\n'), ((15612, 15652), 'os.path.exists', 'os.path.exists', (['configs.output_model_dir'], {}), '(configs.output_model_dir)\n', (15626, 15652), False, 'import os\n'), ((15666, 15703), 'os.makedirs', 'os.makedirs', (['configs.output_model_dir'], {}), '(configs.output_model_dir)\n', (15677, 15703), False, 'import os\n'), ((16272, 16300), 'os.makedirs', 'os.makedirs', (['save_folder_dir'], {}), '(save_folder_dir)\n', (16283, 16300), False, 'import os\n'), ((3666, 3733), 'os.path.join', 'os.path.join', (['configs.pretrained_model_name_or_path', '"""optimizer.pt"""'], {}), "(configs.pretrained_model_name_or_path, 'optimizer.pt')\n", (3678, 3733), False, 'import os\n'), ((3781, 3848), 'os.path.join', 'os.path.join', (['configs.pretrained_model_name_or_path', '"""scheduler.pt"""'], {}), "(configs.pretrained_model_name_or_path, 'scheduler.pt')\n", (3793, 3848), False, 'import os\n'), ((5052, 5086), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (5084, 5086), False, 'import torch\n'), ((15560, 15588), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (15586, 15588), False, 'import torch\n'), ((7718, 7749), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (7732, 7749), False, 'from apex import amp\n'), ((12466, 12491), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12489, 12491), False, 'import torch\n'), ((8085, 8113), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (8102, 8113), False, 'from apex import amp\n'), ((9715, 9738), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (9726, 9738), False, 'import os\n'), ((10262, 10302), 'os.path.join', 'os.path.join', (['output_dir', '"""optimizer.pt"""'], {}), "(output_dir, 'optimizer.pt')\n", (10274, 10302), False, 'import os\n'), ((10359, 10399), 'os.path.join', 'os.path.join', (['output_dir', '"""scheduler.pt"""'], {}), "(output_dir, 'scheduler.pt')\n", (10371, 10399), False, 'import os\n'), ((5610, 5658), 'configs.pretrained_model_name_or_path.split', 'configs.pretrained_model_name_or_path.split', (['"""-"""'], {}), "('-')\n", (5653, 5658), False, 'import configs\n'), ((9629, 9655), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (9643, 9655), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 15 22:18:57 2014
Modified on Wed Jan 27 15:36:00 2016
@author: <NAME>
"""
from serie2QMlib import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
#Define sliding window
def window_time_series(series, n, step = 1):
# print "in window_time_series",series
if step < 1.0:
step = max(int(step * n), 1)
return [series[i:i+n] for i in range(0, len(series) - n + 1, step)]
#PAA function
def paa(series, now, opw):
if now == None:
now = int(len(series) / opw)
if opw == None:
opw = int(len(series) / now)
return [sum(series[i * opw : (i + 1) * opw]) / float(opw) for i in range(now)]
def standardize(serie):
dev = np.sqrt(np.var(serie))
mean = np.mean(serie)
return [(each-mean)/dev for each in serie]
#Rescale data into [0,1]
def rescale(serie):
maxval = max(serie)
minval = min(serie)
gap = float(maxval-minval)
return [(each-minval)/gap for each in serie]
#Rescale data into [-1,1]
def rescaleminus(serie):
maxval = max(serie)
minval = min(serie)
gap = float(maxval-minval)
return [(each-minval)/gap*2-1 for each in serie]
#Generate quantile bins
def QMeq(series, Q):
q_labels = pd.qcut(list(series), Q, labels=False)
q_levels = pd.qcut(list(series), Q, labels=None)
dic = dict(zip(series, q_labels))
MSM = np.zeros([Q,Q])
label = []
for each in series:
label.append(dic[each])
for i in range(0, len(label)-1):
MSM[label[i]][label[i+1]] += 1
for i in range(Q):
if sum(MSM[i][:]) == 0:
continue
MSM[i][:] = MSM[i][:]/sum(MSM[i][:])
return np.array(MSM), q_labels, q_levels.categories.get_values().tolist()
#Generate quantile bins when equal values exist in the array (slower than QMeq)
def QVeq(series, Q):
q = pd.qcut(list(set(series)), Q)
dic = dict(zip(set(series), q.labels))
qv = np.zeros([1,Q])
label = []
for each in series:
label.append(dic[each])
for i in range(0,len(label)):
qv[0][label[i]] += 1.0
return np.array(qv[0][:]/sum(qv[0][:])), label
#Generate Markov Matrix given a spesicif number of quantile bins
def paaMarkovMatrix(paalist,levels):
paaindex = []
for each in paalist:
for level in levels:
if each >=level.left and each <= level.right:
paaindex.append(k)
return paaindex
# Generate pdf files of generated images
def gengrampdfs(image,paaimages,label,name):
import matplotlib.backends.backend_pdf as bpdf
import operator
index = zip(range(len(label)),label)
index.sort(key = operator.itemgetter(1))
with bpdf.PdfPages(name) as pdf:
count = 0
for p,q in index:
count += 1
print('generate fig of pdfs:'.format(p))
plt.ioff();fig= plt.figure();plt.suptitle(datafile+'_'+str(label[p]));ax1 = plt.subplot(121);plt.imshow(image[p]);divider = make_axes_locatable(ax1);cax = divider.append_axes("right", size="5%", pad=0.1);plt.colorbar(cax = cax);ax2 = plt.subplot(122);plt.imshow(paaimage[p]);divider = make_axes_locatable(ax2);cax = divider.append_axes("right", size="5%", pad=0.1);plt.colorbar(cax = cax);
pdf.savefig(fig)
plt.close(fig)
if count > 30:
break
pdf.close
# Generate pdf files of trainsisted array in porlar coordinates
def genpolarpdfs(raw,label,name):
import matplotlib.backends.backend_pdf as bpdf
import operator
index = zip(range(len(label)),label)
index.sort(key = operator.itemgetter(1))
len(raw[0]) - 1
with bpdf.PdfPages(name) as pdf:
for p,q in index:
print('generate fig of pdfs:'.format(p))
plt.ioff();r = np.array(range(1,length+1));r=r/100.0;theta = np.arccos(np.array(rescaleminus(standardize(raw[p][1:]))))*2;fig=plt.figure();plt.suptitle(datafile+'_'+str(label[p]));ax = plt.subplot(111, polar=True);ax.plot(theta, r, color='r', linewidth=3);
pdf.savefig(fig)
plt.close(fig)
pdf.close
#return the max value instead of mean value in PAAs
def maxsample(mat, s):
retval = []
x, y, z = mat.shape
l = np.int(np.floor(y/float(s)))
for each in mat:
block = []
for i in range(s):
block.append([np.max(each[i*l:(i+1)*l,j*l:(j+1)*l]) for j in range(s)])
retval.append(np.asarray(block))
return np.asarray(retval)
#Pickle the data and save in the pkl file
def pickledata(mat, label, train, name):
print('..pickling data:'.format(name))
traintp = (mat[:train], label[:train])
testtp = (mat[train:], label[train:])
with open(name+'.pkl', 'wb') as f:
pickletp = [traintp, testtp]
pickle.dump(pickletp, f, protocol=pickle.HIGHEST_PROTOCOL)
def pickle3data(mat, label, train, name):
print('..pickling data:'.format(name))
traintp = (mat[:train], label[:train])
validtp = (mat[:train], label[:train])
testtp = (mat[train:], label[train:])
with open (name+'.pkl', 'wb') as f:
pickletp = [traintp, validtp, testtp]
pickle.dump(pickletp, f, protocol=pickle.HIGHEST_PROTOCOL)
#################################
###Define the parameters here####
#################################
datafiles = ['Coffee_ALL'] # Data fine name
trains = [28] # Number of training instances (because we assume training and test data are mixed in one file)
size = [64] # PAA size
quantile = [16] # Quantile size
reduction_type = 'patch' # Reduce the image size using: full, patch, paa
for datafile, train in zip(datafiles,trains):
fn = datafile
for s in size:
for Q in quantile:
print('read file: {}, size: {}, reduction_type: {}'.format(datafile, s, reduction_type))
raw = open(fn).readlines()
raw = [list(map(float, each.strip().split())) for each in raw]
length = len(raw[0])-1
print('format data')
label = []
paaimage = []
paamatrix = []
patchimage = []
patchmatrix = []
fullimage = []
fullmatrix = []
for each in raw:
label.append(each[0])
#std_data = rescaleminus(each[1:])
#std_data = rescale(each[1:])
std_data = each[1:]
#std_data = standardize(each[1:])
#std_data = rescaleminus(std_data)
paalist = paa(std_data,s,None)
############### Markov Matrix #######################
mat, matindex, level = QMeq(std_data, Q)
##paamat,paamatindex = QMeq(paalist,Q)
paamatindex = paaMarkovMatrix(paalist, level)
column = []
paacolumn = []
for p in range(len(std_data)):
for q in range(len(std_data)):
column.append(mat[matindex[p]][matindex[(q)]])
for p in range(s):
for q in range(s):
paacolumn.append(mat[paamatindex[p]][paamatindex[(q)]])
column = np.array(column)
columnmatrix = column.reshape(len(std_data),len(std_data))
fullmatrix.append(column)
paacolumn = np.array(paacolumn)
paamatrix.append(paacolumn)
fullimage.append(column.reshape(len(std_data),len(std_data)))
paaimage.append(paacolumn.reshape(s,s))
batch = int(len(std_data)/s)
patch = []
for p in range(s):
for q in range(s):
patch.append(np.mean(columnmatrix[p*batch:(p+1)*batch,q*batch:(q+1)*batch]))
patchimage.append(np.array(patch).reshape(s,s))
patchmatrix.append(np.array(patch))
paaimage = np.asarray(paaimage)
paamatrix = np.asarray(paamatrix)
patchimage = np.asarray(patchimage)
patchmatrix = np.asarray(patchmatrix)
fullimage = np.asarray(fullimage)
fullmatrix = np.asarray(fullmatrix)
label = np.array(label)
if reduction_type == 'patch':
savematrix = patchmatrix
elif reduction_type == 'paa':
savematrix = paamatrix
else:
savematrix = fullmatrix
datafilename = datafile +'_'+reduction_type+'_PAA_'+str(s)+'_Q_'+str(Q)+'_MTF'
pickledata(savematrix, label, train, datafilename)
k=0;plt.figure();
plt.suptitle(datafile+'_index_'+str(k)+'_label_'+str(label[k])+'_Q_'+str(Q)+'_S_'+str(s));
ax1 = plt.subplot(121);plt.imshow(fullimage[k]);
plt.title('full image');
divider = make_axes_locatable(ax1);
cax = divider.append_axes("right", size="5%", pad=0.2);plt.colorbar(cax = cax);
# ax2 = plt.subplot(132);plt.imshow(paaimage[k]);plt.title('PAA image');
# divider = make_axes_locatable(ax2);cax = divider.append_axes("right", size="5%", pad=0.2);
# plt.colorbar(cax = cax);
ax3 = plt.subplot(122);
plt.imshow(patchimage[k]);
plt.title('patch average');
divider = make_axes_locatable(ax3);
cax = divider.append_axes("right", size="5%", pad=0.2);plt.colorbar(cax = cax); | [
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.colorbar",
"numpy.asarray",
"matplotlib.pyplot.ioff",
"operator.itemgetter",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotl... | [((8794, 8806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8804, 8806), True, 'import matplotlib.pyplot as plt\n'), ((8906, 8922), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (8917, 8922), True, 'import matplotlib.pyplot as plt\n'), ((8923, 8947), 'matplotlib.pyplot.imshow', 'plt.imshow', (['fullimage[k]'], {}), '(fullimage[k])\n', (8933, 8947), True, 'import matplotlib.pyplot as plt\n'), ((8949, 8972), 'matplotlib.pyplot.title', 'plt.title', (['"""full image"""'], {}), "('full image')\n", (8958, 8972), True, 'import matplotlib.pyplot as plt\n'), ((8984, 9008), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax1'], {}), '(ax1)\n', (9003, 9008), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((9065, 9086), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'cax': 'cax'}), '(cax=cax)\n', (9077, 9086), True, 'import matplotlib.pyplot as plt\n'), ((9291, 9307), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (9302, 9307), True, 'import matplotlib.pyplot as plt\n'), ((9309, 9334), 'matplotlib.pyplot.imshow', 'plt.imshow', (['patchimage[k]'], {}), '(patchimage[k])\n', (9319, 9334), True, 'import matplotlib.pyplot as plt\n'), ((9336, 9362), 'matplotlib.pyplot.title', 'plt.title', (['"""patch average"""'], {}), "('patch average')\n", (9345, 9362), True, 'import matplotlib.pyplot as plt\n'), ((9374, 9398), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax3'], {}), '(ax3)\n', (9393, 9398), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((9455, 9476), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'cax': 'cax'}), '(cax=cax)\n', (9467, 9476), True, 'import matplotlib.pyplot as plt\n'), ((822, 836), 'numpy.mean', 'np.mean', (['serie'], {}), '(serie)\n', (829, 836), True, 'import numpy as np\n'), ((1442, 1458), 'numpy.zeros', 'np.zeros', (['[Q, Q]'], {}), '([Q, Q])\n', (1450, 1458), True, 'import numpy as np\n'), ((1996, 2012), 'numpy.zeros', 'np.zeros', (['[1, Q]'], {}), '([1, Q])\n', (2004, 2012), True, 'import numpy as np\n'), ((4496, 4514), 'numpy.asarray', 'np.asarray', (['retval'], {}), '(retval)\n', (4506, 4514), True, 'import numpy as np\n'), ((796, 809), 'numpy.var', 'np.var', (['serie'], {}), '(serie)\n', (802, 809), True, 'import numpy as np\n'), ((1737, 1750), 'numpy.array', 'np.array', (['MSM'], {}), '(MSM)\n', (1745, 1750), True, 'import numpy as np\n'), ((2740, 2759), 'matplotlib.backends.backend_pdf.PdfPages', 'bpdf.PdfPages', (['name'], {}), '(name)\n', (2753, 2759), True, 'import matplotlib.backends.backend_pdf as bpdf\n'), ((3694, 3713), 'matplotlib.backends.backend_pdf.PdfPages', 'bpdf.PdfPages', (['name'], {}), '(name)\n', (3707, 3713), True, 'import matplotlib.backends.backend_pdf as bpdf\n'), ((2707, 2729), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (2726, 2729), False, 'import operator\n'), ((2900, 2910), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (2908, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2916, 2928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2926, 2928), True, 'import matplotlib.pyplot as plt\n'), ((2976, 2992), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2987, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2993, 3013), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image[p]'], {}), '(image[p])\n', (3003, 3013), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3048), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax1'], {}), '(ax1)\n', (3043, 3048), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3104, 3125), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'cax': 'cax'}), '(cax=cax)\n', (3116, 3125), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3150), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (3145, 3150), True, 'import matplotlib.pyplot as plt\n'), ((3151, 3174), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paaimage[p]'], {}), '(paaimage[p])\n', (3161, 3174), True, 'import matplotlib.pyplot as plt\n'), ((3185, 3209), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax2'], {}), '(ax2)\n', (3204, 3209), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3265, 3286), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'cax': 'cax'}), '(cax=cax)\n', (3277, 3286), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3345), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3340, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3641, 3663), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3660, 3663), False, 'import operator\n'), ((3813, 3823), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3821, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3939, 3951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3949, 3951), True, 'import matplotlib.pyplot as plt\n'), ((3998, 4026), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {'polar': '(True)'}), '(111, polar=True)\n', (4009, 4026), True, 'import matplotlib.pyplot as plt\n'), ((4111, 4125), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4120, 4125), True, 'import matplotlib.pyplot as plt\n'), ((4466, 4483), 'numpy.asarray', 'np.asarray', (['block'], {}), '(block)\n', (4476, 4483), True, 'import numpy as np\n'), ((8088, 8108), 'numpy.asarray', 'np.asarray', (['paaimage'], {}), '(paaimage)\n', (8098, 8108), True, 'import numpy as np\n'), ((8133, 8154), 'numpy.asarray', 'np.asarray', (['paamatrix'], {}), '(paamatrix)\n', (8143, 8154), True, 'import numpy as np\n'), ((8180, 8202), 'numpy.asarray', 'np.asarray', (['patchimage'], {}), '(patchimage)\n', (8190, 8202), True, 'import numpy as np\n'), ((8229, 8252), 'numpy.asarray', 'np.asarray', (['patchmatrix'], {}), '(patchmatrix)\n', (8239, 8252), True, 'import numpy as np\n'), ((8277, 8298), 'numpy.asarray', 'np.asarray', (['fullimage'], {}), '(fullimage)\n', (8287, 8298), True, 'import numpy as np\n'), ((8324, 8346), 'numpy.asarray', 'np.asarray', (['fullmatrix'], {}), '(fullmatrix)\n', (8334, 8346), True, 'import numpy as np\n'), ((8367, 8382), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (8375, 8382), True, 'import numpy as np\n'), ((7306, 7322), 'numpy.array', 'np.array', (['column'], {}), '(column)\n', (7314, 7322), True, 'import numpy as np\n'), ((7468, 7487), 'numpy.array', 'np.array', (['paacolumn'], {}), '(paacolumn)\n', (7476, 7487), True, 'import numpy as np\n'), ((4386, 4436), 'numpy.max', 'np.max', (['each[i * l:(i + 1) * l, j * l:(j + 1) * l]'], {}), '(each[i * l:(i + 1) * l, j * l:(j + 1) * l])\n', (4392, 4436), True, 'import numpy as np\n'), ((8046, 8061), 'numpy.array', 'np.array', (['patch'], {}), '(patch)\n', (8054, 8061), True, 'import numpy as np\n'), ((7883, 7958), 'numpy.mean', 'np.mean', (['columnmatrix[p * batch:(p + 1) * batch, q * batch:(q + 1) * batch]'], {}), '(columnmatrix[p * batch:(p + 1) * batch, q * batch:(q + 1) * batch])\n', (7890, 7958), True, 'import numpy as np\n'), ((7981, 7996), 'numpy.array', 'np.array', (['patch'], {}), '(patch)\n', (7989, 7996), True, 'import numpy as np\n')] |
import CGAT.Experiment as E
import pandas as pd
import pandas.rpy.common as com
from rpy2.robjects import pandas2ri as py2ri
from rpy2.robjects.packages import importr
from rpy2.robjects import r as R
import rpy2.robjects as ro
import CGATPipelines.Pipeline as P
import itertools
import numpy as np
@P.cluster_runnable
def deseqAnalysis(counts_table,
design,
reference,
outfile):
'''
Perform differential expression analysis using DESeq2
'''
design_df = pd.read_table(design, sep="\t",
header=0, index_col=0)
counts_df = pd.read_table(counts_table, sep="\t",
header=0, index_col=0, compression="gzip")
E.info("setting up counts table")
py2ri.activate()
r_design = py2ri.py2ri_pandasdataframe(design_df)
r_counts = py2ri.py2ri_pandasdataframe(counts_df)
R.assign("design", r_design)
R.assign("counts", r_counts)
R('''sink(file="/dev/null")''')
E.info("loading required R packages")
R('''suppressPackageStartupMessages(library(DESeq2))''')
R('''suppressPackageStartupMessages(library(gplots))''')
R('''suppressPackageStartupMessages(library(RColorBrewer))''')
R('''suppressPackageStartupMessages(library(ggplot2))''')
R('''notZero <- counts[rowMeans(counts) > 1,]''')
R('''dds <- DESeqDataSetFromMatrix(countData=notZero,'''
'''colData=design, design=~group)''')
E.info("performing differential expression testing")
R('''de <- DESeq(dds, fitType="parametric")''')
R('''res <- results(de)''')
E.info("generating MA plot")
# generate MAplots
R('''png("images.dir/%s-MAplot.png", height=480, width=480)''' % reference)
R('''plotMA(res, alpha=0.05)''')
R('''dev.off()''')
E.info("performing variance stabilising transformation")
R('''vst <- data.frame(getVarianceStabilizedData(de))''')
E.info("clustering samples and plotting heatmap")
R('''cors <- cor(vst)''')
R('''hmcol <- colorRampPalette(brewer.pal(9, "PuOr"))(100)''')
R('''png("images.dir/%s-sample_clustering-heatmap.png", height=480, '''
'''width=480)''' % reference)
R('''heatmap.2(as.matrix(cors), col=hmcol, trace="none", '''
'''breaks=seq(0, 1, 0.01), margins=c(10,10), cexRow=0.8,'''
'''cexCol=0.8)''')
R('''dev.off()''')
E.info("performing principal components analysis")
R('''pca <- prcomp(data.frame(t(vst)), scale=T, centre=T)''')
R('''pcs <- data.frame(pca$x)''')
R('''pcs$condition <- as.factor(design$group)''')
R('''p_pca <- ggplot(pcs, aes(x=PC1, y=PC2, colour=condition)) + '''
'''geom_point(size=6)''')
R('''png("images.dir/%s-PCA_pc1-pc2.png", height=480, '''
'''width=480)''' % reference)
R('''print(p_pca)''')
R('''dev.off()''')
E.info("writing table of results")
R('''res.df <- data.frame(res)''')
('''sink(file=NULL)''')
out_df = com.load_data("res.df")
out_df.to_csv(outfile, sep="\t", index_label="gene_id")
def enumerateCounts(counts_file, design_file,
bin_size, max_bin):
'''
enumerate a counts file, then shuffle the rows and bin into
log fold change range
'''
if counts_file.endswith("gz"):
compression = "gzip"
else:
compression = None
counts_df = pd.read_table(counts_file, header=0, index_col=0, sep="\t",
compression=compression)
design_df = pd.read_table(design_file, header=0, index_col=0, sep="\t")
n_genes = len(counts_df)
genes = counts_df.index
gene_combs = itertools.combinations(genes, 2)
log_fold_changes = pd.DataFrame(columns=genes, index=genes,
dtype=np.float64)
# control diets are first 4 samples, hfd are last 4
controls = counts_df.iloc[:,:4]
hfd = counts_df.iloc[:,4:]
bins = range(int(-max_bin*100), int(max_bin*100), int(bin_size*100))
bins = [x/100.0 for x in bins]
idx = 0
for gene1, gene2 in gene_combs:
cntrl = controls.loc[gene1]
test = hfd.loc[gene2]
try:
res = np.mean(test)/np.mean(cntrl)
except ZeroDivisionError:
res = 2.0
log_fold_changes.loc[gene1, gene2] = res
log_fold_changes = log_fold_changes.fillna(0.0)
log_fold_changes = log_fold_changes.apply(np.nan_to_num, axis=0)
mean_filter = lambda x: np.mean(abs(x)) > 1.0
cols = log_fold_changes.apply(mean_filter, axis=1)
rows = log_fold_changes.apply(mean_filter, axis=0)
col_df = log_fold_changes.loc[cols,:]
row_df = col_df.loc[:,rows]
return row_df
| [
"numpy.mean",
"rpy2.robjects.pandas2ri.activate",
"pandas.rpy.common.load_data",
"rpy2.robjects.r.assign",
"itertools.combinations",
"CGAT.Experiment.info",
"pandas.read_table",
"pandas.DataFrame",
"rpy2.robjects.pandas2ri.py2ri_pandasdataframe",
"rpy2.robjects.r"
] | [((527, 581), 'pandas.read_table', 'pd.read_table', (['design'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)'}), "(design, sep='\\t', header=0, index_col=0)\n", (540, 581), True, 'import pandas as pd\n'), ((628, 713), 'pandas.read_table', 'pd.read_table', (['counts_table'], {'sep': '"""\t"""', 'header': '(0)', 'index_col': '(0)', 'compression': '"""gzip"""'}), "(counts_table, sep='\\t', header=0, index_col=0, compression='gzip'\n )\n", (641, 713), True, 'import pandas as pd\n'), ((744, 777), 'CGAT.Experiment.info', 'E.info', (['"""setting up counts table"""'], {}), "('setting up counts table')\n", (750, 777), True, 'import CGAT.Experiment as E\n'), ((782, 798), 'rpy2.robjects.pandas2ri.activate', 'py2ri.activate', ([], {}), '()\n', (796, 798), True, 'from rpy2.robjects import pandas2ri as py2ri\n'), ((814, 852), 'rpy2.robjects.pandas2ri.py2ri_pandasdataframe', 'py2ri.py2ri_pandasdataframe', (['design_df'], {}), '(design_df)\n', (841, 852), True, 'from rpy2.robjects import pandas2ri as py2ri\n'), ((868, 906), 'rpy2.robjects.pandas2ri.py2ri_pandasdataframe', 'py2ri.py2ri_pandasdataframe', (['counts_df'], {}), '(counts_df)\n', (895, 906), True, 'from rpy2.robjects import pandas2ri as py2ri\n'), ((912, 940), 'rpy2.robjects.r.assign', 'R.assign', (['"""design"""', 'r_design'], {}), "('design', r_design)\n", (920, 940), True, 'from rpy2.robjects import r as R\n'), ((945, 973), 'rpy2.robjects.r.assign', 'R.assign', (['"""counts"""', 'r_counts'], {}), "('counts', r_counts)\n", (953, 973), True, 'from rpy2.robjects import r as R\n'), ((979, 1006), 'rpy2.robjects.r', 'R', (['"""sink(file="/dev/null")"""'], {}), '(\'sink(file="/dev/null")\')\n', (980, 1006), True, 'from rpy2.robjects import r as R\n'), ((1015, 1052), 'CGAT.Experiment.info', 'E.info', (['"""loading required R packages"""'], {}), "('loading required R packages')\n", (1021, 1052), True, 'import CGAT.Experiment as E\n'), ((1057, 1109), 'rpy2.robjects.r', 'R', (['"""suppressPackageStartupMessages(library(DESeq2))"""'], {}), "('suppressPackageStartupMessages(library(DESeq2))')\n", (1058, 1109), True, 'from rpy2.robjects import r as R\n'), ((1118, 1170), 'rpy2.robjects.r', 'R', (['"""suppressPackageStartupMessages(library(gplots))"""'], {}), "('suppressPackageStartupMessages(library(gplots))')\n", (1119, 1170), True, 'from rpy2.robjects import r as R\n'), ((1179, 1237), 'rpy2.robjects.r', 'R', (['"""suppressPackageStartupMessages(library(RColorBrewer))"""'], {}), "('suppressPackageStartupMessages(library(RColorBrewer))')\n", (1180, 1237), True, 'from rpy2.robjects import r as R\n'), ((1246, 1299), 'rpy2.robjects.r', 'R', (['"""suppressPackageStartupMessages(library(ggplot2))"""'], {}), "('suppressPackageStartupMessages(library(ggplot2))')\n", (1247, 1299), True, 'from rpy2.robjects import r as R\n'), ((1309, 1354), 'rpy2.robjects.r', 'R', (['"""notZero <- counts[rowMeans(counts) > 1,]"""'], {}), "('notZero <- counts[rowMeans(counts) > 1,]')\n", (1310, 1354), True, 'from rpy2.robjects import r as R\n'), ((1363, 1451), 'rpy2.robjects.r', 'R', (['"""dds <- DESeqDataSetFromMatrix(countData=notZero,colData=design, design=~group)"""'], {}), "('dds <- DESeqDataSetFromMatrix(countData=notZero,colData=design, design=~group)'\n )\n", (1364, 1451), True, 'from rpy2.robjects import r as R\n'), ((1468, 1520), 'CGAT.Experiment.info', 'E.info', (['"""performing differential expression testing"""'], {}), "('performing differential expression testing')\n", (1474, 1520), True, 'import CGAT.Experiment as E\n'), ((1525, 1568), 'rpy2.robjects.r', 'R', (['"""de <- DESeq(dds, fitType="parametric")"""'], {}), '(\'de <- DESeq(dds, fitType="parametric")\')\n', (1526, 1568), True, 'from rpy2.robjects import r as R\n'), ((1577, 1600), 'rpy2.robjects.r', 'R', (['"""res <- results(de)"""'], {}), "('res <- results(de)')\n", (1578, 1600), True, 'from rpy2.robjects import r as R\n'), ((1610, 1638), 'CGAT.Experiment.info', 'E.info', (['"""generating MA plot"""'], {}), "('generating MA plot')\n", (1616, 1638), True, 'import CGAT.Experiment as E\n'), ((1666, 1737), 'rpy2.robjects.r', 'R', (['(\'png("images.dir/%s-MAplot.png", height=480, width=480)\' % reference)'], {}), '(\'png("images.dir/%s-MAplot.png", height=480, width=480)\' % reference)\n', (1667, 1737), True, 'from rpy2.robjects import r as R\n'), ((1746, 1774), 'rpy2.robjects.r', 'R', (['"""plotMA(res, alpha=0.05)"""'], {}), "('plotMA(res, alpha=0.05)')\n", (1747, 1774), True, 'from rpy2.robjects import r as R\n'), ((1783, 1797), 'rpy2.robjects.r', 'R', (['"""dev.off()"""'], {}), "('dev.off()')\n", (1784, 1797), True, 'from rpy2.robjects import r as R\n'), ((1807, 1863), 'CGAT.Experiment.info', 'E.info', (['"""performing variance stabilising transformation"""'], {}), "('performing variance stabilising transformation')\n", (1813, 1863), True, 'import CGAT.Experiment as E\n'), ((1868, 1921), 'rpy2.robjects.r', 'R', (['"""vst <- data.frame(getVarianceStabilizedData(de))"""'], {}), "('vst <- data.frame(getVarianceStabilizedData(de))')\n", (1869, 1921), True, 'from rpy2.robjects import r as R\n'), ((1931, 1980), 'CGAT.Experiment.info', 'E.info', (['"""clustering samples and plotting heatmap"""'], {}), "('clustering samples and plotting heatmap')\n", (1937, 1980), True, 'import CGAT.Experiment as E\n'), ((1985, 2006), 'rpy2.robjects.r', 'R', (['"""cors <- cor(vst)"""'], {}), "('cors <- cor(vst)')\n", (1986, 2006), True, 'from rpy2.robjects import r as R\n'), ((2015, 2073), 'rpy2.robjects.r', 'R', (['"""hmcol <- colorRampPalette(brewer.pal(9, "PuOr"))(100)"""'], {}), '(\'hmcol <- colorRampPalette(brewer.pal(9, "PuOr"))(100)\')\n', (2016, 2073), True, 'from rpy2.robjects import r as R\n'), ((2082, 2182), 'rpy2.robjects.r', 'R', (['(\'png("images.dir/%s-sample_clustering-heatmap.png", height=480, width=480)\' %\n reference)'], {}), '(\n \'png("images.dir/%s-sample_clustering-heatmap.png", height=480, width=480)\'\n % reference)\n', (2083, 2182), True, 'from rpy2.robjects import r as R\n'), ((2194, 2320), 'rpy2.robjects.r', 'R', (['"""heatmap.2(as.matrix(cors), col=hmcol, trace="none", breaks=seq(0, 1, 0.01), margins=c(10,10), cexRow=0.8,cexCol=0.8)"""'], {}), '(\'heatmap.2(as.matrix(cors), col=hmcol, trace="none", breaks=seq(0, 1, 0.01), margins=c(10,10), cexRow=0.8,cexCol=0.8)\'\n )\n', (2195, 2320), True, 'from rpy2.robjects import r as R\n'), ((2350, 2364), 'rpy2.robjects.r', 'R', (['"""dev.off()"""'], {}), "('dev.off()')\n", (2351, 2364), True, 'from rpy2.robjects import r as R\n'), ((2374, 2424), 'CGAT.Experiment.info', 'E.info', (['"""performing principal components analysis"""'], {}), "('performing principal components analysis')\n", (2380, 2424), True, 'import CGAT.Experiment as E\n'), ((2429, 2486), 'rpy2.robjects.r', 'R', (['"""pca <- prcomp(data.frame(t(vst)), scale=T, centre=T)"""'], {}), "('pca <- prcomp(data.frame(t(vst)), scale=T, centre=T)')\n", (2430, 2486), True, 'from rpy2.robjects import r as R\n'), ((2495, 2524), 'rpy2.robjects.r', 'R', (['"""pcs <- data.frame(pca$x)"""'], {}), "('pcs <- data.frame(pca$x)')\n", (2496, 2524), True, 'from rpy2.robjects import r as R\n'), ((2533, 2578), 'rpy2.robjects.r', 'R', (['"""pcs$condition <- as.factor(design$group)"""'], {}), "('pcs$condition <- as.factor(design$group)')\n", (2534, 2578), True, 'from rpy2.robjects import r as R\n'), ((2587, 2675), 'rpy2.robjects.r', 'R', (['"""p_pca <- ggplot(pcs, aes(x=PC1, y=PC2, colour=condition)) + geom_point(size=6)"""'], {}), "('p_pca <- ggplot(pcs, aes(x=PC1, y=PC2, colour=condition)) + geom_point(size=6)'\n )\n", (2588, 2675), True, 'from rpy2.robjects import r as R\n'), ((2692, 2768), 'rpy2.robjects.r', 'R', (['(\'png("images.dir/%s-PCA_pc1-pc2.png", height=480, width=480)\' % reference)'], {}), '(\'png("images.dir/%s-PCA_pc1-pc2.png", height=480, width=480)\' % reference)\n', (2693, 2768), True, 'from rpy2.robjects import r as R\n'), ((2790, 2807), 'rpy2.robjects.r', 'R', (['"""print(p_pca)"""'], {}), "('print(p_pca)')\n", (2791, 2807), True, 'from rpy2.robjects import r as R\n'), ((2816, 2830), 'rpy2.robjects.r', 'R', (['"""dev.off()"""'], {}), "('dev.off()')\n", (2817, 2830), True, 'from rpy2.robjects import r as R\n'), ((2840, 2874), 'CGAT.Experiment.info', 'E.info', (['"""writing table of results"""'], {}), "('writing table of results')\n", (2846, 2874), True, 'import CGAT.Experiment as E\n'), ((2879, 2909), 'rpy2.robjects.r', 'R', (['"""res.df <- data.frame(res)"""'], {}), "('res.df <- data.frame(res)')\n", (2880, 2909), True, 'from rpy2.robjects import r as R\n'), ((2955, 2978), 'pandas.rpy.common.load_data', 'com.load_data', (['"""res.df"""'], {}), "('res.df')\n", (2968, 2978), True, 'import pandas.rpy.common as com\n'), ((3353, 3442), 'pandas.read_table', 'pd.read_table', (['counts_file'], {'header': '(0)', 'index_col': '(0)', 'sep': '"""\t"""', 'compression': 'compression'}), "(counts_file, header=0, index_col=0, sep='\\t', compression=\n compression)\n", (3366, 3442), True, 'import pandas as pd\n'), ((3484, 3543), 'pandas.read_table', 'pd.read_table', (['design_file'], {'header': '(0)', 'index_col': '(0)', 'sep': '"""\t"""'}), "(design_file, header=0, index_col=0, sep='\\t')\n", (3497, 3543), True, 'import pandas as pd\n'), ((3619, 3651), 'itertools.combinations', 'itertools.combinations', (['genes', '(2)'], {}), '(genes, 2)\n', (3641, 3651), False, 'import itertools\n'), ((3675, 3733), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'genes', 'index': 'genes', 'dtype': 'np.float64'}), '(columns=genes, index=genes, dtype=np.float64)\n', (3687, 3733), True, 'import pandas as pd\n'), ((4149, 4162), 'numpy.mean', 'np.mean', (['test'], {}), '(test)\n', (4156, 4162), True, 'import numpy as np\n'), ((4163, 4177), 'numpy.mean', 'np.mean', (['cntrl'], {}), '(cntrl)\n', (4170, 4177), True, 'import numpy as np\n')] |
from typing import Dict, List, Any, Optional
from .aggregating import Aggregating, aggregating
from typing import Callable, Mapping, Iterator
import numpy as np
from tqdm import tqdm
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from operation import DatasetOperation, dataset_operation
from featurize import *
from data import TextData
class TextClassificationAggregating(Aggregating, DatasetOperation):
def __init__(self,
name:str = None,
func:Callable[...,Any] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text"],
generated_field: str = None,
task = "text-classification",
description = None,
):
super().__init__(name = name, func = func, resources = resources, contributor = contributor,
task = task,description=description)
self._type = 'TextClassificationAggregating'
self.processed_fields = ["text"]
if isinstance(processed_fields,str):
self.processed_fields[0] = processed_fields
else:
self.processed_fields = processed_fields
self.generated_field = generated_field
self._data_type = "Dataset"
class text_classification_aggregating(aggregating, dataset_operation):
def __init__(self,
name: Optional[str] = None,
resources: Optional[Mapping[str, Any]] = None,
contributor: str = None,
processed_fields: List = ["text"],
generated_field:str = None,
task = "text-classification",
description = None,
):
super().__init__(name = name, resources = resources, contributor = contributor, description=description)
self.processed_fields = processed_fields
self.generated_field = generated_field
self.task = task
def __call__(self, *param_arg):
if callable(self.name):
tf_class = TextClassificationAggregating(name = self.name.__name__, func=self.name)
return tf_class(*param_arg)
else:
f = param_arg[0]
name = self.name or f.__name__
tf_cls = TextClassificationAggregating(name=name, func = f,
resources = self.resources,
contributor = self.contributor,
processed_fields = self.processed_fields,
generated_field = self.generated_field,
task = self.task,
description=self.description,)
return tf_cls
@text_classification_aggregating(name="get_features_dataset_level", contributor="datalab",
task="Any",
description="Get the average length of a list of texts")
def get_features_dataset_level(samples:Iterator):
"""
Package: python
Input:
texts: Iterator
Output:
int
"""
res_info = {}
for sample in samples:
for feature_name, value in sample.items():
if feature_name == "label":
continue
if isinstance(value, int) or isinstance(value, float):
if feature_name not in res_info.keys():
res_info[feature_name] = value
else:
res_info[feature_name] += value
for feature_name, value in res_info.items():
res_info[feature_name] /= len(samples)
return res_info
@text_classification_aggregating(name = "get_label_distribution", contributor= "datalab", processed_fields= "text",
task="text-classification", description="Calculate the label distribution of a given text classification dataset")
def get_label_distribution(samples:Iterator):
"""
Input:
samples: [{
"text":
"label":
}]
Output:
dict:
"label":n_samples
"""
labels_to_number = {}
for sample in samples:
text, label = sample["text"], sample["label"]
if label in labels_to_number.keys():
labels_to_number[label] += 1
else:
labels_to_number[label] = 1
res = {
"imbalance_ratio": min(labels_to_number.values())*1.0/max(labels_to_number.values()),
"label_distribution":labels_to_number
}
return res
@text_classification_aggregating(name="get_statistics", contributor="datalab",
task="text-classification",
description="Calculate the overall statistics (e.g., average length) of a given text classification dataset")
def get_statistics(samples: Iterator):
"""
Input:
samples: [{
"text":
"label":
}]
Output:
dict:
"label":n_samples
usage:
you can test it with following code:
from datalabs import load_dataset
from aggregate import *
dataset = load_dataset('mr')
res = dataset['test'].apply(get_statistics)
print(next(res))
"""
# Grammar checker
# from spellchecker import SpellChecker
# spell = SpellChecker()
#spell = SpellChecker(distance=1) # set at initialization
scriptpath = os.path.dirname(__file__)
with open(os.path.join(scriptpath, '../edit/resources/spell_corrections.json'), 'r') as file:
COMMON_MISSPELLINGS_DICT = json.loads(file.read())
# print(COMMON_MISSPELLINGS_DICT)
# exit()
# for hate speech
from hatesonar import Sonar
sonar = Sonar()
sample_infos = []
labels_to_number = {}
lengths = []
gender_results = []
vocab = {}
number_of_tokens = 0
hatespeech = {
"hate_speech":{"ratio":0,"texts":[]},
"offensive_language":{"ratio":0,"texts":[]},
"neither":{"ratio":0,"texts":[]}}
spelling_errors = []
for sample in tqdm(samples):
text, label = sample["text"], sample["label"]
# grammar checker
for word in text.split(" "):
#word_corrected = spell.correction(word)
if word.lower() in COMMON_MISSPELLINGS_DICT.keys():
spelling_errors.append((word, COMMON_MISSPELLINGS_DICT[word.lower()]))
# hataspeech
results = sonar.ping(text=text)
class_ = results['top_class']
confidence = 0
for value in results['classes']:
if value['class_name'] == class_:
confidence = value['confidence']
break
hatespeech[class_]["ratio"] += 1
if class_ != "neither":
hatespeech[class_]["texts"].append(text)
# update the number of tokens
number_of_tokens += len(text.split())
# update vocabulary
for w in text.split(" "):
if w in vocab.keys():
vocab[w] += 1
else:
vocab[w] = 1
# gender bias
"""
result = {
'word': {
'male': one_words_results['words_m'],
'female': one_words_results['words_f']
},
'single_name': {
'male': one_words_results['single_name_m'],
'female': one_words_results['single_name_f']
},
}
"""
gender_result = get_gender_bias.func(text)
gender_results.append(gender_result)
# average length
text_length = get_length.func(text)
lengths.append(text_length)
# label imbalance
if label in labels_to_number.keys():
labels_to_number[label] += 1
else:
labels_to_number[label] = 1
sample_info = {
"text":text,
"label":label,
"text_length": text_length,
"gender":gender_result,
"hate_speech_class":class_,
}
if len(sample_infos) < 10000:
sample_infos.append(sample_info)
# -------------------------- dataset-level ---------------------------
# compute dataset-level gender_ratio
gender_ratio = {"word":
{"male": 0, "female": 0},
"single_name":
{"male": 0, "female": 0},
}
for result in gender_results:
res_word = result['word']
gender_ratio['word']['male'] += result['word']['male']
gender_ratio['word']['female'] += result['word']['female']
gender_ratio['single_name']['male'] += result['single_name']['male']
gender_ratio['single_name']['female'] += result['single_name']['female']
n_gender = (gender_ratio['word']['male'] + gender_ratio['word']['female'])
if n_gender != 0:
gender_ratio['word']['male'] /= n_gender
gender_ratio['word']['female'] /= n_gender
else:
gender_ratio['word']['male'] = 0
gender_ratio['word']['female'] = 0
n_gender = (gender_ratio['single_name']['male'] + gender_ratio['single_name']['female'])
if n_gender != 0:
gender_ratio['single_name']['male'] /= n_gender
gender_ratio['single_name']['female'] /= n_gender
else:
gender_ratio['single_name']['male'] = 0
gender_ratio['single_name']['female'] = 0
# get vocabulary
vocab_sorted = dict(sorted(vocab.items(), key=lambda item: item[1], reverse=True))
# get ratio of hate_speech:offensive_language:neither
for k,v in hatespeech.items():
hatespeech[k]["ratio"] /= len(samples)
#print(hatespeech)
res = {
"dataset-level":{
"length_info": {
"max_text_length": np.max(lengths),
"min_text_length": np.min(lengths),
"average_text_length": np.average(lengths),
},
"label_info": {
"ratio":min(labels_to_number.values()) * 1.0 / max(labels_to_number.values()),
"distribution": labels_to_number,
},
"gender_info":gender_ratio,
"vocabulary_info":vocab_sorted,
"number_of_samples":len(samples),
"number_of_tokens":number_of_tokens,
"hatespeech_info":hatespeech,
"spelling_errors":len(spelling_errors),
},
"sample-level":sample_infos
}
return res | [
"numpy.average",
"tqdm.tqdm",
"os.path.join",
"hatesonar.Sonar",
"numpy.max",
"os.path.dirname",
"numpy.min"
] | [((5425, 5450), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5440, 5450), False, 'import os\n'), ((5733, 5740), 'hatesonar.Sonar', 'Sonar', ([], {}), '()\n', (5738, 5740), False, 'from hatesonar import Sonar\n'), ((6124, 6137), 'tqdm.tqdm', 'tqdm', (['samples'], {}), '(samples)\n', (6128, 6137), False, 'from tqdm import tqdm\n'), ((249, 274), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (264, 274), False, 'import os\n'), ((5465, 5533), 'os.path.join', 'os.path.join', (['scriptpath', '"""../edit/resources/spell_corrections.json"""'], {}), "(scriptpath, '../edit/resources/spell_corrections.json')\n", (5477, 5533), False, 'import os\n'), ((9832, 9847), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (9838, 9847), True, 'import numpy as np\n'), ((9888, 9903), 'numpy.min', 'np.min', (['lengths'], {}), '(lengths)\n', (9894, 9903), True, 'import numpy as np\n'), ((9948, 9967), 'numpy.average', 'np.average', (['lengths'], {}), '(lengths)\n', (9958, 9967), True, 'import numpy as np\n')] |
from math import ceil, log, sqrt
from random import randrange, seed, uniform
import numpy
import scipy.stats
from .JobRequest import JobRequest
from .Scheduler import Scheduler, SchedulerConfig
class Experiments:
"""A simulation environment for running scheduling experiments.
"""
GENERATED_JOBS_COUNT = 50 #: Number of jobs to generate.
def __init__(
self, reconfig_enabled=True, power_off_enabled=True, param_enabled=True,
):
"""Constructs an Experiments object.
Args:
reconfig_enabled: A flag for enabling reconfigurations.
power_off_enabled: A flag for enabling power-offs.
param_enabled: A flag for enabling the decision making process.
"""
self.reconfig_enabled = reconfig_enabled
"""A flag for enabling reconfigurations."""
self.power_off_enabled = power_off_enabled #: A flag for enabling power-offs.
self.param_enabled = param_enabled #: A flag for enabling power-offs.
def run_expts(
self, config: SchedulerConfig, num_srvs: int, num_expts: int, seed_num: int
):
"""Runs a number of experiments with the specified configuration.
Args:
config: The configuration the Scheduler within the experiments.
num_srvs: The total number of servers.
num_expts: The number of experiements to be run.
seed_num: A seed used to update the job generator.
Returns:
list: A list of scheduling statistics.
"""
stats = []
for i in range(num_expts):
expt_stats = self._run_expt(config, num_srvs, seed_num + i)
stats.append(expt_stats)
return stats
def _run_expt(self, config: SchedulerConfig, num_srvs: int, seed_num: int):
"""Runs one experiment.
Args:
config: The configuration the Scheduler within the experiments.
num_srvs: The total number of servers.
num_expts: The number of experiements to be run.
seed_num: A seed used to update the job generator.
Returns:
SchedulerStats: A SchedulerStats object wrapping the statistics of\
within the Experiments.
By default the weights of the reconfigurations and power-offs in the\
in the resulting objects are 1.
"""
scheduler = Scheduler(
num_srvs,
config,
self.reconfig_enabled,
self.power_off_enabled,
self.param_enabled,
)
jobs = self._generate_jobs(Experiments.GENERATED_JOBS_COUNT, num_srvs, seed_num)
time = 0
while jobs or scheduler.is_working():
for job in list(jobs):
if time < job.sub_time:
break
scheduler.schedule(job)
jobs.remove(job)
scheduler.update_schedule(time)
time += 10
scheduler.stop(time)
return scheduler.stats(stretch_time_weight=1, energy_weight=1)
def _generate_jobs(self, job_count, server_count, seed_num):
"""Generates a set of jobs.
Args:
job_count: The number of jobs to be generates.
server_count: The total number of servers.
seed_num: A seed used to update the job generator.
Returns:
list: A list of generated JobRequest objects.
"""
jobs = []
previous_sub_time = 0
for i in range(job_count):
job = self._generate_job(previous_sub_time, server_count, i, seed_num)
jobs.append(job)
previous_sub_time = job.sub_time
return jobs
def _generate_job(self, timestampLastEvent, server_count, num, seed_num):
"""Generates one job.
Args:
timestampLastEvent: The time of the last event.
server_count: The total number of servers.
num: An index of the job, used to update the job generator.
seed_num: A seed used to update the job generator.
Returns:
list: A list of generated jobs
"""
seed(seed_num + num)
numpy.random.seed(seed=seed_num + num)
sub_time, mass = self._get_next_task(timestampLastEvent, 500, 1700, 3.8)
alpha = uniform(0.5, 1)
data = uniform(10, 500)
min_num_servers = ceil((alpha / 3) * (server_count - 1))
max_num_servers = randrange(min_num_servers, server_count)
return JobRequest(
"job" + str(num),
sub_time,
alpha,
data,
mass,
min_num_servers,
max_num_servers,
)
def _get_next_task(self, timestampLastEvent, dynamism, mass, disparity):
arrival = scipy.stats.pareto.rvs(4, loc=-1) * 3 * dynamism
newTimeStamp = timestampLastEvent + arrival
makespan = self._get_makespan(mass, disparity)
return (newTimeStamp, makespan)
def _get_makespan(self, mass, disparity):
mu = log(mass / disparity)
sigma = sqrt(2 * (numpy.log(mass) - mu))
return scipy.stats.lognorm.rvs(sigma, scale=mass / disparity)
| [
"random.uniform",
"math.ceil",
"random.randrange",
"numpy.log",
"random.seed",
"math.log",
"numpy.random.seed"
] | [((4155, 4175), 'random.seed', 'seed', (['(seed_num + num)'], {}), '(seed_num + num)\n', (4159, 4175), False, 'from random import randrange, seed, uniform\n'), ((4184, 4222), 'numpy.random.seed', 'numpy.random.seed', ([], {'seed': '(seed_num + num)'}), '(seed=seed_num + num)\n', (4201, 4222), False, 'import numpy\n'), ((4320, 4335), 'random.uniform', 'uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (4327, 4335), False, 'from random import randrange, seed, uniform\n'), ((4351, 4367), 'random.uniform', 'uniform', (['(10)', '(500)'], {}), '(10, 500)\n', (4358, 4367), False, 'from random import randrange, seed, uniform\n'), ((4394, 4430), 'math.ceil', 'ceil', (['(alpha / 3 * (server_count - 1))'], {}), '(alpha / 3 * (server_count - 1))\n', (4398, 4430), False, 'from math import ceil, log, sqrt\n'), ((4459, 4499), 'random.randrange', 'randrange', (['min_num_servers', 'server_count'], {}), '(min_num_servers, server_count)\n', (4468, 4499), False, 'from random import randrange, seed, uniform\n'), ((5054, 5075), 'math.log', 'log', (['(mass / disparity)'], {}), '(mass / disparity)\n', (5057, 5075), False, 'from math import ceil, log, sqrt\n'), ((5102, 5117), 'numpy.log', 'numpy.log', (['mass'], {}), '(mass)\n', (5111, 5117), False, 'import numpy\n')] |
import numpy as np
from static import PATH_START, PATH_START_PERSONAL
from static import PATH_START_SERVER , PATH_START_PERSONAL_SERVER
import pandas as pd
from core_functions import crystal_scopus_abstract, crystal_scopus_abstract2
import time
# ! VALIDATION IS REQUIRED
MTCOUNT = 10 #00
df = pd.read_excel(PATH_START + r'raw data algemeen\api_caches\try02.xlsx', nrows=MTCOUNT, usecols=['eid'])
print(df.head())
#res = crystal_scopus_abstract2(df.head(2))
t0 = time.time()
fullres = crystal_scopus_abstract2(df.head(MTCOUNT), multi_thread=True)
t1 = time.time()
print(t1-t0)
print('we just did ' + str(MTCOUNT) + ' records in just ' + str(t1-t0) + ' seconds!')
print(fullres.scopus_abstract_text.isnull().mean())
print(fullres.scopus_abstract_text.isnull().mean())
print(fullres.scopus_abstract_retries.mean())
print(fullres.scopus_abstract_retries.max())
# ST
#we just did 100 records in just 124.84675812721252 seconds!
#0.04
#0.04
#0.0
#0.0
qq=1
qq+=1
#input('nu productie?')
# go for it
start_path = 'E:/Shared drives/Aurora-SDG-analysis/Aurora-SDG-Analysis-project02/02-query-crafting/SDG-Survey/sdg-survey-result-data/'
df_eids = pd.read_csv(start_path + 'eids.csv')
#df_eids = df_eids.head(102)
bits=10
stepsize = int(np.ceil(len(df_eids) / bits)+1)
for cur_bit in np.arange(0,bits):
print('-------')
print(cur_bit)
df_eids_CUR = df_eids.iloc[stepsize*cur_bit: stepsize*(cur_bit+1),:]
if len(df_eids_CUR) > 0:
t0 = time.time()
fullres = crystal_scopus_abstract2(df_eids_CUR, multi_thread=True)
t1 = time.time()
print(t1-t0)
print('we just did ' + str(len(df_eids_CUR)) + ' records in just ' + str(t1-t0) + ' seconds!')
print(fullres.scopus_abstract_text.isnull().mean())
print(fullres.scopus_abstract_text.isnull().mean())
print(fullres.scopus_abstract_retries.mean())
print(fullres.scopus_abstract_retries.max())
fullres[['eid', 'scopus_abstract_text']].to_csv(start_path + 'experimental_abstract_texts' + str(cur_bit) + '.csv')
# we validated it now I guess
| [
"pandas.read_csv",
"core_functions.crystal_scopus_abstract2",
"pandas.read_excel",
"time.time",
"numpy.arange"
] | [((299, 406), 'pandas.read_excel', 'pd.read_excel', (["(PATH_START + 'raw data algemeen\\\\api_caches\\\\try02.xlsx')"], {'nrows': 'MTCOUNT', 'usecols': "['eid']"}), "(PATH_START + 'raw data algemeen\\\\api_caches\\\\try02.xlsx',\n nrows=MTCOUNT, usecols=['eid'])\n", (312, 406), True, 'import pandas as pd\n'), ((473, 484), 'time.time', 'time.time', ([], {}), '()\n', (482, 484), False, 'import time\n'), ((562, 573), 'time.time', 'time.time', ([], {}), '()\n', (571, 573), False, 'import time\n'), ((1156, 1192), 'pandas.read_csv', 'pd.read_csv', (["(start_path + 'eids.csv')"], {}), "(start_path + 'eids.csv')\n", (1167, 1192), True, 'import pandas as pd\n'), ((1295, 1313), 'numpy.arange', 'np.arange', (['(0)', 'bits'], {}), '(0, bits)\n', (1304, 1313), True, 'import numpy as np\n'), ((1471, 1482), 'time.time', 'time.time', ([], {}), '()\n', (1480, 1482), False, 'import time\n'), ((1501, 1557), 'core_functions.crystal_scopus_abstract2', 'crystal_scopus_abstract2', (['df_eids_CUR'], {'multi_thread': '(True)'}), '(df_eids_CUR, multi_thread=True)\n', (1525, 1557), False, 'from core_functions import crystal_scopus_abstract, crystal_scopus_abstract2\n'), ((1571, 1582), 'time.time', 'time.time', ([], {}), '()\n', (1580, 1582), False, 'import time\n')] |
import wkskel
import os
import numpy as np
from matplotlib import pyplot as plt
import torch
from sklearn import decomposition
from genEM3.data.wkwdata import WkwData
from genEM3.data.skeleton import getAllTreeCoordinates
from genEM3.model.autoencoder2d import AE, Encoder_4_sampling_bn_1px_deep_convonly_skip, Decoder_4_sampling_bn_1px_deep_convonly_skip
from genEM3.inference.inference import Predictor
from genEM3.util.image import normalize, readWkwFromCenter
# seed numpy random number generator
np.random.seed(5)
# Read the nml and print some basic properties
nmlDir = '/u/alik/code/genEM3/data/'
nmlName = 'artefact_trainingData.nml'
nmlPath = os.path.join(nmlDir, nmlName)
skel = wkskel.Skeleton(nmlPath)
# Get coordinates of the debris locations
coordArray = getAllTreeCoordinates(skel)
numTrainingExamples = 600
assert coordArray.shape == (numTrainingExamples, 3)
# Get the bounding boxes of each debris location and read into numpy array
dimsForCrop = np.array([140, 140, 0])
wkwDir = '/tmpscratch/webknossos/Connectomics_Department/2018-11-13_scMS109_1to7199_v01_l4_06_24_fixed_mag8/color/1'
# Read images
images = readWkwFromCenter(wkwDir, coordArray, dimsForCrop)
# Normalize
imagesNormal = normalize(images, mean=148, std=36)
# Create pytorch dataLoader from numpy array
imgT = torch.Tensor(imagesNormal)
dataset_debris = torch.utils.data.TensorDataset(imgT)
dataLoader_debris = torch.utils.data.DataLoader(dataset_debris, batch_size=5)
# Plot all the images and examples from the data loader for sanity checking
showExamples = False
if showExamples:
for i in range(111):
plt.imshow(np.squeeze(images[i, 0, :, :]), cmap='gray')
plt.show()
for i, example in enumerate(dataLoader_debris):
plt.imshow(np.squeeze(example[0].numpy()), cmap='gray')
plt.show()
# Running model ae_v03 on the data
run_root = os.path.dirname(os.path.abspath(__file__))
datasources_json_path = os.path.join(run_root, 'datasources_distributed.json')
# setting for the clean data loader
batch_size = 5
input_shape = (140, 140, 1)
output_shape = (140, 140, 1)
num_workers = 0
# construct clean data loader from json file
datasources = WkwData.datasources_from_json(datasources_json_path)
dataset = WkwData(
input_shape=input_shape,
target_shape=output_shape,
data_sources=datasources,
cache_HDD=False,
cache_RAM=True,
)
clean_loader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, num_workers=num_workers)
# settings for the model to be loaded
# (Is there a way to save so that you do not need to specify model again?)
state_dict_path = os.path.join(run_root, './.log/torch_model')
device = 'cpu'
kernel_size = 3
stride = 1
n_fmaps = 16
n_latent = 2048
input_size = 140
output_size = input_size
model = AE(
Encoder_4_sampling_bn_1px_deep_convonly_skip(input_size, kernel_size, stride, n_fmaps, n_latent),
Decoder_4_sampling_bn_1px_deep_convonly_skip(output_size, kernel_size, stride, n_fmaps, n_latent))
# loading the model
checkpoint = torch.load(state_dict_path, map_location=lambda storage, loc: storage)
state_dict = checkpoint['model_state_dict']
model.load_state_dict(state_dict)
# Create a dictionary to keep the hidden state of debris and clean images
TYPENAMES = ('clean', 'debris')
hidden_dict = {htype: [] for htype in TYPENAMES}
# predicting for clean data
predictor_clean = Predictor(
dataloader=clean_loader,
model=model,
state_dict=state_dict,
device=device,
batch_size=batch_size,
input_shape=input_shape,
output_shape=output_shape)
hidden_dict[TYPENAMES[0]] = predictor_clean.encode()
# predicting for debris
predictor_debris = Predictor(
dataloader=dataLoader_debris,
model=model,
state_dict=state_dict,
device=device,
batch_size=batch_size,
input_shape=input_shape,
output_shape=output_shape)
hidden_dict[TYPENAMES[1]] = predictor_debris.encodeList()
# Concatenate individual batches into single torch tensors
hidden_dict_cat = {key: torch.cat(val, dim=0).numpy().squeeze() for key, val in hidden_dict.items()}
# Get indices for clean vs. debris images
numSamples = [x.shape[0] for x in hidden_dict_cat.values()]
indices = [np.arange(numSamples[0]), np.arange(numSamples[0], sum(numSamples))]
# colors for plot (debris: black, clean: blue)
blackC = np.zeros((1, 3))
blueC = np.zeros((1, 3))
blueC[0, 2] = 1
colors = [blueC, blackC]
colorsForPlot = {key: value for key, value in zip(TYPENAMES, colors)}
# Generate the input
hiddenMatrix = np.concatenate((hidden_dict_cat[TYPENAMES[0]], hidden_dict_cat[TYPENAMES[1]]), axis=0)
# perform the principal component analysis using scikitlearn
pca = decomposition.PCA(n_components=2)
pca.fit(hiddenMatrix)
PCs = pca.transform(hiddenMatrix)
# Plot the PCA results
for index, label in enumerate(TYPENAMES):
plt.scatter(PCs[indices[index], 0], PCs[indices[index], 1], c=colorsForPlot.get(label), label=label)
plt.xlabel('Principle Component 1')
plt.ylabel('Principle Component 2')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"genEM3.model.autoencoder2d.Decoder_4_sampling_bn_1px_deep_convonly_skip",
"numpy.arange",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.xlabel",
"genEM3.util.image.normalize",
"numpy.random.seed",
"numpy.concatenate",
"genEM3.inference.inference.Predic... | [((503, 520), 'numpy.random.seed', 'np.random.seed', (['(5)'], {}), '(5)\n', (517, 520), True, 'import numpy as np\n'), ((654, 683), 'os.path.join', 'os.path.join', (['nmlDir', 'nmlName'], {}), '(nmlDir, nmlName)\n', (666, 683), False, 'import os\n'), ((691, 715), 'wkskel.Skeleton', 'wkskel.Skeleton', (['nmlPath'], {}), '(nmlPath)\n', (706, 715), False, 'import wkskel\n'), ((772, 799), 'genEM3.data.skeleton.getAllTreeCoordinates', 'getAllTreeCoordinates', (['skel'], {}), '(skel)\n', (793, 799), False, 'from genEM3.data.skeleton import getAllTreeCoordinates\n'), ((967, 990), 'numpy.array', 'np.array', (['[140, 140, 0]'], {}), '([140, 140, 0])\n', (975, 990), True, 'import numpy as np\n'), ((1131, 1181), 'genEM3.util.image.readWkwFromCenter', 'readWkwFromCenter', (['wkwDir', 'coordArray', 'dimsForCrop'], {}), '(wkwDir, coordArray, dimsForCrop)\n', (1148, 1181), False, 'from genEM3.util.image import normalize, readWkwFromCenter\n'), ((1209, 1244), 'genEM3.util.image.normalize', 'normalize', (['images'], {'mean': '(148)', 'std': '(36)'}), '(images, mean=148, std=36)\n', (1218, 1244), False, 'from genEM3.util.image import normalize, readWkwFromCenter\n'), ((1297, 1323), 'torch.Tensor', 'torch.Tensor', (['imagesNormal'], {}), '(imagesNormal)\n', (1309, 1323), False, 'import torch\n'), ((1341, 1377), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['imgT'], {}), '(imgT)\n', (1371, 1377), False, 'import torch\n'), ((1398, 1455), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_debris'], {'batch_size': '(5)'}), '(dataset_debris, batch_size=5)\n', (1425, 1455), False, 'import torch\n'), ((1929, 1983), 'os.path.join', 'os.path.join', (['run_root', '"""datasources_distributed.json"""'], {}), "(run_root, 'datasources_distributed.json')\n", (1941, 1983), False, 'import os\n'), ((2167, 2219), 'genEM3.data.wkwdata.WkwData.datasources_from_json', 'WkwData.datasources_from_json', (['datasources_json_path'], {}), '(datasources_json_path)\n', (2196, 2219), False, 'from genEM3.data.wkwdata import WkwData\n'), ((2230, 2353), 'genEM3.data.wkwdata.WkwData', 'WkwData', ([], {'input_shape': 'input_shape', 'target_shape': 'output_shape', 'data_sources': 'datasources', 'cache_HDD': '(False)', 'cache_RAM': '(True)'}), '(input_shape=input_shape, target_shape=output_shape, data_sources=\n datasources, cache_HDD=False, cache_RAM=True)\n', (2237, 2353), False, 'from genEM3.data.wkwdata import WkwData\n'), ((2387, 2483), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(dataset=dataset, batch_size=batch_size,\n num_workers=num_workers)\n', (2414, 2483), False, 'import torch\n'), ((2616, 2660), 'os.path.join', 'os.path.join', (['run_root', '"""./.log/torch_model"""'], {}), "(run_root, './.log/torch_model')\n", (2628, 2660), False, 'import os\n'), ((3024, 3094), 'torch.load', 'torch.load', (['state_dict_path'], {'map_location': '(lambda storage, loc: storage)'}), '(state_dict_path, map_location=lambda storage, loc: storage)\n', (3034, 3094), False, 'import torch\n'), ((3375, 3543), 'genEM3.inference.inference.Predictor', 'Predictor', ([], {'dataloader': 'clean_loader', 'model': 'model', 'state_dict': 'state_dict', 'device': 'device', 'batch_size': 'batch_size', 'input_shape': 'input_shape', 'output_shape': 'output_shape'}), '(dataloader=clean_loader, model=model, state_dict=state_dict,\n device=device, batch_size=batch_size, input_shape=input_shape,\n output_shape=output_shape)\n', (3384, 3543), False, 'from genEM3.inference.inference import Predictor\n'), ((3662, 3835), 'genEM3.inference.inference.Predictor', 'Predictor', ([], {'dataloader': 'dataLoader_debris', 'model': 'model', 'state_dict': 'state_dict', 'device': 'device', 'batch_size': 'batch_size', 'input_shape': 'input_shape', 'output_shape': 'output_shape'}), '(dataloader=dataLoader_debris, model=model, state_dict=state_dict,\n device=device, batch_size=batch_size, input_shape=input_shape,\n output_shape=output_shape)\n', (3671, 3835), False, 'from genEM3.inference.inference import Predictor\n'), ((4313, 4329), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (4321, 4329), True, 'import numpy as np\n'), ((4338, 4354), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (4346, 4354), True, 'import numpy as np\n'), ((4502, 4593), 'numpy.concatenate', 'np.concatenate', (['(hidden_dict_cat[TYPENAMES[0]], hidden_dict_cat[TYPENAMES[1]])'], {'axis': '(0)'}), '((hidden_dict_cat[TYPENAMES[0]], hidden_dict_cat[TYPENAMES[1]\n ]), axis=0)\n', (4516, 4593), True, 'import numpy as np\n'), ((4657, 4690), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (4674, 4690), False, 'from sklearn import decomposition\n'), ((4918, 4953), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Principle Component 1"""'], {}), "('Principle Component 1')\n", (4928, 4953), True, 'from matplotlib import pyplot as plt\n'), ((4954, 4989), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Principle Component 2"""'], {}), "('Principle Component 2')\n", (4964, 4989), True, 'from matplotlib import pyplot as plt\n'), ((4990, 5002), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5000, 5002), True, 'from matplotlib import pyplot as plt\n'), ((5003, 5013), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5011, 5013), True, 'from matplotlib import pyplot as plt\n'), ((1878, 1903), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1893, 1903), False, 'import os\n'), ((2790, 2890), 'genEM3.model.autoencoder2d.Encoder_4_sampling_bn_1px_deep_convonly_skip', 'Encoder_4_sampling_bn_1px_deep_convonly_skip', (['input_size', 'kernel_size', 'stride', 'n_fmaps', 'n_latent'], {}), '(input_size, kernel_size,\n stride, n_fmaps, n_latent)\n', (2834, 2890), False, 'from genEM3.model.autoencoder2d import AE, Encoder_4_sampling_bn_1px_deep_convonly_skip, Decoder_4_sampling_bn_1px_deep_convonly_skip\n'), ((2892, 2993), 'genEM3.model.autoencoder2d.Decoder_4_sampling_bn_1px_deep_convonly_skip', 'Decoder_4_sampling_bn_1px_deep_convonly_skip', (['output_size', 'kernel_size', 'stride', 'n_fmaps', 'n_latent'], {}), '(output_size, kernel_size,\n stride, n_fmaps, n_latent)\n', (2936, 2993), False, 'from genEM3.model.autoencoder2d import AE, Encoder_4_sampling_bn_1px_deep_convonly_skip, Decoder_4_sampling_bn_1px_deep_convonly_skip\n'), ((4188, 4212), 'numpy.arange', 'np.arange', (['numSamples[0]'], {}), '(numSamples[0])\n', (4197, 4212), True, 'import numpy as np\n'), ((1668, 1678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1676, 1678), True, 'from matplotlib import pyplot as plt\n'), ((1804, 1814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1812, 1814), True, 'from matplotlib import pyplot as plt\n'), ((1615, 1645), 'numpy.squeeze', 'np.squeeze', (['images[i, 0, :, :]'], {}), '(images[i, 0, :, :])\n', (1625, 1645), True, 'import numpy as np\n'), ((3998, 4019), 'torch.cat', 'torch.cat', (['val'], {'dim': '(0)'}), '(val, dim=0)\n', (4007, 4019), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
'''
Created on Thu Jan 10 13:03:23 2019
@author:
<NAME>
Turku University Hospital
January 2019
@description:
This class is used as a helper function to train multiple models at the
same time
Original implementation can be found from:
http://www.davidsbatista.net/blog/2018/02/23/model_optimization/
'''
#%% import necessary libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
#%% define class
class EstimatorSelectionHelper:
def __init__(self, models, params):
if not set(models.keys()).issubset(set(params.keys())):
missing_params = list(set(models.keys()) - set(params.keys()))
raise ValueError("Some estimators are missing parameters: %s" % missing_params)
self.models = models
self.params = params
self.keys = models.keys()
self.grid_searches = {}
def fit(self, X, y, cv = 5, n_jobs = -1, verbose = 1, scoring = None, refit = True):
for key in self.keys:
print("Running GridSearchCV for %s." % key)
model = self.models[key]
params = self.params[key]
gs = GridSearchCV(model, params, cv = cv, n_jobs = n_jobs,
verbose = verbose, scoring = scoring,
refit = refit, return_train_score = True)
gs.fit(X, y)
self.grid_searches[key] = gs
def score_summary(self, sort_by = 'mean_score'):
def row(key, scores, params):
d = {
'estimator': key,
'min_score': min(scores),
'max_score': max(scores),
'mean_score': np.mean(scores),
'std_score': np.std(scores),
}
return pd.Series({**params, **d})
rows = []
for k in self.grid_searches:
print(k)
params = self.grid_searches[k].cv_results_['params']
scores = []
for i in range(self.grid_searches[k].cv):
key = "split{}_test_score".format(i)
r = self.grid_searches[k].cv_results_[key]
scores.append(r.reshape(len(params), 1))
all_scores = np.hstack(scores)
for p, s in zip(params, all_scores):
rows.append((row(k, s, p)))
df = pd.concat(rows, axis = 1).T.sort_values([sort_by], ascending = False)
columns = ['estimator', 'min_score', 'mean_score', 'max_score', 'std_score']
columns = columns + [c for c in df.columns if c not in columns]
return df[columns] | [
"pandas.Series",
"sklearn.model_selection.GridSearchCV",
"numpy.mean",
"numpy.hstack",
"numpy.std",
"pandas.concat"
] | [((1216, 1342), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['model', 'params'], {'cv': 'cv', 'n_jobs': 'n_jobs', 'verbose': 'verbose', 'scoring': 'scoring', 'refit': 'refit', 'return_train_score': '(True)'}), '(model, params, cv=cv, n_jobs=n_jobs, verbose=verbose, scoring=\n scoring, refit=refit, return_train_score=True)\n', (1228, 1342), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1838, 1864), 'pandas.Series', 'pd.Series', (['{**params, **d}'], {}), '({**params, **d})\n', (1847, 1864), True, 'import pandas as pd\n'), ((2288, 2305), 'numpy.hstack', 'np.hstack', (['scores'], {}), '(scores)\n', (2297, 2305), True, 'import numpy as np\n'), ((1742, 1757), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1749, 1757), True, 'import numpy as np\n'), ((1789, 1803), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (1795, 1803), True, 'import numpy as np\n'), ((2413, 2436), 'pandas.concat', 'pd.concat', (['rows'], {'axis': '(1)'}), '(rows, axis=1)\n', (2422, 2436), True, 'import pandas as pd\n')] |
from keras.layers import Dense, Input
from keras.models import Model
from pathlib import Path
import spacy
from itertools import product, chain
from keras.utils import Sequence
from collections import Counter
import numpy as np
from time import time
from keras import backend as K
class SkipGramDataGenerator(Sequence):
def __init__(self, datafile:Path, batch_size, r_sample=0.001, n_window=2, language_model="xx_ent_wiki_sm", random_seed=None, shuffle=True):
self.shuffle = shuffle
self.n_window = n_window
self.r_sample = r_sample
self.batch_size = batch_size
np.random.seed(random_seed or int(time()))
self._prepare_nlps(datafile, language_model)
self._prepare_trainset()
self.on_epoch_end()
def _prepare_nlps(self, datafile, language_model):
nlp = spacy.load(language_model)
nlp.add_pipe(nlp.create_pipe('sentencizer'))
with open(datafile, 'r') as f:
doc = nlp(f.read())
tokens_cnter = Counter([t.text for t in doc])
self.kv_token_freq = {t: v / sum(tokens_cnter.values()) for t, v in tokens_cnter.items()}
self.tokens = list(tokens_cnter.keys())
self.n_vocab_size = len(self.tokens)
P_subsample = lambda token: (np.sqrt(self.kv_token_freq[token] / self.r_sample) + 1) * self.r_sample / self.kv_token_freq[token]
kv_token__P_subsample = {t: P_subsample(t) for t in self.tokens}
subsample_sentence = lambda sent: [t for t in sent if kv_token__P_subsample[t.text] >= np.random.rand()]
self.doc = [subsample_sentence(sent) for sent in doc.sents]
def _prepare_trainset(self):
offsets = list(range(-1 * self.n_window, 0)) + list(range(1, self.n_window + 1))
pair_locs = lambda loc_ceil: [(i, offset) for i, offset in product(range(loc_ceil), offsets) if i + offset >=0 and i + offset < loc_ceil]
instances = dict()
for sentence in self.doc:
for i, offset in pair_locs(len(sentence)):
centra = sentence[i].text
surround = sentence[i + offset].text
instances[centra] = instances.get(centra, []) + [surround]
self.instances = list(instances.items())
self.kv_surround_token__instance_id = dict()
for i, (_, surrounds) in enumerate(self.instances):
for s in surrounds:
self.kv_surround_token__instance_id[s] = self.kv_surround_token__instance_id.get(s, []) + [i]
def __len__(self):
return len(self.instances)
def __getitem__(self, index):
pos_instance_ids = self._pick_instances(self.batch_size // 2)
neg_instance_ids = self._negative_sampling(pos_instance_ids)
batch_instance_ids = pos_instance_ids + neg_instance_ids
if len(batch_instance_ids) < self.batch_size:
batch_instance_ids += self._pick_instances(self.batch_size - len(batch_instance_ids))
X = np.empty((len(batch_instance_ids), self.n_vocab_size))
Y = np.empty((len(batch_instance_ids), self.n_vocab_size), dtype=int)
for i, instance_id in enumerate(batch_instance_ids):
X[i, ] = self._oh_encode_tokens([self.instances[i][0]])
Y[i, ] = self._oh_encode_tokens(self.instances[i][1])
return X, Y
def _oh_encode_tokens(self, tokens):
encoding = np.zeros(self.n_vocab_size)
indexs = [self.tokens.index(t) for t in tokens]
encoding[indexs] = 1
return encoding
def _pick_instances(self, size):
unfetched_instance_ids = np.squeeze(self.__unfetched_instance_flags.nonzero())
if self.shuffle:
np.random.shuffle(unfetched_instance_ids)
picked = unfetched_instance_ids[:size]
self.__unfetched_instance_flags[picked] = 0
return picked.tolist()
def _negative_sampling(self, positive_instance_ids):
pos_surrounds = chain(*[self.instances[i][1] for i in positive_instance_ids])
neg_candidate_instance_ids = []
for token in set(self.kv_surround_token__instance_id.keys()) - set(pos_surrounds):
for i in self.kv_surround_token__instance_id[token]:
if self.__unfetched_instance_flags[i]:
neg_candidate_instance_ids.append(i)
if self.shuffle:
np.random.shuffle(neg_candidate_instance_ids)
candidates_ids = neg_candidate_instance_ids[:self.batch_size // 2]
self.__unfetched_instance_flags[candidates_ids] = 0
return candidates_ids
def on_epoch_end(self):
self.__unfetched_instance_flags = np.ones(len(self.instances))
return super().on_epoch_end()
def summary(self):
return f'''>>> Skip Gram Data Generator Summary:
| Name | Value |
|------------------+----------------------------------------------------|
| n_vocab_size | {self.n_vocab_size:^50d} |
| batch_size | {self.batch_size:^50d} |
| r_sample | {self.r_sample:^50f} |
| n_window | {self.n_window:^50d} |
| steps_each_epoch | {self.n_vocab_size // self.batch_size + 1:^50d} |
\n'''
class SkipGram(Model):
def __init__(self, n_vocab_size, n_embedding):
super(SkipGram, self).__init__(name='SkipGram')
self.n_vocab_size = n_vocab_size
self.n_embedding = n_embedding
self._prepare_network()
def _prepare_network(self):
self.hidden_layer = Dense(self.n_embedding, name="hidden_layer")
self.output_layer = Dense(self.n_vocab_size, name="output_layer")
def call(self, inputs):
return self.output_layer(self.hidden_layer(inputs))
def get_embeddings(self, inputs):
return K.function([self.hidden_layer.input],[self.hidden_layer.output])([inputs])[0]
| [
"itertools.chain",
"numpy.sqrt",
"numpy.random.rand",
"spacy.load",
"collections.Counter",
"numpy.zeros",
"keras.layers.Dense",
"keras.backend.function",
"time.time",
"numpy.random.shuffle"
] | [((843, 869), 'spacy.load', 'spacy.load', (['language_model'], {}), '(language_model)\n', (853, 869), False, 'import spacy\n'), ((1019, 1049), 'collections.Counter', 'Counter', (['[t.text for t in doc]'], {}), '([t.text for t in doc])\n', (1026, 1049), False, 'from collections import Counter\n'), ((3382, 3409), 'numpy.zeros', 'np.zeros', (['self.n_vocab_size'], {}), '(self.n_vocab_size)\n', (3390, 3409), True, 'import numpy as np\n'), ((3941, 4002), 'itertools.chain', 'chain', (['*[self.instances[i][1] for i in positive_instance_ids]'], {}), '(*[self.instances[i][1] for i in positive_instance_ids])\n', (3946, 4002), False, 'from itertools import product, chain\n'), ((5580, 5624), 'keras.layers.Dense', 'Dense', (['self.n_embedding'], {'name': '"""hidden_layer"""'}), "(self.n_embedding, name='hidden_layer')\n", (5585, 5624), False, 'from keras.layers import Dense, Input\n'), ((5653, 5698), 'keras.layers.Dense', 'Dense', (['self.n_vocab_size'], {'name': '"""output_layer"""'}), "(self.n_vocab_size, name='output_layer')\n", (5658, 5698), False, 'from keras.layers import Dense, Input\n'), ((3681, 3722), 'numpy.random.shuffle', 'np.random.shuffle', (['unfetched_instance_ids'], {}), '(unfetched_instance_ids)\n', (3698, 3722), True, 'import numpy as np\n'), ((4363, 4408), 'numpy.random.shuffle', 'np.random.shuffle', (['neg_candidate_instance_ids'], {}), '(neg_candidate_instance_ids)\n', (4380, 4408), True, 'import numpy as np\n'), ((5851, 5916), 'keras.backend.function', 'K.function', (['[self.hidden_layer.input]', '[self.hidden_layer.output]'], {}), '([self.hidden_layer.input], [self.hidden_layer.output])\n', (5861, 5916), True, 'from keras import backend as K\n'), ((650, 656), 'time.time', 'time', ([], {}), '()\n', (654, 656), False, 'from time import time\n'), ((1279, 1329), 'numpy.sqrt', 'np.sqrt', (['(self.kv_token_freq[token] / self.r_sample)'], {}), '(self.kv_token_freq[token] / self.r_sample)\n', (1286, 1329), True, 'import numpy as np\n'), ((1548, 1564), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1562, 1564), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import random
from sklearn.metrics import classification_report
from sklearn.neural_network import MLPClassifier
filename = '../Combined Trajectory_Label_Geolife/Hand_Crafted_features.csv'
np.random.seed(7)
random.seed(7)
df = pd.read_csv(filename)
X = np.array(df.loc[:, df.columns != 'Label'])
Y = np.array(df['Label'])
# Split Data into test and train
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=7)
# Random forest Grid Search
RandomForest = RandomForestClassifier()
parameters = {'n_estimators': [5, 15, 25, 35, 45, 55, 65, 75, 85, 95]}
clf = GridSearchCV(estimator=RandomForest, param_grid=parameters, cv=5)
fit = clf.fit(X_train, y_train)
print('optimal parameter value: ', fit.best_params_)
Prediction_RT = fit.best_estimator_.predict(X_test)
Accuracy_RandomForest = len(np.where(Prediction_RT == y_test)[0]) * 1. / len(y_test)
print('Accuracy: ', Accuracy_RandomForest)
print(classification_report(y_test, Prediction_RT, digits=3))
# Multilayer perceptron
MLP = MLPClassifier(early_stopping=True, hidden_layer_sizes=(2 * np.shape(X_train)[1],))
parameters = {'hidden_layer_sizes': [(2 * np.shape(X_train)[1],)]}
clf = GridSearchCV(estimator=MLP, param_grid=parameters, cv=5)
fit = clf.fit(X_train, y_train)
print('optimal parameter value: ', fit.best_params_)
Prediction_MLP = fit.best_estimator_.predict(X_test)
Accuracy_MLP = len(np.where(Prediction_MLP == y_test)[0]) * 1. / len(y_test)
print('Accuracy: ', Accuracy_MLP)
print(classification_report(y_test, Prediction_MLP, digits=3))
# Decision Tree Grid Search
DT = DecisionTreeClassifier()
parameters = {'max_depth': [1, 5, 10, 15, 20, 25, 30, 35, 40]}
clf = GridSearchCV(estimator=DT, param_grid=parameters, cv=5)
fit = clf.fit(X_train, y_train)
print('optimal parameter value: ', fit.best_params_)
Prediction_DT = fit.best_estimator_.predict(X_test)
Accuracy_DecisionTree = len(np.where(Prediction_DT == y_test)[0]) * 1. / len(y_test)
print('Accuracy: ', Accuracy_DecisionTree)
print(classification_report(y_test, Prediction_DT, digits=3))
# SVM Grid Search
SVM = SVC()
parameters = {'C': [0.5, 1, 4, 7, 10, 13, 16, 20]}
clf = GridSearchCV(estimator=SVM, param_grid=parameters, cv=5)
fit = clf.fit(X_train, y_train)
print('optimal parameter value: ', fit.best_params_)
Prediction_SVM = fit.best_estimator_.predict(X_test)
Accuracy_SVM = len(np.where(Prediction_SVM == y_test)[0]) * 1. / len(y_test)
print('Accuracy: ', Accuracy_SVM)
print(classification_report(y_test, Prediction_SVM, digits=3))
# KNN Grid Search
KNN = KNeighborsClassifier()
parameters = {'n_neighbors': [3, 5, 10, 15, 20, 25, 30, 35, 40]}
clf = GridSearchCV(estimator=KNN, param_grid=parameters, cv=5)
fit = clf.fit(X_train, y_train)
print('optimal parameter value: ', fit.best_params_)
Prediction_KNN = fit.best_estimator_.predict(X_test)
Accuracy_KNN = len(np.where(Prediction_KNN == y_test)[0]) * 1. / len(y_test)
print('Accuracy: ', Accuracy_KNN)
print(classification_report(y_test, Prediction_KNN, digits=3))
| [
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"numpy.where",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"random.seed",
... | [((580, 597), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (594, 597), True, 'import numpy as np\n'), ((599, 613), 'random.seed', 'random.seed', (['(7)'], {}), '(7)\n', (610, 613), False, 'import random\n'), ((622, 643), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (633, 643), True, 'import pandas as pd\n'), ((649, 691), 'numpy.array', 'np.array', (["df.loc[:, df.columns != 'Label']"], {}), "(df.loc[:, df.columns != 'Label'])\n", (657, 691), True, 'import numpy as np\n'), ((697, 718), 'numpy.array', 'np.array', (["df['Label']"], {}), "(df['Label'])\n", (705, 718), True, 'import numpy as np\n'), ((791, 844), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(7)'}), '(X, Y, test_size=0.2, random_state=7)\n', (807, 844), False, 'from sklearn.model_selection import train_test_split\n'), ((893, 917), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (915, 917), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((997, 1062), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'RandomForest', 'param_grid': 'parameters', 'cv': '(5)'}), '(estimator=RandomForest, param_grid=parameters, cv=5)\n', (1009, 1062), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1588, 1644), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'MLP', 'param_grid': 'parameters', 'cv': '(5)'}), '(estimator=MLP, param_grid=parameters, cv=5)\n', (1600, 1644), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2000, 2024), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (2022, 2024), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2096, 2151), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'DT', 'param_grid': 'parameters', 'cv': '(5)'}), '(estimator=DT, param_grid=parameters, cv=5)\n', (2108, 2151), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2513, 2518), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (2516, 2518), False, 'from sklearn.svm import SVC\n'), ((2578, 2634), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'SVM', 'param_grid': 'parameters', 'cv': '(5)'}), '(estimator=SVM, param_grid=parameters, cv=5)\n', (2590, 2634), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2981, 3003), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (3001, 3003), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3077, 3133), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'KNN', 'param_grid': 'parameters', 'cv': '(5)'}), '(estimator=KNN, param_grid=parameters, cv=5)\n', (3089, 3133), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1340, 1394), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'Prediction_RT'], {'digits': '(3)'}), '(y_test, Prediction_RT, digits=3)\n', (1361, 1394), False, 'from sklearn.metrics import classification_report\n'), ((1906, 1961), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'Prediction_MLP'], {'digits': '(3)'}), '(y_test, Prediction_MLP, digits=3)\n', (1927, 1961), False, 'from sklearn.metrics import classification_report\n'), ((2429, 2483), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'Prediction_DT'], {'digits': '(3)'}), '(y_test, Prediction_DT, digits=3)\n', (2450, 2483), False, 'from sklearn.metrics import classification_report\n'), ((2896, 2951), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'Prediction_SVM'], {'digits': '(3)'}), '(y_test, Prediction_SVM, digits=3)\n', (2917, 2951), False, 'from sklearn.metrics import classification_report\n'), ((3395, 3450), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'Prediction_KNN'], {'digits': '(3)'}), '(y_test, Prediction_KNN, digits=3)\n', (3416, 3450), False, 'from sklearn.metrics import classification_report\n'), ((1232, 1265), 'numpy.where', 'np.where', (['(Prediction_RT == y_test)'], {}), '(Prediction_RT == y_test)\n', (1240, 1265), True, 'import numpy as np\n'), ((1806, 1840), 'numpy.where', 'np.where', (['(Prediction_MLP == y_test)'], {}), '(Prediction_MLP == y_test)\n', (1814, 1840), True, 'import numpy as np\n'), ((2321, 2354), 'numpy.where', 'np.where', (['(Prediction_DT == y_test)'], {}), '(Prediction_DT == y_test)\n', (2329, 2354), True, 'import numpy as np\n'), ((2796, 2830), 'numpy.where', 'np.where', (['(Prediction_SVM == y_test)'], {}), '(Prediction_SVM == y_test)\n', (2804, 2830), True, 'import numpy as np\n'), ((3295, 3329), 'numpy.where', 'np.where', (['(Prediction_KNN == y_test)'], {}), '(Prediction_KNN == y_test)\n', (3303, 3329), True, 'import numpy as np\n'), ((1489, 1506), 'numpy.shape', 'np.shape', (['X_train'], {}), '(X_train)\n', (1497, 1506), True, 'import numpy as np\n'), ((1556, 1573), 'numpy.shape', 'np.shape', (['X_train'], {}), '(X_train)\n', (1564, 1573), True, 'import numpy as np\n')] |
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, np.generic):
return np.asscalar(obj)
return json.JSONEncoder.default(self, obj)
class UsageError(TypeError):
pass
| [
"json.JSONEncoder.default",
"numpy.asscalar"
] | [((222, 257), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (246, 257), False, 'import json\n'), ((196, 212), 'numpy.asscalar', 'np.asscalar', (['obj'], {}), '(obj)\n', (207, 212), True, 'import numpy as np\n')] |
from collections import defaultdict
import os.path
from os import path
import numpy as np
# Prints a bar
def print_split():
print("\n--------------------------------------------------\n")
def input_problem():
print("---- NOTE ----\nThe file must be in the following format:\nn MAX\t\t\tWhere n - number of items MAX - Maximum allowed weight\nW0 W1 W2 W3 ... Wn\nP0 P1 P2 P3 ... Pn")
print_split()
file_name = input("Enter the file name: ")
# Creating absolute path to the file in folder examples
file_dir = os.path.dirname(__file__)
rel_path = "examples/" + file_name
abs_file_path = os.path.join(file_dir, rel_path)
# Checking if the file exists
if os.path.exists(abs_file_path) == False:
# File not found, throw error
print("The file doesn't exist!")
raise Exception("The file didn't load because it doesn't exist")
# File found, opening
f = open(abs_file_path, 'r')
n, max_weight = [int(x) for x in next(f).split()] # Reads the dimensions and max weight
weights = []
prices = []
weights = next(f).split()
for i in range(0, len(weights)):
weights[i] = int(weights[i])
prices = next(f).split()
for i in range(0, len(prices)):
prices[i] = int(prices[i])
f.close()
return n, weights, prices, max_weight
# Iterative solution for Knapsack
# Returns the maximum value that can
# be put in a knapsack of a given capacity
def knapsack(max_weight, weights, prices, n):
F = [[0 for x in range(max_weight + 1)] for x in range(n + 1)]
# Building matrix F[][] from the bottom-up
for i in range(n + 1):
for max_weight in range(max_weight + 1):
if i == 0 or max_weight == 0:
F[i][max_weight] = 0
elif weights[i-1] <= max_weight:
F[i][max_weight] = max(prices[i-1] + F[i-1][max_weight-weights[i-1]], F[i-1][max_weight])
else:
F[i][max_weight] = F[i-1][max_weight]
return F, F[n][max_weight]
def main():
n, weights, prices, max_weight = input_problem()
print()
F, solution = knapsack(max_weight, weights, prices, n)
F = np.array(F)
print("F:\n", F)
print_split()
print("Maximal basket value: ", solution)
if __name__ == '__main__':
main() | [
"numpy.array"
] | [((2207, 2218), 'numpy.array', 'np.array', (['F'], {}), '(F)\n', (2215, 2218), True, 'import numpy as np\n')] |
import logging
import os
import astropy.units as u
import numpy as np
from astropy.coordinates import SkyCoord, AltAz
from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom
from astropy.io import fits
from astropy.table import Table, QTable
from astropy.time import Time
from lstchain.__init__ import __version__
from lstchain.reco.utils import location, camera_to_altaz
__all__ = [
"add_icrs_position_params",
"create_event_list",
"create_hdu_index_hdu",
"create_obs_index_hdu",
"get_pointing_params",
"get_timing_params",
"set_expected_pos_to_reco_altaz",
]
log = logging.getLogger(__name__)
DEFAULT_HEADER = fits.Header()
DEFAULT_HEADER["CREATOR"] = f"lstchain v{__version__}"
DEFAULT_HEADER["HDUDOC"] = "https://github.com/open-gamma-ray-astro/gamma-astro-data-formats"
DEFAULT_HEADER["HDUVERS"] = "0.2"
DEFAULT_HEADER["HDUCLASS"] = "GADF"
DEFAULT_HEADER["ORIGIN"] = "CTA"
DEFAULT_HEADER["TELESCOP"] = "CTA-N"
wobble_offset = 0.4 * u.deg
def create_obs_index_hdu(filename_list, fits_dir, obs_index_file, overwrite):
"""
Create the obs index table and write it to the given file.
The Index table is created as per,
http://gamma-astro-data-formats.readthedocs.io/en/latest/data_storage/obs_index/index.html
Parameters
----------
filename_list : list
list of filenames of the fits files
fits_dir : Path
Path of the fits files
obs_index_file : Path
Path for the OBS index file
overwrite : Bool
Boolean to overwrite existing file
"""
obs_index_tables = []
# loop through the files
for file in filename_list:
filepath = fits_dir / file
if filepath.is_file():
try:
hdu_list = fits.open(filepath)
evt_hdr = hdu_list["EVENTS"].header
except Exception:
log.error(f"fits corrupted for file {file}")
continue
else:
log.error(f"fits {file} doesn't exist")
continue
# Obs_table
t_obs = {
"OBS_ID": evt_hdr["OBS_ID"],
"DATE-OBS": evt_hdr["DATE-OBS"],
"TIME-OBS": evt_hdr["TIME-OBS"],
"DATE-END": evt_hdr["DATE-END"],
"TIME-END": evt_hdr["TIME-END"],
"RA_PNT": evt_hdr["RA_PNT"] * u.deg,
"DEC_PNT": evt_hdr["DEC_PNT"] * u.deg,
"ZEN_PNT": (90 - float(evt_hdr["ALT_PNT"])) * u.deg,
"ALT_PNT": evt_hdr["ALT_PNT"] * u.deg,
"AZ_PNT": evt_hdr["AZ_PNT"] * u.deg,
"RA_OBJ": evt_hdr["RA_OBJ"] * u.deg,
"DEC_OBJ": evt_hdr["DEC_OBJ"] * u.deg,
"TSTART": evt_hdr["TSTART"] * u.s,
"TSTOP": evt_hdr["TSTOP"] * u.s,
"ONTIME": evt_hdr["ONTIME"] * u.s,
"TELAPSE": evt_hdr["TELAPSE"] * u.s,
"LIVETIME": evt_hdr["LIVETIME"] * u.s,
"DEADC": evt_hdr["DEADC"],
"OBJECT": evt_hdr["OBJECT"],
"OBS_MODE": evt_hdr["OBS_MODE"],
"N_TELS": evt_hdr["N_TELS"],
"TELLIST": evt_hdr["TELLIST"],
"INSTRUME": evt_hdr["INSTRUME"],
}
obs_index_tables.append(t_obs)
obs_index_table = QTable(obs_index_tables)
obs_index_header = DEFAULT_HEADER.copy()
obs_index_header["CREATED"] = Time.now().utc.iso
obs_index_header["HDUCLAS1"] = "INDEX"
obs_index_header["HDUCLAS2"] = "OBS"
obs_index_header["INSTRUME"] = t_obs["INSTRUME"]
obs_index_header["MJDREFI"] = evt_hdr["MJDREFI"]
obs_index_header["MJDREFF"] = evt_hdr["MJDREFF"]
obs_index = fits.BinTableHDU(
obs_index_table, header=obs_index_header, name="OBS INDEX"
)
obs_index_list = fits.HDUList([fits.PrimaryHDU(), obs_index])
obs_index_list.writeto(obs_index_file, overwrite=overwrite)
def create_hdu_index_hdu(
filename_list,
fits_dir,
hdu_index_file,
overwrite=False
):
"""
Create the hdu index table and write it to the given file.
The Index table is created as per,
http://gamma-astro-data-formats.readthedocs.io/en/latest/data_storage/hdu_index/index.html
Parameters
----------
filename_list : list
list of filenames of the fits files
fits_dir : Path
Path of the fits files
hdu_index_file : Path
Path for HDU index file
overwrite : Bool
Boolean to overwrite existing file
"""
hdu_index_tables = []
base_dir = os.path.commonpath(
[
hdu_index_file.parent.absolute().resolve(),
fits_dir.absolute().resolve()
]
)
# loop through the files
for file in filename_list:
filepath = fits_dir / file
if filepath.is_file():
try:
hdu_list = fits.open(filepath)
evt_hdr = hdu_list["EVENTS"].header
# just test they are here
hdu_list["GTI"].header
hdu_list["POINTING"].header
except Exception:
log.error(f"fits corrupted for file {file}")
continue
else:
log.error(f"fits {file} doesn't exist")
continue
# The column names for the table follows the scheme as shown in
# https://gamma-astro-data-formats.readthedocs.io/en/latest/general/hduclass.html
# Event list
t_events = {
"OBS_ID": evt_hdr["OBS_ID"],
"HDU_TYPE": "events",
"HDU_CLASS": "events",
"FILE_DIR": str(os.path.relpath(fits_dir, hdu_index_file.parent)),
"FILE_NAME": str(file),
"HDU_NAME": "EVENTS",
"SIZE": filepath.stat().st_size,
}
hdu_index_tables.append(t_events)
# GTI
t_gti = t_events.copy()
t_gti["HDU_TYPE"] = "gti"
t_gti["HDU_CLASS"] = "gti"
t_gti["HDU_NAME"] = "GTI"
hdu_index_tables.append(t_gti)
# POINTING
t_pnt = t_events.copy()
t_pnt["HDU_TYPE"] = "pointing"
t_pnt["HDU_CLASS"] = "pointing"
t_pnt["HDU_NAME"] = "POINTING"
hdu_index_tables.append(t_pnt)
hdu_names = [
"EFFECTIVE AREA", "ENERGY DISPERSION", "BACKGROUND",
"PSF", "RAD_MAX"
]
for irf in hdu_names:
try:
t_irf = t_events.copy()
irf_hdu = hdu_list[irf].header["HDUCLAS4"]
t_irf["HDU_CLASS"] = irf_hdu.lower()
t_irf["HDU_TYPE"] = irf_hdu.lower().strip(
'_' + irf_hdu.lower().split("_")[-1]
)
t_irf["HDU_NAME"] = irf
hdu_index_tables.append(t_irf)
except KeyError:
log.error(
f"Run {t_events['OBS_ID']} does not contain HDU {irf}"
)
hdu_index_table = Table(hdu_index_tables)
hdu_index_header = DEFAULT_HEADER.copy()
hdu_index_header["CREATED"] = Time.now().utc.iso
hdu_index_header["HDUCLAS1"] = "INDEX"
hdu_index_header["HDUCLAS2"] = "HDU"
hdu_index_header["INSTRUME"] = evt_hdr["INSTRUME"]
hdu_index_header["BASE_DIR"] = base_dir
hdu_index = fits.BinTableHDU(
hdu_index_table, header=hdu_index_header, name="HDU INDEX"
)
hdu_index_list = fits.HDUList([fits.PrimaryHDU(), hdu_index])
hdu_index_list.writeto(hdu_index_file, overwrite=overwrite)
def get_timing_params(data):
"""
Get event lists and retrieve some timing parameters for the DL3 event list
as a dict
"""
time_utc = Time(data["dragon_time"], format="unix", scale="utc")
t_start_iso = time_utc[0].to_value("iso", "date_hms")
t_stop_iso = time_utc[-1].to_value("iso", "date_hms")
time_pars = {
"t_start": data["dragon_time"].value[0],
"t_stop": data["dragon_time"].value[-1],
"t_start_iso": t_start_iso,
"t_stop_iso": t_stop_iso,
"date_obs": t_start_iso[:10],
"time_obs": t_start_iso[11:],
"date_end": t_stop_iso[:10],
"time_end": t_stop_iso[11:],
"MJDREF": Time("1970-01-01T00:00", scale="utc")
}
return time_pars
def get_pointing_params(data, source_pos, wobble_offset_std):
"""
Convert the telescope pointing and reconstructed pointing position for
each event into AltAz and ICRS Frame of reference.
Also get the observational mode and wobble offset of the data as per
the given source position and standard wobble offset to compare.
"""
pointing_alt = data["pointing_alt"]
pointing_az = data["pointing_az"]
time_utc = Time(data["dragon_time"], format="unix", scale="utc")
pnt_icrs = SkyCoord(
alt=pointing_alt[0],
az=pointing_az[0],
frame=AltAz(obstime=time_utc[0], location=location),
).transform_to(frame="icrs")
# Observation modes
source_pointing_diff = source_pos.separation(pnt_icrs)
if np.around(source_pointing_diff, 1) == wobble_offset_std:
mode = "WOBBLE"
elif np.around(source_pointing_diff, 1) > 1 * u.deg:
mode = "OFF"
elif np.around(source_pointing_diff, 1) == 0.0 * u.deg:
mode = "ON"
else:
# Nomenclature is to be worked out or have a separate way to mark mispointings
mode = "UNDETERMINED"
log.info(
"Source pointing difference with camera pointing"
f" is {source_pointing_diff:.3f}"
)
return pnt_icrs, mode, source_pointing_diff
def add_icrs_position_params(data, source_pos):
"""
Updating data with ICRS position values of reconstructed positions in
RA DEC coordinates and add column on separation form true source position.
"""
reco_alt = data["reco_alt"]
reco_az = data["reco_az"]
time_utc = Time(data["dragon_time"], format="unix", scale="utc")
reco_altaz = SkyCoord(
alt=reco_alt, az=reco_az, frame=AltAz(
obstime=time_utc, location=location
)
)
with erfa_astrom.set(ErfaAstromInterpolator(300 * u.s)):
reco_icrs = reco_altaz.transform_to(frame="icrs")
data["RA"] = reco_icrs.ra.to(u.deg)
data["Dec"] = reco_icrs.dec.to(u.deg)
data["theta"] = reco_icrs.separation(source_pos).to(u.deg)
return data
def set_expected_pos_to_reco_altaz(data):
"""
Set expected source positions to reconstructed alt, az positions for source-dependent analysis
This is just a trick to easily extract ON/OFF events in gammapy analysis.
"""
# set expected source positions as reco positions
time = data['dragon_time']
obstime = Time(time, scale='utc', format='unix')
expected_src_x = data['expected_src_x'] * u.m
expected_src_y = data['expected_src_y'] * u.m
focal = 28 * u.m
pointing_alt = data['pointing_alt']
pointing_az = data['pointing_az']
expected_src_altaz = camera_to_altaz(expected_src_x, expected_src_y, focal, pointing_alt, pointing_az, obstime=obstime)
data["reco_alt"] = expected_src_altaz.alt
data["reco_az"] = expected_src_altaz.az
def create_event_list(
data, run_number, source_name, source_pos, effective_time, elapsed_time
):
"""
Create the event_list BinTableHDUs from the given data
Parameters
----------
data: DL2 data file
'astropy.table.QTable'
run: Run number
Int
source_name: Name of the source
Str
source_pos: Ra/Dec position of the source
'astropy.coordinates.SkyCoord'
effective_time: Effective time of triggered events of the run
Float
elapsed_time: Total elapsed time of triggered events of the run
Float
Returns
-------
Events HDU: `astropy.io.fits.BinTableHDU`
GTI HDU: `astropy.io.fits.BinTableHDU`
Pointing HDU: `astropy.io.fits.BinTableHDU`
"""
tel_list = np.unique(data["tel_id"])
time_params = get_timing_params(data)
reco_icrs = SkyCoord(ra=data["RA"], dec=data["Dec"], unit="deg")
pnt_icrs, mode, wobble_pos = get_pointing_params(
data, source_pos, wobble_offset
)
event_table = QTable(
{
"EVENT_ID": data["event_id"],
"TIME": data["dragon_time"],
"RA": data["RA"].to(u.deg),
"DEC": data["Dec"].to(u.deg),
"ENERGY": data["reco_energy"],
# Optional columns
"GAMMANESS": data["gh_score"],
"MULTIP": u.Quantity(np.repeat(len(tel_list), len(data)), dtype=int),
"GLON": reco_icrs.galactic.l.to(u.deg),
"GLAT": reco_icrs.galactic.b.to(u.deg),
"ALT": data["reco_alt"].to(u.deg),
"AZ": data["reco_az"].to(u.deg),
}
)
gti_table = QTable(
{
"START": u.Quantity(time_params["t_start"], unit=u.s, ndmin=1),
"STOP": u.Quantity(time_params["t_stop"], unit=u.s, ndmin=1),
}
)
pnt_table = QTable(
{
"TIME": u.Quantity(time_params["t_start"], unit=u.s, ndmin=1),
"RA_PNT": u.Quantity(pnt_icrs.ra.to(u.deg), ndmin=1),
"DEC_PNT": u.Quantity(pnt_icrs.dec.to(u.deg), ndmin=1),
# Optional Columns
"ALT_PNT": u.Quantity(data["pointing_alt"][0].to(u.deg), ndmin=1),
"AZ_PNT": u.Quantity(data["pointing_az"][0].to(u.deg), ndmin=1),
}
)
# Adding the meta data
# Comments can be added later for relevant metadata
# Event table metadata
ev_header = DEFAULT_HEADER.copy()
ev_header["CREATED"] = Time.now().utc.iso
ev_header["HDUCLAS1"] = "EVENTS"
ev_header["OBS_ID"] = run_number
ev_header["DATE-OBS"] = time_params["date_obs"]
ev_header["TIME-OBS"] = time_params["time_obs"]
ev_header["DATE-END"] = time_params["date_end"]
ev_header["TIME-END"] = time_params["time_end"]
ev_header["TSTART"] = time_params["t_start"]
ev_header["TSTOP"] = time_params["t_stop"]
ev_header["MJDREFI"] = int(time_params["MJDREF"].mjd)
ev_header["MJDREFF"] = time_params["MJDREF"].mjd - int(time_params["MJDREF"].mjd)
ev_header["TIMEUNIT"] = "s"
ev_header["TIMESYS"] = "UTC"
ev_header["TIMEREF"] = "TOPOCENTER"
ev_header["ONTIME"] = elapsed_time
ev_header["TELAPSE"] = time_params["t_stop"] - time_params["t_start"]
ev_header["DEADC"] = effective_time / elapsed_time
ev_header["LIVETIME"] = effective_time
ev_header["OBJECT"] = source_name
ev_header["OBS_MODE"] = mode
ev_header["N_TELS"] = len(tel_list)
ev_header["TELLIST"] = "LST-" + " ".join(map(str, tel_list))
ev_header["INSTRUME"] = f"{ev_header['TELLIST']}"
ev_header["RA_PNT"] = pnt_icrs.ra.to_value()
ev_header["DEC_PNT"] = pnt_icrs.dec.to_value()
ev_header["ALT_PNT"] = data["pointing_alt"].mean().to_value(u.deg)
ev_header["AZ_PNT"] = data["pointing_az"].mean().to_value(u.deg)
ev_header["RA_OBJ"] = source_pos.ra.to_value()
ev_header["DEC_OBJ"] = source_pos.dec.to_value()
ev_header["FOVALIGN"] = "RADEC"
# GTI table metadata
gti_header = DEFAULT_HEADER.copy()
gti_header["CREATED"] = Time.now().utc.iso
gti_header["HDUCLAS1"] = "GTI"
gti_header["OBS_ID"] = run_number
gti_header["MJDREFI"] = ev_header["MJDREFI"]
gti_header["MJDREFF"] = ev_header["MJDREFF"]
gti_header["TIMESYS"] = ev_header["TIMESYS"]
gti_header["TIMEUNIT"] = ev_header["TIMEUNIT"]
gti_header["TIMEREF"] = ev_header["TIMEREF"]
# Pointing table metadata
pnt_header = DEFAULT_HEADER.copy()
pnt_header["CREATED"] = Time.now().utc.iso
pnt_header["HDUCLAS1"] = "POINTING"
pnt_header["OBS_ID"] = run_number
pnt_header["MJDREFI"] = ev_header["MJDREFI"]
pnt_header["MJDREFF"] = ev_header["MJDREFF"]
pnt_header["TIMEUNIT"] = ev_header["TIMEUNIT"]
pnt_header["TIMESYS"] = ev_header["TIMESYS"]
pnt_header["OBSGEO-L"] = (
location.lon.to_value(u.deg),
"Geographic longitude of telescope (deg)",
)
pnt_header["OBSGEO-B"] = (
location.lat.to_value(u.deg),
"Geographic latitude of telescope (deg)",
)
pnt_header["OBSGEO-H"] = (
round(location.height.to_value(u.m), 2),
"Geographic latitude of telescope (m)",
)
pnt_header["TIMEREF"] = ev_header["TIMEREF"]
# Create HDUs
event = fits.BinTableHDU(event_table, header=ev_header, name="EVENTS")
gti = fits.BinTableHDU(gti_table, header=gti_header, name="GTI")
pointing = fits.BinTableHDU(pnt_table, header=pnt_header, name="POINTING")
return event, gti, pointing
| [
"logging.getLogger",
"astropy.table.Table",
"astropy.coordinates.erfa_astrom.ErfaAstromInterpolator",
"astropy.io.fits.open",
"astropy.coordinates.AltAz",
"astropy.table.QTable",
"lstchain.reco.utils.location.lon.to_value",
"os.path.relpath",
"astropy.io.fits.PrimaryHDU",
"lstchain.reco.utils.came... | [((623, 650), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (640, 650), False, 'import logging\n'), ((669, 682), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (680, 682), False, 'from astropy.io import fits\n'), ((3233, 3257), 'astropy.table.QTable', 'QTable', (['obs_index_tables'], {}), '(obs_index_tables)\n', (3239, 3257), False, 'from astropy.table import Table, QTable\n'), ((3617, 3693), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['obs_index_table'], {'header': 'obs_index_header', 'name': '"""OBS INDEX"""'}), "(obs_index_table, header=obs_index_header, name='OBS INDEX')\n", (3633, 3693), False, 'from astropy.io import fits\n'), ((6885, 6908), 'astropy.table.Table', 'Table', (['hdu_index_tables'], {}), '(hdu_index_tables)\n', (6890, 6908), False, 'from astropy.table import Table, QTable\n'), ((7208, 7284), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['hdu_index_table'], {'header': 'hdu_index_header', 'name': '"""HDU INDEX"""'}), "(hdu_index_table, header=hdu_index_header, name='HDU INDEX')\n", (7224, 7284), False, 'from astropy.io import fits\n'), ((7585, 7638), 'astropy.time.Time', 'Time', (["data['dragon_time']"], {'format': '"""unix"""', 'scale': '"""utc"""'}), "(data['dragon_time'], format='unix', scale='utc')\n", (7589, 7638), False, 'from astropy.time import Time\n'), ((8622, 8675), 'astropy.time.Time', 'Time', (["data['dragon_time']"], {'format': '"""unix"""', 'scale': '"""utc"""'}), "(data['dragon_time'], format='unix', scale='utc')\n", (8626, 8675), False, 'from astropy.time import Time\n'), ((9776, 9829), 'astropy.time.Time', 'Time', (["data['dragon_time']"], {'format': '"""unix"""', 'scale': '"""utc"""'}), "(data['dragon_time'], format='unix', scale='utc')\n", (9780, 9829), False, 'from astropy.time import Time\n'), ((10588, 10626), 'astropy.time.Time', 'Time', (['time'], {'scale': '"""utc"""', 'format': '"""unix"""'}), "(time, scale='utc', format='unix')\n", (10592, 10626), False, 'from astropy.time import Time\n'), ((10852, 10954), 'lstchain.reco.utils.camera_to_altaz', 'camera_to_altaz', (['expected_src_x', 'expected_src_y', 'focal', 'pointing_alt', 'pointing_az'], {'obstime': 'obstime'}), '(expected_src_x, expected_src_y, focal, pointing_alt,\n pointing_az, obstime=obstime)\n', (10867, 10954), False, 'from lstchain.reco.utils import location, camera_to_altaz\n'), ((11822, 11847), 'numpy.unique', 'np.unique', (["data['tel_id']"], {}), "(data['tel_id'])\n", (11831, 11847), True, 'import numpy as np\n'), ((11907, 11959), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "data['RA']", 'dec': "data['Dec']", 'unit': '"""deg"""'}), "(ra=data['RA'], dec=data['Dec'], unit='deg')\n", (11915, 11959), False, 'from astropy.coordinates import SkyCoord, AltAz\n'), ((16261, 16323), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['event_table'], {'header': 'ev_header', 'name': '"""EVENTS"""'}), "(event_table, header=ev_header, name='EVENTS')\n", (16277, 16323), False, 'from astropy.io import fits\n'), ((16334, 16392), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['gti_table'], {'header': 'gti_header', 'name': '"""GTI"""'}), "(gti_table, header=gti_header, name='GTI')\n", (16350, 16392), False, 'from astropy.io import fits\n'), ((16408, 16471), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['pnt_table'], {'header': 'pnt_header', 'name': '"""POINTING"""'}), "(pnt_table, header=pnt_header, name='POINTING')\n", (16424, 16471), False, 'from astropy.io import fits\n'), ((8110, 8147), 'astropy.time.Time', 'Time', (['"""1970-01-01T00:00"""'], {'scale': '"""utc"""'}), "('1970-01-01T00:00', scale='utc')\n", (8114, 8147), False, 'from astropy.time import Time\n'), ((8943, 8977), 'numpy.around', 'np.around', (['source_pointing_diff', '(1)'], {}), '(source_pointing_diff, 1)\n', (8952, 8977), True, 'import numpy as np\n'), ((15834, 15862), 'lstchain.reco.utils.location.lon.to_value', 'location.lon.to_value', (['u.deg'], {}), '(u.deg)\n', (15855, 15862), False, 'from lstchain.reco.utils import location, camera_to_altaz\n'), ((15960, 15988), 'lstchain.reco.utils.location.lat.to_value', 'location.lat.to_value', (['u.deg'], {}), '(u.deg)\n', (15981, 15988), False, 'from lstchain.reco.utils import location, camera_to_altaz\n'), ((3338, 3348), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (3346, 3348), False, 'from astropy.time import Time\n'), ((3743, 3760), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (3758, 3760), False, 'from astropy.io import fits\n'), ((6989, 6999), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (6997, 6999), False, 'from astropy.time import Time\n'), ((7334, 7351), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (7349, 7351), False, 'from astropy.io import fits\n'), ((9033, 9067), 'numpy.around', 'np.around', (['source_pointing_diff', '(1)'], {}), '(source_pointing_diff, 1)\n', (9042, 9067), True, 'import numpy as np\n'), ((9898, 9940), 'astropy.coordinates.AltAz', 'AltAz', ([], {'obstime': 'time_utc', 'location': 'location'}), '(obstime=time_utc, location=location)\n', (9903, 9940), False, 'from astropy.coordinates import SkyCoord, AltAz\n'), ((9995, 10028), 'astropy.coordinates.erfa_astrom.ErfaAstromInterpolator', 'ErfaAstromInterpolator', (['(300 * u.s)'], {}), '(300 * u.s)\n', (10017, 10028), False, 'from astropy.coordinates.erfa_astrom import ErfaAstromInterpolator, erfa_astrom\n'), ((12729, 12782), 'astropy.units.Quantity', 'u.Quantity', (["time_params['t_start']"], {'unit': 'u.s', 'ndmin': '(1)'}), "(time_params['t_start'], unit=u.s, ndmin=1)\n", (12739, 12782), True, 'import astropy.units as u\n'), ((12804, 12856), 'astropy.units.Quantity', 'u.Quantity', (["time_params['t_stop']"], {'unit': 'u.s', 'ndmin': '(1)'}), "(time_params['t_stop'], unit=u.s, ndmin=1)\n", (12814, 12856), True, 'import astropy.units as u\n'), ((12928, 12981), 'astropy.units.Quantity', 'u.Quantity', (["time_params['t_start']"], {'unit': 'u.s', 'ndmin': '(1)'}), "(time_params['t_start'], unit=u.s, ndmin=1)\n", (12938, 12981), True, 'import astropy.units as u\n'), ((13496, 13506), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (13504, 13506), False, 'from astropy.time import Time\n'), ((15061, 15071), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (15069, 15071), False, 'from astropy.time import Time\n'), ((15499, 15509), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (15507, 15509), False, 'from astropy.time import Time\n'), ((16091, 16120), 'lstchain.reco.utils.location.height.to_value', 'location.height.to_value', (['u.m'], {}), '(u.m)\n', (16115, 16120), False, 'from lstchain.reco.utils import location, camera_to_altaz\n'), ((1768, 1787), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (1777, 1787), False, 'from astropy.io import fits\n'), ((4801, 4820), 'astropy.io.fits.open', 'fits.open', (['filepath'], {}), '(filepath)\n', (4810, 4820), False, 'from astropy.io import fits\n'), ((5545, 5593), 'os.path.relpath', 'os.path.relpath', (['fits_dir', 'hdu_index_file.parent'], {}), '(fits_dir, hdu_index_file.parent)\n', (5560, 5593), False, 'import os\n'), ((9111, 9145), 'numpy.around', 'np.around', (['source_pointing_diff', '(1)'], {}), '(source_pointing_diff, 1)\n', (9120, 9145), True, 'import numpy as np\n'), ((8772, 8817), 'astropy.coordinates.AltAz', 'AltAz', ([], {'obstime': 'time_utc[0]', 'location': 'location'}), '(obstime=time_utc[0], location=location)\n', (8777, 8817), False, 'from astropy.coordinates import SkyCoord, AltAz\n')] |
'''
Script to generate embeddings from resnet trained using pcl
Command to run:
python eval_kmeans.py --pretrained experiment_pcl_resume/checkpoint.pth.tar /home/mprabhud/dataset/shapenet_renders/npys/
'''
from __future__ import print_function
import os
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import argparse
import random
import numpy as np
from tqdm import tqdm
import faiss
from torchvision import transforms, datasets
import torchvision.models as models
import pcl.loader
import ipdb
st = ipdb.set_trace
def parse_option():
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--batch-size', type=int, default=128, help='batch size')
parser.add_argument('--num-workers', type=int, default=8, help='num of workers to use')
parser.add_argument('--cost', type=str, default='0.5')
parser.add_argument('--seed', default=0, type=int)
# model definition
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('--pretrained', default='', type=str,
help='path to pretrained checkpoint')
# dataset
parser.add_argument('--low-shot', default=False, action='store_true', help='whether to perform low-shot training.')
parser.add_argument('--low-dim', default=16, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--pcl-r', default=1024, type=int,
help='queue size; number of negative pairs; needs to be smaller than num_cluster (default: 16384)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--temperature', default=0.2, type=float,
help='softmax temperature')
parser.add_argument('--mlp', action='store_true',
help='use mlp head')
parser.add_argument('--aug-plus', action='store_true',
help='use moco-v2/SimCLR data augmentation')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
parser.add_argument('--num-cluster', default='2500,5000,10000', type=str,
help='number of clusters')
opt = parser.parse_args()
opt.num_class = 20
# if low shot experiment, do 5 random runs
if opt.low_shot:
opt.n_run = 5
else:
opt.n_run = 1
return opt
def main():
args = parse_option()
args.num_cluster = args.num_cluster.split(',')
random.seed(args.seed)
np.random.seed(args.seed)
########################################################################
# STEP 1: SETuP DATALOADER (MAKE SURE TO CONVERT IT TO PIL IMAGE !!!!!)#
########################################################################
traindir = os.path.join(args.data)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = pcl.loader.ShapeNet(
traindir,
'split_allpt.txt',
transform=transforms.Compose([
transforms.ToPILImage(),
transforms.ToTensor(),
normalize
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size*2, shuffle=False,
sampler=None, num_workers=args.num_workers, pin_memory=True)
############################
# STEP 2: INITIALIZE MODEL #
############################
# create model
print("=> creating model '{}'".format(args.arch))
kmeans_model = models.__dict__[args.arch](num_classes=16)
kmeans_model.fc = nn.Sequential(nn.Linear(2048, 2048), nn.ReLU(), kmeans_model.fc)
# load from pre-trained
if args.pretrained:
if os.path.isfile(args.pretrained):
print("=> loading checkpoint '{}'".format(args.pretrained))
checkpoint = torch.load(args.pretrained, map_location="cpu")
state_dict = checkpoint['state_dict']
# rename pre-trained keys
for k in list(state_dict.keys()):
if k.startswith('module.encoder_k'):
# remove prefix
state_dict[k[len("module.encoder_k."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
kmeans_model.load_state_dict(state_dict, strict=False)
print("=> loaded pre-trained model '{}'".format(args.pretrained))
else:
print("=> no checkpoint found at '{}'".format(args.pretrained))
kmeans_model.cuda()
###############################
# STEP 3: GET Kmeans Clusters #
##############################
cluster_result = None
features = compute_embeddings(train_loader, kmeans_model, args) #generate embeddings based on keys encoder (different from eval_embeddings.py)
# placeholder for clustering result
cluster_result = {'im2cluster':[],'centroids':[],'density':[]}
for num_cluster in args.num_cluster:
cluster_result['im2cluster'].append(torch.zeros(len(train_dataset),dtype=torch.long).cuda())
cluster_result['centroids'].append(torch.zeros(int(num_cluster),16).cuda())
cluster_result['density'].append(torch.zeros(int(num_cluster)).cuda())
features[torch.norm(features,dim=1)>1.5] /= 2 #account for the few samples that are computed twice
features = features.numpy()
cluster_result = run_kmeans(features,args) #run kmeans clustering
def compute_embeddings(eval_loader, model, args):
print('Computing embeddings...')
model.eval()
features = torch.zeros(len(eval_loader.dataset),16).cuda()
for i, (images, index) in enumerate(tqdm(eval_loader)):
with torch.no_grad():
images = images.cuda(non_blocking=True)
feat = model(images)
features[index] = feat
return features.cpu()
def run_kmeans(x, args):
"""
Args:
x: data to be clustered
"""
results = {'im2cluster':[],'centroids':[],'density':[]}
for seed, num_cluster in enumerate(args.num_cluster):
print('performing kmeans clustering on ...',num_cluster)
# intialize faiss clustering parameters
d = x.shape[1]
k = int(num_cluster)
clus = faiss.Clustering(d, k)
clus.verbose = True
clus.niter = 20
clus.nredo = 5
clus.seed = seed
clus.max_points_per_centroid = 1000
clus.min_points_per_centroid = 10
res = faiss.StandardGpuResources()
cfg = faiss.GpuIndexFlatConfig()
cfg.useFloat16 = False
cfg.device = 0
index = faiss.GpuIndexFlatL2(res, d, cfg)
clus.train(x, index)
D, I = index.search(x, 1) # for each sample, find cluster distance and assignments
im2cluster = [int(n[0]) for n in I]
# get cluster centroids
centroids = faiss.vector_to_array(clus.centroids).reshape(k,d)
# sample-to-centroid distances for each cluster
Dcluster = [[] for c in range(k)]
for im,i in enumerate(im2cluster):
Dcluster[i].append(D[im][0])
# concentration estimation (phi)
density = np.zeros(k)
for i,dist in enumerate(Dcluster):
if len(dist)>1:
d = (np.asarray(dist)**0.5).mean()/np.log(len(dist)+10)
density[i] = d
#if cluster only has one point, use the max to estimate its concentration
dmax = density.max()
for i,dist in enumerate(Dcluster):
if len(dist)<=1:
density[i] = dmax
density = density.clip(np.percentile(density,10),np.percentile(density,90)) #clamp extreme values for stability
density = args.temperature*density/density.mean() #scale the mean to temperature
# convert to cuda Tensors for broadcast
centroids = torch.Tensor(centroids).cuda()
centroids = nn.functional.normalize(centroids, p=2, dim=1)
im2cluster = torch.LongTensor(im2cluster).cuda()
density = torch.Tensor(density).cuda()
results['centroids'].append(centroids)
results['density'].append(density)
results['im2cluster'].append(im2cluster)
return results
if __name__ == '__main__':
main() | [
"torch.nn.ReLU",
"torchvision.transforms.ToPILImage",
"torch.LongTensor",
"numpy.percentile",
"argparse.ArgumentParser",
"faiss.GpuIndexFlatL2",
"numpy.asarray",
"numpy.random.seed",
"faiss.StandardGpuResources",
"torchvision.transforms.ToTensor",
"faiss.Clustering",
"torch.Tensor",
"os.path... | [((825, 873), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""argument for training"""'], {}), "('argument for training')\n", (848, 873), False, 'import argparse\n'), ((3169, 3191), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3180, 3191), False, 'import random\n'), ((3196, 3221), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3210, 3221), True, 'import numpy as np\n'), ((3478, 3501), 'os.path.join', 'os.path.join', (['args.data'], {}), '(args.data)\n', (3490, 3501), False, 'import os\n'), ((3518, 3593), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3538, 3593), False, 'from torchvision import transforms, datasets\n'), ((3887, 4041), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': '(args.batch_size * 2)', 'shuffle': '(False)', 'sampler': 'None', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(train_dataset, batch_size=args.batch_size * 2,\n shuffle=False, sampler=None, num_workers=args.num_workers, pin_memory=True)\n', (3914, 4041), False, 'import torch\n'), ((4329, 4350), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(2048)'], {}), '(2048, 2048)\n', (4338, 4350), True, 'import torch.nn as nn\n'), ((4352, 4361), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4359, 4361), True, 'import torch.nn as nn\n'), ((4449, 4480), 'os.path.isfile', 'os.path.isfile', (['args.pretrained'], {}), '(args.pretrained)\n', (4463, 4480), False, 'import os\n'), ((6465, 6482), 'tqdm.tqdm', 'tqdm', (['eval_loader'], {}), '(eval_loader)\n', (6469, 6482), False, 'from tqdm import tqdm\n'), ((7064, 7086), 'faiss.Clustering', 'faiss.Clustering', (['d', 'k'], {}), '(d, k)\n', (7080, 7086), False, 'import faiss\n'), ((7288, 7316), 'faiss.StandardGpuResources', 'faiss.StandardGpuResources', ([], {}), '()\n', (7314, 7316), False, 'import faiss\n'), ((7332, 7358), 'faiss.GpuIndexFlatConfig', 'faiss.GpuIndexFlatConfig', ([], {}), '()\n', (7356, 7358), False, 'import faiss\n'), ((7432, 7465), 'faiss.GpuIndexFlatL2', 'faiss.GpuIndexFlatL2', (['res', 'd', 'cfg'], {}), '(res, d, cfg)\n', (7452, 7465), False, 'import faiss\n'), ((8027, 8038), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (8035, 8038), True, 'import numpy as np\n'), ((8813, 8859), 'torch.nn.functional.normalize', 'nn.functional.normalize', (['centroids'], {'p': '(2)', 'dim': '(1)'}), '(centroids, p=2, dim=1)\n', (8836, 8859), True, 'import torch.nn as nn\n'), ((4579, 4626), 'torch.load', 'torch.load', (['args.pretrained'], {'map_location': '"""cpu"""'}), "(args.pretrained, map_location='cpu')\n", (4589, 4626), False, 'import torch\n'), ((6043, 6070), 'torch.norm', 'torch.norm', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (6053, 6070), False, 'import torch\n'), ((6498, 6513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6511, 6513), False, 'import torch\n'), ((8505, 8531), 'numpy.percentile', 'np.percentile', (['density', '(10)'], {}), '(density, 10)\n', (8518, 8531), True, 'import numpy as np\n'), ((8531, 8557), 'numpy.percentile', 'np.percentile', (['density', '(90)'], {}), '(density, 90)\n', (8544, 8557), True, 'import numpy as np\n'), ((7698, 7735), 'faiss.vector_to_array', 'faiss.vector_to_array', (['clus.centroids'], {}), '(clus.centroids)\n', (7719, 7735), False, 'import faiss\n'), ((8762, 8785), 'torch.Tensor', 'torch.Tensor', (['centroids'], {}), '(centroids)\n', (8774, 8785), False, 'import torch\n'), ((8886, 8914), 'torch.LongTensor', 'torch.LongTensor', (['im2cluster'], {}), '(im2cluster)\n', (8902, 8914), False, 'import torch\n'), ((8955, 8976), 'torch.Tensor', 'torch.Tensor', (['density'], {}), '(density)\n', (8967, 8976), False, 'import torch\n'), ((3773, 3796), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (3794, 3796), False, 'from torchvision import transforms, datasets\n'), ((3810, 3831), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3829, 3831), False, 'from torchvision import transforms, datasets\n'), ((8131, 8147), 'numpy.asarray', 'np.asarray', (['dist'], {}), '(dist)\n', (8141, 8147), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import datetime
import json
import os
import time
import numpy as np
import torch
from dwi_ml.data.dataset.data_list import (DataListForTorch,
LazyDataListForTorch)
from dwi_ml.experiment.timer import Timer
from dwi_ml.experiment.monitoring import ValueHistoryMonitor
class DWIMLAbstractLocal:
""" Meant for projects working on learning local information in the
voxel. Information will be X = a voxel. """
def __init__(self):
raise NotImplementedError
def build_model(self):
raise NotImplementedError
def train(self, **kwargs):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load_model(self, filepath, **kwargs):
raise NotImplementedError
class DWIMLAbstractSequences:
""" Meant for projects working on learning tractography. Information will
be X = sequences."""
def __init__(self,
train_database_path,
valid_database_path,
name: str = None,
# Concerning the choice of inputs:
nb_degree_angles: int = 128,
add_streamline_noise: bool = False,
streamlines_cut_ratio: float = None, step_size: float = None,
neighborhood_dist_mm: float = None,
nb_neighborhood_axes: int = 6,
add_previous_dir: bool = False,
lazy: bool = False,
# Concerning the memory usage:
batch_size: int = 20000, volumes_per_batch: int = None,
cycles_per_volume_batch: int = 1,
n_epoch: int = 100, seed: int = 1234, patience: int = 20,
use_gpu: bool = True, num_workers: int = 0,
worker_interpolation: bool = False,
cache_manager: bool = False, taskman_managed: bool = False):
"""
Mandatory parameters:
---------------------
train_database_path : str
Path to training database (hdf5 file)
valid_database_path : str
Path to validation database (hdf5 file)
Optional parameters:
--------------------
====> General
name : str
Optional name of the experiment. If given, it is prepended to the
auto-generated name. [None]
====> Concerning the choice of inputs:
nb_degree_angles: int
Precision for angles: number of directions on the sphere. If
previous direction is added to input, we need to know how many that
is. But we manage the output with output_model, not with this
option. [128]
add_streamline_noise : bool
If set, add random gaussian noise to streamline coordinates
on-the-fly. Noise variance is 0.1 * step-size, or 0.1mm if no step
size is used. [False]
streamlines_cut_ratio : float
Percentage of streamlines to randomly cut in each batch. If None, do
not split streamlines. [None]
NOTE: Preprocessed .hdf5 file should contain resampled
streamlines; otherwise, cutting streamlines will be biased
towards long segments (less points)
step_size : float
Constant step size that every streamline should have between points
(in mm). If None, train on streamlines as they are (ex, compressed).
[None]
neighborhood_dist_mm : float
If given, add neighboring information to the input signal at the
given distance in each axis (in mm). [None]
neighborhood_axes : int
Nb of axes at which to get neighborhood distance. Default = 6 (up,
down, left, right, front, back).
add_previous_dir : bool
If set, add the previous streamline direction to the input signal.
[False]
lazy : bool
If True, use a lazy dataset. [False]
====> Concerning the memory usage:
batch_size : int
Number of time steps to use in a batch (the length of sequences vary
a lot, so we define the number of time steps to use a more
consistent amount of memory) [20,000]
volumes_per_batch : int
Limit the number of sampled volumes inside a single batch.
If None, use true random sampling. [None]
cycles_per_volume_batch : int
Number of batches where the same volumes will be reused before
sampling new volumes. [1]
n_epoch : int
Maximum number of epochs [100]
seed : int
Seed for random numbers [1234]
patience : int
Use early stopping. Defines the number of epochs after which
the model should stop training if the loss hasn't improved. [20]
use_gpu : bool
Use the GPU; if False, use CPU. [True]
num_workers : int
Number of processes that should process the data between training
updates. [0]
worker_interpolation : bool
If True and num_workers > 0, interpolation will be done on CPU by
the workers. Otherwise, interpolation is done on the main thread
using the chosen device. [False]
cache_manager : bool
If True, use a cache manager to keep volumes and streamlines in
memory. [False]
taskman_managed : bool
If True, taskman manages the experiment. Do not output progress
bars and instead output special messages for taskman. [False]
"""
# Init mandatory properties
self.train_database_path = train_database_path
self.valid_database_path = valid_database_path
# Init optional properties
self.name = name
# Init "globals" from user's project
self.nb_degree_angles = nb_degree_angles
# Init args concerning choice of inputs
self.add_streamline_noise = add_streamline_noise
self.streamlines_cut_ratio = streamlines_cut_ratio
self.step_size = step_size
self.neighborhood_dist_mm = neighborhood_dist_mm
self.nb_neighborhood_axes = nb_neighborhood_axes # toDo. À voir!! Je vais peut-être devoir changer int pour str='method'
# On aurait la méthode "6axes" et la méthode "mimicGrid" pour mon CNN
# où je prendrais 27 axes, pas tous de la même longueur! Possiblement
# le double d'axe pour avoir l'équivalent de 2 voxels autour de mon point
# dans toutes les directions. Ça pourrait être [str|int]
self.add_previous_dir = add_previous_dir
self.lazy = lazy
# Init args concerning memory usage
self.batch_size = int(batch_size)
self.volumes_per_batch = volumes_per_batch
self.n_epoch = int(n_epoch)
self.seed = seed
self.patience = patience
self.use_gpu = use_gpu
self.num_workers = num_workers
self.worker_interpolation = worker_interpolation
self.cycles_per_volume_batch = cycles_per_volume_batch
self.cache_manager = cache_manager
self.taskman_managed = taskman_managed
self.taskman_report = {
'loss_train': None,
'loss_valid': None,
'epoch': None,
'best_epoch': None,
'best_score': None,
'update': None,
'update_loss': None
}
# Time limited run
self.hangup_time = None
htime = os.environ.get('HANGUP_TIME', None)
if htime is not None:
self.hangup_time = int(htime)
print('Will hang up at ' + htime)
# Set device
self.device = None
if self.use_gpu and torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# Set random numbers
self.rng = np.random.RandomState(self.seed)
torch.manual_seed(self.seed) # Set torch seed
if self.use_gpu:
torch.cuda.manual_seed(self.seed) # toDo. Pourquoi ça dit error?
# If using worker_interpolation, data is processed on CPU
self.dataset_device = torch.device(
'cpu') if self.worker_interpolation else self.device
# Init datasets
# NOTE. WE HOPE THAT MULTISUBJECT CAN REALLY BE COMMON TO ALL OF US.
# So, I've pu the dataset creation here in the abstract. Else we can
# bring it back to each user's script.
other_kw_args = {}
if self.lazy:
dataset_cls = LazyMultiSubjectDataset
if self.cache_manager:
other_kw_args['cache_size'] = self.volumes_per_batch
else:
dataset_cls = MultiSubjectDataset
self.train_dataset = dataset_cls(
self.train_database_path, self.rng,
add_streamline_noise=self.add_streamline_noise,
step_size=self.step_size,
neighborhood_dist_mm=self.neighborhood_dist_mm,
streamlines_cut_ratio=self.streamlines_cut_ratio,
add_previous_dir=self.add_previous_dir,
do_interpolation=self.worker_interpolation,
device=self.dataset_device,
taskman_managed=self.taskman_managed,
**other_kw_args)
self.valid_dataset = dataset_cls(
self.valid_database_path, self.rng,
add_streamline_noise=False,
step_size=self.step_size,
neighborhood_dist_mm=self.neighborhood_dist_mm,
streamlines_cut_ratio=None,
add_previous_dir=self.add_previous_dir,
do_interpolation=self.worker_interpolation,
device=self.dataset_device,
taskman_managed=self.taskman_managed,
**other_kw_args)
# Other variables
self.sh_order = None # Will be set once the dataset is loaded
self.input_size = None # Will be set once the dataset is loaded
self.current_epoch = 0
self.experiment_dir = (self.name if self.name
else datetime.datetime.now().strftime(
"%Y_%m_%d_%H%M%S")) + '_' + type(self).__name__
self.optimizer = None # Will be defined later with ADAM
self.model = None # Will be defined by the main user
# Setup monitors
self.train_loss_monitor = ValueHistoryMonitor("Training loss")
self.valid_loss_monitor = ValueHistoryMonitor("Validation loss")
self.grad_norm_monitor = ValueHistoryMonitor("Grad Norm") # ToDo Est-ce que tout le monde utilise grad norm??
def train(self, **kwargs):
raise NotImplementedError
# ToDo: "train" depends on each user, but can we define
# sub-functions here that could encapsulate some sub-tasks that
# everybody uses? One day we could compare our codes.
def save(self):
raise NotImplementedError
def load_model(self, filepath, **kwargs):
raise NotImplementedError
def load_dataset(self):
"""
This method loads the data (streamlines and data volume).
"""
with Timer("Loading training dataset", newline=True, color='blue'):
self.train_dataset.load()
input_size = self._compute_input_size()
self.input_size = input_size
self.sh_order = self.train_dataset.sh_order
with Timer("Loading validation dataset", newline=True, color='blue'):
self.valid_dataset.load()
def _compute_input_size(self):
# Basic input size
expected_input_size = self.train_dataset.multisubject_manager.feature_size
# + neighbors
if self.neighborhood_dist_mm:
expected_input_size += \
self.nb_neighborhood_axes * \
self.train_dataset.multisubject_manager.feature_size
# + previous direction
if self.add_previous_dir:
expected_input_size += self.nb_degree_angles
return expected_input_size
def _should_quit(self, iter_timer):
# If:
# hang up signal received
# time remaining is less than one epoch + 30 seconds
# exit training.
return (self.hangup_time is not None and
time.time() + iter_timer.mean * 2.0 + 30 > self.hangup_time)
def _update_taskman_report(self, updates):
self.taskman_report.update(updates)
self.taskman_report['time'] = time.time()
print('!taskman' + json.dumps(self.taskman_report), flush=True)
| [
"torch.manual_seed",
"json.dumps",
"os.environ.get",
"dwi_ml.experiment.monitoring.ValueHistoryMonitor",
"datetime.datetime.now",
"torch.cuda.is_available",
"dwi_ml.experiment.timer.Timer",
"torch.cuda.manual_seed",
"time.time",
"numpy.random.RandomState",
"torch.device"
] | [((8026, 8061), 'os.environ.get', 'os.environ.get', (['"""HANGUP_TIME"""', 'None'], {}), "('HANGUP_TIME', None)\n", (8040, 8061), False, 'import os\n'), ((8440, 8472), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (8461, 8472), True, 'import numpy as np\n'), ((8481, 8509), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (8498, 8509), False, 'import torch\n'), ((10968, 11004), 'dwi_ml.experiment.monitoring.ValueHistoryMonitor', 'ValueHistoryMonitor', (['"""Training loss"""'], {}), "('Training loss')\n", (10987, 11004), False, 'from dwi_ml.experiment.monitoring import ValueHistoryMonitor\n'), ((11039, 11077), 'dwi_ml.experiment.monitoring.ValueHistoryMonitor', 'ValueHistoryMonitor', (['"""Validation loss"""'], {}), "('Validation loss')\n", (11058, 11077), False, 'from dwi_ml.experiment.monitoring import ValueHistoryMonitor\n'), ((11111, 11143), 'dwi_ml.experiment.monitoring.ValueHistoryMonitor', 'ValueHistoryMonitor', (['"""Grad Norm"""'], {}), "('Grad Norm')\n", (11130, 11143), False, 'from dwi_ml.experiment.monitoring import ValueHistoryMonitor\n'), ((13356, 13367), 'time.time', 'time.time', ([], {}), '()\n', (13365, 13367), False, 'import time\n'), ((8257, 8282), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8280, 8282), False, 'import torch\n'), ((8310, 8330), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (8322, 8330), False, 'import torch\n'), ((8371, 8390), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8383, 8390), False, 'import torch\n'), ((8565, 8598), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['self.seed'], {}), '(self.seed)\n', (8587, 8598), False, 'import torch\n'), ((8785, 8804), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8797, 8804), False, 'import torch\n'), ((12035, 12096), 'dwi_ml.experiment.timer.Timer', 'Timer', (['"""Loading training dataset"""'], {'newline': '(True)', 'color': '"""blue"""'}), "('Loading training dataset', newline=True, color='blue')\n", (12040, 12096), False, 'from dwi_ml.experiment.timer import Timer\n'), ((12301, 12364), 'dwi_ml.experiment.timer.Timer', 'Timer', (['"""Loading validation dataset"""'], {'newline': '(True)', 'color': '"""blue"""'}), "('Loading validation dataset', newline=True, color='blue')\n", (12306, 12364), False, 'from dwi_ml.experiment.timer import Timer\n'), ((13395, 13426), 'json.dumps', 'json.dumps', (['self.taskman_report'], {}), '(self.taskman_report)\n', (13405, 13426), False, 'import json\n'), ((13165, 13176), 'time.time', 'time.time', ([], {}), '()\n', (13174, 13176), False, 'import time\n'), ((10680, 10703), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10701, 10703), False, 'import datetime\n')] |
#############################################################################################
# Mel-filter-banks implementation
#############################################################################################
import numpy as np
from ..utils.converters import hz2mel, mel2hz
from ..utils.exceptions import ParameterError, ErrorMsgs
def mel_filter_banks(nfilts=20,
nfft=512,
fs=16000,
low_freq=0,
high_freq=None,
scale="constant"):
"""
Compute Mel-filterbanks.The filters are stored in the rows, the columns
correspond to fft bins.
Args:
nfilts (int) : the number of filters in the filterbank.
(Default 20)
nfft (int) : the FFT size.
(Default is 512)
fs (int) : sample rate/ sampling frequency of the signal.
(Default 16000 Hz)
low_freq (int) : lowest band edge of mel filters.
(Default 0 Hz)
high_freq (int) : highest band edge of mel filters.
(Default samplerate/2)
scale (str) : choose if mx bins amplitudes sum up to one or are constants.
Default is "constant"
Returns:
a numpy array of size nfilts * (nfft/2 + 1) containing filterbank.
Each row holds 1 filter.
"""
# init freqs
high_freq = high_freq or fs / 2
low_freq = low_freq or 0
# run checks
if low_freq < 0:
raise ParameterError(ErrorMsgs["low_freq"])
if high_freq > (fs / 2):
raise ParameterError(ErrorMsgs["high_freq"])
# compute points evenly spaced in mels (ponts are in Hz)
low_mel = hz2mel(low_freq)
high_mel = hz2mel(high_freq)
mel_points = np.linspace(low_mel, high_mel, nfilts + 2)
# we use fft bins, so we have to convert from Hz to fft bin number
bins = np.floor((nfft + 1) * mel2hz(mel_points) / fs)
fbank = np.zeros([nfilts, nfft // 2 + 1])
# init scaler
if scale == "descendant" or scale == "constant":
c = 1
else:
c = 0
# compute amps of fbanks
for j in range(0, nfilts):
b0, b1, b2 = bins[j], bins[j + 1], bins[j + 2]
# compute scaler
if scale == "descendant":
c -= 1 / nfilts
c = c * (c > 0) + 0 * (c < 0)
elif scale == "ascendant":
c += 1 / nfilts
c = c * (c < 1) + 1 * (c > 1)
# compute fbank bins
fbank[j, int(b0):int(b1)] = c * (np.arange(int(b0), int(b1)) -
int(b0)) / (b1 - b0)
fbank[j, int(b1):int(b2)] = c * (
int(b2) - np.arange(int(b1), int(b2))) / (b2 - b1)
return np.abs(fbank)
def inverse_mel_filter_banks(nfilts=20,
nfft=512,
fs=16000,
low_freq=0,
high_freq=None,
scale="constant"):
"""
Compute inverse Mel-filterbanks. The filters are stored in the rows, the columns
correspond to fft bins.
Args:
nfilt (int) : the number of filters in the filterbank.
(Default 20)
nfft (int) : the FFT size.
(Default is 512)
fs (int) : sample rate/ sampling frequency of the signal.
(Default 16000 Hz)
low_freq (int) : lowest band edge of mel filters.
(Default 0 Hz)
high_freq (int) : highest band edge of mel filters.
(Default samplerate/2)
scale (str) : choose if mx bins amplitudes sum up to one or are constants.
Default is "const"
Returns:
a numpy array of size nfilt * (nfft/2 + 1) containing filterbank.
Each row holds 1 filter.
"""
# init freqs
high_freq = high_freq or fs / 2
low_freq = low_freq or 0
# run checks
if low_freq < 0:
raise ParameterError(ErrorMsgs["low_freq"])
if high_freq > (fs / 2):
raise ParameterError(ErrorMsgs["high_freq"])
# inverse scaler value
scales = {
"ascendant": "descendant",
"descendant": "ascendant",
"constant": "constant"
}
iscale = scales[scale]
# generate inverse mel fbanks by inversing regular mel fbanks
imel_fbanks = mel_filter_banks(nfilts=nfilts,
nfft=nfft,
fs=fs,
low_freq=low_freq,
high_freq=high_freq,
scale=iscale)
# inverse regular filter banks
for i, pts in enumerate(imel_fbanks):
imel_fbanks[i] = pts[::-1]
return np.abs(imel_fbanks)
| [
"numpy.abs",
"numpy.linspace",
"numpy.zeros"
] | [((1878, 1920), 'numpy.linspace', 'np.linspace', (['low_mel', 'high_mel', '(nfilts + 2)'], {}), '(low_mel, high_mel, nfilts + 2)\n', (1889, 1920), True, 'import numpy as np\n'), ((2063, 2096), 'numpy.zeros', 'np.zeros', (['[nfilts, nfft // 2 + 1]'], {}), '([nfilts, nfft // 2 + 1])\n', (2071, 2096), True, 'import numpy as np\n'), ((2839, 2852), 'numpy.abs', 'np.abs', (['fbank'], {}), '(fbank)\n', (2845, 2852), True, 'import numpy as np\n'), ((4935, 4954), 'numpy.abs', 'np.abs', (['imel_fbanks'], {}), '(imel_fbanks)\n', (4941, 4954), True, 'import numpy as np\n')] |
# Copyright 2018 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers, rnn
class DaggerLSTM(object):
def __init__(self, state_dim, dwnd):
# dummy variable used to verify that sharing variables is working
self.cnt = tf.get_variable(
'cnt', [], tf.float32,
initializer=tf.constant_initializer(0.0))
self.add_one = self.cnt.assign_add(1.0)
# self.input: [batch_size, max_time, state_dim]
self.input = tf.placeholder(tf.float32, [None, None, state_dim])
self.num_layers = 1
self.lstm_dim = 32
self.linear_dim = 16
self.attn_dim = 32
stacked_lstm = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.lstm_dim)
for _ in xrange(self.num_layers)])
self.state_in = []
state_tuple_in = []
for _ in xrange(self.num_layers):
c_in = tf.placeholder(tf.float32, [None, self.lstm_dim])
h_in = tf.placeholder(tf.float32, [None, self.lstm_dim])
self.state_in.append((c_in, h_in))
state_tuple_in.append(rnn.LSTMStateTuple(c_in, h_in))
self.state_in = tuple(self.state_in)
state_tuple_in = tuple(state_tuple_in)
# self.output: [batch_size, max_time, lstm_dim]
state_embedding = layers.linear(self.input, self.linear_dim)
output, state_tuple_out = tf.nn.dynamic_rnn(
stacked_lstm, state_embedding, initial_state=state_tuple_in)
self.state_out = self.convert_state_out(state_tuple_out)
# map output to scores
u = layers.linear(output, self.attn_dim)
u = tf.nn.tanh(u) # batch_size * max_time * attn_dim
v = tf.get_variable('attn_v', [self.attn_dim])
v = tf.expand_dims(tf.expand_dims(v, 0), 0)
y = tf.reduce_sum(v * u, [2]) # batch_size * max_time
attn_vec = tf.TensorArray(dtype=tf.float32, size=1, dynamic_size=True)
i = tf.constant(0)
num_iter = tf.shape(output)[1]
def loop_body(i, dim, attn_vec):
start = tf.cond(i - dwnd + 1 < 0, lambda: 0, lambda: i - dwnd + 1)
end = i + 1
a = tf.expand_dims(tf.nn.softmax(y[:, start : end]), 2)
s = tf.reduce_sum(a * output[:, start : end, :], [1])
attn_vec = attn_vec.write(i, s)
return i + 1, dim, attn_vec
_, _, self.attn_output = tf.while_loop(lambda i, dim, _: i < dim,
loop_body, [i, num_iter, attn_vec])
# self.action_scores is still batch_size * max_time * lstm_dim
# self.action_scores = tf.stack(self.attn_vec, 1)
self.attn_output = self.attn_output.stack()
self.actions = tf.nn.tanh(layers.linear(self.attn_output, 1))
self.actions = tf.transpose(tf.squeeze(self.actions)) # batch_size * max_time
self.trainable_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
def convert_state_out(self, state_tuple_out):
state_out = []
for lstm_state_tuple in state_tuple_out:
state_out.append((lstm_state_tuple.c, lstm_state_tuple.h))
return tuple(state_out)
def zero_init_state(self, batch_size):
init_state = []
for _ in xrange(self.num_layers):
c_init = np.zeros([batch_size, self.lstm_dim], np.float32)
h_init = np.zeros([batch_size, self.lstm_dim], np.float32)
init_state.append((c_init, h_init))
return init_state
| [
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.reduce_sum",
"tensorflow.get_variable_scope",
"tensorflow.nn.softmax",
"tensorflow.while_loop",
"tensorflow.contrib.layers.linear",
"tensorflow.placeholder",
"tensorflow.nn.dynamic_rnn",
"tensorfl... | [((1079, 1130), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, state_dim]'], {}), '(tf.float32, [None, None, state_dim])\n', (1093, 1130), True, 'import tensorflow as tf\n'), ((1889, 1931), 'tensorflow.contrib.layers.linear', 'layers.linear', (['self.input', 'self.linear_dim'], {}), '(self.input, self.linear_dim)\n', (1902, 1931), False, 'from tensorflow.contrib import layers, rnn\n'), ((1966, 2044), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['stacked_lstm', 'state_embedding'], {'initial_state': 'state_tuple_in'}), '(stacked_lstm, state_embedding, initial_state=state_tuple_in)\n', (1983, 2044), True, 'import tensorflow as tf\n'), ((2168, 2204), 'tensorflow.contrib.layers.linear', 'layers.linear', (['output', 'self.attn_dim'], {}), '(output, self.attn_dim)\n', (2181, 2204), False, 'from tensorflow.contrib import layers, rnn\n'), ((2217, 2230), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['u'], {}), '(u)\n', (2227, 2230), True, 'import tensorflow as tf\n'), ((2279, 2321), 'tensorflow.get_variable', 'tf.get_variable', (['"""attn_v"""', '[self.attn_dim]'], {}), "('attn_v', [self.attn_dim])\n", (2294, 2321), True, 'import tensorflow as tf\n'), ((2386, 2411), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(v * u)', '[2]'], {}), '(v * u, [2])\n', (2399, 2411), True, 'import tensorflow as tf\n'), ((2456, 2515), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': '(1)', 'dynamic_size': '(True)'}), '(dtype=tf.float32, size=1, dynamic_size=True)\n', (2470, 2515), True, 'import tensorflow as tf\n'), ((2528, 2542), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (2539, 2542), True, 'import tensorflow as tf\n'), ((2980, 3056), 'tensorflow.while_loop', 'tf.while_loop', (['(lambda i, dim, _: i < dim)', 'loop_body', '[i, num_iter, attn_vec]'], {}), '(lambda i, dim, _: i < dim, loop_body, [i, num_iter, attn_vec])\n', (2993, 3056), True, 'import tensorflow as tf\n'), ((1481, 1530), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.lstm_dim]'], {}), '(tf.float32, [None, self.lstm_dim])\n', (1495, 1530), True, 'import tensorflow as tf\n'), ((1550, 1599), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.lstm_dim]'], {}), '(tf.float32, [None, self.lstm_dim])\n', (1564, 1599), True, 'import tensorflow as tf\n'), ((2349, 2369), 'tensorflow.expand_dims', 'tf.expand_dims', (['v', '(0)'], {}), '(v, 0)\n', (2363, 2369), True, 'import tensorflow as tf\n'), ((2562, 2578), 'tensorflow.shape', 'tf.shape', (['output'], {}), '(output)\n', (2570, 2578), True, 'import tensorflow as tf\n'), ((2643, 2703), 'tensorflow.cond', 'tf.cond', (['(i - dwnd + 1 < 0)', '(lambda : 0)', '(lambda : i - dwnd + 1)'], {}), '(i - dwnd + 1 < 0, lambda : 0, lambda : i - dwnd + 1)\n', (2650, 2703), True, 'import tensorflow as tf\n'), ((2810, 2857), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(a * output[:, start:end, :])', '[1]'], {}), '(a * output[:, start:end, :], [1])\n', (2823, 2857), True, 'import tensorflow as tf\n'), ((3330, 3364), 'tensorflow.contrib.layers.linear', 'layers.linear', (['self.attn_output', '(1)'], {}), '(self.attn_output, 1)\n', (3343, 3364), False, 'from tensorflow.contrib import layers, rnn\n'), ((3402, 3426), 'tensorflow.squeeze', 'tf.squeeze', (['self.actions'], {}), '(self.actions)\n', (3412, 3426), True, 'import tensorflow as tf\n'), ((3938, 3987), 'numpy.zeros', 'np.zeros', (['[batch_size, self.lstm_dim]', 'np.float32'], {}), '([batch_size, self.lstm_dim], np.float32)\n', (3946, 3987), True, 'import numpy as np\n'), ((4009, 4058), 'numpy.zeros', 'np.zeros', (['[batch_size, self.lstm_dim]', 'np.float32'], {}), '([batch_size, self.lstm_dim], np.float32)\n', (4017, 4058), True, 'import numpy as np\n'), ((923, 951), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (946, 951), True, 'import tensorflow as tf\n'), ((1284, 1316), 'tensorflow.contrib.rnn.BasicLSTMCell', 'rnn.BasicLSTMCell', (['self.lstm_dim'], {}), '(self.lstm_dim)\n', (1301, 1316), False, 'from tensorflow.contrib import layers, rnn\n'), ((1681, 1711), 'tensorflow.contrib.rnn.LSTMStateTuple', 'rnn.LSTMStateTuple', (['c_in', 'h_in'], {}), '(c_in, h_in)\n', (1699, 1711), False, 'from tensorflow.contrib import layers, rnn\n'), ((2757, 2787), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['y[:, start:end]'], {}), '(y[:, start:end])\n', (2770, 2787), True, 'import tensorflow as tf\n'), ((3550, 3573), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3571, 3573), True, 'import tensorflow as tf\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from asdf import yamlutil
from asdf.tags.core.ndarray import NDArrayType
from astropy import table
from ...types import AstropyAsdfType
class TableType(AstropyAsdfType):
name = 'core/table'
types = ['astropy.table.Table']
requires = ['astropy']
@classmethod
def from_tree(cls, node, ctx):
columns = [
yamlutil.tagged_tree_to_custom_tree(c, ctx)
for c in node['columns']
]
return table.Table(columns, meta=node.get('meta', {}))
@classmethod
def to_tree(cls, data, ctx):
columns = []
for name in data.colnames:
column = yamlutil.custom_tree_to_tagged_tree(
data.columns[name], ctx)
columns.append(column)
node = {'columns': columns}
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
NDArrayType.assert_equal(np.array(old), np.array(new))
class ColumnType(AstropyAsdfType):
name = 'core/column'
types = ['astropy.table.Column', 'astropy.table.MaskedColumn']
requires = ['astropy']
handle_dynamic_subclasses = True
@classmethod
def from_tree(cls, node, ctx):
data = yamlutil.tagged_tree_to_custom_tree(
node['data'], ctx)
name = node['name']
description = node.get('description')
unit = node.get('unit')
meta = node.get('meta', None)
return table.Column(
data=data._make_array(), name=name, description=description,
unit=unit, meta=meta)
@classmethod
def to_tree(cls, data, ctx):
node = {
'data': yamlutil.custom_tree_to_tagged_tree(
data.data, ctx),
'name': data.name
}
if data.description:
node['description'] = data.description
if data.unit:
node['unit'] = yamlutil.custom_tree_to_tagged_tree(
data.unit, ctx)
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
assert old.description == new.description
assert old.unit == new.unit
NDArrayType.assert_equal(np.array(old), np.array(new))
| [
"numpy.array",
"asdf.yamlutil.custom_tree_to_tagged_tree",
"asdf.yamlutil.tagged_tree_to_custom_tree"
] | [((1382, 1436), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (["node['data']", 'ctx'], {}), "(node['data'], ctx)\n", (1417, 1436), False, 'from asdf import yamlutil\n'), ((454, 497), 'asdf.yamlutil.tagged_tree_to_custom_tree', 'yamlutil.tagged_tree_to_custom_tree', (['c', 'ctx'], {}), '(c, ctx)\n', (489, 497), False, 'from asdf import yamlutil\n'), ((737, 797), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['data.columns[name]', 'ctx'], {}), '(data.columns[name], ctx)\n', (772, 797), False, 'from asdf import yamlutil\n'), ((1091, 1104), 'numpy.array', 'np.array', (['old'], {}), '(old)\n', (1099, 1104), True, 'import numpy as np\n'), ((1106, 1119), 'numpy.array', 'np.array', (['new'], {}), '(new)\n', (1114, 1119), True, 'import numpy as np\n'), ((1819, 1870), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['data.data', 'ctx'], {}), '(data.data, ctx)\n', (1854, 1870), False, 'from asdf import yamlutil\n'), ((2058, 2109), 'asdf.yamlutil.custom_tree_to_tagged_tree', 'yamlutil.custom_tree_to_tagged_tree', (['data.unit', 'ctx'], {}), '(data.unit, ctx)\n', (2093, 2109), False, 'from asdf import yamlutil\n'), ((2418, 2431), 'numpy.array', 'np.array', (['old'], {}), '(old)\n', (2426, 2431), True, 'import numpy as np\n'), ((2433, 2446), 'numpy.array', 'np.array', (['new'], {}), '(new)\n', (2441, 2446), True, 'import numpy as np\n')] |
# Python modules
# 3rd party modules
import numpy as np
from lmfit import Parameters
# Our modules
import vespa.analysis.chain_fit_identity as chain_fit_identity
import vespa.common.util.math_ as util_math
import vespa.common.util.generic_spectral as util_spectral
import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt
from vespa.common.constants import DEGREES_TO_RADIANS as DTOR
from vespa.analysis.constants import FitLineshapeModel, VoigtDefaultFixedT2, FitMacromoleculeMethod
from vespa.analysis.constants import FitOptimizeMethod as optmeth
from vespa.analysis.chain_base import Chain
LMFIT_METHODS = [optmeth.LMFIT_DEFAULT, optmeth.LMFIT_JACOBIAN]
class ChainFitVoigt(Chain):
"""
Building block object used to create a processing chain for MRS data.
Performs LCM (linear combination model) fit to the data. Fit model is made
up of spectrally simulated basis spectra for all metabolites.
"""
def __init__(self, dataset, block):
"""
Chain objects organize Algo (algorithm) calls by setting up access to
input data and parameters, and creating standard output values for View.
Base class sets convenience references to: self._block and self._dataset
self.data is always initialized as []
"""
super().__init__(dataset, block)
self.fit_function = self.lorgauss_internal
self.reset_results_arrays()
# book-keeping attributes
self.lmfit_fvar_names = []
@property
def nmet(self):
""" Number of metabolites to be fitted - varies depending on model """
if self._block is not None:
if self._block.set.prior_list is not None:
return len(self._block.set.prior_list)
return 0
def reset_results_arrays(self):
"""
Results array reset is in its own method because it may need to be
called at other times that just in the object initialization.
"""
nmet = self.nmet
nmmol = self._block.nmmol
nparam = self._block.nparam
spectral_dim0 = self._dataset.spectral_dims[0]
if len(self.data) != spectral_dim0:
self.data = np.zeros(spectral_dim0, complex)
self.yini = np.zeros((nmet+nmmol, spectral_dim0), complex)
self.yfit = np.zeros((nmet+nmmol, spectral_dim0), complex)
self.base = np.zeros(spectral_dim0, complex)
self.initial_values = np.zeros(nparam, float)
self.fit_results = np.zeros(nparam, float)
self.fit_baseline = np.zeros(spectral_dim0, complex)
self.weight_array = np.zeros(spectral_dim0, complex)
self.limits = np.zeros((2,nparam), float)
self.fitted_lw = 0.0
def run_global_init(self):
""""
Moved all of the global (one time) initialization code to this method
so we could package it in run() in an 'if' statement. This is in line
with making the 'fit all voxels' functionality as streamlined as
possible.
"""
block = self._block
set = self._block.set
prior = self._block.set.prior
self.spectral_dims = self._dataset.spectral_dims
self.nmmol = self._block.nmmol
self.nparam = self._block.nparam
self.init_b0 = 0.0
self.init_lw_hz = 3.0
self.init_ta = 0.8
self.init_tb = 0.03
self.init_ampl = None
self.init_area = None
self.limits = np.zeros((2,self.nparam+self.nmmol), float)
self.weight_array = np.zeros(self._dataset.spectral_dims[0], complex)
self.fit_baseline = 0.0 # needed for LORGAUSS call
self.fit_function = self.lorgauss_internal
self.fix_t2_center = VoigtDefaultFixedT2.CENTER
self.minmaxlw = [0,0]
# set up basis set for selected metabolites, collect all ppm locations
basis_mets = []
ppms = []
for name in set.prior_list:
basis_mets.append(prior.basis_set[name].fid.copy())
ppms += prior.basis_set[name].all_ppms
self.basis_mets = np.array(basis_mets)
self.peakpts = self._dataset.ppm2pts(np.array(ppms)) # for weight array calc
# set up basis set for macromolecules if needed
#self.macromol_model = set.macromol_model
self.basis_mmol = None
if set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
if set.macromol_single_basis_dataset:
tmp = set.macromol_single_basis_dataset.blocks['raw']
self.basis_mmol = tmp.data.copy()
# check results arrays for proper dimensionality
block.check_parameter_dimensions(self)
def run(self, voxels, entry='initial_only', statusbar=None, do_init=True):
"""
Run is typically called every time a processing setting is changed
in the parent (block) object. Run processes a single voxel at a time.
This object maintains previous run() results values until next run().
This allows the View to update without having to re-run the pipeline.
The 'entry' keyword adds flexibility to Block-Chain-View relationship.
"""
block = self._block
set = self._block.set
prior = self._block.set.prior
dataset = self._dataset
#----------------------------------------------------------------------
# Return with zero values if no metabolites are selected
if self.nmet < 1:
self.yini = self.yini * 0
voxel = voxels[0]
self.data = dataset.get_source_data('fit')
self.data = self.data[voxel[2],voxel[1],voxel[0],:]
plot_results = { 'fitted_lw' : 3.0,
'minmaxlw' : [1,5],
'init_b0' : 0.0,
'init_ph0' : -dataset.get_phase_0(voxel) * np.pi/180.0,
'init_ph1' : -dataset.get_phase_1(voxel),
'data' : self.data.copy(),
'weight_array' : self.data.copy() * 0,
'fit_baseline' : self.data.copy() * 0,
'yfit' : self.data.copy() * 0,
'yini' : self.data.copy() * 0,
'init_baseline': self.data.copy() * 0,
'mmol_area' : 1.0 }
return plot_results
#----------------------------------------------------------------------
# Do the one time global bits of code, if needed
if do_init:
self.run_global_init()
#----------------------------------------------------------------------
# Now process the current voxel
data_source = dataset.get_source_data('fit')
voxel = voxels[0] # because we got rid of for-loop
x,y,z = voxel # for convenience
self.iteration = 0 # global index used in functors as a trigger
self.voxel = voxel
self.statusbar = statusbar
# local copy of input data
self.data = data_source[z,y,x,:].copy()
# spectral chain needs update for this line to be valid
self.chain = dataset.get_source_chain('fit')
self.kodata = self.chain.kodata.copy()
# various default values
self.mmol_area = 1.0
# copy 'global' parameters, that DO change with voxel, from dataset
#
# NB. phase0/1 are inputs for 'manual' method, the init_ph0/1 are
# outputs from initval calcs. If 'manual' is selected, then the
# output value should be equal but negative to original. We use
# the init_ph0/1 to update the GUI (and mrs_dataset values) so
# the chain needs both input and output (I think).
self.phase0 = dataset.get_phase_0(voxel)
self.phase1 = dataset.get_phase_1(voxel)
self.init_ph0 = -dataset.get_phase_0(voxel) * np.pi / 180.0 # match units in util_initial_values
self.init_ph1 = -dataset.get_phase_1(voxel)
# copy block parameters, that DO change with voxel, from block
self.frequency_shift = dataset.get_frequency_shift(voxel)
self.fit_baseline = block.fit_baseline[:,x,y,z].copy()
self.init_baseline = self.fit_baseline.copy() * 0
# setup chain results arrays
self.initial_values = voigt_checkout(self.nmet, block.initial_values[:,x,y,z], dataset)
self.fit_results = voigt_checkout(self.nmet, block.fit_results[ :,x,y,z], dataset)
self.fit_stats = block.fit_stats[ :,x,y,z].copy()
self.cramer_rao = block.cramer_rao[:,x,y,z].copy()
self.confidence = block.confidence[:,x,y,z].copy()
# select the chain processing functor based on the entry point
if entry == 'initial_only':
funct_fit_voigt.do_processing_initial(self)
elif entry == 'full_fit' or entry == 'all':
funct_fit_voigt.do_processing_full_fit(self)
elif entry == 'plot_refresh':
funct_fit_voigt.do_processing_plot_refresh(self)
elif entry == 'output_refresh':
funct_fit_voigt.do_processing_output_refresh(self)
elif entry == 'voxel_change':
if np.sum(self.initial_values[0:self.nmet])==0.0:
flag_auto_initvals = True
else:
flag_auto_initvals = False
funct_fit_voigt.do_processing_voxel_change(self, flag_auto_initvals=flag_auto_initvals)
else:
print('oooops! - chain_fit_voigt "entry" point error ')
if statusbar:
statusbar.SetStatusText(' Fitting Done', 0)
# one last lw calc to refresh HTLM window on opening VIFF file
self.fitted_lw, _ = util_spectral.voigt_width(self.fit_results[self.nmet*2], self.fit_results[self.nmet*2+1], dataset)
block.initial_values[:,x,y,z] = voigt_checkin(self.nmet, self.initial_values, dataset)
block.fit_results[ :,x,y,z] = voigt_checkin(self.nmet, self.fit_results, dataset)
block.fit_stats[ :,x,y,z] = self.fit_stats.copy()
block.fit_baseline[ :,x,y,z] = self.fit_baseline.copy()
block.cramer_rao[ :,x,y,z] = self.cramer_rao.copy()
block.confidence[ :,x,y,z] = self.confidence.copy()
# Initial value algorithms change b0, ph0/ph1. To be well behaved we ask
# the dataset object to save these to the 'spectral' block for us.
#
# NB. In CLI mode, call this chain with 'initial_only' first, then update
# the 'spectral' block and only then call this chain with 'full_fit'
dataset.set_frequency_shift(dataset.get_frequency_shift(voxel) + self.init_b0, voxel)
dataset.set_phase_0(-self.init_ph0 * 180.0 / np.pi, voxel)
dataset.set_phase_1(-self.init_ph1, voxel)
# Return values specific to calling Tab used to update its self.view (plot_panel_spectrum object).
plot_results = { 'fitted_lw' : self.fitted_lw,
'minmaxlw' : self.minmaxlw,
'init_b0' : self.init_b0,
'init_ph0' : self.init_ph0 * 180.0 / np.pi,
'init_ph1' : self.init_ph1,
'data' : self.data.copy(),
'weight_array' : self.weight_array.copy(),
'fit_baseline' : self.fit_baseline.copy(),
'yfit' : self.yfit.copy(),
'yini' : self.yini.copy(),
'init_baseline' : self.init_baseline.copy(),
'mmol_area' : self.mmol_area }
return plot_results
def create_param_labels(self):
""" Create list of unique parameter labels """
plabel = []
unique_abbr = [item.replace('-', '_') for item in self._dataset.prior_list_unique]
for item in unique_abbr: plabel.append('area_' + item)
for item in unique_abbr: plabel.append('freq_' + item)
plabel.append('ta')
plabel.append('tb')
plabel.append('ph0')
plabel.append('ph1')
if self._block.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET:
plabel.append('mmol_area')
plabel.append('mmol_freq')
return plabel
def lorgauss_internal_lmfit_dfunc(self, params, *args, **kwargs):
"""
This is in the format that LMFIT expects to call in the Minimizer class
for the 'least_squares' algorithm.
This returns the weighted partial derivative functions all_pders * ww
as a single numpy (n,m) float array, where where n = # of variable
parameters (versus dependent params) and m = # of spectral points. In
this case, the real and imaginary vectors have been concatenated into
a single array, so m = 2 * npts_spectral_zerofilled.
Note. The vespa model (for one example) might have 48 parameters, but
only 42 are variable parameters while the other 6 are dependent
expressions (e.g. freq_naag = freq_naa + 0.04). The LMFIT algorithm
only passes in the 42 'free' params, and I need to expand that into the
actual 48 for the self.lorgauss_internal() call to work properly. On
return, I need to remove the pder entris for the dependent parameters
(and return just a 42 x npts array).
params - these are just the free variable values, we need to expand this
into a full list/dict of free and evaluated expression variables
for the call to self.lorgauss_internal(). This can be a list of
current variable values, OR it can be an ordered dict of LMFIT
Paramters.
"""
ww = np.concatenate([self.weight_array, self.weight_array])
# expand list of free variable values into full list of free and evaluated expression values
all_params = self.all_params.copy() # copy of full param set
for name, val in zip(self.lmfit_fvar_names, params):
all_params[name].value = val # update free params to current pass values
all_params.update_constraints() # evaluate expression params values
yfit, all_pders = self.lorgauss_internal(all_params, pderflg=True)
# Re-sort all_pders array if inequality expressions present in Parameters list
#
# - pder returns in 'Vespa' order (area(s), freq(s), ta, tb, ph0, ph1, mmol_area, mmol_freq)
# - if inequality control vars have been added to end of Paramters list (typical in Vespa
# model) then we have to re-sort
# - usually things like 'freq_naag' have to be relocated to position where 'delta_freq_naa'
# was located in the 'params' variable that was input to this method
pders = []
indxs = []
all_names = list(all_params.keys())
for key in self.lmfit_fvar_names:
if 'delta_' in key:
indx = all_names.index(key.replace('delta_', ''))
pders.append(-1 * all_pders[indx, :]) # -1 is empirical vs LMFIT, bjs 3/2021
else:
indx = all_names.index(key)
pders.append(all_pders[indx, :])
indxs.append(indx)
pders = np.array(pders)
# expand complex to 1D and apply weighting scheme
dfunc = []
for pder in pders:
dfunc.append(np.concatenate([pder.real, pder.imag]) * ww * (-1)) # -1 is empirically vs LMFIT, bjs 3/2021
dfunc = np.array(dfunc)
return dfunc.T # empirical vs LMFIT requirement
def lorgauss_internal_lmfit(self, a, report_stats=False):
"""
This is in the format that LMFIT expects to call in the Minimizer class.
This returns the weighted difference (data - yfit) * ww as a single
numpy float array, where the real and imaginary vectors have been
concatenated into a single array.
a - fully expanded list of parameters, free and evaluated expressions
"""
data = self.data_scale.copy()
ww = self.weight_array
yfit, _ = self.lorgauss_internal(a, pderflg=False)
yfit = np.concatenate([yfit.real, yfit.imag])
data = np.concatenate([data.real, data.imag])
ww = np.concatenate([ww, ww])
if report_stats:
nfree = np.size(yfit)-len(list(a.keys()))
wchisqr = np.sum(ww*(data-yfit)**2)/nfree # got from CCFIT method
chisqr = np.sum( (data-yfit)**2)/nfree
return wchisqr, chisqr
else:
y = (data - yfit) * ww
return y
def lorgauss_internal(self, a, pderflg=True,
nobase=False,
indiv=False,
finalwflg=False):
"""
=========
Arguments
=========
**a:** [list][float] parameters for model function
**dataset:** [object][dataset (or subset)] object containing fitting
parameters
**pderflg:** [keyword][bool][default=False] xxxx
**nobase:** [keyword][bool][default=False] flag, do not include
baseline contribs from (*dood).basarr
**indiv:** [keyword][bool][default=False] flag, return individual
metabolites, not summed total of all
**finalwflg:** [keyword][float][default=False] xxxx
===========
Description
===========
Returns the parameterized metabolite model function.
A contains : [[am],[fr],Ta,Tb,ph0,ph1] - LorGauss complex
Peak ampls and freqs are taken from the DB info in info,
so the values in [am] and [fr] are relative multipliers
and additives respectively. That is why there is only
one value for each compound in each array
If the relfreq flag is ON, then [fr] is a single value that
is added to each peak freq equivalently. Ie. the whole
spectrum can shift, but relative ppm separations between
all metabolites are maintained exactly. If the flag is OFF,
then metabs may shift independently from one another,
however, within groups of peaks belonging to the same
metabolite, relative ppm separtaions are maintained.
am - peak amplitude
fr - peak frequency offsets in PPM
Ta - T2 decay constant in sec
Tb - T2 star decay const in sec
ph0/1 - zero/first order phase in degrees
coef - are the spline coefs for the lineshape, knot locations are in info
======
Syntax
======
::
f = self.lorgauss_internal(a, pderflg = False,
nobase = False,
indiv = False,
finalwflg = False)
"""
ds = self._dataset
set = self._block.set
# parse input parameters
if isinstance(a, Parameters):
v = a.valuesdict()
a = np.array([item[1] for item in list(v.items())])
# Setup constants and flags
nmet = self.nmet
npts = ds.raw_dims[0]
nptszf = int(round(npts * ds.zero_fill_multiplier))
td = 1.0/ds.sw
piv = ds.ppm2pts(ds.phase_1_pivot, acq=True)
arr1 = np.zeros(int(npts),float) + 1.0
f = np.zeros((int(nmet),int(nptszf)),complex)
mf = np.zeros((int(nptszf),),complex)
t = (np.arange(nmet * npts) % npts) * td
t.shape = nmet, npts
# get prior max peak ppm vals for metabs which are flagged ON
peaks = np.array(set.prior_peak_ppm)
# setup Lineshape
if set.lineshape_model != FitLineshapeModel.GAUSS:
# voigt and lorentzian models
expo = t/a[nmet*2] + (t/a[nmet*2+1])**2
lshape = util_math.safe_exp(-expo)
else:
# Gaussian lineshape - allows user to set a fixed T2 value for each
# metabolite stored in a 'T2 lineshape array'. But, this model still
# has a Tb parameter, though tightly constrained. We set it to 0.250
# +/- 0.001 sec, a reasonable T2 value, to make the search space
# happy. The fitting function is adjusted for each metab by the delta
# from 0.250.
ma = (self.fix_t2_center - a[nmet*2]) + set.prior_fix_t2 # delta for Ta param that is set at 0.25 sec
ma = t/np.outer(ma, arr1)
mb = (t / a[nmet*2+1])**2
expo = ma+mb
lshape = util_math.safe_exp(-expo)
if finalwflg:
finalw = lshape[:,0]
finalw = util_spectral.full_width_half_max(np.fft.fft(util_spectral.chop(finalw))/len(finalw)) * ds.spectral_hpp
return finalw
# if FID, then for correct area, first point must be divided by 2
fre = a[nmet:nmet*2] - ds.ppm2hz(peaks)*2.0*np.pi # shift in Radians from basis center freq
fre = np.exp( 1j * (np.outer(fre, arr1)) * t ) # outer is matrix multiplication
amp = np.outer(a[0:nmet], arr1)
ph0 = np.outer(np.exp(1j * (np.zeros(nmet) + a[nmet*2+2])), arr1)
tmp = self.basis_mets.copy() * amp * fre * ph0 * lshape
f[:,0:npts] = tmp
f[:,0] = f[:,0] / 2.0
# Calc Phase1
phase1 = np.exp(1j * (a[nmet*2+3]*DTOR*(np.arange(nptszf,dtype=float)-piv)/nptszf))
# Calc Mmol - we will calc mmol pders later if needed
if (set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET):
if self.basis_mmol is not None:
mdat = self.basis_mmol.copy() * ((((np.arange(npts) + 1) % 2) * 2) - 1) # chop the basis fn
mamp = a[self.nparam - 2]
mfre = np.exp(1j * a[self.nparam - 1] * np.arange(npts) * td) # freq roll shift
mph0 = np.exp(1j * a[nmet*2 + 2]) # global ph0
mdat *= mamp * mfre * mph0
mf[0:npts] = mdat
mf[0] = mf[0] / 2.0
mind = mf.copy() # save copy of indiv mmol basis functions
# Calculate Partial Derivatives
pder = None
if pderflg:
pder = np.zeros((int(len(a)),int(nptszf)), complex)
pall = np.sum(f,axis=0) # all lines added
pind = f
tt = np.zeros(int(nptszf),float)
tt[0:npts] = np.arange(npts,dtype=float) * td
for i in range(nmet): # Calc the Ampl and Freq pders
pder[i,:] = (np.fft.fft(pind[i,:] / a[i] )/nptszf) * phase1
pder[i+nmet,:] = (np.fft.fft(tt * 1j * pind[i,:])/nptszf) * phase1
pder[nmet*2+0,:] = (np.fft.fft( tt * pall/(a[nmet*2+0]**2))/nptszf) * phase1
pder[nmet*2+1,:] = (np.fft.fft(2.0*(tt**2) * pall/(a[nmet*2+1]**3))/nptszf) * phase1
if set.optimize_method in LMFIT_METHODS:
# flags below are set in funct_fit_voigt.py only if both metabs in plabel
plabel = self.create_param_labels()
if set.optimize_constrain_ppm_naa_naag:
pder[plabel.index('freq_naa')] += pder[plabel.index('freq_naag')]
if set.optimize_constrain_ppm_cr_pcr:
pder[plabel.index('freq_cr')] += pder[plabel.index('freq_pcr')]
if set.optimize_constrain_ppm_gpc_pcho:
pder[plabel.index('freq_gpc')] += pder[plabel.index('freq_pcho')]
if set.optimize_constrain_ppm_cr2_pcr2:
pder[plabel.index('freq_cr2')] += pder[plabel.index('freq_pcr2')]
if set.optimize_constrain_ppm_glu_gln:
pder[plabel.index('freq_glu')] += pder[plabel.index('freq_gln')]
if set.optimize_constrain_ppm_tau_glc:
pder[plabel.index('freq_tau')] += pder[plabel.index('freq_glc')]
if set.lineshape_model == FitLineshapeModel.GAUSS:
pder[nmet*2+0,:] *= -1e-6 # empirical from LMFIT tests
pder[nmet*2+2,:] = (np.fft.fft(1j*pall)/nptszf) * phase1
if self.basis_mmol is not None:
pder[nmet*2+2, :] += (np.fft.fft(1j*mf)/nptszf) * phase1
pder[nmet*2+3,:] = (np.fft.fft(pall)/nptszf) * (1j*DTOR*(np.arange(nptszf,dtype=float)-piv)/nptszf) * phase1
if self.basis_mmol is not None:
pder[nmet*2+3,:] += (np.fft.fft(mf)/nptszf) * (1j*DTOR*(np.arange(nptszf,dtype=float)-piv)/nptszf) * phase1
# Do the FFT
if indiv: # return individual lines
if nmet != 1:
for i in range(nmet):
f[i,:] = (np.fft.fft(f[i,:])/nptszf) * phase1
else:
f = (np.fft.fft(f[0,:])/nptszf) * phase1
else: # return summed spectrum
if (nmet) != 1:
f = np.sum(f,axis=0)
f = (np.fft.fft(f)/nptszf) * phase1
else:
f = (np.fft.fft(f[0,:])/nptszf) * phase1
# Add in baseline unless nobase is True ---
if not nobase:
if f.ndim > 1:
for i in range(len(f)): f[i,:] = f[i,:] + self.fit_baseline
else:
f = f + self.fit_baseline
# Finish calc of Mmol here and add to full model
if (set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET):
if self.basis_mmol is not None:
mf = (np.fft.fft(mf)/nptszf) * phase1
if f.ndim > 1:
mf.shape = 1,mf.shape[0]
f = np.concatenate([f, mf],axis=0)
else:
f = f + mf
if pderflg:
mtt = np.zeros(nptszf,float)
mtt[0:npts] = np.arange(npts,dtype=float) * td
pder[nmet*2+4,:] = (np.fft.fft(mind / mamp)/nptszf) * phase1
pder[nmet*2+5,:] = (np.fft.fft(mtt*1j* mind)/nptszf) * phase1
return f, pder
def lorgauss_internal_orig(self, a, pderflg=True,
nobase=False,
indiv=False,
finalwflg=False):
"""
This is ORIGINAL lorgauss_internal from 0.10.x and just starting the
version 1.0.0 release. It's here for just-in-case
=========
Arguments
=========
**a:** [list][float] parameters for model function
**dataset:** [object][dataset (or subset)] object containing fitting
parameters
**pderflg:** [keyword][bool][default=False] xxxx
**nobase:** [keyword][bool][default=False] flag, do not include
baseline contribs from (*dood).basarr
**indiv:** [keyword][bool][default=False] flag, return individual
metabolites, not summed total of all
**finalwflg:** [keyword][float][default=False] xxxx
===========
Description
===========
Returns the parameterized metabolite model function.
A contains : [[am],[fr],Ta,Tb,ph0,ph1] - LorGauss complex
Peak ampls and freqs are taken from the DB info in info,
so the values in [am] and [fr] are relative multipliers
and additives respectively. That is why there is only
one value for each compound in each array
If the relfreq flag is ON, then [fr] is a single value that
is added to each peak freq equivalently. Ie. the whole
spectrum can shift, but relative ppm separations between
all metabolites are maintained exactly. If the flag is OFF,
then metabs may shift independently from one another,
however, within groups of peaks belonging to the same
metabolite, relative ppm separtaions are maintained.
am - peak amplitude
fr - peak frequency offsets in PPM
Ta - T2 decay constant in sec
Tb - T2 star decay const in sec
ph0/1 - zero/first order phase in degrees
coef - are the spline coefs for the lineshape, knot locations are in info
======
Syntax
======
::
f = self.lorgauss_internal(a, pderflg = False,
nobase = False,
indiv = False,
finalwflg = False)
"""
# parse input parameters
if isinstance(a, Parameters):
v = a.valuesdict()
a = np.array([item[1] for item in list(v.items())])
# Setup constants and flags
dataset = self._dataset
nmet = self.nmet
npts = dataset.raw_dims[0]
zfmult = dataset.zero_fill_multiplier
nptszf = int(round(npts * zfmult))
sw = 1.0 * dataset.sw
td = 1.0 / sw
piv = dataset.ppm2pts(dataset.phase_1_pivot, acq=True)
t2fix = self._block.set.prior_fix_t2
arr1 = np.zeros(int(npts), float) + 1.0
f = np.zeros((int(nmet), int(nptszf)), complex)
mf = np.zeros((int(nptszf),), complex)
t = (np.arange(nmet * npts) % npts) * td
t.shape = nmet, npts
mt = np.arange(npts) * td
# get prior max peak ppm vals for metabs which are flagged ON
peaks = np.array(self._block.set.prior_peak_ppm)
# setup Lineshape
if self._block.set.lineshape_model != FitLineshapeModel.GAUSS:
# voigt and lorentzian models
expo = t / a[nmet * 2] + (t / a[nmet * 2 + 1]) ** 2
lshape = util_math.safe_exp(-expo)
else:
# Gaussian lineshape - allows user to set a fixed T2 value for each
# metabolite stored in a 'T2 lineshape array'. But, this model still
# has a Tb parameter, though tightly constrained. We set it to 0.250
# +/- 0.001 sec, a reasonable T2 value, to make the search space
# happy. The fitting function is adjusted for each metab by the delta
# from 0.250.
ma = (self.fix_t2_center - a[nmet * 2]) + t2fix # delta for Ta param that is set at 0.25 sec
ma = t / np.outer(ma, arr1)
mb = (t / a[nmet * 2 + 1]) ** 2
expo = ma + mb
lshape = util_math.safe_exp(-expo)
if finalwflg:
finalw = lshape[:, 0]
finalw = util_spectral.full_width_half_max(
np.fft.fft(util_spectral.chop(finalw)) / len(finalw)) * dataset.spectral_hpp
return finalw
# if FID, then for correct area, first point must be divided by 2
tmp = self.basis_mets.copy()
fre = a[nmet:nmet * 2] - self._dataset.ppm2hz(peaks) * 2.0 * np.pi # in Radians here
fre = np.exp(1j * (np.outer(fre, arr1)) * t) # outer is matrix multiplication
amp = np.outer(a[0:nmet], arr1)
ph0 = np.outer(np.exp(1j * (np.zeros(nmet) + a[nmet * 2 + 2])), arr1)
tmp *= amp * fre * ph0 * lshape
f[:, 0:npts] = tmp
f[:, 0] = f[:, 0] / 2.0
# Calc Phase1
phase1 = np.exp(1j * (a[nmet * 2 + 3] * DTOR * (np.arange(nptszf, dtype=float) - piv) / nptszf))
# Calc Mmol - to include in pders if needed
if (self._block.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET):
if self.basis_mmol is not None:
mfre = a[self.nparam - 1]
mdat = self.basis_mmol.copy()
chop = ((((np.arange(npts) + 1) % 2) * 2) - 1)
mdat *= chop
marea = a[self.nparam - 2]
fre = mfre # *2.0*np.pi # in Radians here
fre = np.exp(1j * fre * mt)
ph0 = np.exp(1j * a[nmet * 2 + 2])
mdat *= marea * fre * ph0
mf[0:npts] = mdat
mf[0] = mf[0] / 2.0
mind = mf.copy()
# Calculate Partial Derivatives
#
# TODO bjs - if mmol model changes, much more control logic needed below
#
pder = None
if pderflg:
pder = np.zeros((int(len(a)), int(nptszf)), complex)
pall = np.sum(f, axis=0) # all lines added
pind = f
tt = np.zeros(int(nptszf), float)
tt[0:npts] = np.arange(npts, dtype=float) * td
for i in range(nmet): # Calc the Ampl and Freq pders
pder[i, :] = (np.fft.fft(pind[i, :] / a[i]) / nptszf) * phase1
pder[i + nmet, :] = (np.fft.fft(tt * 1j * pind[i, :]) / nptszf) * phase1
pder[nmet * 2 + 0, :] = (np.fft.fft(tt * pall / (a[nmet * 2 + 0] ** 2)) / nptszf) * phase1
pder[nmet * 2 + 1, :] = (np.fft.fft(2.0 * (tt ** 2) * pall / (a[nmet * 2 + 1] ** 3)) / nptszf) * phase1
if self._block.set.lineshape_model == FitLineshapeModel.GAUSS:
pder[nmet * 2 + 0, :] *= -1e-6 # empirical from LMFIT tests
pder[nmet * 2 + 2, :] = (np.fft.fft(1j * pall) / nptszf) * phase1
if self.basis_mmol is not None:
pder[nmet * 2 + 2, :] += (np.fft.fft(1j * mf) / nptszf) * phase1
pder[nmet * 2 + 3, :] = (np.fft.fft(pall) / nptszf) * (
1j * DTOR * (np.arange(nptszf, dtype=float) - piv) / nptszf) * phase1
if self.basis_mmol is not None:
pder[nmet * 2 + 3, :] += (np.fft.fft(mf) / nptszf) * (
1j * DTOR * (np.arange(nptszf, dtype=float) - piv) / nptszf) * phase1
# Do the FFT
if indiv: # return individual lines
if nmet != 1:
for i in range(nmet):
f[i, :] = (np.fft.fft(f[i, :]) / nptszf) * phase1
else:
f = (np.fft.fft(f[0, :]) / nptszf) * phase1
else: # return summed spectrum
if (nmet) != 1:
f = np.sum(f, axis=0)
f = (np.fft.fft(f) / nptszf) * phase1
else:
f = (np.fft.fft(f[0, :]) / nptszf) * phase1
# Add in baseline unless nobase is True ---
if not nobase:
if f.ndim > 1:
for i in range(len(f)): f[i, :] = f[i, :] + self.fit_baseline
else:
f = f + self.fit_baseline
# Finish calc of Mmol here and add to full model
if (self._block.set.macromol_model == FitMacromoleculeMethod.SINGLE_BASIS_DATASET):
if self.basis_mmol is not None:
# mfre = a[self.nparam-1]
# mdat = self.basis_mmol.copy()
# chop = ((((np.arange(npts) + 1) % 2) * 2) - 1)
# mdat *= chop
# marea = a[self.nparam-2]
# fre = mfre #*2.0*np.pi # in Radians here
# fre = np.exp( 1j * fre * mt )
# ph0 = np.exp( 1j * a[nmet*2+2])
#
# mdat *= marea * fre * ph0
# mf[0:npts] = mdat
# mf[0] = mf[0] / 2.0
#
# mind = mf.copy()
mf = (np.fft.fft(mf) / nptszf) * phase1
if f.ndim > 1:
mf.shape = 1, mf.shape[0]
f = np.concatenate([f, mf], axis=0)
else:
f = f + mf
if pderflg:
mtt = np.zeros(nptszf, float)
mtt[0:npts] = np.arange(npts, dtype=float) * td
pder[nmet * 2 + 4, :] = (np.fft.fft(mind / marea) / nptszf) * phase1
pder[nmet * 2 + 5, :] = (np.fft.fft(mtt * 1j * mind) / nptszf) * phase1
return f, pder
def voigt_checkin(nmet, source, dataset):
"""
Parameter value conversion before saving into long term storage. Phase0
converts from radians to degrees, and frequency terms from Hz to ppm.
"""
dest = source.copy()
dest[nmet:nmet*2] = dataset.hz2ppm(dest[nmet:nmet*2]/(2.0*np.pi), acq=False)
dest[nmet*2+2] = dest[nmet*2+2] * 180.0 / np.pi
return dest
def voigt_checkout(nmet, source, dataset):
"""
Parameterv value conversion before use in fitting routine. Phase0 converts
from degrees to radians, and frequency terms from ppm to Hz.
"""
dest = source.copy()
dest[nmet:nmet*2] = dataset.ppm2hz(dest[nmet:nmet*2], acq=False)*2.0*np.pi
dest[nmet*2+2] = dest[nmet*2+2] * np.pi / 180.0
if dest[nmet*2] == 0.0: dest[nmet*2] = 0.000001
if dest[nmet*2+1] == 0.0: dest[nmet*2+1] = 0.000001
return dest
| [
"vespa.analysis.functors.funct_fit_voigt.do_processing_output_refresh",
"vespa.common.util.generic_spectral.voigt_width",
"vespa.analysis.functors.funct_fit_voigt.do_processing_full_fit",
"numpy.size",
"numpy.fft.fft",
"vespa.common.util.math_.safe_exp",
"vespa.common.util.generic_spectral.chop",
"ves... | [((3723, 3769), 'numpy.zeros', 'np.zeros', (['(2, self.nparam + self.nmmol)', 'float'], {}), '((2, self.nparam + self.nmmol), float)\n', (3731, 3769), True, 'import numpy as np\n'), ((3804, 3853), 'numpy.zeros', 'np.zeros', (['self._dataset.spectral_dims[0]', 'complex'], {}), '(self._dataset.spectral_dims[0], complex)\n', (3812, 3853), True, 'import numpy as np\n'), ((4377, 4397), 'numpy.array', 'np.array', (['basis_mets'], {}), '(basis_mets)\n', (4385, 4397), True, 'import numpy as np\n'), ((10370, 10479), 'vespa.common.util.generic_spectral.voigt_width', 'util_spectral.voigt_width', (['self.fit_results[self.nmet * 2]', 'self.fit_results[self.nmet * 2 + 1]', 'dataset'], {}), '(self.fit_results[self.nmet * 2], self.fit_results\n [self.nmet * 2 + 1], dataset)\n', (10395, 10479), True, 'import vespa.common.util.generic_spectral as util_spectral\n'), ((14518, 14572), 'numpy.concatenate', 'np.concatenate', (['[self.weight_array, self.weight_array]'], {}), '([self.weight_array, self.weight_array])\n', (14532, 14572), True, 'import numpy as np\n'), ((16119, 16134), 'numpy.array', 'np.array', (['pders'], {}), '(pders)\n', (16127, 16134), True, 'import numpy as np\n'), ((16377, 16392), 'numpy.array', 'np.array', (['dfunc'], {}), '(dfunc)\n', (16385, 16392), True, 'import numpy as np\n'), ((17090, 17128), 'numpy.concatenate', 'np.concatenate', (['[yfit.real, yfit.imag]'], {}), '([yfit.real, yfit.imag])\n', (17104, 17128), True, 'import numpy as np\n'), ((17144, 17182), 'numpy.concatenate', 'np.concatenate', (['[data.real, data.imag]'], {}), '([data.real, data.imag])\n', (17158, 17182), True, 'import numpy as np\n'), ((17198, 17222), 'numpy.concatenate', 'np.concatenate', (['[ww, ww]'], {}), '([ww, ww])\n', (17212, 17222), True, 'import numpy as np\n'), ((20758, 20786), 'numpy.array', 'np.array', (['set.prior_peak_ppm'], {}), '(set.prior_peak_ppm)\n', (20766, 20786), True, 'import numpy as np\n'), ((22298, 22323), 'numpy.outer', 'np.outer', (['a[0:nmet]', 'arr1'], {}), '(a[0:nmet], arr1)\n', (22306, 22323), True, 'import numpy as np\n'), ((30698, 30738), 'numpy.array', 'np.array', (['self._block.set.prior_peak_ppm'], {}), '(self._block.set.prior_peak_ppm)\n', (30706, 30738), True, 'import numpy as np\n'), ((32236, 32261), 'numpy.outer', 'np.outer', (['a[0:nmet]', 'arr1'], {}), '(a[0:nmet], arr1)\n', (32244, 32261), True, 'import numpy as np\n'), ((2297, 2329), 'numpy.zeros', 'np.zeros', (['spectral_dim0', 'complex'], {}), '(spectral_dim0, complex)\n', (2305, 2329), True, 'import numpy as np\n'), ((2364, 2412), 'numpy.zeros', 'np.zeros', (['(nmet + nmmol, spectral_dim0)', 'complex'], {}), '((nmet + nmmol, spectral_dim0), complex)\n', (2372, 2412), True, 'import numpy as np\n'), ((2445, 2493), 'numpy.zeros', 'np.zeros', (['(nmet + nmmol, spectral_dim0)', 'complex'], {}), '((nmet + nmmol, spectral_dim0), complex)\n', (2453, 2493), True, 'import numpy as np\n'), ((2526, 2558), 'numpy.zeros', 'np.zeros', (['spectral_dim0', 'complex'], {}), '(spectral_dim0, complex)\n', (2534, 2558), True, 'import numpy as np\n'), ((2593, 2616), 'numpy.zeros', 'np.zeros', (['nparam', 'float'], {}), '(nparam, float)\n', (2601, 2616), True, 'import numpy as np\n'), ((2651, 2674), 'numpy.zeros', 'np.zeros', (['nparam', 'float'], {}), '(nparam, float)\n', (2659, 2674), True, 'import numpy as np\n'), ((2709, 2741), 'numpy.zeros', 'np.zeros', (['spectral_dim0', 'complex'], {}), '(spectral_dim0, complex)\n', (2717, 2741), True, 'import numpy as np\n'), ((2776, 2808), 'numpy.zeros', 'np.zeros', (['spectral_dim0', 'complex'], {}), '(spectral_dim0, complex)\n', (2784, 2808), True, 'import numpy as np\n'), ((2849, 2877), 'numpy.zeros', 'np.zeros', (['(2, nparam)', 'float'], {}), '((2, nparam), float)\n', (2857, 2877), True, 'import numpy as np\n'), ((4446, 4460), 'numpy.array', 'np.array', (['ppms'], {}), '(ppms)\n', (4454, 4460), True, 'import numpy as np\n'), ((9425, 9468), 'vespa.analysis.functors.funct_fit_voigt.do_processing_initial', 'funct_fit_voigt.do_processing_initial', (['self'], {}), '(self)\n', (9462, 9468), True, 'import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt\n'), ((21007, 21032), 'vespa.common.util.math_.safe_exp', 'util_math.safe_exp', (['(-expo)'], {}), '(-expo)\n', (21025, 21032), True, 'import vespa.common.util.math_ as util_math\n'), ((21741, 21766), 'vespa.common.util.math_.safe_exp', 'util_math.safe_exp', (['(-expo)'], {}), '(-expo)\n', (21759, 21766), True, 'import vespa.common.util.math_ as util_math\n'), ((23543, 23560), 'numpy.sum', 'np.sum', (['f'], {'axis': '(0)'}), '(f, axis=0)\n', (23549, 23560), True, 'import numpy as np\n'), ((30590, 30605), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (30599, 30605), True, 'import numpy as np\n'), ((30964, 30989), 'vespa.common.util.math_.safe_exp', 'util_math.safe_exp', (['(-expo)'], {}), '(-expo)\n', (30982, 30989), True, 'import vespa.common.util.math_ as util_math\n'), ((31670, 31695), 'vespa.common.util.math_.safe_exp', 'util_math.safe_exp', (['(-expo)'], {}), '(-expo)\n', (31688, 31695), True, 'import vespa.common.util.math_ as util_math\n'), ((33552, 33569), 'numpy.sum', 'np.sum', (['f'], {'axis': '(0)'}), '(f, axis=0)\n', (33558, 33569), True, 'import numpy as np\n'), ((9533, 9577), 'vespa.analysis.functors.funct_fit_voigt.do_processing_full_fit', 'funct_fit_voigt.do_processing_full_fit', (['self'], {}), '(self)\n', (9571, 9577), True, 'import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt\n'), ((17277, 17290), 'numpy.size', 'np.size', (['yfit'], {}), '(yfit)\n', (17284, 17290), True, 'import numpy as np\n'), ((17334, 17365), 'numpy.sum', 'np.sum', (['(ww * (data - yfit) ** 2)'], {}), '(ww * (data - yfit) ** 2)\n', (17340, 17365), True, 'import numpy as np\n'), ((17414, 17440), 'numpy.sum', 'np.sum', (['((data - yfit) ** 2)'], {}), '((data - yfit) ** 2)\n', (17420, 17440), True, 'import numpy as np\n'), ((20604, 20626), 'numpy.arange', 'np.arange', (['(nmet * npts)'], {}), '(nmet * npts)\n', (20613, 20626), True, 'import numpy as np\n'), ((21632, 21650), 'numpy.outer', 'np.outer', (['ma', 'arr1'], {}), '(ma, arr1)\n', (21640, 21650), True, 'import numpy as np\n'), ((23112, 23142), 'numpy.exp', 'np.exp', (['(1.0j * a[nmet * 2 + 2])'], {}), '(1.0j * a[nmet * 2 + 2])\n', (23118, 23142), True, 'import numpy as np\n'), ((23680, 23708), 'numpy.arange', 'np.arange', (['npts'], {'dtype': 'float'}), '(npts, dtype=float)\n', (23689, 23708), True, 'import numpy as np\n'), ((26195, 26212), 'numpy.sum', 'np.sum', (['f'], {'axis': '(0)'}), '(f, axis=0)\n', (26201, 26212), True, 'import numpy as np\n'), ((30512, 30534), 'numpy.arange', 'np.arange', (['(nmet * npts)'], {}), '(nmet * npts)\n', (30521, 30534), True, 'import numpy as np\n'), ((31559, 31577), 'numpy.outer', 'np.outer', (['ma', 'arr1'], {}), '(ma, arr1)\n', (31567, 31577), True, 'import numpy as np\n'), ((33065, 33088), 'numpy.exp', 'np.exp', (['(1.0j * fre * mt)'], {}), '(1.0j * fre * mt)\n', (33071, 33088), True, 'import numpy as np\n'), ((33109, 33139), 'numpy.exp', 'np.exp', (['(1.0j * a[nmet * 2 + 2])'], {}), '(1.0j * a[nmet * 2 + 2])\n', (33115, 33139), True, 'import numpy as np\n'), ((33682, 33710), 'numpy.arange', 'np.arange', (['npts'], {'dtype': 'float'}), '(npts, dtype=float)\n', (33691, 33710), True, 'import numpy as np\n'), ((35270, 35287), 'numpy.sum', 'np.sum', (['f'], {'axis': '(0)'}), '(f, axis=0)\n', (35276, 35287), True, 'import numpy as np\n'), ((9628, 9676), 'vespa.analysis.functors.funct_fit_voigt.do_processing_plot_refresh', 'funct_fit_voigt.do_processing_plot_refresh', (['self'], {}), '(self)\n', (9670, 9676), True, 'import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt\n'), ((22215, 22234), 'numpy.outer', 'np.outer', (['fre', 'arr1'], {}), '(fre, arr1)\n', (22223, 22234), True, 'import numpy as np\n'), ((23989, 24033), 'numpy.fft.fft', 'np.fft.fft', (['(tt * pall / a[nmet * 2 + 0] ** 2)'], {}), '(tt * pall / a[nmet * 2 + 0] ** 2)\n', (23999, 24033), True, 'import numpy as np\n'), ((24088, 24143), 'numpy.fft.fft', 'np.fft.fft', (['(2.0 * tt ** 2 * pall / a[nmet * 2 + 1] ** 3)'], {}), '(2.0 * tt ** 2 * pall / a[nmet * 2 + 1] ** 3)\n', (24098, 24143), True, 'import numpy as np\n'), ((25368, 25391), 'numpy.fft.fft', 'np.fft.fft', (['(1.0j * pall)'], {}), '(1.0j * pall)\n', (25378, 25391), True, 'import numpy as np\n'), ((26934, 26965), 'numpy.concatenate', 'np.concatenate', (['[f, mf]'], {'axis': '(0)'}), '([f, mf], axis=0)\n', (26948, 26965), True, 'import numpy as np\n'), ((27081, 27104), 'numpy.zeros', 'np.zeros', (['nptszf', 'float'], {}), '(nptszf, float)\n', (27089, 27104), True, 'import numpy as np\n'), ((32162, 32181), 'numpy.outer', 'np.outer', (['fre', 'arr1'], {}), '(fre, arr1)\n', (32170, 32181), True, 'import numpy as np\n'), ((33988, 34032), 'numpy.fft.fft', 'np.fft.fft', (['(tt * pall / a[nmet * 2 + 0] ** 2)'], {}), '(tt * pall / a[nmet * 2 + 0] ** 2)\n', (33998, 34032), True, 'import numpy as np\n'), ((34091, 34146), 'numpy.fft.fft', 'np.fft.fft', (['(2.0 * tt ** 2 * pall / a[nmet * 2 + 1] ** 3)'], {}), '(2.0 * tt ** 2 * pall / a[nmet * 2 + 1] ** 3)\n', (34101, 34146), True, 'import numpy as np\n'), ((34361, 34384), 'numpy.fft.fft', 'np.fft.fft', (['(1.0j * pall)'], {}), '(1.0j * pall)\n', (34371, 34384), True, 'import numpy as np\n'), ((36609, 36640), 'numpy.concatenate', 'np.concatenate', (['[f, mf]'], {'axis': '(0)'}), '([f, mf], axis=0)\n', (36623, 36640), True, 'import numpy as np\n'), ((36749, 36772), 'numpy.zeros', 'np.zeros', (['nptszf', 'float'], {}), '(nptszf, float)\n', (36757, 36772), True, 'import numpy as np\n'), ((9729, 9779), 'vespa.analysis.functors.funct_fit_voigt.do_processing_output_refresh', 'funct_fit_voigt.do_processing_output_refresh', (['self'], {}), '(self)\n', (9773, 9779), True, 'import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt\n'), ((16265, 16303), 'numpy.concatenate', 'np.concatenate', (['[pder.real, pder.imag]'], {}), '([pder.real, pder.imag])\n', (16279, 16303), True, 'import numpy as np\n'), ((22361, 22375), 'numpy.zeros', 'np.zeros', (['nmet'], {}), '(nmet)\n', (22369, 22375), True, 'import numpy as np\n'), ((23823, 23852), 'numpy.fft.fft', 'np.fft.fft', (['(pind[i, :] / a[i])'], {}), '(pind[i, :] / a[i])\n', (23833, 23852), True, 'import numpy as np\n'), ((23906, 23940), 'numpy.fft.fft', 'np.fft.fft', (['(tt * 1.0j * pind[i, :])'], {}), '(tt * 1.0j * pind[i, :])\n', (23916, 23940), True, 'import numpy as np\n'), ((25487, 25508), 'numpy.fft.fft', 'np.fft.fft', (['(1.0j * mf)'], {}), '(1.0j * mf)\n', (25497, 25508), True, 'import numpy as np\n'), ((25557, 25573), 'numpy.fft.fft', 'np.fft.fft', (['pall'], {}), '(pall)\n', (25567, 25573), True, 'import numpy as np\n'), ((26066, 26085), 'numpy.fft.fft', 'np.fft.fft', (['f[0, :]'], {}), '(f[0, :])\n', (26076, 26085), True, 'import numpy as np\n'), ((26233, 26246), 'numpy.fft.fft', 'np.fft.fft', (['f'], {}), '(f)\n', (26243, 26246), True, 'import numpy as np\n'), ((26303, 26322), 'numpy.fft.fft', 'np.fft.fft', (['f[0, :]'], {}), '(f[0, :])\n', (26313, 26322), True, 'import numpy as np\n'), ((26802, 26816), 'numpy.fft.fft', 'np.fft.fft', (['mf'], {}), '(mf)\n', (26812, 26816), True, 'import numpy as np\n'), ((27138, 27166), 'numpy.arange', 'np.arange', (['npts'], {'dtype': 'float'}), '(npts, dtype=float)\n', (27147, 27166), True, 'import numpy as np\n'), ((32298, 32312), 'numpy.zeros', 'np.zeros', (['nmet'], {}), '(nmet)\n', (32306, 32312), True, 'import numpy as np\n'), ((33813, 33842), 'numpy.fft.fft', 'np.fft.fft', (['(pind[i, :] / a[i])'], {}), '(pind[i, :] / a[i])\n', (33823, 33842), True, 'import numpy as np\n'), ((33899, 33933), 'numpy.fft.fft', 'np.fft.fft', (['(tt * 1.0j * pind[i, :])'], {}), '(tt * 1.0j * pind[i, :])\n', (33909, 33933), True, 'import numpy as np\n'), ((34488, 34509), 'numpy.fft.fft', 'np.fft.fft', (['(1.0j * mf)'], {}), '(1.0j * mf)\n', (34498, 34509), True, 'import numpy as np\n'), ((34565, 34581), 'numpy.fft.fft', 'np.fft.fft', (['pall'], {}), '(pall)\n', (34575, 34581), True, 'import numpy as np\n'), ((35143, 35162), 'numpy.fft.fft', 'np.fft.fft', (['f[0, :]'], {}), '(f[0, :])\n', (35153, 35162), True, 'import numpy as np\n'), ((35309, 35322), 'numpy.fft.fft', 'np.fft.fft', (['f'], {}), '(f)\n', (35319, 35322), True, 'import numpy as np\n'), ((35381, 35400), 'numpy.fft.fft', 'np.fft.fft', (['f[0, :]'], {}), '(f[0, :])\n', (35391, 35400), True, 'import numpy as np\n'), ((36473, 36487), 'numpy.fft.fft', 'np.fft.fft', (['mf'], {}), '(mf)\n', (36483, 36487), True, 'import numpy as np\n'), ((36807, 36835), 'numpy.arange', 'np.arange', (['npts'], {'dtype': 'float'}), '(npts, dtype=float)\n', (36816, 36835), True, 'import numpy as np\n'), ((10021, 10113), 'vespa.analysis.functors.funct_fit_voigt.do_processing_voxel_change', 'funct_fit_voigt.do_processing_voxel_change', (['self'], {'flag_auto_initvals': 'flag_auto_initvals'}), '(self, flag_auto_initvals=\n flag_auto_initvals)\n', (10063, 10113), True, 'import vespa.analysis.functors.funct_fit_voigt as funct_fit_voigt\n'), ((21903, 21929), 'vespa.common.util.generic_spectral.chop', 'util_spectral.chop', (['finalw'], {}), '(finalw)\n', (21921, 21929), True, 'import vespa.common.util.generic_spectral as util_spectral\n'), ((22602, 22632), 'numpy.arange', 'np.arange', (['nptszf'], {'dtype': 'float'}), '(nptszf, dtype=float)\n', (22611, 22632), True, 'import numpy as np\n'), ((23045, 23060), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (23054, 23060), True, 'import numpy as np\n'), ((25727, 25741), 'numpy.fft.fft', 'np.fft.fft', (['mf'], {}), '(mf)\n', (25737, 25741), True, 'import numpy as np\n'), ((25991, 26010), 'numpy.fft.fft', 'np.fft.fft', (['f[i, :]'], {}), '(f[i, :])\n', (26001, 26010), True, 'import numpy as np\n'), ((27211, 27234), 'numpy.fft.fft', 'np.fft.fft', (['(mind / mamp)'], {}), '(mind / mamp)\n', (27221, 27234), True, 'import numpy as np\n'), ((27292, 27321), 'numpy.fft.fft', 'np.fft.fft', (['(mtt * 1.0j * mind)'], {}), '(mtt * 1.0j * mind)\n', (27302, 27321), True, 'import numpy as np\n'), ((31836, 31862), 'vespa.common.util.generic_spectral.chop', 'util_spectral.chop', (['finalw'], {}), '(finalw)\n', (31854, 31862), True, 'import vespa.common.util.generic_spectral as util_spectral\n'), ((32519, 32549), 'numpy.arange', 'np.arange', (['nptszf'], {'dtype': 'float'}), '(nptszf, dtype=float)\n', (32528, 32549), True, 'import numpy as np\n'), ((34776, 34790), 'numpy.fft.fft', 'np.fft.fft', (['mf'], {}), '(mf)\n', (34786, 34790), True, 'import numpy as np\n'), ((35065, 35084), 'numpy.fft.fft', 'np.fft.fft', (['f[i, :]'], {}), '(f[i, :])\n', (35075, 35084), True, 'import numpy as np\n'), ((36887, 36911), 'numpy.fft.fft', 'np.fft.fft', (['(mind / marea)'], {}), '(mind / marea)\n', (36897, 36911), True, 'import numpy as np\n'), ((36976, 37005), 'numpy.fft.fft', 'np.fft.fft', (['(mtt * 1.0j * mind)'], {}), '(mtt * 1.0j * mind)\n', (36986, 37005), True, 'import numpy as np\n'), ((9846, 9886), 'numpy.sum', 'np.sum', (['self.initial_values[0:self.nmet]'], {}), '(self.initial_values[0:self.nmet])\n', (9852, 9886), True, 'import numpy as np\n'), ((25594, 25624), 'numpy.arange', 'np.arange', (['nptszf'], {'dtype': 'float'}), '(nptszf, dtype=float)\n', (25603, 25624), True, 'import numpy as np\n'), ((32873, 32888), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (32882, 32888), True, 'import numpy as np\n'), ((34633, 34663), 'numpy.arange', 'np.arange', (['nptszf'], {'dtype': 'float'}), '(nptszf, dtype=float)\n', (34642, 34663), True, 'import numpy as np\n'), ((22886, 22901), 'numpy.arange', 'np.arange', (['npts'], {}), '(npts)\n', (22895, 22901), True, 'import numpy as np\n'), ((25762, 25792), 'numpy.arange', 'np.arange', (['nptszf'], {'dtype': 'float'}), '(nptszf, dtype=float)\n', (25771, 25792), True, 'import numpy as np\n'), ((34846, 34876), 'numpy.arange', 'np.arange', (['nptszf'], {'dtype': 'float'}), '(nptszf, dtype=float)\n', (34855, 34876), True, 'import numpy as np\n')] |
from __future__ import division, print_function
import os
from my_utils.tests import test_function
import numpy as np
from itertools import combinations
from fractions import gcd
def part_1(X):
"""
Function which calculates the solution to part 1
Arguments
---------
X : array, a numpy array (spreadsheet!)
Returns
-------
sum : float, the sum of the checksum on each row
"""
return np.sum(X.max(axis=1) - X.min(axis=1))
def part_2(X):
"""
Function which calculates the solution to part 2
Arguments
---------
X : array, a numpy array (spreadsheet!)
Returns
-------
sum : float, the sum of the checksum on each row
"""
ans = []
for row in X:
combs = combinations(row, 2)
comb = [ii for ii in combs if (gcd(*ii) in ii)]
assert len(comb) == 1
comb = comb[0]
ans += [max(comb) / min(comb)]
return np.sum(ans)
def main(test_datas, functions, puzzle_input=None):
for ii, (test_data, fun) in enumerate(zip(test_datas, functions)):
nr_errors = test_function(fun, test_data)
if nr_errors == 0:
print('Pt. {} Tests Passed'.format(ii+1))
if puzzle_input is not None:
fn = os.path.basename(__file__)
for ii, fun in enumerate(functions):
ans = fun(puzzle_input)
print('{} Pt. {} Solution: {}'.format(fn, ii+1, ans))
if __name__ == "__main__":
test_data1 = {
'inputs': [np.array([[5, 1, 9, 5]]), np.array([[7, 5, 3]]),
np.array([[2, 4, 6, 8]])],
'outputs': [np.array(8), np.array(4), np.array(6)]
}
test_data2 = {
'inputs': [np.array([[5, 9, 2, 8]]), np.array([[9, 4, 7, 3]]),
np.array([[3, 8, 6, 5]])],
'outputs': [np.array(4), np.array(3), np.array(2)]
}
puzzle_input = np.genfromtxt('./inputs/day_2.txt', dtype=int)
main(test_datas=[test_data1, test_data2],
functions=[part_1, part_2],
puzzle_input=puzzle_input)
| [
"fractions.gcd",
"itertools.combinations",
"numpy.sum",
"numpy.array",
"my_utils.tests.test_function",
"os.path.basename",
"numpy.genfromtxt"
] | [((945, 956), 'numpy.sum', 'np.sum', (['ans'], {}), '(ans)\n', (951, 956), True, 'import numpy as np\n'), ((1882, 1928), 'numpy.genfromtxt', 'np.genfromtxt', (['"""./inputs/day_2.txt"""'], {'dtype': 'int'}), "('./inputs/day_2.txt', dtype=int)\n", (1895, 1928), True, 'import numpy as np\n'), ((765, 785), 'itertools.combinations', 'combinations', (['row', '(2)'], {}), '(row, 2)\n', (777, 785), False, 'from itertools import combinations\n'), ((1102, 1131), 'my_utils.tests.test_function', 'test_function', (['fun', 'test_data'], {}), '(fun, test_data)\n', (1115, 1131), False, 'from my_utils.tests import test_function\n'), ((1260, 1286), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1276, 1286), False, 'import os\n'), ((1501, 1525), 'numpy.array', 'np.array', (['[[5, 1, 9, 5]]'], {}), '([[5, 1, 9, 5]])\n', (1509, 1525), True, 'import numpy as np\n'), ((1527, 1548), 'numpy.array', 'np.array', (['[[7, 5, 3]]'], {}), '([[7, 5, 3]])\n', (1535, 1548), True, 'import numpy as np\n'), ((1569, 1593), 'numpy.array', 'np.array', (['[[2, 4, 6, 8]]'], {}), '([[2, 4, 6, 8]])\n', (1577, 1593), True, 'import numpy as np\n'), ((1616, 1627), 'numpy.array', 'np.array', (['(8)'], {}), '(8)\n', (1624, 1627), True, 'import numpy as np\n'), ((1629, 1640), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (1637, 1640), True, 'import numpy as np\n'), ((1642, 1653), 'numpy.array', 'np.array', (['(6)'], {}), '(6)\n', (1650, 1653), True, 'import numpy as np\n'), ((1699, 1723), 'numpy.array', 'np.array', (['[[5, 9, 2, 8]]'], {}), '([[5, 9, 2, 8]])\n', (1707, 1723), True, 'import numpy as np\n'), ((1725, 1749), 'numpy.array', 'np.array', (['[[9, 4, 7, 3]]'], {}), '([[9, 4, 7, 3]])\n', (1733, 1749), True, 'import numpy as np\n'), ((1770, 1794), 'numpy.array', 'np.array', (['[[3, 8, 6, 5]]'], {}), '([[3, 8, 6, 5]])\n', (1778, 1794), True, 'import numpy as np\n'), ((1817, 1828), 'numpy.array', 'np.array', (['(4)'], {}), '(4)\n', (1825, 1828), True, 'import numpy as np\n'), ((1830, 1841), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1838, 1841), True, 'import numpy as np\n'), ((1843, 1854), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1851, 1854), True, 'import numpy as np\n'), ((825, 833), 'fractions.gcd', 'gcd', (['*ii'], {}), '(*ii)\n', (828, 833), False, 'from fractions import gcd\n')] |
import pylab
from ..Utilidades.Constantes import Constantes
from ..Utilidades.UtilidadesMatriz import UtilidadesMatriz
utilidadesMatriz = UtilidadesMatriz()
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.covariance import EllipticEnvelope, EmpiricalCovariance, MinCovDet
from scipy import stats
from sklearn import svm
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(42)
class Graphics():
def __init__(self):
print("Clase Graphics Cargada Correctamente")
#Metodo Inicial de la clase
def obtenerValoresParaDibujarGrafica(self, matriz, lista, labels):
numeroColumnas = len(labels)
if numeroColumnas == 2:
valoresX = matriz.index
valoresY = utilidadesMatriz.ObtenerParametros(labels, matriz, 1)
self.dibujarGraficaLineal(valoresX, valoresY)
if numeroColumnas == 3:
if labels[0] == 'Anio':
if len(matriz.index) == 1:
valoresX = matriz.columns
valoresY = matriz.loc[matriz.index[0]]
self.dibujarGraficaLineal(valoresX, valoresY)
else:
self.dibujarGraficaLinealSuperpuesta(lista, matriz)
#Metodo privado
#listaColumnas: Valores para el eje x
#Inicializa divesos parametros comunes a los diversos tipos de grafica de outliers que se usan para dibujar las graficas
def inicializarDatosGrafica(self, listaFilas, listaColumnas, datosOriginales):
valoresColumnas = list()
textoInferior = ""
max_Y = np.amax(datosOriginales)
#TODO
if 'Mes'in listaFilas[0] or 'Pais' in listaFilas[0] and 'Cantidad' in listaFilas[1]:
valoresColumnas = listaColumnas
x = np.arange(0, len(listaColumnas))
xx1, yy1 = np.meshgrid(np.linspace(-2, len(listaColumnas), len(listaColumnas) ), np.linspace(-10, max_Y*2, len(listaFilas)))
textoInferior = listaFilas[0]
elif 'Anio' in listaFilas[0] and 'Cantidad' in listaFilas[1]:
valoresColumnas = listaColumnas
x = valoresColumnas
xx1, yy1 = np.meshgrid(np.linspace(listaColumnas[0]-1, listaColumnas[len(listaColumnas)-1]+1, len(listaColumnas)), np.linspace(-500, max_Y*2, len(listaFilas)))
textoInferior = listaFilas[0]
elif 'Ciudad' in listaFilas[0] and 'Cantidad' in listaFilas[1]:
valoresColumnas = listaColumnas
x = np.arange(0, len(listaColumnas))
xx1, yy1 = np.meshgrid(np.linspace(-2, len(listaColumnas), len(listaColumnas)), np.linspace(-500, max_Y*2, len(listaFilas)))
textoInferior = listaFilas[0]
elif 'Pais' in listaFilas[0] and 'Cantidad' in listaFilas[1]:
valoresColumnas = listaColumnas
x = np.arange(0, len(listaColumnas))
xx1, yy1 = np.meshgrid(np.linspace(-2, len(listaColumnas), len(listaColumnas)), np.linspace(-500, max_Y*2, len(listaFilas)))
textoInferior = listaFilas[0]
elif 'Anio' in listaFilas[0] and 'Ciudad' in listaFilas[1] and 'Cantidad'in listaFilas[2]:
# print(listaColumnas)
# print(len(listaColumnas)) #TODO
x = np.arange(0, len(listaColumnas))
valoresColumnas = listaColumnas
xx1, yy1 = np.meshgrid(np.linspace(-1, len(x), 500), np.linspace(0, max_Y*2, 500))
textoInferior = listaFilas[0]
elif 'Anio' in listaFilas[0] and 'Pais' in listaFilas[1] and 'Cantidad' in listaFilas[2]:
# print(listaColumnas)
# print(listaFilas)
valoresColumnas = listaColumnas
x = np.arange(0, len(listaColumnas))
# print(x)
# print(datosOriginales)
xx1, yy1 = np.meshgrid(np.linspace(-1, len(x), 500), np.linspace(-1000, max_Y*2, 500))
textoInferior = listaFilas[0]
elif 'Anio' in listaFilas[0] and 'Mes' in listaFilas[1] or 'Ciudad' in listaFilas[1] and 'Cantidad' in listaFilas[2] or 'Numero_Vuelos' in listaFilas[2]:
if 'Mes' in listaFilas[1]:
x = np.arange(0, len(Constantes.Meses))
valoresColumnas = Constantes.Meses
else: #Para ciudades/paises
x = np.arange(0, len(listaColumnas))
valoresColumnas = listaColumnas
xx1, yy1 = np.meshgrid(np.linspace(-1, len(x), 500), np.linspace(-10, max_Y*2, 500))
textoInferior = listaFilas[1]
return valoresColumnas, textoInferior, xx1, yy1, x
#CON MESES OK
#CON AÑOS OK
#CON CIUDAD OK
#TODO PAISES
#Metodo para obtener los outliers durante varios años separado por meses
##listaFilas Lista Anios
#ListaColumnas lista Meses
def showOutliersInliersEllipticEnvelope(self, datosOriginales, datosATestear, listaFilas, listaColumnas):
valoresColumnas, textoInferior, xx1, yy1, x = self.inicializarDatosGrafica(listaFilas, listaColumnas, datosOriginales)
# Obtenemos las fronteras de datos basandonos en los datos originales
clf = EllipticEnvelope(contamination=Constantes.ContaminacionEllipticEnvelope)
#Llamamos al metodo para dibujar la grafica
self.dibujarGraficaOutliersInliers(clf, datosOriginales, datosATestear, xx1, yy1, x , valoresColumnas, textoInferior, "Elliptic")
def dibujarGraficaOutliersInliers(self, clf, datosOriginales, datosATestear, xx1, yy1, x , valoresColumnas, textoInferior, tipo):
clf.fit(datosOriginales)
Z = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z = Z.reshape(xx1.shape)
resultadoValoresATestear = clf.predict(datosATestear)
plt.figure(1) # two clusters
# plt.title("Deteccion de Outliers")
if tipo == "Elliptic":
plt.title("Deteccion de Outliers Metodo Elliptic Envelope")
plt.contour(xx1, yy1, Z, levels=[0], linewidths=1, colors='m')
else:
plt.title("Deteccion de Outliers Metodo Isolation Forest")
plt.contourf(xx1, yy1, Z, cmap=plt.cm.Blues_r)
outliers = plt.scatter(-13, -13)
inliers = plt.scatter(-13, -13)
valores_originales = plt.scatter(datosOriginales[:, 0], datosOriginales[:, 1], color='black', label='Valores Originales')
# print(datosATestear)
# Iteramos los valores marcando en rojo los elementos que sean outliers y en verde los inliners
for i in np.arange(0, len(resultadoValoresATestear)):
if resultadoValoresATestear[i] == -1:
color = 'red'
outliers = plt.scatter(datosATestear[i, 0], datosATestear[i, 1], color=color, label='Outliers')
else:
color = 'green'
inliers = plt.scatter(datosATestear[i, 0], datosATestear[i, 1], color=color, label='Inliners')
# Definimos valores de la grafica
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
# print(valoresColumnas)
# print(x)
pylab.xticks(x, valoresColumnas, size='small', rotation='vertical')
# plt.savefig('grafica.png')
plt.ylabel("Cantidad")
plt.xlabel(textoInferior)
plt.legend(handles=[valores_originales, outliers, inliers])
plt.show()
#CON MESES OK
#CON AÑOS OK
#NO TOCAR
#Metodo para obtenerla grafica de outliers/inliers durante varios años
def MostrarGraficaInliersOutliersIsolationForest(self, datosOriginales, datosATestear, listaFilas, listaColumnas):
valoresColumnas, textoInferior, xx1, yy1, x = self.inicializarDatosGrafica(listaFilas, listaColumnas, datosOriginales)
# Fit the model
n_samples = len(datosOriginales)
clf = IsolationForest(max_samples=n_samples, random_state=rng)
#Llamamos al metodo para dibujar la grafica
self.dibujarGraficaOutliersInliers(clf, datosOriginales, datosATestear, xx1, yy1, x , valoresColumnas, textoInferior, "Forest")
#
#NO TOCAR
# def showOutliersInliers(self, datosOriginales, datosATestear):
# max_Y = np.amax(datosOriginales)
#
#
# # Obtenemos las fronteras de datos basandonos en los datos originales
# xx1, yy1 = np.meshgrid(np.linspace(-1, 13, 500), np.linspace(-1, max_Y*2, 500)) #Seteamos a 13 debido a los meses
# clf = EllipticEnvelope(contamination=0.26161)
# clf.fit(datosOriginales)
# Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
# Z1 = Z1.reshape(xx1.shape)
# plt.contour(xx1, yy1, Z1, levels=[0], linewidths=1, colors='m')
# pred_test = clf.predict(datosATestear)
# plt.figure(1) # two clusters
# plt.title("Deteccion de Outliers")
# valores_originales = plt.scatter(datosOriginales[:, 0], datosOriginales[:, 1], color='black', label='Valores Originales')
# inliers = plt.scatter(-1, -1)
#
# # Iteramos los valores marcando en rojo los elementos que sean outliers y en verde los inliners
# for i in np.arange(0,len(pred_test)):
# if pred_test[i] == -1:
# color = 'red'
# outliers = plt.scatter(datosATestear[i, 0], datosATestear[i, 1], color=color, label='Outliers')
# else:
# color = 'green'
# inliers = plt.scatter(datosATestear[i, 0], datosATestear[i, 1], color=color, label='Inliners')
# # Definimos valores de la grafica
# plt.xlim((xx1.min(), xx1.max()))
# plt.ylim((yy1.min(), yy1.max()))
#
# plt.ylabel("Valores")
# plt.xlabel("Meses")
# plt.legend(handles=[valores_originales, outliers, inliers])
#
# plt.show()
# plt.savefig('grafica.png')
#Metodo para obtener colores aleatorios sin repeticion
def get_cmap(self, n, name='hsv'):
'''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.'''
return plt.cm.get_cmap(name, n)
#Metodo Generico para dibujar Graficas
def dibujarGraficaLineal(self, valoresX, valoresY):
pylab.figure(1)
pylab.figure( figsize=(12.0, 6.0))
x = np.arange(0, len(valoresX))
coloresMapped = self.get_cmap(len(valoresX))
for item in np.arange(0, len(valoresX)):
valorX = valoresX[item]
pylab.scatter(item, valoresY.loc[valorX], color=coloresMapped(item), label = valorX)
# pylab.plot(item, valoresY.loc[valorX], color=coloresMapped(item), label = valorX)
pylab.xticks(x, valoresX)
pylab.legend(loc='upper right')
pylab.savefig("grafica.png")
pylab.show()
#Metodo Generico para dibujar Graficas durante varios anios
def dibujarGraficaLinealSuperpuesta(self, listaFilas, matriz):
pylab.figure(1)
pylab.figure( figsize=(12.0, 6.0))
index = 0
listaColumnas = matriz.columns
for item in listaFilas:
valores = matriz.loc[item]
coloresMapped = self.get_cmap(len(valores))
x = np.arange(0, len(valores))
pylab.scatter(x, valores, color=coloresMapped(index), label = item)
index = index +1
pylab.xticks(x, listaColumnas)
pylab.legend(loc='upper right')
# savefig("grafica.png")
pylab.show()
| [
"matplotlib.pyplot.ylabel",
"pylab.savefig",
"numpy.random.RandomState",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.contour",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"sklearn.covariance.EllipticEnvelope",
"pylab.figure",
"matplotlib.pyplot.cm.get_cmap",
... | [((405, 430), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (426, 430), True, 'import numpy as np\n'), ((1633, 1657), 'numpy.amax', 'np.amax', (['datosOriginales'], {}), '(datosOriginales)\n', (1640, 1657), True, 'import numpy as np\n'), ((5206, 5278), 'sklearn.covariance.EllipticEnvelope', 'EllipticEnvelope', ([], {'contamination': 'Constantes.ContaminacionEllipticEnvelope'}), '(contamination=Constantes.ContaminacionEllipticEnvelope)\n', (5222, 5278), False, 'from sklearn.covariance import EllipticEnvelope, EmpiricalCovariance, MinCovDet\n'), ((5840, 5853), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (5850, 5853), True, 'import matplotlib.pyplot as plt\n'), ((6268, 6289), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(-13)', '(-13)'], {}), '(-13, -13)\n', (6279, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6308, 6329), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(-13)', '(-13)'], {}), '(-13, -13)\n', (6319, 6329), True, 'import matplotlib.pyplot as plt\n'), ((6359, 6463), 'matplotlib.pyplot.scatter', 'plt.scatter', (['datosOriginales[:, 0]', 'datosOriginales[:, 1]'], {'color': '"""black"""', 'label': '"""Valores Originales"""'}), "(datosOriginales[:, 0], datosOriginales[:, 1], color='black',\n label='Valores Originales')\n", (6370, 6463), True, 'import matplotlib.pyplot as plt\n'), ((7209, 7276), 'pylab.xticks', 'pylab.xticks', (['x', 'valoresColumnas'], {'size': '"""small"""', 'rotation': '"""vertical"""'}), "(x, valoresColumnas, size='small', rotation='vertical')\n", (7221, 7276), False, 'import pylab\n'), ((7321, 7343), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cantidad"""'], {}), "('Cantidad')\n", (7331, 7343), True, 'import matplotlib.pyplot as plt\n'), ((7352, 7377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['textoInferior'], {}), '(textoInferior)\n', (7362, 7377), True, 'import matplotlib.pyplot as plt\n'), ((7386, 7445), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': '[valores_originales, outliers, inliers]'}), '(handles=[valores_originales, outliers, inliers])\n', (7396, 7445), True, 'import matplotlib.pyplot as plt\n'), ((7454, 7464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7462, 7464), True, 'import matplotlib.pyplot as plt\n'), ((8055, 8111), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'max_samples': 'n_samples', 'random_state': 'rng'}), '(max_samples=n_samples, random_state=rng)\n', (8070, 8111), False, 'from sklearn.ensemble import IsolationForest\n'), ((10327, 10351), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['name', 'n'], {}), '(name, n)\n', (10342, 10351), True, 'import matplotlib.pyplot as plt\n'), ((10473, 10488), 'pylab.figure', 'pylab.figure', (['(1)'], {}), '(1)\n', (10485, 10488), False, 'import pylab\n'), ((10497, 10530), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(12.0, 6.0)'}), '(figsize=(12.0, 6.0))\n', (10509, 10530), False, 'import pylab\n'), ((10916, 10941), 'pylab.xticks', 'pylab.xticks', (['x', 'valoresX'], {}), '(x, valoresX)\n', (10928, 10941), False, 'import pylab\n'), ((10959, 10990), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (10971, 10990), False, 'import pylab\n'), ((10999, 11027), 'pylab.savefig', 'pylab.savefig', (['"""grafica.png"""'], {}), "('grafica.png')\n", (11012, 11027), False, 'import pylab\n'), ((11036, 11048), 'pylab.show', 'pylab.show', ([], {}), '()\n', (11046, 11048), False, 'import pylab\n'), ((11202, 11217), 'pylab.figure', 'pylab.figure', (['(1)'], {}), '(1)\n', (11214, 11217), False, 'import pylab\n'), ((11226, 11259), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(12.0, 6.0)'}), '(figsize=(12.0, 6.0))\n', (11238, 11259), False, 'import pylab\n'), ((11661, 11692), 'pylab.legend', 'pylab.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (11673, 11692), False, 'import pylab\n'), ((11733, 11745), 'pylab.show', 'pylab.show', ([], {}), '()\n', (11743, 11745), False, 'import pylab\n'), ((5957, 6016), 'matplotlib.pyplot.title', 'plt.title', (['"""Deteccion de Outliers Metodo Elliptic Envelope"""'], {}), "('Deteccion de Outliers Metodo Elliptic Envelope')\n", (5966, 6016), True, 'import matplotlib.pyplot as plt\n'), ((6029, 6091), 'matplotlib.pyplot.contour', 'plt.contour', (['xx1', 'yy1', 'Z'], {'levels': '[0]', 'linewidths': '(1)', 'colors': '"""m"""'}), "(xx1, yy1, Z, levels=[0], linewidths=1, colors='m')\n", (6040, 6091), True, 'import matplotlib.pyplot as plt\n'), ((6118, 6176), 'matplotlib.pyplot.title', 'plt.title', (['"""Deteccion de Outliers Metodo Isolation Forest"""'], {}), "('Deteccion de Outliers Metodo Isolation Forest')\n", (6127, 6176), True, 'import matplotlib.pyplot as plt\n'), ((6189, 6235), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx1', 'yy1', 'Z'], {'cmap': 'plt.cm.Blues_r'}), '(xx1, yy1, Z, cmap=plt.cm.Blues_r)\n', (6201, 6235), True, 'import matplotlib.pyplot as plt\n'), ((11621, 11651), 'pylab.xticks', 'pylab.xticks', (['x', 'listaColumnas'], {}), '(x, listaColumnas)\n', (11633, 11651), False, 'import pylab\n'), ((6766, 6855), 'matplotlib.pyplot.scatter', 'plt.scatter', (['datosATestear[i, 0]', 'datosATestear[i, 1]'], {'color': 'color', 'label': '"""Outliers"""'}), "(datosATestear[i, 0], datosATestear[i, 1], color=color, label=\n 'Outliers')\n", (6777, 6855), True, 'import matplotlib.pyplot as plt\n'), ((6927, 7016), 'matplotlib.pyplot.scatter', 'plt.scatter', (['datosATestear[i, 0]', 'datosATestear[i, 1]'], {'color': 'color', 'label': '"""Inliners"""'}), "(datosATestear[i, 0], datosATestear[i, 1], color=color, label=\n 'Inliners')\n", (6938, 7016), True, 'import matplotlib.pyplot as plt\n'), ((3463, 3493), 'numpy.linspace', 'np.linspace', (['(0)', '(max_Y * 2)', '(500)'], {}), '(0, max_Y * 2, 500)\n', (3474, 3493), True, 'import numpy as np\n'), ((3915, 3949), 'numpy.linspace', 'np.linspace', (['(-1000)', '(max_Y * 2)', '(500)'], {}), '(-1000, max_Y * 2, 500)\n', (3926, 3949), True, 'import numpy as np\n'), ((4507, 4539), 'numpy.linspace', 'np.linspace', (['(-10)', '(max_Y * 2)', '(500)'], {}), '(-10, max_Y * 2, 500)\n', (4518, 4539), True, 'import numpy as np\n')] |
#! /usr/bin/env python
from __future__ import print_function
from timeit import default_timer as time
import sys
import numpy as np
from numba import dppl, njit
from numba.dppl.testing import unittest
from numba.dppl.testing import DPPLTestCase
import dppl.ocldrv as ocldrv
class TestNumpy_math_functions(DPPLTestCase):
N = 10
a = np.array(np.random.random(N), dtype=np.float32)
b = np.array(np.random.random(N), dtype=np.float32)
def test_add(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.add(a, b)
return c
c = f(self.a, self.b)
d = self.a + self.b
self.assertTrue(np.all(c == d))
def test_subtract(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.subtract(a, b)
return c
c = f(self.a, self.b)
d = self.a - self.b
self.assertTrue(np.all(c == d))
def test_multiply(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.multiply(a, b)
return c
c = f(self.a, self.b)
d = self.a * self.b
self.assertTrue(np.all(c == d))
def test_divide(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.divide(a, b)
return c
c = f(self.a, self.b)
d = self.a / self.b
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_true_divide(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.true_divide(a, b)
return c
c = f(self.a, self.b)
d = np.true_divide(self.a, self.b)
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_negative(self):
@njit(parallel={'offload':True})
def f(a):
c = np.negative(a)
return c
c = f(self.a)
self.assertTrue(np.all(c == -self.a))
def test_power(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.power(a, b)
return c
input_arr = np.random.randint(self.N, size=(self.N))
exp = np.full((self.N), 2, dtype=np.int)
c = f(input_arr, exp)
self.assertTrue(np.all(c == input_arr * input_arr))
def test_remainder(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.remainder(a, b)
return c
input_arr = np.full((self.N), 3, dtype=np.int)
divisor = np.full((self.N), 2, dtype=np.int)
c = f(input_arr, divisor)
self.assertTrue(np.all(c == 1))
def test_mod(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.mod(a, b)
return c
input_arr = np.full((self.N), 3, dtype=np.int)
divisor = np.full((self.N), 2, dtype=np.int)
c = f(input_arr, divisor)
self.assertTrue(np.all(c == 1))
def test_fmod(self):
@njit(parallel={'offload':True})
def f(a, b):
c = np.fmod(a, b)
return c
input_arr = np.full((self.N), 3, dtype=np.float32)
divisor = np.full((self.N), 2, dtype=np.int)
c = f(input_arr, divisor)
self.assertTrue(np.all(c == 1.))
def test_abs(self):
@njit(parallel={'offload':True})
def f(a):
c = np.abs(a)
return c
input_arr = 5 * np.random.random_sample(self.N) - 5
c = f(input_arr)
self.assertTrue(np.all(c == -input_arr))
def test_absolute(self):
@njit(parallel={'offload':True})
def f(a):
c = np.absolute(a)
return c
input_arr = 5 * np.random.random_sample(self.N) - 5
c = f(input_arr)
self.assertTrue(np.all(c == -input_arr))
def test_fabs(self):
@njit(parallel={'offload':True})
def f(a):
c = np.fabs(a)
return c
input_arr = 5 * np.random.random_sample(self.N) - 5
c = f(input_arr)
self.assertTrue(np.all(c == -input_arr))
def test_sign(self):
@njit(parallel={'offload':True})
def f(a):
c = np.sign(a)
return c
input_arr = 5 * np.random.random_sample(self.N) - 5
c = f(input_arr)
self.assertTrue(np.all(c == -1.))
def test_conj(self):
@njit(parallel={'offload':True})
def f(a):
c = np.conj(a)
return c
input_arr = np.eye(self.N) + 1j * np.eye(self.N)
c = f(input_arr)
d = np.conj(input_arr)
self.assertTrue(np.all(c == d))
def test_exp(self):
@njit(parallel={'offload':True})
def f(a):
c = np.exp(a)
return c
input_arr = np.random.randint(self.N, size=(self.N))
c = f(input_arr)
d = np.exp(input_arr)
self.assertTrue(np.all(c == d))
def test_log(self):
@njit(parallel={'offload':True})
def f(a):
c = np.log(a)
return c
input_arr = np.random.randint(1, self.N, size=(self.N))
c = f(input_arr)
d = np.log(input_arr)
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_log10(self):
@njit(parallel={'offload':True})
def f(a):
c = np.log10(a)
return c
input_arr = np.random.randint(1, self.N, size=(self.N))
c = f(input_arr)
d = np.log10(input_arr)
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_expm1(self):
@njit(parallel={'offload':True})
def f(a):
c = np.expm1(a)
return c
input_arr = np.random.randint(1, self.N, size=(self.N))
c = f(input_arr)
d = np.expm1(input_arr)
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_log1p(self):
@njit(parallel={'offload':True})
def f(a):
c = np.log1p(a)
return c
input_arr = np.random.randint(1, self.N, size=(self.N))
c = f(input_arr)
d = np.log1p(input_arr)
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_sqrt(self):
@njit(parallel={'offload':True})
def f(a):
c = np.sqrt(a)
return c
c = f(self.a)
d = np.sqrt(self.a)
max_abs_err = c.sum() - d.sum()
self.assertTrue(max_abs_err < 1e-5)
def test_square(self):
@njit(parallel={'offload':True})
def f(a):
c = np.square(a)
return c
input_arr = np.random.randint(self.N, size=(self.N))
c = f(input_arr)
self.assertTrue(np.all(c == input_arr * input_arr))
def test_reciprocal(self):
@njit(parallel={'offload':True})
def f(a):
c = np.reciprocal(a)
return c
input_arr = 5 * np.random.random_sample(self.N) + 5
c = f(input_arr)
self.assertTrue(np.all(c == 1/input_arr))
def test_conjugate(self):
@njit(parallel={'offload':True})
def f(a):
c = np.conjugate(a)
return c
input_arr = np.eye(self.N) + 1j * np.eye(self.N)
c = f(input_arr)
d = np.conj(input_arr)
self.assertTrue(np.all(c == d))
if __name__ == '__main__':
unittest.main()
| [
"numba.dppl.testing.unittest.main",
"numpy.log10",
"numpy.sqrt",
"numpy.log",
"numpy.mod",
"numpy.divide",
"numpy.multiply",
"numpy.random.random",
"numpy.conjugate",
"numpy.subtract",
"numpy.exp",
"numpy.remainder",
"numpy.abs",
"numpy.eye",
"numpy.random.random_sample",
"numpy.add",
... | [((7489, 7504), 'numba.dppl.testing.unittest.main', 'unittest.main', ([], {}), '()\n', (7502, 7504), False, 'from numba.dppl.testing import unittest\n'), ((351, 370), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (367, 370), True, 'import numpy as np\n'), ((407, 426), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (423, 426), True, 'import numpy as np\n'), ((480, 512), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (484, 512), False, 'from numba import dppl, njit\n'), ((721, 753), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (725, 753), False, 'from numba import dppl, njit\n'), ((967, 999), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (971, 999), False, 'from numba import dppl, njit\n'), ((1211, 1243), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (1215, 1243), False, 'from numba import dppl, njit\n'), ((1502, 1534), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (1506, 1534), False, 'from numba import dppl, njit\n'), ((1656, 1686), 'numpy.true_divide', 'np.true_divide', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (1670, 1686), True, 'import numpy as np\n'), ((1810, 1842), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (1814, 1842), False, 'from numba import dppl, njit\n'), ((2017, 2049), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (2021, 2049), False, 'from numba import dppl, njit\n'), ((2143, 2181), 'numpy.random.randint', 'np.random.randint', (['self.N'], {'size': 'self.N'}), '(self.N, size=self.N)\n', (2160, 2181), True, 'import numpy as np\n'), ((2198, 2230), 'numpy.full', 'np.full', (['self.N', '(2)'], {'dtype': 'np.int'}), '(self.N, 2, dtype=np.int)\n', (2205, 2230), True, 'import numpy as np\n'), ((2364, 2396), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (2368, 2396), False, 'from numba import dppl, njit\n'), ((2494, 2526), 'numpy.full', 'np.full', (['self.N', '(3)'], {'dtype': 'np.int'}), '(self.N, 3, dtype=np.int)\n', (2501, 2526), True, 'import numpy as np\n'), ((2547, 2579), 'numpy.full', 'np.full', (['self.N', '(2)'], {'dtype': 'np.int'}), '(self.N, 2, dtype=np.int)\n', (2554, 2579), True, 'import numpy as np\n'), ((2691, 2723), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (2695, 2723), False, 'from numba import dppl, njit\n'), ((2815, 2847), 'numpy.full', 'np.full', (['self.N', '(3)'], {'dtype': 'np.int'}), '(self.N, 3, dtype=np.int)\n', (2822, 2847), True, 'import numpy as np\n'), ((2868, 2900), 'numpy.full', 'np.full', (['self.N', '(2)'], {'dtype': 'np.int'}), '(self.N, 2, dtype=np.int)\n', (2875, 2900), True, 'import numpy as np\n'), ((3013, 3045), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (3017, 3045), False, 'from numba import dppl, njit\n'), ((3138, 3174), 'numpy.full', 'np.full', (['self.N', '(3)'], {'dtype': 'np.float32'}), '(self.N, 3, dtype=np.float32)\n', (3145, 3174), True, 'import numpy as np\n'), ((3195, 3227), 'numpy.full', 'np.full', (['self.N', '(2)'], {'dtype': 'np.int'}), '(self.N, 2, dtype=np.int)\n', (3202, 3227), True, 'import numpy as np\n'), ((3340, 3372), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (3344, 3372), False, 'from numba import dppl, njit\n'), ((3612, 3644), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (3616, 3644), False, 'from numba import dppl, njit\n'), ((3886, 3918), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (3890, 3918), False, 'from numba import dppl, njit\n'), ((4156, 4188), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (4160, 4188), False, 'from numba import dppl, njit\n'), ((4418, 4450), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (4422, 4450), False, 'from numba import dppl, njit\n'), ((4612, 4630), 'numpy.conj', 'np.conj', (['input_arr'], {}), '(input_arr)\n', (4619, 4630), True, 'import numpy as np\n'), ((4705, 4737), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (4709, 4737), False, 'from numba import dppl, njit\n'), ((4823, 4861), 'numpy.random.randint', 'np.random.randint', (['self.N'], {'size': 'self.N'}), '(self.N, size=self.N)\n', (4840, 4861), True, 'import numpy as np\n'), ((4901, 4918), 'numpy.exp', 'np.exp', (['input_arr'], {}), '(input_arr)\n', (4907, 4918), True, 'import numpy as np\n'), ((4994, 5026), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (4998, 5026), False, 'from numba import dppl, njit\n'), ((5112, 5153), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self.N'], {'size': 'self.N'}), '(1, self.N, size=self.N)\n', (5129, 5153), True, 'import numpy as np\n'), ((5193, 5210), 'numpy.log', 'np.log', (['input_arr'], {}), '(input_arr)\n', (5199, 5210), True, 'import numpy as np\n'), ((5332, 5364), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (5336, 5364), False, 'from numba import dppl, njit\n'), ((5452, 5493), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self.N'], {'size': 'self.N'}), '(1, self.N, size=self.N)\n', (5469, 5493), True, 'import numpy as np\n'), ((5533, 5552), 'numpy.log10', 'np.log10', (['input_arr'], {}), '(input_arr)\n', (5541, 5552), True, 'import numpy as np\n'), ((5674, 5706), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (5678, 5706), False, 'from numba import dppl, njit\n'), ((5794, 5835), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self.N'], {'size': 'self.N'}), '(1, self.N, size=self.N)\n', (5811, 5835), True, 'import numpy as np\n'), ((5875, 5894), 'numpy.expm1', 'np.expm1', (['input_arr'], {}), '(input_arr)\n', (5883, 5894), True, 'import numpy as np\n'), ((6016, 6048), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (6020, 6048), False, 'from numba import dppl, njit\n'), ((6136, 6177), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self.N'], {'size': 'self.N'}), '(1, self.N, size=self.N)\n', (6153, 6177), True, 'import numpy as np\n'), ((6217, 6236), 'numpy.log1p', 'np.log1p', (['input_arr'], {}), '(input_arr)\n', (6225, 6236), True, 'import numpy as np\n'), ((6356, 6388), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (6360, 6388), False, 'from numba import dppl, njit\n'), ((6489, 6504), 'numpy.sqrt', 'np.sqrt', (['self.a'], {}), '(self.a)\n', (6496, 6504), True, 'import numpy as np\n'), ((6627, 6659), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (6631, 6659), False, 'from numba import dppl, njit\n'), ((6748, 6786), 'numpy.random.randint', 'np.random.randint', (['self.N'], {'size': 'self.N'}), '(self.N, size=self.N)\n', (6765, 6786), True, 'import numpy as np\n'), ((6916, 6948), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (6920, 6948), False, 'from numba import dppl, njit\n'), ((7198, 7230), 'numba.njit', 'njit', ([], {'parallel': "{'offload': True}"}), "(parallel={'offload': True})\n", (7202, 7230), False, 'from numba import dppl, njit\n'), ((7397, 7415), 'numpy.conj', 'np.conj', (['input_arr'], {}), '(input_arr)\n', (7404, 7415), True, 'import numpy as np\n'), ((549, 561), 'numpy.add', 'np.add', (['a', 'b'], {}), '(a, b)\n', (555, 561), True, 'import numpy as np\n'), ((666, 680), 'numpy.all', 'np.all', (['(c == d)'], {}), '(c == d)\n', (672, 680), True, 'import numpy as np\n'), ((790, 807), 'numpy.subtract', 'np.subtract', (['a', 'b'], {}), '(a, b)\n', (801, 807), True, 'import numpy as np\n'), ((912, 926), 'numpy.all', 'np.all', (['(c == d)'], {}), '(c == d)\n', (918, 926), True, 'import numpy as np\n'), ((1036, 1053), 'numpy.multiply', 'np.multiply', (['a', 'b'], {}), '(a, b)\n', (1047, 1053), True, 'import numpy as np\n'), ((1158, 1172), 'numpy.all', 'np.all', (['(c == d)'], {}), '(c == d)\n', (1164, 1172), True, 'import numpy as np\n'), ((1280, 1295), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (1289, 1295), True, 'import numpy as np\n'), ((1571, 1591), 'numpy.true_divide', 'np.true_divide', (['a', 'b'], {}), '(a, b)\n', (1585, 1591), True, 'import numpy as np\n'), ((1876, 1890), 'numpy.negative', 'np.negative', (['a'], {}), '(a)\n', (1887, 1890), True, 'import numpy as np\n'), ((1959, 1979), 'numpy.all', 'np.all', (['(c == -self.a)'], {}), '(c == -self.a)\n', (1965, 1979), True, 'import numpy as np\n'), ((2086, 2100), 'numpy.power', 'np.power', (['a', 'b'], {}), '(a, b)\n', (2094, 2100), True, 'import numpy as np\n'), ((2288, 2322), 'numpy.all', 'np.all', (['(c == input_arr * input_arr)'], {}), '(c == input_arr * input_arr)\n', (2294, 2322), True, 'import numpy as np\n'), ((2433, 2451), 'numpy.remainder', 'np.remainder', (['a', 'b'], {}), '(a, b)\n', (2445, 2451), True, 'import numpy as np\n'), ((2641, 2655), 'numpy.all', 'np.all', (['(c == 1)'], {}), '(c == 1)\n', (2647, 2655), True, 'import numpy as np\n'), ((2760, 2772), 'numpy.mod', 'np.mod', (['a', 'b'], {}), '(a, b)\n', (2766, 2772), True, 'import numpy as np\n'), ((2962, 2976), 'numpy.all', 'np.all', (['(c == 1)'], {}), '(c == 1)\n', (2968, 2976), True, 'import numpy as np\n'), ((3082, 3095), 'numpy.fmod', 'np.fmod', (['a', 'b'], {}), '(a, b)\n', (3089, 3095), True, 'import numpy as np\n'), ((3289, 3305), 'numpy.all', 'np.all', (['(c == 1.0)'], {}), '(c == 1.0)\n', (3295, 3305), True, 'import numpy as np\n'), ((3406, 3415), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (3412, 3415), True, 'import numpy as np\n'), ((3548, 3571), 'numpy.all', 'np.all', (['(c == -input_arr)'], {}), '(c == -input_arr)\n', (3554, 3571), True, 'import numpy as np\n'), ((3678, 3692), 'numpy.absolute', 'np.absolute', (['a'], {}), '(a)\n', (3689, 3692), True, 'import numpy as np\n'), ((3825, 3848), 'numpy.all', 'np.all', (['(c == -input_arr)'], {}), '(c == -input_arr)\n', (3831, 3848), True, 'import numpy as np\n'), ((3952, 3962), 'numpy.fabs', 'np.fabs', (['a'], {}), '(a)\n', (3959, 3962), True, 'import numpy as np\n'), ((4095, 4118), 'numpy.all', 'np.all', (['(c == -input_arr)'], {}), '(c == -input_arr)\n', (4101, 4118), True, 'import numpy as np\n'), ((4222, 4232), 'numpy.sign', 'np.sign', (['a'], {}), '(a)\n', (4229, 4232), True, 'import numpy as np\n'), ((4365, 4382), 'numpy.all', 'np.all', (['(c == -1.0)'], {}), '(c == -1.0)\n', (4371, 4382), True, 'import numpy as np\n'), ((4484, 4494), 'numpy.conj', 'np.conj', (['a'], {}), '(a)\n', (4491, 4494), True, 'import numpy as np\n'), ((4537, 4551), 'numpy.eye', 'np.eye', (['self.N'], {}), '(self.N)\n', (4543, 4551), True, 'import numpy as np\n'), ((4655, 4669), 'numpy.all', 'np.all', (['(c == d)'], {}), '(c == d)\n', (4661, 4669), True, 'import numpy as np\n'), ((4771, 4780), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (4777, 4780), True, 'import numpy as np\n'), ((4943, 4957), 'numpy.all', 'np.all', (['(c == d)'], {}), '(c == d)\n', (4949, 4957), True, 'import numpy as np\n'), ((5060, 5069), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (5066, 5069), True, 'import numpy as np\n'), ((5398, 5409), 'numpy.log10', 'np.log10', (['a'], {}), '(a)\n', (5406, 5409), True, 'import numpy as np\n'), ((5740, 5751), 'numpy.expm1', 'np.expm1', (['a'], {}), '(a)\n', (5748, 5751), True, 'import numpy as np\n'), ((6082, 6093), 'numpy.log1p', 'np.log1p', (['a'], {}), '(a)\n', (6090, 6093), True, 'import numpy as np\n'), ((6422, 6432), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (6429, 6432), True, 'import numpy as np\n'), ((6693, 6705), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (6702, 6705), True, 'import numpy as np\n'), ((6839, 6873), 'numpy.all', 'np.all', (['(c == input_arr * input_arr)'], {}), '(c == input_arr * input_arr)\n', (6845, 6873), True, 'import numpy as np\n'), ((6982, 6998), 'numpy.reciprocal', 'np.reciprocal', (['a'], {}), '(a)\n', (6995, 6998), True, 'import numpy as np\n'), ((7132, 7158), 'numpy.all', 'np.all', (['(c == 1 / input_arr)'], {}), '(c == 1 / input_arr)\n', (7138, 7158), True, 'import numpy as np\n'), ((7264, 7279), 'numpy.conjugate', 'np.conjugate', (['a'], {}), '(a)\n', (7276, 7279), True, 'import numpy as np\n'), ((7322, 7336), 'numpy.eye', 'np.eye', (['self.N'], {}), '(self.N)\n', (7328, 7336), True, 'import numpy as np\n'), ((7440, 7454), 'numpy.all', 'np.all', (['(c == d)'], {}), '(c == d)\n', (7446, 7454), True, 'import numpy as np\n'), ((3462, 3493), 'numpy.random.random_sample', 'np.random.random_sample', (['self.N'], {}), '(self.N)\n', (3485, 3493), True, 'import numpy as np\n'), ((3739, 3770), 'numpy.random.random_sample', 'np.random.random_sample', (['self.N'], {}), '(self.N)\n', (3762, 3770), True, 'import numpy as np\n'), ((4009, 4040), 'numpy.random.random_sample', 'np.random.random_sample', (['self.N'], {}), '(self.N)\n', (4032, 4040), True, 'import numpy as np\n'), ((4279, 4310), 'numpy.random.random_sample', 'np.random.random_sample', (['self.N'], {}), '(self.N)\n', (4302, 4310), True, 'import numpy as np\n'), ((4559, 4573), 'numpy.eye', 'np.eye', (['self.N'], {}), '(self.N)\n', (4565, 4573), True, 'import numpy as np\n'), ((7046, 7077), 'numpy.random.random_sample', 'np.random.random_sample', (['self.N'], {}), '(self.N)\n', (7069, 7077), True, 'import numpy as np\n'), ((7344, 7358), 'numpy.eye', 'np.eye', (['self.N'], {}), '(self.N)\n', (7350, 7358), True, 'import numpy as np\n')] |
# -----------------------------------------------------------
# Test zipping psf datasets.
#
# (C) 2020 <NAME>, Oxford, United Kingdom
# Released under Apache License, Version 2.0
# email <EMAIL>
# -----------------------------------------------------------
import numpy as np
import pytest
from psfdataset import PSFDataset
from psfdataset import PSFZippedDataset
from psfdataset.transforms.spatial import Normalize, Crop
class TestPSFZippedDataset:
def test_PSFZippedDataset(self):
# Check zipping
ds1 = PSFDataset(Crop())
ds1.add_element(np.array([[[1], [2]]]), 1)
ds1.add_element(np.array([[[2], [4]]]), 0)
ds2 = PSFDataset(Normalize(3, 1))
ds2.add_element(np.array([1, 2]), 1) # 0 -> -1 0
ds2.add_element(np.array([2, 3]), 0) # 1 -> 0 1
ds2.add_element(np.array([1, 3]), 0) # 2-> -1 1
with pytest.raises(Exception):
ds = PSFZippedDataset(ds1)
ds = PSFZippedDataset([ds1])
ds = PSFZippedDataset((ds1, ds2))
ds1.add_element(np.array([[[3], [6]]]), 0)
ds = PSFZippedDataset((ds1, ds2))
assert len(ds) == 3
assert ds.get_data_dimension() == 4
np.testing.assert_array_equal(ds[0][0], np.array([0, 1, -1, 0]))
np.testing.assert_array_equal(ds[1][0], np.array([0, 2, 0, 1]))
assert ds[0][1] == 1
assert ds[1][1] == 0
# Test iterator access
i = 0
for data, label in ds.get_iterator():
np.testing.assert_array_equal(
data, np.array([0, i + 1, i % 2 - 1,
int(i > 0)]))
assert label == int(i == 0)
i += 1
# Check description array
desc = ds.get_description()
assert isinstance(desc, dict)
for key, val in ds1.get_description().items():
assert "[DS1]" + key in desc
assert desc["[DS1]" + key] == val
for key, val in ds2.get_description().items():
assert "[DS2]" + key in desc
assert desc["[DS2]" + key] == val
| [
"psfdataset.transforms.spatial.Normalize",
"psfdataset.transforms.spatial.Crop",
"numpy.array",
"psfdataset.PSFZippedDataset",
"pytest.raises"
] | [((1095, 1123), 'psfdataset.PSFZippedDataset', 'PSFZippedDataset', (['(ds1, ds2)'], {}), '((ds1, ds2))\n', (1111, 1123), False, 'from psfdataset import PSFZippedDataset\n'), ((540, 546), 'psfdataset.transforms.spatial.Crop', 'Crop', ([], {}), '()\n', (544, 546), False, 'from psfdataset.transforms.spatial import Normalize, Crop\n'), ((572, 594), 'numpy.array', 'np.array', (['[[[1], [2]]]'], {}), '([[[1], [2]]])\n', (580, 594), True, 'import numpy as np\n'), ((623, 645), 'numpy.array', 'np.array', (['[[[2], [4]]]'], {}), '([[[2], [4]]])\n', (631, 645), True, 'import numpy as np\n'), ((676, 691), 'psfdataset.transforms.spatial.Normalize', 'Normalize', (['(3)', '(1)'], {}), '(3, 1)\n', (685, 691), False, 'from psfdataset.transforms.spatial import Normalize, Crop\n'), ((717, 733), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (725, 733), True, 'import numpy as np\n'), ((775, 791), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (783, 791), True, 'import numpy as np\n'), ((832, 848), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (840, 848), True, 'import numpy as np\n'), ((879, 903), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (892, 903), False, 'import pytest\n'), ((922, 943), 'psfdataset.PSFZippedDataset', 'PSFZippedDataset', (['ds1'], {}), '(ds1)\n', (938, 943), False, 'from psfdataset import PSFZippedDataset\n'), ((961, 984), 'psfdataset.PSFZippedDataset', 'PSFZippedDataset', (['[ds1]'], {}), '([ds1])\n', (977, 984), False, 'from psfdataset import PSFZippedDataset\n'), ((1002, 1030), 'psfdataset.PSFZippedDataset', 'PSFZippedDataset', (['(ds1, ds2)'], {}), '((ds1, ds2))\n', (1018, 1030), False, 'from psfdataset import PSFZippedDataset\n'), ((1055, 1077), 'numpy.array', 'np.array', (['[[[3], [6]]]'], {}), '([[[3], [6]]])\n', (1063, 1077), True, 'import numpy as np\n'), ((1245, 1268), 'numpy.array', 'np.array', (['[0, 1, -1, 0]'], {}), '([0, 1, -1, 0])\n', (1253, 1268), True, 'import numpy as np\n'), ((1318, 1340), 'numpy.array', 'np.array', (['[0, 2, 0, 1]'], {}), '([0, 2, 0, 1])\n', (1326, 1340), True, 'import numpy as np\n')] |
import numpy as np
from argparse import ArgumentParser
from pathlib import Path
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_rand_score
parser = ArgumentParser()
parser.add_argument("--source", type=Path, required=True)
parser.add_argument("--phn", type=Path, required=True)
parser.add_argument("--n_components", type=int, required=True)
parser.add_argument("--trial", type=int, default=1)
args = parser.parse_args()
source = np.load(args.source)
phn = np.load(args.phn)
keys = sorted(list(source.keys()))
datas = np.concatenate([source[key] for key in keys], axis=0)
labels = np.concatenate([phn[key] for key in keys], axis=0)
N, D = datas.shape
aris = np.zeros(args.trial)
for t in range(args.trial):
gmm = GaussianMixture(n_components=args.n_components, max_iter=1000)
gmm.fit(datas)
lab = gmm.predict(datas)
aris[t] = adjusted_rand_score(lab, labels)
print(f"ARI: {aris}")
print(f"summary: {aris.mean()} +- {aris.std()}")
| [
"sklearn.mixture.GaussianMixture",
"argparse.ArgumentParser",
"sklearn.metrics.adjusted_rand_score",
"numpy.zeros",
"numpy.concatenate",
"numpy.load"
] | [((182, 198), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (196, 198), False, 'from argparse import ArgumentParser\n'), ((466, 486), 'numpy.load', 'np.load', (['args.source'], {}), '(args.source)\n', (473, 486), True, 'import numpy as np\n'), ((493, 510), 'numpy.load', 'np.load', (['args.phn'], {}), '(args.phn)\n', (500, 510), True, 'import numpy as np\n'), ((556, 609), 'numpy.concatenate', 'np.concatenate', (['[source[key] for key in keys]'], {'axis': '(0)'}), '([source[key] for key in keys], axis=0)\n', (570, 609), True, 'import numpy as np\n'), ((619, 669), 'numpy.concatenate', 'np.concatenate', (['[phn[key] for key in keys]'], {'axis': '(0)'}), '([phn[key] for key in keys], axis=0)\n', (633, 669), True, 'import numpy as np\n'), ((697, 717), 'numpy.zeros', 'np.zeros', (['args.trial'], {}), '(args.trial)\n', (705, 717), True, 'import numpy as np\n'), ((756, 818), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'args.n_components', 'max_iter': '(1000)'}), '(n_components=args.n_components, max_iter=1000)\n', (771, 818), False, 'from sklearn.mixture import GaussianMixture\n'), ((883, 915), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['lab', 'labels'], {}), '(lab, labels)\n', (902, 915), False, 'from sklearn.metrics import adjusted_rand_score\n')] |
"""Setup for signac, signac-flow, signac-dashboard for this study."""
import os
import pathlib
import sys
import flow
import numpy as np
from flow import environments
# from reproducibility_project.src.analysis.equilibration import is_equilibrated
class Project(flow.FlowProject):
"""Subclass of FlowProject to provide custom methods and attributes."""
def __init__(self):
super().__init__()
current_path = pathlib.Path(os.getcwd()).absolute()
self.data_dir = current_path.parents[1] / "data"
self.ff_fn = self.data_dir / "forcefield.xml"
# ____________________________________________________________________________
"""Setting progress label"""
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
def lammps_created_box(job):
"""Check if the lammps simulation box has been created for the job."""
return job.isfile("box.lammps")
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
def lammps_copy_files(job):
"""Check if the submission scripts have been copied over for the job."""
return job.isfile("submit.pbs")
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
def lammps_minimized_equilibrated_nvt(job):
"""Check if the lammps minimization step has run for the job."""
return job.isfile("minimized.restart_0")
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@flow.with_job
def lammps_equilibrated_npt(job):
"""Check if the lammps equilibration step has run and passed is_equilibrated for the job."""
import pathlib
import numpy as np
from reproducibility_project.src.analysis.equilibration import (
is_equilibrated,
)
project = job._project
p = pathlib.Path(".")
list_of_filenames = list(p.glob("eqlog*.txt"))
# grab the filename with the largest number
counter = 0
latest_eqdata = False
for file in list_of_filenames:
step = int(file.name[5:].split(".")[0])
if step > counter:
counter = step
latest_eqdata = file
if latest_eqdata:
data = np.genfromtxt(latest_eqdata.name, skip_header=1)
check_equil = np.all(
[
is_equilibrated(data[:, 1])[0],
is_equilibrated(data[:, 2])[0],
is_equilibrated(data[:, 4])[0],
is_equilibrated(data[:, 6])[0],
]
)
else:
check_equil = False
return job.isfile("equilibrated_npt.restart") and check_equil
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
def lammps_production(job):
"""Check if the lammps production step has run for the job."""
return job.isfile("production.restart")
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
def lammps_density_data(job):
"""Check if lammps has output density information for the job."""
return job.isfile("density.dat")
@Project.label
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
def lammps_created_gsd(job):
"""Check if the mdtraj has converted the production to a gsd trajectory for the job."""
return job.isfile("prod.gsd")
# _____________________________________________________________________
"""Setting up workflow operation"""
@Project.operation
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@Project.post(lammps_created_box)
@flow.with_job
def built_lammps(job):
"""Create initial configurations of the system statepoint."""
import foyer
from mbuild.formats.lammpsdata import write_lammpsdata
from reproducibility_project.src.molecules.system_builder import (
construct_system,
)
from reproducibility_project.src.utils.forcefields import load_ff
system = construct_system(job.sp)[0]
parmed_structure = system.to_parmed()
ff = load_ff(job.sp.forcefield_name)
system.save(
"box.json"
) # save the compound as a json object for reading back in to mbuild
typed_surface = ff.apply(parmed_structure)
typed_surface.save(
"box.top"
) # save to gromacs topology for later conversions in mdtraj
write_lammpsdata(
typed_surface,
"box.lammps",
atom_style="full",
unit_style="real",
mins=[system.get_boundingbox().vectors[0]],
maxs=[system.get_boundingbox().vectors[1]],
use_rb_torsions=True,
) # write out a lammps topology
return
@Project.operation
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@Project.pre(lammps_created_box)
@Project.post(lammps_copy_files)
@flow.with_job
@flow.cmd
def lammps_cp_files(job):
"""Copy over run files for lammps and the PBS scheduler."""
lmps_submit_path = "../../src/engine_input/lammps/VU_scripts/submit.pbs"
lmps_run_path = "../../src/engine_input/lammps/input_scripts/in.*"
msg = f"cp {lmps_submit_path} {lmps_run_path} ./"
return msg
@Project.operation
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@Project.pre(lammps_copy_files)
@Project.post(lammps_minimized_equilibrated_nvt)
@flow.with_job
@flow.cmd
def lammps_em_nvt(job):
"""Run energy minimization and nvt ensemble."""
in_script_name = "in.minimize"
r_cut = job.sp.r_cut * 10
modify_submit_scripts(in_script_name, job.id)
msg = f"qsub -v 'infile={in_script_name}, seed={job.sp.replica+1}, T={job.sp.temperature}, P={job.sp.pressure}, rcut={r_cut}' submit.pbs"
return msg
@Project.operation
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@Project.pre(lammps_minimized_equilibrated_nvt)
@Project.post(lammps_equilibrated_npt)
@flow.with_job
@flow.cmd
def lammps_equil_npt(job):
"""Run npt ensemble equilibration."""
in_script_name = "in.equilibration"
modify_submit_scripts(in_script_name, job.id)
r_cut = job.sp.r_cut * 10
msg = f"qsub -v 'infile={in_script_name}, seed={job.sp.replica+1}, T={job.sp.temperature}, P={job.sp.pressure}, rcut={r_cut}' submit.pbs"
return msg
@Project.operation
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@Project.pre(lammps_equilibrated_npt)
@Project.post(lammps_production)
@flow.with_job
@flow.cmd
def lammps_prod(job):
"""Run npt ensemble production."""
in_script_name = "in.production"
modify_submit_scripts(in_script_name, job.id)
r_cut = job.sp.r_cut * 10
msg = f"qsub -v 'infile={in_script_name}, seed={job.sp.replica+1}, T={job.sp.temperature}, P={job.sp.pressure}, rcut={r_cut}' submit.pbs"
return msg
@Project.operation
@Project.pre(lambda j: j.sp.engine == "lammps-VU")
@Project.pre(lammps_production)
@Project.post(lambda j: j.isfile("prod.gsd"))
@flow.with_job
@flow.cmd
def lammps_calc_rdf(job):
"""Create an rdf from the gsd file using Freud analysis scripts."""
# Create rdf data from the production run
import mbuild as mb
import mdtraj
traj = md.load("prod.xtc", top="box.gro")
traj.save("prod.gsd")
# TODO: Use freud rdf PR to create an RDF from the gsd file
return
def modify_submit_scripts(filename, jobid, cores=8):
"""Modify the submission scripts to include the job and simulation type in the header."""
with open("submit.pbs", "r") as f:
lines = f.readlines()
lines[1] = "#PBS -N {}-{}\n".format(filename[3:], jobid[0:4])
with open("submit.pbs", "w") as f:
f.writelines(lines)
return
if __name__ == "__main__":
pr = Project()
pr.main()
| [
"reproducibility_project.src.molecules.system_builder.construct_system",
"pathlib.Path",
"os.getcwd",
"reproducibility_project.src.utils.forcefields.load_ff",
"numpy.genfromtxt",
"reproducibility_project.src.analysis.equilibration.is_equilibrated"
] | [((1731, 1748), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (1743, 1748), False, 'import pathlib\n'), ((3808, 3839), 'reproducibility_project.src.utils.forcefields.load_ff', 'load_ff', (['job.sp.forcefield_name'], {}), '(job.sp.forcefield_name)\n', (3815, 3839), False, 'from reproducibility_project.src.utils.forcefields import load_ff\n'), ((2097, 2145), 'numpy.genfromtxt', 'np.genfromtxt', (['latest_eqdata.name'], {'skip_header': '(1)'}), '(latest_eqdata.name, skip_header=1)\n', (2110, 2145), True, 'import numpy as np\n'), ((3729, 3753), 'reproducibility_project.src.molecules.system_builder.construct_system', 'construct_system', (['job.sp'], {}), '(job.sp)\n', (3745, 3753), False, 'from reproducibility_project.src.molecules.system_builder import construct_system\n'), ((449, 460), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (458, 460), False, 'import os\n'), ((2206, 2233), 'reproducibility_project.src.analysis.equilibration.is_equilibrated', 'is_equilibrated', (['data[:, 1]'], {}), '(data[:, 1])\n', (2221, 2233), False, 'from reproducibility_project.src.analysis.equilibration import is_equilibrated\n'), ((2254, 2281), 'reproducibility_project.src.analysis.equilibration.is_equilibrated', 'is_equilibrated', (['data[:, 2]'], {}), '(data[:, 2])\n', (2269, 2281), False, 'from reproducibility_project.src.analysis.equilibration import is_equilibrated\n'), ((2302, 2329), 'reproducibility_project.src.analysis.equilibration.is_equilibrated', 'is_equilibrated', (['data[:, 4]'], {}), '(data[:, 4])\n', (2317, 2329), False, 'from reproducibility_project.src.analysis.equilibration import is_equilibrated\n'), ((2350, 2377), 'reproducibility_project.src.analysis.equilibration.is_equilibrated', 'is_equilibrated', (['data[:, 6]'], {}), '(data[:, 6])\n', (2365, 2377), False, 'from reproducibility_project.src.analysis.equilibration import is_equilibrated\n')] |
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utils to assist with dataclass-related operations."""
import itertools
from dataclasses import is_dataclass
import numpy as np
import pandas as pd
def dataclass_eq(base_dataclass: object, other: object) -> bool:
"""Check if base_dataclass is equal to the other object, with proper handling for numpy array fields.
Args:
base_dataclass: Base dataclass to compare against.
other: Other object to compare against the base dataclass.
Raises:
ValueError: If base_dataclass is not an instance of a dataclass.
Returns:
Flag indicating whether base_dataclass and the other object are considered equal.
"""
if not is_dataclass(base_dataclass):
raise ValueError(f"'{base_dataclass.__class__.__name__}' is not a dataclass!")
# Check whether the two objects point to the same instance
if base_dataclass is other:
return True
# Check whether the two objects are both dataclasses of the same type
if base_dataclass.__class__ is not other.__class__:
return False
# Check whether the dataclasses have equal values in all members
base_tuple = vars(base_dataclass).values()
other_tuple = vars(other).values()
return all(_dataclass_member_eq(base_mem, other_mem) for base_mem, other_mem in zip(base_tuple, other_tuple))
def _dataclass_member_eq(base: object, other: object) -> bool:
"""Check if dataclass members base and other are equal, with proper handling for numpy arrays.
Args:
base: Base object to compare against.
other: Other object to compare against the base object.
Returns:
Bool flag indicating whether objects a and b are equal.
"""
# Objects are equal if they point to the same instance
if base is other:
return True
# If both objects are lists, check equality for all members
if isinstance(base, list) and isinstance(other, list):
return all(_dataclass_member_eq(base_i, other_i) for base_i, other_i in itertools.zip_longest(base, other))
# If both objects are np arrays, delegate equality check to numpy's built-in operation
if isinstance(base, np.ndarray) and isinstance(other, np.ndarray):
return bool(np.array_equal(base, other))
# If both objects are pd dataframes, delegate equality check to pandas' built-in operation
if isinstance(base, pd.DataFrame) and isinstance(other, pd.DataFrame):
return bool(pd.DataFrame.equals(base, other))
# Equality checks for all other types are delegated to the standard equality check
try:
return bool(base == other)
except (TypeError, ValueError):
return False
| [
"itertools.zip_longest",
"dataclasses.is_dataclass",
"numpy.array_equal",
"pandas.DataFrame.equals"
] | [((738, 766), 'dataclasses.is_dataclass', 'is_dataclass', (['base_dataclass'], {}), '(base_dataclass)\n', (750, 766), False, 'from dataclasses import is_dataclass\n'), ((2288, 2315), 'numpy.array_equal', 'np.array_equal', (['base', 'other'], {}), '(base, other)\n', (2302, 2315), True, 'import numpy as np\n'), ((2508, 2540), 'pandas.DataFrame.equals', 'pd.DataFrame.equals', (['base', 'other'], {}), '(base, other)\n', (2527, 2540), True, 'import pandas as pd\n'), ((2069, 2103), 'itertools.zip_longest', 'itertools.zip_longest', (['base', 'other'], {}), '(base, other)\n', (2090, 2103), False, 'import itertools\n')] |
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import pickle
from helper import showImages, showSidebySide
from camera_calibrate import undistortImages
cameraCalibration = pickle.load( open('./pickled_data/camera_calibration.p', 'rb' ) )
mtx, dist = map(cameraCalibration.get, ('mtx', 'dist'))
## Read test image
testImages = list(map(lambda imageFileName: cv2.imread(imageFileName),
glob.glob('./test_images/st*.jpg')))
testImagesName = glob.glob('./test_images/st*.jpg')
print("test images num:", len(testImages))
index = 1
print("test images name:", testImagesName[index])
## Convert to RGB image
## test4.img to test
#testImage = cv2.imread('./test_images/test4.jpg')
original = cv2.cvtColor(testImages[index],cv2.COLOR_BGR2RGB)
undist = cv2.undistort(original, mtx, dist, None, mtx)
xSize, ySize, _ = undist.shape
copy = undist.copy()
bottomY = 720
topY = 500
left1 = (201, bottomY)
left1_x, left1_y = left1
left2 = (528, topY)
left2_x, left2_y = left2
right1 = (768, topY)
right1_x, right1_y = right1
right2 = (1100, bottomY)
right2_x, right2_y = right2
color = [255, 0, 0]
w = 2
cv2.line(copy, left1, left2, color, w)
cv2.line(copy, left2, right1, color, w)
cv2.line(copy, right1, right2, color, w)
cv2.line(copy, right2, left1, color, w)
showSidebySide(undist, copy, "original", "source_line_drawed")
gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
src = np.float32([
[left2_x, left2_y],
[right1_x, right1_y],
[right2_x, right2_y],
[left1_x, left1_y]
])
nX = gray.shape[1]
nY = gray.shape[0]
img_size = (nX, nY)
offset = 200
dst = np.float32([
[offset, 0],
[img_size[0]-offset, 0],
[img_size[0]-offset, img_size[1]],
[offset, img_size[1]]
])
img_size = (gray.shape[1], gray.shape[0])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
warped = cv2.warpPerspective(undist, M, img_size)
showSidebySide(undist, warped, "original", "Perspective_transformed")
#pickle.dump( { 'M': M, 'Minv': Minv }, open('./pickled_data/perspective_transform.p', 'wb'))
print(M)
print(Minv)
| [
"helper.showSidebySide",
"cv2.getPerspectiveTransform",
"cv2.line",
"cv2.undistort",
"cv2.warpPerspective",
"cv2.cvtColor",
"cv2.imread",
"numpy.float32",
"glob.glob"
] | [((488, 522), 'glob.glob', 'glob.glob', (['"""./test_images/st*.jpg"""'], {}), "('./test_images/st*.jpg')\n", (497, 522), False, 'import glob\n'), ((734, 784), 'cv2.cvtColor', 'cv2.cvtColor', (['testImages[index]', 'cv2.COLOR_BGR2RGB'], {}), '(testImages[index], cv2.COLOR_BGR2RGB)\n', (746, 784), False, 'import cv2\n'), ((793, 838), 'cv2.undistort', 'cv2.undistort', (['original', 'mtx', 'dist', 'None', 'mtx'], {}), '(original, mtx, dist, None, mtx)\n', (806, 838), False, 'import cv2\n'), ((1142, 1180), 'cv2.line', 'cv2.line', (['copy', 'left1', 'left2', 'color', 'w'], {}), '(copy, left1, left2, color, w)\n', (1150, 1180), False, 'import cv2\n'), ((1181, 1220), 'cv2.line', 'cv2.line', (['copy', 'left2', 'right1', 'color', 'w'], {}), '(copy, left2, right1, color, w)\n', (1189, 1220), False, 'import cv2\n'), ((1221, 1261), 'cv2.line', 'cv2.line', (['copy', 'right1', 'right2', 'color', 'w'], {}), '(copy, right1, right2, color, w)\n', (1229, 1261), False, 'import cv2\n'), ((1262, 1301), 'cv2.line', 'cv2.line', (['copy', 'right2', 'left1', 'color', 'w'], {}), '(copy, right2, left1, color, w)\n', (1270, 1301), False, 'import cv2\n'), ((1302, 1364), 'helper.showSidebySide', 'showSidebySide', (['undist', 'copy', '"""original"""', '"""source_line_drawed"""'], {}), "(undist, copy, 'original', 'source_line_drawed')\n", (1316, 1364), False, 'from helper import showImages, showSidebySide\n'), ((1373, 1413), 'cv2.cvtColor', 'cv2.cvtColor', (['undist', 'cv2.COLOR_BGR2GRAY'], {}), '(undist, cv2.COLOR_BGR2GRAY)\n', (1385, 1413), False, 'import cv2\n'), ((1420, 1520), 'numpy.float32', 'np.float32', (['[[left2_x, left2_y], [right1_x, right1_y], [right2_x, right2_y], [left1_x,\n left1_y]]'], {}), '([[left2_x, left2_y], [right1_x, right1_y], [right2_x, right2_y],\n [left1_x, left1_y]])\n', (1430, 1520), True, 'import numpy as np\n'), ((1686, 1802), 'numpy.float32', 'np.float32', (['[[offset, 0], [img_size[0] - offset, 0], [img_size[0] - offset, img_size[1]\n ], [offset, img_size[1]]]'], {}), '([[offset, 0], [img_size[0] - offset, 0], [img_size[0] - offset,\n img_size[1]], [offset, img_size[1]]])\n', (1696, 1802), True, 'import numpy as np\n'), ((1933, 1970), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (1960, 1970), False, 'import cv2\n'), ((1978, 2015), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['dst', 'src'], {}), '(dst, src)\n', (2005, 2015), False, 'import cv2\n'), ((2025, 2065), 'cv2.warpPerspective', 'cv2.warpPerspective', (['undist', 'M', 'img_size'], {}), '(undist, M, img_size)\n', (2044, 2065), False, 'import cv2\n'), ((2066, 2135), 'helper.showSidebySide', 'showSidebySide', (['undist', 'warped', '"""original"""', '"""Perspective_transformed"""'], {}), "(undist, warped, 'original', 'Perspective_transformed')\n", (2080, 2135), False, 'from helper import showImages, showSidebySide\n'), ((434, 468), 'glob.glob', 'glob.glob', (['"""./test_images/st*.jpg"""'], {}), "('./test_images/st*.jpg')\n", (443, 468), False, 'import glob\n'), ((385, 410), 'cv2.imread', 'cv2.imread', (['imageFileName'], {}), '(imageFileName)\n', (395, 410), False, 'import cv2\n')] |
# Chord classification
#
# The task is to classify chords (or more precisely pitch class sets) based on chromagram features.
#
# We use a single Beatles song with just two chord and silence.
#
# The task is in fact multilabel classification, since each pitch class is generally independent.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import arrow
import os
import scipy.signal
import scipy.misc
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.cross_validation import train_test_split
from sklearn.metrics import hamming_loss, accuracy_score
from keras.models import model_from_yaml
from tfr.spectrogram import create_window
from tfr.files import load_wav
from tfr.analysis import split_to_blocks
from tfr.reassignment import chromagram
## Load model
model_id = 'model_2016-04-16-20-52-03'
model_dir = '../data/beatles/models/' + model_id
model_arch = model_dir + '/' + model_id + '_arch.yaml'
model_weights = model_dir + '/' + model_id + '_weights.h5'
print('loading model:', model_arch)
model = model_from_yaml(open(model_arch).read())
print('loading model wieghts:', model_weights)
model.load_weights(model_weights)
## Load data
song = "The_Beatles/03_-_A_Hard_Day's_Night/05_-_And_I_Love_Her"
audio_file = '../data/beatles/audio-cd/' + song + '.wav'
### Chromagram features
# labels_file = '../data/beatles/chord-pcs/4096_2048/'+song+'.pcs'
# features_file = '../data/beatles/chromagram/block=4096_hop=2048_bins=-48,67_div=1/'+song+'.npz'
# data = np.load(features_file)
# features = data['X']
# times = data['times']
### Chord labels
# df_labels = pd.read_csv(labels_file, sep='\t')
# labels_pcs = df_labels[df_labels.columns[1:]].as_matrix()
block_size = 4096
hop_size = 2048
print('loading audio:', audio_file)
x, fs = load_wav(audio_file)
print('splitting audio to blocks')
x_blocks, times = split_to_blocks(x, block_size, hop_size)
w = create_window(block_size)
print('computing chromagram')
X_chromagram = chromagram(x_blocks, w, fs, to_log=True)
features = X_chromagram
## Data preprocessing
### Features
print('scaling the input features')
# scaler = MinMaxScaler()
# X = scaler.fit_transform(features).astype('float32')
# TODO: there's a bug: should be + 120 on both places!!!
X = (features.astype('float32') - 120) / (features.shape[1] - 120)
# reshape for 1D convolution
def conv_reshape(X):
return X.reshape(X.shape[0], X.shape[1], 1)
X_conv = conv_reshape(X)
# visualization
#
# def plot_labels(l, title, fifths=False, resample=True, exact=False):
# if fifths:
# l = l[:,np.arange(12)*7 % 12]
# l = l.T
#
# # file = model_dir+'/'+model_id+'_'+title+'.png'
#
# if exact:
# pass
# # scipy.misc.imsave(file, l)
# else:
# if resample:
# l = scipy.signal.resample(l, 200, axis=1)
# plt.figure(figsize=(20, 2))
# plt.imshow(l, cmap='gray', interpolation='none')
# plt.tight_layout()
# plt.show()
# # plt.savefig(file)
# predicted labels
# labels_pred_full = model.predict_classes(X_conv)
# plot_labels(labels_pred_full, 'pred')
# plot_labels(labels_pred_full, 'exact_pred', exact=True)
# in case of input features with original time order we can apply median filter:
# medfilt(labels_pred_full, (15, 1))
model.compile(class_mode='binary', loss='binary_crossentropy', optimizer='adam')
y_pred = (model.predict(X_conv) >= 0.5).astype(np.int32)
pred_file = '../data/beatles/chord-pcs-predicted/%d_%d/%s/%s.tsv' % (block_size, hop_size, model_id, song)
pred_dir = os.path.dirname(pred_file)
os.makedirs(pred_dir, exist_ok=True)
np.savetxt(pred_file, y_pred, delimiter='\t', fmt='%d')
# def plot_labels_true_pred_diff():
# def plot2d(x):
# plt.imshow(scipy.signal.resample(x.T, 200, axis=1), cmap='gray', interpolation='none')
# plt.figure(figsize=(20, 6))
# ax = plt.subplot(3,1,1)
# plot2d(labels_pcs)
# ax.set_title('true')
# ax = plt.subplot(3,1,2)
# plot2d(labels_pred_full)
# ax.set_title('predicted')
# ax = plt.subplot(3,1,3)
# plot2d(labels_pred_full - labels_pcs)
# ax.set_title('difference')
# plt.tight_layout()
# plt.show()
#
# plot_labels_true_pred_diff()
| [
"tfr.analysis.split_to_blocks",
"tfr.files.load_wav",
"os.makedirs",
"os.path.dirname",
"numpy.savetxt",
"tfr.spectrogram.create_window",
"tfr.reassignment.chromagram"
] | [((1798, 1818), 'tfr.files.load_wav', 'load_wav', (['audio_file'], {}), '(audio_file)\n', (1806, 1818), False, 'from tfr.files import load_wav\n'), ((1872, 1912), 'tfr.analysis.split_to_blocks', 'split_to_blocks', (['x', 'block_size', 'hop_size'], {}), '(x, block_size, hop_size)\n', (1887, 1912), False, 'from tfr.analysis import split_to_blocks\n'), ((1917, 1942), 'tfr.spectrogram.create_window', 'create_window', (['block_size'], {}), '(block_size)\n', (1930, 1942), False, 'from tfr.spectrogram import create_window\n'), ((1988, 2028), 'tfr.reassignment.chromagram', 'chromagram', (['x_blocks', 'w', 'fs'], {'to_log': '(True)'}), '(x_blocks, w, fs, to_log=True)\n', (1998, 2028), False, 'from tfr.reassignment import chromagram\n'), ((3562, 3588), 'os.path.dirname', 'os.path.dirname', (['pred_file'], {}), '(pred_file)\n', (3577, 3588), False, 'import os\n'), ((3589, 3625), 'os.makedirs', 'os.makedirs', (['pred_dir'], {'exist_ok': '(True)'}), '(pred_dir, exist_ok=True)\n', (3600, 3625), False, 'import os\n'), ((3626, 3681), 'numpy.savetxt', 'np.savetxt', (['pred_file', 'y_pred'], {'delimiter': '"""\t"""', 'fmt': '"""%d"""'}), "(pred_file, y_pred, delimiter='\\t', fmt='%d')\n", (3636, 3681), True, 'import numpy as np\n')] |
# import bagpy
from bagpy import bagreader
import pandas as pd
import numpy as np
import pickle as pkl
import os
import rosbag
class RobotTraj():
def __init__(self) -> None:
self.desired_topic = '/anna/end_effector/states'
self.other_topics = ['/anna/joint/states', '/anna/keyframe/states']
self.users =['user2','user4','user7','user8','user10','user14','user16','user18','user20'] # narration
self.users += ['user3','user5','user6','user9','user11','user12','user15','user17','user19'] # natural
self.tasks = ['cutting', 'box']
self.demo_types = ['video', 'kt']
self.data_dir = '/Volumes/Seagate Portable Drive/audio_study/kinesthetic/'
self.path_len = {}
self.path_len_time = {}
def compute_path_len(self):
# iterate through users
for user in self.users:
for task in self.tasks:
for demo_type in self.demo_types:
key = user+'_'+task+'_'+demo_type
if key not in self.path_len_time:
self.path_len_time[key] = {}
print(user,task,demo_type)
bagfiles = []
bag_path = os.path.join(self.data_dir,user,task,demo_type)
for dirname, dirs, files in os.walk(bag_path):
for filename in files:
fn,ex = os.path.splitext(filename)
if ex == '.bag':
bagfiles.append(filename)
bagfiles.sort(reverse=True)
bag = os.path.join(bag_path,bagfiles[0])
bag_audio = rosbag.Bag(bag)
total_dist = 0.
i = 0
for idx, data in enumerate(bag_audio.read_messages(topics=[self.desired_topic])):
topic, msg, t = data
# print(msg)
if i==0:
x,y,z = msg.pose.position.x,msg.pose.position.y,msg.pose.position.z
old = np.array((x,y,z))
t_start = t.secs*pow(10,9) + t.nsecs
curr_time = 0
else:
x,y,z = msg.pose.position.x,msg.pose.position.y,msg.pose.position.z
newl = np.array((x,y,z))
total_dist += np.linalg.norm(newl-old)
t_new = t.secs*pow(10,9) + t.nsecs
curr_time = t_new-t_start
old = np.copy(newl)
# print(curr_time*pow(10,-9))
curr_time = curr_time*pow(10,-9)
# print(round(curr_time,1))
# save distance upto every 0.1 second
self.path_len_time[key][round(curr_time,1)] = total_dist
i+=1
print(f'Total distance traveled by the arm is {total_dist}')
self.path_len[key] = total_dist
def save_path_len(self):
self.compute_path_len()
filename = '../../data/robot_path_len.pkl'
with open(filename, 'wb') as handle:
pkl.dump(self.path_len, handle, protocol=pkl.HIGHEST_PROTOCOL)
filename = '../../data/robot_path_len_time.pkl'
with open(filename, 'wb') as handle:
pkl.dump(self.path_len_time, handle, protocol=pkl.HIGHEST_PROTOCOL)
def main():
traj = RobotTraj()
traj.save_path_len()
if __name__ == "__main__":
main() | [
"numpy.copy",
"pickle.dump",
"os.path.join",
"os.path.splitext",
"rosbag.Bag",
"numpy.array",
"numpy.linalg.norm",
"os.walk"
] | [((3342, 3404), 'pickle.dump', 'pkl.dump', (['self.path_len', 'handle'], {'protocol': 'pkl.HIGHEST_PROTOCOL'}), '(self.path_len, handle, protocol=pkl.HIGHEST_PROTOCOL)\n', (3350, 3404), True, 'import pickle as pkl\n'), ((3519, 3586), 'pickle.dump', 'pkl.dump', (['self.path_len_time', 'handle'], {'protocol': 'pkl.HIGHEST_PROTOCOL'}), '(self.path_len_time, handle, protocol=pkl.HIGHEST_PROTOCOL)\n', (3527, 3586), True, 'import pickle as pkl\n'), ((1221, 1271), 'os.path.join', 'os.path.join', (['self.data_dir', 'user', 'task', 'demo_type'], {}), '(self.data_dir, user, task, demo_type)\n', (1233, 1271), False, 'import os\n'), ((1318, 1335), 'os.walk', 'os.walk', (['bag_path'], {}), '(bag_path)\n', (1325, 1335), False, 'import os\n'), ((1624, 1659), 'os.path.join', 'os.path.join', (['bag_path', 'bagfiles[0]'], {}), '(bag_path, bagfiles[0])\n', (1636, 1659), False, 'import os\n'), ((1691, 1706), 'rosbag.Bag', 'rosbag.Bag', (['bag'], {}), '(bag)\n', (1701, 1706), False, 'import rosbag\n'), ((1420, 1446), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1436, 1446), False, 'import os\n'), ((2119, 2138), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (2127, 2138), True, 'import numpy as np\n'), ((2405, 2424), 'numpy.array', 'np.array', (['(x, y, z)'], {}), '((x, y, z))\n', (2413, 2424), True, 'import numpy as np\n'), ((2465, 2491), 'numpy.linalg.norm', 'np.linalg.norm', (['(newl - old)'], {}), '(newl - old)\n', (2479, 2491), True, 'import numpy as np\n'), ((2642, 2655), 'numpy.copy', 'np.copy', (['newl'], {}), '(newl)\n', (2649, 2655), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.