code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from keras import Model
from keras import models
from keras import optimizers
from keras import Sequential
from keras import layers
from keras import losses
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
import keras.backend as K
import keras.applications
from keras import applications
from keras import utils
import cv2
import numpy as np
import os
import math
config = {
"ImgPath" : "1. Data Gen\\1. Data\\1X9A1712.jpg" #"Put your image path here"
,"VGG16InputSize" : (224,224)
,"AnchorBox" : {
"AspectRatioW_div_W" : [1/3,1/2,3/4,1]
,"Scales" : [1/2,3/4,1,3/2]
}
}
def main(): ############ MAIN FUNCTION - START HERE ############
    """Demo pipeline for a single image: extract VGG16 feature maps, map the
    feature-map grid back onto the input image to build anchor boxes, then run
    one forward pass through an (untrained) region-proposal network and print
    the output shapes.
    """
    # Get vgg model (ImageNet weights, classifier head removed so the output
    # is the convolutional feature map)
    vggmodel = applications.VGG16(include_top=False,weights='imagenet')
    # Extract features for images (used dictionary comprehension to stop getting warning messages from Keras)
    list_of_images = [cv2.imread(config["ImgPath"])]
    array_of_prediction_ready_images = pre_process_image_for_vgg(list_of_images)
    array_of_feature_maps = vggmodel.predict(array_of_prediction_ready_images)
    # Find conversions from feature map (CNN output) to input image
    feature_to_input_x_scale, feature_to_input_y_scale, feature_to_input_x_offset, feature_to_input_y_offset = find_feature_map_to_input_scale_and_offset(array_of_prediction_ready_images[0],array_of_feature_maps[0])
    # get potential boxes, aka anchor boxes, centred on each feature-map cell
    potential_boxes = get_potential_boxes_for_region_proposal(array_of_prediction_ready_images[0],array_of_feature_maps[0],feature_to_input_x_scale, feature_to_input_y_scale, feature_to_input_x_offset, feature_to_input_y_offset)
    # Create region proposal network (one score + 4 deltas per anchor box)
    rpn_model = create_region_proposal_network(len(potential_boxes))
    # Output following (height, width, anchor_num) (height, width, anchor_num * 4)
    predicted_scores_for_anchor_boxes, predicted_adjustments = rpn_model.predict(array_of_feature_maps)
    print(f"predicted_scores_for_anchor_boxes.shape = {predicted_scores_for_anchor_boxes.shape}, predicted_adjustments.shape = {predicted_adjustments.shape}")
    print(f"But why is there the ,5,5, bit? I don't know which ones to choose now to get the predicted bounding box?")
def pre_process_image_for_vgg(img):
    """
    Resizes the image(s) to config["VGG16InputSize"], normalises them with the
    VGG16 preprocessing function and returns a batch array of images.

    Args:
        img: a single image as an np.ndarray, or a list of such images.

    Returns:
        np.ndarray of shape (num_images, height, width, channels) ready for
        vggmodel.predict().

    Raises:
        TypeError: if img is neither an np.ndarray nor a list.
    """
    if isinstance(img, np.ndarray):  # Single image
        resized_img = cv2.resize(img,config["VGG16InputSize"],interpolation = cv2.INTER_AREA)
        normalised_image = applications.vgg16.preprocess_input(resized_img)
        # Wrap in an outer axis so the result is always a batch of images
        reshaped_to_array_of_images = np.array([normalised_image])
        return reshaped_to_array_of_images
    elif isinstance(img, list):  # list of images
        img_list = img
        resized_img_list = [cv2.resize(image,config["VGG16InputSize"],interpolation = cv2.INTER_AREA) for image in img_list]
        resized_img_array = np.array(resized_img_list)
        normalised_images_array = applications.vgg16.preprocess_input(resized_img_array)
        return normalised_images_array
    else:
        # BUG FIX: previously fell through and silently returned None,
        # which crashed later in vggmodel.predict with a confusing error.
        raise TypeError(f"img must be an np.ndarray or a list of them, got {type(img)}")
def find_feature_map_to_input_scale_and_offset(pre_processed_input_image,feature_maps):
    """
    Computes how to map coordinates on the CNN feature map (its output) back
    onto the pre-processed input image.

    Returns a 4-tuple: (x scale, y scale, x offset, y offset), where the
    offsets place anchor points at the centre of each enlarged feature cell.
    """
    # Spatial sizes of the CNN input image and of its feature-map output
    img_height, img_width = pre_processed_input_image.shape[:2]
    features_height, features_width = feature_maps.shape[:2]
    # One feature-map cell covers this many input-image pixels in each axis
    scale_x = img_width / features_width
    scale_y = img_height / features_height
    # Half-cell offsets centre the anchor points inside each cell
    return scale_x, scale_y, scale_x / 2, scale_y / 2
def get_get_coordinates_of_anchor_points(feature_map,feature_to_input_x,feature_to_input_y,x_offset,y_offset):
    """
    Projects every cell of the CNN feature map onto the input image.

    Returns the anchor centres as a list of dictionaries {"x": x, "y": y},
    ordered with x varying slowest (all y values per x).
    """
    rows, cols = feature_map.shape[0], feature_map.shape[1]
    # Input-image coordinate for each feature-map column / row
    xs = [int(col * feature_to_input_x + x_offset) for col in range(cols)]
    ys = [int(row * feature_to_input_y + y_offset) for row in range(rows)]
    anchor_points = []
    for x in xs:
        for y in ys:
            anchor_points.append({"x": x, "y": y})
    return anchor_points
def get_potential_boxes_for_region_proposal(pre_processed_input_image,feature_maps,feature_to_input_x, feature_to_input_y, x_offset, y_offset):
    """
    Generates the anchor points (the centre of the enlarged feature map) as an (x,y) position on the input image
    and all the potential (anchor) bounding boxes for each anchor point.

    Args:
        pre_processed_input_image: kept for interface compatibility; not used.
        feature_maps: CNN feature map whose cells define the anchor grid.
        feature_to_input_x, feature_to_input_y: scale factors from feature-map
            coordinates to input-image coordinates.
        x_offset, y_offset: offsets centring anchors on the input image.

    Returns:
        A flat list of potential bounding boxes in the form
        {"x1": ..., "y1": ..., "x2": ..., "y2": ...}.
    """
    # FIX: removed unused local `input_image_shape` (dead code).
    # For the feature map (x,y) determine the anchors on the input image (x,y) as array
    coordinate_of_anchor_boxes = get_get_coordinates_of_anchor_points(feature_maps,feature_to_input_x,feature_to_input_y,x_offset,y_offset)
    # Create potential box width/height variants for classification
    boxes_width_height = generate_potential_box_dimensions(config["AnchorBox"],feature_to_input_x,feature_to_input_y)
    list_of_potential_boxes_for_coords = [generate_potential_boxes_for_coord(boxes_width_height,coord) for coord in coordinate_of_anchor_boxes]
    # Flatten the per-anchor lists into a single list of boxes
    potential_boxes = [box for boxes_for_coord in list_of_potential_boxes_for_coords for box in boxes_for_coord]
    return potential_boxes
def generate_potential_box_dimensions(settings,feature_to_input_x,feature_to_input_y):
    """
    Generate the width/height of every potential (anchor) box for one point,
    given the feature-map-to-input scaling in x and y.

    Assumption 1: settings has the following attributes
        AspectRatioW_div_W: list of floats, the aspect ratios of the anchor
            boxes at each location on the feature map
        Scales: list of floats, the scale of the anchor boxes at each
            location on the feature map

    Returns a list of {"Width": int, "Height": int} dictionaries, ordered
    scale-major (all aspect ratios for the first scale, then the next scale).
    """
    dims = []
    for scale in settings["Scales"]:
        # Width grows with the aspect ratio while height shrinks, keeping
        # the box area roughly constant per scale.
        dims.extend(
            {"Width": round(feature_to_input_x * scale * ratio),
             "Height": round(feature_to_input_y * scale / ratio)}
            for ratio in settings["AspectRatioW_div_W"])
    return dims
def generate_potential_boxes_for_coord(box_width_height,coord):
    """
    Builds the corner-form boxes centred on one anchor point.

    Assumption 1: box_width_height is a list of dictionaries, each of the form
        {"Width": positive integer, "Height": positive integer}
    Assumption 2: coord is a dictionary of the form
        {"x": centre-of-box x coordinate, "y": centre-of-box y coordinate}

    Returns a list of {"x1", "y1", "x2", "y2"} dictionaries, one per entry
    of box_width_height.
    """
    centre_x, centre_y = coord["x"], coord["y"]
    boxes = []
    for dim in box_width_height:
        # Truncated half-dimensions, matching integer pixel coordinates
        half_w = int(dim["Width"] / 2)
        half_h = int(dim["Height"] / 2)
        boxes.append({"x1": centre_x - half_w,
                      "y1": centre_y - half_h,
                      "x2": centre_x + half_w,
                      "y2": centre_y + half_h})
    return boxes
def create_region_proposal_network(number_of_potential_bounding_boxes,number_of_feature_map_channels=512):
    """
    Creates and compiles the region proposal network, which takes a feature
    map as input and produces two outputs per spatial location:
        output_deltas: 4 box-regression values per anchor (linear activation)
        output_scores: 1 foreground probability per anchor (sigmoid activation)

    Args:
        number_of_potential_bounding_boxes: anchors per feature-map location.
        number_of_feature_map_channels: channel depth of the feature map
            (should match the last element of vggmodel.predict().shape).

    Returns:
        The compiled keras Model.
    """
    # Input layer: spatial dims are None so any feature-map size is accepted
    feature_map_tile = layers.Input(shape=(None,None,number_of_feature_map_channels),name="RPN_Input_Same")
    # CNN component: shared 3x3 convolution feeding both heads
    convolution_3x3 = layers.Conv2D(filters=512,kernel_size=(3, 3),name="3x3")(feature_map_tile)
    # Output layers
    output_deltas = layers.Conv2D(filters= 4 * number_of_potential_bounding_boxes,kernel_size=(1, 1),activation="linear",kernel_initializer="uniform",name="Output_Deltas")(convolution_3x3)
    output_scores = layers.Conv2D(filters=1 * number_of_potential_bounding_boxes,kernel_size=(1, 1),activation="sigmoid",kernel_initializer="uniform",name="Output_Prob_FG")(convolution_3x3)
    model = Model(inputs=[feature_map_tile], outputs=[output_scores, output_deltas])
    # TODO add loss_cls and smoothL1
    # BUG FIX: the keys of a loss dictionary must match the output layer names
    # ('Output_Prob_FG' and 'Output_Deltas'); the old keys 'scores1'/'deltas1'
    # made model.compile raise a ValueError.
    model.compile(optimizer='adam',
                  loss={'Output_Prob_FG': losses.binary_crossentropy,
                        'Output_Deltas': losses.huber})
    return model
if __name__ == "__main__":
main() | [
"keras.layers.Conv2D",
"keras.Model",
"numpy.array",
"keras.layers.Input",
"keras.applications.vgg16.preprocess_input",
"cv2.resize",
"cv2.imread",
"keras.applications.VGG16"
] | [((747, 804), 'keras.applications.VGG16', 'applications.VGG16', ([], {'include_top': '(False)', 'weights': '"""imagenet"""'}), "(include_top=False, weights='imagenet')\n", (765, 804), False, 'from keras import applications\n'), ((8568, 8660), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(None, None, number_of_feature_map_channels)', 'name': '"""RPN_Input_Same"""'}), "(shape=(None, None, number_of_feature_map_channels), name=\n 'RPN_Input_Same')\n", (8580, 8660), False, 'from keras import layers\n'), ((9182, 9254), 'keras.Model', 'Model', ([], {'inputs': '[feature_map_tile]', 'outputs': '[output_scores, output_deltas]'}), '(inputs=[feature_map_tile], outputs=[output_scores, output_deltas])\n', (9187, 9254), False, 'from keras import Model\n'), ((938, 967), 'cv2.imread', 'cv2.imread', (["config['ImgPath']"], {}), "(config['ImgPath'])\n", (948, 967), False, 'import cv2\n'), ((2606, 2677), 'cv2.resize', 'cv2.resize', (['img', "config['VGG16InputSize']"], {'interpolation': 'cv2.INTER_AREA'}), "(img, config['VGG16InputSize'], interpolation=cv2.INTER_AREA)\n", (2616, 2677), False, 'import cv2\n'), ((2705, 2753), 'keras.applications.vgg16.preprocess_input', 'applications.vgg16.preprocess_input', (['resized_img'], {}), '(resized_img)\n', (2740, 2753), False, 'from keras import applications\n'), ((2792, 2820), 'numpy.array', 'np.array', (['[normalised_image]'], {}), '([normalised_image])\n', (2800, 2820), True, 'import numpy as np\n'), ((8695, 8753), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'name': '"""3x3"""'}), "(filters=512, kernel_size=(3, 3), name='3x3')\n", (8708, 8753), False, 'from keras import layers\n'), ((8810, 8974), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(4 * number_of_potential_bounding_boxes)', 'kernel_size': '(1, 1)', 'activation': '"""linear"""', 'kernel_initializer': '"""uniform"""', 'name': '"""Output_Deltas"""'}), "(filters=4 * number_of_potential_bounding_boxes, 
kernel_size=(\n 1, 1), activation='linear', kernel_initializer='uniform', name=\n 'Output_Deltas')\n", (8823, 8974), False, 'from keras import layers\n'), ((8999, 9165), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(1 * number_of_potential_bounding_boxes)', 'kernel_size': '(1, 1)', 'activation': '"""sigmoid"""', 'kernel_initializer': '"""uniform"""', 'name': '"""Output_Prob_FG"""'}), "(filters=1 * number_of_potential_bounding_boxes, kernel_size=(\n 1, 1), activation='sigmoid', kernel_initializer='uniform', name=\n 'Output_Prob_FG')\n", (9012, 9165), False, 'from keras import layers\n'), ((3085, 3111), 'numpy.array', 'np.array', (['resized_img_list'], {}), '(resized_img_list)\n', (3093, 3111), True, 'import numpy as np\n'), ((3146, 3200), 'keras.applications.vgg16.preprocess_input', 'applications.vgg16.preprocess_input', (['resized_img_array'], {}), '(resized_img_array)\n', (3181, 3200), False, 'from keras import applications\n'), ((2960, 3033), 'cv2.resize', 'cv2.resize', (['image', "config['VGG16InputSize']"], {'interpolation': 'cv2.INTER_AREA'}), "(image, config['VGG16InputSize'], interpolation=cv2.INTER_AREA)\n", (2970, 3033), False, 'import cv2\n')] |
# Licensed under an MIT open source license - see LICENSE
import numpy as np
import pytest
from .. import Dendrogram, periodic_neighbours, Structure
class Test2DimensionalData(object):
    """Dendrogram construction on small hand-built 2D arrays.

    BUG FIX: this is a pytest-style class (plain object, not
    unittest.TestCase), so the old ``self.fail(...)`` calls raised
    AttributeError instead of reporting a failure; they are replaced
    with ``pytest.fail(...)``.
    """

    def test_dendrogramWithNan(self):
        n = np.nan
        data = np.array([[n, n, n, n, n, n, n, n],
                         [n, 4, n, n, n, n, n, n],
                         [n, n, n, 1, n, n, 0, 5],
                         [3, n, n, 2, 3, 2, 0, n]])
        d = Dendrogram.compute(data)

        ########################################
        # Check the trunk elements:

        leaves = [structure for structure in d.trunk if structure.is_leaf]
        branches = [structure for structure in d.trunk if structure not in leaves]

        assert len(leaves) == 2, "We expect two leaves among the lowest structures (the trunk)"
        assert len(branches) == 1, "We expect one branch among the lowest structures (the trunk)"

        for leaf in leaves:
            assert len(leaf.values(subtree=False)) == 1, "Leaves in the trunk are only expected to contain one point"
            assert leaf.parent is None
            assert leaf.ancestor == leaf
            assert leaf.get_npix() == 1
            if leaf.values(subtree=False)[0] == 4:
                assert list(zip(*leaf.indices(subtree=False)))[0] == (1, 1)
            elif leaf.values(subtree=False)[0] == 3:
                assert list(zip(*leaf.indices(subtree=False)))[0] == (3, 0)
            else:
                pytest.fail("Invalid value of flux in one of the leaves")

        ########################################
        # Check properties of the branch:
        branch = branches[0]
        assert branch.parent is None
        assert branch.ancestor == branch
        assert branch.get_npix(subtree=False) == 1  # only pixel is a 0
        assert branch.get_npix(subtree=True) == 7

        assert len(branch.children) == 2
        for leaf in branch.children:
            assert leaf.is_leaf
            assert leaf.ancestor == branch
            assert leaf.parent == branch
            if 5 in leaf.values(subtree=False):
                assert sum(leaf.values(subtree=False)) == 5
            elif 3 in leaf.values(subtree=False):
                assert sum(leaf.values(subtree=False)) == 1 + 2 + 3 + 2
            else:
                pytest.fail("Invalid child of the branch")

    def test_mergeLevelAndHeight(self):
        n = np.nan
        data = np.array([[n, n, n, n, n, ],
                         [n, 4, 2, 5, n, ],
                         [n, n, n, n, 0, ]])
        d = Dendrogram.compute(data)
        branch, leaf4, leaf5 = d.trunk[0], d.structure_at((1, 1)), d.structure_at((1, 3))
        assert leaf4.height == 4.
        assert leaf5.height == 5.
        assert branch.height == 4.

    def test_dendrogramWithConstBackground(self):
        # Test a highly artificial array containing a lot of equal pixels
        data = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 3, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 2, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 3, 4, 3, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 3, 4, 3, 1, 1, 2, 2, 1, 1, 1],
                         [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                         [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ])
        d = Dendrogram.compute(data)
        assert len(d) <= 7
        # Some of the '1' valued pixels get included with the leaves and branches,
        # hence number of structures is currently 7 and not 6 as expected.
        # Fixing this is probably more trouble than it's worth.
        leaf_with_twos = d.structure_at((10, 9))
        assert leaf_with_twos.height == 2

        # Check that all structures contain a reference to the dendrogram
        for structure in d:
            assert structure._dendrogram is d
class Test3DimensionalData(object):
    """Dendrogram construction on the bundled 3D test cube."""

    def setup_method(self, method):
        from ._testdata import data
        self.data = data

    def test_dendrogramComputation(self):
        d = Dendrogram.compute(self.data, min_npix=8, min_delta=0.3, min_value=1.4)

        # This data with these parameters should produce 55 leaves
        assert len(d.leaves) == 55

        # Now check every pixel in the data cube (this takes a while).
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        st_map = -np.ones(self.data.shape, dtype=int)
        for st in d.all_structures:
            st_map[st.indices(subtree=False)] = st.idx

        # check that vmin/vmax/peak are correct
        for st in d.all_structures:
            assert st.vmin == self.data[st.indices(subtree=False)].min()
            assert st.vmax == self.data[st.indices(subtree=False)].max()
            pk_exp = self.data[st.indices(subtree=True)].max()

            ind, pk = st.get_peak(subtree=True)
            assert self.data[ind] == pk
            assert pk_exp == pk

        # The "right" way to do this is loop through indices,
        # and repeatedly call structure_at(). However, this is quite slow
        # structure_at is a thin wrapper around index_map,
        # and we compare index_map to st_map instead
        np.testing.assert_array_equal(st_map, d.index_map)

        # here, we test a few values of structure_at
        for coord in np.indices(self.data.shape).reshape(self.data.ndim, np.prod(self.data.shape)).transpose()[::100]:
            coord = tuple(coord)
            f = self.data[coord]
            structure = d.structure_at(coord)
            if structure is not None:
                assert structure.idx == st_map[coord], "Pixel at {0} is claimed to be part of {1}, but that structure does not contain the coordinate {0}!".format(coord, structure)
            else:
                assert st_map[coord] == -1
class TestNDimensionalData(object):
    """Dendrogram construction on data with more than three dimensions."""

    def test_4dim(self):
        " Test 4-dimensional data "
        # N-dimensional data is hard to conceptualize, so keep it simple:
        # a 5x5x5x5 zero array with a global maximum of 5 at the centre.
        cube = np.zeros((5, 5, 5, 5))
        cube[2, 2, 2, 2] = 5
        # Surround it with points of value 3 ('1:4:2' means indices 1 and 3)
        cube[2, 1:4:2, 2, 2] = 3
        cube[2, 2, 1:4:2, 2] = 3
        cube[2, 2, 2, 1:4:2] = 3
        # A trail of 2s ([0,0,2,2], [1,0,2,2], [2,0,2,2]) connects one of
        # those 3s to a second local maximum of 4
        cube[0:3, 0, 2, 2] = 2
        cube[0, 0, 2, 1] = 4

        # Now dendrogram it:
        d = Dendrogram.compute(cube, min_value=1)

        # Expect exactly two leaves joined by a single branch, which is
        # also the sole trunk element
        leaves = d.leaves
        assert len(leaves) == 2
        branches = [s for s in d.all_structures if s.is_branch]
        assert len(branches) == 1
        assert len(d.trunk) == 1
        assert d.trunk[0] == branches[0]

        # The leaf maxima should sit at [2,2,2,2] and [0,0,2,1]
        expected_peaks = (((2, 2, 2, 2), 5.), ((0, 0, 2, 1), 4.))
        for leaf in leaves:
            assert leaf.get_peak() in expected_peaks
        assert leaves[0].get_peak() != leaves[1].get_peak()

        # A few more properties of the leaf around the global maximum:
        central_leaf = d.structure_at((2, 2, 2, 2))
        assert central_leaf.vmax == 5
        assert central_leaf.vmin == 2
        # Contains 1x '5', 6x '3' and 2x '2'; the remaining '2' is in the branch
        assert central_leaf.get_npix() == 1 + 6 + 2

        # The branch's only own pixel is the '2' at [0,0,2,2]
        branch_pixels = list(zip(*branches[0].indices(subtree=False)))
        assert (branch_pixels, branches[0].values(subtree=False)) == ([(0, 0, 2, 2), ], [2., ])
def test_periodic():
    """A structure split across the x-wrap boundary must merge into one."""
    data = np.array([[0, 0, 0, 0, 0],
                     [1, 1, 0, 1, 1],
                     [0, 0, 0, 0, 0]])
    d = Dendrogram.compute(data, min_value=0.5,
                           neighbours=periodic_neighbours(1))
    # All four 1-pixels belong to a single structure (idx 0)
    expected = -np.ones((3, 5), dtype=int)
    expected[1, [0, 1, 3, 4]] = 0
    np.testing.assert_array_equal(d.index_map, expected)
def test_periodic_left():
    """A column on the left edge must wrap to an isolated pixel on the right."""
    data = np.array([[1, 0, 0, 0, 0],
                     [1, 0, 0, 0, 1],
                     [1, 0, 0, 0, 0]])
    d = Dendrogram.compute(data, min_value=0.5,
                           neighbours=periodic_neighbours(1))
    # The whole left column plus the wrapped right pixel form one structure
    expected = -np.ones((3, 5), dtype=int)
    expected[:, 0] = 0
    expected[1, 4] = 0
    np.testing.assert_array_equal(d.index_map, expected)
def test_periodic_left_narrow():
    """A two-pixel run at the left edge must join the wrapped pixel on the right."""
    data = np.array([[0, 0, 0, 0, 0],
                     [1, 1, 0, 0, 1],
                     [0, 0, 0, 0, 0]])
    d = Dendrogram.compute(data, min_value=0.5,
                           neighbours=periodic_neighbours(1))
    # The three 1-pixels merge across the wrap into structure 0
    expected = -np.ones((3, 5), dtype=int)
    expected[1, [0, 1, 4]] = 0
    np.testing.assert_array_equal(d.index_map, expected)
def test_periodic_right():
    """A column on the right edge must wrap to an isolated pixel on the left."""
    data = np.array([[0, 0, 0, 0, 1],
                     [1, 0, 0, 0, 1],
                     [0, 0, 0, 0, 1]])
    d = Dendrogram.compute(data, min_value=0.5,
                           neighbours=periodic_neighbours(1))
    # The whole right column plus the wrapped left pixel form one structure
    expected = -np.ones((3, 5), dtype=int)
    expected[:, 4] = 0
    expected[1, 0] = 0
    np.testing.assert_array_equal(d.index_map, expected)
def test_periodic_right_narrow():
    """A two-pixel run at the right edge must join the wrapped pixel on the left."""
    data = np.array([[0, 0, 0, 0, 0],
                     [1, 0, 0, 1, 1],
                     [0, 0, 0, 0, 0]])
    d = Dendrogram.compute(data, min_value=0.5,
                           neighbours=periodic_neighbours(1))
    # The three 1-pixels merge across the wrap into structure 0
    expected = -np.ones((3, 5), dtype=int)
    expected[1, [0, 3, 4]] = 0
    np.testing.assert_array_equal(d.index_map, expected)
from .build_benchmark import BENCHMARKS
@pytest.mark.parametrize(('filename'), BENCHMARKS.keys())
def test_benchmark(filename):
    """Recomputing a stored benchmark dendrogram must reproduce it exactly."""
    from astropy.io import fits
    import os

    benchmark_path = os.path.join(os.path.dirname(__file__),
                                  'benchmark_data', filename)
    parameters = BENCHMARKS[filename]
    data = fits.getdata(benchmark_path, 1)

    recomputed = Dendrogram.compute(data, **parameters)
    stored = Dendrogram.load_from(benchmark_path)
    assert recomputed == stored

    # Check that all structures contain a reference to the dendrogram
    for structure in recomputed:
        assert structure._dendrogram is recomputed
| [
"numpy.prod",
"numpy.ones",
"numpy.indices",
"os.path.dirname",
"numpy.array",
"astropy.io.fits.getdata",
"numpy.zeros",
"numpy.testing.assert_array_equal"
] | [((8402, 8463), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 1, 0, 1, 1], [0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [1, 1, 0, 1, 1], [0, 0, 0, 0, 0]])\n', (8410, 8463), True, 'import numpy as np\n'), ((8629, 8701), 'numpy.array', 'np.array', (['[[-1, -1, -1, -1, -1], [0, 0, -1, 0, 0], [-1, -1, -1, -1, -1]]'], {}), '([[-1, -1, -1, -1, -1], [0, 0, -1, 0, 0], [-1, -1, -1, -1, -1]])\n', (8637, 8701), True, 'import numpy as np\n'), ((8757, 8809), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['d.index_map', 'expected'], {}), '(d.index_map, expected)\n', (8786, 8809), True, 'import numpy as np\n'), ((8848, 8909), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 0]]'], {}), '([[1, 0, 0, 0, 0], [1, 0, 0, 0, 1], [1, 0, 0, 0, 0]])\n', (8856, 8909), True, 'import numpy as np\n'), ((9073, 9145), 'numpy.array', 'np.array', (['[[0, -1, -1, -1, -1], [0, -1, -1, -1, 0], [0, -1, -1, -1, -1]]'], {}), '([[0, -1, -1, -1, -1], [0, -1, -1, -1, 0], [0, -1, -1, -1, -1]])\n', (9081, 9145), True, 'import numpy as np\n'), ((9203, 9255), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['d.index_map', 'expected'], {}), '(d.index_map, expected)\n', (9232, 9255), True, 'import numpy as np\n'), ((9301, 9362), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 1, 0, 0, 1], [0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [1, 1, 0, 0, 1], [0, 0, 0, 0, 0]])\n', (9309, 9362), True, 'import numpy as np\n'), ((9526, 9599), 'numpy.array', 'np.array', (['[[-1, -1, -1, -1, -1], [0, 0, -1, -1, 0], [-1, -1, -1, -1, -1]]'], {}), '([[-1, -1, -1, -1, -1], [0, 0, -1, -1, 0], [-1, -1, -1, -1, -1]])\n', (9534, 9599), True, 'import numpy as np\n'), ((9657, 9709), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['d.index_map', 'expected'], {}), '(d.index_map, expected)\n', (9686, 9709), True, 'import numpy as np\n'), ((9749, 9810), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 0, 0, 0, 
1]]'], {}), '([[0, 0, 0, 0, 1], [1, 0, 0, 0, 1], [0, 0, 0, 0, 1]])\n', (9757, 9810), True, 'import numpy as np\n'), ((9974, 10046), 'numpy.array', 'np.array', (['[[-1, -1, -1, -1, 0], [0, -1, -1, -1, 0], [-1, -1, -1, -1, 0]]'], {}), '([[-1, -1, -1, -1, 0], [0, -1, -1, -1, 0], [-1, -1, -1, -1, 0]])\n', (9982, 10046), True, 'import numpy as np\n'), ((10104, 10156), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['d.index_map', 'expected'], {}), '(d.index_map, expected)\n', (10133, 10156), True, 'import numpy as np\n'), ((10203, 10264), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 0, 0, 1, 1], [0, 0, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [1, 0, 0, 1, 1], [0, 0, 0, 0, 0]])\n', (10211, 10264), True, 'import numpy as np\n'), ((10428, 10501), 'numpy.array', 'np.array', (['[[-1, -1, -1, -1, -1], [0, -1, -1, 0, 0], [-1, -1, -1, -1, -1]]'], {}), '([[-1, -1, -1, -1, -1], [0, -1, -1, 0, 0], [-1, -1, -1, -1, -1]])\n', (10436, 10501), True, 'import numpy as np\n'), ((10559, 10611), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['d.index_map', 'expected'], {}), '(d.index_map, expected)\n', (10588, 10611), True, 'import numpy as np\n'), ((10950, 10971), 'astropy.io.fits.getdata', 'fits.getdata', (['path', '(1)'], {}), '(path, 1)\n', (10962, 10971), False, 'from astropy.io import fits\n'), ((276, 394), 'numpy.array', 'np.array', (['[[n, n, n, n, n, n, n, n], [n, 4, n, n, n, n, n, n], [n, n, n, 1, n, n, 0, \n 5], [3, n, n, 2, 3, 2, 0, n]]'], {}), '([[n, n, n, n, n, n, n, n], [n, 4, n, n, n, n, n, n], [n, n, n, 1,\n n, n, 0, 5], [3, n, n, 2, 3, 2, 0, n]])\n', (284, 394), True, 'import numpy as np\n'), ((2501, 2562), 'numpy.array', 'np.array', (['[[n, n, n, n, n], [n, 4, 2, 5, n], [n, n, n, n, 0]]'], {}), '([[n, n, n, n, n], [n, 4, 2, 5, n], [n, n, n, n, 0]])\n', (2509, 2562), True, 'import numpy as np\n'), ((3000, 3660), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 
1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 3, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 3, 5, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 2, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 3, 4, 3, 1, 1,\n 1, 1, 1, 1, 1], [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, \n 1], [1, 1, 1, 1, 3, 4, 3, 1, 1, 2, 2, 1, 1, 1], [1, 1, 1, 1, 2, 3, 2, 1,\n 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]'], {}), '([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1,\n 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 3, 5, 3, 1, 1, 1, 1, 1, 1, \n 1, 1, 1], [1, 1, 2, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 3, 4,\n 3, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1], [1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 2, 3, 2, 1, 1, 1, \n 1, 1, 1, 1], [1, 1, 1, 1, 3, 4, 3, 1, 1, 2, 2, 1, 1, 1], [1, 1, 1, 1, 2,\n 3, 2, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])\n', (3008, 3660), True, 'import numpy as np\n'), ((5793, 5843), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['st_map', 'd.index_map'], {}), '(st_map, d.index_map)\n', (5822, 5843), True, 'import numpy as np\n'), ((6539, 6561), 'numpy.zeros', 'np.zeros', (['(5, 5, 5, 5)'], {}), '((5, 5, 5, 5))\n', (6547, 6561), True, 'import numpy as np\n'), ((10828, 10853), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10843, 10853), False, 'import os\n'), ((4974, 5012), 'numpy.ones', 'np.ones', (['self.data.shape'], {'dtype': 'np.int'}), '(self.data.shape, dtype=np.int)\n', (4981, 5012), True, 'import numpy as np\n'), ((5974, 5998), 'numpy.prod', 'np.prod', (['self.data.shape'], {}), '(self.data.shape)\n', (5981, 5998), True, 'import numpy as np\n'), 
((5922, 5949), 'numpy.indices', 'np.indices', (['self.data.shape'], {}), '(self.data.shape)\n', (5932, 5949), True, 'import numpy as np\n')] |
import unittest
import numpy
from templevel import TempLevel
from pymclevel.box import BoundingBox
__author__ = 'Rio'
class TestJavaLevel(unittest.TestCase):
    """Copy blocks from an Indev level into a Creative level and verify them."""

    def setUp(self):
        # TempLevel gives us scratch copies of the fixture worlds
        self.creativelevel = TempLevel("Dojo_64_64_128.dat")
        self.indevlevel = TempLevel("hell.mclevel")

    def testCopy(self):
        source = self.indevlevel.level
        destination = self.creativelevel.level
        destination.copyBlocksFrom(source, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))
        # Every copied block id in the 64^3 region must match the source
        copied_region_matches = (source.Blocks[0:64, 0:64, 0:64]) == (destination.Blocks[0:64, 0:64, 0:64])
        assert(numpy.array(copied_region_matches).all())
        destination.saveInPlace()
        # xxx old survival levels
# xxx old survival levels
| [
"pymclevel.box.BoundingBox",
"numpy.array",
"templevel.TempLevel"
] | [((210, 241), 'templevel.TempLevel', 'TempLevel', (['"""Dojo_64_64_128.dat"""'], {}), "('Dojo_64_64_128.dat')\n", (219, 241), False, 'from templevel import TempLevel\n'), ((268, 293), 'templevel.TempLevel', 'TempLevel', (['"""hell.mclevel"""'], {}), "('hell.mclevel')\n", (277, 293), False, 'from templevel import TempLevel\n'), ((461, 497), 'pymclevel.box.BoundingBox', 'BoundingBox', (['(0, 0, 0)', '(64, 64, 64)'], {}), '((0, 0, 0), (64, 64, 64))\n', (472, 497), False, 'from pymclevel.box import BoundingBox\n'), ((526, 621), 'numpy.array', 'numpy.array', (['(indevlevel.Blocks[0:64, 0:64, 0:64] == creativelevel.Blocks[0:64, 0:64, 0:64])'], {}), '(indevlevel.Blocks[0:64, 0:64, 0:64] == creativelevel.Blocks[0:\n 64, 0:64, 0:64])\n', (537, 621), False, 'import numpy\n')] |
from torch.optim import Adam, SGD, AdamW
import torch
from torch.optim.lr_scheduler import OneCycleLR
import numpy as np
import os
import time
from torch.utils.data import DataLoader
from dataset.vocab import Vocab
from dataset.add_noise import SynthesizeData
from params import *
from models.seq2seq import Seq2Seq
from models.seq2seq_without_attention import Seq2Seq_WithoutAtt
from utils.logger import Logger
from dataset.autocorrect_dataset import AutoCorrectDataset
from models.loss import LabelSmoothingLoss
from utils.utils import translate, translate_beam_search, batch_translate_beam_search
from utils.metrics import compute_accuracy
class Trainer():
    def __init__(self, alphabets_, list_ngram):
        """Build the spelling-correction trainer.

        Args:
            alphabets_: alphabet passed to Vocab to build the character vocabulary.
            list_ngram: full list of n-gram training phrases; split 90/10 into
                train/valid here.
        """
        self.vocab = Vocab(alphabets_)
        self.synthesizer = SynthesizeData(vocab_path="")
        # Deterministic head/tail split (no shuffling happens here)
        self.list_ngrams_train, self.list_ngrams_valid = self.train_test_split(list_ngram, test_size=0.1)
        print("Loaded data!!!")
        print("Total training samples: ", len(self.list_ngrams_train))
        print("Total valid samples: ", len(self.list_ngrams_valid))

        # Source and target share the same character vocabulary
        INPUT_DIM = self.vocab.__len__()
        OUTPUT_DIM = self.vocab.__len__()

        # Hyper-parameters come from params.py (star-imported constants)
        self.device = DEVICE
        self.num_iters = NUM_ITERS
        self.beamsearch = BEAM_SEARCH

        self.batch_size = BATCH_SIZE
        self.print_every = PRINT_PER_ITER
        self.valid_every = VALID_PER_ITER

        self.checkpoint = CHECKPOINT
        self.export_weights = EXPORT
        self.metrics = MAX_SAMPLE_VALID
        logger = LOG

        # NOTE(review): if LOG is falsy, self.logger is never created but
        # train() still calls self.logger.log — confirm LOG is always set.
        if logger:
            self.logger = Logger(logger)

        self.iter = 0

        self.model = Seq2Seq(input_dim=INPUT_DIM, output_dim=OUTPUT_DIM, encoder_embbeded=ENC_EMB_DIM,
                             decoder_embedded=DEC_EMB_DIM,
                             encoder_hidden=ENC_HID_DIM, decoder_hidden=DEC_HID_DIM, encoder_dropout=ENC_DROPOUT,
                             decoder_dropout=DEC_DROPOUT)

        self.optimizer = AdamW(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09)
        self.scheduler = OneCycleLR(self.optimizer, total_steps=self.num_iters, pct_start=PCT_START, max_lr=MAX_LR)

        # Label smoothing regularises the per-character cross entropy
        self.criterion = LabelSmoothingLoss(len(self.vocab), padding_idx=self.vocab.pad, smoothing=0.1)

        self.train_gen = self.data_gen(self.list_ngrams_train, self.synthesizer, self.vocab, is_train=True)
        self.valid_gen = self.data_gen(self.list_ngrams_valid, self.synthesizer, self.vocab, is_train=False)

        self.train_losses = []

        # to device
        self.model.to(self.device)
        self.criterion.to(self.device)
def train_test_split(self, list_phrases, test_size=0.1):
list_phrases = list_phrases
train_idx = int(len(list_phrases) * (1 - test_size))
list_phrases_train = list_phrases[:train_idx]
list_phrases_valid = list_phrases[train_idx:]
return list_phrases_train, list_phrases_valid
def data_gen(self, list_ngrams_np, synthesizer, vocab, is_train=True):
dataset = AutoCorrectDataset(list_ngrams_np, transform_noise=synthesizer, vocab=vocab, maxlen=MAXLEN)
shuffle = True if is_train else False
gen = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=shuffle,
drop_last=False)
return gen
    def step(self, batch):
        """Run one optimisation step on a single batch and return the scalar loss.

        Forward pass uses teacher forcing (tgt is fed to the model), then
        label-smoothed loss, backprop, gradient clipping, optimizer step and
        LR-scheduler step.
        """
        self.model.train()
        # NOTE(review): batch_to_device is defined outside this chunk;
        # presumably moves 'src'/'tgt' tensors onto self.device — confirm.
        batch = self.batch_to_device(batch)
        src, tgt = batch['src'], batch['tgt']
        src, tgt = src.transpose(1, 0), tgt.transpose(1, 0) # batch x src_len -> src_len x batch
        outputs = self.model(src, tgt) # src : src_len x B, outpus : B x tgt_len x vocab
        # loss = self.criterion(rearrange(outputs, 'b t v -> (b t) v'), rearrange(tgt_output, 'b o -> (b o)'))
        # Flatten predictions to (B * tgt_len, vocab) for the loss
        outputs = outputs.view(-1, outputs.size(2)) # flatten(0, 1)
        tgt_output = tgt.transpose(0, 1).reshape(-1) # flatten() # tgt: tgt_len xB , need convert to B x tgt_len
        loss = self.criterion(outputs, tgt_output)
        self.optimizer.zero_grad()
        loss.backward()
        # Clip gradients to unit norm to stabilise recurrent training
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1)
        self.optimizer.step()
        self.scheduler.step()
        loss_item = loss.item()
        return loss_item
    def train(self):
        """Main training loop.

        Runs ``self.num_iters`` optimization steps, logging running averages
        every ``print_every`` iterations and validating every ``valid_every``
        iterations. Saves the best weights (by full-sequence accuracy) and a
        checkpoint after each validation.

        Side effects: updates model weights, appends to ``self.train_losses``,
        writes logs, and saves weights/checkpoints to disk.
        """
        print("Begin training from iter: ", self.iter)
        total_loss = 0
        total_loader_time = 0
        total_gpu_time = 0
        best_acc = -1
        data_iter = iter(self.train_gen)
        for i in range(self.num_iters):
            self.iter += 1
            start = time.time()
            # Pull the next batch; restart the iterator when the epoch ends.
            try:
                batch = next(data_iter)
            except StopIteration:
                data_iter = iter(self.train_gen)
                batch = next(data_iter)
            total_loader_time += time.time() - start
            start = time.time()
            loss = self.step(batch)
            total_gpu_time += time.time() - start
            total_loss += loss
            self.train_losses.append((self.iter, loss))
            # Periodic console/file logging of running totals, reset after.
            if self.iter % self.print_every == 0:
                info = 'iter: {:06d} - train loss: {:.3f} - lr: {:.2e} - load time: {:.2f} - gpu time: {:.2f}'.format(
                    self.iter,
                    total_loss / self.print_every, self.optimizer.param_groups[0]['lr'],
                    total_loader_time, total_gpu_time)
                total_loss = 0
                total_loader_time = 0
                total_gpu_time = 0
                print(info)
                self.logger.log(info)
            # Periodic validation + metric computation; keep best weights.
            if self.iter % self.valid_every == 0:
                val_loss, preds, actuals, inp_sents = self.validate()
                acc_full_seq, acc_per_char, cer = self.precision(self.metrics)
                info = 'iter: {:06d} - valid loss: {:.3f} - acc full seq: {:.4f} - acc per char: {:.4f} - CER: {:.4f} '.format(
                    self.iter, val_loss, acc_full_seq, acc_per_char, cer)
                print(info)
                print("--- Sentence predict ---")
                for pred, inp, label in zip(preds, inp_sents, actuals):
                    infor_predict = 'Pred: {} - Inp: {} - Label: {}'.format(pred, inp, label)
                    print(infor_predict)
                    self.logger.log(infor_predict)
                self.logger.log(info)
                if acc_full_seq > best_acc:
                    self.save_weights(self.export_weights)
                    best_acc = acc_full_seq
                self.save_checkpoint(self.checkpoint)
    def validate(self):
        """Evaluate the model on the validation loader (no gradients).

        Returns:
            ``(mean_val_loss, preds[:3], actuals[:3], inp_sents[:3])`` — the
            last three are small samples for qualitative inspection.

        NOTE(review): ``self.predict(5)`` is called inside the batch loop, so
        a full prediction pass runs for every validation batch — this looks
        wasteful; confirm whether it was meant to run once after the loop.
        """
        self.model.eval()
        total_loss = []
        # metrics / batch_size bounds how many validation batches are consumed.
        max_step = self.metrics / self.batch_size
        with torch.no_grad():
            for step, batch in enumerate(self.valid_gen):
                batch = self.batch_to_device(batch)
                src, tgt = batch['src'], batch['tgt']
                src, tgt = src.transpose(1, 0), tgt.transpose(1, 0)
                outputs = self.model(src, tgt, 0)  # turn off teacher forcing
                outputs = outputs.flatten(0, 1)
                tgt_output = tgt.flatten()
                loss = self.criterion(outputs, tgt_output)
                total_loss.append(loss.item())
                preds, actuals, inp_sents, probs = self.predict(5)
                del outputs
                del loss
                if step > max_step:
                    break
        total_loss = np.mean(total_loss)
        self.model.train()
        return total_loss, preds[:3], actuals[:3], inp_sents[:3]
def predict(self, sample=None):
pred_sents = []
actual_sents = []
inp_sents = []
for batch in self.valid_gen:
batch = self.batch_to_device(batch)
if self.beamsearch:
translated_sentence = batch_translate_beam_search(batch['src'], self.model)
prob = None
else:
translated_sentence, prob = translate(batch['src'], self.model)
pred_sent = self.vocab.batch_decode(translated_sentence.tolist())
actual_sent = self.vocab.batch_decode(batch['tgt'].tolist())
inp_sent = self.vocab.batch_decode(batch['src'].tolist())
pred_sents.extend(pred_sent)
actual_sents.extend(actual_sent)
inp_sents.extend(inp_sent)
if sample is not None and len(pred_sents) > sample:
break
return pred_sents, actual_sents, inp_sents, prob
def precision(self, sample=None):
pred_sents, actual_sents, _, _ = self.predict(sample=sample)
acc_full_seq = compute_accuracy(actual_sents, pred_sents, mode='full_sequence')
acc_per_char = compute_accuracy(actual_sents, pred_sents, mode='per_char')
cer = compute_accuracy(actual_sents, pred_sents, mode='CER')
return acc_full_seq, acc_per_char, cer
    def visualize_prediction(self, sample=16, errorcase=False, fontname='serif', fontsize=16):
        """Collect up to ``sample`` predictions (optionally only error cases)
        and prepare a matplotlib-style font dict.

        NOTE(review): ``fontdict`` and ``img_files`` are built but never used
        or returned — the plotting code appears to have been removed; confirm
        whether this method is still needed.
        """
        pred_sents, actual_sents, img_files, probs = self.predict(sample)
        if errorcase:
            # Keep only indices where the prediction differs from the label.
            wrongs = []
            for i in range(len(img_files)):
                if pred_sents[i] != actual_sents[i]:
                    wrongs.append(i)
            pred_sents = [pred_sents[i] for i in wrongs]
            actual_sents = [actual_sents[i] for i in wrongs]
            img_files = [img_files[i] for i in wrongs]
            probs = [probs[i] for i in wrongs]
        img_files = img_files[:sample]
        fontdict = {
            'family': fontname,
            'size': fontsize
        }
    def visualize_dataset(self, sample=16, fontname='serif'):
        """Iterate the training loader and decode up to ``sample`` examples.

        NOTE(review): ``img`` and ``sent`` are computed but never displayed —
        the plotting code appears to have been removed. Also assumes batches
        carry 'img' and 'tgt_input' keys, unlike the 'src'/'tgt' keys used
        elsewhere in this class; verify against the dataset definition.
        """
        n = 0
        for batch in self.train_gen:
            for i in range(self.batch_size):
                img = batch['img'][i].numpy().transpose(1, 2, 0)
                sent = self.vocab.decode(batch['tgt_input'].T[i].tolist())
                n += 1
                if n >= sample:
                    return
def load_checkpoint(self, filename):
checkpoint = torch.load(filename)
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.model.load_state_dict(checkpoint['state_dict'])
self.iter = checkpoint['iter']
self.train_losses = checkpoint['train_losses']
def save_checkpoint(self, filename):
state = {'iter': self.iter, 'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(), 'train_losses': self.train_losses,
'scheduler': self.scheduler.state_dict()}
path, _ = os.path.split(filename)
os.makedirs(path, exist_ok=True)
torch.save(state, filename)
def load_weights(self, filename):
state_dict = torch.load(filename, map_location=torch.device(self.device))
for name, param in self.model.named_parameters():
if name not in state_dict:
print('{} not found'.format(name))
elif state_dict[name].shape != param.shape:
print(
'{} missmatching shape, required {} but found {}'.format(name, param.shape, state_dict[name].shape))
del state_dict[name]
self.model.load_state_dict(state_dict, strict=False)
def save_weights(self, filename):
path, _ = os.path.split(filename)
os.makedirs(path, exist_ok=True)
torch.save(self.model.state_dict(), filename)
def batch_to_device(self, batch):
src = batch['src'].to(self.device, non_blocking=True)
tgt = batch['tgt'].to(self.device, non_blocking=True)
batch = {
'src': src,
'tgt': tgt
}
return batch | [
"dataset.vocab.Vocab",
"numpy.mean",
"utils.utils.batch_translate_beam_search",
"os.makedirs",
"torch.device",
"torch.optim.lr_scheduler.OneCycleLR",
"torch.load",
"os.path.split",
"torch.no_grad",
"dataset.add_noise.SynthesizeData",
"utils.metrics.compute_accuracy",
"torch.utils.data.DataLoad... | [((732, 749), 'dataset.vocab.Vocab', 'Vocab', (['alphabets_'], {}), '(alphabets_)\n', (737, 749), False, 'from dataset.vocab import Vocab\n'), ((777, 806), 'dataset.add_noise.SynthesizeData', 'SynthesizeData', ([], {'vocab_path': '""""""'}), "(vocab_path='')\n", (791, 806), False, 'from dataset.add_noise import SynthesizeData\n'), ((1635, 1873), 'models.seq2seq.Seq2Seq', 'Seq2Seq', ([], {'input_dim': 'INPUT_DIM', 'output_dim': 'OUTPUT_DIM', 'encoder_embbeded': 'ENC_EMB_DIM', 'decoder_embedded': 'DEC_EMB_DIM', 'encoder_hidden': 'ENC_HID_DIM', 'decoder_hidden': 'DEC_HID_DIM', 'encoder_dropout': 'ENC_DROPOUT', 'decoder_dropout': 'DEC_DROPOUT'}), '(input_dim=INPUT_DIM, output_dim=OUTPUT_DIM, encoder_embbeded=\n ENC_EMB_DIM, decoder_embedded=DEC_EMB_DIM, encoder_hidden=ENC_HID_DIM,\n decoder_hidden=DEC_HID_DIM, encoder_dropout=ENC_DROPOUT,\n decoder_dropout=DEC_DROPOUT)\n', (1642, 1873), False, 'from models.seq2seq import Seq2Seq\n'), ((2060, 2154), 'torch.optim.lr_scheduler.OneCycleLR', 'OneCycleLR', (['self.optimizer'], {'total_steps': 'self.num_iters', 'pct_start': 'PCT_START', 'max_lr': 'MAX_LR'}), '(self.optimizer, total_steps=self.num_iters, pct_start=PCT_START,\n max_lr=MAX_LR)\n', (2070, 2154), False, 'from torch.optim.lr_scheduler import OneCycleLR\n'), ((3016, 3111), 'dataset.autocorrect_dataset.AutoCorrectDataset', 'AutoCorrectDataset', (['list_ngrams_np'], {'transform_noise': 'synthesizer', 'vocab': 'vocab', 'maxlen': 'MAXLEN'}), '(list_ngrams_np, transform_noise=synthesizer, vocab=vocab,\n maxlen=MAXLEN)\n', (3034, 3111), False, 'from dataset.autocorrect_dataset import AutoCorrectDataset\n'), ((3169, 3245), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': 'shuffle', 'drop_last': '(False)'}), '(dataset, batch_size=BATCH_SIZE, shuffle=shuffle, drop_last=False)\n', (3179, 3245), False, 'from torch.utils.data import DataLoader\n'), ((7386, 7405), 'numpy.mean', 'np.mean', 
(['total_loss'], {}), '(total_loss)\n', (7393, 7405), True, 'import numpy as np\n'), ((8572, 8636), 'utils.metrics.compute_accuracy', 'compute_accuracy', (['actual_sents', 'pred_sents'], {'mode': '"""full_sequence"""'}), "(actual_sents, pred_sents, mode='full_sequence')\n", (8588, 8636), False, 'from utils.metrics import compute_accuracy\n'), ((8660, 8719), 'utils.metrics.compute_accuracy', 'compute_accuracy', (['actual_sents', 'pred_sents'], {'mode': '"""per_char"""'}), "(actual_sents, pred_sents, mode='per_char')\n", (8676, 8719), False, 'from utils.metrics import compute_accuracy\n'), ((8734, 8788), 'utils.metrics.compute_accuracy', 'compute_accuracy', (['actual_sents', 'pred_sents'], {'mode': '"""CER"""'}), "(actual_sents, pred_sents, mode='CER')\n", (8750, 8788), False, 'from utils.metrics import compute_accuracy\n'), ((9989, 10009), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (9999, 10009), False, 'import torch\n'), ((10584, 10607), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (10597, 10607), False, 'import os\n'), ((10616, 10648), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (10627, 10648), False, 'import os\n'), ((10658, 10685), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (10668, 10685), False, 'import torch\n'), ((11312, 11335), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (11325, 11335), False, 'import os\n'), ((11344, 11376), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (11355, 11376), False, 'import os\n'), ((1575, 1589), 'utils.logger.Logger', 'Logger', (['logger'], {}), '(logger)\n', (1581, 1589), False, 'from utils.logger import Logger\n'), ((4566, 4577), 'time.time', 'time.time', ([], {}), '()\n', (4575, 4577), False, 'import time\n'), ((4834, 4845), 'time.time', 'time.time', ([], {}), '()\n', (4843, 4845), False, 'import time\n'), ((6654, 6669), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6667, 6669), False, 'import torch\n'), ((4793, 4804), 'time.time', 'time.time', ([], {}), '()\n', (4802, 4804), False, 'import time\n'), ((4912, 4923), 'time.time', 'time.time', ([], {}), '()\n', (4921, 4923), False, 'import time\n'), ((7766, 7819), 'utils.utils.batch_translate_beam_search', 'batch_translate_beam_search', (["batch['src']", 'self.model'], {}), "(batch['src'], self.model)\n", (7793, 7819), False, 'from utils.utils import translate, translate_beam_search, batch_translate_beam_search\n'), ((7910, 7945), 'utils.utils.translate', 'translate', (["batch['src']", 'self.model'], {}), "(batch['src'], self.model)\n", (7919, 7945), False, 'from utils.utils import translate, translate_beam_search, batch_translate_beam_search\n'), ((10780, 10805), 'torch.device', 'torch.device', (['self.device'], {}), '(self.device)\n', (10792, 10805), False, 'import torch\n')] |
"""This script trigger convolution operation. We think it cause more
GPU power consumption then gemm call.
"""
import numpy as np
import theano
import theano.tensor as T
from theano.gpuarray import dnn
from theano.tensor.nnet.abstract_conv import get_conv_output_shape
def burn():
    """Run a cuDNN convolution repeatedly to stress the GPU.

    Builds a 128-sized image batch convolved with 3x3 kernels into a
    preallocated output buffer, compiles the graph, and calls the compiled
    function 10000 times. ``beta=1.0`` forces cuDNN to accumulate into
    ``out`` so its storage is actually reused instead of being replaced by
    a GpuAllocEmpty.
    """
    sz = 128
    img_shp = [sz, sz, sz, sz]
    kern_shp = [sz // 2, sz, 3, 3]
    out_shp = get_conv_output_shape(img_shp, kern_shp, "valid", (1, 1))

    def rand(shp):
        # Random host data in the configured float precision.
        return np.random.rand(*shp).astype(theano.config.floatX)

    # NOTE: removed three dead ``T.tensor4`` assignments that were
    # immediately overwritten by the shared variables below.
    img = theano.shared(rand(img_shp))
    kern = theano.shared(rand(kern_shp))
    out = theano.shared(rand(out_shp))
    # beta 1 is needed to force the reuse of out, otherwise, it is
    # replaced by a GpuAllocEmpty
    o1 = dnn._dnn_conv(img, kern, conv_mode="conv", out=out, beta=1.0)
    mode = theano.compile.get_default_mode().including("local_remove_all_assert")
    f = theano.function([], [o1], mode=mode)
    theano.printing.debugprint(f)
    print("Start computation")
    for i in range(10000):
        f.fn()
    print("Computation stopped")
# Run the GPU burn loop only when executed as a script.
if __name__ == "__main__":
    burn()
| [
"theano.gpuarray.dnn._dnn_conv",
"theano.function",
"theano.printing.debugprint",
"numpy.random.rand",
"theano.tensor.nnet.abstract_conv.get_conv_output_shape",
"theano.compile.get_default_mode",
"theano.tensor.tensor4"
] | [((380, 437), 'theano.tensor.nnet.abstract_conv.get_conv_output_shape', 'get_conv_output_shape', (['img_shp', 'kern_shp', '"""valid"""', '(1, 1)'], {}), "(img_shp, kern_shp, 'valid', (1, 1))\n", (401, 437), False, 'from theano.tensor.nnet.abstract_conv import get_conv_output_shape\n'), ((448, 464), 'theano.tensor.tensor4', 'T.tensor4', (['"""img"""'], {}), "('img')\n", (457, 464), True, 'import theano.tensor as T\n'), ((476, 493), 'theano.tensor.tensor4', 'T.tensor4', (['"""kern"""'], {}), "('kern')\n", (485, 493), True, 'import theano.tensor as T\n'), ((504, 520), 'theano.tensor.tensor4', 'T.tensor4', (['"""out"""'], {}), "('out')\n", (513, 520), True, 'import theano.tensor as T\n'), ((836, 897), 'theano.gpuarray.dnn._dnn_conv', 'dnn._dnn_conv', (['img', 'kern'], {'conv_mode': '"""conv"""', 'out': 'out', 'beta': '(1.0)'}), "(img, kern, conv_mode='conv', out=out, beta=1.0)\n", (849, 897), False, 'from theano.gpuarray import dnn\n'), ((988, 1024), 'theano.function', 'theano.function', (['[]', '[o1]'], {'mode': 'mode'}), '([], [o1], mode=mode)\n', (1003, 1024), False, 'import theano\n'), ((1029, 1058), 'theano.printing.debugprint', 'theano.printing.debugprint', (['f'], {}), '(f)\n', (1055, 1058), False, 'import theano\n'), ((909, 942), 'theano.compile.get_default_mode', 'theano.compile.get_default_mode', ([], {}), '()\n', (940, 942), False, 'import theano\n'), ((556, 576), 'numpy.random.rand', 'np.random.rand', (['*shp'], {}), '(*shp)\n', (570, 576), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import scipy.sparse as sp
from multimodal.lib.array_utils import normalize_features
from multimodal.evaluation import (evaluate_label_reco,
evaluate_NN_label,
chose_examples)
class TestLabelEvaluation(unittest.TestCase):
    """Sanity checks for evaluate_label_reco on tiny score matrices."""

    def test(self):
        # Row argmax matching the label scores 1; a complete miss scores 0;
        # one hit out of two scores .5.
        ground_truth = [2, 0]
        scores = np.array([[.1, .5, .6, .1],
                           [.6, .5, .2, .1]])
        self.assertEqual(evaluate_label_reco(scores, ground_truth), 1.)
        self.assertEqual(evaluate_label_reco(scores[[1, 0], :], ground_truth), 0.)
        self.assertEqual(evaluate_label_reco(scores[[1, 1], :], ground_truth), .5)

    def test_fails_on_multiple_labels(self):
        # Nested label lists must be rejected with an AssertionError.
        ground_truth = [[2], [0]]
        scores = np.array([[.1, .5, .6, .1],
                           [.6, .5, .2, .1]])
        with self.assertRaises(AssertionError):
            evaluate_label_reco(scores, ground_truth)
class TestNNEvaluation(unittest.TestCase):
    """Tests for evaluate_NN_label using a fake metric: two samples are at
    distance 0 iff a's third feature equals b's fourth feature, where the
    labels were planted in setUp."""

    def setUp(self):
        self.labels_a = np.random.randint(10, size=13)
        self.labels_b = [i for i in reversed(range(10))]
        # Encode label on third coordinate of a and fourth of b
        self.a = np.random.random((13, 5))
        for i in range(13):
            self.a[i, 2] = self.labels_a[i]
        self.b = np.random.random((10, 5))
        for i in range(10):
            self.b[i, 3] = self.labels_b[i]

    def fake_metrics(self, a, b, axis=-1):
        # Expects broadcast 3-d inputs; distance 0 when planted labels match.
        assert(axis == -1)  # Test does not work if not...
        return 1. - (a[:, :, 2] == b[:, :, 3])

    def test_good_on_fake_measure(self):
        # Perfect score when nearest neighbours share the planted label.
        self.assertEqual(evaluate_NN_label(self.a, self.b, self.labels_a,
                                           self.labels_b, self.fake_metrics
                                           ), 1.)

    def test_bad_on_fake_measure(self):
        # Shifting b destroys every label match -> score 0.
        self.assertEqual(evaluate_NN_label(self.a, 1 + self.b,
                                           self.labels_a, self.labels_b,
                                           self.fake_metrics), 0.)

    def test_on_fake_measure_sparse(self):
        # Same two checks with CSR sparse inputs.
        a = sp.lil_matrix(self.a).tocsr()
        b = sp.lil_matrix(self.b).tocsr()
        self.assertEqual(
            evaluate_NN_label(a, b, self.labels_a, self.labels_b,
                              self.fake_metrics),
            1.)
        self.assertEqual(
            evaluate_NN_label(a, 1 + self.b, self.labels_a,
                              self.labels_b, self.fake_metrics),
            0.)
class TestChoseExamples(unittest.TestCase):
    """Checks that chose_examples picks the requested number of indices per
    label from a shuffled label list."""

    def setUp(self):
        # Three labels, five occurrences each, shuffled deterministically.
        self.label_set = list(range(3))
        self.labels = self.label_set * 5
        np.random.seed(0)
        np.random.shuffle(self.labels)

    def test_choses_as_many_examples_as_labels(self):
        n_labels = len(self.label_set)
        self.assertEqual(len(chose_examples(self.labels, self.label_set)), n_labels)
        # Also works without an explicit label set.
        self.assertEqual(len(chose_examples(self.labels)), n_labels)

    def test_choses_twice_as_many_examples_as_labels(self):
        chosen = chose_examples(self.labels, self.label_set, number=2)
        self.assertEqual(len(chosen), 2 * len(self.label_set))

    def test_all_chosen_are_indices(self):
        chosen = chose_examples(self.labels, self.label_set, number=2)
        assert(all(0 <= idx < len(self.labels) for idx in chosen))

    def test_all_labels_are_chosen_once(self):
        chosen = chose_examples(self.labels, self.label_set)
        picked = [self.labels[idx] for idx in chosen]
        assert(all(picked.count(label) == 1 for label in self.label_set))

    def test_all_labels_are_chosen_twice(self):
        chosen = chose_examples(self.labels, self.label_set, number=2)
        picked = [self.labels[idx] for idx in chosen]
        assert(all(picked.count(label) == 2 for label in self.label_set))
class TestNormalizeFeatures(unittest.TestCase):
    """Checks that normalize_features rescales columns to unit sum, drops
    zero-sum columns, treats dense and sparse inputs identically, and does
    not modify its input."""

    def setUp(self):
        # Sparse-ish random matrix: most entries are zeroed by the < .2 mask.
        self.mat = np.random.random((32, 13))
        self.mat = 10. * self.mat * (self.mat < .2)

    def test_on_sparse_same_shape(self):
        m = self.mat
        m[0, :] += 1  # Ensures that no column has zero sum
        m = sp.csc_matrix(m)
        norm = normalize_features(m)
        assert(np.allclose(norm.sum(axis=0), 1))
        self.assertEqual(norm.shape, m.shape)

    def test_removes_columns_sparse(self):
        m = self.mat
        m[0, :] += 1  # Ensures that no column has zero sum
        m[:, [1, 3]] = 0  # Ensures column 1 and 3 have zero sum
        m = sp.csc_matrix(m)
        norm = normalize_features(m)
        assert(np.allclose(norm.sum(axis=0), 1))
        # The two zero-sum columns must be dropped from the output.
        self.assertEqual(norm.shape, (m.shape[0], m.shape[1] - 2))

    def test_on_dense_same_shape(self):
        m = self.mat
        m[0, :] += 1  # Ensures that no column has zero sum
        norm = normalize_features(m)
        assert(np.allclose(norm.sum(axis=0), 1))
        self.assertEqual(norm.shape, m.shape)

    def test_removes_columns_dense(self):
        m = self.mat
        m[0, :] += 1  # Ensures that no column has zero sum
        m[:, [1, 3]] = 0  # Ensures column 1 and 3 have zero sum
        norm = normalize_features(m)
        assert(np.allclose(norm.sum(axis=0), 1))
        self.assertEqual(norm.shape, (m.shape[0], m.shape[1] - 2))

    def test_same_on_dense_and_sparse(self):
        # CSC and CSR sparse inputs must agree with the dense result.
        m1 = sp.csc_matrix(self.mat)
        m2 = sp.csr_matrix(self.mat)
        n = normalize_features(self.mat)
        n1 = normalize_features(m1)
        n2 = normalize_features(m2)
        assert(np.allclose(n1.todense(), n))
        assert(np.allclose(n2.todense(), n))

    def test_does_not_modify(self):
        # Normalisation must not mutate the input matrix (dense or sparse).
        m = self.mat.copy()
        normalize_features(m)
        ms = sp.csr_matrix(m)
        normalize_features(ms)
        assert(np.allclose(m, self.mat))
        assert(np.allclose(ms.todense(), self.mat))

    def test_OK(self):
        # Hand-checked example: zero-sum column 1 is removed, others sum to 1.
        n = normalize_features(np.array([[1., 0., 1.5, .1],
                                        [1., 0., .5, .3]]))
        ok = np.array([[.5, .75, .25],
                       [.5, .25, .75]])
        assert(np.allclose(n, ok))
| [
"multimodal.evaluation.evaluate_label_reco",
"multimodal.lib.array_utils.normalize_features",
"multimodal.evaluation.evaluate_NN_label",
"numpy.allclose",
"scipy.sparse.lil_matrix",
"multimodal.evaluation.chose_examples",
"numpy.random.random",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.rand... | [((390, 444), 'numpy.array', 'np.array', (['[[0.1, 0.5, 0.6, 0.1], [0.6, 0.5, 0.2, 0.1]]'], {}), '([[0.1, 0.5, 0.6, 0.1], [0.6, 0.5, 0.2, 0.1]])\n', (398, 444), True, 'import numpy as np\n'), ((477, 510), 'multimodal.evaluation.evaluate_label_reco', 'evaluate_label_reco', (['reco', 'labels'], {}), '(reco, labels)\n', (496, 510), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((560, 604), 'multimodal.evaluation.evaluate_label_reco', 'evaluate_label_reco', (['reco[[1, 0], :]', 'labels'], {}), '(reco[[1, 0], :], labels)\n', (579, 604), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((656, 700), 'multimodal.evaluation.evaluate_label_reco', 'evaluate_label_reco', (['reco[[1, 1], :]', 'labels'], {}), '(reco[[1, 1], :], labels)\n', (675, 700), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((827, 881), 'numpy.array', 'np.array', (['[[0.1, 0.5, 0.6, 0.1], [0.6, 0.5, 0.2, 0.1]]'], {}), '([[0.1, 0.5, 0.6, 0.1], [0.6, 0.5, 0.2, 0.1]])\n', (835, 881), True, 'import numpy as np\n'), ((1084, 1114), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(13)'}), '(10, size=13)\n', (1101, 1114), True, 'import numpy as np\n'), ((1253, 1278), 'numpy.random.random', 'np.random.random', (['(13, 5)'], {}), '((13, 5))\n', (1269, 1278), True, 'import numpy as np\n'), ((1368, 1393), 'numpy.random.random', 'np.random.random', (['(10, 5)'], {}), '((10, 5))\n', (1384, 1393), True, 'import numpy as np\n'), ((2736, 2753), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2750, 2753), True, 'import numpy as np\n'), ((2762, 2792), 'numpy.random.shuffle', 'np.random.shuffle', (['self.labels'], {}), '(self.labels)\n', (2779, 2792), True, 'import numpy as np\n'), ((2860, 2903), 'multimodal.evaluation.chose_examples', 'chose_examples', (['self.labels', 'self.label_set'], {}), '(self.labels, 
self.label_set)\n', (2874, 2903), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((2970, 2997), 'multimodal.evaluation.chose_examples', 'chose_examples', (['self.labels'], {}), '(self.labels)\n', (2984, 2997), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((3154, 3207), 'multimodal.evaluation.chose_examples', 'chose_examples', (['self.labels', 'self.label_set'], {'number': '(2)'}), '(self.labels, self.label_set, number=2)\n', (3168, 3207), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((3322, 3375), 'multimodal.evaluation.chose_examples', 'chose_examples', (['self.labels', 'self.label_set'], {'number': '(2)'}), '(self.labels, self.label_set, number=2)\n', (3336, 3375), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((3496, 3539), 'multimodal.evaluation.chose_examples', 'chose_examples', (['self.labels', 'self.label_set'], {}), '(self.labels, self.label_set)\n', (3510, 3539), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((3708, 3761), 'multimodal.evaluation.chose_examples', 'chose_examples', (['self.labels', 'self.label_set'], {'number': '(2)'}), '(self.labels, self.label_set, number=2)\n', (3722, 3761), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((3960, 3986), 'numpy.random.random', 'np.random.random', (['(32, 13)'], {}), '((32, 13))\n', (3976, 3986), True, 'import numpy as np\n'), ((4174, 4190), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['m'], {}), '(m)\n', (4187, 4190), True, 'import scipy.sparse as sp\n'), ((4206, 4227), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m'], {}), '(m)\n', (4224, 4227), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((4525, 4541), 
'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['m'], {}), '(m)\n', (4538, 4541), True, 'import scipy.sparse as sp\n'), ((4557, 4578), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m'], {}), '(m)\n', (4575, 4578), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((4832, 4853), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m'], {}), '(m)\n', (4850, 4853), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5153, 5174), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m'], {}), '(m)\n', (5171, 5174), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5350, 5373), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['self.mat'], {}), '(self.mat)\n', (5363, 5373), True, 'import scipy.sparse as sp\n'), ((5387, 5410), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['self.mat'], {}), '(self.mat)\n', (5400, 5410), True, 'import scipy.sparse as sp\n'), ((5423, 5451), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['self.mat'], {}), '(self.mat)\n', (5441, 5451), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5465, 5487), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m1'], {}), '(m1)\n', (5483, 5487), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5501, 5523), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m2'], {}), '(m2)\n', (5519, 5523), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5687, 5708), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', (['m'], {}), '(m)\n', (5705, 5708), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5722, 5738), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['m'], {}), '(m)\n', (5735, 5738), True, 'import scipy.sparse as sp\n'), ((5747, 5769), 'multimodal.lib.array_utils.normalize_features', 'normalize_features', 
(['ms'], {}), '(ms)\n', (5765, 5769), False, 'from multimodal.lib.array_utils import normalize_features\n'), ((5785, 5809), 'numpy.allclose', 'np.allclose', (['m', 'self.mat'], {}), '(m, self.mat)\n', (5796, 5809), True, 'import numpy as np\n'), ((6022, 6070), 'numpy.array', 'np.array', (['[[0.5, 0.75, 0.25], [0.5, 0.25, 0.75]]'], {}), '([[0.5, 0.75, 0.25], [0.5, 0.25, 0.75]])\n', (6030, 6070), True, 'import numpy as np\n'), ((6103, 6121), 'numpy.allclose', 'np.allclose', (['n', 'ok'], {}), '(n, ok)\n', (6114, 6121), True, 'import numpy as np\n'), ((959, 992), 'multimodal.evaluation.evaluate_label_reco', 'evaluate_label_reco', (['reco', 'labels'], {}), '(reco, labels)\n', (978, 992), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((1683, 1770), 'multimodal.evaluation.evaluate_NN_label', 'evaluate_NN_label', (['self.a', 'self.b', 'self.labels_a', 'self.labels_b', 'self.fake_metrics'], {}), '(self.a, self.b, self.labels_a, self.labels_b, self.\n fake_metrics)\n', (1700, 1770), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((1924, 2015), 'multimodal.evaluation.evaluate_NN_label', 'evaluate_NN_label', (['self.a', '(1 + self.b)', 'self.labels_a', 'self.labels_b', 'self.fake_metrics'], {}), '(self.a, 1 + self.b, self.labels_a, self.labels_b, self.\n fake_metrics)\n', (1941, 2015), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((2272, 2344), 'multimodal.evaluation.evaluate_NN_label', 'evaluate_NN_label', (['a', 'b', 'self.labels_a', 'self.labels_b', 'self.fake_metrics'], {}), '(a, b, self.labels_a, self.labels_b, self.fake_metrics)\n', (2289, 2344), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((2442, 2528), 'multimodal.evaluation.evaluate_NN_label', 'evaluate_NN_label', (['a', '(1 + self.b)', 'self.labels_a', 'self.labels_b', 'self.fake_metrics'], 
{}), '(a, 1 + self.b, self.labels_a, self.labels_b, self.\n fake_metrics)\n', (2459, 2528), False, 'from multimodal.evaluation import evaluate_label_reco, evaluate_NN_label, chose_examples\n'), ((5918, 5972), 'numpy.array', 'np.array', (['[[1.0, 0.0, 1.5, 0.1], [1.0, 0.0, 0.5, 0.3]]'], {}), '([[1.0, 0.0, 1.5, 0.1], [1.0, 0.0, 0.5, 0.3]])\n', (5926, 5972), True, 'import numpy as np\n'), ((2158, 2179), 'scipy.sparse.lil_matrix', 'sp.lil_matrix', (['self.a'], {}), '(self.a)\n', (2171, 2179), True, 'import scipy.sparse as sp\n'), ((2200, 2221), 'scipy.sparse.lil_matrix', 'sp.lil_matrix', (['self.b'], {}), '(self.b)\n', (2213, 2221), True, 'import scipy.sparse as sp\n')] |
import numpy as np
class constant:
    """Radial profile that is constant in r (zero derivative)."""
    def __init__(self, rc):
        """Store the constant value ``rc``.

        NOTE(review): unlike the sibling classes, ``rc`` is the returned
        *value* of ``radial`` here, not a cutoff radius — confirm intended.
        """
        self.rc = rc

    def radial(self, r):
        """Return ``(value, derivative)``: the constant ``rc`` and 0.

        Both are scalars even when ``r`` is an array — presumably callers
        rely on NumPy broadcasting; verify against call sites.
        """
        return self.rc, 0
class gaussian:
    """Gaussian radial profile g(r) = exp(-r^2 / (2 rc^2))."""

    def __init__(self, rc):
        """``rc`` is the Gaussian width parameter."""
        self.rc = rc

    def radial(self, r):
        """Return ``(g(r), g'(r))`` with ``g'(r) = -(r / rc^2) * g(r)``."""
        slope = -r / self.rc**2        # prefactor of the derivative
        value = np.exp(slope * r / 2)  # exp(-r^2 / (2 rc^2))
        return value, slope * value
class cosine_cutoff:
    """Cosine cutoff: f(r) = (1 + cos(pi r / rc)) / 2 for r < rc, 0 beyond."""

    def __init__(self, rc):
        """``rc`` is the cutoff radius beyond which value and slope vanish."""
        self.rc = rc

    def radial(self, r):
        """Return ``(f(r), f'(r))`` for scalar or array-like ``r``.

        Generalized from the original: ``np.where`` replaces the in-place
        boolean-mask assignment, so 0-d/scalar inputs are handled too (the
        old ``y[outlier] = 0`` required a mutable ndarray). Array results
        are numerically identical to the original.
        """
        r = np.asarray(r)
        x = np.pi * r / self.rc
        inside = r < self.rc  # complement of the original `outlier` mask
        y = np.where(inside, (1 + np.cos(x)) / 2, 0.0)
        dy = np.where(inside, -np.pi * np.sin(x) / (2 * self.rc), 0.0)
        return y, dy
class quadratic_cutoff:
    """Quadratic cutoff: f(r) = (1 - r/rc)^2 for r < rc, else 0."""

    def __init__(self, rc):
        """``rc`` is the cutoff radius beyond which value and slope vanish."""
        self.rc = rc

    def radial(self, r):
        """Return ``(f(r), f'(r))`` with ``f'(r) = -2 (1 - r/rc) / rc``.

        Generalized from the original: ``np.maximum`` replaces the in-place
        mask assignment ``x[x < 0] = 0`` so scalar inputs work as well as
        arrays; array results are unchanged.
        """
        x = np.maximum(1. - r / self.rc, 0.0)
        return x * x, -2 * x / self.rc
class poly_cutoff:
    """Polynomial cutoff: f(r) = (1 - r/rc)^n for r < rc, else 0."""

    def __init__(self, rc, n):
        """``rc`` is the cutoff radius, ``n`` the polynomial order."""
        self.rc = rc
        self.n = n
        self.n_ = n - 1  # cached exponent for the derivative term

    def radial(self, r):
        """Return ``(f(r), f'(r))`` with ``f'(r) = -n (1 - r/rc)^(n-1) / rc``.

        Generalized from the original: ``np.maximum`` replaces the in-place
        mask assignment ``x[x < 0] = 0`` so scalar inputs work as well as
        arrays; array results are unchanged.
        """
        x = np.maximum(1. - r / self.rc, 0.0)
        y = x**(self.n_)
        return x * y, -self.n * y / self.rc
| [
"numpy.exp",
"numpy.sin",
"numpy.cos"
] | [((331, 348), 'numpy.exp', 'np.exp', (['(x * r / 2)'], {}), '(x * r / 2)\n', (337, 348), True, 'import numpy as np\n'), ((562, 571), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (568, 571), True, 'import numpy as np\n'), ((656, 665), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (662, 665), True, 'import numpy as np\n')] |
import os
import time
import resource
import numpy as np
import torch as th
from . import logger
from mpi4py import MPI
def rcm(start, stop, modulus, mode="[)"):
    """
    Interval contains multiple, where 'mode' specifies whether it's
    closed or open on either side
    This was very tricky to get right
    """
    if start >= stop:
        return False
    # A multiple landing exactly on an endpoint counts only when that side
    # is closed ('[' on the left, ']' on the right).
    if start % modulus == 0 and mode[0] == "[":
        return True
    if stop % modulus == 0 and mode[1] == "]":
        return True
    # Smallest multiple of `modulus` strictly greater than `start`; the open
    # interior contains a multiple iff that value falls before `stop`.
    return modulus * (start // modulus + 1) < stop
class LogSaveHelper:
def __init__(
self,
model: "(nn.Module)",
ic_per_step: "(int) number of iteractions per logging step",
comm: "(MPI.Comm)" = None,
ic_per_save: "(int) save only after this many interactions" = 100_000,
save_mode: "(str) last: keep last model, all: keep all}" = "none",
t0: "(float) override training start timestamp" = None,
log_callbacks: "(list) extra callbacks to run before self.log()" = None,
log_new_eps: "(bool) whether to log statistics for new episodes from non-rolling buffer" = False,
env_name: "(str) env name" = "coinrun",
seed: "(int) seed" = 0,
):
self.model = model
self.comm = comm or MPI.COMM_WORLD
self.ic_per_step = ic_per_step
self.ic_per_save = ic_per_save
self.save_mode = save_mode
self.save_idx = 0
self.last_ic = 0
self.log_idx = 0
self.start_time = self.last_time = time.time()
self.total_interact_count = 0
self.env_name = env_name
self.seed = seed
if ic_per_save > 0:
self.save()
self.start_time = self.last_time = t0 or time.time()
self.log_callbacks = log_callbacks
self.log_new_eps = log_new_eps
self.roller_stats = {}
self.eval_roller_stats = {}
def __call__(self):
self.total_interact_count += self.ic_per_step
assert self.total_interact_count > 0, "Should start counting at 1"
# will_save = (self.ic_per_save > 0) and rcm(
# self.last_ic + 1, self.total_interact_count + 1, self.ic_per_save
# )
will_save = (
self.total_interact_count > 24500000
and (self.ic_per_save > 0)
and rcm(self.last_ic + 1, self.total_interact_count + 1, self.ic_per_save)
)
self.log()
if will_save:
self.save()
return True
def gather_roller_stats(self, roller):
self.roller_stats = {
"train/mean_episode_reward": self._nanmean([] if roller is None else roller.recent_eprets),
"EpLenMean": self._nanmean([] if roller is None else roller.recent_eplens),
}
if roller is not None and self.log_new_eps:
assert roller.has_non_rolling_eps, "roller needs keep_non_rolling"
ret_n, ret_mean, ret_std = self._nanmoments(roller.non_rolling_eprets)
_len_n, len_mean, len_std = self._nanmoments(roller.non_rolling_eplens)
roller.clear_non_rolling_episode_buf()
self.roller_stats.update(
{
"NewEpNum": ret_n,
"NewEpRewMean": ret_mean,
"NewEpRewStd": ret_std,
"NewEpLenMean": len_mean,
"NewEpLenStd": len_std,
}
)
def gather_eval_roller_stats(self, roller):
self.eval_roller_stats = {
"test/mean_episode_reward": self._nanmean([] if roller is None else roller.recent_eprets),
"EpLenMeanTest": self._nanmean([] if roller is None else roller.recent_eplens),
}
if roller is not None and self.log_new_eps:
assert roller.has_non_rolling_eps, "roller needs keep_non_rolling"
ret_n, ret_mean, ret_std = self._nanmoments(roller.non_rolling_eprets)
_len_n, len_mean, len_std = self._nanmoments(roller.non_rolling_eplens)
roller.clear_non_rolling_episode_buf()
self.eval_roller_stats.update(
{
"NewEpNumTest": ret_n,
"NewEpRewMeanTest": ret_mean,
"NewEpRewStdTest": ret_std,
"NewEpLenMeanTest": len_mean,
"NewEpLenStdTest": len_std,
}
)
def log(self):
    """Run log callbacks, emit every tracked key/value pair, and flush."""
    if self.log_callbacks is not None:
        for cb in self.log_callbacks:
            cb()
    # Emit the most recently gathered train and eval roller statistics.
    for stats in (self.roller_stats, self.eval_roller_stats):
        for key, value in stats.items():
            logger.logkv(key, value)
    logger.logkv("train/total_num_steps", self.total_interact_count)
    now = time.time()
    dt = now - self.last_time
    dic = self.total_interact_count - self.last_ic
    logger.logkv("Misc/TimeElapsed", now - self.start_time)
    # Interactions per second since the previous log call.
    logger.logkv("IPS_total", dic / dt)
    logger.logkv("sps", dic / dt)
    logger.logkv("num_steps", self.total_interact_count)
    logger.logkv("del_time", dt)
    logger.logkv("Iter", self.log_idx)
    # ru_maxrss * 1000 suggests a kB-to-bytes conversion — TODO confirm units.
    logger.logkv("CpuMaxMemory", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1000)
    if th.cuda.is_available():
        logger.logkv("GpuMaxMemory", th.cuda.max_memory_allocated())
        # Reset so the next log interval reports its own peak.
        th.cuda.reset_max_memory_allocated()
    # if self.comm.rank == 0:
    #     print("RCALL_LOGDIR: ", os.environ["RCALL_LOGDIR"])
    logger.dumpkvs()
    self.last_time = now
    self.last_ic = self.total_interact_count
    self.log_idx += 1
def save(self):
    """Checkpoint ``self.model`` into the logger directory (rank 0 only)."""
    if self.comm.rank != 0:
        return
    if self.save_mode == "none":
        return
    if self.save_mode == "last":
        # A single file that gets overwritten on every save.
        basename = f"model-{self.env_name}-s{self.seed}"
    elif self.save_mode == "all":
        # Keep every checkpoint by appending a zero-padded save index.
        basename = f"model-{self.env_name}-s{self.seed}" + f"{self.save_idx:03d}"
    else:
        raise NotImplementedError
    world_rank = MPI.COMM_WORLD.rank
    suffix = "" if world_rank == 0 else f"_rank{world_rank:03d}"
    fname = os.path.join(logger.get_dir(), basename + suffix + ".jd")
    logger.log("Saving to ", fname, f"IC={self.total_interact_count}")
    th.save(self.model, fname, pickle_protocol=-1)
    self.save_idx += 1
def _nanmean(self, xs):
    """Mean of *xs* gathered across all workers; NaN when nothing was collected."""
    gathered = _flatten(self.comm.allgather(xs))
    # np.mean of an empty list would warn and return NaN anyway; be explicit.
    return np.mean(gathered) if gathered else np.nan
def _nanmoments(self, xs, **kwargs):
    """Return (count, mean, std) of *xs* gathered across all workers."""
    return _nanmoments_local(_flatten(self.comm.allgather(xs)), **kwargs)
def _flatten(ls):
return [el for sublist in ls for el in sublist]
def _nanmoments_local(xs, ddof=1):
n = len(xs)
if n == 0:
return n, np.nan, np.nan
elif n == ddof:
return n, np.mean(xs), np.nan
else:
return n, np.mean(xs), np.std(xs, ddof=ddof)
| [
"numpy.mean",
"resource.getrusage",
"torch.cuda.max_memory_allocated",
"numpy.std",
"torch.cuda.is_available",
"torch.save",
"torch.cuda.reset_max_memory_allocated",
"time.time"
] | [((1681, 1692), 'time.time', 'time.time', ([], {}), '()\n', (1690, 1692), False, 'import time\n'), ((4939, 4950), 'time.time', 'time.time', ([], {}), '()\n', (4948, 4950), False, 'import time\n'), ((5457, 5479), 'torch.cuda.is_available', 'th.cuda.is_available', ([], {}), '()\n', (5477, 5479), True, 'import torch as th\n'), ((6524, 6570), 'torch.save', 'th.save', (['self.model', 'fname'], {'pickle_protocol': '(-1)'}), '(self.model, fname, pickle_protocol=-1)\n', (6531, 6570), True, 'import torch as th\n'), ((1890, 1901), 'time.time', 'time.time', ([], {}), '()\n', (1899, 1901), False, 'import time\n'), ((5566, 5602), 'torch.cuda.reset_max_memory_allocated', 'th.cuda.reset_max_memory_allocated', ([], {}), '()\n', (5600, 5602), True, 'import torch as th\n'), ((6717, 6728), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (6724, 6728), True, 'import numpy as np\n'), ((5522, 5552), 'torch.cuda.max_memory_allocated', 'th.cuda.max_memory_allocated', ([], {}), '()\n', (5550, 5552), True, 'import torch as th\n'), ((7076, 7087), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (7083, 7087), True, 'import numpy as np\n'), ((7124, 7135), 'numpy.mean', 'np.mean', (['xs'], {}), '(xs)\n', (7131, 7135), True, 'import numpy as np\n'), ((7137, 7158), 'numpy.std', 'np.std', (['xs'], {'ddof': 'ddof'}), '(xs, ddof=ddof)\n', (7143, 7158), True, 'import numpy as np\n'), ((5387, 5427), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (5405, 5427), False, 'import resource\n')] |
import os
import cv2
import argparse
import numpy as np
from matplotlib import pyplot as plt
from roipoly import MultiRoi
# Command line: the only argument is the path of the image to label.
parser = argparse.ArgumentParser(description='Label stop sign image')
parser.add_argument('-i',
                    nargs=1,
                    help='input image path',
                    dest='input',
                    required=True)
args = parser.parse_args()
IMG_FILE = args.input[0]
# OpenCV loads BGR; convert to RGB so matplotlib displays colors correctly.
img = cv2.imread(IMG_FILE)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# (label name, matplotlib color used to draw that label's ROI outlines).
COLORS = [('COLOR_STOP_SIGN_RED', 'red'), ('COLOR_OTHER_RED', 'orangered'),
          ('COLOR_BROWN', 'brown'), ('COLOR_ORANGE', 'orange'),
          ('COLOR_BLUE', 'cyan'), ('COLOR_OTHER', 'black')]
rois = []          # every confirmed ROI, for the final overview plot
color_pixels = {}  # label name -> (N, 3) RGB pixels inside its ROIs
# NOTE(review): all_color_mask is never used below — candidate for removal.
all_color_mask = np.zeros(img.shape[:-1])
def prompt_is_ok(msg):
    """Print *msg* (no newline) and return True iff the user confirms.

    Accepts 'y' case-insensitively and tolerates surrounding whitespace
    (e.g. 'Y', ' y '); any other input counts as "no".  This is backward
    compatible with the previous exact-match on 'y'.
    """
    print(msg, end='')
    answer = input()
    return answer.strip().lower() == 'y'
# Interactively collect ROI pixel samples for each color class.
for color, roi_color in COLORS:
    is_ok = False
    if not prompt_is_ok(f'Do you want to label color {color}? [y/n]: '):
        # Skipped classes still get an (empty) entry in the output file.
        color_pixels[color] = np.array([])
        continue
    # Repeat until the user confirms the labeling for this color.
    while not is_ok:
        print(f'Labeling color {color} ...')
        fig = plt.figure()
        plt.imshow(img, interpolation='nearest', cmap='Greys')
        plt.title(f'Add ROI for Color:{color}')
        multiroi = MultiRoi(color_cycle=(roi_color,))
        # Union of all ROI masks drawn for this color (2-D, image-sized).
        tmask = np.zeros(img.shape[:-1])
        for name, roi in multiroi.rois.items():
            mask = roi.get_mask(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
            tmask += mask
        # Show only the selected pixels so the user can verify the mask.
        masked_img = img.copy()
        masked_img[tmask == 0, :] = 0
        plt.imshow(masked_img)
        plt.show()
        is_ok = prompt_is_ok(f'Is color {color} labeled correctly? [y/n]: ')
        if not is_ok:
            print(f'Please label color {color} again.')
        else:
            rois.extend(multiroi.rois.values())
            # Collect the RGB values of every pixel inside the mask.
            pixels = img[tmask != 0, :].reshape(-1, 3)
            color_pixels[color] = pixels
data = {**color_pixels}
# Optionally label the stop-sign region itself (stored as a binary mask).
if prompt_is_ok(f'Do you want to label stop signs region? [y/n]: '):
    is_ok = False
    while not is_ok:
        print(f'Labeling stop signs ...')
        fig = plt.figure()
        plt.imshow(img, interpolation='nearest', cmap='Greys')
        plt.title(f'Add ROI for stop signs')
        multiroi = MultiRoi(color_cycle=('g',))
        tmask = np.zeros(img.shape[:-1])
        for name, roi in multiroi.rois.items():
            mask = roi.get_mask(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY))
            tmask += mask
        masked_img = img.copy()
        masked_img[tmask == 0, :] = 0
        plt.imshow(masked_img)
        plt.show()
        is_ok = prompt_is_ok(f'Are stop signs labeled correctly? [y/n]: ')
        if not is_ok:
            print(f'Please label stop signs again.')
        else:
            rois.extend(multiroi.rois.values())
            # Binarize: overlapping ROIs may have summed to values > 1.
            tmask[tmask != 0] = 1
            stop_sign_mask = tmask
            stop_sign_roi = multiroi
    data['MASK_STOP_SIGN'] = stop_sign_mask
else:
    data['MASK_STOP_SIGN'] = np.zeros(img.shape[:-1])
img_name = os.path.splitext(IMG_FILE)[0]
# Overview figure: the original image with every confirmed ROI outlined.
plt.figure()
plt.imshow(img)
for roi in rois:
    roi.display_roi()
plt.axis('off')
plt.savefig(f'{img_name}-roi.png', bbox_inches='tight')
plt.title('Labeling Result')
plt.show()
# Persist the per-color pixel samples plus the stop-sign mask as <image>.npz.
np.savez(f'{img_name}.npz', **data)
| [
"matplotlib.pyplot.imshow",
"numpy.savez",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"os.path.splitext",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"roipoly.MultiRoi",
"cv2.cvtColor",
"matplotlib.pyplot.axis",
"cv2.imread",
"matplotli... | [((132, 192), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Label stop sign image"""'}), "(description='Label stop sign image')\n", (155, 192), False, 'import argparse\n'), ((422, 442), 'cv2.imread', 'cv2.imread', (['IMG_FILE'], {}), '(IMG_FILE)\n', (432, 442), False, 'import cv2\n'), ((449, 485), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (461, 485), False, 'import cv2\n'), ((732, 756), 'numpy.zeros', 'np.zeros', (['img.shape[:-1]'], {}), '(img.shape[:-1])\n', (740, 756), True, 'import numpy as np\n'), ((3050, 3062), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3060, 3062), True, 'from matplotlib import pyplot as plt\n'), ((3063, 3078), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3073, 3078), True, 'from matplotlib import pyplot as plt\n'), ((3118, 3133), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3126, 3133), True, 'from matplotlib import pyplot as plt\n'), ((3134, 3189), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{img_name}-roi.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{img_name}-roi.png', bbox_inches='tight')\n", (3145, 3189), True, 'from matplotlib import pyplot as plt\n'), ((3190, 3218), 'matplotlib.pyplot.title', 'plt.title', (['"""Labeling Result"""'], {}), "('Labeling Result')\n", (3199, 3218), True, 'from matplotlib import pyplot as plt\n'), ((3219, 3229), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3227, 3229), True, 'from matplotlib import pyplot as plt\n'), ((3231, 3266), 'numpy.savez', 'np.savez', (['f"""{img_name}.npz"""'], {}), "(f'{img_name}.npz', **data)\n", (3239, 3266), True, 'import numpy as np\n'), ((2983, 3007), 'numpy.zeros', 'np.zeros', (['img.shape[:-1]'], {}), '(img.shape[:-1])\n', (2991, 3007), True, 'import numpy as np\n'), ((3020, 3046), 'os.path.splitext', 'os.path.splitext', (['IMG_FILE'], {}), '(IMG_FILE)\n', (3036, 3046), False, 
'import os\n'), ((1006, 1018), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1014, 1018), True, 'import numpy as np\n'), ((1117, 1129), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1127, 1129), True, 'from matplotlib import pyplot as plt\n'), ((1138, 1192), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'interpolation': '"""nearest"""', 'cmap': '"""Greys"""'}), "(img, interpolation='nearest', cmap='Greys')\n", (1148, 1192), True, 'from matplotlib import pyplot as plt\n'), ((1201, 1240), 'matplotlib.pyplot.title', 'plt.title', (['f"""Add ROI for Color:{color}"""'], {}), "(f'Add ROI for Color:{color}')\n", (1210, 1240), True, 'from matplotlib import pyplot as plt\n'), ((1261, 1295), 'roipoly.MultiRoi', 'MultiRoi', ([], {'color_cycle': '(roi_color,)'}), '(color_cycle=(roi_color,))\n', (1269, 1295), False, 'from roipoly import MultiRoi\n'), ((1313, 1337), 'numpy.zeros', 'np.zeros', (['img.shape[:-1]'], {}), '(img.shape[:-1])\n', (1321, 1337), True, 'import numpy as np\n'), ((1562, 1584), 'matplotlib.pyplot.imshow', 'plt.imshow', (['masked_img'], {}), '(masked_img)\n', (1572, 1584), True, 'from matplotlib import pyplot as plt\n'), ((1593, 1603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1601, 1603), True, 'from matplotlib import pyplot as plt\n'), ((2107, 2119), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2117, 2119), True, 'from matplotlib import pyplot as plt\n'), ((2128, 2182), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'interpolation': '"""nearest"""', 'cmap': '"""Greys"""'}), "(img, interpolation='nearest', cmap='Greys')\n", (2138, 2182), True, 'from matplotlib import pyplot as plt\n'), ((2191, 2227), 'matplotlib.pyplot.title', 'plt.title', (['f"""Add ROI for stop signs"""'], {}), "(f'Add ROI for stop signs')\n", (2200, 2227), True, 'from matplotlib import pyplot as plt\n'), ((2248, 2276), 'roipoly.MultiRoi', 'MultiRoi', ([], {'color_cycle': "('g',)"}), "(color_cycle=('g',))\n", (2256, 2276), 
False, 'from roipoly import MultiRoi\n'), ((2294, 2318), 'numpy.zeros', 'np.zeros', (['img.shape[:-1]'], {}), '(img.shape[:-1])\n', (2302, 2318), True, 'import numpy as np\n'), ((2543, 2565), 'matplotlib.pyplot.imshow', 'plt.imshow', (['masked_img'], {}), '(masked_img)\n', (2553, 2565), True, 'from matplotlib import pyplot as plt\n'), ((2574, 2584), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2582, 2584), True, 'from matplotlib import pyplot as plt\n'), ((1418, 1455), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1430, 1455), False, 'import cv2\n'), ((2399, 2436), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (2411, 2436), False, 'import cv2\n')] |
import cv2
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Cropping2D
from keras.layers import Convolution2D, Conv2D
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.models import load_model
from keras.layers import Dropout
from random import shuffle
#path_data = './data/data/'
#path_data = './data_t1_moveCenterOnCurve/'
#path_data = './data_t1_2/'
# Root directory of the recorded driving data (driving_log.csv + IMG/).
path_data = './data/'
# Existing model to resume from (if present) and destination of the new one.
old_model = "./dac_net_v8_regularization07_epoch5_data_gray.h5"
new_model = "./dac_net_v20_regularization05_twice_epoch20_patient8_data.h5"
def generator(samples, batch_size=32):
    """Yield shuffled batches of (images, steering angles) indefinitely.

    For each CSV row the center/left/right camera images are loaded and
    converted BGR->RGB; the side-camera angles are offset by ``correction``.
    Frames taken in a curve (non-zero center angle) are additionally flipped
    horizontally with negated angle to augment and balance the data.

    Fix vs. original: ``float(batch_sample[3])`` was parsed three times per
    row; it is now parsed once (behavior unchanged).

    Args:
        samples: list of rows from driving_log.csv (paths + telemetry).
        batch_size: number of CSV rows per batch; each row contributes 3
            images, or 6 when flip augmentation applies.

    Yields:
        ``(X, y)``: jointly shuffled numpy arrays of images and angles.
    """
    num_samples = len(samples)
    correction = 0.1  # steering correction for the left/right cameras
    while 1:  # loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset + batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                # Parse the recorded steering angle once per row.
                steering = float(batch_sample[3])
                # center image
                name = path_data + 'IMG/' + batch_sample[0].split('/')[-1]
                center_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
                images.append(center_image)
                angles.append(steering)
                # left image
                name = path_data + 'IMG/' + batch_sample[1].split('/')[-1]
                left_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
                left_angle = steering + correction
                images.append(left_image)
                angles.append(left_angle)
                # right image
                name = path_data + 'IMG/' + batch_sample[2].split('/')[-1]
                right_image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
                right_angle = steering - correction
                images.append(right_image)
                angles.append(right_angle)
                # Augment only curves: flipping straight-road frames would
                # further imbalance the (already zero-heavy) steering data.
                if steering != 0:
                    images.append(cv2.flip(center_image, 1))
                    angles.append(steering * -1.0)
                    images.append(cv2.flip(left_image, 1))
                    angles.append(left_angle * -1.0)
                    images.append(cv2.flip(right_image, 1))
                    angles.append(right_angle * -1.0)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
def main():
    """Load the driving log, build or resume the model, train, and plot loss."""
    samples = []
    with open(path_data + 'driving_log.csv') as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            samples.append(line)
    print("num samples data all!!!!: ")
    print(len(samples))
    # 80/20 train/validation split of the CSV rows.
    train_samples, validation_samples = train_test_split(samples, test_size=0.2)
    # compile and train the model using the generator function
    train_generator = generator(train_samples, batch_size=16)
    validation_generator = generator(validation_samples, batch_size=16)
    if not os.path.isfile(old_model):
        print("Creating a new model")
        # NVIDIA-style CNN: normalize -> resize -> crop -> conv stack -> FC head.
        model = Sequential()
        # Add a lambda layer in order to convert to grayscale
        # model.add(Lambda(lambda x: tf.image.rgb_to_grayscale(x), input_shape=(160,320,3)))
        #model.add(Lambda(lambda x:x/255 - 0.5, input_shape=(160,320,3))) # this for normalization, and the '-0.5' is for mean centering the image
        # Normalize pixel values to [-1, 1].
        model.add(Lambda(lambda x:x/127.5 - 1.0, input_shape=(160,320,3)))
        #model.add(Lambda(lambda x:x/255 - 0.5))
        # Halve the resolution before cropping to cut computation.
        model.add(Lambda(lambda x: tf.image.resize_images(x, size=[80,160])))
        #model.add(Cropping2D(cropping=((55,25), (0,0)))) # remove the top 55 pixels and the botton 25 pixels.
        # Crop sky (top 25 rows) and car hood (bottom 12 rows) of the resized image.
        model.add(Cropping2D(cropping=((25,12), (0,0))))
        #model.add(Flatten()) #model.add(Flatten(input_shape=(160,320,3)))
        # changing stride to (1,1) as we have reduces the input image by 2
        model.add(Conv2D(24, kernel_size=(5,5), strides=(2,2), padding = 'valid', activation="relu"))
        #model.add(Dropout(0.5))
        model.add(Conv2D(36,kernel_size=(5,5),strides=(2,2), padding='valid', activation="relu"))
        #model.add(Dropout(0.2))
        model.add(Conv2D(48,kernel_size=(5,5),strides=(1,1), padding='valid', activation="relu"))
        #model.add(Dropout(0.2))
        model.add(Conv2D(64, kernel_size=(3,3),strides=(1,1), padding='valid', activation="relu"))
        #model.add(Dropout(0.2))
        #model.add(Conv2D(64,kernel_size=(3,3),strides=(1,1), padding='valid', activation="relu"))
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(100))
        model.add(Dropout(0.5))
        model.add(Dense(50))
        #model.add(Dropout(0.2))
        model.add(Dense(10))
        #model.add(Dropout(0.7))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='Adam')
    else:
        print("Loading model")
        # Resume training from the previously saved model instead of rebuilding.
        model = load_model(old_model, custom_objects={"tf": tf})
    #history_object = model.fit_generator(train_generator, samples_per_epoch= len(train_samples), validation_data=validation_generator, nb_val_samples=len(validation_samples), nb_epoch=3, verbose = 1)
    # Keep only the lowest-validation-loss weights; stop after 8 epochs
    # without at least 3e-4 improvement.
    checkpoint = ModelCheckpoint(filepath=new_model, monitor='val_loss', save_best_only=True)
    stopper = EarlyStopping(monitor='val_loss', min_delta=0.0003, patience=8)
    history_object = model.fit_generator(train_generator, steps_per_epoch= len(train_samples), validation_data=validation_generator, validation_steps=len(validation_samples), epochs=20, verbose = 2, callbacks=[checkpoint, stopper])
    ### plot the training and validation loss for each epoch
    plt.plot(history_object.history['loss'])
    plt.plot(history_object.history['val_loss'])
    plt.title('model mean squared error loss')
    plt.ylabel('mean squared error loss')
    plt.xlabel('epoch')
    plt.legend(['training set', 'validation set'], loc='upper right')
    #plt.show()
    plt.savefig("meanSquaredErrorLoss_v20_regularization05_twice_epoch20_patient8_data.png")
# Save the model
#model.save("dac_net_v1_epoch1_sim.h5")
if __name__ == "__main__":
    # Entry point: train (or resume) the model when run as a script.
    main()
| [
"keras.layers.Conv2D",
"tensorflow.image.resize_images",
"matplotlib.pyplot.ylabel",
"numpy.array",
"keras.layers.Dense",
"keras.layers.Cropping2D",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.callbacks.EarlyStopping",
"csv.reader",
"matplotlib.pyplot.savefig",
"random.shuffle... | [((3642, 3682), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)'}), '(samples, test_size=0.2)\n', (3658, 3682), False, 'from sklearn.model_selection import train_test_split\n'), ((6101, 6177), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'new_model', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath=new_model, monitor='val_loss', save_best_only=True)\n", (6116, 6177), False, 'from keras.callbacks import ModelCheckpoint\n'), ((6192, 6255), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.0003)', 'patience': '(8)'}), "(monitor='val_loss', min_delta=0.0003, patience=8)\n", (6205, 6255), False, 'from keras.callbacks import EarlyStopping\n'), ((6555, 6595), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['loss']"], {}), "(history_object.history['loss'])\n", (6563, 6595), True, 'import matplotlib.pyplot as plt\n'), ((6600, 6644), 'matplotlib.pyplot.plot', 'plt.plot', (["history_object.history['val_loss']"], {}), "(history_object.history['val_loss'])\n", (6608, 6644), True, 'import matplotlib.pyplot as plt\n'), ((6649, 6691), 'matplotlib.pyplot.title', 'plt.title', (['"""model mean squared error loss"""'], {}), "('model mean squared error loss')\n", (6658, 6691), True, 'import matplotlib.pyplot as plt\n'), ((6696, 6733), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean squared error loss"""'], {}), "('mean squared error loss')\n", (6706, 6733), True, 'import matplotlib.pyplot as plt\n'), ((6738, 6757), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (6748, 6757), True, 'import matplotlib.pyplot as plt\n'), ((6762, 6827), 'matplotlib.pyplot.legend', 'plt.legend', (["['training set', 'validation set']"], {'loc': '"""upper right"""'}), "(['training set', 'validation set'], loc='upper right')\n", (6772, 6827), True, 'import matplotlib.pyplot as plt\n'), 
((6848, 6946), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""meanSquaredErrorLoss_v20_regularization05_twice_epoch20_patient8_data.png"""'], {}), "(\n 'meanSquaredErrorLoss_v20_regularization05_twice_epoch20_patient8_data.png'\n )\n", (6859, 6946), True, 'import matplotlib.pyplot as plt\n'), ((992, 1008), 'random.shuffle', 'shuffle', (['samples'], {}), '(samples)\n', (999, 1008), False, 'from random import shuffle\n'), ((3455, 3474), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (3465, 3474), False, 'import csv\n'), ((3893, 3918), 'os.path.isfile', 'os.path.isfile', (['old_model'], {}), '(old_model)\n', (3907, 3918), False, 'import os\n'), ((3975, 3987), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3985, 3987), False, 'from keras.models import Sequential\n'), ((5830, 5878), 'keras.models.load_model', 'load_model', (['old_model'], {'custom_objects': "{'tf': tf}"}), "(old_model, custom_objects={'tf': tf})\n", (5840, 5878), False, 'from keras.models import load_model\n'), ((3237, 3253), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (3245, 3253), True, 'import numpy as np\n'), ((3276, 3292), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (3284, 3292), True, 'import numpy as np\n'), ((4309, 4369), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 127.5 - 1.0)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 127.5 - 1.0, input_shape=(160, 320, 3))\n', (4315, 4369), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4622, 4661), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((25, 12), (0, 0))'}), '(cropping=((25, 12), (0, 0)))\n', (4632, 4661), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((4830, 4917), 'keras.layers.Conv2D', 'Conv2D', (['(24)'], {'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(24, kernel_size=(5, 5), strides=(2, 2), padding='valid', activation=\n 'relu')\n", (4836, 
4917), False, 'from keras.layers import Convolution2D, Conv2D\n'), ((4965, 5052), 'keras.layers.Conv2D', 'Conv2D', (['(36)'], {'kernel_size': '(5, 5)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(36, kernel_size=(5, 5), strides=(2, 2), padding='valid', activation=\n 'relu')\n", (4971, 5052), False, 'from keras.layers import Convolution2D, Conv2D\n'), ((5096, 5183), 'keras.layers.Conv2D', 'Conv2D', (['(48)'], {'kernel_size': '(5, 5)', 'strides': '(1, 1)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(48, kernel_size=(5, 5), strides=(1, 1), padding='valid', activation=\n 'relu')\n", (5102, 5183), False, 'from keras.layers import Convolution2D, Conv2D\n'), ((5227, 5314), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), strides=(1, 1), padding='valid', activation=\n 'relu')\n", (5233, 5314), False, 'from keras.layers import Convolution2D, Conv2D\n'), ((5459, 5468), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5466, 5468), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((5489, 5501), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5496, 5501), False, 'from keras.layers import Dropout\n'), ((5522, 5532), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (5527, 5532), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((5553, 5565), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5560, 5565), False, 'from keras.layers import Dropout\n'), ((5586, 5595), 'keras.layers.Dense', 'Dense', (['(50)'], {}), '(50)\n', (5591, 5595), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((5648, 5657), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (5653, 5657), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((5710, 5718), 'keras.layers.Dense', 'Dense', (['(1)'], 
{}), '(1)\n', (5715, 5718), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D\n'), ((1360, 1376), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (1370, 1376), False, 'import cv2\n'), ((1408, 1453), 'cv2.cvtColor', 'cv2.cvtColor', (['center_image', 'cv2.COLOR_BGR2RGB'], {}), '(center_image, cv2.COLOR_BGR2RGB)\n', (1420, 1453), False, 'import cv2\n'), ((1728, 1744), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (1738, 1744), False, 'import cv2\n'), ((1774, 1817), 'cv2.cvtColor', 'cv2.cvtColor', (['left_image', 'cv2.COLOR_BGR2RGB'], {}), '(left_image, cv2.COLOR_BGR2RGB)\n', (1786, 1817), False, 'import cv2\n'), ((2100, 2116), 'cv2.imread', 'cv2.imread', (['name'], {}), '(name)\n', (2110, 2116), False, 'import cv2\n'), ((2147, 2191), 'cv2.cvtColor', 'cv2.cvtColor', (['right_image', 'cv2.COLOR_BGR2RGB'], {}), '(right_image, cv2.COLOR_BGR2RGB)\n', (2159, 2191), False, 'import cv2\n'), ((3311, 3350), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (3332, 3350), False, 'import sklearn\n'), ((4450, 4491), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['x'], {'size': '[80, 160]'}), '(x, size=[80, 160])\n', (4472, 4491), True, 'import tensorflow as tf\n'), ((2837, 2862), 'cv2.flip', 'cv2.flip', (['center_image', '(1)'], {}), '(center_image, 1)\n', (2845, 2862), False, 'import cv2\n'), ((2987, 3010), 'cv2.flip', 'cv2.flip', (['left_image', '(1)'], {}), '(left_image, 1)\n', (2995, 3010), False, 'import cv2\n'), ((3134, 3158), 'cv2.flip', 'cv2.flip', (['right_image', '(1)'], {}), '(right_image, 1)\n', (3142, 3158), False, 'import cv2\n')] |
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to test consistency between Cirq and TFQ circuit execution ops."""
from unittest import mock
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from scipy import stats
import cirq
from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops
from tensorflow_quantum.python import util
# Number of random circuits to use in a test batch.
BATCH_SIZE = 15
# These get used everywhere
# Shared simulator instances: state-vector (WF) and density-matrix (DM).
WF_SIM = cirq.sim.sparse_simulator.Simulator()
DM_SIM = cirq.sim.density_matrix_simulator.DensityMatrixSimulator()
# Each op list covers, in order: TFQ's C++ backend (backend=None), the Cirq
# state-vector simulator, the Cirq density-matrix simulator, and the C++
# backend again with quantum_concurrent=False.  Indices line up with SIMS.
EXPECTATION_OPS = [
    circuit_execution_ops.get_expectation_op(backend=None,
                                             quantum_concurrent=True),
    circuit_execution_ops.get_expectation_op(backend=WF_SIM,
                                             quantum_concurrent=True),
    circuit_execution_ops.get_expectation_op(backend=DM_SIM,
                                             quantum_concurrent=True),
    # For timing interests C++ backend is tested in quantum_concurrent mode.
    circuit_execution_ops.get_expectation_op(backend=None,
                                             quantum_concurrent=False)
]
SAMPLING_OPS = [
    circuit_execution_ops.get_sampling_op(backend=None,
                                          quantum_concurrent=True),
    circuit_execution_ops.get_sampling_op(backend=WF_SIM,
                                          quantum_concurrent=True),
    circuit_execution_ops.get_sampling_op(backend=DM_SIM,
                                          quantum_concurrent=True),
    # For timing interests C++ backend is tested in quantum_concurrent mode.
    circuit_execution_ops.get_sampling_op(backend=None,
                                          quantum_concurrent=False)
]
STATE_OPS = [
    circuit_execution_ops.get_state_op(backend=None, quantum_concurrent=True),
    circuit_execution_ops.get_state_op(backend=WF_SIM, quantum_concurrent=True),
    circuit_execution_ops.get_state_op(backend=DM_SIM, quantum_concurrent=True),
    # For timing interests C++ backend is tested in quantum_concurrent mode.
    circuit_execution_ops.get_state_op(backend=None, quantum_concurrent=False)
]
SAMPLED_EXPECTATION_OPS = [
    circuit_execution_ops.get_sampled_expectation_op(backend=None,
                                                     quantum_concurrent=True),
    circuit_execution_ops.get_sampled_expectation_op(backend=WF_SIM,
                                                     quantum_concurrent=True),
    circuit_execution_ops.get_sampled_expectation_op(backend=DM_SIM,
                                                     quantum_concurrent=True),
    # For timing interests C++ backend is tested in quantum_concurrent mode.
    circuit_execution_ops.get_sampled_expectation_op(backend=None,
                                                     quantum_concurrent=False),
]
# Reference simulator paired (by index) with each op in the lists above.
SIMS = [WF_SIM, WF_SIM, DM_SIM, WF_SIM]
class OpGetterInputChecks(tf.test.TestCase):
    """Check that the op getters handle inputs correctly."""

    def test_get_expectation_inputs(self):
        """Test that get expectation only accepts inputs it should."""
        # Valid backends: None (TFQ C++ backend) or a cirq.SimulatesFinalState.
        circuit_execution_ops.get_expectation_op()
        circuit_execution_ops.get_expectation_op(backend=cirq.Simulator())
        circuit_execution_ops.get_expectation_op(
            backend=cirq.DensityMatrixSimulator())
        circuit_execution_ops.get_expectation_op()
        # A sampler-only backend cannot compute analytic expectations.
        with self.assertRaisesRegex(NotImplementedError,
                                    expected_regex='Sample-based'):
            mock_engine = mock.Mock()
            circuit_execution_ops.get_expectation_op(
                cirq.google.QuantumEngineSampler(engine=mock_engine,
                                                 processor_id='test',
                                                 gate_set=cirq.google.XMON))
        with self.assertRaisesRegex(
                TypeError, expected_regex="a Cirq.SimulatesFinalState"):
            circuit_execution_ops.get_expectation_op(backend="junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="must be type bool."):
            circuit_execution_ops.get_expectation_op(quantum_concurrent='junk')

    def test_get_sampled_expectation_inputs(self):
        """Test that get expectation only accepts inputs it should."""
        # Sampled expectation accepts any cirq.Sampler, including engine samplers.
        circuit_execution_ops.get_sampled_expectation_op()
        circuit_execution_ops.get_sampled_expectation_op(
            backend=cirq.Simulator())
        circuit_execution_ops.get_sampled_expectation_op(
            backend=cirq.DensityMatrixSimulator())
        mock_engine = mock.Mock()
        circuit_execution_ops.get_sampled_expectation_op(
            cirq.google.QuantumEngineSampler(engine=mock_engine,
                                             processor_id='test',
                                             gate_set=cirq.google.XMON))
        with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"):
            circuit_execution_ops.get_sampled_expectation_op(backend="junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="must be type bool."):
            circuit_execution_ops.get_sampled_expectation_op(
                quantum_concurrent='junk')

    def test_get_samples_inputs(self):
        """Test that get_samples only accepts inputs it should."""
        circuit_execution_ops.get_sampling_op()
        circuit_execution_ops.get_sampling_op(backend=cirq.Simulator())
        circuit_execution_ops.get_sampling_op(
            backend=cirq.DensityMatrixSimulator())
        mock_engine = mock.Mock()
        circuit_execution_ops.get_sampling_op(
            backend=cirq.google.QuantumEngineSampler(engine=mock_engine,
                                                     processor_id='test',
                                                     gate_set=cirq.google.XMON))
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="Expected a Cirq.Sampler"):
            circuit_execution_ops.get_sampling_op(backend="junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="must be type bool."):
            circuit_execution_ops.get_sampling_op(quantum_concurrent='junk')

    def test_get_state_inputs(self):
        """Test that get_states only accepts inputs it should."""
        circuit_execution_ops.get_state_op()
        circuit_execution_ops.get_state_op(backend=cirq.Simulator())
        circuit_execution_ops.get_state_op(
            backend=cirq.DensityMatrixSimulator())
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="Cirq.SimulatesFinalState"):
            circuit_execution_ops.get_state_op(backend="junk")
        # Samplers cannot return final states, so they are rejected here too.
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="Cirq.SimulatesFinalState"):
            mock_engine = mock.Mock()
            circuit_execution_ops.get_state_op(
                backend=cirq.google.QuantumEngineSampler(
                    engine=mock_engine,
                    processor_id='test',
                    gate_set=cirq.google.XMON))
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="must be type bool."):
            circuit_execution_ops.get_state_op(quantum_concurrent='junk')
class ExecutionOpsConsistentyTest(tf.test.TestCase, parameterized.TestCase):
"""Test all ops produce equivalent output to one another."""
    @parameterized.parameters([{
        'op_and_sim': (op, sim)
    } for (op, sim) in zip(STATE_OPS, SIMS)])
    def test_supported_gates_consistent(self, op_and_sim):
        """Ensure that supported gates are consistent across backends."""
        op = op_and_sim[0]
        sim = op_and_sim[1]
        qubits = cirq.GridQubit.rect(1, 5)
        circuit_batch = []
        # Assumed mapping: gate -> number of qubits it acts on (1 or 2) —
        # the code below only handles those two cases.
        gate_ref = util.get_supported_gates()
        for gate in gate_ref:
            # Create a circuit with non zero entries on real
            # and imaginary values.
            c = cirq.Circuit()
            for qubit in qubits:
                c += cirq.Circuit(cirq.Y(qubit)**0.125)
            if gate_ref[gate] == 2:
                op_qubits = np.random.choice(qubits, size=2, replace=False)
                c += cirq.Circuit(gate(*op_qubits))
            elif gate_ref[gate] == 1:
                op_qubits = np.random.choice(qubits, size=1, replace=False)
                c += cirq.Circuit(gate(*op_qubits))
            else:
                raise ValueError(
                    "Unable to test supported gates across all ops."
                    "please update circuit_execution_ops_test.py")
            circuit_batch.append(c)
        op_states = op(util.convert_to_tensor(circuit_batch), [],
                       [[]] * len(circuit_batch)).to_list()
        # Reference states computed directly with Cirq (empty resolvers,
        # since the circuits contain no symbols).
        cirq_states = batch_util.batch_calculate_state(
            circuit_batch, [cirq.ParamResolver({}) for _ in circuit_batch], sim)
        self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op, sim) in zip(STATE_OPS, SIMS)],
'n_qubits': [3, 7]
})))
def test_simulate_state_no_symbols(self, op_and_sim, n_qubits):
"""Compute states using cirq and tfq without symbols."""
op = op_and_sim[0]
sim = op_and_sim[1]
circuit_batch, resolver_batch = util.random_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), BATCH_SIZE)
op_states = op(util.convert_to_tensor(circuit_batch), [],
[[]] * BATCH_SIZE).to_list()
cirq_states = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op, sim) in zip(STATE_OPS, SIMS)],
'n_qubits': [3, 7],
'symbol_names': [['a'], ['a', 'b'],
['a', 'b', 'c', 'd', 'e']]
})))
def test_simulate_state_with_symbols(self, op_and_sim, n_qubits,
symbol_names):
"""Compute states using cirq and tfq with symbols."""
op = op_and_sim[0]
sim = op_and_sim[1]
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
cirq.GridQubit.rect(1, n_qubits), symbol_names, BATCH_SIZE)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
op_states = op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array).to_list()
cirq_states = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim) for (
op,
sim) in zip(STATE_OPS[:-2] +
[STATE_OPS[-1]], SIMS[:-2] + [SIMS[-1]])],
})))
def test_simulate_state_large(self, op_and_sim):
"""Test a reasonably large and complex circuit."""
op, sim = op_and_sim
symbol_names = []
circuit_batch, resolver_batch = \
util.random_circuit_resolver_batch(
cirq.GridQubit.rect(4, 4), 5)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch]).astype(np.float32)
op_states = op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array).to_list()
cirq_states = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(**{
'op_and_sim': [(op, sim) for (op, sim) in zip(STATE_OPS, SIMS)],
})))
def test_simulate_state_empty(self, op_and_sim):
"""Test empty circuits for states using cirq and tfq."""
op = op_and_sim[0]
sim = op_and_sim[1]
circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
op_states = op(util.convert_to_tensor(circuit_batch), [],
[[]] * BATCH_SIZE).to_list()
cirq_states = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op, sim) in zip(EXPECTATION_OPS, SIMS)],
'n_qubits': [3, 7],
'symbol_names': [['a', 'b', 'c', 'd', 'e']],
'max_paulisum_length': [6]
})))
def test_analytical_expectation(self, op_and_sim, n_qubits, symbol_names,
max_paulisum_length):
"""Compute expectations using cirq and tfq."""
op = op_and_sim[0]
sim = op_and_sim[1]
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, BATCH_SIZE)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
BATCH_SIZE)
op_expectations = op(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[psum] for psum in pauli_sums]))
cirq_expectations = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, [[x] for x in pauli_sums], sim)
self.assertAllClose(op_expectations.numpy().flatten(),
cirq_expectations.flatten(),
rtol=1e-5,
atol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op, sim) in zip(EXPECTATION_OPS, SIMS)],
'n_qubits': [3],
'symbol_names': [['a', 'b', 'c', 'd', 'e']],
'max_paulisum_length': [6]
})))
def test_analytical_expectation_empty(self, op_and_sim, n_qubits,
symbol_names, max_paulisum_length):
"""Test empty circuits for analytical expectation using cirq and tfq."""
op = op_and_sim[0]
sim = op_and_sim[1]
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
symbol_values_array = np.array(
[[0.0 for _ in symbol_names] for _ in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
BATCH_SIZE)
op_expectations = op(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[psum] for psum in pauli_sums]))
cirq_expectations = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, [[x] for x in pauli_sums], sim)
self.assertAllClose(op_expectations.numpy().flatten(),
cirq_expectations.flatten(),
rtol=1e-5,
atol=1e-5)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim) for (
op, sim) in zip(SAMPLED_EXPECTATION_OPS, SIMS)],
'n_qubits': [3, 7],
'symbol_names': [['a', 'b', 'c', 'd', 'e']],
'max_paulisum_length': [6]
})))
def test_sampled_expectation(self, op_and_sim, n_qubits, symbol_names,
max_paulisum_length):
"""Compute sampled expectations using cirq and tfq."""
op = op_and_sim[0]
sim = op_and_sim[1]
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, BATCH_SIZE)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
BATCH_SIZE)
num_samples = [[10000]] * BATCH_SIZE
op_expectations = op(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[psum] for psum in pauli_sums]),
num_samples)
cirq_expectations = batch_util.batch_calculate_sampled_expectation(
circuit_batch, resolver_batch, [[x] for x in pauli_sums],
num_samples, sim)
self.assertAllClose(op_expectations.numpy().flatten(),
cirq_expectations.flatten(),
rtol=1e-1,
atol=1e-1)
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim) for (
op, sim) in zip(SAMPLED_EXPECTATION_OPS, SIMS)],
'n_qubits': [3],
'symbol_names': [['a', 'b', 'c', 'd', 'e']],
'max_paulisum_length': [6]
})))
def test_sampled_expectation_empty(self, op_and_sim, n_qubits, symbol_names,
max_paulisum_length):
"""Test empty circuits for sampled expectation using cirq and tfq."""
op = op_and_sim[0]
sim = op_and_sim[1]
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
symbol_values_array = np.array(
[[0.0 for _ in symbol_names] for _ in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length,
BATCH_SIZE)
num_samples = [[1000]] * BATCH_SIZE
op_expectations = op(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[psum] for psum in pauli_sums]),
num_samples)
cirq_expectations = batch_util.batch_calculate_sampled_expectation(
circuit_batch, resolver_batch, [[x] for x in pauli_sums],
num_samples, sim)
self.assertAllClose(op_expectations.numpy().flatten(),
cirq_expectations.flatten(),
rtol=1e-1,
atol=1e-1)
# keep the qubit count low here, all computations scale exponentially
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op, sim) in zip(SAMPLING_OPS, SIMS)],
'n_qubits': [6],
'symbol_names': [['a', 'b', 'c', 'd', 'e']]
})))
def test_sampling(self, op_and_sim, n_qubits, symbol_names):
"""Compare sampling with tfq ops and Cirq."""
op = op_and_sim[0]
sim = op_and_sim[1]
qubits = cirq.GridQubit.rect(1, n_qubits)
n_samples = int((2**n_qubits) * 1000)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, BATCH_SIZE, 30)
for i in range(BATCH_SIZE):
circuit_batch[i] += cirq.Circuit(
*[cirq.H(qubit) for qubit in qubits])
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
op_samples = np.array(
op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [n_samples]).to_list())
op_histograms = [
np.histogram(
sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
range=(0, 2**len(qubits)),
bins=2**len(qubits))[0] for sample in op_samples
]
cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, sim)
cirq_histograms = [
np.histogram(
sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
range=(0, 2**len(qubits)),
bins=2**len(qubits))[0] for sample in cirq_samples
]
for a, b in zip(op_histograms, cirq_histograms):
self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)
# keep the qubit count low here, all computations scale exponentially
@parameterized.parameters(
list(
util.kwargs_cartesian_product(
**{
'op_and_sim': [(op, sim)
for (op, sim) in zip(SAMPLING_OPS, SIMS)],
'n_qubits': [3],
'symbol_names': [['a', 'b', 'c', 'd', 'e']]
})))
def test_sampling_empty(self, op_and_sim, n_qubits, symbol_names):
"""Test empty circuits for sampling using cirq and tfq."""
op = op_and_sim[0]
sim = op_and_sim[1]
qubits = cirq.GridQubit.rect(1, n_qubits)
n_samples = int((2**n_qubits) * 1000)
circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
symbol_values_array = np.array(
[[0.0 for _ in symbol_names] for _ in resolver_batch])
op_samples = np.array(
op(util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [n_samples]).to_list())
op_histograms = [
np.histogram(
sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
range=(0, 2**len(qubits)),
bins=2**len(qubits))[0] for sample in op_samples
]
cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, sim)
cirq_histograms = [
np.histogram(
sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
range=(0, 2**len(qubits)),
bins=2**len(qubits))[0] for sample in cirq_samples
]
for a, b in zip(op_histograms, cirq_histograms):
self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)
if __name__ == '__main__':
    # Delegate to TensorFlow's test runner so all TestCase classes above run.
    tf.test.main()
| [
"cirq.ParamResolver",
"tensorflow_quantum.core.ops.batch_util.batch_calculate_expectation",
"cirq.Circuit",
"numpy.array",
"tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op",
"tensorflow_quantum.core.ops.batch_util.batch_calculate_sampled_expectation",
"numpy.arange",
"cirq.sim.den... | [((1143, 1180), 'cirq.sim.sparse_simulator.Simulator', 'cirq.sim.sparse_simulator.Simulator', ([], {}), '()\n', (1178, 1180), False, 'import cirq\n'), ((1190, 1248), 'cirq.sim.density_matrix_simulator.DensityMatrixSimulator', 'cirq.sim.density_matrix_simulator.DensityMatrixSimulator', ([], {}), '()\n', (1246, 1248), False, 'import cirq\n'), ((1274, 1353), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {'backend': 'None', 'quantum_concurrent': '(True)'}), '(backend=None, quantum_concurrent=True)\n', (1314, 1353), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((1404, 1490), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {'backend': 'WF_SIM', 'quantum_concurrent': '(True)'}), '(backend=WF_SIM, quantum_concurrent\n =True)\n', (1444, 1490), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((1536, 1622), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {'backend': 'DM_SIM', 'quantum_concurrent': '(True)'}), '(backend=DM_SIM, quantum_concurrent\n =True)\n', (1576, 1622), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((1745, 1830), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {'backend': 'None', 'quantum_concurrent': '(False)'}), '(backend=None, quantum_concurrent=False\n )\n', (1785, 1830), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((1895, 1971), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {'backend': 'None', 'quantum_concurrent': '(True)'}), '(backend=None, quantum_concurrent=True)\n', (1932, 1971), False, 'from 
tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2019, 2097), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {'backend': 'WF_SIM', 'quantum_concurrent': '(True)'}), '(backend=WF_SIM, quantum_concurrent=True)\n', (2056, 2097), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2145, 2223), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {'backend': 'DM_SIM', 'quantum_concurrent': '(True)'}), '(backend=DM_SIM, quantum_concurrent=True)\n', (2182, 2223), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2348, 2425), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {'backend': 'None', 'quantum_concurrent': '(False)'}), '(backend=None, quantum_concurrent=False)\n', (2385, 2425), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2489, 2562), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {'backend': 'None', 'quantum_concurrent': '(True)'}), '(backend=None, quantum_concurrent=True)\n', (2523, 2562), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2568, 2643), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {'backend': 'WF_SIM', 'quantum_concurrent': '(True)'}), '(backend=WF_SIM, quantum_concurrent=True)\n', (2602, 2643), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2649, 2724), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {'backend': 'DM_SIM', 'quantum_concurrent': '(True)'}), '(backend=DM_SIM, quantum_concurrent=True)\n', (2683, 2724), False, 'from 
tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2807, 2881), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {'backend': 'None', 'quantum_concurrent': '(False)'}), '(backend=None, quantum_concurrent=False)\n', (2841, 2881), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((2917, 3008), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {'backend': 'None', 'quantum_concurrent': '(True)'}), '(backend=None,\n quantum_concurrent=True)\n', (2965, 3008), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((3063, 3156), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {'backend': 'WF_SIM', 'quantum_concurrent': '(True)'}), '(backend=WF_SIM,\n quantum_concurrent=True)\n', (3111, 3156), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((3211, 3304), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {'backend': 'DM_SIM', 'quantum_concurrent': '(True)'}), '(backend=DM_SIM,\n quantum_concurrent=True)\n', (3259, 3304), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((3436, 3528), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {'backend': 'None', 'quantum_concurrent': '(False)'}), '(backend=None,\n quantum_concurrent=False)\n', (3484, 3528), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((25034, 25048), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (25046, 25048), True, 'import tensorflow as tf\n'), ((3853, 3895), 
'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {}), '()\n', (3893, 3895), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((4080, 4122), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {}), '()\n', (4120, 4122), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((5068, 5118), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {}), '()\n', (5116, 5118), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((5346, 5357), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5355, 5357), False, 'from unittest import mock\n'), ((6120, 6159), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {}), '()\n', (6157, 6159), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((6352, 6363), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (6361, 6363), False, 'from unittest import mock\n'), ((7142, 7178), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {}), '()\n', (7176, 7178), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((8590, 8615), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', '(5)'], {}), '(1, 5)\n', (8609, 8615), False, 'import cirq\n'), ((8663, 8689), 'tensorflow_quantum.python.util.get_supported_gates', 'util.get_supported_gates', ([], {}), '()\n', (8687, 8689), False, 'from tensorflow_quantum.python import util\n'), ((10585, 10653), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_state', 'batch_util.batch_calculate_state', (['circuit_batch', 'resolver_batch', 'sim'], {}), '(circuit_batch, resolver_batch, 
sim)\n', (10617, 10653), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((11641, 11733), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (11649, 11733), True, 'import numpy as np\n'), ((11924, 11992), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_state', 'batch_util.batch_calculate_state', (['circuit_batch', 'resolver_batch', 'sim'], {}), '(circuit_batch, resolver_batch, sim)\n', (11956, 11992), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((13100, 13168), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_state', 'batch_util.batch_calculate_state', (['circuit_batch', 'resolver_batch', 'sim'], {}), '(circuit_batch, resolver_batch, sim)\n', (13132, 13168), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((13949, 14017), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_state', 'batch_util.batch_calculate_state', (['circuit_batch', 'resolver_batch', 'sim'], {}), '(circuit_batch, resolver_batch, sim)\n', (13981, 14017), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((14820, 14852), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (14839, 14852), False, 'import cirq\n'), ((14907, 14982), 'tensorflow_quantum.python.util.random_symbol_circuit_resolver_batch', 'util.random_symbol_circuit_resolver_batch', (['qubits', 'symbol_names', 'BATCH_SIZE'], {}), '(qubits, symbol_names, BATCH_SIZE)\n', (14948, 14982), False, 'from tensorflow_quantum.python import util\n'), ((15031, 15123), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (15039, 15123), 
True, 'import numpy as np\n'), ((15182, 15245), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', 'max_paulisum_length', 'BATCH_SIZE'], {}), '(qubits, max_paulisum_length, BATCH_SIZE)\n', (15204, 15245), False, 'from tensorflow_quantum.python import util\n'), ((15517, 15622), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_expectation', 'batch_util.batch_calculate_expectation', (['circuit_batch', 'resolver_batch', '[[x] for x in pauli_sums]', 'sim'], {}), '(circuit_batch, resolver_batch, [[x] for\n x in pauli_sums], sim)\n', (15555, 15622), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((16538, 16570), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (16557, 16570), False, 'import cirq\n'), ((16747, 16812), 'numpy.array', 'np.array', (['[[(0.0) for _ in symbol_names] for _ in resolver_batch]'], {}), '([[(0.0) for _ in symbol_names] for _ in resolver_batch])\n', (16755, 16812), True, 'import numpy as np\n'), ((16846, 16909), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', 'max_paulisum_length', 'BATCH_SIZE'], {}), '(qubits, max_paulisum_length, BATCH_SIZE)\n', (16868, 16909), False, 'from tensorflow_quantum.python import util\n'), ((17181, 17286), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_expectation', 'batch_util.batch_calculate_expectation', (['circuit_batch', 'resolver_batch', '[[x] for x in pauli_sums]', 'sim'], {}), '(circuit_batch, resolver_batch, [[x] for\n x in pauli_sums], sim)\n', (17219, 17286), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((18167, 18199), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (18186, 18199), False, 'import cirq\n'), ((18254, 18329), 'tensorflow_quantum.python.util.random_symbol_circuit_resolver_batch', 'util.random_symbol_circuit_resolver_batch', (['qubits', 
'symbol_names', 'BATCH_SIZE'], {}), '(qubits, symbol_names, BATCH_SIZE)\n', (18295, 18329), False, 'from tensorflow_quantum.python import util\n'), ((18378, 18470), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (18386, 18470), True, 'import numpy as np\n'), ((18529, 18592), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', 'max_paulisum_length', 'BATCH_SIZE'], {}), '(qubits, max_paulisum_length, BATCH_SIZE)\n', (18551, 18592), False, 'from tensorflow_quantum.python import util\n'), ((18934, 19060), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_sampled_expectation', 'batch_util.batch_calculate_sampled_expectation', (['circuit_batch', 'resolver_batch', '[[x] for x in pauli_sums]', 'num_samples', 'sim'], {}), '(circuit_batch,\n resolver_batch, [[x] for x in pauli_sums], num_samples, sim)\n', (18980, 19060), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((19977, 20009), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (19996, 20009), False, 'import cirq\n'), ((20186, 20251), 'numpy.array', 'np.array', (['[[(0.0) for _ in symbol_names] for _ in resolver_batch]'], {}), '([[(0.0) for _ in symbol_names] for _ in resolver_batch])\n', (20194, 20251), True, 'import numpy as np\n'), ((20285, 20348), 'tensorflow_quantum.python.util.random_pauli_sums', 'util.random_pauli_sums', (['qubits', 'max_paulisum_length', 'BATCH_SIZE'], {}), '(qubits, max_paulisum_length, BATCH_SIZE)\n', (20307, 20348), False, 'from tensorflow_quantum.python import util\n'), ((20689, 20815), 'tensorflow_quantum.core.ops.batch_util.batch_calculate_sampled_expectation', 'batch_util.batch_calculate_sampled_expectation', (['circuit_batch', 'resolver_batch', '[[x] for x in pauli_sums]', 'num_samples', 'sim'], {}), 
'(circuit_batch,\n resolver_batch, [[x] for x in pauli_sums], num_samples, sim)\n', (20735, 20815), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((21655, 21687), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (21674, 21687), False, 'import cirq\n'), ((21789, 21868), 'tensorflow_quantum.python.util.random_symbol_circuit_resolver_batch', 'util.random_symbol_circuit_resolver_batch', (['qubits', 'symbol_names', 'BATCH_SIZE', '(30)'], {}), '(qubits, symbol_names, BATCH_SIZE, 30)\n', (21830, 21868), False, 'from tensorflow_quantum.python import util\n'), ((22053, 22145), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (22061, 22145), True, 'import numpy as np\n'), ((22611, 22681), 'tensorflow_quantum.core.ops.batch_util.batch_sample', 'batch_util.batch_sample', (['circuit_batch', 'resolver_batch', 'n_samples', 'sim'], {}), '(circuit_batch, resolver_batch, n_samples, sim)\n', (22634, 22681), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((23744, 23776), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (23763, 23776), False, 'import cirq\n'), ((24000, 24065), 'numpy.array', 'np.array', (['[[(0.0) for _ in symbol_names] for _ in resolver_batch]'], {}), '([[(0.0) for _ in symbol_names] for _ in resolver_batch])\n', (24008, 24065), True, 'import numpy as np\n'), ((24506, 24576), 'tensorflow_quantum.core.ops.batch_util.batch_sample', 'batch_util.batch_sample', (['circuit_batch', 'resolver_batch', 'n_samples', 'sim'], {}), '(circuit_batch, resolver_batch, n_samples, sim)\n', (24529, 24576), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((4274, 4285), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (4283, 4285), 
False, 'from unittest import mock\n'), ((4678, 4734), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {'backend': '"""junk"""'}), "(backend='junk')\n", (4718, 4734), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((4869, 4936), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_expectation_op', 'circuit_execution_ops.get_expectation_op', ([], {'quantum_concurrent': '"""junk"""'}), "(quantum_concurrent='junk')\n", (4909, 4936), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((5428, 5532), 'cirq.google.QuantumEngineSampler', 'cirq.google.QuantumEngineSampler', ([], {'engine': 'mock_engine', 'processor_id': '"""test"""', 'gate_set': 'cirq.google.XMON'}), "(engine=mock_engine, processor_id='test',\n gate_set=cirq.google.XMON)\n", (5460, 5532), False, 'import cirq\n'), ((5713, 5777), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {'backend': '"""junk"""'}), "(backend='junk')\n", (5761, 5777), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((5912, 5987), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampled_expectation_op', 'circuit_execution_ops.get_sampled_expectation_op', ([], {'quantum_concurrent': '"""junk"""'}), "(quantum_concurrent='junk')\n", (5960, 5987), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((6777, 6830), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {'backend': '"""junk"""'}), "(backend='junk')\n", (6814, 6830), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((6965, 7029), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_sampling_op', 'circuit_execution_ops.get_sampling_op', ([], {'quantum_concurrent': 
'"""junk"""'}), "(quantum_concurrent='junk')\n", (7002, 7029), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((7482, 7532), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {'backend': '"""junk"""'}), "(backend='junk')\n", (7516, 7532), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((7686, 7697), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (7695, 7697), False, 'from unittest import mock\n'), ((8067, 8128), 'tensorflow_quantum.core.ops.circuit_execution_ops.get_state_op', 'circuit_execution_ops.get_state_op', ([], {'quantum_concurrent': '"""junk"""'}), "(quantum_concurrent='junk')\n", (8101, 8128), False, 'from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops\n'), ((8833, 8847), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (8845, 8847), False, 'import cirq\n'), ((10398, 10430), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (10417, 10430), False, 'import cirq\n'), ((11550, 11582), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(1)', 'n_qubits'], {}), '(1, n_qubits)\n', (11569, 11582), False, 'import cirq\n'), ((12737, 12762), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(4)', '(4)'], {}), '(4, 4)\n', (12756, 12762), False, 'import cirq\n'), ((13688, 13702), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (13700, 13702), False, 'import cirq\n'), ((13757, 13779), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{}'], {}), '({})\n', (13775, 13779), False, 'import cirq\n'), ((15333, 15370), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (15355, 15370), False, 'from tensorflow_quantum.python import util\n'), ((15431, 15486), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[psum] for psum in pauli_sums]'], {}), '([[psum] for psum in 
pauli_sums])\n', (15453, 15486), False, 'from tensorflow_quantum.python import util\n'), ((16596, 16610), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (16608, 16610), False, 'import cirq\n'), ((16665, 16687), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{}'], {}), '({})\n', (16683, 16687), False, 'import cirq\n'), ((16997, 17034), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (17019, 17034), False, 'from tensorflow_quantum.python import util\n'), ((17095, 17150), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[psum] for psum in pauli_sums]'], {}), '([[psum] for psum in pauli_sums])\n', (17117, 17150), False, 'from tensorflow_quantum.python import util\n'), ((18725, 18762), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (18747, 18762), False, 'from tensorflow_quantum.python import util\n'), ((18823, 18878), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[psum] for psum in pauli_sums]'], {}), '([[psum] for psum in pauli_sums])\n', (18845, 18878), False, 'from tensorflow_quantum.python import util\n'), ((20035, 20049), 'cirq.Circuit', 'cirq.Circuit', ([], {}), '()\n', (20047, 20049), False, 'import cirq\n'), ((20104, 20126), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{}'], {}), '({})\n', (20122, 20126), False, 'import cirq\n'), ((20480, 20517), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (20502, 20517), False, 'from tensorflow_quantum.python import util\n'), ((20578, 20633), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['[[psum] for psum in pauli_sums]'], {}), '([[psum] for psum in pauli_sums])\n', (20600, 20633), False, 'from tensorflow_quantum.python import util\n'), ((23849, 23863), 'cirq.Circuit', 'cirq.Circuit', ([], 
{}), '()\n', (23861, 23863), False, 'import cirq\n'), ((23918, 23940), 'cirq.ParamResolver', 'cirq.ParamResolver', (['{}'], {}), '({})\n', (23936, 23940), False, 'import cirq\n'), ((3953, 3969), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (3967, 3969), False, 'import cirq\n'), ((4041, 4070), 'cirq.DensityMatrixSimulator', 'cirq.DensityMatrixSimulator', ([], {}), '()\n', (4068, 4070), False, 'import cirq\n'), ((4356, 4460), 'cirq.google.QuantumEngineSampler', 'cirq.google.QuantumEngineSampler', ([], {'engine': 'mock_engine', 'processor_id': '"""test"""', 'gate_set': 'cirq.google.XMON'}), "(engine=mock_engine, processor_id='test',\n gate_set=cirq.google.XMON)\n", (4388, 4460), False, 'import cirq\n'), ((5197, 5213), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (5211, 5213), False, 'import cirq\n'), ((5293, 5322), 'cirq.DensityMatrixSimulator', 'cirq.DensityMatrixSimulator', ([], {}), '()\n', (5320, 5322), False, 'import cirq\n'), ((6214, 6230), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (6228, 6230), False, 'import cirq\n'), ((6299, 6328), 'cirq.DensityMatrixSimulator', 'cirq.DensityMatrixSimulator', ([], {}), '()\n', (6326, 6328), False, 'import cirq\n'), ((6431, 6535), 'cirq.google.QuantumEngineSampler', 'cirq.google.QuantumEngineSampler', ([], {'engine': 'mock_engine', 'processor_id': '"""test"""', 'gate_set': 'cirq.google.XMON'}), "(engine=mock_engine, processor_id='test',\n gate_set=cirq.google.XMON)\n", (6463, 6535), False, 'import cirq\n'), ((7230, 7246), 'cirq.Simulator', 'cirq.Simulator', ([], {}), '()\n', (7244, 7246), False, 'import cirq\n'), ((7312, 7341), 'cirq.DensityMatrixSimulator', 'cirq.DensityMatrixSimulator', ([], {}), '()\n', (7339, 7341), False, 'import cirq\n'), ((9002, 9049), 'numpy.random.choice', 'np.random.choice', (['qubits'], {'size': '(2)', 'replace': '(False)'}), '(qubits, size=2, replace=False)\n', (9018, 9049), True, 'import numpy as np\n'), ((9704, 9726), 'cirq.ParamResolver', 'cirq.ParamResolver', 
(['{}'], {}), '({})\n', (9722, 9726), False, 'import cirq\n'), ((12798, 12890), 'numpy.array', 'np.array', (['[[resolver[symbol] for symbol in symbol_names] for resolver in resolver_batch]'], {}), '([[resolver[symbol] for symbol in symbol_names] for resolver in\n resolver_batch])\n', (12806, 12890), True, 'import numpy as np\n'), ((23064, 23099), 'scipy.stats.entropy', 'stats.entropy', (['(a + 1e-08)', '(b + 1e-08)'], {}), '(a + 1e-08, b + 1e-08)\n', (23077, 23099), False, 'from scipy import stats\n'), ((24959, 24994), 'scipy.stats.entropy', 'stats.entropy', (['(a + 1e-08)', '(b + 1e-08)'], {}), '(a + 1e-08, b + 1e-08)\n', (24972, 24994), False, 'from scipy import stats\n'), ((7770, 7874), 'cirq.google.QuantumEngineSampler', 'cirq.google.QuantumEngineSampler', ([], {'engine': 'mock_engine', 'processor_id': '"""test"""', 'gate_set': 'cirq.google.XMON'}), "(engine=mock_engine, processor_id='test',\n gate_set=cirq.google.XMON)\n", (7802, 7874), False, 'import cirq\n'), ((9168, 9215), 'numpy.random.choice', 'np.random.choice', (['qubits'], {'size': '(1)', 'replace': '(False)'}), '(qubits, size=1, replace=False)\n', (9184, 9215), True, 'import numpy as np\n'), ((9517, 9554), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (9539, 9554), False, 'from tensorflow_quantum.python import util\n'), ((10468, 10505), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (10490, 10505), False, 'from tensorflow_quantum.python import util\n'), ((11794, 11831), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (11816, 11831), False, 'from tensorflow_quantum.python import util\n'), ((12970, 13007), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (12992, 13007), False, 'from 
tensorflow_quantum.python import util\n'), ((13832, 13869), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (13854, 13869), False, 'from tensorflow_quantum.python import util\n'), ((8915, 8928), 'cirq.Y', 'cirq.Y', (['qubit'], {}), '(qubit)\n', (8921, 8928), False, 'import cirq\n'), ((21986, 21999), 'cirq.H', 'cirq.H', (['qubit'], {}), '(qubit)\n', (21992, 21999), False, 'import cirq\n'), ((22229, 22266), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (22251, 22266), False, 'from tensorflow_quantum.python import util\n'), ((24124, 24161), 'tensorflow_quantum.python.util.convert_to_tensor', 'util.convert_to_tensor', (['circuit_batch'], {}), '(circuit_batch)\n', (24146, 24161), False, 'from tensorflow_quantum.python import util\n'), ((22427, 22466), 'numpy.arange', 'np.arange', (['(sample.shape[-1] - 1)', '(-1)', '(-1)'], {}), '(sample.shape[-1] - 1, -1, -1)\n', (22436, 22466), True, 'import numpy as np\n'), ((22816, 22855), 'numpy.arange', 'np.arange', (['(sample.shape[-1] - 1)', '(-1)', '(-1)'], {}), '(sample.shape[-1] - 1, -1, -1)\n', (22825, 22855), True, 'import numpy as np\n'), ((24322, 24361), 'numpy.arange', 'np.arange', (['(sample.shape[-1] - 1)', '(-1)', '(-1)'], {}), '(sample.shape[-1] - 1, -1, -1)\n', (24331, 24361), True, 'import numpy as np\n'), ((24711, 24750), 'numpy.arange', 'np.arange', (['(sample.shape[-1] - 1)', '(-1)', '(-1)'], {}), '(sample.shape[-1] - 1, -1, -1)\n', (24720, 24750), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import cv2
import sys
import numpy as np
# from matplotlib import pyplot as plt
def watershed(src):
    """Segment a BGR image with the marker-based watershed algorithm.

    Returns (markers, src): the int32 marker image produced by
    cv2.watershed, and the input image with boundary pixels painted
    blue.  Note that `src` is modified in place.
    """
    # Grayscale + Otsu binarization (inverted so objects become white).
    grayscale = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(grayscale, 0, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # Morphological opening removes small noise specks.
    struct_elem = np.ones((3, 3), np.uint8)
    opened = cv2.morphologyEx(binary, cv2.MORPH_OPEN, struct_elem, iterations=2)
    # Dilating the opened mask yields pixels that are surely background.
    certain_bg = cv2.dilate(opened, struct_elem, iterations=3)
    # Pixels farther than 70% of the max distance from any edge are
    # surely foreground.
    dist = cv2.distanceTransform(opened, cv2.DIST_L2, 5)
    _, certain_fg = cv2.threshold(dist, 0.7 * dist.max(), 255, 0)
    certain_fg = np.uint8(certain_fg)
    # Everything between sure-background and sure-foreground is undecided.
    undecided = cv2.subtract(certain_bg, certain_fg)
    # Label connected foreground components; shift labels so that the
    # background is 1 rather than 0.
    _, markers = cv2.connectedComponents(certain_fg)
    markers = markers + 1
    # Watershed treats 0 as "unknown", so zero out the undecided region.
    markers[undecided == 255] = 0
    markers = cv2.watershed(src, markers)
    # Watershed labels boundaries with -1; paint them blue on the image.
    src[markers == -1] = [255, 0, 0]
    return markers, src
if __name__ == '__main__':
    # Usage: python <script> sample.jpg
    param = sys.argv
    if (len(param) != 2):
        print ("Usage: $ python " + param[0] + " sample.jpg")
        quit()
    # Load the input image.  cv2.imread returns None (rather than raising)
    # for an unreadable path, so both the exception and the None result
    # are checked.  The original used a bare `except:`, which also
    # swallows SystemExit/KeyboardInterrupt; narrowed to Exception.
    try:
        input_img = cv2.imread(param[1])
    except Exception:
        print ('failed to load %s' % param[1])
        quit()
    if input_img is None:
        print ('failed to load %s' % param[1])
        quit()
    markers, img = watershed(input_img)
    # Persist both the raw marker labels and the annotated image.
    cv2.imwrite("watershed_markers_" + param[1], markers)
    cv2.imwrite("watershed_image_" + param[1], img)
| [
"numpy.uint8",
"cv2.imwrite",
"numpy.ones",
"cv2.threshold",
"cv2.morphologyEx",
"cv2.distanceTransform",
"cv2.connectedComponents",
"cv2.cvtColor",
"cv2.dilate",
"cv2.subtract",
"cv2.imread",
"cv2.watershed"
] | [((169, 206), 'cv2.cvtColor', 'cv2.cvtColor', (['src', 'cv2.COLOR_BGR2GRAY'], {}), '(src, cv2.COLOR_BGR2GRAY)\n', (181, 206), False, 'import cv2\n'), ((263, 331), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (276, 331), False, 'import cv2\n'), ((400, 425), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (407, 425), True, 'import numpy as np\n'), ((439, 502), 'cv2.morphologyEx', 'cv2.morphologyEx', (['bin_img', 'cv2.MORPH_OPEN', 'kernel'], {'iterations': '(2)'}), '(bin_img, cv2.MORPH_OPEN, kernel, iterations=2)\n', (455, 502), False, 'import cv2\n'), ((544, 585), 'cv2.dilate', 'cv2.dilate', (['opening', 'kernel'], {'iterations': '(3)'}), '(opening, kernel, iterations=3)\n', (554, 585), False, 'import cv2\n'), ((641, 687), 'cv2.distanceTransform', 'cv2.distanceTransform', (['opening', 'cv2.DIST_L2', '(5)'], {}), '(opening, cv2.DIST_L2, 5)\n', (662, 687), False, 'import cv2\n'), ((810, 827), 'numpy.uint8', 'np.uint8', (['sure_fg'], {}), '(sure_fg)\n', (818, 827), True, 'import numpy as np\n'), ((842, 872), 'cv2.subtract', 'cv2.subtract', (['sure_bg', 'sure_fg'], {}), '(sure_bg, sure_fg)\n', (854, 872), False, 'import cv2\n'), ((915, 947), 'cv2.connectedComponents', 'cv2.connectedComponents', (['sure_fg'], {}), '(sure_fg)\n', (938, 947), False, 'import cv2\n'), ((1157, 1184), 'cv2.watershed', 'cv2.watershed', (['src', 'markers'], {}), '(src, markers)\n', (1170, 1184), False, 'import cv2\n'), ((1858, 1911), 'cv2.imwrite', 'cv2.imwrite', (["('watershed_markers_' + param[1])", 'markers'], {}), "('watershed_markers_' + param[1], markers)\n", (1869, 1911), False, 'import cv2\n'), ((1916, 1963), 'cv2.imwrite', 'cv2.imwrite', (["('watershed_image_' + param[1])", 'img'], {}), "('watershed_image_' + param[1], img)\n", (1927, 1963), False, 'import cv2\n'), ((1631, 1651), 'cv2.imread', 'cv2.imread', (['param[1]'], {}), 
'(param[1])\n', (1641, 1651), False, 'import cv2\n')] |
from dataclasses import dataclass
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
@dataclass
class Dataset:
    """A container for convenient access to raw and preprocessed data."""

    train: pd.DataFrame
    test: pd.DataFrame
    target: str

    def __post_init__(self):
        # Run the full preparation pipeline as soon as the fields are set.
        self._split_x_y()
        self._register_feature_names()
        self._impute_and_encode()

    def _split_x_y(self):
        """Separate predictors from the target column in both splits."""
        self.X_train = self.train.drop(columns=[self.target])
        self.y_train = self.train[self.target]
        self.X_test = self.test.drop(columns=[self.target])
        self.y_test = self.test[self.target]

    def _register_feature_names(self):
        """Record which columns are categorical (object dtype) vs numeric."""
        columns = list(self.X_train.columns)
        self.cat_features = [
            name
            for name in columns
            if pd.api.types.is_object_dtype(self.X_train[name])
        ]
        self.num_features = [
            name for name in columns if name not in self.cat_features
        ]

    def _impute_and_encode(self):
        """Impute missing values and one-hot encode categorical columns.

        Replaces self.X_train / self.X_test with dense, fully numeric
        DataFrames whose columns are stored in self.feature_names.
        """
        categorical_pipeline = Pipeline(
            steps=[
                ("imputer", SimpleImputer(strategy="constant", fill_value="missing")),
                ("onehot", OneHotEncoder(sparse=False, handle_unknown="ignore")),
            ]
        )
        preprocessor = ColumnTransformer(
            [
                ("cat", categorical_pipeline, self.cat_features),
                ("num", SimpleImputer(strategy="mean"), self.num_features),
            ],
            sparse_threshold=0,
        )
        encoded_train = preprocessor.fit_transform(self.X_train)
        encoded_test = preprocessor.transform(self.X_test)
        if self.cat_features:
            # Expand the categorical names into their one-hot column names.
            onehot_names = (
                preprocessor.named_transformers_["cat"]
                .named_steps["onehot"]
                .get_feature_names(input_features=self.cat_features)
            )
            self.feature_names = np.r_[onehot_names, self.num_features]
        else:
            self.feature_names = np.array(self.num_features)
        self.X_train = pd.DataFrame(encoded_train, columns=self.feature_names)
        self.X_test = pd.DataFrame(encoded_test, columns=self.feature_names)

    @property
    def sample(self):
        """First 100 rows of the raw test split."""
        return self.test.head(100)
| [
"sklearn.preprocessing.OneHotEncoder",
"numpy.array",
"pandas.api.types.is_object_dtype",
"sklearn.impute.SimpleImputer",
"pandas.DataFrame"
] | [((2301, 2356), 'pandas.DataFrame', 'pd.DataFrame', (['X_train_trans'], {'columns': 'self.feature_names'}), '(X_train_trans, columns=self.feature_names)\n', (2313, 2356), True, 'import pandas as pd\n'), ((2379, 2433), 'pandas.DataFrame', 'pd.DataFrame', (['X_test_trans'], {'columns': 'self.feature_names'}), '(X_test_trans, columns=self.feature_names)\n', (2391, 2433), True, 'import pandas as pd\n'), ((2250, 2277), 'numpy.array', 'np.array', (['self.num_features'], {}), '(self.num_features)\n', (2258, 2277), True, 'import numpy as np\n'), ((982, 1027), 'pandas.api.types.is_object_dtype', 'pd.api.types.is_object_dtype', (['self.X_train[c]'], {}), '(self.X_train[c])\n', (1010, 1027), True, 'import pandas as pd\n'), ((1258, 1314), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '"""missing"""'}), "(strategy='constant', fill_value='missing')\n", (1271, 1314), False, 'from sklearn.impute import SimpleImputer\n'), ((1344, 1396), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)', 'handle_unknown': '"""ignore"""'}), "(sparse=False, handle_unknown='ignore')\n", (1357, 1396), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1596, 1626), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (1609, 1626), False, 'from sklearn.impute import SimpleImputer\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 9 13:22:31 2021
Model Simulation & Grid Interpolation
@authors: <NAME> & <NAME>
"""
import numpy as np
import sys
from scipy.stats import norm
from scipy.stats import uniform
import scipy.special as sc
import mpmath
import scipy.integrate as si
import scipy.interpolate as interp
import scipy.optimize as optim
from scipy.stats import genextreme
## integration.cpp
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Import C++ function library
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., RW_marginal, pRW_me_interp, find_xrange_pRW_me
##
import os, ctypes
# g++ -std=c++11 -shared -fPIC -o p_integrand.so p_integrand.cpp
# Load the compiled integration kernels and declare their ctypes signatures.
# NOTE(review): the .so path is relative to the current working directory —
# presumably callers run from the project root; confirm.
lib = ctypes.CDLL(os.path.abspath('./nonstat_model_noXs_global/p_integrand.so'))
# Pointer types used throughout: 1-D float64 arrays for inputs/outputs and
# grids, 1-D bool arrays for censoring indicators.
i_and_o_type = np.ctypeslib.ndpointer(ndim=1, dtype=np.float64)
grid_type = np.ctypeslib.ndpointer(ndim=1, dtype=np.float64)
bool_type = np.ctypeslib.ndpointer(ndim=1, dtype='bool')
# --- CDF / survival kernels: P(R^phi*W + eps <= x) and P(R^phi*W > x) ---
lib.pRW_me_interp_C.restype = ctypes.c_int
lib.pRW_me_interp_C.argtypes = (i_and_o_type, grid_type, grid_type,
                ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int,
                i_and_o_type)
lib.RW_marginal_C.restype = ctypes.c_int
lib.RW_marginal_C.argtypes = (i_and_o_type,
                ctypes.c_double, ctypes.c_double, ctypes.c_int,
                i_and_o_type)
# Transform observations with nugget to uniform scale.
lib.RW_me_2_unifs.restype = ctypes.c_int
lib.RW_me_2_unifs.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, i_and_o_type, ctypes.c_double,
                              ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
# Bracket the x-range covering a target probability interval (C analogue of
# find_xrange_pRW_me below).
lib.find_xrange_pRW_me_C.restype = ctypes.c_int
lib.find_xrange_pRW_me_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,
                                     grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double,
                                     ctypes.c_int, i_and_o_type)
# Monotone (PCHIP) interpolation and quantile inversion.
lib.pchip.restype = ctypes.c_int
lib.pchip.argtypes = (grid_type, grid_type, grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.qRW_me_interp.restype = ctypes.c_int
lib.qRW_me_interp.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double,
                              ctypes.c_int, ctypes.c_int, i_and_o_type,
                              grid_type, grid_type, ctypes.c_int, ctypes.c_double, ctypes.c_double)
# --- Density kernels ---
lib.RW_density_C.restype = ctypes.c_int
lib.RW_density_C.argtypes = (i_and_o_type,
                ctypes.c_double, ctypes.c_double, ctypes.c_int,
                i_and_o_type)
lib.dRW_me_interp_C.restype = ctypes.c_int
lib.dRW_me_interp_C.argtypes = (i_and_o_type, grid_type, grid_type,
                ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int,
                i_and_o_type)
lib.density_interp_grid.restype = ctypes.c_int
lib.density_interp_grid.argtypes = (grid_type, i_and_o_type,
                                    ctypes.c_double, ctypes.c_int, ctypes.c_int,
                                    i_and_o_type, i_and_o_type)
# Scalar GEV and normal densities (last argument presumably a log flag —
# confirm against p_integrand.cpp).
lib.dgev_C.restype = ctypes.c_double
lib.dgev_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_bool)
lib.dnorm_C.restype = ctypes.c_double
lib.dnorm_C.argtypes = (ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_bool)
# --- Marginal-transform likelihood variants (C / F / global) ---
lib.marg_transform_data_mixture_me_likelihood_C.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_C.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
                                                            bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
                                                            ctypes.c_double, i_and_o_type, ctypes.c_double,
                                                            grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.marg_transform_data_mixture_me_likelihood_F.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_F.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
                                                            bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
                                                            ctypes.c_double, ctypes.c_double, ctypes.c_double,
                                                            grid_type, grid_type, ctypes.c_int, ctypes.c_int)
lib.marg_transform_data_mixture_me_likelihood_global.restype = ctypes.c_double
lib.marg_transform_data_mixture_me_likelihood_global.argtypes = (i_and_o_type, i_and_o_type, i_and_o_type,
                                                                 bool_type, bool_type, i_and_o_type, i_and_o_type, i_and_o_type,
                                                                 ctypes.c_double, i_and_o_type, ctypes.c_double,
                                                                 grid_type, grid_type, ctypes.c_int, ctypes.c_int, ctypes.c_int)
# --- Latent-X updates for the MCMC sampler ---
lib.Thresh_X_try.restype = ctypes.c_int
lib.Thresh_X_try.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double,
                             ctypes.c_double, ctypes.c_double,
                             ctypes.c_int, ctypes.c_int, i_and_o_type, i_and_o_type)
lib.X_update.restype = ctypes.c_int
lib.X_update.argtypes = (i_and_o_type, grid_type, grid_type, i_and_o_type, ctypes.c_double, ctypes.c_double,
                         ctypes.c_double, ctypes.c_double,
                         ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
# Inverse transform: uniforms back to the R^phi*W + eps scale.
lib.unifs_2_RW_me.restype = ctypes.c_int
lib.unifs_2_RW_me.argtypes = (i_and_o_type, grid_type, grid_type, ctypes.c_double, i_and_o_type, ctypes.c_double,
                              ctypes.c_double, ctypes.c_double,
                              ctypes.c_int, ctypes.c_int, ctypes.c_int, i_and_o_type)
# Debug helper: print a vector from the C side.
lib.print_Vec.restype = ctypes.c_double
lib.print_Vec.argtypes = (i_and_o_type, ctypes.c_int, ctypes.c_int)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Generate Levy random samples
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., Stable variables with alpha=1/2
##
def rlevy(n, m=0, s=1):
    """Draw `n` samples from a Levy(m, s) distribution (stable, alpha=1/2).

    Uses the inverse-CDF construction: if U ~ Uniform(0, 1), then
    m + s / Phi^{-1}(U/2)^2 follows a Levy law with location m and scale s.
    """
    if np.any(s < 0):
        sys.exit("s must be positive")
    half_uniforms = uniform.rvs(0, 1, n) / 2
    return m + s / norm.ppf(half_uniforms) ** 2
## The density for R^phi in which R is levy distributed
def dR_power_phi(x, phi, m=0, s=1, log=False):
    """Density of R**phi where R ~ Levy(m, s).

    Obtained by the change of variables r = x**(1/phi) applied to the Levy
    density; returns the log-density when ``log=True``.

    Parameters
    ----------
    x : scalar or array of positive evaluation points.
    phi : power exponent (> 0).
    m, s : Levy location and scale (s > 0).
    log : if True return the log-density.
    """
    x_phi = x**(1/phi)
    if np.any(x_phi <= m):
        sys.exit("some x**phi <= m")
    if np.any(s <= 0):
        sys.exit("s must be positive")
    # Log Levy density at x**(1/phi) plus the Jacobian term of the transform.
    tmp = np.sum(np.log(s/(2 * np.pi))/2 - 3 * np.log(x_phi - m)/2 - s/(2 * (x_phi - m)) + (1/phi-1)*np.log(x) - np.log(phi))
    # Idiom fix: the original used np.invert(log), which is bitwise NOT and
    # only acts as logical negation on numpy bools; `not log` is explicit.
    if not log:
        tmp = np.exp(tmp)
    return tmp
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate unregularized upper incomplete gamma function
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## The negative a values are allowed
##
def gammaincc_unregulized(a, x):
    """Unregularized upper incomplete gamma function Gamma(a, x), a may be negative.

    For a > 0 uses scipy's regularized Q(a, x) times Gamma(a); for a < 0
    applies the recurrence Gamma(a, x) = (Gamma(a+1, x) - x**a * exp(-x)) / a;
    for a == 0 falls back to mpmath.gammainc(0, x).
    """
    if isinstance(x, (int, np.int64, float)):
        x = np.array([x])
    # BUG FIX: the original tested `x.any() < 0`, a bool compared with 0,
    # which is always False and never triggered; the intended check is
    # elementwise positivity of x.
    if np.any(x < 0):
        sys.exit("x must be positive")
    if a > 0:
        return sc.gamma(a)*sc.gammaincc(a, x)
    elif a < 0:
        return gammaincc_unregulized(a+1, x)/a - (x**a)*np.exp(-x)/a
    else:
        return mpmath.gammainc(0, x)
## Compare with mpmath.gammainc
## gammaincc_unregulized is more efficient
# import time
#
# start_time = time.time()
# gammaincc_unregulized(-3.62,5)
# time.time() - start_time
# start_time = time.time()
# mpmath.gammainc(-3.62,5)
# time.time() - start_time
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the exact marginal survival function for R^phi*W
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
##
def RW_marginal_uni(x, phi, gamma, survival=True):
    """Exact marginal survival P(R^phi * W > x) of the smooth process.

    With ``survival=False`` the CDF P(R^phi * W <= x) is returned instead.
    """
    scaled = gamma / (2 * x**(1/phi))
    prefactor = (gamma/2)**phi / sc.gamma(0.5)
    upper_tail = sc.gammainc(0.5, scaled) + prefactor * gammaincc_unregulized(0.5 - phi, scaled) / x
    return upper_tail if survival else 1 - upper_tail
RW_marginal = np.vectorize(RW_marginal_uni)
def RW_marginal_asymp(x, phi, gamma):
    """Asymptotic tail approximation of P(R^phi * W > x), by regime of phi."""
    root = np.sqrt(2 * gamma / np.pi)
    if phi < 0.5:
        # Finite-mean regime: tail decays like E[R^phi * W] / x.
        first_moment = (2 * gamma)**phi * sc.gamma(1 - 2*phi) / sc.gamma(1 - phi)
        return first_moment / x
    if phi > 0.5:
        # Heavy regime: regularly varying with index -1/(2*phi).
        return root * x**(-1 / (2 * phi)) / (1 - 1/(2*phi))
    # Boundary case phi == 1/2 carries a logarithmic correction.
    return root * np.log(x) / x
def RW_quantile_asymp(p, phi, gamma):
    """Approximate p-quantile of R^phi * W, inverting the asymptotic tail."""
    root = np.sqrt(2 * gamma / np.pi)
    tail = 1 - p
    if phi < 0.5:
        first_moment = (2 * gamma)**phi * sc.gamma(1 - 2*phi) / sc.gamma(1 - phi)
        return first_moment / tail
    if phi > 0.5:
        base = root / (1 - 1/(2*phi)) / tail
        return base**(2 * phi)
    # phi == 1/2: the log correction is inverted via the Lambert W function.
    ratio = tail / root
    return ratio / sc.lambertw(ratio)
# # Compare the exact and asymptotic CDF
# gamma = 1.2; x =10; phi=0.3
# import matplotlib.pyplot as plt
# axes = plt.gca()
# axes.set_ylim([0,0.125])
# X_vals = np.linspace(100,1500,num=200)
# P_vals = RW_marginal(X_vals,phi,gamma)
# P_asym = RW_marginal_asymp(X_vals,phi,gamma)
# plt.plot(X_vals, P_vals, 'b')
# plt.plot(X_vals, P_asym, 'r',linestyle='--');
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the marginal survival function for R^phi*W + epsilon
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
# ---------------- 1. Define integrand in Python: exact form ---------------- #
def mix_distn_integrand(t, xval, phi, tmp1, tmp2, tau_sqd):
    """Integrand of the smooth-process / Gaussian-nugget convolution.

    Survival of R^phi*W at (xval - t), weighted by the unnormalized
    Gaussian density of the nugget at t.  tmp1 = gamma/2 and
    tmp2 = (gamma/2)^phi / Gamma(1/2) are precomputed by the caller.
    """
    shifted = xval - t
    arg = tmp1 / shifted**(1/phi)
    survival = sc.gammainc(0.5, arg) + tmp2 * gammaincc_unregulized(0.5 - phi, arg) / shifted
    return survival * np.exp(-t**2 / (2*tau_sqd))
def pRW_me_uni(xval, phi, gamma, tau_sqd):
    """CDF of R^phi*W + eps at scalar xval by direct numerical integration."""
    half_gamma = gamma / 2
    prefactor = half_gamma**phi / sc.gamma(0.5)
    nugget_sd = np.sqrt(tau_sqd)
    integral = si.quad(mix_distn_integrand, -np.inf, xval,
                       args=(xval, phi, half_gamma, prefactor, tau_sqd))
    cdf = norm.cdf(xval, loc=0.0, scale=nugget_sd) - integral[0]/np.sqrt(2*np.pi*tau_sqd)
    # Quadrature degrades in the far upper tail; fall back to the
    # smooth-process CDF there.
    if cdf < 0.999:
        return cdf
    return RW_marginal_uni(xval, phi, gamma, survival=False)
pRW_me = np.vectorize(pRW_me_uni)
# ----------- 2. Define integrand in Python: linear interpolation ----------- #
# Actually BETTER than numerical integration because there are no singular values.
# We use the Trapezoidal rule.
## **** (0). Generate a GRIDDED set of values for P(RW>x) ****
def survival_interp_grid(phi, gamma, grid_size=800):
    """Tabulate P(R^phi*W > x) on a graded grid, dense near 0 and sparse
    far out, returned in DESCENDING x order for the C trapezoid routine."""
    dense = np.linspace(0.000001, 200, grid_size, endpoint=False)
    medium = np.linspace(200.5, 900, int(grid_size/4), endpoint=False)
    sparse = np.linspace(900.5, 100000, int(grid_size/10), endpoint=False)
    xp = np.concatenate((dense, medium, sparse))[::-1]
    # The C routine requires a C-contiguous float64 buffer.
    xp = np.ascontiguousarray(xp, np.float64)
    n_xval = len(xp)
    surv_p = np.empty(n_xval)
    status = lib.RW_marginal_C(xp, phi, gamma, n_xval, surv_p)
    if status != 1:
        sys.exit('C implementaion failed.')
    return (xp, surv_p)
## **** (1). Vectorize univariate function ****
def pRW_me_uni_interp(xval, xp, surv_p, tau_sqd):
    """CDF of R^phi*W + eps at scalar xval via the trapezoidal rule over a
    pre-tabulated survival grid (xp, surv_p), with surv_p = P(R^phi*W > xp)."""
    shifted = xval - xp
    weights = np.exp(-shifted**2 / (2 * tau_sqd)) * surv_p
    # Trapezoidal rule: interval widths times the mean of endpoint heights.
    trapezoids = np.diff(shifted) * (weights[:-1] + weights[1:]) / 2
    integral = np.sum(trapezoids)
    gauss_part = norm.cdf(xval, loc=0.0, scale=np.sqrt(tau_sqd))
    return gauss_part - integral / np.sqrt(2 * np.pi * tau_sqd)
def pRW_me_interp_slower(xval, xp, surv_p, tau_sqd):
    """Vectorized wrapper: apply the scalar interpolated CDF to every entry."""
    results = [pRW_me_uni_interp(value, xp, surv_p, tau_sqd) for value in xval]
    return np.array(results)
## **** (2). Broadcast matrices and vectorize columns ****
def pRW_me_interp_py(xval, xp, surv_p, tau_sqd, phi, gamma):
    """CDF of R^phi*W + epsilon at `xval` (scalar or 1-D array).

    Uses a broadcast trapezoidal rule over the pre-tabulated survival grid
    (xp, surv_p) for moderate x, and falls back to the smooth-process CDF
    `RW_marginal` for large x (>= 820) or when the nugget variance is tiny
    (tau_sqd <= 0.05).  Returns a 1-D array of CDF values clipped below 0.
    """
    if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
    tmp = np.zeros(xval.shape) # Store the results
    # Use the smooth process CDF values if tau_sqd<0.05 (nugget negligible)
    if tau_sqd>0.05:
        which = (xval<820)
    else:
        which = np.repeat(False, xval.shape)
    # Trapezoidal convolution for values that are less than 820
    if(np.sum(which)>0):
        xval_less = xval[which]
        # Broadcast: rows index the grid points, columns the query points.
        tp = xval_less-xp[:,np.newaxis]
        integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p[:,np.newaxis]
        sd = np.sqrt(tau_sqd)
        ncol = integrand_p.shape[1]
        # Column-wise trapezoidal rule for the convolution integral.
        I_1 = np.array([np.sum(np.diff(tp[:,index])*(integrand_p[:-1,index] + integrand_p[1:,index])/2)
                            for index in np.arange(ncol)])
        tmp_res = norm.cdf(xval_less, loc=0.0, scale=sd)-I_1/np.sqrt(2*np.pi*tau_sqd)
        # Numerical negative can occur when xval is very small; clip to 0.
        if(np.any(tmp_res<0)): tmp_res[tmp_res<0] = 0
        tmp[which] = tmp_res
    # Smooth-process fallback for values that are greater than 820
    if(xval.size-np.sum(which)>0):
        tmp[np.invert(which)] = RW_marginal(xval[np.invert(which)],phi,gamma,survival = False)
    return tmp
## **** (3). Use the C implementation ****
def pRW_me_interp(xval, xp, surv_p, tau_sqd, phi, gamma):
    """CDF of R^phi*W + eps via the C trapezoid implementation (fastest path)."""
    if isinstance(xval, (int, np.int64, float)):
        xval = np.array([xval], dtype='float64')
    n_xval = len(xval)
    n_grid = len(xp)
    result = np.zeros(n_xval)  # filled in place by the C routine
    status = lib.pRW_me_interp_C(xval, xp, surv_p, tau_sqd, phi, gamma, n_xval, n_grid, result)
    if status != 1:
        sys.exit('C implementaion failed.')
    return result
# ----------- 3. Define integrand in Python: linear interpolation ----------- #
# The grid in the previous version depends on gamma. It's not ideal.
## **** (0). Generate a GRIDDED set of values for the integrand ****
## When phi > 1 and gamma < 0.01 (fixed?) or gamma > 120, the abnormality kicks in quicker.
## When phi=0.7 and gamma=1e-05, it works fine.
def survival_interp_grid1(phi, grid_size=1000):
    """Gamma-free survival grid: tabulate the standardized survival function
    on s = x / (gamma/2)^phi so a single grid serves every gamma value."""
    dense = np.linspace(0.000001, 400, grid_size, endpoint=False)
    medium = np.linspace(400.5, 1100, int(grid_size/4), endpoint=False)
    sparse = np.linspace(1100.5, 100000, int(grid_size/10), endpoint=False)
    sp = np.concatenate((dense, medium, sparse))
    arg = 1 / sp**(1/phi)
    surv_p = sc.gammainc(0.5, arg) + gammaincc_unregulized(0.5-phi, arg)/(sp*sc.gamma(0.5))
    return (sp, surv_p)
def pRW_me_interp1(xval, sp, surv_p, tau_sqd, phi, gamma):
    """CDF of R^phi*W + epsilon using the gamma-free grid from
    `survival_interp_grid1` (standardized abscissa s = x / (gamma/2)^phi).

    Same structure as `pRW_me_interp_py`, but the grid is rescaled by
    tmp1 = (gamma/2)^phi at evaluation time, and the large-x cutoff is the
    asymptotic 0.98-quantile instead of the fixed 820.
    """
    if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
    res = np.zeros(xval.size) # Store the results
    tmp1 = (gamma/2)**phi
    # If the asymp quantile level reaches 0.98, use the smooth distribution func.
    thresh = max(RW_quantile_asymp(0.98,phi,gamma),7.5) # 7.5 is for gamma<0.0001
    # Use the smooth process CDF values if tau_sqd<0.05 (nugget negligible)
    if tau_sqd>0.05:
        which = (xval<thresh)
    else:
        which = np.repeat(False, xval.shape)
    # Trapezoidal convolution for values below the threshold
    if(np.sum(which)>0):
        xval_less = xval[which]
        # Rescale the standardized grid back to the x scale before shifting.
        tp = xval_less-tmp1*sp[:,np.newaxis]
        integrand_p = np.exp(-tp**2/(2*tau_sqd)) * surv_p[:,np.newaxis]
        sd = np.sqrt(tau_sqd)
        ncol = integrand_p.shape[1]
        # Column-wise trapezoidal rule on the standardized grid spacing.
        I_1 = np.array([np.sum(np.diff(sp)*(integrand_p[:-1,index] + integrand_p[1:,index])/2)
                            for index in np.arange(ncol)])
        tmp_res = norm.cdf(xval_less, loc=0.0, scale=sd)-tmp1*I_1/np.sqrt(2*np.pi*tau_sqd)
        # Numerical negative can occur when xval is very small; clip to 0.
        if(np.any(tmp_res<0)): tmp_res[tmp_res<0] = 0
        res[which] = tmp_res
    # Smooth-process fallback for values above the threshold
    if(xval.size-np.sum(which)>0):
        res[np.invert(which)] = RW_marginal(xval[np.invert(which)],phi,gamma,survival = False)
    return res
# import matplotlib.pyplot as plt
# axes = plt.gca()
# # axes.set_ylim([0,0.125])
# X_vals = np.linspace(0.001,120,num=300)
# import time
# phi=0.7; gamma=1.2; tau_sqd = 10
# start_time = time.time()
# P_vals = RW_marginal(X_vals,phi, gamma, survival = False)
# time.time() - start_time
# start_time = time.time()
# P_mix = pRW_me(X_vals,phi,gamma,tau_sqd)
# time.time() - start_time
# grid = survival_interp_grid(phi, gamma)
# xp = grid[0]; surv_p = grid[1]
# start_time = time.time()
# P_interp_slower = pRW_me_interp_slower(X_vals, xp, surv_p, tau_sqd)
# time.time() - start_time
# start_time = time.time()
# P_interp_py = pRW_me_interp_py(X_vals, xp, surv_p, tau_sqd, phi, gamma)
# time.time() - start_time
# start_time = time.time()
# P_interp = pRW_me_interp(X_vals, xp, surv_p, tau_sqd, phi, gamma)
# time.time() - start_time
# grid = survival_interp_grid1(phi)
# sp = grid[0]; surv_p1 = grid[1]
# start_time = time.time()
# P_interp1 = pRW_me_interp1(X_vals, sp, surv_p1, tau_sqd, phi, gamma)
# time.time() - start_time
# fig, ax = plt.subplots()
# ax.plot(X_vals, P_vals, 'b', label="Smooth R^phi*W")
# ax.plot(X_vals, P_mix, 'r',linestyle='--', label="With nugget: numerical int")
# ax.plot(X_vals, P_interp_slower, 'g',linestyle=':', label="With nugget: Linear interp 1")
# ax.plot(X_vals, P_interp_py, 'm',linestyle=':', label="With nugget: Linear interp 2")
# ax.plot(X_vals, P_interp, 'y',linestyle='-.', label="With nugget: Linear interp 2 in C++")
# ax.plot(X_vals, P_interp1, 'c',linestyle='-.', label="With nugget: Linear interp w/o gamma")
# # ax.scatter(86.50499743, 0.9, c='red')
# # ax.scatter(1.11750005, 0.19, c='red')
# legend = ax.legend(loc = "lower right",shadow=True)
# plt.show()
## ----- Compared to 0.02579 secs for 1000 values when using pmixture_me() --- ##
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
#| RW_marginal | pRW_me | pRW_me_interp_slower | pRW_me_interp_py | pRW_me_interp
# ---------------------------------------------------------------------------------------------
#| smooth RW | exact marg | interp w/ vectorize | interp w/ broadcast | interp in C++
# ---------------------------------------------------------------------------------------------
#| 0.02799 secs | 4.85512 secs | 0.25337 secs | 0.05140 secs | 0.01389 secs
# ---------------------------------------------------------------------------------------------
## -------------------------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the quantile inverse function for R^phi*W + epsilon
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
# -------------------- 1. Use Shaby's interpolated method ------------------- #
# Improvement: Now we can calculate lower quantile levels that are negative.
def find_xrange_pRW_me(min_p, max_p, x_init, xp, surv_p, tau_sqd, phi, gamma):
    """Bracket the x interval whose interpolated CDF covers [min_p, max_p].

    Starting from x_init = (lower, upper), the lower endpoint is stepped
    left by 40/phi until its CDF drops below min_p, and the upper endpoint
    is doubled until its CDF exceeds max_p.
    """
    if x_init[0] >= x_init[1]:
        sys.exit('x_init[0] must be smaller than x_init[1].')
    x_range = np.zeros(2)
    # Lower endpoint: walk left until the CDF falls below the target level.
    lower = x_init[0]
    while pRW_me_interp_py(lower, xp, surv_p, tau_sqd, phi, gamma) > min_p:
        lower = lower - 40/phi
    x_range[0] = lower
    # Upper endpoint: double until the CDF exceeds the target level.
    upper = x_init[1]
    while pRW_me_interp_py(upper, xp, surv_p, tau_sqd, phi, gamma) < max_p:
        upper = upper*2
    x_range[1] = upper
    return x_range
import sklearn
import sklearn.isotonic
# from sklearn.experimental import enable_hist_gradient_boosting # noqa
# from sklearn.ensemble import HistGradientBoostingRegressor
# x_vals_tmp = x_vals.reshape(-1,1)
# cdf_gbdt = HistGradientBoostingRegressor(monotonic_cst=[1]).fit(x_vals_tmp, cdf_vals)
# cdf_vals_1 = cdf_gbdt.predict(x_vals_tmp)
def qRW_me_interp_py(p, xp, surv_p, tau_sqd, phi, gamma,
                     cdf_vals = np.nan, x_vals = np.nan, n_x=400, lower=5, upper=20):
    """Quantile function of R^phi*W + epsilon via monotone interpolation.

    Builds (or reuses) a grid of x values and their CDF values, then inverts
    the CDF with a monotone cubic (PCHIP) interpolant. If round-off makes the
    gridded CDF non-monotone, it is repaired with isotonic regression.

    p        : scalar or array of probability levels.
    xp, surv_p : survival-function interpolation grid.
    cdf_vals, x_vals : optional precomputed interpolation grid (NaN = rebuild).
    n_x      : number of grid points; lower/upper : initial bracketing interval.
    Returns an array of quantiles.
    """
    if(isinstance(p, (int, np.int64, float))): p=np.array([p])
    large_delta_large_x = False
    # Generate x_vals and cdf_vals to interpolate
    if np.any(np.isnan(x_vals)):
        x_range = find_xrange_pRW_me(np.min(p),np.max(p), np.array([lower,upper]),
                                     xp, surv_p, tau_sqd, phi, gamma)
        if np.isinf(x_range[1]): # Upper is set to 20 initially
            # BUG FIX: '10^20' is bitwise XOR in Python (== 30); the huge cap 1e20 was intended.
            x_range[1] = 1e20; large_delta_large_x = True
        if np.any(x_range<=0):
            # Negative lower endpoint: linear grid up to ~0, log-spaced grid above.
            x_vals = np.concatenate((np.linspace(x_range[0], 0.0001, num=150),
                                     np.exp(np.linspace(np.log(0.0001001), np.log(x_range[1]), num=n_x))))
        else:
            x_vals = np.exp(np.linspace(np.log(x_range[0]), np.log(x_range[1]), num=n_x))
        cdf_vals = pRW_me_interp_py(x_vals, xp, surv_p, tau_sqd, phi, gamma)
    else:
        if np.any(np.isnan(cdf_vals)):
            cdf_vals = pRW_me_interp_py(x_vals, xp, surv_p, tau_sqd, phi, gamma)
    # Obtain the quantile level using the interpolated function
    if not large_delta_large_x:
        zeros = sum(cdf_vals==0)  # skip the leading flat-zero stretch so pchip sees increasing values
        try:
            tck = interp.pchip(cdf_vals[zeros:], x_vals[zeros:]) # 1-D monotonic cubic interpolation.
        except ValueError:
            # Gridded CDF not strictly increasing: enforce monotonicity with
            # isotonic regression, then drop duplicated CDF values before re-fitting.
            ir = sklearn.isotonic.IsotonicRegression(increasing=True)
            ir.fit(x_vals[zeros:], cdf_vals[zeros:])
            cdf_vals_1 = ir.predict(x_vals[zeros:])
            indices = np.where(np.diff(cdf_vals_1)==0)[0]+1
            tck = interp.pchip(np.delete(cdf_vals_1,indices), np.delete(x_vals[zeros:],indices))
        q_vals = tck(p)
    else:
        # Upper endpoint diverged: levels beyond the gridded CDF are pinned at the cap.
        which = p>cdf_vals[-1]
        q_vals = np.repeat(np.nan, np.shape(p)[0])
        q_vals[which] = x_range[1]
        if np.any(~which):
            # tck = interp.interp1d(cdf_vals, x_vals, kind = 'cubic')
            tck = interp.pchip(cdf_vals, x_vals)
            q_vals[~which] = tck(p[~which])
    return q_vals
# Same function implemented in C++
def qRW_me_interp(p, xp, surv_p, tau_sqd, phi, gamma,
                  cdf_vals = np.nan, x_vals = np.nan, n_x=400, lower=5, upper=20):
    """Quantile function of R^phi*W + epsilon (x-range bracketing done in C).

    Same algorithm as qRW_me_interp_py, but the bracketing interval is found
    by lib.find_xrange_pRW_me_C and the CDF grid by pRW_me_interp.

    p        : scalar or array of probability levels.
    cdf_vals, x_vals : optional precomputed interpolation grid (NaN = rebuild).
    Returns an array of quantiles.
    """
    if(isinstance(p, (int, np.int64, float))): p=np.array([p])
    large_delta_large_x = False
    # (1) When phi is varying over space, we need to calculate one quantile for each phi value.
    #     Given more accurate [lower,upper] values for the single p, we can decrease n_x.
    # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int on the scalar element.
    if(len(p)==1): n_x=int(100*(p[0]+0.1))
    # (2) Generate x_vals and cdf_vals to interpolate
    if np.any(np.isnan(x_vals)):
        x_range = np.empty(2); n_grid = len(xp)
        tmp_int = lib.find_xrange_pRW_me_C(np.min(p), np.max(p), lower, upper,
                                           xp, surv_p, tau_sqd, phi, gamma, n_grid, x_range)
        if tmp_int!=1: sys.exit('C implementaion failed.')
        if np.isinf(x_range[1]): # Upper is set to 20 initially
            # BUG FIX: '10^20' is bitwise XOR in Python (== 30); the huge cap 1e20 was intended.
            x_range[1] = 1e20; large_delta_large_x = True
        if np.any(x_range<=0):
            # Negative lower endpoint: linear grid up to ~0, log-spaced grid above.
            x_vals = np.concatenate((np.linspace(x_range[0], 0.0001, num=150),
                                     np.exp(np.linspace(np.log(0.0001001), np.log(x_range[1]), num=n_x))))
        else:
            x_vals = np.exp(np.linspace(np.log(x_range[0]), np.log(x_range[1]), num=n_x))
        cdf_vals = pRW_me_interp(x_vals, xp, surv_p, tau_sqd, phi, gamma)
    else:
        if np.any(np.isnan(cdf_vals)):
            cdf_vals = pRW_me_interp(x_vals, xp, surv_p, tau_sqd, phi, gamma)
    # (3) Obtain the quantile level using the interpolated function
    if not large_delta_large_x:
        zeros = sum(cdf_vals==0)  # skip the leading flat-zero stretch
        try:
            tck = interp.pchip(cdf_vals[zeros:], x_vals[zeros:]) # 1-D monotonic cubic interpolation.
        except ValueError:
            # Gridded CDF not strictly increasing: repair with isotonic regression
            # and drop duplicated CDF values before re-fitting.
            ir = sklearn.isotonic.IsotonicRegression(increasing=True)
            ir.fit(x_vals[zeros:], cdf_vals[zeros:])
            cdf_vals_1 = ir.predict(x_vals[zeros:])
            indices = np.where(np.diff(cdf_vals_1)==0)[0]+1
            tck = interp.pchip(np.delete(cdf_vals_1,indices), np.delete(x_vals[zeros:],indices))
        q_vals = tck(p)
    else:
        # Upper endpoint diverged: levels beyond the gridded CDF are pinned at the cap.
        which = p>cdf_vals[-1]
        q_vals = np.repeat(np.nan, np.shape(p)[0])
        q_vals[which] = x_range[1]
        if np.any(~which):
            # tck = interp.interp1d(cdf_vals, x_vals, kind = 'cubic')
            tck = interp.pchip(cdf_vals, x_vals)
            q_vals[~which] = tck(p[~which])
    return q_vals
# --------------------------- 2. Use Scipy: Slower -------------------------- #
## Slow especially for phi>1.5
def diff(x, cdf_target, xp, surv_p, tau_sqd, phi, gamma):
    """Squared distance between the interpolated CDF at x and cdf_target.

    Objective function minimized by qRW_me_optim.
    """
    residual = pRW_me_interp(x, xp, surv_p, tau_sqd, phi, gamma) - cdf_target
    return residual**2
def qRW_me_optim(p, xp, surv_p, tau_sqd, phi, gamma):
    """Quantile function of R^phi*W + epsilon by Nelder-Mead optimization.

    Minimizes `diff` separately for every probability level in p; noticeably
    slower than the interpolation-based versions.
    """
    if(isinstance(p, (int, np.int64, float))): p=np.array([p])
    q_vals = np.zeros(p.shape)
    for idx, target in enumerate(p):
        fit = optim.minimize(diff, 1.0,
                             args=(target, xp, surv_p, tau_sqd, phi, gamma),
                             method='Nelder-Mead', tol=1e-6)
        q_vals[idx] = fit.x[0]
    return q_vals
# import matplotlib.pyplot as plt
# phi=0.7; gamma=1.2; tau_sqd = 20
# grid = survival_interp_grid(phi, gamma)
# xp = grid[0]; surv_p = grid[1]
# P_vals = np.linspace(0.001,0.99,num=300)
# import time
# start_time = time.time()
# Q_interp_py = qRW_me_interp_py(P_vals, xp, surv_p, tau_sqd, phi, gamma) #0.02546 secs
# time.time() - start_time
# start_time = time.time()
# Q_interp = qRW_me_interp(P_vals, xp, surv_p, tau_sqd, phi, gamma) #0.00863 secs
# time.time() - start_time
# start_time = time.time()
# Q_optim = qRW_me_optim(P_vals, xp, surv_p, tau_sqd, phi, gamma) # 0.99205 secs
# time.time() - start_time
# Q_cplus = np.empty(len(P_vals))
# n_x=400; cdf_vals = np.repeat(np.nan, n_x);x_vals = np.repeat(np.nan, n_x)
# start_time = time.time()
# tmp_int = lib.qRW_me_interp(P_vals, xp, surv_p, tau_sqd, phi, gamma,
# len(P_vals), len(xp), Q_cplus,
# cdf_vals, x_vals, n_x, 5, 20)
# time.time() - start_time #0.00742 secs
# fig, ax = plt.subplots()
# ax.plot(Q_interp_py, P_vals, 'b', label="Ben's interp method")
# ax.plot(Q_interp, P_vals, 'b', label="Ben's interp method in C++")
# ax.plot(Q_optim, P_vals, 'r',linestyle='--', label="Scipy's optim")
# ax.plot(Q_cplus, P_vals, 'c',linestyle='-.', label="Ben's interp method & pchip in C++")
# legend = ax.legend(loc = "lower right",shadow=True)
# plt.show()
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the exact density function for R^phi*W
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
##
def RW_density_uni(x,phi,gamma, log=True):
    """Exact marginal density of R^phi*W at a scalar x.

    Built from the unregularized upper incomplete gamma function; returns the
    log-density when `log` is True.
    """
    arg = gamma/(2*(x**(1/phi)))
    coef = (gamma/2)**phi/sc.gamma(0.5)
    dens = coef*gammaincc_unregulized(0.5-phi, arg)/(x**2)
    return np.log(dens) if log else dens
# Elementwise (vectorized) version of RW_density_uni.
RW_density = np.vectorize(RW_density_uni)
def RW_density_asymp(x,phi,gamma):
    """Tail (large-x) approximation to the R^phi*W density.

    Three regimes depending on phi relative to 1/2.
    """
    if phi < 0.5:
        # Moment-driven x^{-2} tail.
        moment = ((2*gamma)**phi)*sc.gamma(1-2*phi)/sc.gamma(1-phi)
        return moment/(x**2)
    if phi > 0.5:
        return np.sqrt(2*gamma/np.pi)*(x**(-1/(2*phi)-1))/(2*phi-1)
    # phi == 0.5: boundary case with a logarithmic correction.
    return np.sqrt(2*gamma/np.pi)*(np.log(x)-1)/(x**2)
# # Compare the exact and asymptotic CDF
# gamma = 1.2; x =10; phi=0.3
# import matplotlib.pyplot as plt
# fig, ax = plt.subplots()
# X_vals = np.linspace(100,1500,num=200)
# # X_vals = np.linspace(150,350,num=200) # For phi=0.3
# P_vals = RW_density(X_vals,phi,gamma,log=False)
# P_asym = RW_density_asymp(X_vals,phi,gamma)
# ax.plot(X_vals, P_vals, 'b', label="R^phi*W density")
# ax.plot(X_vals, P_asym, 'r',linestyle='--', label="R^phi*W tail approx")
# legend = ax.legend(loc = "upper right",shadow=True)
# plt.title(label="phi=0.3")
# plt.show()
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Calculate the density function for R^phi*W + epsilon
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
# ---------------- 1. Define integrand in Python: exact form ---------------- #
def mix_den_integrand(t, xval, phi, tmp1, tmp2, tau_sqd):
    """Integrand of the measurement-error density convolution.

    Smooth R^phi*W density at (xval - t), scaled by the precomputed constants
    tmp1 = gamma/2 and tmp2 = (gamma/2)^phi / Gamma(1/2), times the
    (unnormalized) Gaussian kernel exp(-t^2 / (2*tau_sqd)).
    """
    u = xval - t
    density = tmp2*gammaincc_unregulized(0.5-phi, tmp1/(u**(1/phi)))/(u**2)
    return density * np.exp(-t**2/(2*tau_sqd))
def dRW_me_uni(xval, phi, gamma, tau_sqd):
    """Exact density of R^phi*W + epsilon at scalar xval via numerical integration."""
    half_gamma = gamma/2
    coef = (half_gamma**phi)/sc.gamma(0.5)
    normalizer = np.sqrt(2*np.pi*tau_sqd)
    quad_res = si.quad(mix_den_integrand, -np.inf, xval,
                       args=(xval, phi, half_gamma, coef, tau_sqd))
    val = quad_res[0]/normalizer
    # Far in the tail the quadrature underflows; fall back to the smooth density.
    if val<0.00001 and xval>300:
        return RW_density_uni(xval,phi,gamma,log = False)
    return val
# Elementwise (vectorized) version of dRW_me_uni.
dRW_me = np.vectorize(dRW_me_uni)
# ----------- 2. Define integrand in Python: linear interpolation ----------- #
# Actually BETTER than numerical integration because there are no singular values.
# We use the Trapezoidal rule.
## **** (0). Generate a GRIDDED set of values for f_RW(x) ****
def density_interp_grid(phi, gamma, grid_size=800):
    """Build a gridded set of x values with density and survival values.

    The grid is dense near zero and progressively coarser out to 1e5, stored
    in DESCENDING, C-contiguous order as the C routines expect.
    Returns the tuple (xp, den_p, surv_p).
    """
    pieces = (np.linspace(0.000001, 200, grid_size, endpoint = False),
              np.linspace(200.5, 900, int(grid_size/4), endpoint = False),
              np.linspace(900.5, 100000, int(grid_size/10), endpoint = False))
    # Reverse to descending order and force C-contiguity for the C library.
    xp = np.ascontiguousarray(np.concatenate(pieces)[::-1], np.float64)
    n_xval = len(xp)
    den_p = np.empty(n_xval)
    surv_p = np.empty(n_xval)
    if lib.RW_density_C(xp, phi, gamma, n_xval, den_p) != 1:    # density
        sys.exit('C implementaion failed.')
    if lib.RW_marginal_C(xp, phi, gamma, n_xval, surv_p) != 1:  # cdf
        sys.exit('C implementaion failed.')
    # den_p = RW_density(xp, phi, gamma, log=False)
    # surv_p = RW_marginal(xp, phi, gamma)
    return (xp, den_p, surv_p)
## **** (1). Vectorize univariate function ****
## Time: 0.05516 secs for 300 xvals.
def dRW_me_uni_interp(xval, xp, den_p, tau_sqd):
    """Density of R^phi*W + epsilon at scalar xval by the trapezoidal rule.

    xp/den_p are the precomputed grid and smooth-density values from
    density_interp_grid.
    """
    shifted = xval - xp
    weights = np.exp(-shifted**2/(2*tau_sqd)) * den_p
    # Trapezoidal rule over the (nonuniform) shifted grid.
    integral = np.sum(np.diff(shifted)*(weights[:-1] + weights[1:])/2)
    return integral/np.sqrt(2*np.pi*tau_sqd)
def dRW_me_interp_slower(xval, xp, den_p, tau_sqd):
    """Array version of dRW_me_uni_interp, evaluated point by point."""
    out = np.empty(len(xval))
    for i, v in enumerate(xval):
        out[i] = dRW_me_uni_interp(v, xp, den_p, tau_sqd)
    return out
## **** (2). Broadcast matrices and vectorize columns ****
## Time: 0.05625 secs for 300 xvals. Faster 2x.
def dRW_me_interp_py(xval, xp, den_p, tau_sqd, phi, gamma, log =False):
    """Density of R^phi*W + epsilon, vectorized via matrix broadcasting.

    Convolves the gridded smooth density (xp, den_p) with a N(0, tau_sqd)
    kernel by the trapezoidal rule, one column per entry of xval.

    xval : scalar or 1-D array of evaluation points.
    xp, den_p : density interpolation grid from density_interp_grid()
        (xp is stored in descending order).
    log : if True, return the log-density.
    """
    if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
    tmp = np.zeros(xval.size) # Store the results
    # Beyond this threshold the convolution underflows; the smooth tail density is used instead.
    thresh_large=820
    if(tau_sqd<1): thresh_large = 50
    # If tau_sqd<=0.05 the nugget is negligible: mark nothing for convolution so
    # every (positive) xval uses the smooth-process density below.
    if tau_sqd>0.05:
        which = (xval<thresh_large)
    else:
        which = np.repeat(False, xval.shape)
    # Calculate (by convolution) for values below thresh_large
    if(np.sum(which)>0):
        xval_less = xval[which]
        # tp[i, j] = xval_less[j] - xp[i]: one column per evaluation point.
        tp = xval_less-xp[:,np.newaxis]
        integrand_p = np.exp(-tp**2/(2*tau_sqd)) * den_p[:,np.newaxis]
        denom = np.sqrt(2*np.pi*tau_sqd)
        ncol = integrand_p.shape[1]
        # Trapezoidal rule applied down each column.
        I_1 = np.array([np.sum(np.diff(tp[:,index])*(integrand_p[:-1,index] + integrand_p[1:,index])/2)
                        for index in np.arange(ncol)])
        tmp_res = I_1/denom
        # Numerical negative when xval is very small
        # if(np.any(tmp_res<0)): tmp_res[tmp_res<0] = 0
        tmp[which] = tmp_res
    # Calculate (smooth tail) for the remaining values; non-positive xval stay 0.
    if(xval.size-np.sum(which)>0):
        tmp[np.invert(which) & (xval>0)] = RW_density(xval[np.invert(which) & (xval>0)],phi,gamma,log = False)
    if log:
        return np.log(tmp)
    else:
        return tmp
## **** (3). Use the C implementation ****
def dRW_me_interp(xval, xp, den_p, tau_sqd, phi, gamma, log=False):
    """Density of R^phi*W + epsilon computed by the C implementation."""
    if(isinstance(xval, (int, np.int64, float))): xval=np.array([xval], dtype='float64')
    result = np.zeros(len(xval))  # filled in place by the C routine
    status = lib.dRW_me_interp_C(xval, xp, den_p, tau_sqd, phi, gamma,
                                 len(xval), len(xp), result)
    if status != 1: sys.exit('C implementaion failed.')
    return np.log(result) if log else result
# import matplotlib.pyplot as plt
# axes = plt.gca()
# # axes.set_ylim([0,0.125])
# X_vals = np.linspace(0.001,100,num=300)
# import time
# phi=0.7; gamma=1.2; tau_sqd=10
# start_time = time.time()
# D_vals = RW_density(X_vals,phi, gamma, log = False)
# time.time() - start_time
# start_time = time.time()
# D_mix = dRW_me(X_vals,phi,gamma,tau_sqd)
# time.time() - start_time
# grid = density_interp_grid(phi, gamma)
# xp = grid[0]; den_p = grid[1]
# start_time = time.time()
# D_interp_slower = dRW_me_interp_slower(X_vals, xp, den_p, tau_sqd)
# time.time() - start_time
# start_time = time.time()
# D_interp_py = dRW_me_interp_py(X_vals, xp, den_p, tau_sqd, phi, gamma)
# time.time() - start_time
# start_time = time.time()
# D_interp = dRW_me_interp(X_vals, xp, den_p, tau_sqd, phi, gamma)
# time.time() - start_time
# fig, ax = plt.subplots()
# ax.plot(X_vals, D_vals, 'b', label="Smooth R^phi*W")
# ax.plot(X_vals, D_mix, 'r',linestyle='--', label="With nugget: numerical int")
# ax.plot(X_vals, D_interp_slower, 'g',linestyle=':', label="With nugget: Linear interp 1")
# ax.plot(X_vals, D_interp_py, 'y',linestyle='-.', label="With nugget: Linear interp 2")
# ax.plot(X_vals, D_interp, 'm',linestyle='-.', label="With nugget: Linear interp 2 in C++")
# legend = ax.legend(loc = "upper right",shadow=True)
# plt.title(label="phi=0.3")
# plt.show()
# # ----- Compared to 0.01391 secs for 300 values when using pmixture_me() --- ##
# ------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------
# | RW_density | dRW_me | dRW_me_interp_slower | dRW_me_interp_py | dRW_me_interp |
# ------------------------------------------------------------------------------------------------
# | smooth RW | exact marg | interp w/ vectorize | interp w/ broadcast | interp in C++ |
# ------------------------------------------------------------------------------------------------
# | 0.00423 secs | 0.80787 secs | 0.05088 secs | 0.01348 secs | 0.00444 secs |
# ------------------------------------------------------------------------------------------------
# # ---------------------------------------------------------------------------------------------- ##
## scalemix_utils.R
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Compute the Matern correlation function
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## Input from a matrix of pairwise distances and a vector of parameters
##
def corr_fn(r, theta):
    """Matern correlation for distances r (scalar, vector, or matrix).

    theta[0] is the squared range parameter -- Mark's generic Matern uses
    sqrt(theta[0]) as the range -- and theta[1] is the smoothness nu.

    Fixes over the previous version: the input is copied (the original
    mutated the caller's array in place when nudging zero distances), and the
    local variable no longer shadows the builtin `range`.
    """
    # Always work on a float copy: leaves the caller's array untouched and
    # makes the 1e-10 substitution safe for integer inputs.
    r = np.array(r, dtype=float)
    if np.any(r < 0):
        sys.exit('Distance argument must be nonnegative.')
    r[r == 0] = 1e-10  # avoid the Bessel-function singularity at distance 0
    # rng = theta[0]
    rng = np.sqrt(theta[0])  # Mark's generic Matern
    nu = theta[1]
    part1 = 2 ** (1 - nu) / sc.gamma(nu)
    part2 = (r / rng) ** nu
    part3 = sc.kv(nu, r / rng)
    # part2 = (np.sqrt(2 * nu) * r / rng) ** nu
    # part3 = sc.kv(nu, np.sqrt(2 * nu) * r / rng)
    return part1 * part2 * part3
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For MVN
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
## Assumes that A = VDV', where D is a diagonal vector of eigenvectors of A, and
## V is a matrix of normalized eigenvectors of A.
## Computes A^{-1}x
##
def eig2inv_times_vector(V, d_inv, x):
    """Compute A^{-1} @ x given the eigendecomposition A = V diag(d) V',
    where d_inv holds the reciprocal eigenvalues 1/d."""
    A_inv = V @ np.diag(d_inv) @ V.T
    return A_inv @ x
## Computes y=A^{-1}x via solving linear system Ay=x
from scipy.linalg import lapack
def inv_times_vector(A, x):
    """Solve the SPD linear system A y = x via LAPACK dposv.

    Returns the raw dposv output tuple; downstream code indexes element 0 as
    the Cholesky factor of A and element 1 as the solution y.
    """
    inv = lapack.dposv(A,x)
    return inv
## Assumes that A = VDV', where D is a diagonal vector of eigenvectors of A, and
## V is a matrix of normalized eigenvectors of A.
##
## log(|A|)
##
def eig2logdet(d):
    """log|A| computed from the eigenvalues d of A."""
    return np.sum(np.log(d))
## Multivariate normal log density of R, where each column of
## R iid N(mean,VDV'), where VDV' is the covariance matrix
## It essentially computes the log density of each column of R, then takes
## the sum. Faster than looping over the columns, but not as transparent.
##
## Ignore the coefficient: -p/2*log(2*pi)
##
def dmvn_eig(R, V, d_inv, mean=0):
    """MVN log-density of the columns of R (summed), up to -p/2*log(2*pi),
    for covariance V diag(d) V' with d_inv = 1/d."""
    n_rep = 1 if len(R.shape) == 1 else R.shape[1]
    centered = R - mean
    quad = np.sum(centered * eig2inv_times_vector(V, d_inv, centered))
    return -0.5*n_rep*eig2logdet(1/d_inv) - 0.5*quad
def dmvn(R, Cor, mean=0, cholesky_inv = None):
    """MVN log-density of the columns of R (summed), up to -p/2*log(2*pi),
    for covariance Cor. `cholesky_inv` is the output of inv_times_vector():
    when given, its Cholesky factor is reused instead of refactorizing."""
    n_rep = 1 if len(R.shape) == 1 else R.shape[1]
    if cholesky_inv is None:
        inv = lapack.dposv(Cor, R-mean)
    else:
        # Solve Cor @ y = (R - mean) using the precomputed factorization.
        sol = lapack.dpotrs(cholesky_inv[0], R-mean)
        inv = (cholesky_inv[0], sol[0])
    logdet = 2*np.sum(np.log(np.diag(inv[0])))
    return -0.5*n_rep*logdet - 0.5*np.sum((R-mean) * inv[1])
## Assumes that A = VDV', where D is a diagonal vector of eigenvectors of A, and
## V is a matrix of normalized eigenvectors of A.
##
## Computes x'A^{-1}x
##
def eig2inv_quadform_vector(V, d_inv, x):
    """Quadratic form x' A^{-1} x for A = V diag(d) V', with d_inv = 1/d."""
    A_inv_x = V @ np.diag(d_inv) @ V.T @ x
    return np.sum(x * A_inv_x)
def inv_quadform_vector(Cor, x, cholesky_inv = None):
    """Quadratic form x' Cor^{-1} x, optionally reusing a precomputed Cholesky
    factorization (cholesky_inv is the output of inv_times_vector())."""
    if cholesky_inv is None:
        solved = lapack.dposv(Cor, x)[1]
    else:
        # Solve Cor @ y = x using the existing factorization.
        solved = lapack.dpotrs(cholesky_inv[0], x)[0]
    return np.sum(x * solved)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For generalized extreme value (GEV) distribution
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
## Negative shape parametrization in scipy.genextreme
##
def dgev(yvals, Loc, Scale, Shape, log=False):
    """GEV density at yvals; scipy's genextreme shape is the NEGATIVE of ours."""
    pdf_fn = genextreme.logpdf if log else genextreme.pdf
    return pdf_fn(yvals, c=-Shape, loc=Loc, scale=Scale)
def pgev(yvals, Loc, Scale, Shape, log=False):
    """GEV CDF at yvals; scipy's genextreme shape is the NEGATIVE of ours."""
    cdf_fn = genextreme.logcdf if log else genextreme.cdf
    return cdf_fn(yvals, c=-Shape, loc=Loc, scale=Scale)
def qgev(p, Loc, Scale, Shape):
    """GEV quantile at probability p; scipy's shape is the NEGATIVE of ours."""
    p_arr = p if type(p).__module__ == 'numpy' else np.array(p)
    return genextreme.ppf(p_arr, c=-Shape, loc=Loc, scale=Scale)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Transform Normal to Standard Pareto
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
##
## i.e., Stable variables with alpha=1/2
##
def norm_to_Pareto(z):
    """Map standard normal z to standard Pareto (stable alpha=1/2 link):
    1/(1 - Phi(z)), with Phi clipped just below 1 to avoid dividing by zero."""
    if(isinstance(z, (int, np.int64, float))): z=np.array([z])
    u = norm.cdf(z)
    u[u == 1] = 1 - 1e-9  # guard against CDF values rounding up to exactly 1
    return 1/(1-u)
def pareto_to_Norm(W):
    """Inverse of norm_to_Pareto: map standard Pareto W (> 1) back to normal
    quantiles; exits if any W is below 1."""
    if(isinstance(W, (int, np.int64, float))): W=np.array([W])
    if np.any(W<1): sys.exit("W must be greater than 1")
    return norm.ppf(1 - 1/W)
## -------------------------------------------------------------------------- ##
## scalemix_likelihoods.R
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Marginal transformations
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
##
## Transforms observations from a Gaussian scale mixture to a GPD, or vice versa
##
def RW_me_2_gev(X, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape):
    """Transform scale-mixture observations X to the GEV scale (via uniforms)."""
    unif_scores = pRW_me_interp(X, xp, surv_p, tau_sqd, phi, gamma)
    return qgev(unif_scores, Loc=Loc, Scale=Scale, Shape=Shape)
def gev_2_RW_me(Y, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape):
    """Transform GEV observations Y to the scale-mixture scale (via uniforms)."""
    unif_scores = pgev(Y, Loc, Scale, Shape)
    return qRW_me_interp(unif_scores, xp, surv_p, tau_sqd, phi, gamma)
## After GEV params are updated, the 'cen' should be re-calculated.
def which_censored(Y, Loc, Scale, Shape, prob_below):
    """Boolean mask of observations whose GEV CDF value falls below prob_below.
    Should be recomputed whenever the GEV parameters are updated."""
    return pgev(Y, Loc, Scale, Shape) < prob_below
## Only calculate the un-censored elements
def X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape):
    """Back-transform only the uncensored Y's to the scale-mixture scale;
    censored entries are left as NaN."""
    X = np.full(Y.shape, np.nan)
    keep = ~cen & ~cen_above
    if np.any(keep):
        X[keep] = gev_2_RW_me(Y[keep], xp, surv_p, tau_sqd, phi, gamma,
                              Loc[keep], Scale[keep], Shape[keep])
    return X
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Censored likelihood
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## The log likelihood of the data, where the data comes from a scale mixture
## of Gaussians, transformed to GEV (matrix/vector input)
##
## NOT ACTUALLY depending on X. X and cen need to be calculated in advance.
##
##
def marg_transform_data_mixture_me_likelihood0(Y, X, X_s, cen, prob_below, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp=np.nan, surv_p=np.nan, den_p=np.nan, thresh_X=np.nan):
    """Censored log-likelihood of Y under the Gaussian scale mixture with GEV
    margins (lower censoring only).

    X (the back-transformed data) and cen must be computed in advance.
    Interpolation grids (xp, surv_p, den_p) and the censoring threshold
    thresh_X are rebuilt when not supplied. Returns the scalar sum of the
    pointwise log-likelihoods.
    """
    if np.any(np.isnan(xp)):
        grid = density_interp_grid(phi, gamma)
        xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
    if np.isnan(thresh_X):
        thresh_X = qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
    sd = np.sqrt(tau_sqd)
    # ## Generate X_s
    # X_s = (R**phi)*norm_to_Pareto(Z)
    ## Initialize space to store the log-likelihoods for each observation:
    ll = np.empty(Y.shape)
    ll[:] = np.nan
    # Censored observations contribute the normal CDF mass below the threshold.
    if np.any(cen):
        ll[cen] = norm.logcdf(thresh_X, loc=X_s[cen], scale=sd)
    # Uncensored: measurement density + GEV density - mixture density (Jacobian).
    if np.any(~cen):
        # # Sometimes pgev easily becomes 1, which causes the gev_2_scalemix to become nan
        # if np.any(np.isnan(X[~cen])):
        #     return -np.inf
        ll[~cen] = norm.logpdf(X[~cen], loc=X_s[~cen], scale=sd
               )+dgev(Y[~cen], Loc=Loc[~cen], Scale=Scale[~cen], Shape=Shape[~cen], log=True
               )-dRW_me_interp(X[~cen], xp, den_p, tau_sqd, phi, gamma, log =True)
    #which = np.isnan(ll)
    #if np.any(which):
    #    ll[which] = -np.inf  # Normal density larger order than marginal density of scalemix
    return np.sum(ll)
def marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape, tau_sqd, phi, gamma,
                   xp=np.nan, surv_p=np.nan, den_p=np.nan, thresh_X=np.nan, thresh_X_above=np.nan):
    """Censored log-likelihood of Y under the Gaussian scale mixture with GEV
    margins, with BOTH lower (cen) and upper (cen_above) censoring.

    X and the censoring masks must be computed in advance. Interpolation grids
    and both thresholds are rebuilt when not supplied (note: thresh_X_above is
    recomputed whenever thresh_X is NaN). Returns the scalar sum of the
    pointwise log-likelihoods.
    """
    if np.any(np.isnan(xp)):
        grid = density_interp_grid(phi, gamma)
        xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
    if np.isnan(thresh_X):
        thresh_X = qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
        thresh_X_above = qRW_me_interp(prob_above, xp, surv_p, tau_sqd, phi, gamma)
    sd = np.sqrt(tau_sqd)
    # ## Generate X_s
    # X_s = (R**phi)*norm_to_Pareto(Z)
    ## Initialize space to store the log-likelihoods for each observation:
    ll = np.empty(Y.shape)
    ll[:] = np.nan
    # Lower-censored: normal CDF mass below the lower threshold.
    if np.any(cen):
        ll[cen] = norm.logcdf(thresh_X, loc=X_s[cen], scale=sd)
    # Upper-censored: normal survival mass above the upper threshold.
    if np.any(cen_above):
        ll[cen_above] = norm.logsf(thresh_X_above, loc=X_s[cen_above], scale=sd)
    # Uncensored: measurement density + GEV density - mixture density (Jacobian).
    if np.any(~cen & ~cen_above):
        # # Sometimes pgev easily becomes 1, which causes the gev_2_scalemix to become nan
        # if np.any(np.isnan(X[~cen])):
        #     return -np.inf
        ll[~cen & ~cen_above] = norm.logpdf(X[~cen & ~cen_above], loc=X_s[~cen & ~cen_above], scale=sd
               )+dgev(Y[~cen & ~cen_above], Loc=Loc[~cen & ~cen_above], Scale=Scale[~cen & ~cen_above], Shape=Shape[~cen & ~cen_above], log=True
               )-dRW_me_interp(X[~cen & ~cen_above], xp, den_p, tau_sqd, phi, gamma, log =True)
    #which = np.isnan(ll)
    #if np.any(which):
    #    ll[which] = -np.inf  # Normal density larger order than marginal density of scalemix
    return np.sum(ll)
## Univariate version
def marg_transform_data_mixture_me_likelihood_uni(Y, X, X_s, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape, tau_sqd, phi, gamma,
                   xp=np.nan, surv_p=np.nan, den_p=np.nan, thresh_X=np.nan, thresh_X_above=np.nan):
    """Univariate (single-observation) version of
    marg_transform_data_mixture_me_likelihood: cen and cen_above are scalar
    flags and the return value is the single log-likelihood term.
    """
    if np.any(np.isnan(xp)):
        grid = density_interp_grid(phi, gamma)
        xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
    if np.isnan(thresh_X):
        thresh_X = qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
        thresh_X_above = qRW_me_interp(prob_above, xp, surv_p, tau_sqd, phi, gamma)
    sd = np.sqrt(tau_sqd)
    # ## Generate X_s
    # X_s = (R**phi)*norm_to_Pareto(Z)
    ll=np.array(np.nan)
    # Lower-censored / upper-censored / uncensored contributions, as in the
    # vectorized version.
    if cen:
        ll = norm.logcdf(thresh_X, loc=X_s, scale=sd)
    elif cen_above:
        ll = norm.logsf(thresh_X_above, loc=X_s, scale=sd)
    else:
        ll = norm.logpdf(X, loc=X_s, scale=sd
               )+dgev(Y, Loc=Loc, Scale=Scale, Shape=Shape, log=True
               )-dRW_me_interp(X, xp, den_p, tau_sqd, phi, gamma, log =True)
    #if np.isnan(ll):
    #    ll = -np.inf
    return ll
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for phi
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians, where the mixing distribution comes from
## dlevy.
##
##
def phi_update_mixture_me_likelihood(data, params, R, Z, cen, cen_above,
                                     prob_below, prob_above, Loc, Scale, Shape, tau_sqd, gamma):
    """Full conditional log-likelihood for phi (generic Metropolis sampler).

    `data` is Y and `params` is the proposed phi; returns -inf for phi < 0.
    """
    Y = data
    phi = params
    if phi < 0:
        return -np.inf
    xp, den_p, surv_p = density_interp_grid(phi, gamma)
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    X_s = (R**phi)*norm_to_Pareto(Z)
    # ll = marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, prob_below, Loc, Scale, Shape,
    #                  tau_sqd, phi, gamma, xp, surv_p, den_p) + dR_power_phi(R,phi,m=0,s=gamma,log=True)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p)
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for tau
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians.
##
##
def tau_update_mixture_me_likelihood(data, params, X_s, cen, cen_above,
                                     prob_below, prob_above, Loc, Scale, Shape,
                                     phi, gamma, xp, surv_p, den_p):
    """Full conditional log-likelihood for tau_sqd (generic Metropolis sampler).

    `data` is Y and `params` is the proposed tau_sqd.
    """
    Y = data
    tau_sqd = params
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p)
def phi_tau_update_mixture_me_likelihood(data, params, R, Z, cen, cen_above,
                                         prob_below, prob_above, Loc, Scale, Shape, gamma):
    """Joint full conditional log-likelihood for (phi, tau_sqd).

    `data` is Y and `params` is [phi, tau_sqd]; returns -inf for phi < 0.
    """
    Y = data
    phi, tau_sqd = params[0], params[1]
    if phi < 0:
        return -np.inf
    xp, den_p, surv_p = density_interp_grid(phi, gamma)
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    X_s = (R**phi)*norm_to_Pareto(Z)
    # ll = marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, prob_below, Loc, Scale, Shape,
    #                  tau_sqd, phi, gamma, xp, surv_p, den_p) + dR_power_phi(R,phi,m=0,s=gamma,log=True)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p)
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for GEV marginals (Loc)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians.
##
##
def loc0_gev_update_mixture_me_likelihood(data, params, Y, X_s, cen, cen_above, prob_below, prob_above,
                       tau_sqd, phi, gamma, loc1, Scale, Shape, Time, xp, surv_p, den_p,
                       thresh_X, thresh_X_above):
    """Full conditional log-likelihood for the GEV location-intercept coefficients.

    `data` is the design matrix and `params` the proposed beta_loc0. The
    location surface is loc0 + loc1*Time, laid out (n_s sites) x (n_t times).
    Proposals are rejected (-inf) when they place data outside the GEV
    support, push the empirical uniform scores past [prob_below, prob_above]
    with 0.05 slack, or censor everything.
    """
    ## Design_mat = data
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_loc0 = params
    loc0 = data@beta_loc0 # mu = Xb
    if len(X_s.shape)==1:
        X_s = X_s.reshape((X_s.shape[0],1))
    n_t = X_s.shape[1]
    n_s = X_s.shape[0]
    Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
    Loc = Loc.reshape((n_s,n_t),order='F')
    # Upper endpoint of the GEV support; finite only where Shape<0.
    # NOTE(review): Shape==0 entries divide by zero here -- presumably shapes are nonzero; confirm.
    max_support = Loc - Scale/Shape
    max_support[Shape>0] = np.inf
    # When cen is not updated, the best thing we can do is to make sure the unifs is not too far from [below, above].
    tmp=pgev(Y[~cen & ~cen_above], Loc[~cen & ~cen_above], Scale[~cen & ~cen_above], Shape[~cen & ~cen_above])
    # If the parameters imply support that is not consistent with the data,
    # then reject the parameters.
    if np.any(Y > max_support) or np.min(tmp)<prob_below-0.05 or np.max(tmp)>prob_above+0.05:
        return -np.inf
    # cen = which_censored(Y, Loc, Scale, Shape, prob_below) # 'cen' isn't altered in Global
    # cen_above = ~which_censored(Y, Loc, Scale, Shape, prob_above)
    ## What if GEV params are such that all Y's are censored?
    if np.all(cen):
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    ll = marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                     tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
    return ll
def loc0_interc_gev_update_mixture_me_likelihood(data, params, beta_loc0_1, Y, X_s, cen, cen_above, prob_below, prob_above,
                       tau_sqd, phi, gamma, loc1, Scale, Shape, Time, xp, surv_p, den_p,
                       thresh_X, thresh_X_above):
    """Full conditional log-likelihood when only the HEAD of beta_loc0 is sampled.

    `params` holds the sampled leading coefficients; `beta_loc0_1` the
    remaining fixed ones, appended via np.r_. Otherwise mirrors
    loc0_gev_update_mixture_me_likelihood.

    NOTE(review): unlike the loc0/loc1 updaters this uses the strict bounds
    [prob_below, prob_above] (no 0.05 slack) and omits the all-censored
    rejection -- confirm this asymmetry is intentional.
    """
    ## Design_mat = data
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_loc0_0 = params
    beta_loc0 = np.r_[beta_loc0_0,beta_loc0_1]
    loc0 = data@beta_loc0 # mu = Xb
    # loc0 = loc0.astype(float)
    if len(X_s.shape)==1:
        X_s = X_s.reshape((X_s.shape[0],1))
    n_t = X_s.shape[1]
    n_s = X_s.shape[0]
    Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
    Loc = Loc.reshape((n_s,n_t),order='F')
    # Upper endpoint of the GEV support; finite only where Shape<0.
    # NOTE(review): Shape==0 entries divide by zero here -- presumably shapes are nonzero; confirm.
    max_support = Loc - Scale/Shape
    max_support[Shape>0] = np.inf
    # When cen is not updated, the best thing we can do is to make sure the unifs is not too far from [below, above].
    tmp=pgev(Y[~cen & ~cen_above], Loc[~cen & ~cen_above], Scale[~cen & ~cen_above], Shape[~cen & ~cen_above])
    # If the parameters imply support that is not consistent with the data,
    # then reject the parameters.
    if np.any(Y > max_support) or np.min(tmp)< prob_below or np.max(tmp)>prob_above:
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    ll = marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                     tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
    return ll
## For the slope wrt T of the location parameter
def loc1_gev_update_mixture_me_likelihood(data, params, Y, X_s, cen, cen_above, prob_below, prob_above,
                   tau_sqd, phi, gamma, loc0, Scale, Shape, Time, xp, surv_p, den_p,
                   thresh_X, thresh_X_above):
    """Metropolis log-likelihood for the slope (w.r.t. Time) of the GEV
    location parameter; returns -np.inf for inadmissible proposals.
    """
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_loc1 = params
    loc1 = data @ beta_loc1  # mu = X @ beta
    if len(X_s.shape) == 1:
        X_s = X_s.reshape((X_s.shape[0], 1))
    n_s, n_t = X_s.shape
    # Location surface: intercept plus a linear-in-Time trend, site-major ('F').
    trend = np.tile(loc1, n_t) * np.repeat(Time, n_s)
    Loc = (np.tile(loc0, n_t) + trend).reshape((n_s, n_t), order='F')
    # Upper endpoint of the GEV support (unbounded where the shape is positive).
    max_support = Loc - Scale / Shape
    max_support[Shape > 0] = np.inf
    # cen is not updated here, so only require the implied uniforms to stay
    # within 0.05 of [prob_below, prob_above].
    obs = ~cen & ~cen_above
    unifs = pgev(Y[obs], Loc[obs], Scale[obs], Shape[obs])
    if np.any(Y > max_support):
        return -np.inf
    if np.min(unifs) < prob_below - 0.05 or np.max(unifs) > prob_above + 0.05:
        return -np.inf
    # Reject GEV parameters that would censor every observation.
    if np.all(cen):
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for GEV marginals (Scale)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians.
##
##
def scale_gev_update_mixture_me_likelihood(data, params, Y, X_s, cen, cen_above, prob_below, prob_above,
                   tau_sqd, phi, gamma, Loc, Shape, Time, xp, surv_p, den_p,
                   thresh_X, thresh_X_above):
    """Metropolis log-likelihood for the GEV scale-parameter coefficients.

    Rejects (returns -np.inf) any proposal yielding a negative scale,
    censoring all observations, or placing data outside the GEV support.
    """
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_scale = params
    scale = data @ beta_scale  # sigma = X @ beta
    # The GEV scale must be positive everywhere.
    if np.any(scale < 0):
        return -np.inf
    if len(X_s.shape) == 1:
        X_s = X_s.reshape((X_s.shape[0], 1))
    n_s, n_t = X_s.shape
    Scale = np.tile(scale, n_t).reshape((n_s, n_t), order='F')
    # Upper endpoint of the GEV support (unbounded where the shape is positive).
    max_support = Loc - Scale / Shape
    max_support[Shape > 0] = np.inf
    # cen is not updated here, so only require the implied uniforms to stay
    # within 0.05 of [prob_below, prob_above].
    obs = ~cen & ~cen_above
    unifs = pgev(Y[obs], Loc[obs], Scale[obs], Shape[obs])
    if np.any(Y > max_support):
        return -np.inf
    if np.min(unifs) < prob_below - 0.05 or np.max(unifs) > prob_above + 0.05:
        return -np.inf
    # Reject GEV parameters that would censor every observation.
    if np.all(cen):
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
def scale_interc_gev_update_mixture_me_likelihood(data, params, beta_scale_1, Y, X_s, cen, cen_above, prob_below, prob_above,
                   tau_sqd, phi, gamma, Loc, Shape, Time, xp, surv_p, den_p,
                   thresh_X, thresh_X_above):
    """Metropolis log-likelihood for the intercept coefficient(s) of the GEV
    scale parameter, holding the remaining coefficients (beta_scale_1) fixed.
    """
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_scale = np.r_[params, beta_scale_1]
    scale = data @ beta_scale  # sigma = X @ beta
    # The GEV scale must be positive everywhere.
    if np.any(scale < 0):
        return -np.inf
    if len(X_s.shape) == 1:
        X_s = X_s.reshape((X_s.shape[0], 1))
    n_s, n_t = X_s.shape
    Scale = np.tile(scale, n_t).reshape((n_s, n_t), order='F')
    # Upper endpoint of the GEV support (unbounded where the shape is positive).
    max_support = Loc - Scale / Shape
    max_support[Shape > 0] = np.inf
    # cen is not updated here, so require the implied uniforms of the
    # uncensored observations to stay inside [prob_below, prob_above].
    obs = ~cen & ~cen_above
    unifs = pgev(Y[obs], Loc[obs], Scale[obs], Shape[obs])
    if np.any(Y > max_support):
        return -np.inf
    if np.min(unifs) < prob_below or np.max(unifs) > prob_above:
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for GEV marginals (Scale)
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians.
##
##
def shape_gev_update_mixture_me_likelihood(data, params, Y, X_s, cen, cen_above, prob_below, prob_above,
                   tau_sqd, phi, gamma, Loc, Scale, Time, xp, surv_p, den_p,
                   thresh_X, thresh_X_above):
    """Metropolis log-likelihood for the GEV shape-parameter coefficients.

    Rejects (returns -np.inf) proposals that censor every observation or
    place data outside the implied GEV support.
    """
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_shape = params
    xi = data @ beta_shape  # xi = X @ beta
    if len(X_s.shape) == 1:
        X_s = X_s.reshape((X_s.shape[0], 1))
    n_s, n_t = X_s.shape
    Shape = np.tile(xi, n_t).reshape((n_s, n_t), order='F')
    # Upper endpoint of the GEV support (unbounded where the shape is positive).
    max_support = Loc - Scale / Shape
    max_support[Shape > 0] = np.inf
    # cen is not updated here, so only require the implied uniforms to stay
    # within 0.05 of [prob_below, prob_above].
    obs = ~cen & ~cen_above
    unifs = pgev(Y[obs], Loc[obs], Scale[obs], Shape[obs])
    if np.any(Y > max_support):
        return -np.inf
    if np.min(unifs) < prob_below - 0.05 or np.max(unifs) > prob_above + 0.05:
        return -np.inf
    # Reject GEV parameters that would censor every observation.
    if np.all(cen):
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
def shape_interc_gev_update_mixture_me_likelihood(data, params, beta_shape_1, Y, X_s, cen, cen_above, prob_below, prob_above,
                   tau_sqd, phi, gamma, Loc, Scale, Time, xp, surv_p, den_p,
                   thresh_X, thresh_X_above):
    """Metropolis log-likelihood for the intercept coefficient(s) of the GEV
    shape parameter, holding the remaining coefficients (beta_shape_1) fixed.
    """
    ## For the time being, assume that the intercept, slope are CONSTANTS
    beta_shape = np.r_[params, beta_shape_1]
    xi = data @ beta_shape  # xi = X @ beta
    if len(X_s.shape) == 1:
        X_s = X_s.reshape((X_s.shape[0], 1))
    n_s, n_t = X_s.shape
    Shape = np.tile(xi, n_t).reshape((n_s, n_t), order='F')
    # Upper endpoint of the GEV support (unbounded where the shape is positive).
    max_support = Loc - Scale / Shape
    max_support[Shape > 0] = np.inf
    # cen is not updated here, so require the implied uniforms of the
    # uncensored observations to stay inside [prob_below, prob_above].
    obs = ~cen & ~cen_above
    unifs = pgev(Y[obs], Loc[obs], Scale[obs], Shape[obs])
    if np.any(Y > max_support):
        return -np.inf
    if np.min(unifs) < prob_below or np.max(unifs) > prob_above:
        return -np.inf
    X = X_update(Y, cen, cen_above, xp, surv_p, tau_sqd, phi, gamma, Loc, Scale, Shape)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above, prob_below, prob_above, Loc, Scale, Shape,
                   tau_sqd, phi, gamma, xp, surv_p, den_p, thresh_X, thresh_X_above)
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for latent Gaussian field
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians.
##
## Both functions in this section admit column vectors only.
##
def Z_likelihood_conditional_eigen(Z, V, d):
    """Gaussian log-density of the column vector Z, given the
    eigendecomposition (V, d) of its correlation matrix."""
    quad_form = eig2inv_quadform_vector(V, 1/d, Z)
    log_det = np.sum(np.log(d))
    return -0.5*quad_form - 0.5*log_det
def Z_likelihood_conditional(Z, Cor, cholesky_inv):
    """Gaussian log-density of the column vector Z, given its correlation
    matrix and the factorization returned by lapack.dposv."""
    quad_form = inv_quadform_vector(Cor, Z, cholesky_inv)
    # NOTE(review): log-determinant term taken from the diagonal of the
    # Cholesky factor in cholesky_inv[0] — confirm factor convention.
    log_det_term = sum(np.log(np.diag(cholesky_inv[0])))
    return -0.5*quad_form - log_det_term
def Z_update_onetime(Y, X, R, Z, cen, cen_above, prob_below, prob_above,
                   tau_sqd, phi, gamma, Loc, Scale, Shape, xp, surv_p, den_p,
                   thresh_X, thresh_X_above, Cor, cholesky_inv, Sigma_m, random_generator):
    """One sweep of componentwise Metropolis updates for the latent Gaussian
    vector Z at a single time point.

    Proposals are Gaussian random walks with per-site step sizes Sigma_m.
    WARNING: Z (and the locally derived X_s) are mutated in place whenever a
    proposal is accepted, so the caller's Z is updated by this call.

    Returns
    -------
    accept : ndarray, shape (n_s,)
        0/1 acceptance indicator for each site.
    """
    n_s = X.size
    prop_Z = np.empty(X.shape)
    accept = np.zeros(n_s)
    ## Generate X_s from the current latent vector Z
    X_s = (R**phi)*norm_to_Pareto(Z)
    log_num=0; log_denom=0 # sd= np.sqrt(tau_sqd)
    for idx, Z_idx in enumerate(Z):
        # Copy, don't alias: prop_Z must not share memory with Z.
        prop_Z[:] = Z
        # Random-walk proposal for site idx (cf. the original R code:
        # X_s(iter) + v_q(iter) * R::rnorm(0,1)).
        temp = Z_idx + Sigma_m[idx]*random_generator.standard_normal(1)
        prop_Z[idx] = temp
        prop_X_s_idx = (R**phi)*norm_to_Pareto(temp)
        # Metropolis ratio: marginal data likelihood at site idx plus the
        # conditional Gaussian log-density of the full latent vector.
        log_num = marg_transform_data_mixture_me_likelihood_uni(Y[idx], X[idx], prop_X_s_idx,
                     cen[idx], cen_above[idx], prob_below, prob_above, Loc[idx], Scale[idx], Shape[idx], tau_sqd, phi, gamma,
                     xp, surv_p, den_p, thresh_X, thresh_X_above) + Z_likelihood_conditional(prop_Z, Cor, cholesky_inv);
        log_denom = marg_transform_data_mixture_me_likelihood_uni(Y[idx], X[idx], X_s[idx],
                     cen[idx], cen_above[idx], prob_below, prob_above, Loc[idx], Scale[idx], Shape[idx], tau_sqd, phi, gamma,
                     xp, surv_p, den_p, thresh_X, thresh_X_above) + Z_likelihood_conditional(Z, Cor, cholesky_inv);
        # Guard exp() against overflow; an overflowing ratio is logged and the
        # proposal rejected (r = 0).
        # NOTE(review): overflow means log_num >> log_denom, i.e. a ratio that
        # would otherwise be accepted — confirm that rejecting here is intended.
        with np.errstate(over='raise'):
            try:
                r = np.exp(log_num - log_denom)  # overflow raised and handled below
            except FloatingPointError:
                print(" -- idx="+str(idx)+", Z="+str(Z[idx])+", prop_Z="+str(temp)+", log_num="+str(log_num)+", log_denom="+str(log_denom))
                r=0
        if random_generator.uniform(0,1,1)<r:
            Z[idx] = temp   # accept: mutates the caller's Z in place
            X_s[idx] = prop_X_s_idx
            accept[idx] = accept[idx] + 1
    #result = (X_s,accept)
    return accept
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for Matern parameters
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Update covariance parameters.
##
##
def theta_c_update_mixture_me_likelihood_eigen(data, params, S, V=np.nan, d=np.nan):
    """Log-likelihood of the latent Gaussian field over all time replicates,
    for Matern covariance parameters params = (range, nu).

    When the eigendecomposition (V, d) is not supplied, the correlation
    matrix is rebuilt from (S, params) and decomposed here.
    """
    Z = data
    rho, nu = params[0], params[1]
    if len(Z.shape) == 1:
        Z = Z.reshape((Z.shape[0], 1))
    if np.any(np.isnan(V)):
        # np.linalg.eigh: for symmetric matrices; returns (eigenvalues, eigenvectors)
        d, V = np.linalg.eigh(corr_fn(S, np.array([rho, nu])))
    per_time = [Z_likelihood_conditional_eigen(Z[:, t], V, d) for t in range(Z.shape[1])]
    return np.sum(per_time)
def range_update_mixture_me_likelihood_eigen(data, params, nu, S, V=np.nan, d=np.nan):
    """Log-likelihood of the latent Gaussian field over all time replicates,
    for the Matern range only (smoothness nu held fixed).

    When the eigendecomposition (V, d) is not supplied, the correlation
    matrix is rebuilt from (S, range, nu) and decomposed here.
    """
    Z = data
    rho = params
    if len(Z.shape) == 1:
        Z = Z.reshape((Z.shape[0], 1))
    if np.any(np.isnan(V)):
        # np.linalg.eigh: for symmetric matrices; returns (eigenvalues, eigenvectors)
        d, V = np.linalg.eigh(corr_fn(S, np.array([rho, nu])))
    per_time = [Z_likelihood_conditional_eigen(Z[:, t], V, d) for t in range(Z.shape[1])]
    return np.sum(per_time)
def theta_c_update_mixture_me_likelihood(data, params, S, Cor=None, cholesky_inv=None):
    """Log-likelihood of the latent Gaussian field over all time replicates,
    for Matern covariance parameters params = (range, nu).

    When Cor is not supplied, the correlation matrix and its Cholesky
    factorization (via lapack.dposv) are rebuilt here.
    """
    Z = data
    rho, nu = params[0], params[1]
    if len(Z.shape) == 1:
        Z = Z.reshape((Z.shape[0], 1))
    if Cor is None:
        Cor = corr_fn(S, np.array([rho, nu]))
        cholesky_inv = lapack.dposv(Cor, Z[:, 0])
    per_time = [Z_likelihood_conditional(Z[:, t], Cor, cholesky_inv) for t in range(Z.shape[1])]
    return np.sum(per_time)
def range_update_mixture_me_likelihood(data, params, nu, S, Cor=None, cholesky_inv=None):
    """Log-likelihood of the latent Gaussian field over all time replicates,
    for the Matern range only (smoothness nu held fixed).

    When Cor is not supplied, the correlation matrix and its Cholesky
    factorization (via lapack.dposv) are rebuilt here.
    """
    Z = data
    rho = params
    if len(Z.shape) == 1:
        Z = Z.reshape((Z.shape[0], 1))
    if Cor is None:
        Cor = corr_fn(S, np.array([rho, nu]))
        cholesky_inv = lapack.dposv(Cor, Z[:, 0])
    per_time = [Z_likelihood_conditional(Z[:, t], Cor, cholesky_inv) for t in range(Z.shape[1])]
    return np.sum(per_time)
##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Full likelihood for R
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## For the generic Metropolis sampler
## Samples from the parameters of the mixing distribution, for the scale
## mixture of Gaussians.
##
##
def Rt_update_mixture_me_likelihood(data, params, X, Z, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape, tau_sqd, phi, gamma,
                   xp, surv_p, den_p, thresh_X, thresh_X_above):
    """Metropolis log-likelihood for the scaling factor R at one time point.

    Negative R proposals are outside the support and return -np.inf.
    """
    Y = data
    R = params
    if R < 0:
        return -np.inf
    ## Generate X_s from the latent Gaussian vector Z
    X_s = (R**phi) * norm_to_Pareto(Z)
    return marg_transform_data_mixture_me_likelihood(Y, X, X_s, cen, cen_above,
                   prob_below, prob_above, Loc, Scale, Shape, tau_sqd, phi, gamma,
                   xp, surv_p, den_p, thresh_X, thresh_X_above)
##
## -------------------------------------------------------------------------- ##
## New python code
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## Gaussian smoothing kernel
## -------------------------------------------------------------------------- ##
## -------------------------------------------------------------------------- ##
## When d > fit radius, the weight will be zero
## h is the bandwidth parameter
##
def weights_fun(d,radius,h=1, cutoff=True):
if(isinstance(d, (int, np.int64, float))): d=np.array([d])
tmp = np.exp(-d**2/(2*h))
if cutoff: tmp[d>radius] = 0
return tmp/np.sum(tmp)
##
## -------------------------------------------------------------------------- ## | [
"scipy.special.gammaincc",
"numpy.sqrt",
"scipy.special.kv",
"scipy.stats.genextreme.logcdf",
"numpy.log",
"scipy.stats.genextreme.pdf",
"scipy.stats.norm.logsf",
"numpy.invert",
"numpy.ascontiguousarray",
"numpy.array",
"scipy.stats.norm.logpdf",
"sys.exit",
"scipy.stats.genextreme.ppf",
... | [((1065, 1113), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'ndim': '(1)', 'dtype': 'np.float64'}), '(ndim=1, dtype=np.float64)\n', (1087, 1113), True, 'import numpy as np\n'), ((1127, 1175), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'ndim': '(1)', 'dtype': 'np.float64'}), '(ndim=1, dtype=np.float64)\n', (1149, 1175), True, 'import numpy as np\n'), ((1189, 1233), 'numpy.ctypeslib.ndpointer', 'np.ctypeslib.ndpointer', ([], {'ndim': '(1)', 'dtype': '"""bool"""'}), "(ndim=1, dtype='bool')\n", (1211, 1233), True, 'import numpy as np\n'), ((8935, 8964), 'numpy.vectorize', 'np.vectorize', (['RW_marginal_uni'], {}), '(RW_marginal_uni)\n', (8947, 8964), True, 'import numpy as np\n'), ((11211, 11235), 'numpy.vectorize', 'np.vectorize', (['pRW_me_uni'], {}), '(pRW_me_uni)\n', (11223, 11235), True, 'import numpy as np\n'), ((28515, 28543), 'numpy.vectorize', 'np.vectorize', (['RW_density_uni'], {}), '(RW_density_uni)\n', (28527, 28543), True, 'import numpy as np\n'), ((30593, 30617), 'numpy.vectorize', 'np.vectorize', (['dRW_me_uni'], {}), '(dRW_me_uni)\n', (30605, 30617), True, 'import numpy as np\n'), ((987, 1048), 'os.path.abspath', 'os.path.abspath', (['"""./nonstat_model_noXs_global/p_integrand.so"""'], {}), "('./nonstat_model_noXs_global/p_integrand.so')\n", (1002, 1048), False, 'import os, ctypes\n'), ((6489, 6502), 'numpy.any', 'np.any', (['(s < 0)'], {}), '(s < 0)\n', (6495, 6502), True, 'import numpy as np\n'), ((6722, 6740), 'numpy.any', 'np.any', (['(x_phi <= m)'], {}), '(x_phi <= m)\n', (6728, 6740), True, 'import numpy as np\n'), ((6786, 6800), 'numpy.any', 'np.any', (['(s <= 0)'], {}), '(s <= 0)\n', (6792, 6800), True, 'import numpy as np\n'), ((6982, 6996), 'numpy.invert', 'np.invert', (['log'], {}), '(log)\n', (6991, 6996), True, 'import numpy as np\n'), ((10889, 10905), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], {}), '(tau_sqd)\n', (10896, 10905), True, 'import numpy as np\n'), ((10921, 11007), 'scipy.integrate.quad', 
'si.quad', (['mix_distn_integrand', '(-np.inf)', 'xval'], {'args': '(xval, phi, tmp1, tmp2, tau_sqd)'}), '(mix_distn_integrand, -np.inf, xval, args=(xval, phi, tmp1, tmp2,\n tau_sqd))\n', (10928, 11007), True, 'import scipy.integrate as si\n'), ((11560, 11610), 'numpy.linspace', 'np.linspace', (['(1e-06)', '(200)', 'grid_size'], {'endpoint': '(False)'}), '(1e-06, 200, grid_size, endpoint=False)\n', (11571, 11610), True, 'import numpy as np\n'), ((11771, 11805), 'numpy.concatenate', 'np.concatenate', (['(xp_1, xp_2, xp_3)'], {}), '((xp_1, xp_2, xp_3))\n', (11785, 11805), True, 'import numpy as np\n'), ((11854, 11890), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['xp', 'np.float64'], {}), '(xp, np.float64)\n', (11874, 11890), True, 'import numpy as np\n'), ((11974, 11990), 'numpy.empty', 'np.empty', (['n_xval'], {}), '(n_xval)\n', (11982, 11990), True, 'import numpy as np\n'), ((12357, 12373), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], {}), '(tau_sqd)\n', (12364, 12373), True, 'import numpy as np\n'), ((12912, 12932), 'numpy.zeros', 'np.zeros', (['xval.shape'], {}), '(xval.shape)\n', (12920, 12932), True, 'import numpy as np\n'), ((14246, 14262), 'numpy.zeros', 'np.zeros', (['n_xval'], {}), '(n_xval)\n', (14254, 14262), True, 'import numpy as np\n'), ((14877, 14927), 'numpy.linspace', 'np.linspace', (['(1e-06)', '(400)', 'grid_size'], {'endpoint': '(False)'}), '(1e-06, 400, grid_size, endpoint=False)\n', (14888, 14927), True, 'import numpy as np\n'), ((15090, 15124), 'numpy.concatenate', 'np.concatenate', (['(sp_1, sp_2, sp_3)'], {}), '((sp_1, sp_2, sp_3))\n', (15104, 15124), True, 'import numpy as np\n'), ((15430, 15449), 'numpy.zeros', 'np.zeros', (['xval.size'], {}), '(xval.size)\n', (15438, 15449), True, 'import numpy as np\n'), ((20034, 20045), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (20042, 20045), True, 'import numpy as np\n'), ((26137, 26154), 'numpy.zeros', 'np.zeros', (['p.shape'], {}), '(p.shape)\n', (26145, 26154), True, 'import numpy as 
np\n'), ((30318, 30346), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (30325, 30346), True, 'import numpy as np\n'), ((30358, 30443), 'scipy.integrate.quad', 'si.quad', (['mix_den_integrand', '(-np.inf)', 'xval'], {'args': '(xval, phi, tmp1, tmp2, tau_sqd)'}), '(mix_den_integrand, -np.inf, xval, args=(xval, phi, tmp1, tmp2, tau_sqd)\n )\n', (30365, 30443), True, 'import scipy.integrate as si\n'), ((30941, 30991), 'numpy.linspace', 'np.linspace', (['(1e-06)', '(200)', 'grid_size'], {'endpoint': '(False)'}), '(1e-06, 200, grid_size, endpoint=False)\n', (30952, 30991), True, 'import numpy as np\n'), ((31152, 31186), 'numpy.concatenate', 'np.concatenate', (['(xp_1, xp_2, xp_3)'], {}), '((xp_1, xp_2, xp_3))\n', (31166, 31186), True, 'import numpy as np\n'), ((31235, 31271), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['xp', 'np.float64'], {}), '(xp, np.float64)\n', (31255, 31271), True, 'import numpy as np\n'), ((31354, 31370), 'numpy.empty', 'np.empty', (['n_xval'], {}), '(n_xval)\n', (31362, 31370), True, 'import numpy as np\n'), ((31381, 31397), 'numpy.empty', 'np.empty', (['n_xval'], {}), '(n_xval)\n', (31389, 31397), True, 'import numpy as np\n'), ((31993, 32021), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (32000, 32021), True, 'import numpy as np\n'), ((32559, 32578), 'numpy.zeros', 'np.zeros', (['xval.size'], {}), '(xval.size)\n', (32567, 32578), True, 'import numpy as np\n'), ((34000, 34016), 'numpy.zeros', 'np.zeros', (['n_xval'], {}), '(n_xval)\n', (34008, 34016), True, 'import numpy as np\n'), ((37216, 37229), 'numpy.any', 'np.any', (['(r < 0)'], {}), '(r < 0)\n', (37222, 37229), True, 'import numpy as np\n'), ((37344, 37361), 'numpy.sqrt', 'np.sqrt', (['theta[0]'], {}), '(theta[0])\n', (37351, 37361), True, 'import numpy as np\n'), ((37488, 37508), 'scipy.special.kv', 'sc.kv', (['nu', '(r / range)'], {}), '(nu, r / range)\n', (37493, 37508), True, 'import scipy.special 
as sc\n'), ((38451, 38469), 'scipy.linalg.lapack.dposv', 'lapack.dposv', (['A', 'x'], {}), '(A, x)\n', (38463, 38469), False, 'from scipy.linalg import lapack\n'), ((41388, 41437), 'scipy.stats.genextreme.ppf', 'genextreme.ppf', (['p'], {'c': '(-Shape)', 'loc': 'Loc', 'scale': 'Scale'}), '(p, c=-Shape, loc=Loc, scale=Scale)\n', (41402, 41437), False, 'from scipy.stats import genextreme\n'), ((42073, 42084), 'scipy.stats.norm.cdf', 'norm.cdf', (['z'], {}), '(z)\n', (42081, 42084), False, 'from scipy.stats import norm\n'), ((42092, 42108), 'numpy.any', 'np.any', (['(tmp == 1)'], {}), '(tmp == 1)\n', (42098, 42108), True, 'import numpy as np\n'), ((42243, 42256), 'numpy.any', 'np.any', (['(W < 1)'], {}), '(W < 1)\n', (42249, 42256), True, 'import numpy as np\n'), ((42320, 42333), 'scipy.stats.norm.ppf', 'norm.ppf', (['tmp'], {}), '(tmp)\n', (42328, 42333), False, 'from scipy.stats import norm\n'), ((43669, 43686), 'numpy.empty', 'np.empty', (['Y.shape'], {}), '(Y.shape)\n', (43677, 43686), True, 'import numpy as np\n'), ((43717, 43742), 'numpy.any', 'np.any', (['(~cen & ~cen_above)'], {}), '(~cen & ~cen_above)\n', (43723, 43742), True, 'import numpy as np\n'), ((44973, 44991), 'numpy.isnan', 'np.isnan', (['thresh_X'], {}), '(thresh_X)\n', (44981, 44991), True, 'import numpy as np\n'), ((45075, 45091), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], {}), '(tau_sqd)\n', (45082, 45091), True, 'import numpy as np\n'), ((45237, 45254), 'numpy.empty', 'np.empty', (['Y.shape'], {}), '(Y.shape)\n', (45245, 45254), True, 'import numpy as np\n'), ((45277, 45288), 'numpy.any', 'np.any', (['cen'], {}), '(cen)\n', (45283, 45288), True, 'import numpy as np\n'), ((45359, 45371), 'numpy.any', 'np.any', (['(~cen)'], {}), '(~cen)\n', (45365, 45371), True, 'import numpy as np\n'), ((45912, 45922), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (45918, 45922), True, 'import numpy as np\n'), ((46307, 46325), 'numpy.isnan', 'np.isnan', (['thresh_X'], {}), '(thresh_X)\n', (46315, 46325), True, 
'import numpy as np\n'), ((46490, 46506), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], {}), '(tau_sqd)\n', (46497, 46506), True, 'import numpy as np\n'), ((46652, 46669), 'numpy.empty', 'np.empty', (['Y.shape'], {}), '(Y.shape)\n', (46660, 46669), True, 'import numpy as np\n'), ((46692, 46703), 'numpy.any', 'np.any', (['cen'], {}), '(cen)\n', (46698, 46703), True, 'import numpy as np\n'), ((46771, 46788), 'numpy.any', 'np.any', (['cen_above'], {}), '(cen_above)\n', (46777, 46788), True, 'import numpy as np\n'), ((46876, 46901), 'numpy.any', 'np.any', (['(~cen & ~cen_above)'], {}), '(~cen & ~cen_above)\n', (46882, 46901), True, 'import numpy as np\n'), ((47546, 47556), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (47552, 47556), True, 'import numpy as np\n'), ((47968, 47986), 'numpy.isnan', 'np.isnan', (['thresh_X'], {}), '(thresh_X)\n', (47976, 47986), True, 'import numpy as np\n'), ((48151, 48167), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], {}), '(tau_sqd)\n', (48158, 48167), True, 'import numpy as np\n'), ((48238, 48254), 'numpy.array', 'np.array', (['np.nan'], {}), '(np.nan)\n', (48246, 48254), True, 'import numpy as np\n'), ((54237, 54248), 'numpy.all', 'np.all', (['cen'], {}), '(cen)\n', (54243, 54248), True, 'import numpy as np\n'), ((57532, 57543), 'numpy.all', 'np.all', (['cen'], {}), '(cen)\n', (57538, 57543), True, 'import numpy as np\n'), ((58968, 58985), 'numpy.any', 'np.any', (['(scale < 0)'], {}), '(scale < 0)\n', (58974, 58985), True, 'import numpy as np\n'), ((59129, 59148), 'numpy.tile', 'np.tile', (['scale', 'n_t'], {}), '(scale, n_t)\n', (59136, 59148), True, 'import numpy as np\n'), ((59939, 59950), 'numpy.all', 'np.all', (['cen'], {}), '(cen)\n', (59945, 59950), True, 'import numpy as np\n'), ((60754, 60771), 'numpy.any', 'np.any', (['(scale < 0)'], {}), '(scale < 0)\n', (60760, 60771), True, 'import numpy as np\n'), ((60915, 60934), 'numpy.tile', 'np.tile', (['scale', 'n_t'], {}), '(scale, n_t)\n', (60922, 60934), True, 'import numpy as np\n'), 
((62933, 62952), 'numpy.tile', 'np.tile', (['shape', 'n_t'], {}), '(shape, n_t)\n', (62940, 62952), True, 'import numpy as np\n'), ((63743, 63754), 'numpy.all', 'np.all', (['cen'], {}), '(cen)\n', (63749, 63754), True, 'import numpy as np\n'), ((64673, 64692), 'numpy.tile', 'np.tile', (['shape', 'n_t'], {}), '(shape, n_t)\n', (64680, 64692), True, 'import numpy as np\n'), ((66875, 66892), 'numpy.empty', 'np.empty', (['X.shape'], {}), '(X.shape)\n', (66883, 66892), True, 'import numpy as np\n'), ((66906, 66919), 'numpy.zeros', 'np.zeros', (['n_s'], {}), '(n_s)\n', (66914, 66919), True, 'import numpy as np\n'), ((69554, 69567), 'numpy.empty', 'np.empty', (['n_t'], {}), '(n_t)\n', (69562, 69567), True, 'import numpy as np\n'), ((69596, 69610), 'numpy.arange', 'np.arange', (['n_t'], {}), '(n_t)\n', (69605, 69610), True, 'import numpy as np\n'), ((69682, 69692), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (69688, 69692), True, 'import numpy as np\n'), ((70065, 70078), 'numpy.empty', 'np.empty', (['n_t'], {}), '(n_t)\n', (70073, 70078), True, 'import numpy as np\n'), ((70107, 70121), 'numpy.arange', 'np.arange', (['n_t'], {}), '(n_t)\n', (70116, 70121), True, 'import numpy as np\n'), ((70193, 70203), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (70199, 70203), True, 'import numpy as np\n'), ((70535, 70548), 'numpy.empty', 'np.empty', (['n_t'], {}), '(n_t)\n', (70543, 70548), True, 'import numpy as np\n'), ((70577, 70591), 'numpy.arange', 'np.arange', (['n_t'], {}), '(n_t)\n', (70586, 70591), True, 'import numpy as np\n'), ((70670, 70680), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (70676, 70680), True, 'import numpy as np\n'), ((70996, 71009), 'numpy.empty', 'np.empty', (['n_t'], {}), '(n_t)\n', (71004, 71009), True, 'import numpy as np\n'), ((71038, 71052), 'numpy.arange', 'np.arange', (['n_t'], {}), '(n_t)\n', (71047, 71052), True, 'import numpy as np\n'), ((71131, 71141), 'numpy.sum', 'np.sum', (['ll'], {}), '(ll)\n', (71137, 71141), True, 'import numpy as 
np\n'), ((73099, 73124), 'numpy.exp', 'np.exp', (['(-d ** 2 / (2 * h))'], {}), '(-d ** 2 / (2 * h))\n', (73105, 73124), True, 'import numpy as np\n'), ((6508, 6538), 'sys.exit', 'sys.exit', (['"""s must be positive"""'], {}), "('s must be positive')\n", (6516, 6538), False, 'import sys\n'), ((6750, 6778), 'sys.exit', 'sys.exit', (['"""some x**phi <= m"""'], {}), "('some x**phi <= m')\n", (6758, 6778), False, 'import sys\n'), ((6811, 6841), 'sys.exit', 'sys.exit', (['"""s must be positive"""'], {}), "('s must be positive')\n", (6819, 6841), False, 'import sys\n'), ((7013, 7024), 'numpy.exp', 'np.exp', (['tmp'], {}), '(tmp)\n', (7019, 7024), True, 'import numpy as np\n'), ((7653, 7666), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (7661, 7666), True, 'import numpy as np\n'), ((7685, 7715), 'sys.exit', 'sys.exit', (['"""x must be positive"""'], {}), "('x must be positive')\n", (7693, 7715), False, 'import sys\n'), ((8762, 8775), 'scipy.special.gamma', 'sc.gamma', (['(0.5)'], {}), '(0.5)\n', (8770, 8775), True, 'import scipy.special as sc\n'), ((8786, 8808), 'scipy.special.gammainc', 'sc.gammainc', (['(0.5)', 'tmp1'], {}), '(0.5, tmp1)\n', (8797, 8808), True, 'import scipy.special as sc\n'), ((10642, 10664), 'scipy.special.gammainc', 'sc.gammainc', (['(0.5)', 'tmp3'], {}), '(0.5, tmp3)\n', (10653, 10664), True, 'import scipy.special as sc\n'), ((10731, 10762), 'numpy.exp', 'np.exp', (['(-t ** 2 / (2 * tau_sqd))'], {}), '(-t ** 2 / (2 * tau_sqd))\n', (10737, 10762), True, 'import numpy as np\n'), ((10866, 10879), 'scipy.special.gamma', 'sc.gamma', (['(0.5)'], {}), '(0.5)\n', (10874, 10879), True, 'import scipy.special as sc\n'), ((11024, 11057), 'scipy.stats.norm.cdf', 'norm.cdf', (['xval'], {'loc': '(0.0)', 'scale': 'sd'}), '(xval, loc=0.0, scale=sd)\n', (11032, 11057), False, 'from scipy.stats import norm\n'), ((12074, 12109), 'sys.exit', 'sys.exit', (['"""C implementaion failed."""'], {}), "('C implementaion failed.')\n", (12082, 12109), False, 'import 
sys\n'), ((12312, 12344), 'numpy.exp', 'np.exp', (['(-tp ** 2 / (2 * tau_sqd))'], {}), '(-tp ** 2 / (2 * tau_sqd))\n', (12318, 12344), True, 'import numpy as np\n'), ((12466, 12499), 'scipy.stats.norm.cdf', 'norm.cdf', (['xval'], {'loc': '(0.0)', 'scale': 'sd'}), '(xval, loc=0.0, scale=sd)\n', (12474, 12499), False, 'from scipy.stats import norm\n'), ((12868, 12901), 'numpy.array', 'np.array', (['[xval]'], {'dtype': '"""float64"""'}), "([xval], dtype='float64')\n", (12876, 12901), True, 'import numpy as np\n'), ((13086, 13114), 'numpy.repeat', 'np.repeat', (['(False)', 'xval.shape'], {}), '(False, xval.shape)\n', (13095, 13114), True, 'import numpy as np\n'), ((13177, 13190), 'numpy.sum', 'np.sum', (['which'], {}), '(which)\n', (13183, 13190), True, 'import numpy as np\n'), ((13352, 13368), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], {}), '(tau_sqd)\n', (13359, 13368), True, 'import numpy as np\n'), ((13715, 13734), 'numpy.any', 'np.any', (['(tmp_res < 0)'], {}), '(tmp_res < 0)\n', (13721, 13734), True, 'import numpy as np\n'), ((14158, 14191), 'numpy.array', 'np.array', (['[xval]'], {'dtype': '"""float64"""'}), "([xval], dtype='float64')\n", (14166, 14191), True, 'import numpy as np\n'), ((14399, 14434), 'sys.exit', 'sys.exit', (['"""C implementaion failed."""'], {}), "('C implementaion failed.')\n", (14407, 14434), False, 'import sys\n'), ((15169, 15190), 'scipy.special.gammainc', 'sc.gammainc', (['(0.5)', 'tmp'], {}), '(0.5, tmp)\n', (15180, 15190), True, 'import scipy.special as sc\n'), ((15386, 15419), 'numpy.array', 'np.array', (['[xval]'], {'dtype': '"""float64"""'}), "([xval], dtype='float64')\n", (15394, 15419), True, 'import numpy as np\n'), ((15797, 15825), 'numpy.repeat', 'np.repeat', (['(False)', 'xval.shape'], {}), '(False, xval.shape)\n', (15806, 15825), True, 'import numpy as np\n'), ((15888, 15901), 'numpy.sum', 'np.sum', (['which'], {}), '(which)\n', (15894, 15901), True, 'import numpy as np\n'), ((16068, 16084), 'numpy.sqrt', 'np.sqrt', (['tau_sqd'], 
{}), '(tau_sqd)\n', (16075, 16084), True, 'import numpy as np\n'), ((16427, 16446), 'numpy.any', 'np.any', (['(tmp_res < 0)'], {}), '(tmp_res < 0)\n', (16433, 16446), True, 'import numpy as np\n'), ((20243, 20296), 'sys.exit', 'sys.exit', (['"""x_init[0] must be smaller than x_init[1]."""'], {}), "('x_init[0] must be smaller than x_init[1].')\n", (20251, 20296), False, 'import sys\n'), ((21370, 21383), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (21378, 21383), True, 'import numpy as np\n'), ((21480, 21496), 'numpy.isnan', 'np.isnan', (['x_vals'], {}), '(x_vals)\n', (21488, 21496), True, 'import numpy as np\n'), ((21664, 21684), 'numpy.isinf', 'np.isinf', (['x_range[1]'], {}), '(x_range[1])\n', (21672, 21684), True, 'import numpy as np\n'), ((21787, 21807), 'numpy.any', 'np.any', (['(x_range <= 0)'], {}), '(x_range <= 0)\n', (21793, 21807), True, 'import numpy as np\n'), ((23064, 23078), 'numpy.any', 'np.any', (['(~which)'], {}), '(~which)\n', (23070, 23078), True, 'import numpy as np\n'), ((23480, 23493), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (23488, 23493), True, 'import numpy as np\n'), ((23731, 23754), 'numpy.int', 'np.int', (['(100 * (p + 0.1))'], {}), '(100 * (p + 0.1))\n', (23737, 23754), True, 'import numpy as np\n'), ((23824, 23840), 'numpy.isnan', 'np.isnan', (['x_vals'], {}), '(x_vals)\n', (23832, 23840), True, 'import numpy as np\n'), ((23861, 23872), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (23869, 23872), True, 'import numpy as np\n'), ((24128, 24148), 'numpy.isinf', 'np.isinf', (['x_range[1]'], {}), '(x_range[1])\n', (24136, 24148), True, 'import numpy as np\n'), ((24251, 24271), 'numpy.any', 'np.any', (['(x_range <= 0)'], {}), '(x_range <= 0)\n', (24257, 24271), True, 'import numpy as np\n'), ((25526, 25540), 'numpy.any', 'np.any', (['(~which)'], {}), '(~which)\n', (25532, 25540), True, 'import numpy as np\n'), ((26110, 26123), 'numpy.array', 'np.array', (['[p]'], {}), '([p])\n', (26118, 26123), True, 'import numpy 
as np\n'), ((26206, 26317), 'scipy.optimize.minimize', 'optim.minimize', (['diff', '(1.0)'], {'args': '(p_value, xp, surv_p, tau_sqd, phi, gamma)', 'method': '"""Nelder-Mead"""', 'tol': '(1e-06)'}), "(diff, 1.0, args=(p_value, xp, surv_p, tau_sqd, phi, gamma),\n method='Nelder-Mead', tol=1e-06)\n", (26220, 26317), True, 'import scipy.optimize as optim\n'), ((28361, 28374), 'scipy.special.gamma', 'sc.gamma', (['(0.5)'], {}), '(0.5)\n', (28369, 28374), True, 'import scipy.special as sc\n'), ((28460, 28471), 'numpy.log', 'np.log', (['res'], {}), '(res)\n', (28466, 28471), True, 'import numpy as np\n'), ((30157, 30188), 'numpy.exp', 'np.exp', (['(-t ** 2 / (2 * tau_sqd))'], {}), '(-t ** 2 / (2 * tau_sqd))\n', (30163, 30188), True, 'import numpy as np\n'), ((30292, 30305), 'scipy.special.gamma', 'sc.gamma', (['(0.5)'], {}), '(0.5)\n', (30300, 30305), True, 'import scipy.special as sc\n'), ((31489, 31524), 'sys.exit', 'sys.exit', (['"""C implementaion failed."""'], {}), "('C implementaion failed.')\n", (31497, 31524), False, 'import sys\n'), ((31613, 31648), 'sys.exit', 'sys.exit', (['"""C implementaion failed."""'], {}), "('C implementaion failed.')\n", (31621, 31648), False, 'import sys\n'), ((31946, 31978), 'numpy.exp', 'np.exp', (['(-tp ** 2 / (2 * tau_sqd))'], {}), '(-tp ** 2 / (2 * tau_sqd))\n', (31952, 31978), True, 'import numpy as np\n'), ((32515, 32548), 'numpy.array', 'np.array', (['[xval]'], {'dtype': '"""float64"""'}), "([xval], dtype='float64')\n", (32523, 32548), True, 'import numpy as np\n'), ((32799, 32827), 'numpy.repeat', 'np.repeat', (['(False)', 'xval.shape'], {}), '(False, xval.shape)\n', (32808, 32827), True, 'import numpy as np\n'), ((32890, 32903), 'numpy.sum', 'np.sum', (['which'], {}), '(which)\n', (32896, 32903), True, 'import numpy as np\n'), ((33067, 33095), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (33074, 33095), True, 'import numpy as np\n'), ((33699, 33710), 'numpy.log', 'np.log', (['tmp'], {}), 
'(tmp)\n', (33705, 33710), True, 'import numpy as np\n'), ((33912, 33945), 'numpy.array', 'np.array', (['[xval]'], {'dtype': '"""float64"""'}), "([xval], dtype='float64')\n", (33920, 33945), True, 'import numpy as np\n'), ((34152, 34187), 'sys.exit', 'sys.exit', (['"""C implementaion failed."""'], {}), "('C implementaion failed.')\n", (34160, 34187), False, 'import sys\n'), ((34215, 34229), 'numpy.log', 'np.log', (['result'], {}), '(result)\n', (34221, 34229), True, 'import numpy as np\n'), ((37197, 37208), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (37205, 37208), True, 'import numpy as np\n'), ((37235, 37285), 'sys.exit', 'sys.exit', (['"""Distance argument must be nonnegative."""'], {}), "('Distance argument must be nonnegative.')\n", (37243, 37285), False, 'import sys\n'), ((37433, 37445), 'scipy.special.gamma', 'sc.gamma', (['nu'], {}), '(nu)\n', (37441, 37445), True, 'import scipy.special as sc\n'), ((38665, 38674), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (38671, 38674), True, 'import numpy as np\n'), ((39472, 39499), 'scipy.linalg.lapack.dposv', 'lapack.dposv', (['Cor', '(R - mean)'], {}), '(Cor, R - mean)\n', (39484, 39499), False, 'from scipy.linalg import lapack\n'), ((39518, 39558), 'scipy.linalg.lapack.dpotrs', 'lapack.dpotrs', (['cholesky_inv[0]', '(R - mean)'], {}), '(cholesky_inv[0], R - mean)\n', (39531, 39558), False, 'from scipy.linalg import lapack\n'), ((40087, 40107), 'scipy.linalg.lapack.dposv', 'lapack.dposv', (['Cor', 'x'], {}), '(Cor, x)\n', (40099, 40107), False, 'from scipy.linalg import lapack\n'), ((40146, 40179), 'scipy.linalg.lapack.dpotrs', 'lapack.dpotrs', (['cholesky_inv[0]', 'x'], {}), '(cholesky_inv[0], x)\n', (40159, 40179), False, 'from scipy.linalg import lapack\n'), ((40865, 40921), 'scipy.stats.genextreme.logpdf', 'genextreme.logpdf', (['yvals'], {'c': '(-Shape)', 'loc': 'Loc', 'scale': 'Scale'}), '(yvals, c=-Shape, loc=Loc, scale=Scale)\n', (40882, 40921), False, 'from scipy.stats import genextreme\n'), ((40965, 
41018), 'scipy.stats.genextreme.pdf', 'genextreme.pdf', (['yvals'], {'c': '(-Shape)', 'loc': 'Loc', 'scale': 'Scale'}), '(yvals, c=-Shape, loc=Loc, scale=Scale)\n', (40979, 41018), False, 'from scipy.stats import genextreme\n'), ((41112, 41168), 'scipy.stats.genextreme.logcdf', 'genextreme.logcdf', (['yvals'], {'c': '(-Shape)', 'loc': 'Loc', 'scale': 'Scale'}), '(yvals, c=-Shape, loc=Loc, scale=Scale)\n', (41129, 41168), False, 'from scipy.stats import genextreme\n'), ((41212, 41265), 'scipy.stats.genextreme.cdf', 'genextreme.cdf', (['yvals'], {'c': '(-Shape)', 'loc': 'Loc', 'scale': 'Scale'}), '(yvals, c=-Shape, loc=Loc, scale=Scale)\n', (41226, 41265), False, 'from scipy.stats import genextreme\n'), ((41363, 41374), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (41371, 41374), True, 'import numpy as np\n'), ((42049, 42062), 'numpy.array', 'np.array', (['[z]'], {}), '([z])\n', (42057, 42062), True, 'import numpy as np\n'), ((42222, 42235), 'numpy.array', 'np.array', (['[W]'], {}), '([W])\n', (42230, 42235), True, 'import numpy as np\n'), ((42256, 42292), 'sys.exit', 'sys.exit', (['"""W must be greater than 1"""'], {}), "('W must be greater than 1')\n", (42264, 42292), False, 'import sys\n'), ((44854, 44866), 'numpy.isnan', 'np.isnan', (['xp'], {}), '(xp)\n', (44862, 44866), True, 'import numpy as np\n'), ((45305, 45350), 'scipy.stats.norm.logcdf', 'norm.logcdf', (['thresh_X'], {'loc': 'X_s[cen]', 'scale': 'sd'}), '(thresh_X, loc=X_s[cen], scale=sd)\n', (45316, 45350), False, 'from scipy.stats import norm\n'), ((46188, 46200), 'numpy.isnan', 'np.isnan', (['xp'], {}), '(xp)\n', (46196, 46200), True, 'import numpy as np\n'), ((46720, 46765), 'scipy.stats.norm.logcdf', 'norm.logcdf', (['thresh_X'], {'loc': 'X_s[cen]', 'scale': 'sd'}), '(thresh_X, loc=X_s[cen], scale=sd)\n', (46731, 46765), False, 'from scipy.stats import norm\n'), ((46811, 46867), 'scipy.stats.norm.logsf', 'norm.logsf', (['thresh_X_above'], {'loc': 'X_s[cen_above]', 'scale': 'sd'}), 
'(thresh_X_above, loc=X_s[cen_above], scale=sd)\n', (46821, 46867), False, 'from scipy.stats import norm\n'), ((47849, 47861), 'numpy.isnan', 'np.isnan', (['xp'], {}), '(xp)\n', (47857, 47861), True, 'import numpy as np\n'), ((48275, 48315), 'scipy.stats.norm.logcdf', 'norm.logcdf', (['thresh_X'], {'loc': 'X_s', 'scale': 'sd'}), '(thresh_X, loc=X_s, scale=sd)\n', (48286, 48315), False, 'from scipy.stats import norm\n'), ((53389, 53407), 'numpy.tile', 'np.tile', (['loc0', 'n_t'], {}), '(loc0, n_t)\n', (53396, 53407), True, 'import numpy as np\n'), ((53901, 53924), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (53907, 53924), True, 'import numpy as np\n'), ((55195, 55213), 'numpy.tile', 'np.tile', (['loc0', 'n_t'], {}), '(loc0, n_t)\n', (55202, 55213), True, 'import numpy as np\n'), ((55707, 55730), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (55713, 55730), True, 'import numpy as np\n'), ((56684, 56702), 'numpy.tile', 'np.tile', (['loc0', 'n_t'], {}), '(loc0, n_t)\n', (56691, 56702), True, 'import numpy as np\n'), ((57196, 57219), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (57202, 57219), True, 'import numpy as np\n'), ((59605, 59628), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (59611, 59628), True, 'import numpy as np\n'), ((61391, 61414), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (61397, 61414), True, 'import numpy as np\n'), ((63409, 63432), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (63415, 63432), True, 'import numpy as np\n'), ((65149, 65172), 'numpy.any', 'np.any', (['(Y > max_support)'], {}), '(Y > max_support)\n', (65155, 65172), True, 'import numpy as np\n'), ((69393, 69404), 'numpy.isnan', 'np.isnan', (['V'], {}), '(V)\n', (69401, 69404), True, 'import numpy as np\n'), ((69464, 69483), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Cor'], {}), '(Cor)\n', (69478, 69483), 
True, 'import numpy as np\n'), ((69904, 69915), 'numpy.isnan', 'np.isnan', (['V'], {}), '(V)\n', (69912, 69915), True, 'import numpy as np\n'), ((69975, 69994), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Cor'], {}), '(Cor)\n', (69989, 69994), True, 'import numpy as np\n'), ((70502, 70528), 'scipy.linalg.lapack.dposv', 'lapack.dposv', (['Cor', 'Z[:, 0]'], {}), '(Cor, Z[:, 0])\n', (70514, 70528), False, 'from scipy.linalg import lapack\n'), ((70963, 70989), 'scipy.linalg.lapack.dposv', 'lapack.dposv', (['Cor', 'Z[:, 0]'], {}), '(Cor, Z[:, 0])\n', (70975, 70989), False, 'from scipy.linalg import lapack\n'), ((73077, 73090), 'numpy.array', 'np.array', (['[d]'], {}), '([d])\n', (73085, 73090), True, 'import numpy as np\n'), ((73166, 73177), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (73172, 73177), True, 'import numpy as np\n'), ((6962, 6973), 'numpy.log', 'np.log', (['phi'], {}), '(phi)\n', (6968, 6973), True, 'import numpy as np\n'), ((7744, 7755), 'scipy.special.gamma', 'sc.gamma', (['a'], {}), '(a)\n', (7752, 7755), True, 'import scipy.special as sc\n'), ((7756, 7774), 'scipy.special.gammaincc', 'sc.gammaincc', (['a', 'x'], {}), '(a, x)\n', (7768, 7774), True, 'import scipy.special as sc\n'), ((7880, 7901), 'mpmath.gammainc', 'mpmath.gammainc', (['(0)', 'x'], {}), '(0, x)\n', (7895, 7901), False, 'import mpmath\n'), ((9075, 9092), 'scipy.special.gamma', 'sc.gamma', (['(1 - phi)'], {}), '(1 - phi)\n', (9083, 9092), True, 'import scipy.special as sc\n'), ((9368, 9385), 'scipy.special.gamma', 'sc.gamma', (['(1 - phi)'], {}), '(1 - phi)\n', (9376, 9385), True, 'import scipy.special as sc\n'), ((11065, 11093), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (11072, 11093), True, 'import numpy as np\n'), ((12504, 12532), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (12511, 12532), True, 'import numpy as np\n'), ((13289, 13321), 'numpy.exp', 'np.exp', (['(-tp ** 2 / (2 * tau_sqd))'], {}), 
'(-tp ** 2 / (2 * tau_sqd))\n', (13295, 13321), True, 'import numpy as np\n'), ((13583, 13621), 'scipy.stats.norm.cdf', 'norm.cdf', (['xval_less'], {'loc': '(0.0)', 'scale': 'sd'}), '(xval_less, loc=0.0, scale=sd)\n', (13591, 13621), False, 'from scipy.stats import norm\n'), ((13872, 13885), 'numpy.sum', 'np.sum', (['which'], {}), '(which)\n', (13878, 13885), True, 'import numpy as np\n'), ((13902, 13918), 'numpy.invert', 'np.invert', (['which'], {}), '(which)\n', (13911, 13918), True, 'import numpy as np\n'), ((16005, 16037), 'numpy.exp', 'np.exp', (['(-tp ** 2 / (2 * tau_sqd))'], {}), '(-tp ** 2 / (2 * tau_sqd))\n', (16011, 16037), True, 'import numpy as np\n'), ((16290, 16328), 'scipy.stats.norm.cdf', 'norm.cdf', (['xval_less'], {'loc': '(0.0)', 'scale': 'sd'}), '(xval_less, loc=0.0, scale=sd)\n', (16298, 16328), False, 'from scipy.stats import norm\n'), ((16584, 16597), 'numpy.sum', 'np.sum', (['which'], {}), '(which)\n', (16590, 16597), True, 'import numpy as np\n'), ((16614, 16630), 'numpy.invert', 'np.invert', (['which'], {}), '(which)\n', (16623, 16630), True, 'import numpy as np\n'), ((21536, 21545), 'numpy.min', 'np.min', (['p'], {}), '(p)\n', (21542, 21545), True, 'import numpy as np\n'), ((21546, 21555), 'numpy.max', 'np.max', (['p'], {}), '(p)\n', (21552, 21555), True, 'import numpy as np\n'), ((21557, 21581), 'numpy.array', 'np.array', (['[lower, upper]'], {}), '([lower, upper])\n', (21565, 21581), True, 'import numpy as np\n'), ((22192, 22210), 'numpy.isnan', 'np.isnan', (['cdf_vals'], {}), '(cdf_vals)\n', (22200, 22210), True, 'import numpy as np\n'), ((22459, 22505), 'scipy.interpolate.pchip', 'interp.pchip', (['cdf_vals[zeros:]', 'x_vals[zeros:]'], {}), '(cdf_vals[zeros:], x_vals[zeros:])\n', (22471, 22505), True, 'import scipy.interpolate as interp\n'), ((23168, 23198), 'scipy.interpolate.pchip', 'interp.pchip', (['cdf_vals', 'x_vals'], {}), '(cdf_vals, x_vals)\n', (23180, 23198), True, 'import scipy.interpolate as interp\n'), ((23934, 23943), 
'numpy.min', 'np.min', (['p'], {}), '(p)\n', (23940, 23943), True, 'import numpy as np\n'), ((23945, 23954), 'numpy.max', 'np.max', (['p'], {}), '(p)\n', (23951, 23954), True, 'import numpy as np\n'), ((24081, 24116), 'sys.exit', 'sys.exit', (['"""C implementaion failed."""'], {}), "('C implementaion failed.')\n", (24089, 24116), False, 'import sys\n'), ((24653, 24671), 'numpy.isnan', 'np.isnan', (['cdf_vals'], {}), '(cdf_vals)\n', (24661, 24671), True, 'import numpy as np\n'), ((24921, 24967), 'scipy.interpolate.pchip', 'interp.pchip', (['cdf_vals[zeros:]', 'x_vals[zeros:]'], {}), '(cdf_vals[zeros:], x_vals[zeros:])\n', (24933, 24967), True, 'import scipy.interpolate as interp\n'), ((25630, 25660), 'scipy.interpolate.pchip', 'interp.pchip', (['cdf_vals', 'x_vals'], {}), '(cdf_vals, x_vals)\n', (25642, 25660), True, 'import scipy.interpolate as interp\n'), ((28648, 28665), 'scipy.special.gamma', 'sc.gamma', (['(1 - phi)'], {}), '(1 - phi)\n', (28656, 28665), True, 'import scipy.special as sc\n'), ((33002, 33034), 'numpy.exp', 'np.exp', (['(-tp ** 2 / (2 * tau_sqd))'], {}), '(-tp ** 2 / (2 * tau_sqd))\n', (33008, 33034), True, 'import numpy as np\n'), ((33538, 33551), 'numpy.sum', 'np.sum', (['which'], {}), '(which)\n', (33544, 33551), True, 'import numpy as np\n'), ((39703, 39730), 'numpy.sum', 'np.sum', (['((R - mean) * inv[1])'], {}), '((R - mean) * inv[1])\n', (39709, 39730), True, 'import numpy as np\n'), ((48344, 48389), 'scipy.stats.norm.logsf', 'norm.logsf', (['thresh_X_above'], {'loc': 'X_s', 'scale': 'sd'}), '(thresh_X_above, loc=X_s, scale=sd)\n', (48354, 48389), False, 'from scipy.stats import norm\n'), ((53410, 53428), 'numpy.tile', 'np.tile', (['loc1', 'n_t'], {}), '(loc1, n_t)\n', (53417, 53428), True, 'import numpy as np\n'), ((53429, 53449), 'numpy.repeat', 'np.repeat', (['Time', 'n_s'], {}), '(Time, n_s)\n', (53438, 53449), True, 'import numpy as np\n'), ((53928, 53939), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (53934, 53939), True, 'import 
numpy as np\n'), ((53959, 53970), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (53965, 53970), True, 'import numpy as np\n'), ((55216, 55234), 'numpy.tile', 'np.tile', (['loc1', 'n_t'], {}), '(loc1, n_t)\n', (55223, 55234), True, 'import numpy as np\n'), ((55235, 55255), 'numpy.repeat', 'np.repeat', (['Time', 'n_s'], {}), '(Time, n_s)\n', (55244, 55255), True, 'import numpy as np\n'), ((55734, 55745), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (55740, 55745), True, 'import numpy as np\n'), ((55761, 55772), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (55767, 55772), True, 'import numpy as np\n'), ((56705, 56723), 'numpy.tile', 'np.tile', (['loc1', 'n_t'], {}), '(loc1, n_t)\n', (56712, 56723), True, 'import numpy as np\n'), ((56724, 56744), 'numpy.repeat', 'np.repeat', (['Time', 'n_s'], {}), '(Time, n_s)\n', (56733, 56744), True, 'import numpy as np\n'), ((57223, 57234), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (57229, 57234), True, 'import numpy as np\n'), ((57254, 57265), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (57260, 57265), True, 'import numpy as np\n'), ((59632, 59643), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (59638, 59643), True, 'import numpy as np\n'), ((59663, 59674), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (59669, 59674), True, 'import numpy as np\n'), ((61418, 61429), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (61424, 61429), True, 'import numpy as np\n'), ((61444, 61455), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (61450, 61455), True, 'import numpy as np\n'), ((63436, 63447), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (63442, 63447), True, 'import numpy as np\n'), ((63467, 63478), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (63473, 63478), True, 'import numpy as np\n'), ((65176, 65187), 'numpy.min', 'np.min', (['tmp'], {}), '(tmp)\n', (65182, 65187), True, 'import numpy as np\n'), ((65202, 65213), 'numpy.max', 'np.max', (['tmp'], {}), '(tmp)\n', (65208, 65213), True, 'import numpy 
as np\n'), ((68073, 68098), 'numpy.errstate', 'np.errstate', ([], {'over': '"""raise"""'}), "(over='raise')\n", (68084, 68098), True, 'import numpy as np\n'), ((69428, 69449), 'numpy.array', 'np.array', (['[range, nu]'], {}), '([range, nu])\n', (69436, 69449), True, 'import numpy as np\n'), ((69939, 69960), 'numpy.array', 'np.array', (['[range, nu]'], {}), '([range, nu])\n', (69947, 69960), True, 'import numpy as np\n'), ((70461, 70482), 'numpy.array', 'np.array', (['[range, nu]'], {}), '([range, nu])\n', (70469, 70482), True, 'import numpy as np\n'), ((70922, 70943), 'numpy.array', 'np.array', (['[range, nu]'], {}), '([range, nu])\n', (70930, 70943), True, 'import numpy as np\n'), ((9057, 9078), 'scipy.special.gamma', 'sc.gamma', (['(1 - 2 * phi)'], {}), '(1 - 2 * phi)\n', (9065, 9078), True, 'import scipy.special as sc\n'), ((9350, 9371), 'scipy.special.gamma', 'sc.gamma', (['(1 - 2 * phi)'], {}), '(1 - 2 * phi)\n', (9358, 9371), True, 'import scipy.special as sc\n'), ((9529, 9555), 'numpy.sqrt', 'np.sqrt', (['(2 * gamma / np.pi)'], {}), '(2 * gamma / np.pi)\n', (9536, 9555), True, 'import numpy as np\n'), ((9571, 9587), 'scipy.special.lambertw', 'sc.lambertw', (['tmp'], {}), '(tmp)\n', (9582, 9587), True, 'import scipy.special as sc\n'), ((12393, 12404), 'numpy.diff', 'np.diff', (['tp'], {}), '(tp)\n', (12400, 12404), True, 'import numpy as np\n'), ((13626, 13654), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (13633, 13654), True, 'import numpy as np\n'), ((13939, 13955), 'numpy.invert', 'np.invert', (['which'], {}), '(which)\n', (13948, 13955), True, 'import numpy as np\n'), ((15231, 15244), 'scipy.special.gamma', 'sc.gamma', (['(0.5)'], {}), '(0.5)\n', (15239, 15244), True, 'import scipy.special as sc\n'), ((16338, 16366), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * tau_sqd)'], {}), '(2 * np.pi * tau_sqd)\n', (16345, 16366), True, 'import numpy as np\n'), ((16651, 16667), 'numpy.invert', 'np.invert', (['which'], {}), 
'(which)\n', (16660, 16667), True, 'import numpy as np\n'), ((22587, 22639), 'sklearn.isotonic.IsotonicRegression', 'sklearn.isotonic.IsotonicRegression', ([], {'increasing': '(True)'}), '(increasing=True)\n', (22622, 22639), False, 'import sklearn\n'), ((23002, 23013), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (23010, 23013), True, 'import numpy as np\n'), ((25049, 25101), 'sklearn.isotonic.IsotonicRegression', 'sklearn.isotonic.IsotonicRegression', ([], {'increasing': '(True)'}), '(increasing=True)\n', (25084, 25101), False, 'import sklearn\n'), ((25464, 25475), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (25472, 25475), True, 'import numpy as np\n'), ((28630, 28651), 'scipy.special.gamma', 'sc.gamma', (['(1 - 2 * phi)'], {}), '(1 - 2 * phi)\n', (28638, 28651), True, 'import scipy.special as sc\n'), ((32037, 32048), 'numpy.diff', 'np.diff', (['tp'], {}), '(tp)\n', (32044, 32048), True, 'import numpy as np\n'), ((33568, 33584), 'numpy.invert', 'np.invert', (['which'], {}), '(which)\n', (33577, 33584), True, 'import numpy as np\n'), ((38308, 38322), 'numpy.diag', 'np.diag', (['d_inv'], {}), '(d_inv)\n', (38315, 38322), True, 'import numpy as np\n'), ((39651, 39666), 'numpy.diag', 'np.diag', (['inv[0]'], {}), '(inv[0])\n', (39658, 39666), True, 'import numpy as np\n'), ((39953, 39967), 'numpy.diag', 'np.diag', (['d_inv'], {}), '(d_inv)\n', (39960, 39967), True, 'import numpy as np\n'), ((45540, 45585), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['X[~cen]'], {'loc': 'X_s[~cen]', 'scale': 'sd'}), '(X[~cen], loc=X_s[~cen], scale=sd)\n', (45551, 45585), False, 'from scipy.stats import norm\n'), ((47083, 47154), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['X[~cen & ~cen_above]'], {'loc': 'X_s[~cen & ~cen_above]', 'scale': 'sd'}), '(X[~cen & ~cen_above], loc=X_s[~cen & ~cen_above], scale=sd)\n', (47094, 47154), False, 'from scipy.stats import norm\n'), ((66375, 66384), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (66381, 66384), True, 'import numpy as np\n'), 
((68137, 68164), 'numpy.exp', 'np.exp', (['(log_num - log_denom)'], {}), '(log_num - log_denom)\n', (68143, 68164), True, 'import numpy as np\n'), ((6952, 6961), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (6958, 6961), True, 'import numpy as np\n'), ((9148, 9174), 'numpy.sqrt', 'np.sqrt', (['(2 * gamma / np.pi)'], {}), '(2 * gamma / np.pi)\n', (9155, 9174), True, 'import numpy as np\n'), ((9228, 9254), 'numpy.sqrt', 'np.sqrt', (['(2 * gamma / np.pi)'], {}), '(2 * gamma / np.pi)\n', (9235, 9254), True, 'import numpy as np\n'), ((9251, 9260), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (9257, 9260), True, 'import numpy as np\n'), ((13547, 13562), 'numpy.arange', 'np.arange', (['ncol'], {}), '(ncol)\n', (13556, 13562), True, 'import numpy as np\n'), ((16254, 16269), 'numpy.arange', 'np.arange', (['ncol'], {}), '(ncol)\n', (16263, 16269), True, 'import numpy as np\n'), ((21844, 21884), 'numpy.linspace', 'np.linspace', (['x_range[0]', '(0.0001)'], {'num': '(150)'}), '(x_range[0], 0.0001, num=150)\n', (21855, 21884), True, 'import numpy as np\n'), ((22037, 22055), 'numpy.log', 'np.log', (['x_range[0]'], {}), '(x_range[0])\n', (22043, 22055), True, 'import numpy as np\n'), ((22057, 22075), 'numpy.log', 'np.log', (['x_range[1]'], {}), '(x_range[1])\n', (22063, 22075), True, 'import numpy as np\n'), ((22836, 22866), 'numpy.delete', 'np.delete', (['cdf_vals_1', 'indices'], {}), '(cdf_vals_1, indices)\n', (22845, 22866), True, 'import numpy as np\n'), ((22867, 22901), 'numpy.delete', 'np.delete', (['x_vals[zeros:]', 'indices'], {}), '(x_vals[zeros:], indices)\n', (22876, 22901), True, 'import numpy as np\n'), ((24308, 24348), 'numpy.linspace', 'np.linspace', (['x_range[0]', '(0.0001)'], {'num': '(150)'}), '(x_range[0], 0.0001, num=150)\n', (24319, 24348), True, 'import numpy as np\n'), ((24501, 24519), 'numpy.log', 'np.log', (['x_range[0]'], {}), '(x_range[0])\n', (24507, 24519), True, 'import numpy as np\n'), ((24521, 24539), 'numpy.log', 'np.log', (['x_range[1]'], {}), 
'(x_range[1])\n', (24527, 24539), True, 'import numpy as np\n'), ((25298, 25328), 'numpy.delete', 'np.delete', (['cdf_vals_1', 'indices'], {}), '(cdf_vals_1, indices)\n', (25307, 25328), True, 'import numpy as np\n'), ((25329, 25363), 'numpy.delete', 'np.delete', (['x_vals[zeros:]', 'indices'], {}), '(x_vals[zeros:], indices)\n', (25338, 25363), True, 'import numpy as np\n'), ((28726, 28752), 'numpy.sqrt', 'np.sqrt', (['(2 * gamma / np.pi)'], {}), '(2 * gamma / np.pi)\n', (28733, 28752), True, 'import numpy as np\n'), ((28804, 28830), 'numpy.sqrt', 'np.sqrt', (['(2 * gamma / np.pi)'], {}), '(2 * gamma / np.pi)\n', (28811, 28830), True, 'import numpy as np\n'), ((33269, 33284), 'numpy.arange', 'np.arange', (['ncol'], {}), '(ncol)\n', (33278, 33284), True, 'import numpy as np\n'), ((33615, 33631), 'numpy.invert', 'np.invert', (['which'], {}), '(which)\n', (33624, 33631), True, 'import numpy as np\n'), ((48408, 48441), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['X'], {'loc': 'X_s', 'scale': 'sd'}), '(X, loc=X_s, scale=sd)\n', (48419, 48441), False, 'from scipy.stats import norm\n'), ((66557, 66581), 'numpy.diag', 'np.diag', (['cholesky_inv[0]'], {}), '(cholesky_inv[0])\n', (66564, 66581), True, 'import numpy as np\n'), ((6559, 6579), 'scipy.stats.uniform.rvs', 'uniform.rvs', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (6570, 6579), False, 'from scipy.stats import uniform\n'), ((7842, 7852), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (7848, 7852), True, 'import numpy as np\n'), ((9446, 9472), 'numpy.sqrt', 'np.sqrt', (['(2 * gamma / np.pi)'], {}), '(2 * gamma / np.pi)\n', (9453, 9472), True, 'import numpy as np\n'), ((28828, 28837), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (28834, 28837), True, 'import numpy as np\n'), ((6859, 6882), 'numpy.log', 'np.log', (['(s / (2 * np.pi))'], {}), '(s / (2 * np.pi))\n', (6865, 6882), True, 'import numpy as np\n'), ((13446, 13467), 'numpy.diff', 'np.diff', (['tp[:, index]'], {}), '(tp[:, index])\n', (13453, 13467), True, 
'import numpy as np\n'), ((16162, 16173), 'numpy.diff', 'np.diff', (['sp'], {}), '(sp)\n', (16169, 16173), True, 'import numpy as np\n'), ((21932, 21949), 'numpy.log', 'np.log', (['(0.0001001)'], {}), '(0.0001001)\n', (21938, 21949), True, 'import numpy as np\n'), ((21951, 21969), 'numpy.log', 'np.log', (['x_range[1]'], {}), '(x_range[1])\n', (21957, 21969), True, 'import numpy as np\n'), ((24396, 24413), 'numpy.log', 'np.log', (['(0.0001001)'], {}), '(0.0001001)\n', (24402, 24413), True, 'import numpy as np\n'), ((24415, 24433), 'numpy.log', 'np.log', (['x_range[1]'], {}), '(x_range[1])\n', (24421, 24433), True, 'import numpy as np\n'), ((33168, 33189), 'numpy.diff', 'np.diff', (['tp[:, index]'], {}), '(tp[:, index])\n', (33175, 33189), True, 'import numpy as np\n'), ((6889, 6906), 'numpy.log', 'np.log', (['(x_phi - m)'], {}), '(x_phi - m)\n', (6895, 6906), True, 'import numpy as np\n'), ((22776, 22795), 'numpy.diff', 'np.diff', (['cdf_vals_1'], {}), '(cdf_vals_1)\n', (22783, 22795), True, 'import numpy as np\n'), ((25238, 25257), 'numpy.diff', 'np.diff', (['cdf_vals_1'], {}), '(cdf_vals_1)\n', (25245, 25257), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import pandas as pd
from pyalink.alink import *
class TestPinyi(unittest.TestCase):
def test_one_hot(self):
data = np.array([
["assisbragasm", 1],
["assiseduc", 1],
["assist", 1],
["assiseduc", 1],
["assistebrasil", 1],
["assiseduc", 1],
["assistebrasil", 1],
["assistencialgsamsung", 1]
])
# load data
df = pd.DataFrame({"query": data[:, 0], "weight": data[:, 1]})
inOp = dataframeToOperator(df, schemaStr='query string, weight long', op_type='batch')
# one hot train
one_hot = OneHotTrainBatchOp().setSelectedCols(["query"])
model = inOp.link(one_hot)
from pyalink.alink.common.types.model_info import OneHotModelInfo
def model_info_callback(d: OneHotModelInfo):
self.assertEquals(type(d), OneHotModelInfo)
print("selected cols:", d.getSelectedColsInModel())
print("category size:", d.getDistinctTokenNumber("query"))
one_hot.lazyCollectModelInfo(model_info_callback)
# batch predict
predictor = OneHotPredictBatchOp().setOutputCols(["predicted_r"]).setReservedCols(["weight"]).setDropLast(False)
print(BatchOperator.collectToDataframe(predictor.linkFrom(model, inOp)))
| [
"pandas.DataFrame",
"numpy.array"
] | [((167, 350), 'numpy.array', 'np.array', (["[['assisbragasm', 1], ['assiseduc', 1], ['assist', 1], ['assiseduc', 1], [\n 'assistebrasil', 1], ['assiseduc', 1], ['assistebrasil', 1], [\n 'assistencialgsamsung', 1]]"], {}), "([['assisbragasm', 1], ['assiseduc', 1], ['assist', 1], [\n 'assiseduc', 1], ['assistebrasil', 1], ['assiseduc', 1], [\n 'assistebrasil', 1], ['assistencialgsamsung', 1]])\n", (175, 350), True, 'import numpy as np\n'), ((481, 538), 'pandas.DataFrame', 'pd.DataFrame', (["{'query': data[:, 0], 'weight': data[:, 1]}"], {}), "({'query': data[:, 0], 'weight': data[:, 1]})\n", (493, 538), True, 'import pandas as pd\n')] |
# Copyright (c) 2021 <NAME>
# This software is distributed under the terms of the MIT license
# which is available at https://opensource.org/licenses/MIT
"""Optuna only functionality."""
import logging
import numpy as np
import optuna
from optuna.pruners import BasePruner
from optuna.study import StudyDirection
from scipy import stats
_logger = logging.getLogger(__name__)
class SignificanceRepeatedTrainingPruner(BasePruner):
"""Pruner which uses statistical significance as an heuristic for decision-making.
Pruner to use statistical significance to prune repeated trainings like in a cross validation.
As the test method a t-test is used. Our experiments have shown that an ``aplha`` value
between 0.3 and 0.4 is reasonable.
"""
def __init__(self, alpha: float = 0.1, n_warmup_steps: int = 4) -> None:
"""Constructor.
Args:
alpha: The alpha level for the statistical significance test.
The larger this value is, the more aggressively this pruner works.
The smaller this value is, the stronger the statistical difference between the two
distributions must be for Optuna to prune.
``alpha`` must be ``0 < alpha < 1``.
n_warmup_steps: Pruning is disabled until the trial reaches or exceeds the given number
of steps.
"""
# input value check
if n_warmup_steps < 0:
raise ValueError(
"'n_warmup_steps' must not be negative! n_warmup_steps: {}".format(n_warmup_steps)
)
if alpha >= 1:
raise ValueError("'alpha' must be smaller than 1! {}".format(alpha))
if alpha <= 0:
raise ValueError("'alpha' must be greater than 0! {}".format(alpha))
self.n_warmup_steps = n_warmup_steps
self.alpha = alpha
def prune(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> bool:
"""Judge whether the trial should be pruned based on the reported values."""
# get best tial - best trial is not available for first trial
best_trial = None
try:
best_trial = study.best_trial
except ValueError:
pass
if best_trial is not None:
trial_intermediate_values = list(trial.intermediate_values.values())
_logger.debug("trial_intermediate_values: %s", trial_intermediate_values)
# wait until the trial reaches or exceeds n_warmup_steps number of steps
if len(trial_intermediate_values) >= self.n_warmup_steps:
trial_mean = np.mean(trial_intermediate_values)
best_trial_intermediate_values = list(best_trial.intermediate_values.values())
best_trial_mean = np.mean(best_trial_intermediate_values)
_logger.debug("trial_mean: %s", trial_mean)
_logger.debug("best_trial_intermediate_values: %s", best_trial_intermediate_values)
_logger.debug("best_trial_mean: %s", best_trial_mean)
if (
trial_mean < best_trial_mean and study.direction == StudyDirection.MAXIMIZE
) or (trial_mean > best_trial_mean and study.direction == StudyDirection.MINIMIZE):
pvalue = stats.ttest_ind(
trial_intermediate_values,
best_trial_intermediate_values,
).pvalue
_logger.debug("pvalue: %s", pvalue)
if pvalue < self.alpha:
_logger.info("We prune this trial. pvalue: %s", pvalue)
return True
else:
_logger.debug(
"This trial is better than best trial - we do not check for pruning."
)
else:
_logger.debug("This trial did not reach n_warmup_steps - we do no checks.")
return False
| [
"logging.getLogger",
"numpy.mean",
"scipy.stats.ttest_ind"
] | [((352, 379), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (369, 379), False, 'import logging\n'), ((2627, 2661), 'numpy.mean', 'np.mean', (['trial_intermediate_values'], {}), '(trial_intermediate_values)\n', (2634, 2661), True, 'import numpy as np\n'), ((2792, 2831), 'numpy.mean', 'np.mean', (['best_trial_intermediate_values'], {}), '(best_trial_intermediate_values)\n', (2799, 2831), True, 'import numpy as np\n'), ((3311, 3385), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['trial_intermediate_values', 'best_trial_intermediate_values'], {}), '(trial_intermediate_values, best_trial_intermediate_values)\n', (3326, 3385), False, 'from scipy import stats\n')] |
# https://in-the-sky.org/data/asteroids.php# ###Website to get the data about the asteroid position
import pandas
import numpy as np
import matplotlib.pyplot as plt
## READING THE FILE
data= pandas.read_csv("vesta_data.csv",skiprows=2) #reading the file
print(data["AU"]) #printing a list of only the data position in astronomical units
## DOING THE GRAPH
plt.figure(figsize=(10,5)) #size of the grap
ax = plt.axes()
#ax.set_facecolor("#e6e6e6") #here you can put color to the backgournd
dias = np.linspace(0,500,500) #number of days used
plt.plot(dias, data["AU"].head(500), lw = 3) #ploting
plt.ylabel("Distancia ao Sol (AU)")
plt.xlabel("Dias")
plt.title("Efemerides de 4Vega")
plt.savefig("Your_Graph") #saves the graph
plt.show() #generates the graph
## ADITIONAL INFORMATION
apogeu = max(data["AU"]) #gets you the apogee
perigeu = min(data["AU"]) #gets you the perigee
print("Afelio:", apogeu)
print("Perielio:", perigeu)
| [
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((197, 242), 'pandas.read_csv', 'pandas.read_csv', (['"""vesta_data.csv"""'], {'skiprows': '(2)'}), "('vesta_data.csv', skiprows=2)\n", (212, 242), False, 'import pandas\n'), ((367, 394), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (377, 394), True, 'import matplotlib.pyplot as plt\n'), ((418, 428), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (426, 428), True, 'import matplotlib.pyplot as plt\n'), ((510, 534), 'numpy.linspace', 'np.linspace', (['(0)', '(500)', '(500)'], {}), '(0, 500, 500)\n', (521, 534), True, 'import numpy as np\n'), ((614, 649), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distancia ao Sol (AU)"""'], {}), "('Distancia ao Sol (AU)')\n", (624, 649), True, 'import matplotlib.pyplot as plt\n'), ((650, 668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dias"""'], {}), "('Dias')\n", (660, 668), True, 'import matplotlib.pyplot as plt\n'), ((669, 701), 'matplotlib.pyplot.title', 'plt.title', (['"""Efemerides de 4Vega"""'], {}), "('Efemerides de 4Vega')\n", (678, 701), True, 'import matplotlib.pyplot as plt\n'), ((703, 728), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Your_Graph"""'], {}), "('Your_Graph')\n", (714, 728), True, 'import matplotlib.pyplot as plt\n'), ((749, 759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (757, 759), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from deep_utils.utils.box_utils.boxes import Point
class VideoWriterCV:
    """Small convenience wrapper around ``cv2.VideoWriter``.

    ``width``/``height`` are converted to OpenCV's point convention via
    ``Point.point2point`` before the writer is created.
    """

    def __init__(self, save_path, width, height, fourcc="XVID", fps=30, colorful=True, in_source='Numpy'):
        import cv2
        # Translate the (width, height) pair from `in_source` convention to CV's.
        frame_size = Point.point2point((width, height), in_source=in_source, to_source=Point.PointSource.CV)
        if isinstance(fourcc, str):
            # A string codec name must be packed into the integer fourcc code.
            fourcc = cv2.VideoWriter_fourcc(*fourcc)
        self.vw = cv2.VideoWriter(save_path, fourcc, fps, frame_size, colorful)

    def write(self, frame):
        """Append a single frame to the output video."""
        self.vw.write(frame)
def rotate(img, rotation_degree, center_point=None, scale=1.0, dsize=None, bound=False, clockwise=True):
    """Rotate ``img`` by ``rotation_degree`` degrees around ``center_point``.

    When ``dsize`` is None the output keeps the input size; when ``bound`` is
    True the output is enlarged so the whole rotated image fits. Note the
    default ``center_point`` is derived from ``dsize``, not the input image.
    """
    import cv2
    img_h, img_w = img.shape[:2]
    if dsize is None:
        dsize = (img_w, img_h)
    w, h = dsize
    if center_point is None:
        center_point = (w // 2, h // 2)
    # OpenCV treats positive angles as counter-clockwise, so negate for clockwise.
    angle = -rotation_degree if clockwise else rotation_degree
    matrix = cv2.getRotationMatrix2D(center_point, angle, scale)
    if bound:
        src_h, src_w = img.shape[:2]
        cos = abs(matrix[0, 0])
        sin = abs(matrix[0, 1])
        # Bounding box of the rotated image.
        new_w = int((cos * src_w) + (sin * src_h))
        new_h = int((cos * src_h) + (sin * src_w))
        # Shift the transform so the rotated content stays centred.
        matrix[0, 2] += new_w // 2 - src_w // 2
        matrix[1, 2] += new_h // 2 - src_h // 2
        dsize = (new_w, new_h)
    return cv2.warpAffine(img, matrix, dsize)
def translate(img, tx, ty, dsize=None):
    """Shift ``img`` by ``tx`` pixels horizontally and ``ty`` pixels vertically.

    Args:
        img: input image as a numpy array (H x W [x C]).
        tx: horizontal shift in pixels (positive moves content right).
        ty: vertical shift in pixels (positive moves content down).
        dsize: optional output (width, height); defaults to the input size.

    Returns:
        The translated image.
    """
    import cv2
    h, w = img.shape[:2]
    # BUGFIX: cv2.warpAffine expects dsize as (width, height).  The previous
    # code unpacked the reversed shape into the wrong names and passed
    # (height, width), which cropped/padded non-square images incorrectly.
    dsize = (w, h) if dsize is None else dsize
    translation_matrix = np.array([
        [1, 0, tx],
        [0, 1, ty]], dtype=np.float32)
    translated_image = cv2.warpAffine(src=img, M=translation_matrix, dsize=dsize)
    return translated_image
def show_destroy_cv2(img, win_name=''):
    """Display ``img`` in a blocking window and close it after any key press.

    The window is also closed if display fails, before the error is re-raised.
    """
    import cv2
    try:
        cv2.imshow(win_name, img)
        cv2.waitKey(0)
        cv2.destroyWindow(win_name)
    except Exception as display_error:
        # Never leave an orphaned window behind on failure.
        cv2.destroyWindow(win_name)
        raise display_error
| [
"deep_utils.utils.box_utils.boxes.Point.point2point",
"cv2.warpAffine",
"cv2.destroyWindow",
"cv2.VideoWriter",
"cv2.imshow",
"numpy.array",
"cv2.VideoWriter_fourcc",
"cv2.getRotationMatrix2D",
"cv2.waitKey"
] | [((996, 1057), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['center_point', 'rotation_degree', 'scale'], {}), '(center_point, rotation_degree, scale)\n', (1019, 1057), False, 'import cv2\n'), ((1346, 1375), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'm', 'dsize'], {}), '(img, m, dsize)\n', (1360, 1375), False, 'import cv2\n'), ((1556, 1608), 'numpy.array', 'np.array', (['[[1, 0, tx], [0, 1, ty]]'], {'dtype': 'np.float32'}), '([[1, 0, tx], [0, 1, ty]], dtype=np.float32)\n', (1564, 1608), True, 'import numpy as np\n'), ((1649, 1707), 'cv2.warpAffine', 'cv2.warpAffine', ([], {'src': 'img', 'M': 'translation_matrix', 'dsize': 'dsize'}), '(src=img, M=translation_matrix, dsize=dsize)\n', (1663, 1707), False, 'import cv2\n'), ((235, 327), 'deep_utils.utils.box_utils.boxes.Point.point2point', 'Point.point2point', (['(width, height)'], {'in_source': 'in_source', 'to_source': 'Point.PointSource.CV'}), '((width, height), in_source=in_source, to_source=Point.\n PointSource.CV)\n', (252, 327), False, 'from deep_utils.utils.box_utils.boxes import Point\n'), ((429, 485), 'cv2.VideoWriter', 'cv2.VideoWriter', (['save_path', 'fourcc', 'fps', 'point', 'colorful'], {}), '(save_path, fourcc, fps, point, colorful)\n', (444, 485), False, 'import cv2\n'), ((1810, 1835), 'cv2.imshow', 'cv2.imshow', (['win_name', 'img'], {}), '(win_name, img)\n', (1820, 1835), False, 'import cv2\n'), ((1844, 1858), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1855, 1858), False, 'import cv2\n'), ((1867, 1894), 'cv2.destroyWindow', 'cv2.destroyWindow', (['win_name'], {}), '(win_name)\n', (1884, 1894), False, 'import cv2\n'), ((340, 371), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['*fourcc'], {}), '(*fourcc)\n', (362, 371), False, 'import cv2\n'), ((1930, 1957), 'cv2.destroyWindow', 'cv2.destroyWindow', (['win_name'], {}), '(win_name)\n', (1947, 1957), False, 'import cv2\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import math
import json
import threading
import numpy as np
import tensorflow as tf
import h5py
import util
class PluralModel(object):
  def __init__(self, config):
    """Build the full TF1 graph: embeddings, input queue, scorer, loss and train op.

    Args:
      config: dict-like experiment configuration (paths, sizes, hyperparameters).
    """
    self.config = config
    # Word-level embedding lookups; head embeddings may reuse the context
    # embeddings' cache (see `maybe_cache`) — presumably to avoid a double load.
    self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
    self.head_embeddings = util.EmbeddingDictionary(config["head_embeddings"], maybe_cache=self.context_embeddings)
    self.char_embedding_size = config["char_embedding_size"]
    self.char_dict = util.load_char_dict(config["char_vocab_path"])
    # Optional precomputed language-model embeddings stored in an HDF5 file.
    if config["lm_path"]:
      self.lm_file = h5py.File(self.config["lm_path"], "r")
    else:
      self.lm_file = None
    self.lm_layers = self.config["lm_layers"]
    self.lm_size = self.config["lm_size"]
    self.eval_data = None # Load eval data lazily.
    # (dtype, shape) specs for the queue; order must match tensorize_example's
    # return tuple and get_predictions_and_loss's signature.
    input_props = []
    input_props.append((tf.float32, [None, None, self.context_embeddings.size])) # Context embeddings.
    input_props.append((tf.float32, [None, None, self.head_embeddings.size])) # Head embeddings.
    input_props.append((tf.float32, [None, None, self.lm_size, self.lm_layers])) # LM embeddings.
    input_props.append((tf.int32, [None, None, None])) # Character indices.
    input_props.append((tf.int32, [None])) # Text lengths.
    input_props.append((tf.bool, [])) # Is training.
    input_props.append((tf.int32, [None])) # Gold starts.
    input_props.append((tf.int32, [None])) # Gold ends.
    input_props.append((tf.int32, [None,None])) # Antecedent candidates per anaphor.
    input_props.append((tf.int32, [None])) # Number of candidates per anaphor.
    input_props.append((tf.int32,[None])) # Anaphor mention indices.
    input_props.append((tf.bool,[None,None])) # Gold labels per (anaphor, candidate).
    self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in input_props]
    dtypes, shapes = zip(*input_props)
    # Padding queue lets variable-length examples be fed by the enqueue thread.
    queue = tf.PaddingFIFOQueue(capacity=10, dtypes=dtypes, shapes=shapes)
    self.enqueue_op = queue.enqueue(self.queue_input_tensors)
    self.input_tensors = queue.dequeue()
    self.global_step = tf.Variable(0, name="global_step", trainable=False)
    self.reset_global_step = tf.assign(self.global_step, 0)
    self.predictions, self.loss = self.get_predictions_and_loss(*self.input_tensors)
    learning_rate = tf.train.exponential_decay(self.config["learning_rate"], self.global_step,
                                               self.config["decay_frequency"], self.config["decay_rate"], staircase=True)
    trainable_params = tf.trainable_variables()
    gradients = tf.gradients(self.loss, trainable_params)
    # Global-norm clipping guards against exploding LSTM gradients.
    gradients, _ = tf.clip_by_global_norm(gradients, self.config["max_gradient_norm"])
    optimizers = {
      "adam" : tf.train.AdamOptimizer,
      "sgd" : tf.train.GradientDescentOptimizer
    }
    optimizer = optimizers[self.config["optimizer"]](learning_rate)
    self.train_op = optimizer.apply_gradients(zip(gradients, trainable_params), global_step=self.global_step)
  def start_enqueue_thread(self, session):
    """Spawn a daemon thread that perpetually feeds tensorized training
    examples into the input queue.

    The main `train_path` corpus is always treated as plural data; an optional
    auxiliary corpus (plural/coref/bridging) can be mixed in.  With teacher
    annealing the share of main-task examples grows linearly with the step
    count (curr_step/max_step); otherwise a fixed 50/50 mix is used.
    """
    train_examples = []
    auxiliary_train_examples = []
    use_teacher_annealing = self.config["use_teacher_annealing"]
    total_plural_doc, use_plural_doc = 0,0
    total_2nd_doc,use_2nd_doc = 0,0
    auxiliary_train_type = self.config["auxiliary_train_type"] # One of: plural, coref, bridging.
    if self.config["train_path"]: # The main train_path can only be the plural corpus.
      for line in open(self.config["train_path"]):
        doc = json.loads(line)
        doc['mode'] = 'plural'
        total_plural_doc+=1
        # Keep only documents that actually contain plural annotations.
        if len(doc['plurals']) > 0:
          use_plural_doc+=1
          train_examples.append(doc)
    if self.config["auxiliary_train_path"]:
      for line in open(self.config["auxiliary_train_path"]):
        doc = json.loads(line)
        doc['mode'] = auxiliary_train_type
        total_2nd_doc += 1
        # Coref documents are always usable; plural/bridging ones only if annotated.
        if (auxiliary_train_type == 'plural' and len(doc['plurals']) > 0) or \
            (auxiliary_train_type == 'bridging' and len(doc['bridgings'])>0) or \
            auxiliary_train_type == 'coref':
          use_2nd_doc += 1
          auxiliary_train_examples.append(doc)
    print('Find %d plural train documents use %d.' %(total_plural_doc,use_plural_doc))
    if self.config["auxiliary_train_path"]:
      print('Find %d %s train documents use %d from auxiliary train path.' %(total_2nd_doc,auxiliary_train_type,use_2nd_doc))
    # With no main corpus, promote the auxiliary corpus to the main one.
    if not train_examples:
      train_examples = auxiliary_train_examples
      auxiliary_train_examples = []
    def _enqueue_loop():
      # Endless producer: samples a corpus, tensorizes one example, enqueues it.
      max_step = self.config["max_step"]
      curr_step = 0
      train_ind = 0
      train_2nd_ind = 0
      random.shuffle(train_examples)
      if auxiliary_train_examples:
        random.shuffle(auxiliary_train_examples)
      while True:
        # Probability of drawing from the main corpus this step.
        main_train_ratio = 1.0 if not auxiliary_train_examples else \
          (0.5 if not use_teacher_annealing else curr_step*1.0/max_step)
        if random.random() <= main_train_ratio:
          example = train_examples[train_ind]
          train_ind+=1
          if train_ind >= len(train_examples):
            # Epoch boundary: reshuffle and restart.
            random.shuffle(train_examples)
            train_ind = 0
        else:
          example = auxiliary_train_examples[train_2nd_ind]
          train_2nd_ind+=1
          if train_2nd_ind >= len(auxiliary_train_examples):
            random.shuffle(auxiliary_train_examples)
            train_2nd_ind = 0
        tensorized_example = self.tensorize_example(example, is_training=True)
        feed_dict = dict(zip(self.queue_input_tensors, tensorized_example))
        session.run(self.enqueue_op, feed_dict=feed_dict)
        curr_step+=1
    enqueue_thread = threading.Thread(target=_enqueue_loop)
    enqueue_thread.daemon = True
    enqueue_thread.start()
def restore(self, session,model_file_name = 'model.max.ckpt'):
# Don't try to restore unused variables from the TF-Hub ELMo module.
vars_to_restore = [v for v in tf.global_variables() if "module/" not in v.name]
saver = tf.train.Saver(vars_to_restore)
checkpoint_path = os.path.join(self.config["log_dir"], model_file_name)
print("Restoring from {}".format(checkpoint_path))
session.run(tf.global_variables_initializer())
saver.restore(session, checkpoint_path)
def load_lm_embeddings(self, doc_key):
if self.lm_file is None:
return np.zeros([0, 0, self.lm_size, self.lm_layers])
file_key = doc_key.replace("/", ":")
group = self.lm_file[file_key]
num_sentences = len(list(group.keys()))
sentences = [group[str(i)][...] for i in range(num_sentences)]
lm_emb = np.zeros([num_sentences, max(s.shape[0] for s in sentences), self.lm_size, self.lm_layers])
for i, s in enumerate(sentences):
lm_emb[i, :s.shape[0], :, :] = s
return lm_emb
  def tensorize_example(self, example, is_training):
    """Convert one JSON example into the numpy tuple fed to the input queue.

    Builds word/char/LM embedding tensors, derives gold mention spans and
    cluster ids, collects gold (anaphor-cluster -> antecedent-cluster) pairs
    for the plural/bridging modes, optionally truncates long training
    documents, and materialises per-anaphor candidate matrices plus labels.
    NOTE(review): uses `xrange` and `np.bool`, so this file targets Python 2
    with an older NumPy.
    """
    clusters = example["clusters"]
    doc_mode = 'plural' if not 'mode' in example else example['mode']
    gold_mentions = [tuple(m) for m in example['mentions']]
    gold_starts = np.array([s for s,_ in gold_mentions])
    gold_ends = np.array([e for _,e in gold_mentions])
    # cluster_ids[m] = index of the coref cluster containing mention m (-1 = none).
    cluster_ids = [-1] * len(gold_mentions)
    for cluster_id, cluster in enumerate(clusters):
      for mid in cluster:
        cluster_ids[mid] = cluster_id
    sentences = example["sentences"]
    num_words = sum(len(s) for s in sentences)
    max_sentence_length = max(len(s) for s in sentences)
    # Word length must cover the widest CNN filter so char convolutions fit.
    max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"]))
    text_len = np.array([len(s) for s in sentences])
    tokens = [[""] * max_sentence_length for _ in sentences]
    context_word_emb = np.zeros([len(sentences), max_sentence_length, self.context_embeddings.size])
    head_word_emb = np.zeros([len(sentences), max_sentence_length, self.head_embeddings.size])
    char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
    for i, sentence in enumerate(sentences):
      for j, word in enumerate(sentence):
        tokens[i][j] = word
        context_word_emb[i, j] = self.context_embeddings[word]
        head_word_emb[i, j] = self.head_embeddings[word]
        char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
    tokens = np.array(tokens)
    doc_key = example["doc_key"]
    # Maps anaphor-cluster id -> set of gold antecedent-cluster ids.
    plural_bridging_cluster_pairs = {} # For bridging and plural modes.
    if doc_mode == 'plural':
      for mid, aid in example['plurals']:
        msid = cluster_ids[mid]
        asid = cluster_ids[aid]
        if not msid in plural_bridging_cluster_pairs:
          plural_bridging_cluster_pairs[msid] = set()
        plural_bridging_cluster_pairs[msid].add(asid)
    elif doc_mode == 'bridging':
      for mid, aid, is_inv in example['bridgings']:
        if is_inv: # For element-of-inv the antecedent is equivalent to a plural.
          msid = cluster_ids[mid]
          asid = cluster_ids[aid]
        else:
          # Plain element-of: swap roles so the container acts as the anaphor.
          msid = cluster_ids[aid]
          asid = cluster_ids[mid]
        if not msid in plural_bridging_cluster_pairs:
          plural_bridging_cluster_pairs[msid] = set()
        plural_bridging_cluster_pairs[msid].add(asid)
    num_anaphors = sum(len(cl) for cid, cl in enumerate(clusters) if cid in plural_bridging_cluster_pairs)
    # Fraction of non-anaphor mentions sampled as negative training examples.
    negative_example_rate = 1 if is_training else 0
    if doc_mode != 'coref' and is_training and self.config["ntimes_negative_examples"] > 0:
      negative_example_rate = max(0.2, num_anaphors*self.config["ntimes_negative_examples"]/(len(gold_mentions)-num_anaphors))
    lm_emb = self.load_lm_embeddings(doc_key)
    max_training_words = self.config["max_training_words"] if doc_mode=='plural' else 1000 # For coref/bridging: 1000 words maximum.
    if is_training and num_words > max_training_words:
      # Randomly crop a sentence window that fits the word budget.
      context_word_emb, head_word_emb, lm_emb, char_index, text_len, is_training, gold_starts, gold_ends,np_cluster_ids\
        = self.truncate_example(context_word_emb, head_word_emb, lm_emb, char_index, text_len, is_training, gold_starts, gold_ends, np.array(cluster_ids),max_training_words)
      cluster_ids = [id.item() for id in np_cluster_ids]
    num_mention = gold_starts.shape[0]
    anaphors = np.arange(num_mention)
    num_top_antecedents = min(num_mention, self.config['max_top_antecedents'])
    antecedents = np.zeros([num_mention, num_top_antecedents])
    gold_labels = np.zeros([num_mention, num_top_antecedents], dtype=np.bool)
    antecedents_len = np.zeros([num_mention])
    # us_mask selects which mentions are kept as anaphors for this example.
    us_mask = np.ones([num_mention],np.bool)
    for i in xrange(num_mention):
      antecedents_len[i] = min(i, num_top_antecedents)
      cid = cluster_ids[i]
      if doc_mode == 'coref':
        # Coref: candidates are the immediately preceding mentions.
        for j in xrange(min(i, num_top_antecedents)):
          ant = i - j - 1
          antecedents[i, j] = ant
          if cluster_ids[i] == cluster_ids[ant]:
            gold_labels[i, j] = True
      elif doc_mode == 'plural':
        # Keep gold anaphors always; sample negatives at negative_example_rate.
        us_mask[i] = cid in plural_bridging_cluster_pairs or random.random() < negative_example_rate
        if not us_mask[i]:
          continue
        for j in xrange(min(i, num_top_antecedents)):
          ant = i - j - 1
          antecedents[i, j] = ant
          if cid in plural_bridging_cluster_pairs and cluster_ids[ant] in plural_bridging_cluster_pairs[cid]:
            gold_labels[i, j] = True
      elif doc_mode == 'bridging':
        # For bridging we search antecedents in both directions, because
        # antecedent and anaphor are switched for element-of relations
        # (for element-of-inv the positions are not switched).
        us_mask[i] = cid in plural_bridging_cluster_pairs or random.random() < negative_example_rate
        if not us_mask[i]:
          continue
        nbefore = min(i, num_top_antecedents // 2)
        mafter = min(num_mention - i - 1, num_top_antecedents // 2)
        antecedents_len[i] = nbefore + mafter
        for j in xrange(nbefore):
          ant = i - j - 1
          antecedents[i, j] = ant
          if cid in plural_bridging_cluster_pairs and cluster_ids[ant] in plural_bridging_cluster_pairs[cid]:
            gold_labels[i, j] = True
        for j in xrange(mafter):
          ant = i + j + 1
          antecedents[i, j + nbefore] = ant
          if cid in plural_bridging_cluster_pairs and cluster_ids[ant] in plural_bridging_cluster_pairs[cid]:
            gold_labels[i, j + nbefore] = True
    # Drop the mentions that were not selected as anaphors.
    anaphors = anaphors[us_mask]
    antecedents = antecedents[us_mask]
    antecedents_len = antecedents_len[us_mask]
    gold_labels = gold_labels[us_mask]
    return (context_word_emb, head_word_emb, lm_emb, char_index, text_len, is_training, gold_starts, gold_ends, antecedents,antecedents_len,anaphors,gold_labels)
def truncate_example(self, context_word_emb, head_word_emb, lm_emb, char_index, text_len, is_training, gold_starts, gold_ends, cluster_ids,max_training_words):
num_words = sum(text_len)
assert num_words > max_training_words
num_sentences = context_word_emb.shape[0]
max_training_sentences = num_sentences
while num_words > max_training_words:
max_training_sentences -= 1
sentence_offset = random.randint(0, num_sentences - max_training_sentences)
word_offset = text_len[:sentence_offset].sum()
num_words = text_len[sentence_offset:sentence_offset + max_training_sentences].sum()
context_word_emb = context_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
head_word_emb = head_word_emb[sentence_offset:sentence_offset + max_training_sentences, :, :]
lm_emb = lm_emb[sentence_offset:sentence_offset + max_training_sentences, :, :, :]
char_index = char_index[sentence_offset:sentence_offset + max_training_sentences, :, :]
text_len = text_len[sentence_offset:sentence_offset + max_training_sentences]
gold_spans = np.logical_and(gold_starts >= word_offset, gold_ends < word_offset + num_words)
gold_starts = gold_starts[gold_spans] - word_offset
gold_ends = gold_ends[gold_spans] - word_offset
cluster_ids = cluster_ids[gold_spans]
return context_word_emb, head_word_emb, lm_emb, char_index, text_len, is_training, gold_starts, gold_ends, cluster_ids
def get_dropout(self, dropout_rate, is_training):
return 1 - (tf.to_float(is_training) * dropout_rate)
  def get_predictions_and_loss(self, context_word_emb, head_word_emb, lm_emb, char_index, text_len, is_training, gold_starts, gold_ends, antecedents,antecedents_len,anaphors, gold_labels):
    """Assemble the scoring graph: embed tokens, contextualize with BiLSTMs,
    build mention-pair representations, and score each anaphor against its
    candidate antecedents.

    Returns:
      ([plural_scores, antecedents_len, anaphors], loss) where loss is the
      summed marginal softmax over gold antecedents plus a dummy
      "no antecedent" column.
    """
    self.dropout = self.get_dropout(self.config["dropout_rate"], is_training)
    self.lexical_dropout = self.get_dropout(self.config["lexical_dropout_rate"], is_training)
    self.lstm_dropout = self.get_dropout(self.config["lstm_dropout_rate"], is_training)
    num_sentences = tf.shape(context_word_emb)[0]
    max_sentence_length = tf.shape(context_word_emb)[1]
    context_emb_list = [context_word_emb]
    head_emb_list = [head_word_emb]
    # Character-level CNN features, shared by context and head representations.
    if self.config["char_embedding_size"] > 0:
      char_emb = tf.gather(tf.get_variable("char_embeddings", [len(self.char_dict), self.config["char_embedding_size"]]), char_index) # [num_sentences, max_sentence_length, max_word_length, emb]
      flattened_char_emb = tf.reshape(char_emb, [num_sentences * max_sentence_length, util.shape(char_emb, 2), util.shape(char_emb, 3)]) # [num_sentences * max_sentence_length, max_word_length, emb]
      flattened_aggregated_char_emb = util.cnn(flattened_char_emb, self.config["filter_widths"], self.config["filter_size"]) # [num_sentences * max_sentence_length, emb]
      aggregated_char_emb = tf.reshape(flattened_aggregated_char_emb, [num_sentences, max_sentence_length, util.shape(flattened_aggregated_char_emb, 1)]) # [num_sentences, max_sentence_length, emb]
      context_emb_list.append(aggregated_char_emb)
      head_emb_list.append(aggregated_char_emb)
    # Collapse the LM layers into one embedding with learned softmax weights
    # and a learned scalar scale.
    if self.lm_file:
      lm_emb_size = util.shape(lm_emb, 2)
      lm_num_layers = util.shape(lm_emb, 3)
      with tf.variable_scope("lm_aggregation"):
        self.lm_weights = tf.nn.softmax(tf.get_variable("lm_scores", [lm_num_layers], initializer=tf.constant_initializer(0.0)))
        self.lm_scaling = tf.get_variable("lm_scaling", [], initializer=tf.constant_initializer(1.0))
      flattened_lm_emb = tf.reshape(lm_emb, [num_sentences * max_sentence_length * lm_emb_size, lm_num_layers])
      flattened_aggregated_lm_emb = tf.matmul(flattened_lm_emb, tf.expand_dims(self.lm_weights, 1)) # [num_sentences * max_sentence_length * emb, 1]
      aggregated_lm_emb = tf.reshape(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length, lm_emb_size])
      aggregated_lm_emb *= self.lm_scaling
      context_emb_list.append(aggregated_lm_emb)
    context_emb = tf.concat(context_emb_list, 2) # [num_sentences, max_sentence_length, emb]
    head_emb = tf.concat(head_emb_list, 2) # [num_sentences, max_sentence_length, emb]
    context_emb = tf.nn.dropout(context_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]
    head_emb = tf.nn.dropout(head_emb, self.lexical_dropout) # [num_sentences, max_sentence_length, emb]
    text_len_mask = tf.sequence_mask(text_len, maxlen=max_sentence_length) # [num_sentence, max_sentence_length]
    context_outputs = self.lstm_contextualize(context_emb, text_len, text_len_mask) # [num_words, emb]
    flattened_head_emb = self.flatten_emb_by_sentence(head_emb, text_len_mask) # [num_words]
    mention_emb = self.get_span_emb(flattened_head_emb, context_outputs, gold_starts, gold_ends)
    k=util.shape(antecedents,0)
    c = util.shape(antecedents,1)
    anaphor_emb = tf.gather(mention_emb,anaphors) #[k,emb]
    antecedent_emb = tf.gather(mention_emb, antecedents) # [k, c, emb]
    pair_emb = self.get_pair_embeddings(anaphor_emb, antecedents, antecedent_emb) # [k, c,emb]
    with tf.variable_scope("plural_scores"):
      plural_scores = util.ffnn(pair_emb, self.config["ffnn_depth"], self.config["ffnn_size"], 1, self.dropout) # [k, c, 1]
      plural_scores = tf.squeeze(plural_scores, 2) # [k, c]
      # Mask out padded candidate slots with -inf (log 0).
      plural_scores = plural_scores + tf.log(tf.sequence_mask(antecedents_len,c,tf.float32))
    # Dummy column: "no antecedent", gold iff no real candidate is gold.
    dummy_scores = tf.zeros([k, 1])
    dummy_labels = tf.logical_not(tf.reduce_any(gold_labels, 1, keepdims=True)) # [k, 1]
    plural_scores_with_dummy = tf.concat([dummy_scores, plural_scores], 1)
    gold_labels_with_dummy = tf.concat([dummy_labels, gold_labels], 1)
    loss = self.softmax_loss(plural_scores_with_dummy,gold_labels_with_dummy)
    loss = tf.reduce_sum(loss)
    return [plural_scores,antecedents_len,anaphors], loss
  def get_span_emb(self, head_emb, context_outputs, span_starts, span_ends):
    """Build a fixed-size embedding for each mention span.

    Concatenates the LSTM output at the span start and end, an optional
    span-width feature embedding, and an optional attention-weighted
    head-word embedding.  Width is clipped to config["max_span_width"].
    """
    span_emb_list = []
    span_start_emb = tf.gather(context_outputs, span_starts) # [k, emb]
    span_emb_list.append(span_start_emb)
    span_end_emb = tf.gather(context_outputs, span_ends) # [k, emb]
    span_emb_list.append(span_end_emb)
    span_width = tf.minimum(1 + span_ends - span_starts,self.config["max_span_width"]) # [k]
    if self.config["use_features"]:
      span_width_index = span_width - 1 # [k]
      span_width_emb = tf.gather(tf.get_variable("span_width_embeddings", [self.config["max_span_width"], self.config["feature_size"]]), span_width_index) # [k, emb]
      span_width_emb = tf.nn.dropout(span_width_emb, self.dropout)
      span_emb_list.append(span_width_emb)
    if self.config["model_heads"]:
      # Attention over the words inside each span, scored by a learned projection.
      span_indices = tf.expand_dims(tf.range(self.config["max_span_width"]), 0) + tf.expand_dims(span_starts, 1) # [k, max_span_width]
      # Clamp so spans near the document end do not index past the last word.
      span_indices = tf.minimum(util.shape(context_outputs, 0) - 1, span_indices) # [k, max_span_width]
      span_text_emb = tf.gather(head_emb, span_indices) # [k, max_span_width, emb]
      with tf.variable_scope("head_scores"):
        self.head_scores = util.projection(context_outputs, 1) # [num_words, 1]
      span_head_scores = tf.gather(self.head_scores, span_indices) # [k, max_span_width, 1]
      span_mask = tf.expand_dims(tf.sequence_mask(span_width, self.config["max_span_width"], dtype=tf.float32), 2) # [k, max_span_width, 1]
      # log(0) = -inf removes positions beyond the span width from the softmax.
      span_head_scores += tf.log(span_mask) # [k, max_span_width, 1]
      span_attention = tf.nn.softmax(span_head_scores, 1) # [k, max_span_width, 1]
      span_head_emb = tf.reduce_sum(span_attention * span_text_emb, 1) # [k, emb]
      span_emb_list.append(span_head_emb)
    span_emb = tf.concat(span_emb_list, 1) # [k, emb]
    return span_emb # [k, emb]
def softmax_loss(self, antecedent_scores, antecedent_labels):
gold_scores = antecedent_scores + tf.log(tf.to_float(antecedent_labels)) # [k, max_ant + 1]
marginalized_gold_scores = tf.reduce_logsumexp(gold_scores, [1]) # [k]
log_norm = tf.reduce_logsumexp(antecedent_scores, [1]) # [k]
return log_norm - marginalized_gold_scores # [k]
def bucket_distance(self, distances):
"""
Places the given values (designed for distances) into 10 semi-logscale buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
"""
logspace_idx = tf.to_int32(tf.floor(tf.log(tf.to_float(distances))/math.log(2))) + 3
use_identity = tf.to_int32(distances <= 4)
combined_idx = use_identity * distances + (1 - use_identity) * logspace_idx
return tf.clip_by_value(combined_idx, 0, 9)
  def get_pair_embeddings(self, mention_emb, antecedents, antecedent_emb):
    """Build per-pair features: [anaphor, antecedent, elementwise product,
    bucketed distance feature] concatenated into [k, c, emb].

    NOTE(review): the "distance" here is the candidate's rank j+1 in the
    candidate list (tf.range(c) + 1), not the true mention/token distance.
    """
    k = util.shape(mention_emb, 0)
    c = util.shape(antecedents, 1)
    feature_emb_list = []
    antecedent_offsets = tf.tile(tf.expand_dims(tf.range(c) + 1, 0), [k, 1]) # [k, c]
    if self.config["use_features"]:
      antecedent_distance_buckets = self.bucket_distance(antecedent_offsets) # [k, c]
      antecedent_distance_emb = tf.gather(tf.get_variable("antecedent_distance_emb", [10, self.config["feature_size"]]), antecedent_distance_buckets) # [k, c]
      feature_emb_list.append(antecedent_distance_emb)
    feature_emb = tf.concat(feature_emb_list, 2) # [k, c, emb]
    feature_emb = tf.nn.dropout(feature_emb, self.dropout) # [k, c, emb]
    target_emb = tf.expand_dims(mention_emb, 1) # [k, 1, emb]
    similarity_emb = antecedent_emb * target_emb # [k, c, emb]
    target_emb = tf.tile(target_emb, [1, c, 1]) # [k, c, emb]
    pair_emb = tf.concat([target_emb, antecedent_emb, similarity_emb, feature_emb], 2) # [k, c, emb]
    return pair_emb
  def flatten_emb_by_sentence(self, emb, text_len_mask):
    """Flatten [num_sentences, max_sentence_length, ...] into [num_words, ...],
    dropping padded positions via ``text_len_mask``.

    Only rank-2 and rank-3 inputs are supported; anything else raises
    ValueError.
    """
    num_sentences = tf.shape(emb)[0]
    max_sentence_length = tf.shape(emb)[1]
    emb_rank = len(emb.get_shape())
    if emb_rank == 2:
      flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length])
    elif emb_rank == 3:
      flattened_emb = tf.reshape(emb, [num_sentences * max_sentence_length, util.shape(emb, 2)])
    else:
      raise ValueError("Unsupported rank: {}".format(emb_rank))
    # boolean_mask removes the padding entries beyond each sentence's length.
    return tf.boolean_mask(flattened_emb, tf.reshape(text_len_mask, [num_sentences * max_sentence_length]))
  def lstm_contextualize(self, text_emb, text_len, text_len_mask):
    """Run stacked bidirectional LSTMs over the sentences and return flattened
    per-word outputs [num_words, emb].

    Layers after the first are mixed with their input through a learned
    highway gate.  NOTE(review): `xrange` implies this targets Python 2.
    """
    num_sentences = tf.shape(text_emb)[0]
    current_inputs = text_emb # [num_sentences, max_sentence_length, emb]
    for layer in xrange(self.config["contextualization_layers"]):
      with tf.variable_scope("layer_{}".format(layer)):
        with tf.variable_scope("fw_cell"):
          cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
        with tf.variable_scope("bw_cell"):
          cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
        # Broadcast the learned initial states across the sentence batch.
        state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
        state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
        (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
          cell_fw=cell_fw,
          cell_bw=cell_bw,
          inputs=current_inputs,
          sequence_length=text_len,
          initial_state_fw=state_fw,
          initial_state_bw=state_bw)
        text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]
        text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
        if layer > 0:
          # Highway connection: gate between this layer's output and its input.
          highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]
          text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
        current_inputs = text_outputs
    return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
def get_plural_pairs(self, plural_scores,antecedent_len,anaphors, cluster_ids, plural_mentions):
plural_pairs = []
plural_maps = {}
for i, mid in enumerate(anaphors):
if not mid in plural_mentions:
continue
score_list = []
ant_cid_list = set()
for j in xrange(antecedent_len[i]):
ant = mid - j -1
score_list.append((plural_scores[i,j].item(), ant))
score_list = sorted(score_list,reverse=True)
for s, ant in score_list:
ant_cid = cluster_ids[ant]
if len(ant_cid_list) >=5:
break
elif ant_cid in ant_cid_list:
continue
elif s > 0:
ant_cid_list.add(ant_cid)
elif len(ant_cid_list) < 2:
ant_cid_list.add(ant_cid)
plural_maps[mid] = ant_cid_list
for cid in ant_cid_list:
plural_pairs.append((mid, cid))
return set(plural_pairs),plural_maps
def load_eval_data(self):
if self.eval_data is None:
def load_line(line):
example = json.loads(line)
return self.tensorize_example(example, is_training=False), example
with open(self.config["eval_path"]) as f:
self.eval_data = [load_line(l) for l in f.readlines()]
print("Loaded {} eval examples.".format(len(self.eval_data)))
  def evaluate(self, session):
    """Run the scorer over the eval set and report plural-link metrics.

    Computes pair-level precision/recall/F1 between gold and predicted
    (anaphor, antecedent-cluster) pairs, plus an "exact match" accuracy: the
    fraction of gold anaphors whose predicted antecedent-cluster set equals
    the gold set.  Returns (TF summary, F1 percentage).
    NOTE(review): assumes every gold plural anaphor is present in
    `pred_plural_maps` — a missing key would raise KeyError; verify decoding
    keeps all gold anaphors at eval time.
    """
    self.load_eval_data()
    tp,fn,fp = 0,0,0
    emp,emt = 0,0
    for example_num, (tensorized_example, example) in enumerate(self.eval_data):
      feed_dict = {i:t for i,t in zip(self.input_tensors, tensorized_example)}
      plural_scores,antecedent_len,anaphors = session.run(self.predictions, feed_dict=feed_dict)
      num_of_mention = len(example['mentions'])
      clusters = example['clusters']
      # mention id -> gold coref cluster id (-1 for singletons outside clusters).
      cluster_ids = [-1] * num_of_mention
      for cid, cl in enumerate(clusters):
        for m in cl:
          cluster_ids[m] = cid
      plural_mentions = set(mid for mid,_ in example['plurals'])
      # Gold map: plural anaphor -> set of gold antecedent cluster ids.
      gold_plural_maps = {}
      for mid, ant in example['plurals']:
        if mid not in gold_plural_maps:
          gold_plural_maps[mid] = set()
        gold_plural_maps[mid].add(cluster_ids[ant])
      gold_plurals = set((mid, cluster_ids[ant]) for mid, ant in example['plurals'])
      pred_plurals, pred_plural_maps = self.get_plural_pairs(plural_scores,antecedent_len,anaphors,cluster_ids,plural_mentions)
      tp += len(gold_plurals & pred_plurals)
      fn += len(gold_plurals - pred_plurals)
      fp += len(pred_plurals - gold_plurals)
      emt += len(gold_plural_maps)
      for mid in gold_plural_maps:
        # Exact match: predicted cluster set identical to the gold cluster set.
        if len(gold_plural_maps[mid]) == len(pred_plural_maps[mid])==len(gold_plural_maps[mid] & pred_plural_maps[mid]):
          emp+=1
      if example_num % 10 == 0:
        print("Evaluated {}/{} examples.".format(example_num + 1, len(self.eval_data)))
    recall = 0.0 if tp == 0 else float(tp)/(tp+fn)
    precision = 0.0 if tp == 0 else float(tp)/(tp+fp)
    f1 = 0.0 if precision == 0.0 else 2.0*recall*precision/(recall+precision)
    print("Plural F1: {:.2f}%".format(f1*100))
    print("Plural recall: {:.2f}%".format(recall*100))
    print("Plural precision: {:.2f}%".format(precision*100))
    print("Plural exact match accrucy: {:.2f}%".format(emp*100.0/emt))
    summary_dict = {}
    summary_dict["Plural F1"] = f1
    summary_dict["Plural recall"] = recall
    summary_dict["Plural precision"] = precision
    return util.make_summary(summary_dict), f1*100
| [
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.reduce_sum",
"math.log",
"util.load_char_dict",
"tensorflow.gradients",
"numpy.array",
"tensorflow.nn.dropout",
"tensorflow.nn.softmax",
"tensorflow.PaddingFIFOQueue",
"te... | [((356, 410), 'util.EmbeddingDictionary', 'util.EmbeddingDictionary', (["config['context_embeddings']"], {}), "(config['context_embeddings'])\n", (380, 410), False, 'import util\n'), ((438, 531), 'util.EmbeddingDictionary', 'util.EmbeddingDictionary', (["config['head_embeddings']"], {'maybe_cache': 'self.context_embeddings'}), "(config['head_embeddings'], maybe_cache=self.\n context_embeddings)\n", (462, 531), False, 'import util\n'), ((609, 655), 'util.load_char_dict', 'util.load_char_dict', (["config['char_vocab_path']"], {}), "(config['char_vocab_path'])\n", (628, 655), False, 'import util\n'), ((1918, 1980), 'tensorflow.PaddingFIFOQueue', 'tf.PaddingFIFOQueue', ([], {'capacity': '(10)', 'dtypes': 'dtypes', 'shapes': 'shapes'}), '(capacity=10, dtypes=dtypes, shapes=shapes)\n', (1937, 1980), True, 'import tensorflow as tf\n'), ((2108, 2159), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (2119, 2159), True, 'import tensorflow as tf\n'), ((2189, 2219), 'tensorflow.assign', 'tf.assign', (['self.global_step', '(0)'], {}), '(self.global_step, 0)\n', (2198, 2219), True, 'import tensorflow as tf\n'), ((2326, 2479), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (["self.config['learning_rate']", 'self.global_step', "self.config['decay_frequency']", "self.config['decay_rate']"], {'staircase': '(True)'}), "(self.config['learning_rate'], self.global_step,\n self.config['decay_frequency'], self.config['decay_rate'], staircase=True)\n", (2352, 2479), True, 'import tensorflow as tf\n'), ((2546, 2570), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2568, 2570), True, 'import tensorflow as tf\n'), ((2587, 2628), 'tensorflow.gradients', 'tf.gradients', (['self.loss', 'trainable_params'], {}), '(self.loss, trainable_params)\n', (2599, 2628), True, 'import tensorflow as tf\n'), ((2648, 2715), 
'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', "self.config['max_gradient_norm']"], {}), "(gradients, self.config['max_gradient_norm'])\n", (2670, 2715), True, 'import tensorflow as tf\n'), ((5675, 5713), 'threading.Thread', 'threading.Thread', ([], {'target': '_enqueue_loop'}), '(target=_enqueue_loop)\n', (5691, 5713), False, 'import threading\n'), ((6009, 6040), 'tensorflow.train.Saver', 'tf.train.Saver', (['vars_to_restore'], {}), '(vars_to_restore)\n', (6023, 6040), True, 'import tensorflow as tf\n'), ((6063, 6116), 'os.path.join', 'os.path.join', (["self.config['log_dir']", 'model_file_name'], {}), "(self.config['log_dir'], model_file_name)\n", (6075, 6116), False, 'import os\n'), ((7022, 7061), 'numpy.array', 'np.array', (['[s for s, _ in gold_mentions]'], {}), '([s for s, _ in gold_mentions])\n', (7030, 7061), True, 'import numpy as np\n'), ((7077, 7116), 'numpy.array', 'np.array', (['[e for _, e in gold_mentions]'], {}), '([e for _, e in gold_mentions])\n', (7085, 7116), True, 'import numpy as np\n'), ((8241, 8257), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (8249, 8257), True, 'import numpy as np\n'), ((10153, 10175), 'numpy.arange', 'np.arange', (['num_mention'], {}), '(num_mention)\n', (10162, 10175), True, 'import numpy as np\n'), ((10273, 10317), 'numpy.zeros', 'np.zeros', (['[num_mention, num_top_antecedents]'], {}), '([num_mention, num_top_antecedents])\n', (10281, 10317), True, 'import numpy as np\n'), ((10336, 10395), 'numpy.zeros', 'np.zeros', (['[num_mention, num_top_antecedents]'], {'dtype': 'np.bool'}), '([num_mention, num_top_antecedents], dtype=np.bool)\n', (10344, 10395), True, 'import numpy as np\n'), ((10418, 10441), 'numpy.zeros', 'np.zeros', (['[num_mention]'], {}), '([num_mention])\n', (10426, 10441), True, 'import numpy as np\n'), ((10456, 10487), 'numpy.ones', 'np.ones', (['[num_mention]', 'np.bool'], {}), '([num_mention], np.bool)\n', (10463, 10487), True, 'import numpy as np\n'), ((13755, 
13834), 'numpy.logical_and', 'np.logical_and', (['(gold_starts >= word_offset)', '(gold_ends < word_offset + num_words)'], {}), '(gold_starts >= word_offset, gold_ends < word_offset + num_words)\n', (13769, 13834), True, 'import numpy as np\n'), ((16643, 16673), 'tensorflow.concat', 'tf.concat', (['context_emb_list', '(2)'], {}), '(context_emb_list, 2)\n', (16652, 16673), True, 'import tensorflow as tf\n'), ((16733, 16760), 'tensorflow.concat', 'tf.concat', (['head_emb_list', '(2)'], {}), '(head_emb_list, 2)\n', (16742, 16760), True, 'import tensorflow as tf\n'), ((16823, 16871), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['context_emb', 'self.lexical_dropout'], {}), '(context_emb, self.lexical_dropout)\n', (16836, 16871), True, 'import tensorflow as tf\n'), ((16931, 16976), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['head_emb', 'self.lexical_dropout'], {}), '(head_emb, self.lexical_dropout)\n', (16944, 16976), True, 'import tensorflow as tf\n'), ((17042, 17096), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['text_len'], {'maxlen': 'max_sentence_length'}), '(text_len, maxlen=max_sentence_length)\n', (17058, 17096), True, 'import tensorflow as tf\n'), ((17440, 17466), 'util.shape', 'util.shape', (['antecedents', '(0)'], {}), '(antecedents, 0)\n', (17450, 17466), False, 'import util\n'), ((17474, 17500), 'util.shape', 'util.shape', (['antecedents', '(1)'], {}), '(antecedents, 1)\n', (17484, 17500), False, 'import util\n'), ((17519, 17551), 'tensorflow.gather', 'tf.gather', (['mention_emb', 'anaphors'], {}), '(mention_emb, anaphors)\n', (17528, 17551), True, 'import tensorflow as tf\n'), ((17581, 17616), 'tensorflow.gather', 'tf.gather', (['mention_emb', 'antecedents'], {}), '(mention_emb, antecedents)\n', (17590, 17616), True, 'import tensorflow as tf\n'), ((18072, 18088), 'tensorflow.zeros', 'tf.zeros', (['[k, 1]'], {}), '([k, 1])\n', (18080, 18088), True, 'import tensorflow as tf\n'), ((18211, 18254), 'tensorflow.concat', 'tf.concat', (['[dummy_scores, 
plural_scores]', '(1)'], {}), '([dummy_scores, plural_scores], 1)\n', (18220, 18254), True, 'import tensorflow as tf\n'), ((18284, 18325), 'tensorflow.concat', 'tf.concat', (['[dummy_labels, gold_labels]', '(1)'], {}), '([dummy_labels, gold_labels], 1)\n', (18293, 18325), True, 'import tensorflow as tf\n'), ((18416, 18435), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), '(loss)\n', (18429, 18435), True, 'import tensorflow as tf\n'), ((18618, 18657), 'tensorflow.gather', 'tf.gather', (['context_outputs', 'span_starts'], {}), '(context_outputs, span_starts)\n', (18627, 18657), True, 'import tensorflow as tf\n'), ((18730, 18767), 'tensorflow.gather', 'tf.gather', (['context_outputs', 'span_ends'], {}), '(context_outputs, span_ends)\n', (18739, 18767), True, 'import tensorflow as tf\n'), ((18836, 18906), 'tensorflow.minimum', 'tf.minimum', (['(1 + span_ends - span_starts)', "self.config['max_span_width']"], {}), "(1 + span_ends - span_starts, self.config['max_span_width'])\n", (18846, 18906), True, 'import tensorflow as tf\n'), ((20278, 20305), 'tensorflow.concat', 'tf.concat', (['span_emb_list', '(1)'], {}), '(span_emb_list, 1)\n', (20287, 20305), True, 'import tensorflow as tf\n'), ((20541, 20578), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['gold_scores', '[1]'], {}), '(gold_scores, [1])\n', (20560, 20578), True, 'import tensorflow as tf\n'), ((20600, 20643), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['antecedent_scores', '[1]'], {}), '(antecedent_scores, [1])\n', (20619, 20643), True, 'import tensorflow as tf\n'), ((21003, 21030), 'tensorflow.to_int32', 'tf.to_int32', (['(distances <= 4)'], {}), '(distances <= 4)\n', (21014, 21030), True, 'import tensorflow as tf\n'), ((21122, 21158), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['combined_idx', '(0)', '(9)'], {}), '(combined_idx, 0, 9)\n', (21138, 21158), True, 'import tensorflow as tf\n'), ((21243, 21269), 'util.shape', 'util.shape', (['mention_emb', '(0)'], {}), 
'(mention_emb, 0)\n', (21253, 21269), False, 'import util\n'), ((21278, 21304), 'util.shape', 'util.shape', (['antecedents', '(1)'], {}), '(antecedents, 1)\n', (21288, 21304), False, 'import util\n'), ((21775, 21805), 'tensorflow.concat', 'tf.concat', (['feature_emb_list', '(2)'], {}), '(feature_emb_list, 2)\n', (21784, 21805), True, 'import tensorflow as tf\n'), ((21838, 21878), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['feature_emb', 'self.dropout'], {}), '(feature_emb, self.dropout)\n', (21851, 21878), True, 'import tensorflow as tf\n'), ((21911, 21941), 'tensorflow.expand_dims', 'tf.expand_dims', (['mention_emb', '(1)'], {}), '(mention_emb, 1)\n', (21925, 21941), True, 'import tensorflow as tf\n'), ((22036, 22066), 'tensorflow.tile', 'tf.tile', (['target_emb', '[1, c, 1]'], {}), '(target_emb, [1, c, 1])\n', (22043, 22066), True, 'import tensorflow as tf\n'), ((22097, 22168), 'tensorflow.concat', 'tf.concat', (['[target_emb, antecedent_emb, similarity_emb, feature_emb]', '(2)'], {}), '([target_emb, antecedent_emb, similarity_emb, feature_emb], 2)\n', (22106, 22168), True, 'import tensorflow as tf\n'), ((703, 741), 'h5py.File', 'h5py.File', (["self.config['lm_path']", '"""r"""'], {}), "(self.config['lm_path'], 'r')\n", (712, 741), False, 'import h5py\n'), ((1805, 1833), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', 'shape'], {}), '(dtype, shape)\n', (1819, 1833), True, 'import tensorflow as tf\n'), ((4664, 4694), 'random.shuffle', 'random.shuffle', (['train_examples'], {}), '(train_examples)\n', (4678, 4694), False, 'import random\n'), ((6188, 6221), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6219, 6221), True, 'import tensorflow as tf\n'), ((6351, 6397), 'numpy.zeros', 'np.zeros', (['[0, 0, self.lm_size, self.lm_layers]'], {}), '([0, 0, self.lm_size, self.lm_layers])\n', (6359, 6397), True, 'import numpy as np\n'), ((13070, 13127), 'random.randint', 'random.randint', (['(0)', '(num_sentences - 
max_training_sentences)'], {}), '(0, num_sentences - max_training_sentences)\n', (13084, 13127), False, 'import random\n'), ((14693, 14719), 'tensorflow.shape', 'tf.shape', (['context_word_emb'], {}), '(context_word_emb)\n', (14701, 14719), True, 'import tensorflow as tf\n'), ((14749, 14775), 'tensorflow.shape', 'tf.shape', (['context_word_emb'], {}), '(context_word_emb)\n', (14757, 14775), True, 'import tensorflow as tf\n'), ((15338, 15429), 'util.cnn', 'util.cnn', (['flattened_char_emb', "self.config['filter_widths']", "self.config['filter_size']"], {}), "(flattened_char_emb, self.config['filter_widths'], self.config[\n 'filter_size'])\n", (15346, 15429), False, 'import util\n'), ((15809, 15830), 'util.shape', 'util.shape', (['lm_emb', '(2)'], {}), '(lm_emb, 2)\n', (15819, 15830), False, 'import util\n'), ((15853, 15874), 'util.shape', 'util.shape', (['lm_emb', '(3)'], {}), '(lm_emb, 3)\n', (15863, 15874), False, 'import util\n'), ((16179, 16269), 'tensorflow.reshape', 'tf.reshape', (['lm_emb', '[num_sentences * max_sentence_length * lm_emb_size, lm_num_layers]'], {}), '(lm_emb, [num_sentences * max_sentence_length * lm_emb_size,\n lm_num_layers])\n', (16189, 16269), True, 'import tensorflow as tf\n'), ((16441, 16535), 'tensorflow.reshape', 'tf.reshape', (['flattened_aggregated_lm_emb', '[num_sentences, max_sentence_length, lm_emb_size]'], {}), '(flattened_aggregated_lm_emb, [num_sentences, max_sentence_length,\n lm_emb_size])\n', (16451, 16535), True, 'import tensorflow as tf\n'), ((17739, 17773), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""plural_scores"""'], {}), "('plural_scores')\n", (17756, 17773), True, 'import tensorflow as tf\n'), ((17797, 17890), 'util.ffnn', 'util.ffnn', (['pair_emb', "self.config['ffnn_depth']", "self.config['ffnn_size']", '(1)', 'self.dropout'], {}), "(pair_emb, self.config['ffnn_depth'], self.config['ffnn_size'], 1,\n self.dropout)\n", (17806, 17890), False, 'import util\n'), ((17921, 17949), 'tensorflow.squeeze', 
'tf.squeeze', (['plural_scores', '(2)'], {}), '(plural_scores, 2)\n', (17931, 17949), True, 'import tensorflow as tf\n'), ((18123, 18167), 'tensorflow.reduce_any', 'tf.reduce_any', (['gold_labels', '(1)'], {'keepdims': '(True)'}), '(gold_labels, 1, keepdims=True)\n', (18136, 18167), True, 'import tensorflow as tf\n'), ((19184, 19227), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['span_width_emb', 'self.dropout'], {}), '(span_width_emb, self.dropout)\n', (19197, 19227), True, 'import tensorflow as tf\n'), ((19568, 19601), 'tensorflow.gather', 'tf.gather', (['head_emb', 'span_indices'], {}), '(head_emb, span_indices)\n', (19577, 19601), True, 'import tensorflow as tf\n'), ((19779, 19820), 'tensorflow.gather', 'tf.gather', (['self.head_scores', 'span_indices'], {}), '(self.head_scores, span_indices)\n', (19788, 19820), True, 'import tensorflow as tf\n'), ((20012, 20029), 'tensorflow.log', 'tf.log', (['span_mask'], {}), '(span_mask)\n', (20018, 20029), True, 'import tensorflow as tf\n'), ((20078, 20112), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['span_head_scores', '(1)'], {}), '(span_head_scores, 1)\n', (20091, 20112), True, 'import tensorflow as tf\n'), ((20160, 20208), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(span_attention * span_text_emb)', '(1)'], {}), '(span_attention * span_text_emb, 1)\n', (20173, 20208), True, 'import tensorflow as tf\n'), ((22284, 22297), 'tensorflow.shape', 'tf.shape', (['emb'], {}), '(emb)\n', (22292, 22297), True, 'import tensorflow as tf\n'), ((22327, 22340), 'tensorflow.shape', 'tf.shape', (['emb'], {}), '(emb)\n', (22335, 22340), True, 'import tensorflow as tf\n'), ((22426, 22480), 'tensorflow.reshape', 'tf.reshape', (['emb', '[num_sentences * max_sentence_length]'], {}), '(emb, [num_sentences * max_sentence_length])\n', (22436, 22480), True, 'import tensorflow as tf\n'), ((22718, 22782), 'tensorflow.reshape', 'tf.reshape', (['text_len_mask', '[num_sentences * max_sentence_length]'], {}), '(text_len_mask, [num_sentences * 
max_sentence_length])\n', (22728, 22782), True, 'import tensorflow as tf\n'), ((22872, 22890), 'tensorflow.shape', 'tf.shape', (['text_emb'], {}), '(text_emb)\n', (22880, 22890), True, 'import tensorflow as tf\n'), ((27931, 27962), 'util.make_summary', 'util.make_summary', (['summary_dict'], {}), '(summary_dict)\n', (27948, 27962), False, 'import util\n'), ((3487, 3503), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3497, 3503), False, 'import json\n'), ((3784, 3800), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3794, 3800), False, 'import json\n'), ((4738, 4778), 'random.shuffle', 'random.shuffle', (['auxiliary_train_examples'], {}), '(auxiliary_train_examples)\n', (4752, 4778), False, 'import random\n'), ((5947, 5968), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5966, 5968), True, 'import tensorflow as tf\n'), ((9999, 10020), 'numpy.array', 'np.array', (['cluster_ids'], {}), '(cluster_ids)\n', (10007, 10020), True, 'import numpy as np\n'), ((14179, 14203), 'tensorflow.to_float', 'tf.to_float', (['is_training'], {}), '(is_training)\n', (14190, 14203), True, 'import tensorflow as tf\n'), ((15886, 15921), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""lm_aggregation"""'], {}), "('lm_aggregation')\n", (15903, 15921), True, 'import tensorflow as tf\n'), ((16330, 16364), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.lm_weights', '(1)'], {}), '(self.lm_weights, 1)\n', (16344, 16364), True, 'import tensorflow as tf\n'), ((19028, 19134), 'tensorflow.get_variable', 'tf.get_variable', (['"""span_width_embeddings"""', "[self.config['max_span_width'], self.config['feature_size']]"], {}), "('span_width_embeddings', [self.config['max_span_width'],\n self.config['feature_size']])\n", (19043, 19134), True, 'import tensorflow as tf\n'), ((19389, 19419), 'tensorflow.expand_dims', 'tf.expand_dims', (['span_starts', '(1)'], {}), '(span_starts, 1)\n', (19403, 19419), True, 'import tensorflow as tf\n'), ((19640, 19672), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""head_scores"""'], {}), "('head_scores')\n", (19657, 19672), True, 'import tensorflow as tf\n'), ((19701, 19736), 'util.projection', 'util.projection', (['context_outputs', '(1)'], {}), '(context_outputs, 1)\n', (19716, 19736), False, 'import util\n'), ((19879, 19956), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['span_width', "self.config['max_span_width']"], {'dtype': 'tf.float32'}), "(span_width, self.config['max_span_width'], dtype=tf.float32)\n", (19895, 19956), True, 'import tensorflow as tf\n'), ((20459, 20489), 'tensorflow.to_float', 'tf.to_float', (['antecedent_labels'], {}), '(antecedent_labels)\n', (20470, 20489), True, 'import tensorflow as tf\n'), ((21584, 21661), 'tensorflow.get_variable', 'tf.get_variable', (['"""antecedent_distance_emb"""', "[10, self.config['feature_size']]"], {}), "('antecedent_distance_emb', [10, self.config['feature_size']])\n", (21599, 21661), True, 'import tensorflow as tf\n'), ((23757, 23934), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'cell_fw', 'cell_bw': 'cell_bw', 'inputs': 'current_inputs', 'sequence_length': 'text_len', 'initial_state_fw': 'state_fw', 'initial_state_bw': 'state_bw'}), '(cell_fw=cell_fw, cell_bw=cell_bw, inputs=\n current_inputs, sequence_length=text_len, initial_state_fw=state_fw,\n initial_state_bw=state_bw)\n', (23788, 23934), True, 'import tensorflow as tf\n'), ((24011, 24049), 'tensorflow.concat', 'tf.concat', (['[fw_outputs, bw_outputs]', '(2)'], {}), '([fw_outputs, bw_outputs], 2)\n', (24020, 24049), True, 'import tensorflow as tf\n'), ((24117, 24163), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['text_outputs', 'self.lstm_dropout'], {}), '(text_outputs, self.lstm_dropout)\n', (24130, 24163), True, 'import tensorflow as tf\n'), ((25550, 25566), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (25560, 25566), False, 'import json\n'), ((4952, 4967), 'random.random', 'random.random', 
([], {}), '()\n', (4965, 4967), False, 'import random\n'), ((15187, 15210), 'util.shape', 'util.shape', (['char_emb', '(2)'], {}), '(char_emb, 2)\n', (15197, 15210), False, 'import util\n'), ((15212, 15235), 'util.shape', 'util.shape', (['char_emb', '(3)'], {}), '(char_emb, 3)\n', (15222, 15235), False, 'import util\n'), ((15577, 15621), 'util.shape', 'util.shape', (['flattened_aggregated_char_emb', '(1)'], {}), '(flattened_aggregated_char_emb, 1)\n', (15587, 15621), False, 'import util\n'), ((18004, 18052), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['antecedents_len', 'c', 'tf.float32'], {}), '(antecedents_len, c, tf.float32)\n', (18020, 18052), True, 'import tensorflow as tf\n'), ((19343, 19382), 'tensorflow.range', 'tf.range', (["self.config['max_span_width']"], {}), "(self.config['max_span_width'])\n", (19351, 19382), True, 'import tensorflow as tf\n'), ((19474, 19504), 'util.shape', 'util.shape', (['context_outputs', '(0)'], {}), '(context_outputs, 0)\n', (19484, 19504), False, 'import util\n'), ((21380, 21391), 'tensorflow.range', 'tf.range', (['c'], {}), '(c)\n', (21388, 21391), True, 'import tensorflow as tf\n'), ((23105, 23133), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""fw_cell"""'], {}), "('fw_cell')\n", (23122, 23133), True, 'import tensorflow as tf\n'), ((23155, 23251), 'util.CustomLSTMCell', 'util.CustomLSTMCell', (["self.config['contextualization_size']", 'num_sentences', 'self.lstm_dropout'], {}), "(self.config['contextualization_size'], num_sentences,\n self.lstm_dropout)\n", (23174, 23251), False, 'import util\n'), ((23261, 23289), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bw_cell"""'], {}), "('bw_cell')\n", (23278, 23289), True, 'import tensorflow as tf\n'), ((23311, 23407), 'util.CustomLSTMCell', 'util.CustomLSTMCell', (["self.config['contextualization_size']", 'num_sentences', 'self.lstm_dropout'], {}), "(self.config['contextualization_size'], num_sentences,\n self.lstm_dropout)\n", (23330, 23407), False, 
'import util\n'), ((23453, 23505), 'tensorflow.tile', 'tf.tile', (['cell_fw.initial_state.c', '[num_sentences, 1]'], {}), '(cell_fw.initial_state.c, [num_sentences, 1])\n', (23460, 23505), True, 'import tensorflow as tf\n'), ((23507, 23559), 'tensorflow.tile', 'tf.tile', (['cell_fw.initial_state.h', '[num_sentences, 1]'], {}), '(cell_fw.initial_state.h, [num_sentences, 1])\n', (23514, 23559), True, 'import tensorflow as tf\n'), ((23610, 23662), 'tensorflow.tile', 'tf.tile', (['cell_bw.initial_state.c', '[num_sentences, 1]'], {}), '(cell_bw.initial_state.c, [num_sentences, 1])\n', (23617, 23662), True, 'import tensorflow as tf\n'), ((23664, 23716), 'tensorflow.tile', 'tf.tile', (['cell_bw.initial_state.h', '[num_sentences, 1]'], {}), '(cell_bw.initial_state.h, [num_sentences, 1])\n', (23671, 23716), True, 'import tensorflow as tf\n'), ((5117, 5147), 'random.shuffle', 'random.shuffle', (['train_examples'], {}), '(train_examples)\n', (5131, 5147), False, 'import random\n'), ((5348, 5388), 'random.shuffle', 'random.shuffle', (['auxiliary_train_examples'], {}), '(auxiliary_train_examples)\n', (5362, 5388), False, 'import random\n'), ((16124, 16152), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (16147, 16152), True, 'import tensorflow as tf\n'), ((20966, 20977), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (20974, 20977), False, 'import math\n'), ((22581, 22599), 'util.shape', 'util.shape', (['emb', '(2)'], {}), '(emb, 2)\n', (22591, 22599), False, 'import util\n'), ((10928, 10943), 'random.random', 'random.random', ([], {}), '()\n', (10941, 10943), False, 'import random\n'), ((16021, 16049), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (16044, 16049), True, 'import tensorflow as tf\n'), ((20942, 20964), 'tensorflow.to_float', 'tf.to_float', (['distances'], {}), '(distances)\n', (20953, 20964), True, 'import tensorflow as tf\n'), ((24253, 24280), 'util.shape', 'util.shape', 
(['text_outputs', '(2)'], {}), '(text_outputs, 2)\n', (24263, 24280), False, 'import util\n'), ((11569, 11584), 'random.random', 'random.random', ([], {}), '()\n', (11582, 11584), False, 'import random\n')] |
import logging
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi.data import AnnDataManager
from scvi.data.fields import CategoricalObsField, LayerField, NumericalObsField
from scvi.external.stereoscope._module import RNADeconv, SpatialDeconv
from scvi.model.base import BaseModelClass, UnsupervisedTrainingMixin
from scvi.utils import setup_anndata_dsp
logger = logging.getLogger(__name__)
class RNAStereoscope(UnsupervisedTrainingMixin, BaseModelClass):
    """
    Single-cell half of Stereoscope [Andersson20]_.

    Learns per-cell-type expression parameters from scRNA-seq data; these are
    later transferred to the spatial model for deconvolution of spatial
    transcriptomics. Reimplementation of https://github.com/almaan/stereoscope.

    Parameters
    ----------
    sc_adata
        single-cell AnnData object that has been registered via
        :meth:`~scvi.external.RNAStereoscope.setup_anndata`.
    **model_kwargs
        Keyword args for :class:`~scvi.external.stereoscope.RNADeconv`

    Examples
    --------
    >>> sc_adata = anndata.read_h5ad(path_to_sc_anndata)
    >>> scvi.external.RNAStereoscope.setup_anndata(sc_adata, labels_key="labels")
    >>> stereo = scvi.external.stereoscope.RNAStereoscope(sc_adata)
    >>> stereo.train()
    """

    def __init__(
        self,
        sc_adata: AnnData,
        **model_kwargs,
    ):
        super().__init__(sc_adata)
        self.n_genes = self.summary_stats.n_vars
        self.n_labels = self.summary_stats.n_labels
        # The module holds the scRNA-seq deconvolution parameters.
        self.module = RNADeconv(
            n_genes=self.n_genes,
            n_labels=self.n_labels,
            **model_kwargs,
        )
        self._model_summary_string = (
            f"RNADeconv Model with params: \nn_genes: {self.n_genes}, "
            f"n_labels: {self.n_labels}"
        )
        self.init_params_ = self._get_init_params(locals())

    def train(
        self,
        max_epochs: int = 400,
        lr: float = 0.01,
        use_gpu: Optional[Union[str, int, bool]] = None,
        train_size: float = 1,
        validation_size: Optional[float] = None,
        batch_size: int = 128,
        plan_kwargs: Optional[dict] = None,
        **kwargs,
    ):
        """
        Trains the model using MAP inference.

        Parameters
        ----------
        max_epochs
            Number of epochs to train for
        lr
            Learning rate for optimization.
        use_gpu
            Use default GPU if available (if None or True), or index of GPU to use (if int),
            or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
        train_size
            Size of training set in the range [0.0, 1.0].
        validation_size
            Size of the test set. If `None`, defaults to 1 - `train_size`. If
            `train_size + validation_size < 1`, the remaining cells belong to a test set.
        batch_size
            Minibatch size to use during training.
        plan_kwargs
            Keyword args for :class:`~scvi.train.TrainingPlan`. Keyword arguments passed to
            `train()` will overwrite values present in `plan_kwargs`, when appropriate.
        **kwargs
            Other keyword args for :class:`~scvi.train.Trainer`.
        """
        # Arguments given directly to train() override any duplicates in plan_kwargs.
        override = {"lr": lr}
        if plan_kwargs is None:
            plan_kwargs = override
        else:
            plan_kwargs.update(override)
        super().train(
            max_epochs=max_epochs,
            use_gpu=use_gpu,
            train_size=train_size,
            validation_size=validation_size,
            batch_size=batch_size,
            plan_kwargs=plan_kwargs,
            **kwargs,
        )

    @classmethod
    @setup_anndata_dsp.dedent
    def setup_anndata(
        cls,
        adata: AnnData,
        labels_key: Optional[str] = None,
        layer: Optional[str] = None,
        **kwargs,
    ):
        """
        %(summary)s.

        Parameters
        ----------
        %(param_labels_key)s
        %(param_layer)s
        """
        setup_method_args = cls._get_setup_method_args(**locals())
        # Register raw counts plus the cell-type labels used for deconvolution.
        registered_fields = [
            LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
            CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key),
        ]
        adata_manager = AnnDataManager(
            fields=registered_fields, setup_method_args=setup_method_args
        )
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)
class SpatialStereoscope(UnsupervisedTrainingMixin, BaseModelClass):
    """
    Reimplementation of Stereoscope [Andersson20]_ for deconvolution of spatial transcriptomics from single-cell transcriptomics.
    https://github.com/almaan/stereoscope.

    Parameters
    ----------
    st_adata
        spatial transcriptomics AnnData object that has been registered via :meth:`~scvi.external.SpatialStereoscope.setup_anndata`.
    sc_params
        parameters of the model learned from the single-cell RNA seq data for deconvolution.
    cell_type_mapping
        numpy array mapping for the cell types used in the deconvolution
    prior_weight
        how to reweight the minibatches for stochastic optimization. "n_obs" is the valid
        procedure, "minibatch" is the procedure implemented in Stereoscope.
    **model_kwargs
        Keyword args for :class:`~scvi.external.stereoscope.SpatialDeconv`

    Examples
    --------
    >>> sc_adata = anndata.read_h5ad(path_to_sc_anndata)
    >>> scvi.external.RNAStereoscope.setup_anndata(sc_adata, labels_key="labels")
    >>> sc_model = scvi.external.stereoscope.RNAStereoscope(sc_adata)
    >>> sc_model.train()
    >>> st_adata = anndata.read_h5ad(path_to_st_anndata)
    >>> scvi.external.SpatialStereoscope.setup_anndata(st_adata)
    >>> stereo = scvi.external.stereoscope.SpatialStereoscope.from_rna_model(st_adata, sc_model)
    >>> stereo.train()
    >>> st_adata.obsm["deconv"] = stereo.get_proportions()

    Notes
    -----
    See further usage examples in the following tutorials:

    1. :doc:`/user_guide/notebooks/stereoscope_heart_LV_tutorial`
    """

    def __init__(
        self,
        st_adata: AnnData,
        sc_params: Tuple[np.ndarray],
        cell_type_mapping: np.ndarray,
        prior_weight: Literal["n_obs", "minibatch"] = "n_obs",
        **model_kwargs,
    ):
        super().__init__(st_adata)

        self.module = SpatialDeconv(
            n_spots=st_adata.n_obs,
            sc_params=sc_params,
            prior_weight=prior_weight,
            **model_kwargs,
        )
        # Label fixed: this is the spatial model; the previous string said
        # "RNADeconv", copied from the single-cell class.
        self._model_summary_string = (
            "SpatialDeconv Model with params: \nn_spots: {}"
        ).format(
            st_adata.n_obs,
        )
        self.cell_type_mapping = cell_type_mapping
        self.init_params_ = self._get_init_params(locals())

    @classmethod
    def from_rna_model(
        cls,
        st_adata: AnnData,
        sc_model: RNAStereoscope,
        prior_weight: Literal["n_obs", "minibatch"] = "n_obs",
        layer: Optional[str] = None,
        **model_kwargs,
    ):
        """
        Alternate constructor for exploiting a pre-trained model on RNA-seq data.

        Parameters
        ----------
        st_adata
            registed anndata object
        sc_model
            trained RNADeconv model
        prior_weight
            how to reweight the minibatches for stochastic optimization. "n_obs" is the valid
            procedure, "minibatch" is the procedure implemented in Stereoscope.
        layer
            if not `None`, uses this as the key in `adata.layers` for raw count data.
        **model_kwargs
            Keyword args for :class:`~scvi.external.SpatialDeconv`
        """
        cls.setup_anndata(st_adata, layer=layer)
        # Transfer the learned per-cell-type parameters and the label mapping
        # from the single-cell model into the spatial model.
        return cls(
            st_adata,
            sc_model.module.get_params(),
            sc_model.adata_manager.get_state_registry(
                REGISTRY_KEYS.LABELS_KEY
            ).categorical_mapping,
            prior_weight=prior_weight,
            **model_kwargs,
        )

    def get_proportions(self, keep_noise=False) -> pd.DataFrame:
        """
        Returns the estimated cell type proportion for the spatial data.

        Shape is n_cells x n_labels OR n_cells x (n_labels + 1) if keep_noise

        Parameters
        ----------
        keep_noise
            whether to account for the noise term as a standalone cell type in the proportion estimate.
        """
        self._check_if_trained()

        column_names = self.cell_type_mapping
        if keep_noise:
            # Bug fix: ``column_names.append(...)`` fails on a numpy array
            # (no .append method). np.append returns a new array with the
            # extra "noise_term" column name.
            column_names = np.append(column_names, "noise_term")
        return pd.DataFrame(
            data=self.module.get_proportions(keep_noise),
            columns=column_names,
            index=self.adata.obs.index,
        )

    def get_scale_for_ct(
        self,
        y: np.ndarray,
    ) -> np.ndarray:
        r"""
        Calculate the cell type specific expression.

        Parameters
        ----------
        y
            numpy array containing the list of cell types

        Returns
        -------
        gene_expression

        Raises
        ------
        ValueError
            If a queried cell type is absent from the reference mapping.
        """
        self._check_if_trained()

        # Resolve each queried cell type to its index in the reference
        # mapping, raising a clear error for unknown types (previously an
        # unmatched type produced an opaque IndexError and the subsequent
        # shape check was unreachable).
        indices = []
        for ct in y:
            matches = np.where(self.cell_type_mapping == ct)[0]
            if matches.size == 0:
                raise ValueError(
                    f"Cell type {ct!r} not found in reference mapping. "
                    "Please check cell type query."
                )
            indices.append(matches[0])
        px_scale = self.module.get_ct_specific_expression(
            torch.tensor(indices)[:, None]
        )
        return np.array(px_scale.cpu())

    def train(
        self,
        max_epochs: int = 400,
        lr: float = 0.01,
        use_gpu: Optional[Union[str, int, bool]] = None,
        batch_size: int = 128,
        plan_kwargs: Optional[dict] = None,
        **kwargs,
    ):
        """
        Trains the model using MAP inference.

        Parameters
        ----------
        max_epochs
            Number of epochs to train for
        lr
            Learning rate for optimization.
        use_gpu
            Use default GPU if available (if None or True), or index of GPU to use (if int),
            or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
        batch_size
            Minibatch size to use during training.
        plan_kwargs
            Keyword args for :class:`~scvi.train.TrainingPlan`. Keyword arguments passed to
            `train()` will overwrite values present in `plan_kwargs`, when appropriate.
        **kwargs
            Other keyword args for :class:`~scvi.train.Trainer`.
        """
        # Arguments given directly to train() override duplicates in plan_kwargs.
        update_dict = {
            "lr": lr,
        }
        if plan_kwargs is not None:
            plan_kwargs.update(update_dict)
        else:
            plan_kwargs = update_dict
        # The whole spatial dataset is used for fitting (no validation split).
        super().train(
            max_epochs=max_epochs,
            use_gpu=use_gpu,
            train_size=1,
            validation_size=None,
            batch_size=batch_size,
            plan_kwargs=plan_kwargs,
            **kwargs,
        )

    @classmethod
    @setup_anndata_dsp.dedent
    def setup_anndata(
        cls,
        adata: AnnData,
        layer: Optional[str] = None,
        **kwargs,
    ):
        """
        %(summary)s.

        Parameters
        ----------
        %(param_layer)s
        """
        setup_method_args = cls._get_setup_method_args(**locals())
        # add index for each cell (provided to pyro plate for correct minibatching)
        adata.obs["_indices"] = np.arange(adata.n_obs)
        anndata_fields = [
            LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
            NumericalObsField(REGISTRY_KEYS.INDICES_KEY, "_indices"),
        ]
        adata_manager = AnnDataManager(
            fields=anndata_fields, setup_method_args=setup_method_args
        )
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)
| [
"logging.getLogger",
"scvi.data.fields.CategoricalObsField",
"numpy.arange",
"scvi.external.stereoscope._module.SpatialDeconv",
"numpy.where",
"scvi.data.fields.NumericalObsField",
"scvi.data.fields.LayerField",
"torch.tensor",
"scvi.external.stereoscope._module.RNADeconv",
"scvi.data.AnnDataManag... | [((512, 539), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (529, 539), False, 'import logging\n'), ((1618, 1689), 'scvi.external.stereoscope._module.RNADeconv', 'RNADeconv', ([], {'n_genes': 'self.n_genes', 'n_labels': 'self.n_labels'}), '(n_genes=self.n_genes, n_labels=self.n_labels, **model_kwargs)\n', (1627, 1689), False, 'from scvi.external.stereoscope._module import RNADeconv, SpatialDeconv\n'), ((4424, 4498), 'scvi.data.AnnDataManager', 'AnnDataManager', ([], {'fields': 'anndata_fields', 'setup_method_args': 'setup_method_args'}), '(fields=anndata_fields, setup_method_args=setup_method_args)\n', (4438, 4498), False, 'from scvi.data import AnnDataManager\n'), ((6534, 6640), 'scvi.external.stereoscope._module.SpatialDeconv', 'SpatialDeconv', ([], {'n_spots': 'st_adata.n_obs', 'sc_params': 'sc_params', 'prior_weight': 'prior_weight'}), '(n_spots=st_adata.n_obs, sc_params=sc_params, prior_weight=\n prior_weight, **model_kwargs)\n', (6547, 6640), False, 'from scvi.external.stereoscope._module import RNADeconv, SpatialDeconv\n'), ((11592, 11614), 'numpy.arange', 'np.arange', (['adata.n_obs'], {}), '(adata.n_obs)\n', (11601, 11614), True, 'import numpy as np\n'), ((11818, 11892), 'scvi.data.AnnDataManager', 'AnnDataManager', ([], {'fields': 'anndata_fields', 'setup_method_args': 'setup_method_args'}), '(fields=anndata_fields, setup_method_args=setup_method_args)\n', (11832, 11892), False, 'from scvi.data import AnnDataManager\n'), ((4259, 4317), 'scvi.data.fields.LayerField', 'LayerField', (['REGISTRY_KEYS.X_KEY', 'layer'], {'is_count_data': '(True)'}), '(REGISTRY_KEYS.X_KEY, layer, is_count_data=True)\n', (4269, 4317), False, 'from scvi.data.fields import CategoricalObsField, LayerField, NumericalObsField\n'), ((4331, 4388), 'scvi.data.fields.CategoricalObsField', 'CategoricalObsField', (['REGISTRY_KEYS.LABELS_KEY', 'labels_key'], {}), '(REGISTRY_KEYS.LABELS_KEY, labels_key)\n', (4350, 4388), False, 
'from scvi.data.fields import CategoricalObsField, LayerField, NumericalObsField\n'), ((11654, 11712), 'scvi.data.fields.LayerField', 'LayerField', (['REGISTRY_KEYS.X_KEY', 'layer'], {'is_count_data': '(True)'}), '(REGISTRY_KEYS.X_KEY, layer, is_count_data=True)\n', (11664, 11712), False, 'from scvi.data.fields import CategoricalObsField, LayerField, NumericalObsField\n'), ((11726, 11782), 'scvi.data.fields.NumericalObsField', 'NumericalObsField', (['REGISTRY_KEYS.INDICES_KEY', '"""_indices"""'], {}), "(REGISTRY_KEYS.INDICES_KEY, '_indices')\n", (11743, 11782), False, 'from scvi.data.fields import CategoricalObsField, LayerField, NumericalObsField\n'), ((9614, 9633), 'torch.tensor', 'torch.tensor', (['ind_y'], {}), '(ind_y)\n', (9626, 9633), False, 'import torch\n'), ((9306, 9344), 'numpy.where', 'np.where', (['(ct == self.cell_type_mapping)'], {}), '(ct == self.cell_type_mapping)\n', (9314, 9344), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from stella.parameter.metal import feh_to_z
# Visualise the ([Fe/H], [alpha/Fe]) -> Z conversion as a 2-D image (left)
# and a 3-D surface (right), sharing one colorbar, then save to zmetal.png.
fig = plt.figure(figsize=(10, 4), dpi=150)
ax_map = fig.add_axes([0.07, 0.15, 0.40, 0.80])
ax_surf = fig.add_axes([0.50, 0.15, 0.40, 0.80], projection='3d')
ax_cbar = fig.add_axes([0.90, 0.15, 0.02, 0.80])

# Grid of metallicities and alpha-enhancements; the 1e-6 epsilon keeps the
# upper endpoints inside np.arange's half-open interval.
fe0, fe1, dfe = -3.0, 1.0, 0.10
af0, af1, daf = 0.0, 0.6, 0.02
feh, alpha = np.meshgrid(np.arange(fe0, fe1 + 1e-6, dfe),
                       np.arange(af0, af1 + 1e-6, daf))
zgrid = feh_to_z(feh, alpha)

family = 'Georgia'

# Left panel: image of Z with the y axis flipped so [alpha/Fe] increases upward.
image = ax_map.imshow(zgrid, interpolation='nearest', aspect='auto')
ylow, yhigh = ax_map.get_ylim()
ax_map.set_ylim(yhigh, ylow)
# Relabel the pixel-index ticks with the physical grid values.
ax_map.set_xticklabels([t * dfe + fe0 for t in ax_map.get_xticks()])
ax_map.set_yticklabels([t * daf + af0 for t in ax_map.get_yticks()])

# Right panel: the same grid as a 3-D surface.
ax_surf.plot_surface(feh, alpha, zgrid, cmap='jet', rstride=1, cstride=1,
        lw=0, antialiased=True)
ax_surf.view_init(elev=30, azim=225)

cbar = fig.colorbar(image, cax=ax_cbar)
cbar.set_label('Z', family=family)

for panel in [ax_map, ax_surf]:
    panel.set_xlabel('[Fe/H]', family=family)
    panel.set_ylabel(u'[\u03b1/Fe]', family=family)
ax_surf.set_zlabel('Z', family=family)
ax_surf.set_xticks(np.arange(fe0, fe1 + 1e-6, 1.0))

fig.savefig('zmetal.png')
plt.show()
| [
"stella.parameter.metal.feh_to_z",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((165, 201), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)', 'dpi': '(150)'}), '(figsize=(10, 4), dpi=150)\n', (175, 201), True, 'import matplotlib.pyplot as plt\n'), ((421, 453), 'numpy.arange', 'np.arange', (['fe0', '(fe1 + 1e-06)', 'dfe'], {}), '(fe0, fe1 + 1e-06, dfe)\n', (430, 453), True, 'import numpy as np\n'), ((463, 495), 'numpy.arange', 'np.arange', (['af0', '(af1 + 1e-06)', 'daf'], {}), '(af0, af1 + 1e-06, daf)\n', (472, 495), True, 'import numpy as np\n'), ((506, 537), 'numpy.meshgrid', 'np.meshgrid', (['feh_lst', 'alpha_lst'], {}), '(feh_lst, alpha_lst)\n', (517, 537), True, 'import numpy as np\n'), ((546, 566), 'stella.parameter.metal.feh_to_z', 'feh_to_z', (['feh', 'alpha'], {}), '(feh, alpha)\n', (554, 566), False, 'from stella.parameter.metal import feh_to_z\n'), ((1248, 1258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1256, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1190, 1222), 'numpy.arange', 'np.arange', (['fe0', '(fe1 + 1e-06)', '(1.0)'], {}), '(fe0, fe1 + 1e-06, 1.0)\n', (1199, 1222), True, 'import numpy as np\n')] |
# Author: <NAME>
# Module: Siamese LSTM with Fully Connected Layers
# Competition : Quora question pairs
#packages required
import os
import re
import numpy as np
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,f1_score,confusion_matrix
import gensim
import nltk
import tensorflow as tf
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.layers import Input, Embedding, LSTM, Merge,Bidirectional
import keras.backend as K
from keras.optimizers import Adadelta
from keras.callbacks import ModelCheckpoint
from keras.layers import LeakyReLU,Dense,Dropout,Lambda
from keras import metrics
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate,Concatenate
# Fetch the NLTK tokenizer and stopword corpora if they are not already cached.
nltk.download('punkt')
nltk.download('stopwords')
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# English stopwords; tokens in this list are skipped when building sentence vectors.
stopword = stopwords.words('english')
# Load the Quora question-pairs training set.
train_data = pd.read_csv('train.csv')
print(len(train_data))
# Keep only the two question columns plus the duplicate label.
train_data.drop(["qid1","qid2","id"],inplace=True,axis=1)
train_labels = train_data["is_duplicate"].astype(int)
# NOTE(review): Series.as_matrix() was removed in pandas 1.0; .to_numpy() is the modern equivalent.
train_labels = train_labels.as_matrix()
print(train_labels[0:2])
# Google's pre-trained word2vec vectors (300-d); point this at your local bin/txt file.
google_w2v_model = gensim.models.KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)
# Stanford GloVe vectors ('glove.42B.300d.zip', unzipped): one word per line
# followed by its 300-dimensional embedding.
def load_glove_model(path='glove.42B.300d.txt'):
    """Load GloVe word vectors from a whitespace-separated text file.

    Parameters
    ----------
    path : str
        Path to the GloVe ``.txt`` file.  The default keeps the original
        hard-coded location, so existing callers are unaffected.

    Returns
    -------
    dict
        Mapping from word (str) to its embedding (np.ndarray of floats).
    """
    glove_model = {}
    # Iterate the file lazily instead of f.readlines(): the 42B-token GloVe
    # file is several GB, and materialising every line doubles peak memory.
    # GloVe releases are UTF-8 encoded, so decode explicitly.
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            word, *values = line.split()
            # Let numpy parse the numeric strings directly; faster than
            # building an intermediate Python float list.
            glove_model[word] = np.asarray(values, dtype=float)
    return glove_model
# Build the GloVe lookup table once at module load (large: several GB of RAM).
glove_w2v_model = load_glove_model()
# Text normalisation: expand common English contractions, strip digits, and
# split the result into word tokens.
def preprocess(text):
    """Return the list of word tokens for *text* after contraction expansion."""
    # (pattern, replacement) pairs, applied in this exact order.
    rules = (
        (r"it\'s", "it is"),
        (r"i\'d", "i would"),
        (r"don\'t", "do not"),
        (r"he\'s", "he is"),
        (r"there\'s", "there is"),
        (r"that\'s", "that is"),
        (r"can\'t", "can not"),
        (r"cannot", "can not "),
        (r"what\'s", "what is"),
        (r"What\'s", "what is"),
        (r"\'ve ", " have "),
        (r"n\'t", " not "),
        (r"i\'m", "i am "),
        (r"I\'m", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r"\'s", " is"),
        (r"[0-9]", " "),
    )
    text = str(text)
    for pattern, replacement in rules:
        text = re.sub(pattern, replacement, text)
    return word_tokenize(text)
# Split into train (98%) and validation (2%) sets; the fixed random_state
# makes the split reproducible across runs.
X_train,X_val,y_train,y_val = train_test_split(train_data,train_labels,test_size=0.02,random_state=0)
# train_test_split keeps the source DataFrame's original row labels;
# renumber them from zero so positional lookups below work.
def resetindex(data):
    """Reset *data*'s index to 0..n-1 in place and return the same frame."""
    data.reset_index(inplace=True, drop=True)
    return data
X_train = resetindex(X_train) # Reset the train set indices
X_val = resetindex(X_val) # Reset the validation set indices
train_length = len(X_train)
val_length = len(X_val)
max_sentence_length = 20 # Maximum number of words per sentence to be considered
embedding_dim = 300 # Each word is converted to 300 dimensional vector
# Pre-allocated (samples, words, dims) tensors fed to the Siamese network.
train_ques1 = np.zeros((train_length,max_sentence_length,embedding_dim)) # Vectors of question1 in train set
train_ques2 = np.zeros((train_length,max_sentence_length,embedding_dim)) # Vectors of question2 in train set
val_ques1 = np.zeros((val_length,max_sentence_length,embedding_dim)) # Vectors of question1 in validation set
val_ques2 = np.zeros((val_length,max_sentence_length,embedding_dim)) # Vectors of question2 in validation set
# Padding strategy for sentences shorter than max_sentence_length.
# The original comments stated that zero-initialization was chosen over
# random initialization ("it gave better results"), yet the code wrote
# np.random.rand values — the fix below makes the code match the documented
# choice.  Random padding can be restored with np.random.rand(embedding_dim)
# or np.random.normal(0, 1, embedding_dim) if desired.
def pad_sentences(start, vectors):
    """Zero-fill the padding rows of a sentence matrix, in place.

    Parameters
    ----------
    start : int
        Index of the first padding row (i.e. the number of real word
        vectors already written).
    vectors : np.ndarray
        Sentence matrix of shape (max_sentence_length, embedding_dim);
        rows ``start`` .. end are overwritten with zeros.
    """
    # Use the array's own length rather than the module-level constant so
    # the helper also works for matrices of other sizes.
    for i in range(start, vectors.shape[0]):
        vectors[i, :] = 0.0
# Turn one sentence into a (max_sentence_length, embedding_dim) matrix.
# Lookup order per token: GloVe first, then Google word2vec; stopwords are
# skipped.  Rows past the last found word are zero-padded (pad_sentences).
# (An Embedding layer with an OOV index table would be an alternative design,
# but direct lookup keeps the pipeline simpler.)
def convert_to_vectors(sentence):
    """Vectorise *sentence* into a fixed-size word-embedding matrix.

    Parameters
    ----------
    sentence : str
        Raw question text; tokenised by ``preprocess``.

    Returns
    -------
    np.ndarray
        Array of shape (max_sentence_length, embedding_dim).
    """
    sents = preprocess(sentence)
    vectors = np.zeros((max_sentence_length,embedding_dim))
    count = 0
    for sent in sents:
        if sent not in stopword:        # stopwords contribute no row
            # BUGFIX: this previously referenced the undefined name
            # `glove_w2vmodel` (the module variable is `glove_w2v_model`),
            # raising NameError on the first non-stopword token.
            if sent in glove_w2v_model:
                vectors[count,:] = glove_w2v_model[sent]
                count+=1
            elif sent in google_w2v_model:
                vectors[count,:] = google_w2v_model[sent]
                count+=1
        if(count==max_sentence_length): # matrix full — stop early
            return vectors
    if(count<max_sentence_length):
        pad_sentences(count,vectors)
    return vectors
def generate_train_vectors():
    """Fill the pre-allocated train_ques1/train_ques2 tensors row by row."""
    question1 = X_train["question1"]
    question2 = X_train["question2"]
    for row in range(train_length):
        train_ques1[row] = convert_to_vectors(question1[row])
        train_ques2[row] = convert_to_vectors(question2[row])
# Run the full vectorisation pass over the training split (slow; fills RAM).
generate_train_vectors()
def generate_validation_vectors():
    """Fill the pre-allocated val_ques1/val_ques2 tensors row by row."""
    question1 = X_val["question1"]
    question2 = X_val["question2"]
    for row in range(val_length):
        val_ques1[row] = convert_to_vectors(question1[row])
        val_ques2[row] = convert_to_vectors(question2[row])
# Run the vectorisation pass over the validation split.
generate_validation_vectors()
# Siamese LSTM: two stacked LSTM layers SHARED between both questions,
# followed by three fully connected layers on the concatenated encodings.
def generate_model(n_hidden1,n_hidden2):
    """Build and compile the Siamese LSTM network.

    Parameters
    ----------
    n_hidden1 : int
        Hidden size of the first (sequence-returning) LSTM layer.
    n_hidden2 : int
        Hidden size of the second LSTM layer (final sentence encoding).

    Returns
    -------
    keras.models.Model
        Compiled model taking [question1, question2] tensors and emitting a
        duplicate probability in [0, 1].
    """
    left_input = Input(shape=(max_sentence_length,embedding_dim))
    right_input = Input(shape=(max_sentence_length,embedding_dim))
    # The SAME layer objects encode both questions => shared weights (Siamese).
    lstm1 = LSTM(n_hidden1,return_sequences=True)
    lstm2 = LSTM(n_hidden2,return_sequences=False)
    lstm1_ques1 = lstm1(left_input)
    lstm1_ques2 = lstm1(right_input)
    lstm2_ques1 = lstm2(lstm1_ques1)
    lstm2_ques2 = lstm2(lstm1_ques2)
    # BUGFIX: `Concatenate([a, b])` only CONSTRUCTS a layer (the list ends up
    # as the `axis` constructor argument) and never applies it, breaking the
    # graph.  Use the functional `concatenate` helper (already imported).
    combined_output = concatenate([lstm2_ques1,lstm2_ques2])
    # Fully connected head: 200 -> 64 -> 8 -> 1 with BN + dropout.
    dense1 = Dense(200,activation='relu')(combined_output)
    dense1_batch = BatchNormalization()(dense1)
    dense1_out = Dropout(0.2)(dense1_batch)
    dense2 = Dense(64,activation='relu')(dense1_out)
    dense2_batch = BatchNormalization()(dense2)
    dense2_out = Dropout(0.2)(dense2_batch)
    dense3 = Dense(8,activation='relu')(dense2_out)
    pred = Dense(1,activation='sigmoid')(dense3)
    model = Model(inputs=[left_input,right_input],outputs=[pred])
    # MSE on the sigmoid output, as in the original design
    # (binary_crossentropy would be the more conventional choice here).
    model.compile(optimizer='rmsprop',loss='mean_squared_error',metrics=['accuracy'])
    return model
# Hidden sizes of the two stacked LSTM layers.
n_hidden1 = 64
n_hidden2 = 16
model = generate_model(n_hidden1,n_hidden2)
# Train on the question-pair tensors; validate on the held-out 2% split.
model.fit([train_ques1,train_ques2],y_train,validation_data=([val_ques1,val_ques2],y_val),batch_size=256,epochs=25)
model.save('siamese-dense-lstm.h5')
# Kaggle test set: vectorised the same way as the training data below.
test_data = pd.read_csv('test.csv')
test_length = len(test_data)
print(test_length)
print(test_data.head(3))
test_data.drop(["test_id",],axis=1,inplace=True)
test_ques1 = np.zeros((test_length,max_sentence_length,embedding_dim))
test_ques2 = np.zeros((test_length,max_sentence_length,embedding_dim))
def generate_test_vectors():
    """Fill the pre-allocated test_ques1/test_ques2 tensors row by row."""
    question1 = test_data["question1"]
    question2 = test_data["question2"]
    for row in range(test_length):
        test_ques1[row] = convert_to_vectors(question1[row])
        test_ques2[row] = convert_to_vectors(question2[row])
generate_test_vectors()
# Score every test pair; probabilities >= 0.5 are labelled duplicates.
pred = model.predict([test_ques1,test_ques2],batch_size=4096)
predictions = np.zeros(test_length,dtype='int32')
for i in range(test_length):
    if(pred[i]>=0.5):
        predictions[i] = int(1)
print(len(predictions))
# Write the Kaggle submission file (index column doubles as test_id).
test = pd.DataFrame({'is_duplicate':predictions})
print(len(test))
test.to_csv('predictions.csv',header=True,index_label='test_id')
| [
"keras.layers.merge.Concatenate",
"nltk.corpus.stopwords.words",
"nltk.download",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.random.rand",
"keras.layers.normalization.BatchNormalization",
"nltk.tokenize.word_tokenize",
"gensim.models.KeyedVectors.load_word2vec_format",
"... | [((912, 934), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (925, 934), False, 'import nltk\n'), ((936, 962), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (949, 962), False, 'import nltk\n'), ((1059, 1085), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1074, 1085), False, 'from nltk.corpus import stopwords\n'), ((1115, 1139), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (1126, 1139), True, 'import pandas as pd\n'), ((1465, 1571), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['"""GoogleNews-vectors-negative300.bin.gz"""'], {'binary': '(True)'}), "(\n 'GoogleNews-vectors-negative300.bin.gz', binary=True)\n", (1512, 1571), False, 'import gensim\n'), ((3263, 3337), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_data', 'train_labels'], {'test_size': '(0.02)', 'random_state': '(0)'}), '(train_data, train_labels, test_size=0.02, random_state=0)\n', (3279, 3337), False, 'from sklearn.model_selection import train_test_split\n'), ((3915, 3975), 'numpy.zeros', 'np.zeros', (['(train_length, max_sentence_length, embedding_dim)'], {}), '((train_length, max_sentence_length, embedding_dim))\n', (3923, 3975), True, 'import numpy as np\n'), ((4026, 4086), 'numpy.zeros', 'np.zeros', (['(train_length, max_sentence_length, embedding_dim)'], {}), '((train_length, max_sentence_length, embedding_dim))\n', (4034, 4086), True, 'import numpy as np\n'), ((4137, 4195), 'numpy.zeros', 'np.zeros', (['(val_length, max_sentence_length, embedding_dim)'], {}), '((val_length, max_sentence_length, embedding_dim))\n', (4145, 4195), True, 'import numpy as np\n'), ((4249, 4307), 'numpy.zeros', 'np.zeros', (['(val_length, max_sentence_length, embedding_dim)'], {}), '((val_length, max_sentence_length, embedding_dim))\n', (4257, 4307), True, 'import numpy as np\n'), ((8350, 
8373), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (8361, 8373), True, 'import pandas as pd\n'), ((8524, 8583), 'numpy.zeros', 'np.zeros', (['(test_length, max_sentence_length, embedding_dim)'], {}), '((test_length, max_sentence_length, embedding_dim))\n', (8532, 8583), True, 'import numpy as np\n'), ((8596, 8655), 'numpy.zeros', 'np.zeros', (['(test_length, max_sentence_length, embedding_dim)'], {}), '((test_length, max_sentence_length, embedding_dim))\n', (8604, 8655), True, 'import numpy as np\n'), ((8969, 9005), 'numpy.zeros', 'np.zeros', (['test_length'], {'dtype': '"""int32"""'}), "(test_length, dtype='int32')\n", (8977, 9005), True, 'import numpy as np\n'), ((9122, 9165), 'pandas.DataFrame', 'pd.DataFrame', (["{'is_duplicate': predictions}"], {}), "({'is_duplicate': predictions})\n", (9134, 9165), True, 'import pandas as pd\n'), ((2530, 2564), 're.sub', 're.sub', (['"""can\\\\\'t"""', '"""can not"""', 'text'], {}), '("can\\\\\'t", \'can not\', text)\n', (2536, 2564), False, 'import re\n'), ((2575, 2609), 're.sub', 're.sub', (['"""cannot"""', '"""can not """', 'text'], {}), "('cannot', 'can not ', text)\n", (2581, 2609), False, 'import re\n'), ((2621, 2656), 're.sub', 're.sub', (['"""what\\\\\'s"""', '"""what is"""', 'text'], {}), '("what\\\\\'s", \'what is\', text)\n', (2627, 2656), False, 'import re\n'), ((2667, 2702), 're.sub', 're.sub', (['"""What\\\\\'s"""', '"""what is"""', 'text'], {}), '("What\\\\\'s", \'what is\', text)\n', (2673, 2702), False, 'import re\n'), ((2713, 2745), 're.sub', 're.sub', (['"""\\\\\'ve """', '""" have """', 'text'], {}), '("\\\\\'ve ", \' have \', text)\n', (2719, 2745), False, 'import re\n'), ((2756, 2786), 're.sub', 're.sub', (['"""n\\\\\'t"""', '""" not """', 'text'], {}), '("n\\\\\'t", \' not \', text)\n', (2762, 2786), False, 'import re\n'), ((2797, 2827), 're.sub', 're.sub', (['"""i\\\\\'m"""', '"""i am """', 'text'], {}), '("i\\\\\'m", \'i am \', text)\n', (2803, 2827), False, 'import 
re\n'), ((2838, 2868), 're.sub', 're.sub', (['"""I\\\\\'m"""', '"""i am """', 'text'], {}), '("I\\\\\'m", \'i am \', text)\n', (2844, 2868), False, 'import re\n'), ((2879, 2909), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" are """', 'text'], {}), '("\\\\\'re", \' are \', text)\n', (2885, 2909), False, 'import re\n'), ((2920, 2951), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" would """', 'text'], {}), '("\\\\\'d", \' would \', text)\n', (2926, 2951), False, 'import re\n'), ((2962, 2993), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" will """', 'text'], {}), '("\\\\\'ll", \' will \', text)\n', (2968, 2993), False, 'import re\n'), ((3004, 3031), 're.sub', 're.sub', (['"""\\\\\'s"""', '""" is"""', 'text'], {}), '("\\\\\'s", \' is\', text)\n', (3010, 3031), False, 'import re\n'), ((3082, 3101), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (3095, 3101), False, 'from nltk.tokenize import word_tokenize\n'), ((5614, 5660), 'numpy.zeros', 'np.zeros', (['(max_sentence_length, embedding_dim)'], {}), '((max_sentence_length, embedding_dim))\n', (5622, 5660), True, 'import numpy as np\n'), ((6936, 6985), 'keras.layers.Input', 'Input', ([], {'shape': '(max_sentence_length, embedding_dim)'}), '(shape=(max_sentence_length, embedding_dim))\n', (6941, 6985), False, 'from keras.layers import Input, Embedding, LSTM, Merge, Bidirectional\n'), ((7002, 7051), 'keras.layers.Input', 'Input', ([], {'shape': '(max_sentence_length, embedding_dim)'}), '(shape=(max_sentence_length, embedding_dim))\n', (7007, 7051), False, 'from keras.layers import Input, Embedding, LSTM, Merge, Bidirectional\n'), ((7068, 7106), 'keras.layers.LSTM', 'LSTM', (['n_hidden1'], {'return_sequences': '(True)'}), '(n_hidden1, return_sequences=True)\n', (7072, 7106), False, 'from keras.layers import Input, Embedding, LSTM, Merge, Bidirectional\n'), ((7117, 7156), 'keras.layers.LSTM', 'LSTM', (['n_hidden2'], {'return_sequences': '(False)'}), '(n_hidden2, return_sequences=False)\n', (7121, 
7156), False, 'from keras.layers import Input, Embedding, LSTM, Merge, Bidirectional\n'), ((7382, 7421), 'keras.layers.merge.Concatenate', 'Concatenate', (['[lstm2_ques1, lstm2_ques2]'], {}), '([lstm2_ques1, lstm2_ques2])\n', (7393, 7421), False, 'from keras.layers.merge import concatenate, Concatenate\n'), ((7862, 7917), 'keras.models.Model', 'Model', ([], {'inputs': '[left_input, right_input]', 'outputs': '[pred]'}), '(inputs=[left_input, right_input], outputs=[pred])\n', (7867, 7917), False, 'from keras.models import Model\n'), ((4949, 4978), 'numpy.random.rand', 'np.random.rand', (['embedding_dim'], {}), '(embedding_dim)\n', (4963, 4978), True, 'import numpy as np\n'), ((7470, 7499), 'keras.layers.Dense', 'Dense', (['(200)'], {'activation': '"""relu"""'}), "(200, activation='relu')\n", (7475, 7499), False, 'from keras.layers import LeakyReLU, Dense, Dropout, Lambda\n'), ((7534, 7554), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7552, 7554), False, 'from keras.layers.normalization import BatchNormalization\n'), ((7579, 7591), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (7586, 7591), False, 'from keras.layers import LeakyReLU, Dense, Dropout, Lambda\n'), ((7618, 7646), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (7623, 7646), False, 'from keras.layers import LeakyReLU, Dense, Dropout, Lambda\n'), ((7676, 7696), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7694, 7696), False, 'from keras.layers.normalization import BatchNormalization\n'), ((7721, 7733), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (7728, 7733), False, 'from keras.layers import LeakyReLU, Dense, Dropout, Lambda\n'), ((7760, 7787), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (7765, 7787), False, 'from keras.layers import LeakyReLU, Dense, Dropout, Lambda\n'), 
((7809, 7839), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (7814, 7839), False, 'from keras.layers import LeakyReLU, Dense, Dropout, Lambda\n')] |
'''_____Standard imports_____'''
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
'''_____Project imports_____'''
from toolbox.fits import gauss
def dB_plot(data1, data2=None, arguments=None):
    """Plot one spectrum (or a processed/raw pair) on a log-magnitude scale.

    With only *data1*, a single panel is drawn referenced to the data
    minimum; with *data2* as well, two side-by-side panels are drawn, each
    referenced to its own maximum and sharing the same y-limits.  Blocks
    until a button press, then closes the figure.
    """
    fig = plt.figure(figsize=(15, 6))
    if data2 is None:
        axis = fig.add_subplot(111)
        axis.plot(10 * np.log(data1 / np.min(data1)))
        axis.grid()
        axis.set_ylabel('Magnitude [dB]')
        axis.set_xlabel('Wavenumber k [U.A]')
    else:
        left = fig.add_subplot(121)
        right = fig.add_subplot(122)
        panels = (
            (left, 10 * np.log(data1 / np.max(data1)), 'Processed Aline'),
            (right, 10 * np.log(data2 / np.max(data2)), 'Raw Aline'),
        )
        for axis, curve, title in panels:
            axis.plot(curve)
            axis.set_title(title)
            axis.grid()
            axis.set_ylabel('Magnitude [dB]')
            axis.set_xlabel('Wavenumber k [U.A]')
        right.set_ylim(left.get_ylim())
    plt.waitforbuttonpress()
    plt.close()
def interactive_shift(spectra1, param1, spectra2, param2, arguments=None):
    """Interactively tune the gaussian-fit centre (``param[2]``) of each mirror.

    Replots both spectra after every adjustment and keeps prompting until the
    user enters 0.  Returns the final centres ``(param1[2], param2[2])``.
    """
    shift1_condition = False
    shift2_condition = False
    # --- mirror 1 ---
    while shift1_condition is False:
        shifted_spectra_plots( spectra1,
                                param1,
                                spectra2,
                                param2 )
        shift1 = input("Shift mirror1? [>0:Left, 0:None, <0:Right]")
        # NOTE(review): eval() on raw user input executes arbitrary code;
        # int() or ast.literal_eval would be safer if arithmetic expressions
        # are not actually needed here.
        shift1 = eval(shift1)
        if shift1 == 0:
            shift1_condition = True
        else:
            print(param1[2])  # echo the current centre before shifting
            param1[2] += shift1
            plt.close()
    # --- mirror 2 --- (same loop; the centre is not echoed here)
    while shift2_condition is False:
        shifted_spectra_plots( spectra1,
                                param1,
                                spectra2,
                                param2 )
        shift2 = input("Shift mirror2? [>0:Left, 0:None, <0:Right]")
        shift2 = eval(shift2)
        if shift2 == 0:
            shift2_condition = True
        else:
            param2[2] += shift2
            plt.close()
    return param1[2], param2[2]
def shifted_spectra_plots(spectra1, param1, spectra2, param2):
    """Draw both raw spectra next to their fitted gaussian curves.

    Uses interactive mode so the figure stays responsive while the caller
    keeps prompting the user (see ``interactive_shift``).
    """
    plt.ion()
    fig = plt.figure(figsize=(15, 6))
    panels = (
        (fig.add_subplot(121), spectra1, param1, '1'),
        (fig.add_subplot(122), spectra2, param2, '2'),
    )
    for axis, spectrum, params, tag in panels:
        axis.plot( spectrum, label='Shifted raw' )
        axis.grid()
        axis.set_title('Shifted raw spectra mirror ' + tag)
        axis.plot( gauss(*params), 'r-', label='gaussian fit' )
        axis.grid()
        axis.set_title('Fitted gaussian curve mirror ' + tag)
    print("click the image to exit")
    for axis, _, _, _ in panels:
        axis.legend()
        axis.grid()
    fig.canvas.draw()
def phase_dispersion_plot(exp_dispersion, fit_dispersion):
    """Overlay the fitted and experimental unwrapped-phase dispersion curves.

    Blocks until a button press, then closes the figure.
    """
    figure = plt.figure(figsize=(15, 6))
    axis = figure.add_subplot(111)
    axis.plot(fit_dispersion, label = 'fitted ')
    axis.plot(exp_dispersion,'-',label = 'experimental')
    axis.set_ylabel('Unwrapped phase [U.A]')
    axis.set_title('System phase dispersion')
    plt.grid()
    plt.legend()
    print("click the image to exit")
    plt.waitforbuttonpress()
    plt.close()
def Bscan_plots(Bscan, arguments=None, aline_index=200):
    """Show one raw spectrum, its log-magnitude A-line, and the full B-scan.

    The B-scan image gets interactive Vmin/Vmax contrast sliders and a save
    button (figure written under ``results/``).

    Parameters
    ----------
    Bscan : array-like
        2-D stack of A-line spectra (one row per A-line).
    arguments : optional
        Namespace with an ``input_file`` attribute used to name the saved
        figure; may be None.
    aline_index : int
        Index of the A-line shown in the two top panels (generalises the
        previously hard-coded value 200).
    """
    Bscan = np.array(Bscan)
    dBscan = 10*np.log(Bscan)  # log-magnitude ("dB"-style) scaling
    fig = plt.figure(figsize=(16,10))
    ax0 = fig.add_subplot(221)
    ax0.grid()
    ax0.set_ylabel('Aline depth Intensity [U.A]')
    ax0.set_xlabel('Wavenumber k [U.A]')
    ax0.set_title("Spectra")
    ax0.plot(Bscan[aline_index])
    ax1 = fig.add_subplot(222)
    ax1.grid()
    ax1.set_ylabel('Aline depth Magnitude [dB]')
    ax1.set_xlabel('Wavenumber k [U.A]')
    ax1.set_title("Aline")
    ref = np.min(dBscan[aline_index])  # NOTE(review): computed but never used
    ax1.plot(dBscan[aline_index])
    data = dBscan.T
    ax2 = fig.add_subplot(223)
    l = ax2.imshow(data,
                   cmap = "gray",
                   vmin=None,
                   vmax=None)
    ax2.invert_yaxis()
    ax2.set_title("Processed Bscan")
    # Widget axes, in figure coordinates.
    axVmin = plt.axes([0.6, 0.1, 0.3, 0.03])
    axVmax = plt.axes([0.6, 0.15, 0.3, 0.03])
    axsave = plt.axes([0.7, 0.25, 0.1, 0.075])
    # Slider range: 70% of the minimum up to 120% of the maximum, 100 steps.
    Min, Max = np.min(data)*0.7, np.max(data)*1.2
    Nstep = (Max - Min)/100
    SVmin = Slider(axVmin, 'Vmin', Min, Max, valinit=Min, valstep=Nstep)
    SVmax = Slider(axVmax, 'Vmax', Min, Max, valinit=Max, valstep=Nstep)
    bsave = Button(axsave, 'Save Bscan')
    def update(val):
        # Apply the slider values as the image contrast limits.
        Vmax = SVmax.val
        Vmin = SVmin.val
        l.set_clim(vmin=Vmin, vmax=Vmax)
        fig.canvas.draw_idle()
    def save(event):
        # Save only the B-scan panel, cropped to its axes extent.
        save_dir = "results/"
        extent = ax2.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
        # BUGFIX: `arguments` defaults to None, so dereferencing
        # arguments.input_file crashed the save callback; fall back to a
        # generic name when no namespace was supplied.
        name = arguments.input_file if arguments is not None else "bscan"
        plt.savefig(save_dir + name, bbox_inches=extent)
    bsave.on_clicked(save)
    SVmin.on_changed(update)
    SVmax.on_changed(update)
    plt.show()
def plots_signals(data, sub_data, ref, sample, dark):
    """Overlay the raw interferogram, its noise-subtracted version, and the
    three noise references on a single axis.

    Blocks until a button press, then closes the figure.
    """
    figure = plt.figure()
    axis = figure.add_subplot(111)
    traces = (
        (data, None, 'raw data'),
        (sub_data, 'k', 'substracted raw data'),
        (ref, 'r', 'reference noise'),
        (sample, 'b', 'sample noise'),
        (dark, 'g', 'background noise'),
    )
    for signal, color, label in traces:
        if color is None:
            axis.plot(signal, label=label)
        else:
            axis.plot(signal, color, label=label)
    plt.grid()
    plt.legend()
    print("click the image to exit")
    plt.waitforbuttonpress()
    plt.close()
def plot_klinearization(phase1, phase2, Plin, Pfit=None):
    """Plot both mirrors' phase curves against the target linear phase.

    The optional *Pfit* curve (fitted linear phase) is added when given.
    Blocks until a button press, then closes the figure.
    """
    plt.ion()
    fig = plt.figure(figsize=(8,10))
    axis = fig.add_subplot(111)
    axis.plot(phase1,'r', label='Mirror1')
    axis.plot(phase2,'b', label='Mirror2')
    axis.plot(Plin, 'k', label='Linear phase')
    if Pfit is not None:
        axis.plot(Pfit, 'g', label='Fitted linear phase')
    plt.grid()
    axis.set_ylabel('Phase [rad]')
    axis.set_xlabel('Points space [U.A]')
    plt.legend()
    print("click the image to exit")
    plt.waitforbuttonpress()
    plt.close()
class Bscan_vizualiser(object):
    """Interactive side-by-side viewer for LP01 / LP11 B-scan stacks.

    Creates one grayscale image panel per mode, a shared pair of contrast
    sliders, Next/Previous buttons to step through the frames, and a save
    button per panel (figures are written under ``results/``).
    """
    def __init__(self, fig1, Bscan_LP01, Bscan_LP11, arguments=None):
        # fig1 is stored but unused below; the viewer builds its own figure
        # in Bscan_lanterne_plots().
        self.fig1 = fig1
        self.Bscan_LP01 = Bscan_LP01
        self.Bscan_LP11 = Bscan_LP11
        self.arguments = arguments
    def update_intensity(self, event):
        # Slider callback: apply the slider values as image contrast limits.
        Vmax_LP11 = self.SVmax_LP11.val
        Vmin_LP11 = self.SVmin_LP11.val
        self.l_LP11.set_clim(vmin=Vmin_LP11, vmax=Vmax_LP11)
        # NOTE(review): the LP11 sliders also drive the LP01 panel — the
        # dedicated LP01 sliders are commented out in Bscan_lanterne_plots.
        # Confirm this sharing is intentional.
        Vmax_LP01 = self.SVmax_LP11.val
        Vmin_LP01 = self.SVmin_LP11.val
        self.l_LP01.set_clim(vmin=Vmin_LP01, vmax=Vmax_LP01)
        self.fig.canvas.draw_idle()
    def next(self, event):
        # Button callback: display the next frame of both stacks.
        # NOTE(review): no bounds check — stepping past the last frame
        # raises IndexError.
        self.N_plot += 1
        self.l_LP01.set_data(self.dBscan_LP01[self.N_plot].T)
        self.l_LP11.set_data(self.dBscan_LP11[self.N_plot].T)
        self.fig.canvas.draw_idle()
    def previous(self, event):
        # Button callback: display the previous frame of both stacks.
        # NOTE(review): below index 0 the negative index silently wraps to
        # the end of the stack.
        self.N_plot -= 1
        self.l_LP01.set_data(self.dBscan_LP01[self.N_plot].T)
        self.l_LP11.set_data(self.dBscan_LP11[self.N_plot].T)
        self.fig.canvas.draw_idle()
    def save_LP11(self, event):
        # Save only the LP11 panel, cropped to its axes extent.
        save_dir = "results/"
        extent = self.ax2.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
        plt.savefig(save_dir + "figure_" + 'LP11', bbox_inches=extent)
    def save_LP01(self, event):
        # Save only the LP01 panel, cropped to its axes extent.
        save_dir = "results/"
        extent = self.ax1.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
        plt.savefig(save_dir + "figure_" + 'LP01', bbox_inches=extent)
    def Bscan_lanterne_plots(self):
        """Build the figure, widgets and callbacks, then show the viewer."""
        self.Bscan_LP01 = np.array(self.Bscan_LP01)
        self.Bscan_LP11 = np.array(self.Bscan_LP11)
        # Log-magnitude ("dB"-style) images.
        self.dBscan_LP11 = 10*np.log(self.Bscan_LP11)
        self.dBscan_LP01 = 10*np.log(self.Bscan_LP01)
        self.fig = plt.figure(figsize=(16,10))
        self.N_plot = 0
        # Widget axes, in figure coordinates.
        axVmin_intensity = plt.axes([0.6, 0.1, 0.3, 0.03])
        axVmax_intensity = plt.axes([0.6, 0.15, 0.3, 0.03])
        axsave_LP11 = plt.axes([0.7, 0.25, 0.1, 0.075])
        axsave_LP01 = plt.axes([0.7, 0.80, 0.1, 0.075])
        axnext = plt.axes([0.8, 0.5, 0.1, 0.075])
        axprevious = plt.axes([0.6, 0.5, 0.1, 0.075])
        # Slider range: 70% of the minimum up to 120% of the maximum, 100 steps.
        Min_LP11, Max_LP11 = np.min(self.dBscan_LP11)*0.7, np.max(self.dBscan_LP11)*1.2
        Min_LP01, Max_LP01 = np.min(self.dBscan_LP01)*0.7, np.max(self.dBscan_LP01)*1.2
        Nstep_LP11 = (Max_LP11 - Min_LP11)/100
        Nstep_LP01 = (Max_LP01 - Min_LP01)/100
        self.SVmin_LP11 = Slider(axVmin_intensity, 'Vmin', Min_LP11, Max_LP11, valinit=Min_LP11, valstep=Nstep_LP11)
        self.SVmax_LP11 = Slider(axVmax_intensity, 'Vmax', Min_LP11, Max_LP11, valinit=Max_LP11, valstep=Nstep_LP11)
        bsave_LP11 = Button(axsave_LP11, 'Save Bscan')
        bsave_LP01 = Button(axsave_LP01, 'Save Bscan')
        self.Bnext = Button(axnext, 'Next')
        self.Bprevious = Button(axprevious, 'Previous')
        self.ax1 = self.fig.add_subplot(221)
        self.l_LP01 = self.ax1.imshow(self.dBscan_LP01[0].T,
                        cmap = "gray",
                        vmin=None,
                        vmax=None)
        self.ax1.invert_yaxis()
        self.ax1.set_title("Processed Bscan LP01")
        self.ax2 = self.fig.add_subplot(223)
        self.l_LP11 = self.ax2.imshow(self.dBscan_LP11[0].T,
                        cmap = "gray",
                        vmin=None,
                        vmax=None)
        self.ax2.invert_yaxis()
        self.ax2.set_title("Processed Bscan LP11")
        # Wire up widget callbacks.
        bsave_LP11.on_clicked(self.save_LP11)
        self.SVmin_LP11.on_changed(self.update_intensity)
        self.SVmax_LP11.on_changed(self.update_intensity)
        bsave_LP01.on_clicked(self.save_LP01)
        #self.SVmin_LP01.on_changed(self.update_LP01)
        #self.SVmax_LP01.on_changed(self.update_LP01)
        self.Bnext.on_clicked(self.next)
        self.Bprevious.on_clicked(self.previous)
        plt.show()
# -
| [
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.log",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.widgets.Button",
"matplotlib.pyplot.axes",
"toolbox.fits.gauss",
"numpy.min",
"matplotlib... | [((271, 298), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (281, 298), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1163), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (1161, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1179), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1177, 1179), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2313), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (2311, 2313), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (2334, 2351), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3116), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)'}), '(figsize=(15, 6))\n', (3099, 3116), True, 'import matplotlib.pyplot as plt\n'), ((3347, 3357), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3355, 3357), True, 'import matplotlib.pyplot as plt\n'), ((3362, 3374), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3372, 3374), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3441), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (3439, 3441), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3457), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3455, 3457), True, 'import matplotlib.pyplot as plt\n'), ((3513, 3528), 'numpy.array', 'np.array', (['Bscan'], {}), '(Bscan)\n', (3521, 3528), True, 'import numpy as np\n'), ((3570, 3598), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (3580, 3598), True, 'import matplotlib.pyplot as plt\n'), ((3967, 3986), 'numpy.min', 'np.min', (['dBscan[200]'], {}), '(dBscan[200])\n', (3973, 3986), True, 'import numpy as np\n'), ((4259, 4290), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.6, 0.1, 0.3, 0.03]'], {}), '([0.6, 0.1, 0.3, 
0.03])\n', (4267, 4290), True, 'import matplotlib.pyplot as plt\n'), ((4304, 4336), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.6, 0.15, 0.3, 0.03]'], {}), '([0.6, 0.15, 0.3, 0.03])\n', (4312, 4336), True, 'import matplotlib.pyplot as plt\n'), ((4350, 4383), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.7, 0.25, 0.1, 0.075]'], {}), '([0.7, 0.25, 0.1, 0.075])\n', (4358, 4383), True, 'import matplotlib.pyplot as plt\n'), ((4476, 4536), 'matplotlib.widgets.Slider', 'Slider', (['axVmin', '"""Vmin"""', 'Min', 'Max'], {'valinit': 'Min', 'valstep': 'Nstep'}), "(axVmin, 'Vmin', Min, Max, valinit=Min, valstep=Nstep)\n", (4482, 4536), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((4549, 4609), 'matplotlib.widgets.Slider', 'Slider', (['axVmax', '"""Vmax"""', 'Min', 'Max'], {'valinit': 'Max', 'valstep': 'Nstep'}), "(axVmax, 'Vmax', Min, Max, valinit=Max, valstep=Nstep)\n", (4555, 4609), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((4622, 4650), 'matplotlib.widgets.Button', 'Button', (['axsave', '"""Save Bscan"""'], {}), "(axsave, 'Save Bscan')\n", (4628, 4650), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((5098, 5108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5106, 5108), True, 'import matplotlib.pyplot as plt\n'), ((5176, 5188), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5186, 5188), True, 'import matplotlib.pyplot as plt\n'), ((5466, 5476), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5474, 5476), True, 'import matplotlib.pyplot as plt\n'), ((5481, 5493), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5491, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5535, 5559), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (5557, 5559), True, 'import matplotlib.pyplot as plt\n'), ((5564, 5575), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5573, 5575), True, 'import matplotlib.pyplot as 
plt\n'), ((5643, 5652), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5650, 5652), True, 'import matplotlib.pyplot as plt\n'), ((5663, 5690), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 10)'}), '(figsize=(8, 10))\n', (5673, 5690), True, 'import matplotlib.pyplot as plt\n'), ((5933, 5943), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5941, 5943), True, 'import matplotlib.pyplot as plt\n'), ((6021, 6033), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6031, 6033), True, 'import matplotlib.pyplot as plt\n'), ((6075, 6099), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (6097, 6099), True, 'import matplotlib.pyplot as plt\n'), ((6104, 6115), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6113, 6115), True, 'import matplotlib.pyplot as plt\n'), ((371, 384), 'numpy.min', 'np.min', (['data1'], {}), '(data1)\n', (377, 384), True, 'import numpy as np\n'), ((644, 657), 'numpy.max', 'np.max', (['data1'], {}), '(data1)\n', (650, 657), True, 'import numpy as np\n'), ((673, 686), 'numpy.max', 'np.max', (['data2'], {}), '(data2)\n', (679, 686), True, 'import numpy as np\n'), ((1761, 1772), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1770, 1772), True, 'import matplotlib.pyplot as plt\n'), ((2189, 2200), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2198, 2200), True, 'import matplotlib.pyplot as plt\n'), ((2542, 2556), 'toolbox.fits.gauss', 'gauss', (['*param1'], {}), '(*param1)\n', (2547, 2556), False, 'from toolbox.fits import gauss\n'), ((2780, 2794), 'toolbox.fits.gauss', 'gauss', (['*param2'], {}), '(*param2)\n', (2785, 2794), False, 'from toolbox.fits import gauss\n'), ((3545, 3558), 'numpy.log', 'np.log', (['Bscan'], {}), '(Bscan)\n', (3551, 3558), True, 'import numpy as np\n'), ((4941, 5005), 'matplotlib.pyplot.savefig', 'plt.savefig', (['(save_dir + arguments.input_file)'], {'bbox_inches': 'extent'}), '(save_dir + arguments.input_file, 
bbox_inches=extent)\n', (4952, 5005), True, 'import matplotlib.pyplot as plt\n'), ((7316, 7378), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + 'figure_' + 'LP11')"], {'bbox_inches': 'extent'}), "(save_dir + 'figure_' + 'LP11', bbox_inches=extent)\n", (7327, 7378), True, 'import matplotlib.pyplot as plt\n'), ((7546, 7608), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + 'figure_' + 'LP01')"], {'bbox_inches': 'extent'}), "(save_dir + 'figure_' + 'LP01', bbox_inches=extent)\n", (7557, 7608), True, 'import matplotlib.pyplot as plt\n'), ((7674, 7699), 'numpy.array', 'np.array', (['self.Bscan_LP01'], {}), '(self.Bscan_LP01)\n', (7682, 7699), True, 'import numpy as np\n'), ((7726, 7751), 'numpy.array', 'np.array', (['self.Bscan_LP11'], {}), '(self.Bscan_LP11)\n', (7734, 7751), True, 'import numpy as np\n'), ((7881, 7909), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (7891, 7909), True, 'import matplotlib.pyplot as plt\n'), ((7963, 7994), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.6, 0.1, 0.3, 0.03]'], {}), '([0.6, 0.1, 0.3, 0.03])\n', (7971, 7994), True, 'import matplotlib.pyplot as plt\n'), ((8022, 8054), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.6, 0.15, 0.3, 0.03]'], {}), '([0.6, 0.15, 0.3, 0.03])\n', (8030, 8054), True, 'import matplotlib.pyplot as plt\n'), ((8077, 8110), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.7, 0.25, 0.1, 0.075]'], {}), '([0.7, 0.25, 0.1, 0.075])\n', (8085, 8110), True, 'import matplotlib.pyplot as plt\n'), ((8134, 8166), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.7, 0.8, 0.1, 0.075]'], {}), '([0.7, 0.8, 0.1, 0.075])\n', (8142, 8166), True, 'import matplotlib.pyplot as plt\n'), ((8186, 8218), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.8, 0.5, 0.1, 0.075]'], {}), '([0.8, 0.5, 0.1, 0.075])\n', (8194, 8218), True, 'import matplotlib.pyplot as plt\n'), ((8240, 8272), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.6, 0.5, 0.1, 0.075]'], {}), '([0.6, 0.5, 0.1, 0.075])\n', 
(8248, 8272), True, 'import matplotlib.pyplot as plt\n'), ((8572, 8666), 'matplotlib.widgets.Slider', 'Slider', (['axVmin_intensity', '"""Vmin"""', 'Min_LP11', 'Max_LP11'], {'valinit': 'Min_LP11', 'valstep': 'Nstep_LP11'}), "(axVmin_intensity, 'Vmin', Min_LP11, Max_LP11, valinit=Min_LP11,\n valstep=Nstep_LP11)\n", (8578, 8666), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((8689, 8783), 'matplotlib.widgets.Slider', 'Slider', (['axVmax_intensity', '"""Vmax"""', 'Min_LP11', 'Max_LP11'], {'valinit': 'Max_LP11', 'valstep': 'Nstep_LP11'}), "(axVmax_intensity, 'Vmax', Min_LP11, Max_LP11, valinit=Max_LP11,\n valstep=Nstep_LP11)\n", (8695, 8783), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((8801, 8834), 'matplotlib.widgets.Button', 'Button', (['axsave_LP11', '"""Save Bscan"""'], {}), "(axsave_LP11, 'Save Bscan')\n", (8807, 8834), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((8857, 8890), 'matplotlib.widgets.Button', 'Button', (['axsave_LP01', '"""Save Bscan"""'], {}), "(axsave_LP01, 'Save Bscan')\n", (8863, 8890), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((8913, 8935), 'matplotlib.widgets.Button', 'Button', (['axnext', '"""Next"""'], {}), "(axnext, 'Next')\n", (8919, 8935), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((8961, 8991), 'matplotlib.widgets.Button', 'Button', (['axprevious', '"""Previous"""'], {}), "(axprevious, 'Previous')\n", (8967, 8991), False, 'from matplotlib.widgets import Slider, Button, RadioButtons\n'), ((10070, 10080), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10078, 10080), True, 'import matplotlib.pyplot as plt\n'), ((404, 424), 'numpy.log', 'np.log', (['(data1 / ref1)'], {}), '(data1 / ref1)\n', (410, 424), True, 'import numpy as np\n'), ((707, 727), 'numpy.log', 'np.log', (['(data1 / ref1)'], {}), '(data1 / ref1)\n', (713, 727), True, 'import numpy as np\n'), ((745, 765), 'numpy.log', 
'np.log', (['(data2 / ref2)'], {}), '(data2 / ref2)\n', (751, 765), True, 'import numpy as np\n'), ((4400, 4412), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (4406, 4412), True, 'import numpy as np\n'), ((4418, 4430), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (4424, 4430), True, 'import numpy as np\n'), ((7783, 7806), 'numpy.log', 'np.log', (['self.Bscan_LP11'], {}), '(self.Bscan_LP11)\n', (7789, 7806), True, 'import numpy as np\n'), ((7837, 7860), 'numpy.log', 'np.log', (['self.Bscan_LP01'], {}), '(self.Bscan_LP01)\n', (7843, 7860), True, 'import numpy as np\n'), ((8303, 8327), 'numpy.min', 'np.min', (['self.dBscan_LP11'], {}), '(self.dBscan_LP11)\n', (8309, 8327), True, 'import numpy as np\n'), ((8333, 8357), 'numpy.max', 'np.max', (['self.dBscan_LP11'], {}), '(self.dBscan_LP11)\n', (8339, 8357), True, 'import numpy as np\n'), ((8391, 8415), 'numpy.min', 'np.min', (['self.dBscan_LP01'], {}), '(self.dBscan_LP01)\n', (8397, 8415), True, 'import numpy as np\n'), ((8421, 8445), 'numpy.max', 'np.max', (['self.dBscan_LP01'], {}), '(self.dBscan_LP01)\n', (8427, 8445), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.features.video_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
tf.compat.v1.enable_eager_execution()
class VideoFeatureTest(test_utils.FeatureExpectationsTestCase):
  """Expectation tests for the `features.Video` feature connector."""

  @property
  def expectations(self):
    # One random clip: 128 frames of 64x64 RGB, uint8 pixel values.
    clip = np.random.randint(256, size=(128, 64, 64, 3), dtype=np.uint8)
    # Feeding a numpy array should round-trip unchanged.
    numpy_item = test_utils.FeatureExpectationItem(
        value=clip,
        expected=clip,
    )
    video_expectation = test_utils.FeatureExpectation(
        name='video',
        feature=features.Video(shape=(None, 64, 64, 3)),
        shape=(None, 64, 64, 3),
        dtype=tf.uint8,
        tests=[
            # Numpy array
            numpy_item,
            # File path (Gif)
            # File path (.mp4)
        ],
    )
    return [video_expectation]
if __name__ == '__main__':
  # Run the feature-expectation test suite via the shared test runner.
  test_utils.main()
| [
"tensorflow_datasets.core.test_utils.FeatureExpectationItem",
"tensorflow_datasets.core.features.Video",
"numpy.random.randint",
"tensorflow.compat.v1.enable_eager_execution",
"tensorflow_datasets.core.test_utils.main"
] | [((927, 964), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (962, 964), True, 'import tensorflow as tf\n'), ((1680, 1697), 'tensorflow_datasets.core.test_utils.main', 'test_utils.main', ([], {}), '()\n', (1695, 1697), False, 'from tensorflow_datasets.core import test_utils\n'), ((1086, 1147), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(128, 64, 64, 3)', 'dtype': 'np.uint8'}), '(256, size=(128, 64, 64, 3), dtype=np.uint8)\n', (1103, 1147), True, 'import numpy as np\n'), ((1247, 1286), 'tensorflow_datasets.core.features.Video', 'features.Video', ([], {'shape': '(None, 64, 64, 3)'}), '(shape=(None, 64, 64, 3))\n', (1261, 1286), False, 'from tensorflow_datasets.core import features\n'), ((1419, 1487), 'tensorflow_datasets.core.test_utils.FeatureExpectationItem', 'test_utils.FeatureExpectationItem', ([], {'value': 'np_video', 'expected': 'np_video'}), '(value=np_video, expected=np_video)\n', (1452, 1487), False, 'from tensorflow_datasets.core import test_utils\n')] |
#Copyright (C) 2021 Intel Corporation
#SPDX-License-Identifier: BSD-3-Clause
import os
import numpy as np
import tensorflow.keras as keras
import tensorflow.keras.datasets.mnist as mnist
import matplotlib.pyplot as plt
class DatasetUtil:
    """This class is a convenience utility to fetch MNIST data using Keras
    API, save the data in compressed NumPy files (.npz extension), and save
    MNIST digits as PNG files after reading them one by one from NumPy
    compressed archive (.npz extension)."""
    def __init__(self, **kwargs):
        """Load MNIST via Keras and optionally pre-process it.

        Keyword Args:
            dataset_path: directory where the .npz archives are written.
            dataset_type: 'train' or 'test'; selects the split used by the
                image/label export methods.
            first_image_idx: index of the first digit to export.
            total_num_images: number of digits/labels to export.
            normalize_input: if True, scale pixels to [-0.5, 0.5].
            flatten_input: if True, reshape 28x28 images to 784-vectors.
            one_hot_labels: if True, one-hot encode the class labels.
        """
        self.dataset_path = kwargs.pop('dataset_path', '../datasets/MNIST')
        self.dataset_type = kwargs.pop('dataset_type', 'train')
        self.first_image_idx = kwargs.pop('first_image_idx', 0)
        self.total_num_images = kwargs.pop('total_num_images', 25)
        self.normalize_x = kwargs.pop('normalize_input', True)
        self.flatten_x = kwargs.pop('flatten_input', True)
        self.one_hot_y = kwargs.pop('one_hot_labels', True)
        (self.x_train, self.y_train), (self.x_test, self.y_test) = \
            mnist.load_data()
        if self.flatten_x:
            # MNIST has 60000 train / 10000 test images of 28*28 = 784 pixels.
            self.x_train = self.x_train.reshape((60000, 784))
            self.x_test = self.x_test.reshape((10000, 784))
        if self.normalize_x:
            # Map uint8 [0, 255] to float [-0.5, 0.5].
            self.x_train = (self.x_train / 255) - 0.5
            self.x_test = (self.x_test / 255) - 0.5
        if self.one_hot_y:
            self.y_train = keras.utils.to_categorical(self.y_train,
                                                        num_classes=10)
            self.y_test = keras.utils.to_categorical(self.y_test,
                                                       num_classes=10)
    def save_npz(self):
        """Saves training and test data in separate NumPy compressed files (
        .npz extension)"""
        if not os.path.exists(self.dataset_path):
            print(f'Creating {os.path.realpath(self.dataset_path)}')
            os.makedirs(self.dataset_path, exist_ok=True)
        else:
            print(f'Dataset directory {os.path.realpath(self.dataset_path)} '
                  f'already exists')
        # arr_0 keeps the default key that np.load users expect.
        np.savez_compressed(os.path.join(self.dataset_path, 'x_train.npz'),
                            arr_0=self.x_train)
        np.savez_compressed(os.path.join(self.dataset_path, 'y_train.npz'),
                            arr_0=self.y_train)
        np.savez_compressed(os.path.join(self.dataset_path, 'x_test.npz'),
                            arr_0=self.x_test)
        np.savez_compressed(os.path.join(self.dataset_path, 'y_test.npz'),
                            arr_0=self.y_test)
    def save_digit_images(self):
        """Save individual digits as PNG files under
        <dataset_path>/../<dataset_type>_images/, one file per digit."""
        if self.dataset_type == 'train':
            x = self.x_train
        elif self.dataset_type == 'test':
            x = self.x_test
        else:
            raise ValueError('Invalid dataset type provided. Dataset type '
                             'should be test or train')
        img_dir = os.path.realpath(self.dataset_path + '/../' +
                                   self.dataset_type + '_images')
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        for j in range(self.first_image_idx, self.first_image_idx +
                       self.total_num_images):
            img = x[j]
            if img.ndim == 1:
                # Bug fix: a flattened row must become a square 2-D image
                # (28x28 for MNIST), not a <pixels>x1 strip. The old code
                # reshaped to (x.shape[1], 1) which produced 784x1 images
                # and crashed when flatten_input=False.
                side = round(img.size ** 0.5)
                img = img.reshape(side, side)
            plt.imsave(img_dir + '/' + str(j) + '.png', img)
    def save_labels_txt(self):
        """Save the selected range of labels as a plain-text file
        (<dataset_type>_labels.txt) in the current working directory."""
        if self.dataset_type == 'train':
            y = self.y_train
        elif self.dataset_type == 'test':
            y = self.y_test
        else:
            raise ValueError('Invalid dataset type provided. Dataset type '
                             'should be test or train')
        label_file_name = self.dataset_type + '_labels.txt'
        labels = y[self.first_image_idx:self.first_image_idx +
                   self.total_num_images, :]
        np.savetxt(label_file_name, labels, fmt='%d')
if __name__ == '__main__':
    # Export the first 100 digits of the MNIST *test* split:
    # compressed .npz archives, per-digit PNG images, and a label text file.
    db = DatasetUtil(dataset_path='./MNIST',
                     dataset_type='test',
                     first_image_idx=0,
                     total_num_images=100)
    db.save_npz()
    db.save_digit_images()
    db.save_labels_txt()
| [
"os.path.exists",
"tensorflow.keras.utils.to_categorical",
"os.makedirs",
"tensorflow.keras.datasets.mnist.load_data",
"os.path.join",
"os.path.realpath",
"numpy.savetxt"
] | [((1078, 1095), 'tensorflow.keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (1093, 1095), True, 'import tensorflow.keras.datasets.mnist as mnist\n'), ((2948, 3024), 'os.path.realpath', 'os.path.realpath', (["(self.dataset_path + '/../' + self.dataset_type + '_images')"], {}), "(self.dataset_path + '/../' + self.dataset_type + '_images')\n", (2964, 3024), False, 'import os\n'), ((3875, 3920), 'numpy.savetxt', 'np.savetxt', (['label_file_name', 'labels'], {'fmt': '"""%d"""'}), "(label_file_name, labels, fmt='%d')\n", (3885, 3920), True, 'import numpy as np\n'), ((1436, 1492), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['self.y_train'], {'num_classes': '(10)'}), '(self.y_train, num_classes=10)\n', (1462, 1492), True, 'import tensorflow.keras as keras\n'), ((1573, 1628), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['self.y_test'], {'num_classes': '(10)'}), '(self.y_test, num_classes=10)\n', (1599, 1628), True, 'import tensorflow.keras as keras\n'), ((1826, 1859), 'os.path.exists', 'os.path.exists', (['self.dataset_path'], {}), '(self.dataset_path)\n', (1840, 1859), False, 'import os\n'), ((1942, 1987), 'os.makedirs', 'os.makedirs', (['self.dataset_path'], {'exist_ok': '(True)'}), '(self.dataset_path, exist_ok=True)\n', (1953, 1987), False, 'import os\n'), ((2146, 2192), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""x_train.npz"""'], {}), "(self.dataset_path, 'x_train.npz')\n", (2158, 2192), False, 'import os\n'), ((2270, 2316), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""y_train.npz"""'], {}), "(self.dataset_path, 'y_train.npz')\n", (2282, 2316), False, 'import os\n'), ((2394, 2439), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""x_test.npz"""'], {}), "(self.dataset_path, 'x_test.npz')\n", (2406, 2439), False, 'import os\n'), ((2516, 2561), 'os.path.join', 'os.path.join', (['self.dataset_path', '"""y_test.npz"""'], {}), "(self.dataset_path, 
'y_test.npz')\n", (2528, 2561), False, 'import os\n'), ((3075, 3098), 'os.path.exists', 'os.path.exists', (['img_dir'], {}), '(img_dir)\n', (3089, 3098), False, 'import os\n'), ((3112, 3132), 'os.makedirs', 'os.makedirs', (['img_dir'], {}), '(img_dir)\n', (3123, 3132), False, 'import os\n'), ((1891, 1926), 'os.path.realpath', 'os.path.realpath', (['self.dataset_path'], {}), '(self.dataset_path)\n', (1907, 1926), False, 'import os\n'), ((2041, 2076), 'os.path.realpath', 'os.path.realpath', (['self.dataset_path'], {}), '(self.dataset_path)\n', (2057, 2076), False, 'import os\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: an implementation of a deep learning recommendation model (DLRM)
# The model input consists of dense and sparse features. The former is a vector
# of floating point values. The latter is a list of sparse indices into
# embedding tables, which consist of vectors of floating point values.
# The selected vectors are passed to mlp networks denoted by triangles,
# in some cases the vectors are interacted through operators (Ops).
#
# output:
# vector of values
# model: |
# /\
# /__\
# |
# _____________________> Op <___________________
# / | \
# /\ /\ /\
# /__\ /__\ ... /__\
# | | |
# | Op Op
# | ____/__\_____ ____/__\____
# | |_Emb_|____|__| ... |_Emb_|__|___|
# input:
# [ dense features ] [sparse indices] , ..., [sparse indices]
#
# More precise definition of model layers:
# 1) fully connected layers of an mlp
# z = f(y)
# y = Wx + b
#
# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])
# z = Op(e1,...,ek)
# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]
#
# 3) Operator Op can be one of the following
# Sum(e1,...,ek) = e1 + ... + ek
# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]
# Cat(e1,...,ek) = [e1', ..., ek']'
# where ' denotes transpose operation
#
# References:
# [1] <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, "Deep Learning Recommendation Model for Personalization and
# Recommendation Systems", CoRR, arXiv:1906.00091, 2019
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
import builtins
import functools
from itertools import repeat
# import bisect
# import shutil
import time
import json
import os
import copy
import math
import subprocess
# data generation
import dlrm_data_pytorch as dp
# numpy
import numpy as np
import multiprocessing as mp
from multiprocessing import Process, Pool, Manager, Queue, Lock, current_process
from multiprocessing import shared_memory
import pandas as pd
import copy
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# pytorch
import torch
import torch.nn as nn
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
def dash_separated_ints(value):
    """argparse `type=` validator for a dash-separated list of ints.

    Returns the original string unchanged (actual parsing happens later);
    raises argparse.ArgumentTypeError if any component is not an integer.
    """
    # Imported locally: the top-level `import argparse` in this file only
    # runs under `if __name__ == "__main__"`, so without this the validator
    # raised NameError (not ArgumentTypeError) when the module was imported.
    import argparse
    vals = value.split('-')
    for val in vals:
        try:
            int(val)
        except ValueError:
            raise argparse.ArgumentTypeError(
                "%s is not a valid dash separated list of ints" % value)
    return value
def dash_separated_floats(value):
    """argparse `type=` validator for a dash-separated list of floats.

    Returns the original string unchanged (actual parsing happens later);
    raises argparse.ArgumentTypeError if any component is not a float.
    """
    # Imported locally: the top-level `import argparse` in this file only
    # runs under `if __name__ == "__main__"`, so without this the validator
    # raised NameError (not ArgumentTypeError) when the module was imported.
    import argparse
    vals = value.split('-')
    for val in vals:
        try:
            float(val)
        except ValueError:
            raise argparse.ArgumentTypeError(
                "%s is not a valid dash separated list of floats" % value)
    return value
if __name__ == "__main__":
    ### import packages ###
    import sys
    import argparse
    ### parse arguments ###
    # CLI mirrors the upstream DLRM training script; only a subset of these
    # flags is actually used by this input-profiling variant.
    parser = argparse.ArgumentParser(
        description="Train Deep Learning Recommendation Model (DLRM)"
    )
    # model related parameters
    parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
    parser.add_argument(
        "--arch-embedding-size", type=dash_separated_ints, default="4-3-2")
    # j will be replaced with the table number
    parser.add_argument(
        "--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
    parser.add_argument(
        "--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
    parser.add_argument(
        "--arch-interaction-op", type=str, choices=['dot', 'cat'], default="dot")
    parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
    # data
    parser.add_argument(
        "--data-generation", type=str, default="random"
    ) # synthetic or dataset
    parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
    parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
    parser.add_argument("--raw-data-file", type=str, default="")
    parser.add_argument("--processed-data-file", type=str, default="")
    parser.add_argument("--data-randomize", type=str, default="total") # or day or none
    parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
    parser.add_argument("--max-ind-range", type=int, default=-1)
    parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
    parser.add_argument("--num-indices-per-lookup", type=int, default=10)
    parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
    parser.add_argument("--num-workers", type=int, default=0)
    parser.add_argument("--memory-map", action="store_true", default=False)
    parser.add_argument("--dataset-multiprocessing", action="store_true", default=False,
                        help="The Kaggle dataset can be multiprocessed in an environment \
                        with more than 7 CPU cores and more than 20 GB of memory. \n \
                        The Terabyte dataset can be multiprocessed in an environment \
                        with more than 24 CPU cores and at least 1 TB of memory.")
    # mlperf logging (disables other output and stops early)
    parser.add_argument("--mlperf-logging", action="store_true", default=False)
    # stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
    parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
    # stop at target AUC Terabyte (no subsampling) 0.8025
    parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
    parser.add_argument("--mlperf-bin-loader", action='store_true', default=False)
    parser.add_argument("--mlperf-bin-shuffle", action='store_true', default=False)
    # training
    parser.add_argument("--mini-batch-size", type=int, default=1)
    # debugging and profiling
    parser.add_argument("--print-freq", type=int, default=1)
    parser.add_argument("--test-freq", type=int, default=-1)
    parser.add_argument("--test-mini-batch-size", type=int, default=1)
    parser.add_argument("--test-num-workers", type=int, default=0)
    parser.add_argument("--print-time", action="store_true", default=False)
    parser.add_argument("--debug-mode", action="store_true", default=False)
    parser.add_argument("--enable-profiling", action="store_true", default=False)
    parser.add_argument("--plot-compute-graph", action="store_true", default=False)
    # Input Profiling
    # Percentage Threshold
    parser.add_argument("--hot-emb-gpu-mem", type=int, default=268435456, help="GPU memory for hot embeddings") #536870912 (512MB), 268435456 (256MB), 134217728 (128MB)
    parser.add_argument("--ip-sampling-rate", type=int, default=5, help="Input sampling rate (in %)")
    args = parser.parse_args()
    ### main loop ###
    def time_wrap():
        # Wall-clock timestamp helper used to time the profiling phase.
        return time.time()
    # Using CPU only for input profiling
    device = torch.device("cpu")
    print("Using CPU...")
    # Input Profiling for datasets only
    train_data, train_ld = dp.make_alibaba_data_and_loader(args)
    # Per-table embedding row counts, parsed from e.g. "4-3-2".
    # NOTE(review): np.fromstring is deprecated for text parsing in modern
    # NumPy; np.fromstring(s, dtype=int, sep="-") still yields [4, 3, 2] here.
    ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
    # enforce maximum limit on number of vectors per embedding
    if args.max_ind_range > 0:
        ln_emb = np.array(list(map(
            lambda x: x if x < args.max_ind_range else args.max_ind_range,
            ln_emb
        )))
    # Materialize the dataset as a plain list of [sparse indices, dense, label].
    train = []
    for i, train_tuple in enumerate(train_data):
        lS_i, X, T = train_tuple
        train.append([lS_i, X, T])
    train = np.array(train, dtype = object)
    train = train.tolist()
    # Per-sample byte footprint, used to size the shared-memory segments.
    X_bytes = train[0][1].nbytes
    lS_i_bytes = train[0][0].nbytes
    T_bytes = train[0][2].nbytes
    input_bytes = X_bytes + lS_i_bytes + T_bytes
    # Shared Memories for Multiprocessing based final input classification
    # NOTE(review): these are object-dtype arrays over a SharedMemory buffer;
    # object arrays store references, so true cross-process sharing relies on
    # fork semantics here — verify on the target platform.
    shm_train_hot = shared_memory.SharedMemory(create = True, size = input_bytes * len(train))
    train_hot_array = np.ndarray(len(train), dtype = object, buffer = shm_train_hot.buf)
    shm_train_normal = shared_memory.SharedMemory(create = True, size = input_bytes * len(train))
    train_normal_array = np.ndarray(len(train), dtype = object, buffer = shm_train_normal.buf)
    def single_process_ip_classification(train_data, hot_emb_dict, train_hot_array, train_normal_array, chunksize):
        """Worker: classify each sample in this process's chunk as hot/normal.

        A sample is "hot" only if EVERY sparse index it touches is present in
        hot_emb_dict; its indices are then remapped to the hot-table ids and
        the sample is written into train_hot_array. Otherwise the unmodified
        sample goes into train_normal_array. Results are written at offset
        i*chunksize (i = worker id, taken from the process name).
        """
        hot_ctr = 0
        normal_ctr = 0
        i = int(current_process().name)
        print("Running process : ", int(current_process().name), " with pid : ", os.getpid())
        for a, train_tuple in enumerate(train_data):
            lS_i = []
            for j, lS_i_row in enumerate(train_tuple[0]):
                lS_i_t = []
                for k, lS_i_index in enumerate(lS_i_row):
                    # Remap (table, index) to its hot-embedding slot if present.
                    if (j, int(lS_i_index)) in hot_emb_dict[j].keys():
                        lS_i_t.append(hot_emb_dict[j][(j, int(lS_i_index))])
                    else:
                        break
                # Row fully remapped only if no lookup missed above.
                if ( len(lS_i_t) == len(lS_i_row)):
                    lS_i.append(lS_i_t)
                else:
                    break
            # Sample is hot only if every table's row was fully remapped.
            if ( len(lS_i) == len(train_tuple[0])):
                lS_i = np.array(lS_i).astype(np.float32)
                train_tuple[0] = lS_i
                train_hot_array[i*chunksize + hot_ctr] = train_tuple
                hot_ctr += 1
            else:
                train_normal_array[i*chunksize + normal_ctr] = train_tuple
                normal_ctr += 1
        print("Process : ", int(current_process().name), " done with hot inputs ", hot_ctr, " and normal inputs ", normal_ctr)
    # Input Profiler
    print("Input Profiling Initializing!!\n")
    L = args.hot_emb_gpu_mem
    x = args.ip_sampling_rate
    # Each hot embedding row costs 4 bytes (float32) * sparse feature size.
    num_hot_emb = args.hot_emb_gpu_mem // (4 * args.arch_sparse_feature_size)
    print("Available GPU Memory for Hot Emb : ", L / (1024 * 1024), " MB")
    print("Input Sampling Rate for Profiling : ", x, "%")
    # =============================== PROFILING START ======================================
    profiling_begin = time_wrap()
    # Sample x% of the dataset (with replacement) to estimate access skew.
    sample_train_data_len = int((x / 100) * len(train_data))
    print("Training Input Dataset Length (D) : ", len(train_data))
    sampled_train_data = np.random.randint(0, len(train_data), size = sample_train_data_len)
    print("Sampled Training Input Dataset Length (D^) : ", len(sampled_train_data))
    # ================== Skew Table Creation ======================
    # One row per embedding entry: [table id, row index, access count].
    skew_table = []
    for i in range(len(ln_emb)):
        temp_list = np.zeros((ln_emb[i],3), dtype = int)
        skew_table.append(temp_list)
    # =================== Filling Skew Table Emb Table ======================
    for i in range(len(ln_emb)):
        for j in range(ln_emb[i]):
            skew_table[i][j][0] = i
    # =================== Filling Skew Table Emb Index ======================
    for i in range(len(ln_emb)):
        for j in range(ln_emb[i]):
            skew_table[i][j][1] = j
    # =================== Filling Skew Table Emb Counter ======================
    # Updating Skew table with sampled input profiling data
    for i, sample in enumerate(sampled_train_data):
        lS_i, X, label = train_data[sample]
        for j, lS_i_row in enumerate(lS_i):
            for k, lS_i_index in enumerate(lS_i_row):
                skew_table[j][int(lS_i_index)][2] = skew_table[j][int(lS_i_index)][2] + 1
    # Combining skew table list into a 2D array
    skew_table_array = np.vstack(skew_table)
    # =================== Sorting Skew Table based on Counter ==============
    # Descending by access count; most frequently accessed entries first.
    skew_table_array = skew_table_array[skew_table_array[:,2].argsort()[::-1]]
    # =================== Getting hot embedding entries ====================
    hot_emb_entries = skew_table_array[0:num_hot_emb]
    # =================== Getting Top Emb Dict ==============================
    # One dict per table mapping (table, row) -> slot id in the hot table.
    hot_emb_dict = []
    emb_dict = {}
    for i in range(len(ln_emb)):
        new_emb_dict = copy.deepcopy(emb_dict)
        hot_emb_dict.append(new_emb_dict)
    for i in range(len(hot_emb_entries)):
        hot_emb_dict[hot_emb_entries[i][0]][(hot_emb_entries[i][0], hot_emb_entries[i][1])] = np.float32(i)
    len_hot_emb_dict = 0
    for i in range(len(hot_emb_dict)):
        len_hot_emb_dict += len(hot_emb_dict[i])
    del skew_table_array
    print("Hot Emb Dict Size : ", (len_hot_emb_dict * 4 * args.arch_sparse_feature_size) / (1024 ** 2), " MB")
    print("Hot Emb Dict Creation Completed!!")
    # ===================== Input Profiling ========================
    print("Starting Input Classification")
    num_cores = mp.cpu_count()
    print("Num Cores : ", num_cores)
    # NOTE(review): len(train) % num_cores trailing samples are dropped by
    # this chunking — confirm that losing up to num_cores-1 samples is intended.
    chunksize = len(train) // num_cores
    processes = [Process(target = single_process_ip_classification,
                       name = "%i" % i,
                       args = (train[i*chunksize : (i+1)*chunksize],
                              hot_emb_dict,
                              train_hot_array,
                              train_normal_array,
                              chunksize
                              )
                       )
            for i in range(0, num_cores)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
    # Removing None elements from both train hot and train normal arrays
    nan_array_hot = pd.isnull(train_hot_array)
    not_nan_array_hot = ~ nan_array_hot
    train_hot_array = train_hot_array[not_nan_array_hot]
    nan_array_normal = pd.isnull(train_normal_array)
    not_nan_array_normal = ~ nan_array_normal
    train_normal_array = train_normal_array[not_nan_array_normal]
    print("===================== Input Profiling Stats ==================")
    print("Train Hot Data : ", len(train_hot_array))
    print("Train Normal Data : ", len(train_normal_array))
    print("Total Data : ", len(train_hot_array) + len(train_normal_array))
    print("Percentage : ", (len(train_hot_array) / (len(train_hot_array) + len(train_normal_array))) * 100 )
    print("==============================================================")
    # Closing the shared memories and unlinking
    shm_train_hot.close()
    shm_train_hot.unlink()
    shm_train_normal.close()
    shm_train_normal.unlink()
    profiling_end = time_wrap()
    print("FAE Profiling Time : ", profiling_end - profiling_begin, " s")
    # Persist the hot/normal split and the hot-embedding mapping for training.
    train_hot = np.array(train_hot_array, dtype = object)
    train_normal = np.array(train_normal_array, dtype = object)
    hot_emb_dict = np.array(hot_emb_dict, dtype = object)
    np.savez_compressed('./data/taobao_hot_cold/train_hot.npz', train_hot)
    np.savez_compressed('./data/taobao_hot_cold/train_normal.npz', train_normal)
    np.savez_compressed('./data/taobao_hot_cold/hot_emb_dict.npz', hot_emb_dict)
    print("Save Hot/Cold Data Completed")
    sys.exit("FAE profiling completed!!")
| [
"multiprocessing.Process",
"multiprocessing.cpu_count",
"numpy.array",
"copy.deepcopy",
"sys.exit",
"argparse.ArgumentParser",
"numpy.vstack",
"os.getpid",
"numpy.savez_compressed",
"numpy.fromstring",
"argparse.ArgumentTypeError",
"time.time",
"warnings.filterwarnings",
"multiprocessing.c... | [((2924, 2949), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2947, 2949), False, 'import warnings\n'), ((2953, 3015), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (2976, 3015), False, 'import warnings\n'), ((3854, 3945), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train Deep Learning Recommendation Model (DLRM)"""'}), "(description=\n 'Train Deep Learning Recommendation Model (DLRM)')\n", (3877, 3945), False, 'import argparse\n'), ((7634, 7653), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7646, 7653), False, 'import torch\n'), ((7743, 7780), 'dlrm_data_pytorch.make_alibaba_data_and_loader', 'dp.make_alibaba_data_and_loader', (['args'], {}), '(args)\n', (7774, 7780), True, 'import dlrm_data_pytorch as dp\n'), ((7795, 7854), 'numpy.fromstring', 'np.fromstring', (['args.arch_embedding_size'], {'dtype': 'int', 'sep': '"""-"""'}), "(args.arch_embedding_size, dtype=int, sep='-')\n", (7808, 7854), True, 'import numpy as np\n'), ((8194, 8223), 'numpy.array', 'np.array', (['train'], {'dtype': 'object'}), '(train, dtype=object)\n', (8202, 8223), True, 'import numpy as np\n'), ((11706, 11727), 'numpy.vstack', 'np.vstack', (['skew_table'], {}), '(skew_table)\n', (11715, 11727), True, 'import numpy as np\n'), ((12799, 12813), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (12811, 12813), True, 'import multiprocessing as mp\n'), ((13374, 13400), 'pandas.isnull', 'pd.isnull', (['train_hot_array'], {}), '(train_hot_array)\n', (13383, 13400), True, 'import pandas as pd\n'), ((13518, 13547), 'pandas.isnull', 'pd.isnull', (['train_normal_array'], {}), '(train_normal_array)\n', (13527, 13547), True, 'import pandas as pd\n'), ((14369, 14408), 'numpy.array', 'np.array', (['train_hot_array'], {'dtype': 'object'}), '(train_hot_array, dtype=object)\n', 
(14377, 14408), True, 'import numpy as np\n'), ((14428, 14470), 'numpy.array', 'np.array', (['train_normal_array'], {'dtype': 'object'}), '(train_normal_array, dtype=object)\n', (14436, 14470), True, 'import numpy as np\n'), ((14490, 14526), 'numpy.array', 'np.array', (['hot_emb_dict'], {'dtype': 'object'}), '(hot_emb_dict, dtype=object)\n', (14498, 14526), True, 'import numpy as np\n'), ((14533, 14603), 'numpy.savez_compressed', 'np.savez_compressed', (['"""./data/taobao_hot_cold/train_hot.npz"""', 'train_hot'], {}), "('./data/taobao_hot_cold/train_hot.npz', train_hot)\n", (14552, 14603), True, 'import numpy as np\n'), ((14606, 14682), 'numpy.savez_compressed', 'np.savez_compressed', (['"""./data/taobao_hot_cold/train_normal.npz"""', 'train_normal'], {}), "('./data/taobao_hot_cold/train_normal.npz', train_normal)\n", (14625, 14682), True, 'import numpy as np\n'), ((14685, 14761), 'numpy.savez_compressed', 'np.savez_compressed', (['"""./data/taobao_hot_cold/hot_emb_dict.npz"""', 'hot_emb_dict'], {}), "('./data/taobao_hot_cold/hot_emb_dict.npz', hot_emb_dict)\n", (14704, 14761), True, 'import numpy as np\n'), ((14804, 14841), 'sys.exit', 'sys.exit', (['"""FAE profiling completed!!"""'], {}), "('FAE profiling completed!!')\n", (14812, 14841), False, 'import sys\n'), ((7570, 7581), 'time.time', 'time.time', ([], {}), '()\n', (7579, 7581), False, 'import time\n'), ((10839, 10874), 'numpy.zeros', 'np.zeros', (['(ln_emb[i], 3)'], {'dtype': 'int'}), '((ln_emb[i], 3), dtype=int)\n', (10847, 10874), True, 'import numpy as np\n'), ((12178, 12201), 'copy.deepcopy', 'copy.deepcopy', (['emb_dict'], {}), '(emb_dict)\n', (12191, 12201), False, 'import copy\n'), ((12370, 12383), 'numpy.float32', 'np.float32', (['i'], {}), '(i)\n', (12380, 12383), True, 'import numpy as np\n'), ((12904, 13087), 'multiprocessing.Process', 'Process', ([], {'target': 'single_process_ip_classification', 'name': "('%i' % i)", 'args': '(train[i * chunksize:(i + 1) * chunksize], hot_emb_dict, 
train_hot_array,\n train_normal_array, chunksize)'}), "(target=single_process_ip_classification, name='%i' % i, args=(train\n [i * chunksize:(i + 1) * chunksize], hot_emb_dict, train_hot_array,\n train_normal_array, chunksize))\n", (12911, 13087), False, 'from multiprocessing import Process, Pool, Manager, Queue, Lock, current_process\n'), ((9112, 9123), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9121, 9123), False, 'import os\n'), ((3373, 3460), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is not a valid dash separated list of ints' % value)"], {}), "('%s is not a valid dash separated list of ints' %\n value)\n", (3399, 3460), False, 'import argparse\n'), ((3619, 3709), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('%s is not a valid dash separated list of floats' % value)"], {}), "(\n '%s is not a valid dash separated list of floats' % value)\n", (3645, 3709), False, 'import argparse\n'), ((9010, 9027), 'multiprocessing.current_process', 'current_process', ([], {}), '()\n', (9025, 9027), False, 'from multiprocessing import Process, Pool, Manager, Queue, Lock, current_process\n'), ((9071, 9088), 'multiprocessing.current_process', 'current_process', ([], {}), '()\n', (9086, 9088), False, 'from multiprocessing import Process, Pool, Manager, Queue, Lock, current_process\n'), ((9852, 9869), 'multiprocessing.current_process', 'current_process', ([], {}), '()\n', (9867, 9869), False, 'from multiprocessing import Process, Pool, Manager, Queue, Lock, current_process\n'), ((9593, 9607), 'numpy.array', 'np.array', (['lS_i'], {}), '(lS_i)\n', (9601, 9607), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import pickle
from glob import glob
import pdb
import functools
from multiprocessing import Pool
import xml.etree.ElementTree as ET
import cv2
import numpy as np
from tqdm import tqdm
config = {
    "exemplar_size":127,  # side length (px) of the cropped exemplar patch
    "instance_size":255,  # side length (px) of the cropped instance patch
    "context_amount":0.5,  # context fraction of (w+h) added around the target box
    "sample_type":"uniform",
    "max_translate":12, # max translation of random shift
    "scale_resize":0.15, # scale step of instance image
}
def crop_and_pad(img,cx,cy,model_sz,original_sz,img_mean=None):
    """Crop a square window centred on (cx, cy) and resize it to model_sz.

    Parts of the window that fall outside the image are padded with
    ``img_mean`` before resizing.

    Argument:
        img: raw image to be cropped
        cx,cy: center x,y of the bbox in img
        model_sz: target size for the cropped and padded image
        original_sz: cropped image size in original img before resize
        img_mean: per-channel fill value used for out-of-image padding
    Return:
        image_patch: processed image
        scale: scale factor
    """
    im_h,im_w,_ = img.shape
    # Square crop window of side original_sz centred on (cx, cy).
    xmin = cx - (original_sz - 1) / 2
    xmax = xmin + original_sz - 1
    ymin = cy - (original_sz - 1) / 2
    ymax = ymin + original_sz - 1
    # How far the window sticks out past each image border (0 if inside).
    left = int(round(max(0,-xmin)))
    top = int(round(max(0,-ymin)))
    right = int(round(max(0,xmax-im_w+1)))
    bottom = int(round(max(0,ymax-im_h+1)))
    # Shift window coordinates into the (possibly padded) canvas frame.
    xmin = int(round(xmin + left))
    xmax = int(round(xmax + left))
    ymin = int(round(ymin + top))
    ymax = int(round(ymax + top))
    r,c,k = img.shape
    if any([top,bottom,left,right]):
        # Build an enlarged canvas, paste the image, fill borders with img_mean.
        te_im = np.zeros((r+top+bottom,c+left+right,k),np.uint8)
        te_im[top:top+r,left:left+c,:] = img
        if top:
            te_im[0:top,left:left+c,:] = img_mean
        if bottom:
            te_im[r+top:,left:left+c,:] = img_mean
        if left:
            te_im[:,0:left,:] = img_mean
        if right:
            te_im[:,c+left:,:] = img_mean
        im_patch_original = te_im[int(ymin):int(ymax+1),
                int(xmin):int(xmax+1),:]
    else:
        im_patch_original = img[int(ymin):int(ymax+1),int(xmin):int(xmax+1),:]
    if not np.array_equal(model_sz,original_sz):
        im_patch = cv2.resize(im_patch_original,(model_sz,model_sz))
    else:
        im_patch = im_patch_original
    # Resize ratio from the original crop to the model-sized patch.
    scale = model_sz / im_patch_original.shape[0]
    return im_patch,scale
def get_instance_image(img,bbox,size_z,size_x,context_amount,img_mean=None):
    """Crop the search-region ("instance") patch around a target box.

    Argument
        bbox: bounding box of exemplar i.e. (centerx,centery,width,height)
        size_z: size of exemplar
        size_x: size of cropped instance
        context_amount: context pixels around the target box
        img_mean: mean value in pixel of three channels i.e. (60,70,80)
    Return:
        instance_img: cropped instance image
        w_x: scaled width of bbox
        h_x: scaled height of bbox
        scale_x: scale factor for bbox
    """
    cx, cy, box_w, box_h = bbox  # float type
    # Pad both sides with context, then take the geometric mean of the
    # padded sides as the exemplar crop width in the original image.
    context = context_amount * (box_w + box_h)
    s_z = np.sqrt((box_w + context) * (box_h + context))
    # The instance crop covers size_x/size_z times the exemplar region.
    s_x = size_x * (s_z / size_z)
    instance_img, scale_x = crop_and_pad(img, cx, cy,
                                         model_sz=size_x,
                                         original_sz=s_x,
                                         img_mean=img_mean)
    return instance_img, box_w * scale_x, box_h * scale_x, scale_x
def worker(output_dir,video_dir):
    """Process one VID video directory: record per-track frame lists and
    write a cropped instance image per frame into ``output_dir/<video>``.

    Return:
        (video_name, trajs) where trajs maps track id -> list of frame
        filenames in which that track appears.
    """
    image_names = glob(os.path.join(video_dir,"*.JPEG"))
    # Sort frames numerically by their integer filename stem.
    image_names = sorted(image_names,
                        key=lambda x: int(x.split("/")[-1].split(".")[0]))
    video_name = video_dir.split("/")[-1]
    save_folder = os.path.join(output_dir,video_name)
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    trajs = {}
    for image_name in image_names:
        img = cv2.imread(image_name)
        # Per-channel mean of the frame, used as pad colour for crops.
        img_mean = tuple(map(int,img.mean(axis=(0,1))))
        # Annotation path mirrors the frame path: Data -> Annotations, .JPEG -> .xml
        anno_name = image_name.replace("Data","Annotations")
        anno_name = anno_name.replace("JPEG","xml")
        tree = ET.parse(anno_name)
        root = tree.getroot()
        bboxes = []  # NOTE(review): never used below
        filename = root.find("filename").text
        for obj in root.iter("object"):
            bbox = obj.find("bndbox")
            bbox = list(map(int,[bbox.find("xmin").text,
                                bbox.find("ymin").text,
                                bbox.find("xmax").text,
                                bbox.find("ymax").text]))
            trkid = int(obj.find("trackid").text)
            if trkid in trajs:
                trajs[trkid].append(filename)
            else:
                trajs[trkid] = [filename]
        # NOTE(review): the cropping below uses `bbox`/`trkid` from the *last*
        # object parsed in the loop above; if every object in a frame should
        # get its own crop, this block likely belongs inside that loop — confirm.
        instance_crop_size = int(
            np.ceil((config["instance_size"]+config["max_translate"]*2) * (1+config["scale_resize"])))
        # Convert (xmin, ymin, xmax, ymax) -> (cx, cy, w, h).
        bbox = np.array(
            [(bbox[2]+bbox[0])/2,(bbox[3]+bbox[1])/2,bbox[2]-bbox[0] + 1,
            bbox[3] - bbox[1] + 1])
        instance_img,w,h,_ = get_instance_image(img,bbox,
                                               config["exemplar_size"],
                                               instance_crop_size,
                                               config["context_amount"],
                                               img_mean)
        # Filename encodes track id and the scaled box size: <frame>.<trk>.x_<w>_<h>.jpg
        instance_img_name = os.path.join(save_folder,
                        filename+".{:02d}.x_{:.2f}_{:.2f}.jpg".format(trkid,w,h))
        cv2.imwrite(instance_img_name,instance_img)
    return video_name,trajs
def main():
    """Crop instance images for all ILSVRC2015-VID videos with a process pool.

    Collects every video directory (four training splits plus validation,
    4417 in total), maps :func:`worker` over them with 4 processes, and
    pickles the collected ``(video_name, trajs)`` metadata to
    ``./VID_15/meta_data.pkl``.
    """
    video_dir = "ILSVRC2015/Data/VID"
    # Gather the video folders from each dataset split, in the same order
    # as the original concatenated glob calls.
    patterns = [
        "train/ILSVRC2015_VID_train_0000/*",
        "train/ILSVRC2015_VID_train_0001/*",
        "train/ILSVRC2015_VID_train_0002/*",
        "train/ILSVRC2015_VID_train_0003/*",
        "val/*",
    ]
    all_videos = []
    for pattern in patterns:
        all_videos.extend(glob(os.path.join(video_dir, pattern)))
    output_dir = "./VID_15"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    meta_data = []
    with Pool(processes=4) as pool:
        pool_func = pool.imap_unordered(functools.partial(worker, output_dir), all_videos)
        for ret in tqdm(pool_func, total=len(all_videos)):
            meta_data.append(ret)
    # FIX: close the output file deterministically — the original passed a
    # bare open(...) to pickle.dump and never closed the handle.
    with open(os.path.join(output_dir, "meta_data.pkl"), "wb") as f:
        pickle.dump(meta_data, f)
    print("Done")
if __name__ == "__main__":
    main()
| [
"os.path.exists",
"cv2.imwrite",
"numpy.ceil",
"numpy.sqrt",
"xml.etree.ElementTree.parse",
"os.makedirs",
"os.path.join",
"numpy.array",
"numpy.zeros",
"numpy.array_equal",
"os.mkdir",
"multiprocessing.Pool",
"functools.partial",
"cv2.resize",
"cv2.imread"
] | [((2821, 2841), 'numpy.sqrt', 'np.sqrt', (['(wc_z * hc_z)'], {}), '(wc_z * hc_z)\n', (2828, 2841), True, 'import numpy as np\n'), ((3712, 3748), 'os.path.join', 'os.path.join', (['output_dir', 'video_name'], {}), '(output_dir, video_name)\n', (3724, 3748), False, 'import os\n'), ((1361, 1420), 'numpy.zeros', 'np.zeros', (['(r + top + bottom, c + left + right, k)', 'np.uint8'], {}), '((r + top + bottom, c + left + right, k), np.uint8)\n', (1369, 1420), True, 'import numpy as np\n'), ((1913, 1950), 'numpy.array_equal', 'np.array_equal', (['model_sz', 'original_sz'], {}), '(model_sz, original_sz)\n', (1927, 1950), True, 'import numpy as np\n'), ((1970, 2021), 'cv2.resize', 'cv2.resize', (['im_patch_original', '(model_sz, model_sz)'], {}), '(im_patch_original, (model_sz, model_sz))\n', (1980, 2021), False, 'import cv2\n'), ((3511, 3544), 'os.path.join', 'os.path.join', (['video_dir', '"""*.JPEG"""'], {}), "(video_dir, '*.JPEG')\n", (3523, 3544), False, 'import os\n'), ((3759, 3786), 'os.path.exists', 'os.path.exists', (['save_folder'], {}), '(save_folder)\n', (3773, 3786), False, 'import os\n'), ((3796, 3817), 'os.mkdir', 'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (3804, 3817), False, 'import os\n'), ((3883, 3905), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (3893, 3905), False, 'import cv2\n'), ((4091, 4110), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_name'], {}), '(anno_name)\n', (4099, 4110), True, 'import xml.etree.ElementTree as ET\n'), ((5922, 5948), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (5936, 5948), False, 'import os\n'), ((5958, 5981), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (5969, 5981), False, 'import os\n'), ((5992, 6009), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(4)'}), '(processes=4)\n', (5996, 6009), False, 'from multiprocessing import Pool\n'), ((4866, 4977), 'numpy.array', 'np.array', (['[(bbox[2] + bbox[0]) / 2, (bbox[3] + 
bbox[1]) / 2, bbox[2] - bbox[0] + 1, \n bbox[3] - bbox[1] + 1]'], {}), '([(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2, bbox[2] - bbox[\n 0] + 1, bbox[3] - bbox[1] + 1])\n', (4874, 4977), True, 'import numpy as np\n'), ((5358, 5402), 'cv2.imwrite', 'cv2.imwrite', (['instance_img_name', 'instance_img'], {}), '(instance_img_name, instance_img)\n', (5369, 5402), False, 'import cv2\n'), ((5826, 5858), 'os.path.join', 'os.path.join', (['video_dir', '"""val/*"""'], {}), "(video_dir, 'val/*')\n", (5838, 5858), False, 'import os\n'), ((6060, 6097), 'functools.partial', 'functools.partial', (['worker', 'output_dir'], {}), '(worker, output_dir)\n', (6077, 6097), False, 'import functools\n'), ((4756, 4855), 'numpy.ceil', 'np.ceil', (["((config['instance_size'] + config['max_translate'] * 2) * (1 + config[\n 'scale_resize']))"], {}), "((config['instance_size'] + config['max_translate'] * 2) * (1 +\n config['scale_resize']))\n", (4763, 4855), True, 'import numpy as np\n'), ((5752, 5812), 'os.path.join', 'os.path.join', (['video_dir', '"""train/ILSVRC2015_VID_train_0003/*"""'], {}), "(video_dir, 'train/ILSVRC2015_VID_train_0003/*')\n", (5764, 5812), False, 'import os\n'), ((6401, 6442), 'os.path.join', 'os.path.join', (['output_dir', '"""meta_data.pkl"""'], {}), "(output_dir, 'meta_data.pkl')\n", (6413, 6442), False, 'import os\n'), ((5678, 5738), 'os.path.join', 'os.path.join', (['video_dir', '"""train/ILSVRC2015_VID_train_0002/*"""'], {}), "(video_dir, 'train/ILSVRC2015_VID_train_0002/*')\n", (5690, 5738), False, 'import os\n'), ((5531, 5591), 'os.path.join', 'os.path.join', (['video_dir', '"""train/ILSVRC2015_VID_train_0000/*"""'], {}), "(video_dir, 'train/ILSVRC2015_VID_train_0000/*')\n", (5543, 5591), False, 'import os\n'), ((5604, 5664), 'os.path.join', 'os.path.join', (['video_dir', '"""train/ILSVRC2015_VID_train_0001/*"""'], {}), "(video_dir, 'train/ILSVRC2015_VID_train_0001/*')\n", (5616, 5664), False, 'import os\n')] |
"""
==============================================
Face completion with a multi-output estimators
==============================================
This example shows the use of multi-output estimator to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data, targets = fetch_olivetti_faces(return_X_y=True)
# People 0-29 form the training set; the remaining people are held out.
train = data[targets < 30]
test = data[targets >= 30]  # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces,))
test = test[face_ids, :]
n_pixels = data.shape[1]
# Upper half of the faces (the model input)
X_train = train[:, : (n_pixels + 1) // 2]
# Lower half of the faces (the multi-output regression target)
y_train = train[:, n_pixels // 2 :]
X_test = test[:, : (n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2 :]
# Fit estimators: each one learns to predict every lower-half pixel at once.
ESTIMATORS = {
    "Extra trees": ExtraTreesRegressor(
        n_estimators=10, max_features=32, random_state=0
    ),
    "K-nn": KNeighborsRegressor(),
    "Linear regression": LinearRegression(),
    "Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
    estimator.fit(X_train, y_train)
    y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces: column 1 is ground truth, the rest one per model.
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2.0 * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
    # Ground-truth face: true upper half stitched to true lower half.
    true_face = np.hstack((X_test[i], y_test[i]))
    if i:
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
    else:
        # Column titles only on the first row.
        sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces")
    sub.axis("off")
    sub.imshow(
        true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest"
    )
    for j, est in enumerate(sorted(ESTIMATORS)):
        # Completed face: true upper half stitched to the predicted lower half.
        completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
        if i:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
        else:
            sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est)
        sub.axis("off")
        sub.imshow(
            completed_face.reshape(image_shape),
            cmap=plt.cm.gray,
            interpolation="nearest",
        )
plt.show()
| [
"sklearn.linear_model.LinearRegression",
"numpy.hstack",
"sklearn.utils.validation.check_random_state",
"sklearn.ensemble.ExtraTreesRegressor",
"sklearn.neighbors.KNeighborsRegressor",
"sklearn.datasets.fetch_olivetti_faces",
"matplotlib.pyplot.figure",
"sklearn.linear_model.RidgeCV",
"matplotlib.py... | [((893, 930), 'sklearn.datasets.fetch_olivetti_faces', 'fetch_olivetti_faces', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (913, 930), False, 'from sklearn.datasets import fetch_olivetti_faces\n'), ((1064, 1085), 'sklearn.utils.validation.check_random_state', 'check_random_state', (['(4)'], {}), '(4)\n', (1082, 1085), False, 'from sklearn.utils.validation import check_random_state\n'), ((1877, 1927), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2.0 * n_cols, 2.26 * n_faces)'}), '(figsize=(2.0 * n_cols, 2.26 * n_faces))\n', (1887, 1927), True, 'import matplotlib.pyplot as plt\n'), ((1928, 1997), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Face completion with multi-output estimators"""'], {'size': '(16)'}), "('Face completion with multi-output estimators', size=16)\n", (1940, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2837), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2835, 2837), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1517), 'sklearn.ensemble.ExtraTreesRegressor', 'ExtraTreesRegressor', ([], {'n_estimators': '(10)', 'max_features': '(32)', 'random_state': '(0)'}), '(n_estimators=10, max_features=32, random_state=0)\n', (1467, 1517), False, 'from sklearn.ensemble import ExtraTreesRegressor\n'), ((1545, 1566), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {}), '()\n', (1564, 1566), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((1593, 1611), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1609, 1611), False, 'from sklearn.linear_model import LinearRegression\n'), ((1626, 1635), 'sklearn.linear_model.RidgeCV', 'RidgeCV', ([], {}), '()\n', (1633, 1635), False, 'from sklearn.linear_model import RidgeCV\n'), ((2040, 2073), 'numpy.hstack', 'np.hstack', (['(X_test[i], y_test[i])'], {}), '((X_test[i], y_test[i]))\n', (2049, 2073), True, 'import numpy as np\n'), ((2099, 2143), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['n_faces', 'n_cols', '(i * n_cols + 1)'], {}), '(n_faces, n_cols, i * n_cols + 1)\n', (2110, 2143), True, 'import matplotlib.pyplot as plt\n'), ((2168, 2232), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_faces', 'n_cols', '(i * n_cols + 1)'], {'title': '"""true faces"""'}), "(n_faces, n_cols, i * n_cols + 1, title='true faces')\n", (2179, 2232), True, 'import matplotlib.pyplot as plt\n'), ((2433, 2479), 'numpy.hstack', 'np.hstack', (['(X_test[i], y_test_predict[est][i])'], {}), '((X_test[i], y_test_predict[est][i]))\n', (2442, 2479), True, 'import numpy as np\n'), ((2513, 2561), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_faces', 'n_cols', '(i * n_cols + 2 + j)'], {}), '(n_faces, n_cols, i * n_cols + 2 + j)\n', (2524, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2595, 2654), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_faces', 'n_cols', '(i * n_cols + 2 + j)'], {'title': 'est'}), '(n_faces, n_cols, i * n_cols + 2 + j, title=est)\n', (2606, 2654), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
# Bernoulli naive-Bayes classifier for the sklearn digits dataset,
# implemented by hand with numpy.
digits = load_digits()
# Binarize the 8x8 digit images: pixel intensities 0..16 -> {0., 1.}
X = np.round(digits.data / 16.)
y_classed = digits.target
target_arr = digits.target_names
X, X_test, y, y_test = train_test_split(X, y_classed, test_size=0.33,
                                        random_state=42)
m = X.shape[0]  # number of training samples
n = X.shape[1]  # number of features (64 pixels)
N = target_arr.shape[0]  # number of classes
# FIX: was X_test.shape[1] (the feature count), not the number of test samples.
m_test = X_test.shape[0]
# theta[j, k] = empirical P(pixel j is on | class k)
theta = np.zeros((n, N))
for k in range(N):
    theta[:, k] = np.sum(X[y == k], axis=0) / len(X[y == k])
unique, counts = np.unique(y, return_counts=True)
priors = np.array([x / np.sum(counts) for x in counts])
# Unnormalized posterior P(class k | sample i) for the training set.
class_probs = np.zeros((m, N))
for i, x in enumerate(X):
    for k in range(N):
        prior = priors[k]
        # Bernoulli likelihood: product over pixels of theta^x * (1-theta)^(1-x)
        lklhd = np.prod(theta[:, k] ** x * (1 - theta[:, k]) ** (1 - x))
        pstrr_k = prior * lklhd
        class_probs[i, k] = pstrr_k
class_probs /= np.sum(class_probs, axis=1, keepdims=True)
y_pred_train = np.argmax(class_probs, axis=1)
cm_train = confusion_matrix(y_pred_train, y)
train_accuracy = accuracy_score(y, y_pred_train)
print('training accuracy: {}, trying to beat 0.913549459684123'.format(
    train_accuracy))
# FIX: allocate one row per *test* sample. The original used np.zeros((m, N))
# with the training-set size, leaving all-zero rows (NaN after normalization)
# and producing a y_pred_test longer than y_test, which raises in
# confusion_matrix / accuracy_score.
class_probs_test = np.zeros((m_test, N))
for i, xt in enumerate(X_test):
    for k in range(N):
        prior = priors[k]
        lklhd = np.prod(theta[:, k] ** xt * (1 - theta[:, k]) ** (1 - xt))
        pstrr_k = prior * lklhd
        class_probs_test[i, k] = pstrr_k
class_probs_test /= np.sum(class_probs_test, axis=1, keepdims=True)
y_pred_test = np.argmax(class_probs_test, axis=1)
cm_test = confusion_matrix(y_pred_test, y_test)
test_accuracy = accuracy_score(y_test, y_pred_test)
# ========================= EOF ====================================================================
| [
"numpy.prod",
"numpy.unique",
"sklearn.model_selection.train_test_split",
"numpy.argmax",
"sklearn.datasets.load_digits",
"numpy.sum",
"numpy.zeros",
"sklearn.metrics.accuracy_score",
"numpy.round",
"sklearn.metrics.confusion_matrix"
] | [((184, 197), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (195, 197), False, 'from sklearn.datasets import load_digits\n'), ((202, 230), 'numpy.round', 'np.round', (['(digits.data / 16.0)'], {}), '(digits.data / 16.0)\n', (210, 230), True, 'import numpy as np\n'), ((313, 376), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y_classed'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y_classed, test_size=0.33, random_state=42)\n', (329, 376), False, 'from sklearn.model_selection import train_test_split\n'), ((506, 522), 'numpy.zeros', 'np.zeros', (['(n, N)'], {}), '((n, N))\n', (514, 522), True, 'import numpy as np\n'), ((621, 653), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (630, 653), True, 'import numpy as np\n'), ((725, 741), 'numpy.zeros', 'np.zeros', (['(m, N)'], {}), '((m, N))\n', (733, 741), True, 'import numpy as np\n'), ((975, 1017), 'numpy.sum', 'np.sum', (['class_probs'], {'axis': '(1)', 'keepdims': '(True)'}), '(class_probs, axis=1, keepdims=True)\n', (981, 1017), True, 'import numpy as np\n'), ((1033, 1063), 'numpy.argmax', 'np.argmax', (['class_probs'], {'axis': '(1)'}), '(class_probs, axis=1)\n', (1042, 1063), True, 'import numpy as np\n'), ((1075, 1108), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_pred_train', 'y'], {}), '(y_pred_train, y)\n', (1091, 1108), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((1157, 1188), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred_train'], {}), '(y, y_pred_train)\n', (1171, 1188), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((1302, 1318), 'numpy.zeros', 'np.zeros', (['(m, N)'], {}), '((m, N))\n', (1310, 1318), True, 'import numpy as np\n'), ((1570, 1617), 'numpy.sum', 'np.sum', (['class_probs_test'], {'axis': '(1)', 'keepdims': '(True)'}), '(class_probs_test, axis=1, keepdims=True)\n', (1576, 1617), True, 'import 
numpy as np\n'), ((1632, 1667), 'numpy.argmax', 'np.argmax', (['class_probs_test'], {'axis': '(1)'}), '(class_probs_test, axis=1)\n', (1641, 1667), True, 'import numpy as np\n'), ((1678, 1715), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_pred_test', 'y_test'], {}), '(y_pred_test, y_test)\n', (1694, 1715), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((1733, 1768), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (1747, 1768), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((560, 585), 'numpy.sum', 'np.sum', (['X[y == k]'], {'axis': '(0)'}), '(X[y == k], axis=0)\n', (566, 585), True, 'import numpy as np\n'), ((834, 890), 'numpy.prod', 'np.prod', (['(theta[:, k] ** x * (1 - theta[:, k]) ** (1 - x))'], {}), '(theta[:, k] ** x * (1 - theta[:, k]) ** (1 - x))\n', (841, 890), True, 'import numpy as np\n'), ((1417, 1475), 'numpy.prod', 'np.prod', (['(theta[:, k] ** xt * (1 - theta[:, k]) ** (1 - xt))'], {}), '(theta[:, k] ** xt * (1 - theta[:, k]) ** (1 - xt))\n', (1424, 1475), True, 'import numpy as np\n'), ((677, 691), 'numpy.sum', 'np.sum', (['counts'], {}), '(counts)\n', (683, 691), True, 'import numpy as np\n')] |
import os
import tkinter as tk
import tkinter.ttk as ttk
from configparser import ConfigParser
import tkinter.messagebox
import sys
import cv2 as cv
import numpy as np
from PIL import Image, ImageTk
class CalibrateDetection:
    """Tkinter/OpenCV tool for calibrating the HSV skin-colour range used by
    a hand detector.  The chosen bounds are persisted to ``config.ini``
    via :meth:`saveExit`.
    """
    # Address of the IP-webcam stream (unused when a local webcam is chosen).
    webcam_ip = ""
    # HSV bounds start "inverted" (lower > upper), so cv.inRange matches
    # nothing until the user calibrates — presumably intentional; confirm.
    lower_skin = np.array([255, 255, 255])
    upper_skin = np.array([0, 0, 0])
    is_debug = 0  # not referenced in this class; kept for external use — confirm
    # (width, height) every captured frame is resized to before display.
    IMAGE_DIMENSIONS = 852, 480
    def __init__(self, use_ip_webcam, webcam_ip):
        """Open the capture device and abort with an error dialog if none.

        Args:
            use_ip_webcam: truthy to connect to ``webcam_ip`` instead of the
                integrated webcam (device index 0).
            webcam_ip: stream address of the IP webcam.
        """
        # change the webcam_ip to 0 if you are using a webcame connected to pc
        # constructor
        self.webcam_ip = webcam_ip
        if use_ip_webcam:
            self.cap = cv.VideoCapture(self.webcam_ip) #ip webcam connect
        else:
            self.cap = cv.VideoCapture(0) #integrated webcam connect
        #if webcam is not detected
        if self.cap is None or not self.cap.isOpened():
            tkinter.messagebox.showerror('Error', 'Your webcam was not detected.')
            sys.exit(1)
    def loadGUI(self):
        """Build the calibration window: six sliders (upper/lower bound for
        hue, saturation and value), save/exit buttons, and a live preview of
        the webcam feed with the current mask applied."""
        # building GUI
        self.root = tk.Tk()
        #add icon to window
        icon = "hand_icon.ico"
        # When frozen (e.g. PyInstaller), the icon lives under sys.prefix.
        if not hasattr(sys, "frozen"):
            icon = os.path.join(os.path.dirname(__file__), icon)
        else:
            icon = os.path.join(sys.prefix, icon)
        self.root.iconbitmap(default=icon)
        self.root.title("Calibration Menu")
        self.root.resizable(False, False)
        # this was UI was built using pygubu and i recommend ignoring this
        self.frame1 = ttk.Frame(self.root)
        self.canvas1 = tk.Canvas(self.frame1)
        self.canvas1.configure(height="480", width="1127")
        self.canvas1.pack(side="top")
        # --- Hue sliders/entries ---
        self.scale1 = ttk.Scale(self.frame1)
        self.upper_hue = tk.IntVar(value=self.upper_skin[0])
        self.scale1.configure(
            from_="0", orient="horizontal", to="255", variable=self.upper_hue
        )
        self.scale1.place(anchor="nw", relwidth=".16", x="15", y="40")
        self.scale1.configure(command=self.updateHSV)
        self.entry1 = ttk.Entry(self.frame1)
        self.entry1.configure(justify="center", textvariable=self.upper_hue)
        self.entry1.delete("0", "end")
        self.entry1.insert("0", self.upper_skin[0])
        self.entry1.place(anchor="nw", relwidth=".024", x="205", y="40")
        self.label1 = ttk.Label(self.frame1)
        self.label1.configure(text="Upper")
        self.label1.place(anchor="nw", x="238", y="40")
        self.label2 = ttk.Label(self.frame1)
        self.label2.configure(text="_____")
        self.label2.place(anchor="nw", x="15", y="17")
        self.label3 = ttk.Label(self.frame1)
        self.label3.configure(text="Hue")
        self.label3.place(anchor="nw", x="15", y="13")
        self.scale2 = ttk.Scale(self.frame1)
        self.lower_hue = tk.IntVar(value=self.lower_skin[0])
        self.scale2.configure(
            from_="0", orient="horizontal", to="255", variable=self.lower_hue
        )
        self.scale2.place(anchor="nw", relwidth=".16", x="15", y="62")
        self.scale2.configure(command=self.updateHSV)
        self.entry3 = ttk.Entry(self.frame1)
        self.entry3.configure(justify="center", textvariable=self.lower_hue)
        self.entry3.delete("0", "end")
        self.entry3.insert("0", self.lower_skin[0])
        self.entry3.place(anchor="nw", relwidth=".024", x="205", y="62")
        self.label4 = ttk.Label(self.frame1)
        self.label4.configure(text="Lower")
        self.label4.place(anchor="nw", x="238", y="62")
        self.label5 = ttk.Label(self.frame1)
        self.label5.configure(text="___________")
        self.label5.place(anchor="nw", x="15", y="89")
        self.label6 = ttk.Label(self.frame1)
        self.label6.configure(text="Saturation")
        self.label6.place(anchor="nw", x="15", y="85")
        # --- Saturation sliders/entries ---
        self.scale3 = ttk.Scale(self.frame1)
        self.upper_saturation = tk.IntVar(value=self.upper_skin[1])
        self.scale3.configure(
            from_="0", orient="horizontal", to="255", variable=self.upper_saturation
        )
        self.scale3.place(anchor="nw", relwidth=".16", x="15", y="112")
        self.scale3.configure(command=self.updateHSV)
        self.scale4 = ttk.Scale(self.frame1)
        self.lower_saturation = tk.IntVar(value=self.lower_skin[1])
        self.scale4.configure(
            from_="0", orient="horizontal", to="255", variable=self.lower_saturation
        )
        self.scale4.place(anchor="nw", relwidth=".16", x="15", y="134")
        self.scale4.configure(command=self.updateHSV)
        self.entry4 = ttk.Entry(self.frame1)
        self.entry4.configure(justify="center", textvariable=self.upper_saturation)
        self.entry4.delete("0", "end")
        self.entry4.insert("0", self.upper_skin[1])
        self.entry4.place(anchor="nw", relwidth=".024", x="205", y="112")
        self.entry5 = ttk.Entry(self.frame1)
        self.entry5.configure(justify="center", textvariable=self.lower_saturation)
        self.entry5.delete("0", "end")
        self.entry5.insert("0", self.lower_skin[1])
        self.entry5.place(anchor="nw", relwidth=".024", x="205", y="134")
        self.label7 = ttk.Label(self.frame1)
        self.label7.configure(text="Upper")
        self.label7.place(anchor="nw", x="238", y="112")
        self.label8 = ttk.Label(self.frame1)
        self.label8.configure(text="Lower")
        self.label8.place(anchor="nw", x="238", y="134")
        self.label9 = ttk.Label(self.frame1)
        self.label9.configure(text="______")
        self.label9.place(anchor="nw", x="15", y="162")
        self.label10 = ttk.Label(self.frame1)
        self.label10.configure(text="Value")
        self.label10.place(anchor="nw", x="15", y="158")
        # --- Value sliders/entries ---
        self.scale5 = ttk.Scale(self.frame1)
        self.upper_value = tk.IntVar(value=self.upper_skin[2])
        self.scale5.configure(
            from_="0", orient="horizontal", to="255", variable=self.upper_value
        )
        self.scale5.place(anchor="nw", relwidth=".16", x="15", y="184")
        self.scale5.configure(command=self.updateHSV)
        self.scale6 = ttk.Scale(self.frame1)
        self.lower_value = tk.IntVar(value=self.lower_skin[2])
        self.scale6.configure(
            from_="0", orient="horizontal", to="255", variable=self.lower_value
        )
        self.scale6.place(anchor="nw", relwidth=".16", x="15", y="206")
        self.scale6.configure(command=self.updateHSV)
        self.entry6 = ttk.Entry(self.frame1)
        self.entry6.configure(justify="center", textvariable=self.upper_value)
        self.entry6.delete("0", "end")
        self.entry6.insert("0", self.upper_skin[2])
        self.entry6.place(anchor="nw", relwidth=".024", x="205", y="184")
        self.entry7 = ttk.Entry(self.frame1)
        self.entry7.configure(justify="center", textvariable=self.lower_value)
        self.entry7.delete("0", "end")
        self.entry7.insert("0", self.lower_skin[2])
        self.entry7.place(anchor="nw", relwidth=".024", x="205", y="206")
        self.label11 = ttk.Label(self.frame1)
        self.label11.configure(text="Upper")
        self.label11.place(anchor="nw", x="238", y="184")
        self.label12 = ttk.Label(self.frame1)
        self.label12.configure(text="Lower")
        self.label12.place(anchor="nw", x="238", y="206")
        self.button1 = ttk.Button(self.frame1)
        self.button1.configure(text="Save & Exit")
        self.button1.place(anchor="nw", relwidth="0.14", x="100", y="238")
        self.button1.configure(command=self.saveExit)
        self.button2 = ttk.Button(self.frame1)
        self.button2.configure(text="Exit")
        self.button2.place(anchor="nw", x="15", y="238")
        self.button2.configure(command=self.exit)
        # Label that hosts the live video preview frames.
        self.video_panel = ttk.Label(self.frame1)
        self.video_panel.place(anchor="nw", height="480", width="852", x="275", y="0")
        self.current_image = None
        self.frame1.configure(height="200", width="200")
        self.frame1.pack(side="top")
        # Main widget
        self.mainwindow = self.frame1
        self.videoLoop()
    def videoLoop(self):
        """Grab one frame, apply the current HSV mask, show the result in the
        Tk window, and re-schedule itself every 30 ms on the Tk event loop."""
        # used to display the video in the config menu
        _, img = self.cap.read()
        img = cv.resize(img, self.IMAGE_DIMENSIONS)
        img = cv.flip(img, 1)
        img = cv.GaussianBlur(img, (5, 5), 0)
        # converts image bgr -> hsv and removes colors that are not in range
        hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
        mask = cv.inRange(hsv, self.lower_skin, self.upper_skin)
        # Invert: pixels inside the skin range are blacked out in the preview.
        mask = cv.bitwise_not(mask)
        res = cv.bitwise_and(img, img, mask=mask)
        #puts image into tk window
        cvimage = cv.cvtColor(res, cv.COLOR_BGR2RGBA)
        self.current_image = Image.fromarray(cvimage)
        imgtk = ImageTk.PhotoImage(image=self.current_image)
        # Keep a reference on the widget so the PhotoImage is not garbage-collected.
        self.video_panel.imgtk = imgtk
        self.video_panel.config(image=imgtk)
        self.root.after(30, self.videoLoop)
    def updateHSV(self, scale_value):
        """Slider callback: round all six IntVars to whole numbers, keep each
        lower bound <= its upper bound, and rebuild the HSV bound arrays.

        Args:
            scale_value: value supplied by the Tk Scale callback (unused;
                every variable is re-read directly).
        """
        # when value sliders are moved
        # keeps set values to whole numbers
        self.lower_hue.set(round(self.lower_hue.get()))
        self.upper_hue.set(round(self.upper_hue.get()))
        self.lower_saturation.set(round(self.lower_saturation.get()))
        self.upper_saturation.set(round(self.upper_saturation.get()))
        self.lower_value.set(round(self.lower_value.get()))
        self.upper_value.set(round(self.upper_value.get()))
        # make sure values dont overlap
        if self.lower_hue.get() > self.upper_hue.get():
            self.upper_hue.set(self.lower_hue.get())
        if self.lower_saturation.get() > self.upper_saturation.get():
            self.upper_saturation.set(self.lower_saturation.get())
        if self.lower_value.get() > self.upper_value.get():
            self.upper_value.set(self.lower_value.get())
        # sets upper and lower hsv values
        self.lower_skin = np.array(
            [self.lower_hue.get(), self.lower_saturation.get(), self.lower_value.get()]
        )
        self.upper_skin = np.array(
            [self.upper_hue.get(), self.upper_saturation.get(), self.upper_value.get()]
        )
    def saveExit(self):
        """Write the calibrated HSV bounds to the [Calibration] section of
        config.ini, then close the window."""
        #saves calibration values on save & exit
        config = ConfigParser()
        file = "config.ini"
        config.read(file)
        config.set("Calibration", "lower_hue", str(self.lower_hue.get()))
        config.set("Calibration", "lower_saturation", str(self.lower_saturation.get()))
        config.set("Calibration", "lower_value", str(self.lower_value.get()))
        config.set("Calibration", "upper_hue", str(self.upper_hue.get()))
        config.set("Calibration", "upper_saturation", str(self.upper_saturation.get()))
        config.set("Calibration", "upper_value", str(self.upper_value.get()))
        with open(file, "w") as configfile:
            config.write(configfile)
        self.root.destroy()
    def exit(self):
        """Close the calibration window without saving."""
        self.root.destroy()
    def initalCalibration(self):
        """Interactive pre-calibration in an OpenCV window: a left click
        samples the HSV colour under the cursor and sets a +/-15 hue band
        around it; the loop runs until the user presses 'f'."""
        # this is used for giving the user an estimate for the calibration settings when they left click on their hand
        def selectRGBValue(event, x, y, flags, param):
            # ``hsv`` is a closure variable assigned each iteration of the loop below.
            if event == cv.EVENT_LBUTTONDOWN: # checks mouse left button down condition
                colors = [0] * 3
                colors[0] = hsv[y, x, 0] # Hue
                colors[1] = hsv[y, x, 1] # Saturation
                colors[2] = hsv[y, x, 2] # Value
                # making sure guesstimated hue isnt going to be out of bounds
                if colors[0] > 240:
                    colors[0] = 240
                elif colors[0] < 15:
                    colors[0] = 15
                self.lower_skin = np.array([colors[0] - 15, 20, 80])
                self.upper_skin = np.array([colors[0] + 15, 255, 230])
        cv.namedWindow("Calibration Menu")
        cv.setMouseCallback("Calibration Menu", selectRGBValue)
        while True:
            # reads and resizes image
            _, img = self.cap.read()
            img = cv.resize(img, self.IMAGE_DIMENSIONS)
            img = cv.flip(img, 1)
            img = cv.blur(img, (5, 5))
            # converts image rgb -> hsv and removes colors that are not in range
            hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
            mask = cv.inRange(hsv, self.lower_skin, self.upper_skin)
            mask = cv.bitwise_not(mask)
            res = cv.bitwise_and(img, img, mask=mask)
            # The instruction text is drawn three times (two black offsets,
            # then the coloured pass) to fake an outline.
            res = cv.putText(
                res,
                "Click around your hand till its nearly completely black and press the key 'f'",
                (19, 19),
                cv.FONT_HERSHEY_PLAIN,
                1.25,
                (0, 0, 0),
                1,
                cv.LINE_AA,
            )
            res = cv.putText(
                res,
                "Click around your hand till its nearly completely black and press the key 'f'",
                (21, 21),
                cv.FONT_HERSHEY_PLAIN,
                1.25,
                (0, 0, 0),
                1,
                cv.LINE_AA,
            )
            res = cv.putText(
                res,
                "Click around your hand till its nearly completely black and press the key 'f'",
                (20, 20),
                cv.FONT_HERSHEY_PLAIN,
                1.25,
                (255, 100, 0),
                1,
                cv.LINE_AA,
            )
            cv.imshow("Calibration Menu", res)
            # press key to stop program
            if cv.waitKey(1) & 0xFF == ord("f"):
                break
        cv.destroyAllWindows()
| [
"tkinter.ttk.Button",
"configparser.ConfigParser",
"cv2.imshow",
"numpy.array",
"tkinter.Canvas",
"cv2.destroyAllWindows",
"sys.exit",
"cv2.setMouseCallback",
"tkinter.ttk.Entry",
"tkinter.ttk.Frame",
"tkinter.ttk.Label",
"cv2.waitKey",
"PIL.ImageTk.PhotoImage",
"cv2.blur",
"cv2.putText"... | [((265, 290), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (273, 290), True, 'import numpy as np\n'), ((308, 327), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (316, 327), True, 'import numpy as np\n'), ((1014, 1021), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1019, 1021), True, 'import tkinter as tk\n'), ((1482, 1502), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.root'], {}), '(self.root)\n', (1491, 1502), True, 'import tkinter.ttk as ttk\n'), ((1526, 1548), 'tkinter.Canvas', 'tk.Canvas', (['self.frame1'], {}), '(self.frame1)\n', (1535, 1548), True, 'import tkinter as tk\n'), ((1668, 1690), 'tkinter.ttk.Scale', 'ttk.Scale', (['self.frame1'], {}), '(self.frame1)\n', (1677, 1690), True, 'import tkinter.ttk as ttk\n'), ((1716, 1751), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.upper_skin[0]'}), '(value=self.upper_skin[0])\n', (1725, 1751), True, 'import tkinter as tk\n'), ((2018, 2040), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.frame1'], {}), '(self.frame1)\n', (2027, 2040), True, 'import tkinter.ttk as ttk\n'), ((2304, 2326), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (2313, 2326), True, 'import tkinter.ttk as ttk\n'), ((2449, 2471), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (2458, 2471), True, 'import tkinter.ttk as ttk\n'), ((2593, 2615), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (2602, 2615), True, 'import tkinter.ttk as ttk\n'), ((2735, 2757), 'tkinter.ttk.Scale', 'ttk.Scale', (['self.frame1'], {}), '(self.frame1)\n', (2744, 2757), True, 'import tkinter.ttk as ttk\n'), ((2783, 2818), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.lower_skin[0]'}), '(value=self.lower_skin[0])\n', (2792, 2818), True, 'import tkinter as tk\n'), ((3085, 3107), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.frame1'], {}), '(self.frame1)\n', (3094, 3107), True, 'import tkinter.ttk as ttk\n'), ((3371, 3393), 
'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (3380, 3393), True, 'import tkinter.ttk as ttk\n'), ((3516, 3538), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (3525, 3538), True, 'import tkinter.ttk as ttk\n'), ((3666, 3688), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (3675, 3688), True, 'import tkinter.ttk as ttk\n'), ((3815, 3837), 'tkinter.ttk.Scale', 'ttk.Scale', (['self.frame1'], {}), '(self.frame1)\n', (3824, 3837), True, 'import tkinter.ttk as ttk\n'), ((3870, 3905), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.upper_skin[1]'}), '(value=self.upper_skin[1])\n', (3879, 3905), True, 'import tkinter as tk\n'), ((4180, 4202), 'tkinter.ttk.Scale', 'ttk.Scale', (['self.frame1'], {}), '(self.frame1)\n', (4189, 4202), True, 'import tkinter.ttk as ttk\n'), ((4235, 4270), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.lower_skin[1]'}), '(value=self.lower_skin[1])\n', (4244, 4270), True, 'import tkinter as tk\n'), ((4545, 4567), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.frame1'], {}), '(self.frame1)\n', (4554, 4567), True, 'import tkinter.ttk as ttk\n'), ((4839, 4861), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.frame1'], {}), '(self.frame1)\n', (4848, 4861), True, 'import tkinter.ttk as ttk\n'), ((5133, 5155), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (5142, 5155), True, 'import tkinter.ttk as ttk\n'), ((5279, 5301), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (5288, 5301), True, 'import tkinter.ttk as ttk\n'), ((5425, 5447), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (5434, 5447), True, 'import tkinter.ttk as ttk\n'), ((5572, 5594), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (5581, 5594), True, 'import tkinter.ttk as ttk\n'), ((5719, 5741), 'tkinter.ttk.Scale', 'ttk.Scale', (['self.frame1'], {}), '(self.frame1)\n', (5728, 5741), True, 
'import tkinter.ttk as ttk\n'), ((5769, 5804), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.upper_skin[2]'}), '(value=self.upper_skin[2])\n', (5778, 5804), True, 'import tkinter as tk\n'), ((6074, 6096), 'tkinter.ttk.Scale', 'ttk.Scale', (['self.frame1'], {}), '(self.frame1)\n', (6083, 6096), True, 'import tkinter.ttk as ttk\n'), ((6124, 6159), 'tkinter.IntVar', 'tk.IntVar', ([], {'value': 'self.lower_skin[2]'}), '(value=self.lower_skin[2])\n', (6133, 6159), True, 'import tkinter as tk\n'), ((6429, 6451), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.frame1'], {}), '(self.frame1)\n', (6438, 6451), True, 'import tkinter.ttk as ttk\n'), ((6718, 6740), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.frame1'], {}), '(self.frame1)\n', (6727, 6740), True, 'import tkinter.ttk as ttk\n'), ((7008, 7030), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (7017, 7030), True, 'import tkinter.ttk as ttk\n'), ((7157, 7179), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (7166, 7179), True, 'import tkinter.ttk as ttk\n'), ((7306, 7329), 'tkinter.ttk.Button', 'ttk.Button', (['self.frame1'], {}), '(self.frame1)\n', (7316, 7329), True, 'import tkinter.ttk as ttk\n'), ((7533, 7556), 'tkinter.ttk.Button', 'ttk.Button', (['self.frame1'], {}), '(self.frame1)\n', (7543, 7556), True, 'import tkinter.ttk as ttk\n'), ((7735, 7757), 'tkinter.ttk.Label', 'ttk.Label', (['self.frame1'], {}), '(self.frame1)\n', (7744, 7757), True, 'import tkinter.ttk as ttk\n'), ((8188, 8225), 'cv2.resize', 'cv.resize', (['img', 'self.IMAGE_DIMENSIONS'], {}), '(img, self.IMAGE_DIMENSIONS)\n', (8197, 8225), True, 'import cv2 as cv\n'), ((8240, 8255), 'cv2.flip', 'cv.flip', (['img', '(1)'], {}), '(img, 1)\n', (8247, 8255), True, 'import cv2 as cv\n'), ((8270, 8301), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (8285, 8301), True, 'import cv2 as cv\n'), ((8394, 8428), 'cv2.cvtColor', 'cv.cvtColor', (['img', 
'cv.COLOR_BGR2HSV'], {}), '(img, cv.COLOR_BGR2HSV)\n', (8405, 8428), True, 'import cv2 as cv\n'), ((8444, 8493), 'cv2.inRange', 'cv.inRange', (['hsv', 'self.lower_skin', 'self.upper_skin'], {}), '(hsv, self.lower_skin, self.upper_skin)\n', (8454, 8493), True, 'import cv2 as cv\n'), ((8509, 8529), 'cv2.bitwise_not', 'cv.bitwise_not', (['mask'], {}), '(mask)\n', (8523, 8529), True, 'import cv2 as cv\n'), ((8544, 8579), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (8558, 8579), True, 'import cv2 as cv\n'), ((8634, 8669), 'cv2.cvtColor', 'cv.cvtColor', (['res', 'cv.COLOR_BGR2RGBA'], {}), '(res, cv.COLOR_BGR2RGBA)\n', (8645, 8669), True, 'import cv2 as cv\n'), ((8699, 8723), 'PIL.Image.fromarray', 'Image.fromarray', (['cvimage'], {}), '(cvimage)\n', (8714, 8723), False, 'from PIL import Image, ImageTk\n'), ((8740, 8784), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'self.current_image'}), '(image=self.current_image)\n', (8758, 8784), False, 'from PIL import Image, ImageTk\n'), ((10215, 10229), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (10227, 10229), False, 'from configparser import ConfigParser\n'), ((11796, 11830), 'cv2.namedWindow', 'cv.namedWindow', (['"""Calibration Menu"""'], {}), "('Calibration Menu')\n", (11810, 11830), True, 'import cv2 as cv\n'), ((11839, 11894), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['"""Calibration Menu"""', 'selectRGBValue'], {}), "('Calibration Menu', selectRGBValue)\n", (11858, 11894), True, 'import cv2 as cv\n'), ((13561, 13583), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (13581, 13583), True, 'import cv2 as cv\n'), ((614, 645), 'cv2.VideoCapture', 'cv.VideoCapture', (['self.webcam_ip'], {}), '(self.webcam_ip)\n', (629, 645), True, 'import cv2 as cv\n'), ((702, 720), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (717, 720), True, 'import cv2 as cv\n'), ((935, 946), 'sys.exit', 'sys.exit', 
(['(1)'], {}), '(1)\n', (943, 946), False, 'import sys\n'), ((1223, 1253), 'os.path.join', 'os.path.join', (['sys.prefix', 'icon'], {}), '(sys.prefix, icon)\n', (1235, 1253), False, 'import os\n'), ((12010, 12047), 'cv2.resize', 'cv.resize', (['img', 'self.IMAGE_DIMENSIONS'], {}), '(img, self.IMAGE_DIMENSIONS)\n', (12019, 12047), True, 'import cv2 as cv\n'), ((12066, 12081), 'cv2.flip', 'cv.flip', (['img', '(1)'], {}), '(img, 1)\n', (12073, 12081), True, 'import cv2 as cv\n'), ((12100, 12120), 'cv2.blur', 'cv.blur', (['img', '(5, 5)'], {}), '(img, (5, 5))\n', (12107, 12120), True, 'import cv2 as cv\n'), ((12221, 12255), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2HSV'], {}), '(img, cv.COLOR_BGR2HSV)\n', (12232, 12255), True, 'import cv2 as cv\n'), ((12275, 12324), 'cv2.inRange', 'cv.inRange', (['hsv', 'self.lower_skin', 'self.upper_skin'], {}), '(hsv, self.lower_skin, self.upper_skin)\n', (12285, 12324), True, 'import cv2 as cv\n'), ((12344, 12364), 'cv2.bitwise_not', 'cv.bitwise_not', (['mask'], {}), '(mask)\n', (12358, 12364), True, 'import cv2 as cv\n'), ((12383, 12418), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (12397, 12418), True, 'import cv2 as cv\n'), ((12438, 12608), 'cv2.putText', 'cv.putText', (['res', '"""Click around your hand till its nearly completely black and press the key \'f\'"""', '(19, 19)', 'cv.FONT_HERSHEY_PLAIN', '(1.25)', '(0, 0, 0)', '(1)', 'cv.LINE_AA'], {}), '(res,\n "Click around your hand till its nearly completely black and press the key \'f\'"\n , (19, 19), cv.FONT_HERSHEY_PLAIN, 1.25, (0, 0, 0), 1, cv.LINE_AA)\n', (12448, 12608), True, 'import cv2 as cv\n'), ((12761, 12931), 'cv2.putText', 'cv.putText', (['res', '"""Click around your hand till its nearly completely black and press the key \'f\'"""', '(21, 21)', 'cv.FONT_HERSHEY_PLAIN', '(1.25)', '(0, 0, 0)', '(1)', 'cv.LINE_AA'], {}), '(res,\n "Click around your hand till its nearly completely black and press the 
key \'f\'"\n , (21, 21), cv.FONT_HERSHEY_PLAIN, 1.25, (0, 0, 0), 1, cv.LINE_AA)\n', (12771, 12931), True, 'import cv2 as cv\n'), ((13084, 13258), 'cv2.putText', 'cv.putText', (['res', '"""Click around your hand till its nearly completely black and press the key \'f\'"""', '(20, 20)', 'cv.FONT_HERSHEY_PLAIN', '(1.25)', '(255, 100, 0)', '(1)', 'cv.LINE_AA'], {}), '(res,\n "Click around your hand till its nearly completely black and press the key \'f\'"\n , (20, 20), cv.FONT_HERSHEY_PLAIN, 1.25, (255, 100, 0), 1, cv.LINE_AA)\n', (13094, 13258), True, 'import cv2 as cv\n'), ((13405, 13439), 'cv2.imshow', 'cv.imshow', (['"""Calibration Menu"""', 'res'], {}), "('Calibration Menu', res)\n", (13414, 13439), True, 'import cv2 as cv\n'), ((1154, 1179), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1169, 1179), False, 'import os\n'), ((11668, 11702), 'numpy.array', 'np.array', (['[colors[0] - 15, 20, 80]'], {}), '([colors[0] - 15, 20, 80])\n', (11676, 11702), True, 'import numpy as np\n'), ((11737, 11773), 'numpy.array', 'np.array', (['[colors[0] + 15, 255, 230]'], {}), '([colors[0] + 15, 255, 230])\n', (11745, 11773), True, 'import numpy as np\n'), ((13496, 13509), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (13506, 13509), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python
# coding: utf-8
# ## TH_EventReader
#
# This code will load TH events using cmlreaders and then find the missing path data using the log files.
import os
import warnings
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from cmlreaders import CMLReader, get_data_index
def get_cmlevents(subj, montage=None, session=None, exp='TH1'):
    """ Returns the reformatted events df for subj and mont.
    This events struct does not include pathInfo, since that isn't
    recorded in the system used by cmlreaders. To get pathInfo you
    need to use `read_path_log`.
    Args:
        subj (str): Subject code.
        montage (int, optional): Defaults to the first montage listed
            for this subject/experiment in the data index.
        session (int, optional): Defaults to the first session listed
            for this subject/experiment in the data index.
        exp (str): Experiment name.
    Returns:
        pd.DataFrame of events with added columns
        ['original_session_ID', 'subject_alias'].
    """
    #------Load data index for RAM
    df = get_data_index("r1")
    #------Specify the df for this subject and exp
    this_df = df[(df['subject']==subj) & (df['experiment']==exp)]
    #------Find out the sessions, localization, and montage for this subject
    if session is None: # default to first sess
        session = this_df['session'].iloc[0]
    if montage is None: # default to first mont
        montage = this_df['montage'].iloc[0]
    #------Get more specific df
    this_specific_df = (this_df[(this_df['session'] == session)
                        & (this_df['montage'] == montage)])
    # use the .iloc indexer attribute (not .iloc()[0], which only worked
    # by accident because calling the indexer returns the indexer itself)
    loc = int(this_specific_df['localization'].iloc[0])
    #-------Subjs with a montage above 0 have aliases used in log files
    subject_alias = this_specific_df['subject_alias'].iloc[0]
    # ^ use .iloc[0] bc this_specific_df has only one item
    #------For some subjs the sess ID system changed over time,
    # and we need to know the original sess ID for certain log
    # files access
    orig_sess_ID = this_specific_df['original_session'].iloc[0]
    if isinstance(orig_sess_ID, str):
        # Convert via float first in case the string encodes NaN;
        # guard int() so a NaN float cannot raise ValueError.
        orig_sess_ID = np.float64(orig_sess_ID)
        if not np.isnan(orig_sess_ID) and orig_sess_ID == int(orig_sess_ID):
            orig_sess_ID = int(orig_sess_ID)
    if np.isnan(orig_sess_ID):
        # no recorded original ID -> the session number is unchanged
        orig_sess_ID = session
    #------Use CMLReader to read the events structure
    reader = CMLReader(subj, exp, session=session,
                       montage=montage, localization=loc)
    events = reader.load('events')
    events['original_session_ID'] = orig_sess_ID
    events['subject_alias'] = subject_alias
    # remove the unhelpful and inconsistent SESS_START event
    events = events[events['type'] != 'SESS_START']
    return events
def get_baseline_mstimes(events):
    """ Reads the .txt logfile to find the start and end mstimes
    For all baseline periods. As defined in Miller et. al (2018),
    baseline periods for TH are the time at the start of a trial
    before navigation.
    Args:
        events (pd.DataFrame): must include 'subject_alias',
            'original_session_ID', 'experiment', and 'trial' columns.
    Returns:
        events (pd.DataFrame) with added fields:
            ['baseline_start', 'baseline_end']
    """
    # Work on a copy so the caller's DataFrame is untouched.
    events = events.copy()
    # One row per (subject alias, original session) -> one log file each.
    monts_and_sess = events[['subject_alias', 'original_session_ID']].drop_duplicates()
    exp = events['experiment'].iloc[0]
    # get the baseline data
    base_dfs = []
    for (index, (subj_str, sess)) in monts_and_sess.iterrows():
        # Log path is hardcoded to the lab's /data10 filesystem layout.
        log_file = (f'/data10/RAM/subjects/{subj_str}'
                    f'/behavioral/{exp}/session_{sess}/{subj_str}Log.txt')
        with open(log_file, 'r') as f:
            log = f.read().split('\n')
        baseline_starts = []
        baseline_ends = []
        current_start = np.nan
        for line in log:
            # Log lines are tab-separated; tokens[0] is the mstime stamp.
            tokens = line.split('\t')
            for i, token in enumerate(tokens):
                # whichever of these start tokens appears
                # last before the nav will be the baseline start period
                # this is because its a little inconsistent about which appears
                if (token == 'HOMEBASE_TRANSPORT_ENDED'
                    or token == 'HOMEBASE_TRANSPORT_STARTED'
                    or token == 'SHOWING_INSTRUCTIONS'
                    ):
                    current_start = int(tokens[0])
                elif token == 'TRIAL_NAVIGATION_STARTED':
                    # End of the baseline; record one (start, end) pair
                    # per trial, in trial order.
                    baseline_ends.append(int(tokens[0]))
                    baseline_starts.append(current_start)
        base_dfs.append(pd.DataFrame(
            {'baseline_start': baseline_starts, 'baseline_end': baseline_ends,
             'session': sess}))
    # put the baseline data into the events
    events['baseline_start'] = np.nan
    events['baseline_end'] = np.nan
    for df in base_dfs:
        sess_locs = events['original_session_ID']==df['session'].iloc[0]
        for trial in events[sess_locs]['trial'].unique():
            # NOTE(review): df.loc[trial, ...] assumes trial numbers match
            # the default 0..n-1 integer index of the per-session df (i.e.
            # trials appear in the log in order, starting at 0) -- confirm.
            events.loc[sess_locs&(events['trial']==trial),
                       'baseline_start'] = df.loc[trial, 'baseline_start']
            events.loc[sess_locs&(events['trial']==trial),
                       'baseline_end'] = df.loc[trial, 'baseline_end']
    events['baseline_start'] = events['baseline_start'].astype(int)
    events['baseline_end'] = events['baseline_end'].astype(int)
    return events
def read_path_log(events):
    """ Reads the .par log file of navigation data and organizes it to
    fit with the rest of the events DataFrame.
    Args:
        events (pd.DataFrame): Events struct including 'subject'
            and 'original_session_ID', and event info.
    Returns
        pd.DataFrame of events with added 'pathInfo' column, where each
        cell holds a list of dicts with keys
        ['mstime', 'x', 'y', 'heading']
    """
    #-----Setup
    events = events.copy()
    monts_and_sess = events[['subject_alias', 'original_session_ID']].drop_duplicates()
    exp = events['experiment'].iloc[0]
    #------This array will hold all the path data:
    events['pathInfo'] = [[] for i in range(len(events))]
    def add_path_info(trial, chestNum, pathInfo):
        """Helper method to insert pathInfo to events df."""
        locs = ((events['trial']==trial)
                &(events['chestNum']==chestNum))
        if events.loc[locs].empty:
            # sometimes there's a path that doesn't correlate to an event
            # for example if he ran out of time before reaching the chest
            return
        # need to ignore settingWithCopyWarning here
        # NOTE(review): this relies on chained assignment
        # (events['pathInfo'].loc[i] = ...) writing through to `events`;
        # it works under the suppressed option but is fragile by design.
        pd.set_option('mode.chained_assignment',None)
        for i, event in events[locs].iterrows():
            events['pathInfo'].loc[i] = pathInfo
        pd.reset_option("mode.chained_assignment")
    #------Iterate sessions bc each has its own par file
    for (index, (subj_str, sess)) in monts_and_sess.iterrows():
        #------Read the par log file
        par_file = (f'/data10/RAM/subjects/{subj_str}'
                    f'/behavioral/{exp}/session_{sess}/playerPaths.par')
        with open(par_file) as f:
            lines = f.read().split('\n')
        #------Init tracking vars
        current_event_start = 0
        trial = '?'
        chestNum = '?'
        event_pathInfo = []
        #------Go through the log file data
        for line_num, line in enumerate(lines):
            #------Collect the path data in this line of the log file
            # the log file has lines containing [mstime, event_start_mstime, mstime_relative_to_event_start,
            # trial, chestNum, x, y, heading]
            # this dict will eventually have the keys: ['mstime', 'x', 'y', 'heading']
            path_data = {}
            for index, token in enumerate(line.split('\t')): # see each datum in line
                if token == '':
                    # this is the empty last line of the log file
                    break
                if index == 0: # event mstime
                    mstime = int(token)
                    path_data['mstime'] = mstime
                elif index == 1: # event start mstime
                    token_event_start = int(token)
                    #------Check if on a new event
                    if token_event_start > current_event_start:
                        if current_event_start > 0: # if its 0 that means we are on 1st line of log file
                            # add this event's pathInfo to the events df
                            add_path_info(trial, chestNum, event_pathInfo)
                        event_pathInfo = []
                        current_event_start = token_event_start
                elif index == 3:
                    trial = int(token)
                elif index == 4:
                    # this is the chestNum. It is logged at starting
                    # from chest 0, but in python events the first chest
                    # has chestNum 1, so we need to add 1 to the value
                    chestNum = int(token)+1
                elif index == 5: # player x position
                    path_data['x'] = float(token)
                elif index == 6: # player y position
                    path_data['y'] = float(token)
                elif index == 7: # player heading
                    path_data['heading'] = float(token)
            if path_data: # add this data to the event's pathInfo
                event_pathInfo.append(path_data)
        # flush the final event's path (no later event triggers it above)
        if event_pathInfo:
            add_path_info(trial, chestNum, event_pathInfo)
    return events
def get_nav_epochs(path):
    """ Given path data of 1 event, determine the start
    and end of the navigation epoch.
    Args:
        path (list): list of path datapoints each containing
            ['mstime', 'x', 'y', 'heading']
    Returns:
        move_start (int): start of the nav epoch, in mstime.
        move_end (int): end of the nav epoch, in msitme.
    """
    last_stamp = path[-1]['mstime']
    # Default both endpoints to the final timestamp (no movement case).
    move_start = last_stamp
    move_end = last_stamp
    moving = False
    rest_pos = [np.nan, np.nan, np.nan]
    prev = None
    for point in path:
        cur = [point['x'], point['y'], point['heading']]
        if prev is not None and cur != prev:
            # Position or heading changed since the previous sample:
            # the subject is (still) moving.
            move_start = point['mstime']
            moving = True
        elif moving and cur != rest_pos:
            # Movement has begun and the subject is now paused at a new
            # resting position. If cur equals rest_pos we are inside the
            # same pause, not a new one; we keep rewriting move_end so it
            # ends up marking the final movement break.
            move_end = point['mstime']
            rest_pos = cur
        prev = cur
    return move_start, move_end
def get_savename(subj, montage, session, exp):
    """ Returns a relevant file path for saving events data,
    creating any missing directories along the way.
    Args:
        subj (str)
        montage (int)
        session (int)
        exp (str)
    Returns:
        str file path ending in '.pkl'
    """
    # Montages above 0 are saved under an aliased subject name.
    name = f'{subj}_{montage}' if montage > 0 else subj
    # Anchor the data directory next to the package's 'src' tree...
    data_dir = __file__.split('src')[0] + 'data/'
    if not os.path.exists(data_dir):
        # ...or, if installed with pip, next to this module instead.
        data_dir = __file__.split('TH_EventReader.py')[0] + 'data/'
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)
    # Build data/<exp>/<subj>/ one level at a time, creating as needed.
    save_dir = data_dir
    for part in (exp, name):
        save_dir = save_dir + part + '/'
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)
    return save_dir + f'session_{session}.pkl'
def save_events(events, subj, montage, session, exp):
    """Pickle *events* to the canonical per-session file path."""
    events.to_pickle(get_savename(subj, montage, session, exp))
def load_events(subj, montage, session, exp):
    """Unpickle and return the events saved for this subj/session."""
    fname = get_savename(subj, montage, session, exp)
    return pd.read_pickle(fname)
def get_events(subj, montage, session, exp,
               recalc=False, save=True):
    """ Returns the reformatted events df with 'pathInfo' and
    navigation-epoch columns, loading from cache when possible.
    Args:
        subj (str)
        montage (int)
        session (int)
        exp (str)
        recalc (bool): If False, attempts to load presaved data.
        save (bool): If True, will save the events in a filepath
            determined by `get_savename`.
    Returns:
        pd.DataFrame containing events
    """
    # Fast path: return the cached pickle when allowed and readable.
    if not recalc:
        cache_path = get_savename(subj, montage, session, exp)
        if os.path.exists(cache_path):
            try:
                return load_events(subj, montage, session, exp)
            except:
                pass  # unreadable cache -> fall through and recompute
    # Full pipeline: base events -> baselines -> paths -> nav epochs.
    events = get_cmlevents(subj, montage, session, exp)
    events = get_baseline_mstimes(events)
    events = read_path_log(events)
    nav_starts = []
    nav_ends = []
    for _, row in events.iterrows():
        start, end = get_nav_epochs(row['pathInfo'])
        nav_starts.append(start)
        nav_ends.append(end)
    events['nav_start'] = nav_starts
    events['nav_end'] = nav_ends
    if save:
        save_events(events, subj, montage, session, exp)
    return events
def get_monts_and_sess_pairs(subj, exp='TH1'):
    """ Returns a df of mont/sess pairs for this subj in this exp.
    iterate easily through this list in the following format:
    "for (index, (mont, sess)) in monts_and_sess_pairs(subj).iterrows():"
    """
    index_df = get_data_index("r1")
    mask = (index_df['subject'] == subj) & (index_df['experiment'] == exp)
    pairs = index_df.loc[mask, ['montage', 'session']]
    # Renumber rows 0..n-1 so positional and label indexing agree.
    pairs.index = range(len(pairs))
    return pairs
def exp_df(exp='TH1'):
    """ Returns a df with ['subj', 'montage', 'session', 'exp']
    for each subj in this experiment. Use this for iterations.
    """
    df = get_data_index("r1")
    # Copy the slice so the pop()s below modify an independent frame;
    # this removes the SettingWithCopyWarning at the source instead of
    # suppressing it with warnings.filterwarnings/resetwarnings (which
    # clobbered every warning filter configured by the caller).
    df = df[df['experiment'] == exp].copy()
    df['subj'] = df.pop('subject')
    df['exp'] = df.pop('experiment')
    return df[['subj', 'montage', 'session', 'exp']]
def reload_all(exp):
    """ Reloads all events from a particular experiment. Helpful if the events have
    been previously loaded and saved with an older version of TH_EventReader.
    """
    df = exp_df(exp)
    for i, row in df.iterrows():
        # Iterating a Series yields its values, so this prints
        # [subj, montage, session, exp] for the row being processed.
        print([key for key in row], end=' -> ')
        try:
            # recalc=True forces a fresh build, overwriting the saved pickle.
            events = get_events(**row, recalc=True)
            print('Success!')
        except FileNotFoundError as e:
print(e) | [
"os.path.exists",
"pandas.reset_option",
"numpy.float64",
"warnings.resetwarnings",
"pandas.set_option",
"numpy.isnan",
"cmlreaders.CMLReader",
"os.mkdir",
"pandas.DataFrame",
"cmlreaders.get_data_index",
"warnings.filterwarnings"
] | [((678, 698), 'cmlreaders.get_data_index', 'get_data_index', (['"""r1"""'], {}), "('r1')\n", (692, 698), False, 'from cmlreaders import CMLReader, get_data_index\n'), ((1955, 1977), 'numpy.isnan', 'np.isnan', (['orig_sess_ID'], {}), '(orig_sess_ID)\n', (1963, 1977), True, 'import numpy as np\n'), ((2077, 2149), 'cmlreaders.CMLReader', 'CMLReader', (['subj', 'exp'], {'session': 'session', 'montage': 'montage', 'localization': 'loc'}), '(subj, exp, session=session, montage=montage, localization=loc)\n', (2086, 2149), False, 'from cmlreaders import CMLReader, get_data_index\n'), ((13629, 13649), 'cmlreaders.get_data_index', 'get_data_index', (['"""r1"""'], {}), "('r1')\n", (13643, 13649), False, 'from cmlreaders import CMLReader, get_data_index\n'), ((14024, 14057), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (14047, 14057), False, 'import warnings\n'), ((14067, 14087), 'cmlreaders.get_data_index', 'get_data_index', (['"""r1"""'], {}), "('r1')\n", (14081, 14087), False, 'from cmlreaders import CMLReader, get_data_index\n'), ((14201, 14225), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (14223, 14225), False, 'import warnings\n'), ((1784, 1808), 'numpy.float64', 'np.float64', (['orig_sess_ID'], {}), '(orig_sess_ID)\n', (1794, 1808), True, 'import numpy as np\n'), ((6214, 6260), 'pandas.set_option', 'pd.set_option', (['"""mode.chained_assignment"""', 'None'], {}), "('mode.chained_assignment', None)\n", (6227, 6260), True, 'import pandas as pd\n'), ((6366, 6408), 'pandas.reset_option', 'pd.reset_option', (['"""mode.chained_assignment"""'], {}), "('mode.chained_assignment')\n", (6381, 6408), True, 'import pandas as pd\n'), ((11158, 11182), 'os.path.exists', 'os.path.exists', (['main_dir'], {}), '(main_dir)\n', (11172, 11182), False, 'import os\n'), ((11402, 11425), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', (11416, 11425), False, 'import os\n'), ((11435, 11452), 
'os.mkdir', 'os.mkdir', (['exp_dir'], {}), '(exp_dir)\n', (11443, 11452), False, 'import os\n'), ((11501, 11525), 'os.path.exists', 'os.path.exists', (['subj_dir'], {}), '(subj_dir)\n', (11515, 11525), False, 'import os\n'), ((11535, 11553), 'os.mkdir', 'os.mkdir', (['subj_dir'], {}), '(subj_dir)\n', (11543, 11553), False, 'import os\n'), ((12601, 12627), 'os.path.exists', 'os.path.exists', (['save_fname'], {}), '(save_fname)\n', (12615, 12627), False, 'import os\n'), ((4249, 4350), 'pandas.DataFrame', 'pd.DataFrame', (["{'baseline_start': baseline_starts, 'baseline_end': baseline_ends,\n 'session': sess}"], {}), "({'baseline_start': baseline_starts, 'baseline_end':\n baseline_ends, 'session': sess})\n", (4261, 4350), True, 'import pandas as pd\n'), ((11299, 11323), 'os.path.exists', 'os.path.exists', (['main_dir'], {}), '(main_dir)\n', (11313, 11323), False, 'import os\n'), ((11337, 11355), 'os.mkdir', 'os.mkdir', (['main_dir'], {}), '(main_dir)\n', (11345, 11355), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# simplified bsd-3 license
"""Script for basic auditory oddball paradigm with 4:1 ratio of standards to deviants using
designated wav files from HD. Stimulus sequence is pseudorandomized such that deviants never
occur consecutively and are separated by at least 3 standards.
Notes:
Formula for converting decimal integer into number of bits borrowed from
http://www.exploringbinary.com/number-of-bits-in-a-decimal-integer
"""
import numpy as np
from os import path as op
from paradigm.expyfun import ExperimentController
from paradigm.expyfun.stimuli import read_wav
from paradigm.expyfun._trigger_controllers import decimals_to_binary
from paradigm.expyfun import assert_version
assert_version('8511a4d')
def presentation_order(n_pres, n_standard_follow, seed):
    """Return psuedorandomized trial-type array.

    Args:
        n_pres (sequence of int): number of presentations requested for
            each trial type (index 0 is the standard; higher indices
            are deviants).
        n_standard_follow (int): number of standards forced to follow
            each deviant, so deviants never occur back to back.
        seed (int): seed for the random number generator.

    Returns:
        np.ndarray of int trigger codes (trial-type index + 2),
        prefixed with 20 standards (code 2).
    """
    # Work on a copy: the original implementation decremented the
    # caller's list in place, mutating their argument.
    n_pres = list(n_pres)
    rand = np.random.RandomState(seed)
    n_trial = sum(n_pres)
    ti = 0
    order = np.zeros(n_trial, dtype=int)
    while ti < n_trial:
        # Sample a trial type weighted by how many presentations remain.
        trial_type = np.where(sum(n_pres) * rand.rand()
                              < np.cumsum(n_pres))[0][0]
        order[ti] = trial_type
        ti += 1
        n_pres[trial_type] -= 1
        if trial_type > 0:
            # Force standards right after a deviant. NOTE: these forced
            # slots do not decrement n_pres[0], so realized counts can
            # drift from the requested ones (preserved original design).
            order[ti:ti + n_standard_follow] = 0
            ti += n_standard_follow
    # Shift type indices to trigger codes and prepend 20 standards.
    order = order + 2
    order = np.insert(order, 0, np.ones(20) * 2)
    return order
# Stimuli live next to this script; keys are the trigger codes stamped
# to the acquisition system for each syllable.
stim_dir = op.join(op.dirname(__file__), 'stimuli/mmn')
sound_files = {2: op.join(stim_dir, 'Dp01bw6-rms.wav'),  # midpoint standard
               3: op.join(stim_dir, 'Dp01bw1-rms.wav'),  # ba endpoint
               4: op.join(stim_dir, 'Dp01bw10-rms.wav')}  # wa endpoint
# NOTE(review): `wavs` is never used below (each trial re-reads its wav),
# and read_wav presumably returns a (data, fs) tuple, so wrapping it in
# ascontiguousarray looks questionable -- confirm before relying on it.
wavs = [np.ascontiguousarray(read_wav(v))
        for _, v in sorted(sound_files.items())]
# Begin experiment
with ExperimentController('syllable', stim_db=80, stim_fs=24414, stim_rms=0.01,
                          check_rms=None, suppress_resamp=True) as ec:
    # convert participant to int
    seed = int(ec.participant) if ec.participant else 555
    # 480 standards, 100 of each deviant; >=3 standards after a deviant.
    trials = presentation_order([480, 100, 100], 3, seed)
    # convert number of unique trial types into number of bits
    n_bits = int(np.floor(np.log2(max(trials)))) + 1
    rng = np.random.RandomState(seed)  # seed generator with participant
    last_time = -np.inf
    for trial in trials:
        # stamp trigger line prior to stimulus onset
        trial_name = op.basename(sound_files[trial][:-4])
        # NOTE(review): sound_files[trial] is already an absolute path, so
        # this op.join simply returns it unchanged -- harmless but redundant.
        wav = read_wav(op.join(stim_dir, sound_files[trial]))
        ec.clear_buffer()
        ec.load_buffer(wav[0])
        ec.identify_trial(ec_id=trial_name,
                          ttl_id=decimals_to_binary([trial], [n_bits]))
        # our next start time is our last start time, plus
        # the stimulus duration, plus min wait time, plus random amount
        stim_len = float(len(wav[0])) / ec.fs  # in seconds
        when = last_time + stim_len + .5 + rng.rand(1)
        ec.write_data_line('soa', value=when - last_time)
        last_time = ec.start_stimulus(when=when)  # ustamps stimulus onset
        # wait through tone duration to stop the playback
        ec.wait_secs(stim_len)
        ec.stop()
        ec.trial_ok()
        ec.check_force_quit()  # make sure we're not trying to quit
| [
"numpy.ones",
"paradigm.expyfun.ExperimentController",
"paradigm.expyfun._trigger_controllers.decimals_to_binary",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"os.path.basename",
"numpy.cumsum",
"paradigm.expyfun.stimuli.read_wav",
"paradigm.expyfun.assert_version",
"numpy.random.RandomSta... | [((806, 831), 'paradigm.expyfun.assert_version', 'assert_version', (['"""8511a4d"""'], {}), "('8511a4d')\n", (820, 831), False, 'from paradigm.expyfun import assert_version\n'), ((942, 969), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (963, 969), True, 'import numpy as np\n'), ((1019, 1047), 'numpy.zeros', 'np.zeros', (['n_trial'], {'dtype': 'int'}), '(n_trial, dtype=int)\n', (1027, 1047), True, 'import numpy as np\n'), ((1494, 1514), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (1504, 1514), True, 'from os import path as op\n'), ((1549, 1585), 'os.path.join', 'op.join', (['stim_dir', '"""Dp01bw6-rms.wav"""'], {}), "(stim_dir, 'Dp01bw6-rms.wav')\n", (1556, 1585), True, 'from os import path as op\n'), ((1626, 1662), 'os.path.join', 'op.join', (['stim_dir', '"""Dp01bw1-rms.wav"""'], {}), "(stim_dir, 'Dp01bw1-rms.wav')\n", (1633, 1662), True, 'from os import path as op\n'), ((1697, 1734), 'os.path.join', 'op.join', (['stim_dir', '"""Dp01bw10-rms.wav"""'], {}), "(stim_dir, 'Dp01bw10-rms.wav')\n", (1704, 1734), True, 'from os import path as op\n'), ((1867, 1983), 'paradigm.expyfun.ExperimentController', 'ExperimentController', (['"""syllable"""'], {'stim_db': '(80)', 'stim_fs': '(24414)', 'stim_rms': '(0.01)', 'check_rms': 'None', 'suppress_resamp': '(True)'}), "('syllable', stim_db=80, stim_fs=24414, stim_rms=0.01,\n check_rms=None, suppress_resamp=True)\n", (1887, 1983), False, 'from paradigm.expyfun import ExperimentController\n'), ((2288, 2315), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2309, 2315), True, 'import numpy as np\n'), ((1780, 1791), 'paradigm.expyfun.stimuli.read_wav', 'read_wav', (['v'], {}), '(v)\n', (1788, 1791), False, 'from paradigm.expyfun.stimuli import read_wav\n'), ((2474, 2510), 'os.path.basename', 'op.basename', (['sound_files[trial][:-4]'], {}), '(sound_files[trial][:-4])\n', (2485, 2510), True, 'from os import 
path as op\n'), ((1439, 1450), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (1446, 1450), True, 'import numpy as np\n'), ((2534, 2571), 'os.path.join', 'op.join', (['stim_dir', 'sound_files[trial]'], {}), '(stim_dir, sound_files[trial])\n', (2541, 2571), True, 'from os import path as op\n'), ((2715, 2752), 'paradigm.expyfun._trigger_controllers.decimals_to_binary', 'decimals_to_binary', (['[trial]', '[n_bits]'], {}), '([trial], [n_bits])\n', (2733, 2752), False, 'from paradigm.expyfun._trigger_controllers import decimals_to_binary\n'), ((1160, 1177), 'numpy.cumsum', 'np.cumsum', (['n_pres'], {}), '(n_pres)\n', (1169, 1177), True, 'import numpy as np\n')] |
""" This file defines the BADMM-based GPS algorithm. """
import copy
import logging
import numpy as np
import scipy as sp
import sys
# sys.path.append('/'.join(str.split(__file__, '/')[:-2]))
from gps.algorithm.algorithm import Algorithm
from gps.algorithm.algorithm_utils import PolicyInfo
from gps.algorithm.config import ALG_BADMM
from gps.sample.sample_list import SampleList
LOGGER = logging.getLogger(__name__)
class AlgorithmBADMM(Algorithm):
"""
Sample-based joint policy learning and trajectory optimization with
BADMM-based guided policy search algorithm.
"""
    def __init__(self, hyperparams):
        """
        Args:
            hyperparams: Dictionary of hyperparameters; merged on top of
                the ALG_BADMM defaults (deep-copied so defaults are not
                mutated).
        """
        config = copy.deepcopy(ALG_BADMM)
        config.update(hyperparams)
        Algorithm.__init__(self, config)
        # Attach per-condition policy info (and its prior, built from the
        # configured prior type) to each condition's iteration data.
        policy_prior = self._hyperparams['policy_prior']
        for m in range(self.M):
            self.cur[m].pol_info = PolicyInfo(self._hyperparams)
            self.cur[m].pol_info.policy_prior = \
                policy_prior['type'](policy_prior)
        # Construct the global policy optimizer from its configured type.
        self.policy_opt = self._hyperparams['policy_opt']['type'](
            self._hyperparams['policy_opt'], self.dO, self.dU
        )
    def iteration(self, sample_lists):
        """
        Run iteration of BADMM-based guided policy search.
        Args:
            sample_lists: List of SampleList objects for each condition.
        """
        # Store the fresh samples for each condition.
        for m in range(self.M):
            self.cur[m].sample_list = sample_lists[m]
        self._set_interp_values()
        self._update_dynamics()  # Update dynamics model using all sample.
        self._update_step_size()  # KL Divergence step size.
        for m in range(self.M):
            # save initial kl for debugging / visualization
            self.cur[m].pol_info.init_kl = self._policy_kl(m)[0]
        # Run inner loop to compute new policies.
        for inner_itr in range(self._hyperparams['inner_iterations']):
            #TODO: Could start from init controller.
            if self.iteration_count > 0 or inner_itr > 0:
                # Update the policy (skipped on the very first pass,
                # before any trajectory data exists to fit against).
                self._update_policy(inner_itr)
            for m in range(self.M):
                self._update_policy_fit(m)  # Update policy priors.
            if self.iteration_count > 0 or inner_itr > 0:
                # `step` marks the final inner iteration.
                step = (inner_itr == self._hyperparams['inner_iterations'] - 1)
                # Update dual variables.
                for m in range(self.M):
                    self._policy_dual_step(m, step=step)
            # Re-optimize the local trajectory distributions each pass.
            self._update_trajectories()
        self._advance_iteration_variables()
def _set_interp_values(self):
"""
Use iteration-based interpolation to set values of some
schedule-based parameters.
"""
# Compute temporal interpolation value.
t = min((self.iteration_count + 1.0) /
(self._hyperparams['iterations'] - 1), 1)
# Perform iteration-based interpolation of entropy penalty.
if type(self._hyperparams['ent_reg_schedule']) in (int, float):
self.policy_opt.set_ent_reg(self._hyperparams['ent_reg_schedule'])
else:
sch = self._hyperparams['ent_reg_schedule']
self.policy_opt.set_ent_reg(
np.exp(np.interp(t, np.linspace(0, 1, num=len(sch)),
np.log(sch)))
)
# Perform iteration-based interpolation of Lagrange multiplier.
if type(self._hyperparams['lg_step_schedule']) in (int, float):
self._hyperparams['lg_step'] = self._hyperparams['lg_step_schedule']
else:
sch = self._hyperparams['lg_step_schedule']
self._hyperparams['lg_step'] = np.exp(
np.interp(t, np.linspace(0, 1, num=len(sch)), np.log(sch))
)
def _update_step_size(self):
""" Evaluate costs on samples, and adjust the step size. """
# Evaluate cost function for all conditions and samples.
for m in range(self.M):
self._update_policy_fit(m, init=True)
self._eval_cost(m)
# Adjust step size relative to the previous iteration.
if self.iteration_count >= 1 and self.prev[m].sample_list:
self._stepadjust(m)
def _update_policy(self, inner_itr):
""" Compute the new policy. """
dU, dO, T = self.dU, self.dO, self.T
# Compute target mean, cov, and weight for each sample.
obs_data, tgt_mu = np.zeros((0, T, dO)), np.zeros((0, T, dU))
tgt_prc, tgt_wt = np.zeros((0, T, dU, dU)), np.zeros((0, T))
for m in range(self.M):
samples = self.cur[m].sample_list
X = samples.get_X()
N = len(samples)
if inner_itr > 0:
traj, pol_info = self.new_traj_distr[m], self.cur[m].pol_info
else:
traj, pol_info = self.cur[m].traj_distr, self.cur[m].pol_info
mu = np.zeros((N, T, dU))
prc = np.zeros((N, T, dU, dU))
wt = np.zeros((N, T))
# Get time-indexed actions.
for t in range(T):
# Compute actions along this trajectory.
prc[:, t, :, :] = np.tile(traj.inv_pol_covar[t, :, :],
[N, 1, 1])
for i in range(N):
mu[i, t, :] = \
(traj.K[t, :, :].dot(X[i, t, :]) + traj.k[t, :]) - \
np.linalg.solve(
prc[i, t, :, :] / pol_info.pol_wt[t],
pol_info.lambda_K[t, :, :].dot(X[i, t, :]) + \
pol_info.lambda_k[t, :]
)
wt[:, t].fill(pol_info.pol_wt[t])
tgt_mu = np.concatenate((tgt_mu, mu))
tgt_prc = np.concatenate((tgt_prc, prc))
tgt_wt = np.concatenate((tgt_wt, wt))
obs_data = np.concatenate((obs_data, samples.get_obs()))
self.policy_opt.update(obs_data, tgt_mu, tgt_prc, tgt_wt)
def _update_policy_fit(self, m, init=False):
"""
Re-estimate the local policy values in the neighborhood of the
trajectory.
Args:
m: Condition
init: Whether this is the initial fitting of the policy.
"""
dX, dU, T = self.dX, self.dU, self.T
# Choose samples to use.
samples = self.cur[m].sample_list
N = len(samples)
pol_info = self.cur[m].pol_info
X = samples.get_X()
obs = samples.get_obs().copy()
pol_mu, pol_sig = self.policy_opt.prob(obs)[:2]
pol_info.pol_mu, pol_info.pol_sig = pol_mu, pol_sig
# Update policy prior.
policy_prior = pol_info.policy_prior
if init:
samples = SampleList(self.cur[m].sample_list)
mode = self._hyperparams['policy_sample_mode']
else:
samples = SampleList([])
mode = 'add' # Don't replace with empty samples
policy_prior.update(samples, self.policy_opt, mode)
# Fit linearization and store in pol_info.
pol_info.pol_K, pol_info.pol_k, pol_info.pol_S = \
policy_prior.fit(X, pol_mu, pol_sig)
for t in range(T):
pol_info.chol_pol_S[t, :, :] = \
sp.linalg.cholesky(pol_info.pol_S[t, :, :])
def _policy_dual_step(self, m, step=False):
"""
Update the dual variables for the specified condition.
Args:
m: Condition
step: Whether or not to update pol_wt.
"""
dU, T = self.dU, self.T
samples = self.cur[m].sample_list
N = len(samples)
X = samples.get_X()
if 'new_traj_distr' in dir(self):
traj, pol_info = self.new_traj_distr[m], self.cur[m].pol_info
else:
traj, pol_info = self.cur[m].traj_distr, self.cur[m].pol_info
# Compute trajectory action at each sampled state.
traj_mu = np.zeros((N, T, dU))
for i in range(N):
for t in range(T):
traj_mu[i, t, :] = traj.K[t, :, :].dot(X[i, t, :]) + \
traj.k[t, :]
# Compute policy action at each sampled state.
pol_mu = pol_info.pol_mu
# Compute the difference and increment based on pol_wt.
for t in range(T):
tU, pU = traj_mu[:, t, :], pol_mu[:, t, :]
# Increment mean term.
pol_info.lambda_k[t, :] -= self._hyperparams['policy_dual_rate'] * \
pol_info.pol_wt[t] * \
traj.inv_pol_covar[t, :, :].dot(np.mean(tU - pU, axis=0))
# Increment covariance term.
t_covar, p_covar = traj.K[t, :, :], pol_info.pol_K[t, :, :]
pol_info.lambda_K[t, :, :] -= \
self._hyperparams['policy_dual_rate_covar'] * \
pol_info.pol_wt[t] * \
traj.inv_pol_covar[t, :, :].dot(t_covar - p_covar)
# Compute KL divergence.
kl_m = self._policy_kl(m)[0]
if step:
lg_step = self._hyperparams['lg_step']
# Increment pol_wt based on change in KL divergence.
if self._hyperparams['fixed_lg_step'] == 1:
# Take fixed size step.
pol_info.pol_wt = np.array([
max(wt + lg_step, 0) for wt in pol_info.pol_wt
])
elif self._hyperparams['fixed_lg_step'] == 2:
# (In/De)crease based on change in constraint
# satisfaction.
if hasattr(pol_info, 'prev_kl'):
kl_change = kl_m / pol_info.prev_kl
for i in range(len(pol_info.pol_wt)):
if kl_change[i] < 0.8:
pol_info.pol_wt[i] *= 0.5
elif kl_change[i] >= 0.95:
pol_info.pol_wt[i] *= 2.0
elif self._hyperparams['fixed_lg_step'] == 3:
# (In/De)crease based on difference from average.
if hasattr(pol_info, 'prev_kl'):
lower = np.mean(kl_m) - \
self._hyperparams['exp_step_lower'] * np.std(kl_m)
upper = np.mean(kl_m) + \
self._hyperparams['exp_step_upper'] * np.std(kl_m)
for i in range(len(pol_info.pol_wt)):
if kl_m[i] < lower:
pol_info.pol_wt[i] *= \
self._hyperparams['exp_step_decrease']
elif kl_m[i] >= upper:
pol_info.pol_wt[i] *= \
self._hyperparams['exp_step_increase']
else:
# Standard DGD step.
pol_info.pol_wt = np.array([
max(pol_info.pol_wt[t] + lg_step * kl_m[t], 0)
for t in range(T)
])
pol_info.prev_kl = kl_m
def _advance_iteration_variables(self):
"""
Move all 'cur' variables to 'prev', reinitialize 'cur'
variables, and advance iteration counter.
"""
Algorithm._advance_iteration_variables(self)
for m in range(self.M):
self.cur[m].traj_info.last_kl_step = \
self.prev[m].traj_info.last_kl_step
self.cur[m].pol_info = copy.deepcopy(self.prev[m].pol_info)
def _stepadjust(self, m):
"""
Calculate new step sizes.
Args:
m: Condition
"""
# Compute values under Laplace approximation. This is the policy
# that the previous samples were actually drawn from under the
# dynamics that were estimated from the previous samples.
prev_laplace_obj, prev_laplace_kl = self._estimate_cost(
self.prev[m].traj_distr, self.prev[m].traj_info, self.prev[m].pol_info, m
)
# This is the policy that we just used under the dynamics that
# were estimated from the previous samples (so this is the cost
# we thought we would have).
new_pred_laplace_obj, new_pred_laplace_kl = self._estimate_cost(
self.cur[m].traj_distr, self.prev[m].traj_info, self.prev[m].pol_info, m
)
# This is the actual cost we have under the current trajectory
# based on the latest samples.
new_actual_laplace_obj, new_actual_laplace_kl = self._estimate_cost(
self.cur[m].traj_distr, self.cur[m].traj_info, self.cur[m].pol_info, m
)
# Measure the entropy of the current trajectory (for printout).
ent = self._measure_ent(m)
# Compute actual objective values based on the samples.
prev_mc_obj = np.mean(np.sum(self.prev[m].cs, axis=1), axis=0)
new_mc_obj = np.mean(np.sum(self.cur[m].cs, axis=1), axis=0)
# Compute sample-based estimate of KL divergence between policy
# and trajectories.
new_mc_kl = self._policy_kl(m)[0]
if self.iteration_count >= 1 and self.prev[m].sample_list:
prev_mc_kl = self._policy_kl(m, prev=True)[0]
else:
prev_mc_kl = np.zeros_like(new_mc_kl)
# Compute full policy KL divergence objective terms by applying
# the Lagrange multipliers.
pol_wt = self.cur[m].pol_info.pol_wt
prev_laplace_kl_sum = np.sum(prev_laplace_kl * pol_wt)
new_pred_laplace_kl_sum = np.sum(new_pred_laplace_kl * pol_wt)
new_actual_laplace_kl_sum = np.sum(new_actual_laplace_kl * pol_wt)
prev_mc_kl_sum = np.sum(prev_mc_kl * pol_wt)
new_mc_kl_sum = np.sum(new_mc_kl * pol_wt)
LOGGER.debug(
'Trajectory step: ent: %f cost: %f -> %f KL: %f -> %f',
ent, prev_mc_obj, new_mc_obj, prev_mc_kl_sum, new_mc_kl_sum
)
# Compute predicted and actual improvement.
predicted_impr = np.sum(prev_laplace_obj) + prev_laplace_kl_sum - \
np.sum(new_pred_laplace_obj) - new_pred_laplace_kl_sum
actual_impr = np.sum(prev_laplace_obj) + prev_laplace_kl_sum - \
np.sum(new_actual_laplace_obj) - new_actual_laplace_kl_sum
# Print improvement details.
LOGGER.debug('Previous cost: Laplace: %f MC: %f',
np.sum(prev_laplace_obj), prev_mc_obj)
LOGGER.debug('Predicted new cost: Laplace: %f MC: %f',
np.sum(new_pred_laplace_obj), new_mc_obj)
LOGGER.debug('Actual new cost: Laplace: %f MC: %f',
np.sum(new_actual_laplace_obj), new_mc_obj)
LOGGER.debug('Previous KL: Laplace: %f MC: %f',
np.sum(prev_laplace_kl), np.sum(prev_mc_kl))
LOGGER.debug('Predicted new KL: Laplace: %f MC: %f',
np.sum(new_pred_laplace_kl), np.sum(new_mc_kl))
LOGGER.debug('Actual new KL: Laplace: %f MC: %f',
np.sum(new_actual_laplace_kl), np.sum(new_mc_kl))
LOGGER.debug('Previous w KL: Laplace: %f MC: %f',
prev_laplace_kl_sum, prev_mc_kl_sum)
LOGGER.debug('Predicted w new KL: Laplace: %f MC: %f',
new_pred_laplace_kl_sum, new_mc_kl_sum)
LOGGER.debug('Actual w new KL: Laplace %f MC: %f',
new_actual_laplace_kl_sum, new_mc_kl_sum)
LOGGER.debug('Predicted/actual improvement: %f / %f',
predicted_impr, actual_impr)
# Compute actual KL step taken at last iteration.
actual_step = self.cur[m].traj_info.last_kl_step / \
(self._hyperparams['kl_step'] * self.T)
if actual_step < self.cur[m].step_mult:
self.cur[m].step_mult = max(actual_step,
self._hyperparams['min_step_mult'])
self._set_new_mult(predicted_impr, actual_impr, m)
def _policy_kl(self, m, prev=False):
"""
Monte-Carlo estimate of KL divergence between policy and
trajectory.
"""
dU, T = self.dU, self.T
if prev:
traj, pol_info = self.prev[m].traj_distr, self.cur[m].pol_info
samples = self.prev[m].sample_list
else:
traj, pol_info = self.cur[m].traj_distr, self.cur[m].pol_info
samples = self.cur[m].sample_list
N = len(samples)
X, obs = samples.get_X(), samples.get_obs()
kl, kl_m = np.zeros((N, T)), np.zeros(T)
kl_l, kl_lm = np.zeros((N, T)), np.zeros(T)
# Compute policy mean and covariance at each sample.
pol_mu, _, pol_prec, pol_det_sigma = self.policy_opt.prob(obs.copy())
# Compute KL divergence.
for t in range(T):
# Compute trajectory action at sample.
traj_mu = np.zeros((N, dU))
for i in range(N):
traj_mu[i, :] = traj.K[t, :, :].dot(X[i, t, :]) + traj.k[t, :]
diff = pol_mu[:, t, :] - traj_mu
tr_pp_ct = pol_prec[:, t, :, :] * traj.pol_covar[t, :, :]
k_ln_det_ct = 0.5 * dU + np.sum(
np.log(np.diag(traj.chol_pol_covar[t, :, :]))
)
ln_det_cp = np.log(pol_det_sigma[:, t])
# IMPORTANT: Note that this assumes that pol_prec does not
# depend on state!!!!
# (Only the last term makes this assumption.)
d_pp_d = np.sum(diff * (diff.dot(pol_prec[1, t, :, :])), axis=1)
kl[:, t] = 0.5 * np.sum(np.sum(tr_pp_ct, axis=1), axis=1) - \
k_ln_det_ct + 0.5 * ln_det_cp + 0.5 * d_pp_d
tr_pp_ct_m = np.mean(tr_pp_ct, axis=0)
kl_m[t] = 0.5 * np.sum(np.sum(tr_pp_ct_m, axis=0), axis=0) - \
k_ln_det_ct + 0.5 * np.mean(ln_det_cp) + \
0.5 * np.mean(d_pp_d)
# Compute trajectory action at sample with Lagrange
# multiplier.
traj_mu = np.zeros((N, dU))
for i in range(N):
traj_mu[i, :] = \
(traj.K[t, :, :] - pol_info.lambda_K[t, :, :]).dot(
X[i, t, :]
) + (traj.k[t, :] - pol_info.lambda_k[t, :])
# Compute KL divergence with Lagrange multiplier.
diff_l = pol_mu[:, t, :] - traj_mu
d_pp_d_l = np.sum(diff_l * (diff_l.dot(pol_prec[1, t, :, :])),
axis=1)
kl_l[:, t] = 0.5 * np.sum(np.sum(tr_pp_ct, axis=1), axis=1) - \
k_ln_det_ct + 0.5 * ln_det_cp + 0.5 * d_pp_d_l
kl_lm[t] = 0.5 * np.sum(np.sum(tr_pp_ct_m, axis=0), axis=0) - \
k_ln_det_ct + 0.5 * np.mean(ln_det_cp) + \
0.5 * np.mean(d_pp_d_l)
return kl_m, kl, kl_lm, kl_l
def _estimate_cost(self, traj_distr, traj_info, pol_info, m):
"""
Compute Laplace approximation to expected cost.
Args:
traj_distr: A linear Gaussian policy object.
traj_info: A TrajectoryInfo object.
pol_info: Policy linearization info.
m: Condition number.
"""
# Constants.
T, dU, dX = self.T, self.dU, self.dX
# Perform forward pass (note that we repeat this here, because
# traj_info may have different dynamics from the ones that were
# used to compute the distribution already saved in traj).
mu, sigma = self.traj_opt.forward(traj_distr, traj_info)
# Compute cost.
predicted_cost = np.zeros(T)
for t in range(T):
predicted_cost[t] = traj_info.cc[t] + 0.5 * \
(np.sum(sigma[t, :, :] * traj_info.Cm[t, :, :]) +
mu[t, :].T.dot(traj_info.Cm[t, :, :]).dot(mu[t, :])) + \
mu[t, :].T.dot(traj_info.cv[t, :])
# Compute KL divergence.
predicted_kl = np.zeros(T)
for t in range(T):
inv_pS = np.linalg.solve(
pol_info.chol_pol_S[t, :, :],
np.linalg.solve(pol_info.chol_pol_S[t, :, :].T, np.eye(dU))
)
Ufb = pol_info.pol_K[t, :, :].dot(mu[t, :dX].T) + \
pol_info.pol_k[t, :]
diff = mu[t, dX:] - Ufb
Kbar = traj_distr.K[t, :, :] - pol_info.pol_K[t, :, :]
predicted_kl[t] = 0.5 * (diff).dot(inv_pS).dot(diff) + \
0.5 * np.sum(traj_distr.pol_covar[t, :, :] * inv_pS) + \
0.5 * np.sum(
sigma[t, :dX, :dX] * Kbar.T.dot(inv_pS).dot(Kbar)
) + np.sum(
np.log(np.diag(pol_info.chol_pol_S[t, :, :]))
) - np.sum(
np.log(np.diag(traj_distr.chol_pol_covar[t, :, :]))
) + 0.5 * dU
return predicted_cost, predicted_kl
def compute_costs(self, m, eta, augment=True):
""" Compute cost estimates used in the LQR backward pass. """
traj_info, traj_distr = self.cur[m].traj_info, self.cur[m].traj_distr
if not augment: # Whether to augment cost with term to penalize KL
return traj_info.Cm, traj_info.cv
pol_info = self.cur[m].pol_info
multiplier = self._hyperparams['max_ent_traj']
T, dU, dX = traj_distr.T, traj_distr.dU, traj_distr.dX
Cm, cv = np.copy(traj_info.Cm), np.copy(traj_info.cv)
# Modify policy action via Lagrange multiplier.
cv[:, dX:] -= pol_info.lambda_k
Cm[:, dX:, :dX] -= pol_info.lambda_K
Cm[:, :dX, dX:] -= np.transpose(pol_info.lambda_K, [0, 2, 1])
#Pre-process the costs with KL-divergence terms.
TKLm = np.zeros((T, dX+dU, dX+dU))
TKLv = np.zeros((T, dX+dU))
PKLm = np.zeros((T, dX+dU, dX+dU))
PKLv = np.zeros((T, dX+dU))
fCm, fcv = np.zeros(Cm.shape), np.zeros(cv.shape)
for t in range(T):
K, k = traj_distr.K[t, :, :], traj_distr.k[t, :]
inv_pol_covar = traj_distr.inv_pol_covar[t, :, :]
# Trajectory KL-divergence terms.
TKLm[t, :, :] = np.vstack([
np.hstack([
K.T.dot(inv_pol_covar).dot(K),
-K.T.dot(inv_pol_covar)]),
np.hstack([-inv_pol_covar.dot(K), inv_pol_covar])
])
TKLv[t, :] = np.concatenate([
K.T.dot(inv_pol_covar).dot(k), -inv_pol_covar.dot(k)
])
# Policy KL-divergence terms.
inv_pol_S = np.linalg.solve(
pol_info.chol_pol_S[t, :, :],
np.linalg.solve(pol_info.chol_pol_S[t, :, :].T, np.eye(dU))
)
KB, kB = pol_info.pol_K[t, :, :], pol_info.pol_k[t, :]
PKLm[t, :, :] = np.vstack([
np.hstack([KB.T.dot(inv_pol_S).dot(KB), -KB.T.dot(inv_pol_S)]),
np.hstack([-inv_pol_S.dot(KB), inv_pol_S])
])
PKLv[t, :] = np.concatenate([
KB.T.dot(inv_pol_S).dot(kB), -inv_pol_S.dot(kB)
])
wt = pol_info.pol_wt[t]
fCm[t, :, :] = (Cm[t, :, :] + TKLm[t, :, :] * eta +
PKLm[t, :, :] * wt) / (eta + wt + multiplier)
fcv[t, :] = (cv[t, :] + TKLv[t, :] * eta +
PKLv[t, :] * wt) / (eta + wt + multiplier)
return fCm, fcv
| [
"logging.getLogger",
"numpy.log",
"gps.algorithm.algorithm_utils.PolicyInfo",
"scipy.linalg.cholesky",
"copy.deepcopy",
"numpy.mean",
"numpy.concatenate",
"numpy.tile",
"numpy.eye",
"gps.sample.sample_list.SampleList",
"gps.algorithm.algorithm.Algorithm._advance_iteration_variables",
"numpy.st... | [((394, 421), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (411, 421), False, 'import logging\n'), ((647, 671), 'copy.deepcopy', 'copy.deepcopy', (['ALG_BADMM'], {}), '(ALG_BADMM)\n', (660, 671), False, 'import copy\n'), ((715, 747), 'gps.algorithm.algorithm.Algorithm.__init__', 'Algorithm.__init__', (['self', 'config'], {}), '(self, config)\n', (733, 747), False, 'from gps.algorithm.algorithm import Algorithm\n'), ((7993, 8013), 'numpy.zeros', 'np.zeros', (['(N, T, dU)'], {}), '((N, T, dU))\n', (8001, 8013), True, 'import numpy as np\n'), ((11217, 11261), 'gps.algorithm.algorithm.Algorithm._advance_iteration_variables', 'Algorithm._advance_iteration_variables', (['self'], {}), '(self)\n', (11255, 11261), False, 'from gps.algorithm.algorithm import Algorithm\n'), ((13431, 13463), 'numpy.sum', 'np.sum', (['(prev_laplace_kl * pol_wt)'], {}), '(prev_laplace_kl * pol_wt)\n', (13437, 13463), True, 'import numpy as np\n'), ((13498, 13534), 'numpy.sum', 'np.sum', (['(new_pred_laplace_kl * pol_wt)'], {}), '(new_pred_laplace_kl * pol_wt)\n', (13504, 13534), True, 'import numpy as np\n'), ((13571, 13609), 'numpy.sum', 'np.sum', (['(new_actual_laplace_kl * pol_wt)'], {}), '(new_actual_laplace_kl * pol_wt)\n', (13577, 13609), True, 'import numpy as np\n'), ((13635, 13662), 'numpy.sum', 'np.sum', (['(prev_mc_kl * pol_wt)'], {}), '(prev_mc_kl * pol_wt)\n', (13641, 13662), True, 'import numpy as np\n'), ((13687, 13713), 'numpy.sum', 'np.sum', (['(new_mc_kl * pol_wt)'], {}), '(new_mc_kl * pol_wt)\n', (13693, 13713), True, 'import numpy as np\n'), ((19568, 19579), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (19576, 19579), True, 'import numpy as np\n'), ((19925, 19936), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (19933, 19936), True, 'import numpy as np\n'), ((21600, 21642), 'numpy.transpose', 'np.transpose', (['pol_info.lambda_K', '[0, 2, 1]'], {}), '(pol_info.lambda_K, [0, 2, 1])\n', (21612, 21642), True, 'import numpy as 
np\n'), ((21716, 21747), 'numpy.zeros', 'np.zeros', (['(T, dX + dU, dX + dU)'], {}), '((T, dX + dU, dX + dU))\n', (21724, 21747), True, 'import numpy as np\n'), ((21759, 21781), 'numpy.zeros', 'np.zeros', (['(T, dX + dU)'], {}), '((T, dX + dU))\n', (21767, 21781), True, 'import numpy as np\n'), ((21795, 21826), 'numpy.zeros', 'np.zeros', (['(T, dX + dU, dX + dU)'], {}), '((T, dX + dU, dX + dU))\n', (21803, 21826), True, 'import numpy as np\n'), ((21838, 21860), 'numpy.zeros', 'np.zeros', (['(T, dX + dU)'], {}), '((T, dX + dU))\n', (21846, 21860), True, 'import numpy as np\n'), ((873, 902), 'gps.algorithm.algorithm_utils.PolicyInfo', 'PolicyInfo', (['self._hyperparams'], {}), '(self._hyperparams)\n', (883, 902), False, 'from gps.algorithm.algorithm_utils import PolicyInfo\n'), ((4435, 4455), 'numpy.zeros', 'np.zeros', (['(0, T, dO)'], {}), '((0, T, dO))\n', (4443, 4455), True, 'import numpy as np\n'), ((4457, 4477), 'numpy.zeros', 'np.zeros', (['(0, T, dU)'], {}), '((0, T, dU))\n', (4465, 4477), True, 'import numpy as np\n'), ((4504, 4528), 'numpy.zeros', 'np.zeros', (['(0, T, dU, dU)'], {}), '((0, T, dU, dU))\n', (4512, 4528), True, 'import numpy as np\n'), ((4530, 4546), 'numpy.zeros', 'np.zeros', (['(0, T)'], {}), '((0, T))\n', (4538, 4546), True, 'import numpy as np\n'), ((4907, 4927), 'numpy.zeros', 'np.zeros', (['(N, T, dU)'], {}), '((N, T, dU))\n', (4915, 4927), True, 'import numpy as np\n'), ((4946, 4970), 'numpy.zeros', 'np.zeros', (['(N, T, dU, dU)'], {}), '((N, T, dU, dU))\n', (4954, 4970), True, 'import numpy as np\n'), ((4988, 5004), 'numpy.zeros', 'np.zeros', (['(N, T)'], {}), '((N, T))\n', (4996, 5004), True, 'import numpy as np\n'), ((5768, 5796), 'numpy.concatenate', 'np.concatenate', (['(tgt_mu, mu)'], {}), '((tgt_mu, mu))\n', (5782, 5796), True, 'import numpy as np\n'), ((5819, 5849), 'numpy.concatenate', 'np.concatenate', (['(tgt_prc, prc)'], {}), '((tgt_prc, prc))\n', (5833, 5849), True, 'import numpy as np\n'), ((5871, 5899), 
'numpy.concatenate', 'np.concatenate', (['(tgt_wt, wt)'], {}), '((tgt_wt, wt))\n', (5885, 5899), True, 'import numpy as np\n'), ((6792, 6827), 'gps.sample.sample_list.SampleList', 'SampleList', (['self.cur[m].sample_list'], {}), '(self.cur[m].sample_list)\n', (6802, 6827), False, 'from gps.sample.sample_list import SampleList\n'), ((6923, 6937), 'gps.sample.sample_list.SampleList', 'SampleList', (['[]'], {}), '([])\n', (6933, 6937), False, 'from gps.sample.sample_list import SampleList\n'), ((7314, 7357), 'scipy.linalg.cholesky', 'sp.linalg.cholesky', (['pol_info.pol_S[t, :, :]'], {}), '(pol_info.pol_S[t, :, :])\n', (7332, 7357), True, 'import scipy as sp\n'), ((11436, 11472), 'copy.deepcopy', 'copy.deepcopy', (['self.prev[m].pol_info'], {}), '(self.prev[m].pol_info)\n', (11449, 11472), False, 'import copy\n'), ((12805, 12836), 'numpy.sum', 'np.sum', (['self.prev[m].cs'], {'axis': '(1)'}), '(self.prev[m].cs, axis=1)\n', (12811, 12836), True, 'import numpy as np\n'), ((12875, 12905), 'numpy.sum', 'np.sum', (['self.cur[m].cs'], {'axis': '(1)'}), '(self.cur[m].cs, axis=1)\n', (12881, 12905), True, 'import numpy as np\n'), ((13222, 13246), 'numpy.zeros_like', 'np.zeros_like', (['new_mc_kl'], {}), '(new_mc_kl)\n', (13235, 13246), True, 'import numpy as np\n'), ((14352, 14376), 'numpy.sum', 'np.sum', (['prev_laplace_obj'], {}), '(prev_laplace_obj)\n', (14358, 14376), True, 'import numpy as np\n'), ((14475, 14503), 'numpy.sum', 'np.sum', (['new_pred_laplace_obj'], {}), '(new_pred_laplace_obj)\n', (14481, 14503), True, 'import numpy as np\n'), ((14598, 14628), 'numpy.sum', 'np.sum', (['new_actual_laplace_obj'], {}), '(new_actual_laplace_obj)\n', (14604, 14628), True, 'import numpy as np\n'), ((14719, 14742), 'numpy.sum', 'np.sum', (['prev_laplace_kl'], {}), '(prev_laplace_kl)\n', (14725, 14742), True, 'import numpy as np\n'), ((14744, 14762), 'numpy.sum', 'np.sum', (['prev_mc_kl'], {}), '(prev_mc_kl)\n', (14750, 14762), True, 'import numpy as np\n'), ((14846, 14873), 
'numpy.sum', 'np.sum', (['new_pred_laplace_kl'], {}), '(new_pred_laplace_kl)\n', (14852, 14873), True, 'import numpy as np\n'), ((14875, 14892), 'numpy.sum', 'np.sum', (['new_mc_kl'], {}), '(new_mc_kl)\n', (14881, 14892), True, 'import numpy as np\n'), ((14973, 15002), 'numpy.sum', 'np.sum', (['new_actual_laplace_kl'], {}), '(new_actual_laplace_kl)\n', (14979, 15002), True, 'import numpy as np\n'), ((15004, 15021), 'numpy.sum', 'np.sum', (['new_mc_kl'], {}), '(new_mc_kl)\n', (15010, 15021), True, 'import numpy as np\n'), ((16462, 16478), 'numpy.zeros', 'np.zeros', (['(N, T)'], {}), '((N, T))\n', (16470, 16478), True, 'import numpy as np\n'), ((16480, 16491), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (16488, 16491), True, 'import numpy as np\n'), ((16514, 16530), 'numpy.zeros', 'np.zeros', (['(N, T)'], {}), '((N, T))\n', (16522, 16530), True, 'import numpy as np\n'), ((16532, 16543), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (16540, 16543), True, 'import numpy as np\n'), ((16816, 16833), 'numpy.zeros', 'np.zeros', (['(N, dU)'], {}), '((N, dU))\n', (16824, 16833), True, 'import numpy as np\n'), ((17204, 17231), 'numpy.log', 'np.log', (['pol_det_sigma[:, t]'], {}), '(pol_det_sigma[:, t])\n', (17210, 17231), True, 'import numpy as np\n'), ((17658, 17683), 'numpy.mean', 'np.mean', (['tr_pp_ct'], {'axis': '(0)'}), '(tr_pp_ct, axis=0)\n', (17665, 17683), True, 'import numpy as np\n'), ((17976, 17993), 'numpy.zeros', 'np.zeros', (['(N, dU)'], {}), '((N, dU))\n', (17984, 17993), True, 'import numpy as np\n'), ((21386, 21407), 'numpy.copy', 'np.copy', (['traj_info.Cm'], {}), '(traj_info.Cm)\n', (21393, 21407), True, 'import numpy as np\n'), ((21409, 21430), 'numpy.copy', 'np.copy', (['traj_info.cv'], {}), '(traj_info.cv)\n', (21416, 21430), True, 'import numpy as np\n'), ((21878, 21896), 'numpy.zeros', 'np.zeros', (['Cm.shape'], {}), '(Cm.shape)\n', (21886, 21896), True, 'import numpy as np\n'), ((21898, 21916), 'numpy.zeros', 'np.zeros', (['cv.shape'], {}), 
'(cv.shape)\n', (21906, 21916), True, 'import numpy as np\n'), ((5167, 5214), 'numpy.tile', 'np.tile', (['traj.inv_pol_covar[t, :, :]', '[N, 1, 1]'], {}), '(traj.inv_pol_covar[t, :, :], [N, 1, 1])\n', (5174, 5214), True, 'import numpy as np\n'), ((14032, 14060), 'numpy.sum', 'np.sum', (['new_pred_laplace_obj'], {}), '(new_pred_laplace_obj)\n', (14038, 14060), True, 'import numpy as np\n'), ((14176, 14206), 'numpy.sum', 'np.sum', (['new_actual_laplace_obj'], {}), '(new_actual_laplace_obj)\n', (14182, 14206), True, 'import numpy as np\n'), ((3735, 3746), 'numpy.log', 'np.log', (['sch'], {}), '(sch)\n', (3741, 3746), True, 'import numpy as np\n'), ((8625, 8649), 'numpy.mean', 'np.mean', (['(tU - pU)'], {'axis': '(0)'}), '(tU - pU, axis=0)\n', (8632, 8649), True, 'import numpy as np\n'), ((13965, 13989), 'numpy.sum', 'np.sum', (['prev_laplace_obj'], {}), '(prev_laplace_obj)\n', (13971, 13989), True, 'import numpy as np\n'), ((14109, 14133), 'numpy.sum', 'np.sum', (['prev_laplace_obj'], {}), '(prev_laplace_obj)\n', (14115, 14133), True, 'import numpy as np\n'), ((17848, 17863), 'numpy.mean', 'np.mean', (['d_pp_d'], {}), '(d_pp_d)\n', (17855, 17863), True, 'import numpy as np\n'), ((18773, 18790), 'numpy.mean', 'np.mean', (['d_pp_d_l'], {}), '(d_pp_d_l)\n', (18780, 18790), True, 'import numpy as np\n'), ((20112, 20122), 'numpy.eye', 'np.eye', (['dU'], {}), '(dU)\n', (20118, 20122), True, 'import numpy as np\n'), ((22679, 22689), 'numpy.eye', 'np.eye', (['dU'], {}), '(dU)\n', (22685, 22689), True, 'import numpy as np\n'), ((3299, 3310), 'numpy.log', 'np.log', (['sch'], {}), '(sch)\n', (3305, 3310), True, 'import numpy as np\n'), ((17127, 17164), 'numpy.diag', 'np.diag', (['traj.chol_pol_covar[t, :, :]'], {}), '(traj.chol_pol_covar[t, :, :])\n', (17134, 17164), True, 'import numpy as np\n'), ((17799, 17817), 'numpy.mean', 'np.mean', (['ln_det_cp'], {}), '(ln_det_cp)\n', (17806, 17817), True, 'import numpy as np\n'), ((18724, 18742), 'numpy.mean', 'np.mean', (['ln_det_cp'], 
{}), '(ln_det_cp)\n', (18731, 18742), True, 'import numpy as np\n'), ((19686, 19732), 'numpy.sum', 'np.sum', (['(sigma[t, :, :] * traj_info.Cm[t, :, :])'], {}), '(sigma[t, :, :] * traj_info.Cm[t, :, :])\n', (19692, 19732), True, 'import numpy as np\n'), ((20765, 20808), 'numpy.diag', 'np.diag', (['traj_distr.chol_pol_covar[t, :, :]'], {}), '(traj_distr.chol_pol_covar[t, :, :])\n', (20772, 20808), True, 'import numpy as np\n'), ((10142, 10155), 'numpy.mean', 'np.mean', (['kl_m'], {}), '(kl_m)\n', (10149, 10155), True, 'import numpy as np\n'), ((10267, 10280), 'numpy.mean', 'np.mean', (['kl_m'], {}), '(kl_m)\n', (10274, 10280), True, 'import numpy as np\n'), ((17530, 17554), 'numpy.sum', 'np.sum', (['tr_pp_ct'], {'axis': '(1)'}), '(tr_pp_ct, axis=1)\n', (17536, 17554), True, 'import numpy as np\n'), ((17719, 17745), 'numpy.sum', 'np.sum', (['tr_pp_ct_m'], {'axis': '(0)'}), '(tr_pp_ct_m, axis=0)\n', (17725, 17745), True, 'import numpy as np\n'), ((18503, 18527), 'numpy.sum', 'np.sum', (['tr_pp_ct'], {'axis': '(1)'}), '(tr_pp_ct, axis=1)\n', (18509, 18527), True, 'import numpy as np\n'), ((18644, 18670), 'numpy.sum', 'np.sum', (['tr_pp_ct_m'], {'axis': '(0)'}), '(tr_pp_ct_m, axis=0)\n', (18650, 18670), True, 'import numpy as np\n'), ((20663, 20700), 'numpy.diag', 'np.diag', (['pol_info.chol_pol_S[t, :, :]'], {}), '(pol_info.chol_pol_S[t, :, :])\n', (20670, 20700), True, 'import numpy as np\n'), ((10226, 10238), 'numpy.std', 'np.std', (['kl_m'], {}), '(kl_m)\n', (10232, 10238), True, 'import numpy as np\n'), ((10351, 10363), 'numpy.std', 'np.std', (['kl_m'], {}), '(kl_m)\n', (10357, 10363), True, 'import numpy as np\n'), ((20441, 20487), 'numpy.sum', 'np.sum', (['(traj_distr.pol_covar[t, :, :] * inv_pS)'], {}), '(traj_distr.pol_covar[t, :, :] * inv_pS)\n', (20447, 20487), True, 'import numpy as np\n')] |
#hpart = 'horizontal partition', vpart = 'vertical partition'
from numpy import hstack, vstack
def merge_2x2(TL, TR, BL, BR, A):
    """Copy the four quadrant blocks into the preallocated matrix ``A`` in place.

    ``TL`` anchors the split point: ``TR`` goes to its right, ``BL`` below it,
    and ``BR`` diagonally opposite.  Empty blocks (a zero-sized dimension)
    are no-ops, matching the original element-wise loops.

    Args:
        TL, TR, BL, BR: quadrant arrays with mutually consistent shapes.
        A: output array of at least
           (TL.shape[0] + BL.shape[0], TL.shape[1] + TR.shape[1]).
    """
    m, n = TL.shape
    # Vectorized slice assignment replaces the original O(rows*cols)
    # Python loops; empty slices assign nothing, so no guards are needed.
    A[:m, :n] = TL
    A[:TR.shape[0], n:n + TR.shape[1]] = TR
    A[m:m + BL.shape[0], :BL.shape[1]] = BL
    A[m:m + BR.shape[0], n:n + BR.shape[1]] = BR
def merge_2x1(T, B, A):
    """Stack blocks ``T`` (top) and ``B`` (bottom) into ``A`` in place.

    Empty blocks are no-ops, matching the original element-wise loops.
    """
    m = T.shape[0]
    # Slice assignment instead of nested Python loops (same effect, O(1)
    # Python-level operations; empty slices assign nothing).
    A[:m, :T.shape[1]] = T
    A[m:m + B.shape[0], :B.shape[1]] = B
def merge_1x2(L, R, A):
    """Place blocks ``L`` (left) and ``R`` (right) into ``A`` in place.

    Empty blocks are no-ops, matching the original element-wise loops.
    """
    n = L.shape[1]
    # Slice assignment instead of nested Python loops.
    A[:L.shape[0], :n] = L
    A[:R.shape[0], n:n + R.shape[1]] = R
def cont_with_1x3_to_1x2(A0, A1, A2, side='LEFT'):
    """Collapse a 1x3 column partition back to 1x2.

    The middle block ``A1`` is absorbed into the left part when
    ``side == 'LEFT'``, otherwise into the right part.
    """
    if side == 'LEFT':
        left, right = hstack((A0, A1)), A2
    else:
        left, right = A0, hstack((A1, A2))
    return left, right
def cont_with_3x1_to_2x1(A0, A1, A2, side='TOP'):
    """Collapse a 3x1 row partition back to 2x1.

    The middle block ``A1`` is absorbed into the top part when
    ``side == 'TOP'``, otherwise into the bottom part.
    """
    if side == 'TOP':
        top, bottom = vstack((A0, A1)), A2
    else:
        top, bottom = A0, vstack((A1, A2))
    return top, bottom
def cont_with_3x3_to_2x2(A00, A01, A02,
                         A10, A11, A12,
                         A20, A21, A22, quad='TL'):
    """Collapse a 3x3 partitioning back to 2x2, absorbing the middle
    row/column of blocks into quadrant ``quad``.

    Args:
        A00..A22: the nine blocks, row-major.
        quad: which quadrant grows ('TL', 'TR', 'BL', or 'BR').

    Raises:
        ValueError: for an invalid ``quad`` (previously this fell through
            to a NameError at the return statement).
    """
    if quad == 'TL':
        TL = vstack((hstack((A00, A01)),
                     hstack((A10, A11))))
        TR = vstack((A02, A12))
        BL = hstack((A20, A21))
        BR = A22
    elif quad == 'TR':
        TL = vstack((A00, A10))
        TR = vstack((hstack((A01, A02)),
                     hstack((A11, A12))))
        BL = A20
        BR = hstack((A21, A22))
    elif quad == 'BL':
        TL = hstack((A00, A01))
        TR = A02
        BL = vstack((hstack((A10, A11)),
                     hstack((A20, A21))))
        BR = vstack((A12, A22))
    elif quad == 'BR':
        TL = A00
        TR = hstack((A01, A02))
        BL = vstack((A10, A20))
        BR = vstack((hstack((A11, A12)),
                     hstack((A21, A22))))
    else:
        # Consistent with part_2x2's validation.
        raise ValueError('quadrant must be TL, TR, BL, or BR')
    return TL, TR, BL, BR
def part_1x2(A, size=0, side='LEFT'):
    """Split ``A`` by columns into left and right views.

    ``size`` columns are counted from ``side`` ('LEFT' or 'RIGHT').

    Raises:
        IndexError: if ``size`` is negative or exceeds the column count.
        ValueError: if ``side`` is not 'LEFT' or 'RIGHT'.
    """
    if size < 0:
        raise IndexError('size < 0')
    if size > A.shape[1]:
        raise IndexError('size > col dimension')
    if side not in ('LEFT', 'RIGHT'):
        raise ValueError('side must be LEFT or RIGHT')
    split = size if side == 'LEFT' else A.shape[1] - size
    return A[:, :split], A[:, split:]
def part_2x1(A, size=0, side='TOP'):
    """Split ``A`` by rows into top and bottom views.

    ``size`` rows are counted from ``side`` ('TOP' or 'BOTTOM').

    Raises:
        IndexError: if ``size`` is negative or exceeds the row count.
        ValueError: if ``side`` is not 'TOP' or 'BOTTOM'.
    """
    if size < 0:
        raise IndexError('size < 0')
    if size > A.shape[0]:
        raise IndexError('size > row dimension')
    if side not in ('TOP', 'BOTTOM'):
        raise ValueError('side must be TOP or BOTTOM')
    split = size if side == 'TOP' else A.shape[0] - size
    return A[:split, :], A[split:, :]
def part_2x2(A, m, n, quad):
    """Split ``A`` into four quadrant views; ``quad`` has shape (m, n).

    Returns (TL, TR, BL, BR).

    Raises:
        ValueError: if ``quad`` is not one of 'TL', 'TR', 'BL', 'BR'.
    """
    if quad not in ('TL', 'TR', 'BL', 'BR'):
        raise ValueError('quadrant must be TL, TR, BL, or BR')
    row_split = m if quad in ('TL', 'TR') else A.shape[0] - m
    col_split = n if quad in ('TL', 'BL') else A.shape[1] - n
    top, bottom = A[:row_split, :], A[row_split:, :]
    return (top[:, :col_split], top[:, col_split:],
            bottom[:, :col_split], bottom[:, col_split:])
def repart_1x2_to_1x3(AL, AR, n=1, side='RIGHT'):
    """Repartition 1x2 -> 1x3, exposing ``n`` columns as the middle block.

    The middle block ``A1`` is carved from ``AR``'s leading columns when
    ``side == 'RIGHT'``, otherwise from ``AL``'s trailing columns.

    Raises:
        ValueError: if ``side`` is not 'LEFT' or 'RIGHT' (validation added
            for consistency with ``part_1x2``; previously any other value
            silently behaved like 'LEFT').
    """
    if side not in ('LEFT', 'RIGHT'):
        raise ValueError('side must be LEFT or RIGHT')
    if side == 'RIGHT':
        A0 = AL
        A1, A2 = AR[:, :n], AR[:, n:]
    else:
        split = AL.shape[1] - n
        A0, A1 = AL[:, :split], AL[:, split:]
        A2 = AR
    return A0, A1, A2
def repart_2x1_to_3x1(AT,
                      AB, m=1, side='BOTTOM'):
    """Repartition 2x1 -> 3x1, exposing ``m`` rows as the middle block.

    The middle block ``A1`` is carved from ``AB``'s leading rows when
    ``side == 'BOTTOM'``, otherwise from ``AT``'s trailing rows.

    Raises:
        ValueError: if ``side`` is not 'TOP' or 'BOTTOM'.  (Previously the
            two guards disagreed for other values, so rows AB[:m] were
            silently dropped from the result; now invalid input raises,
            consistent with ``part_2x1``.)
    """
    if side not in ('TOP', 'BOTTOM'):
        raise ValueError('side must be TOP or BOTTOM')
    if side == 'BOTTOM':
        A0 = AT[:, :]
        A1, A2 = AB[:m, :], AB[m:, :]
    else:
        split = AT.shape[0] - m
        A0, A1 = AT[:split, :], AT[split:, :]
        A2 = AB[:, :]
    return A0, A1, A2
def repart_2x2_to_3x3(ATL, ATR,
                      ABL, ABR, m=1, n=1, quad='BR'):
    """Repartition 2x2 -> 3x3, exposing an m-row / n-column strip of blocks
    adjacent to quadrant ``quad``.

    Returns the nine blocks A00..A22 in row-major order.

    Raises:
        ValueError: if ``quad`` is not one of 'TL', 'TR', 'BL', 'BR'
            (previously an invalid quad raised NameError at the return).
    """
    if quad not in ('TL', 'TR', 'BL', 'BR'):
        raise ValueError('quadrant must be TL, TR, BL, or BR')
    # For TL/TR the strip of m rows comes off the bottom of the top blocks;
    # otherwise off the top of the bottom blocks (and likewise for columns).
    hpart = ATL.shape[0] - m if quad in ('TL', 'TR') else m
    vpart = ATL.shape[1] - n if quad in ('TL', 'BL') else n
    if quad == 'TL':
        A00, A01, A02 = ATL[:hpart, :vpart], ATL[:hpart, vpart:], ATR[:hpart, :]
        A10, A11, A12 = ATL[hpart:, :vpart], ATL[hpart:, vpart:], ATR[hpart:, :]
        A20, A21, A22 = ABL[:, :vpart], ABL[:, vpart:], ABR[:, :]
    elif quad == 'TR':
        A00, A01, A02 = ATL[:hpart, :], ATR[:hpart, :vpart], ATR[:hpart, vpart:]
        A10, A11, A12 = ATL[hpart:, :], ATR[hpart:, :vpart], ATR[hpart:, vpart:]
        A20, A21, A22 = ABL[:, :], ABR[:, :vpart], ABR[:, vpart:]
    elif quad == 'BL':
        A00, A01, A02 = ATL[:, :vpart], ATL[:, vpart:], ATR[:, :]
        A10, A11, A12 = ABL[:hpart, :vpart], ABL[:hpart, vpart:], ABR[:hpart, :]
        A20, A21, A22 = ABL[hpart:, :vpart], ABL[hpart:, vpart:], ABR[hpart:, :]
    else:  # 'BR'
        A00, A01, A02 = ATL[:, :], ATR[:, :vpart], ATR[:, vpart:]
        A10, A11, A12 = ABL[:hpart, :], ABR[:hpart, :vpart], ABR[:hpart, vpart:]
        A20, A21, A22 = ABL[hpart:, :], ABR[hpart:, :vpart], ABR[hpart:, vpart:]
    return A00, A01, A02, \
           A10, A11, A12, \
           A20, A21, A22
| [
"numpy.vstack",
"numpy.hstack"
] | [((2062, 2080), 'numpy.vstack', 'vstack', (['(A02, A12)'], {}), '((A02, A12))\n', (2068, 2080), False, 'from numpy import hstack, vstack\n'), ((2117, 2135), 'numpy.hstack', 'hstack', (['(A20, A21)'], {}), '((A20, A21))\n', (2123, 2135), False, 'from numpy import hstack, vstack\n'), ((1582, 1598), 'numpy.hstack', 'hstack', (['(A0, A1)'], {}), '((A0, A1))\n', (1588, 1598), False, 'from numpy import hstack, vstack\n'), ((1632, 1648), 'numpy.hstack', 'hstack', (['(A1, A2)'], {}), '((A1, A2))\n', (1638, 1648), False, 'from numpy import hstack, vstack\n'), ((1738, 1754), 'numpy.vstack', 'vstack', (['(A0, A1)'], {}), '((A0, A1))\n', (1744, 1754), False, 'from numpy import hstack, vstack\n'), ((1788, 1804), 'numpy.vstack', 'vstack', (['(A1, A2)'], {}), '((A1, A2))\n', (1794, 1804), False, 'from numpy import hstack, vstack\n'), ((2190, 2208), 'numpy.vstack', 'vstack', (['(A00, A10)'], {}), '((A00, A10))\n', (2196, 2208), False, 'from numpy import hstack, vstack\n'), ((2347, 2365), 'numpy.hstack', 'hstack', (['(A21, A22)'], {}), '((A21, A22))\n', (2353, 2365), False, 'from numpy import hstack, vstack\n'), ((1985, 2003), 'numpy.hstack', 'hstack', (['(A00, A01)'], {}), '((A00, A01))\n', (1991, 2003), False, 'from numpy import hstack, vstack\n'), ((2028, 2046), 'numpy.hstack', 'hstack', (['(A10, A11)'], {}), '((A10, A11))\n', (2034, 2046), False, 'from numpy import hstack, vstack\n'), ((2403, 2421), 'numpy.hstack', 'hstack', (['(A00, A01)'], {}), '((A00, A01))\n', (2409, 2421), False, 'from numpy import hstack, vstack\n'), ((2537, 2555), 'numpy.vstack', 'vstack', (['(A12, A22)'], {}), '((A12, A22))\n', (2543, 2555), False, 'from numpy import hstack, vstack\n'), ((2253, 2271), 'numpy.hstack', 'hstack', (['(A01, A02)'], {}), '((A01, A02))\n', (2259, 2271), False, 'from numpy import hstack, vstack\n'), ((2296, 2314), 'numpy.hstack', 'hstack', (['(A11, A12)'], {}), '((A11, A12))\n', (2302, 2314), False, 'from numpy import hstack, vstack\n'), ((2633, 2651), 'numpy.hstack', 
'hstack', (['(A01, A02)'], {}), '((A01, A02))\n', (2639, 2651), False, 'from numpy import hstack, vstack\n'), ((2665, 2683), 'numpy.vstack', 'vstack', (['(A10, A20)'], {}), '((A10, A20))\n', (2671, 2683), False, 'from numpy import hstack, vstack\n'), ((2460, 2478), 'numpy.hstack', 'hstack', (['(A10, A11)'], {}), '((A10, A11))\n', (2466, 2478), False, 'from numpy import hstack, vstack\n'), ((2503, 2521), 'numpy.hstack', 'hstack', (['(A20, A21)'], {}), '((A20, A21))\n', (2509, 2521), False, 'from numpy import hstack, vstack\n'), ((2728, 2746), 'numpy.hstack', 'hstack', (['(A11, A12)'], {}), '((A11, A12))\n', (2734, 2746), False, 'from numpy import hstack, vstack\n'), ((2771, 2789), 'numpy.hstack', 'hstack', (['(A21, A22)'], {}), '((A21, A22))\n', (2777, 2789), False, 'from numpy import hstack, vstack\n')] |
import numpy as np
from info import freq_to_notes
class Note:
    """A single musical note detected from an audio analysis frame."""

    def __init__(self, pitch, signal, loudness, timestamp, duration=None, typ=None):
        """Store the measured values and resolve the closest ideal note.

        Args:
            pitch: Measured fundamental frequency in Hertz.
            signal: Signal strength of the detection.
            loudness: Loudness of the detection.
            timestamp: Time at which the note was detected.
            duration: Optional length of the note.
            typ: Optional note type tag.
        """
        self.pitch = round(pitch, 3)
        self.signal = round(signal, 3)
        self.loudness = round(loudness, 3)
        self.timestamp = timestamp
        self.given_pitch = self.closest_pitch(pitch)
        self.duration = duration
        self.typ = typ
        # Look up the metadata of the ideal note this pitch snaps to.
        meta = freq_to_notes[self.given_pitch]
        self.id = meta["id"]
        self.note = meta["note"]
        self.octave = meta["octave"]
        self.alter = meta["alter"]

    def closest_pitch(self, pitch):
        """
        Given a pitch finds the closest musical note. This is determined by
        the absolute distance in frequency (Hertz).
        """
        candidates = np.array(list(freq_to_notes))
        nearest = np.abs(candidates - pitch).argmin()
        return candidates[nearest]

    def getInfo(self):
        """
        Returns all the information stored in a musical note.
        """
        return (self.timestamp, self.id, self.signal, self.pitch,
                self.given_pitch, self.loudness, self.note, self.octave,
                self.alter)

    def describe(self):
        """
        Prints all the information describing a note.
        """
        note = str(self.note)
        note += ("#" if self.alter else "")
        print(f"\n{note}, octave: {self.octave}, actual pitch: {self.pitch}Hz, ideal pitch: {self.given_pitch}Hz")
        print(f"timestamp: {self.timestamp}")
| [
"numpy.abs",
"info.freq_to_notes.keys"
] | [((892, 912), 'info.freq_to_notes.keys', 'freq_to_notes.keys', ([], {}), '()\n', (910, 912), False, 'from info import freq_to_notes\n'), ((934, 957), 'numpy.abs', 'np.abs', (['(pitches - pitch)'], {}), '(pitches - pitch)\n', (940, 957), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from problem_data_gen import setup_pendulum_system
from experiment import experiment
from plotting import single_system_plot
if __name__ == "__main__":
    plt.close('all')
    # Noise scales applied to the nominal pendulum system matrices.
    noise_levels = np.array([0.00, 0.1, 1.00])
    system, K0, L0 = setup_pendulum_system()
    # Keep pristine copies so each scaling starts from the original system.
    base_a, base_b, base_c = np.copy(system.a), np.copy(system.b), np.copy(system.c)

    vi_results_all = []
    pi_results_all = []
    for noise_level in noise_levels:
        # Scale the system by the current noise level.
        system.a = noise_level * base_a
        system.b = noise_level * base_b
        system.c = noise_level * base_c
        # Run the experiment and collect both result sets.
        vi_results, pi_results = experiment(system, K0, L0, show_diagnostic=True)
        vi_results_all.append(vi_results)
        pi_results_all.append(pi_results)

    # One plot per x-axis flavour.
    for axis_label in ('Iterations', 'Wall clock time (s)'):
        single_system_plot(vi_results_all, pi_results_all, noise_levels,
                           x_axis_type=axis_label)
| [
"numpy.copy",
"experiment.experiment",
"plotting.single_system_plot",
"matplotlib.pyplot.close",
"numpy.array",
"problem_data_gen.setup_pendulum_system"
] | [((210, 226), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (219, 226), True, 'import matplotlib.pyplot as plt\n'), ((291, 316), 'numpy.array', 'np.array', (['[0.0, 0.1, 1.0]'], {}), '([0.0, 0.1, 1.0])\n', (299, 316), True, 'import numpy as np\n'), ((341, 364), 'problem_data_gen.setup_pendulum_system', 'setup_pendulum_system', ([], {}), '()\n', (362, 364), False, 'from problem_data_gen import setup_pendulum_system\n'), ((874, 968), 'plotting.single_system_plot', 'single_system_plot', (['vi_results_all', 'pi_results_all', 'noise_levels'], {'x_axis_type': '"""Iterations"""'}), "(vi_results_all, pi_results_all, noise_levels,\n x_axis_type='Iterations')\n", (892, 968), False, 'from plotting import single_system_plot\n'), ((969, 1072), 'plotting.single_system_plot', 'single_system_plot', (['vi_results_all', 'pi_results_all', 'noise_levels'], {'x_axis_type': '"""Wall clock time (s)"""'}), "(vi_results_all, pi_results_all, noise_levels,\n x_axis_type='Wall clock time (s)')\n", (987, 1072), False, 'from plotting import single_system_plot\n'), ((395, 412), 'numpy.copy', 'np.copy', (['system.a'], {}), '(system.a)\n', (402, 412), True, 'import numpy as np\n'), ((414, 431), 'numpy.copy', 'np.copy', (['system.b'], {}), '(system.b)\n', (421, 431), True, 'import numpy as np\n'), ((433, 450), 'numpy.copy', 'np.copy', (['system.c'], {}), '(system.c)\n', (440, 450), True, 'import numpy as np\n'), ((695, 743), 'experiment.experiment', 'experiment', (['system', 'K0', 'L0'], {'show_diagnostic': '(True)'}), '(system, K0, L0, show_diagnostic=True)\n', (705, 743), False, 'from experiment import experiment\n')] |
from traitlets.config import Configurable
from traitlets import (
Int,
List,
Unicode,
)
import numpy as np
import logging
from event.arguments.prepare.event_vocab import TypedEventVocab
from event.arguments.prepare.event_vocab import EmbbedingVocab
from event.arguments.prepare.hash_cloze_data import HashParam
from event.arguments.prepare.hash_cloze_data import SlotHandler
import xml.etree.ElementTree as ET
import os
logger = logging.getLogger(__name__)
class ImplicitArgResources(Configurable):
    """Resource class: loads embeddings, vocabularies and the hashed
    frame/NomBank slot mappings used by the implicit-argument model."""
    # Configurable paths/thresholds (traitlets); set via kwargs or config.
    raw_corpus_name = Unicode(help='Raw corpus name').tag(config=True)
    event_embedding_path = Unicode(help='Event Embedding path').tag(config=True)
    word_embedding_path = Unicode(help='Word Embedding path').tag(config=True)
    event_vocab_path = Unicode(help='Event Vocab').tag(config=True)
    word_vocab_path = Unicode(help='Word Vocab').tag(config=True)
    raw_lookup_path = Unicode(help='Raw Lookup Vocab.').tag(config=True)
    min_vocab_count = Int(help='The min vocab cutoff threshold.',
                          default_value=50).tag(config=True)

    def __init__(self, **kwargs):
        super(ImplicitArgResources, self).__init__(**kwargs)
        # Pre-trained embedding matrices (numpy .npy files).
        self.event_embedding = np.load(self.event_embedding_path)
        self.word_embedding = np.load(self.word_embedding_path)

        # Add padding and two unk to the vocab.
        self.event_embed_vocab = EmbbedingVocab.with_extras(
            self.event_vocab_path)
        # Add padding to the vocab.
        self.word_embed_vocab = EmbbedingVocab(self.word_vocab_path, True)

        self.predicate_count = self.count_predicates(self.event_vocab_path)

        logger.info(
            f"{len(self.event_embed_vocab.vocab)} events in embedding.")
        logger.info(
            f"{len(self.word_embed_vocab.vocab)} words in embedding.")

        self.typed_event_vocab = TypedEventVocab(self.raw_lookup_path)
        logger.info("Loaded typed vocab, including oov words.")

        # SlotHandler provides frame/NomBank mapping tables used below.
        hash_params = HashParam(**kwargs)
        self.slot_handler = SlotHandler(hash_params)

        # Convert the string-keyed mapping tables into vocab-id keyed ones.
        self.h_nom_dep_map, self.h_nom_slots = self.hash_nom_mappings()
        self.h_frame_dep_map, self.h_frame_slots = self.hash_frame_mappings()

    @staticmethod
    def count_predicates(vocab_file) -> int:
        """Sum the counts of all vocab entries ending in '-pred'.

        The vocab file has one `word count` pair per line.
        """
        pred_count = 0
        with open(vocab_file) as din:
            for line in din:
                word, count = line.split()
                if word.endswith('-pred'):
                    pred_count += int(count)
        return pred_count

    def hash_frame_mappings(self):
        """Hash the frame mappings.

        Returns:
            h_frame_dep_map: dict mapping (frame id, frame element name,
                predicate id) to its most frequent dependency label.
            h_frame_slots: dict mapping frame id to the set of hashed
                frame-element ids of that frame.
        """
        h_frame_dep_map = {}
        frame_deps = self.slot_handler.frame_deps
        for (frame, fe), pred_deps in frame_deps.items():
            fid = self.event_embed_vocab.get_index(frame, None)
            for pred, dep, count in pred_deps:
                pred_id = self.event_embed_vocab.get_index(
                    self.typed_event_vocab.get_pred_rep({'predicate': pred}),
                    None)
                if (fid, fe, pred_id) not in h_frame_dep_map:
                    # Map to the most frequent dependency type.
                    # (pred_deps is assumed to be sorted by frequency —
                    # the first dep seen wins; confirm against SlotHandler.)
                    h_frame_dep_map[(fid, fe, pred_id)] = dep

        frame_prior = self.slot_handler.frame_priority
        h_frame_slots = {}
        for frame_name, fes in frame_prior.items():
            fid = self.event_embed_vocab.get_index(
                frame_name, self.typed_event_vocab.unk_frame)
            h_frame_slots[fid] = set()
            for fe in fes:
                fe_name = fe['fe_name']
                fe_id = self.event_embed_vocab.get_index(
                    self.typed_event_vocab.get_fe_rep(frame_name, fe_name),
                    self.typed_event_vocab.unk_fe
                )
                h_frame_slots[fid].add(fe_id)

        return h_frame_dep_map, h_frame_slots

    def hash_nom_mappings(self):
        """The mapping information in the slot handler are string based, we
        convert them to the hashed version for easy reading.

        Returns:
            nom_dep_map: dict mapping (predicate id, argument index) to the
                dependency label.
            predicate_slots: dict mapping predicate id to the list of
                argument indices it takes.
        """
        def prop_to_index(argx):
            # Convert 'argN'/'argM' style role names to an integer slot.
            # NOTE(review): `r` is lower-cased first, so `r[3] == 'M'` can
            # never be true here; 'm' was likely intended — confirm.
            r = argx.lower()
            if r[3] == 'M':
                return 4
            else:
                return int(r[3])

        #
        # This nombank mapping is the one hand-crafted, contains the 10
        # nombank predicates.
        nom_map = self.slot_handler.nombank_mapping
        predicate_slots = {}
        nom_dep_map = {}
        for nom, (verb_form, arg_map) in nom_map.items():
            pred_id = self.typed_event_vocab.get_pred_rep(
                {'predicate': nom, 'verb_form': verb_form}
            )
            predicate_slots[pred_id] = []
            for arg_role, dep in arg_map.items():
                # '-' marks a slot with no dependency mapping; skip those.
                if not dep == '-':
                    arg_index = prop_to_index(arg_role)
                    predicate_slots[pred_id].append(arg_index)
                    nom_dep_map[(pred_id, arg_index)] = dep

        # This mapping is automatically gathered from data, mapping from the
        # verb and proposition to the dependency.
        prop_deps = self.slot_handler.prop_deps

        # TODO: Here, we should read the prop dep data by converting the
        # predicates into nomninals, by using the nom->verb mapping
        # The nom-> verb mapping
        for (verb, prop_role), dep in prop_deps.items():
            if verb in self.slot_handler.verb_nom_form:
                nom = self.slot_handler.verb_nom_form[verb]
                pred_id = self.typed_event_vocab.get_pred_rep(
                    {'predicate': nom, 'verb_form': verb}
                )
                arg_index = prop_to_index(prop_role)
                nom_dep_map[(pred_id, arg_index)] = dep

        return nom_dep_map, predicate_slots
def load_framenet_slots(framenet_path, event_emb_vocab):
    """Collect the frame-element names of every FrameNet frame.

    Scans all ``*.xml`` frame files under ``framenet_path`` and, for each
    frame present in ``event_emb_vocab``, records the lower-cased names of
    its frame elements. Frames unknown to the vocabulary are counted and
    logged but not returned.

    Args:
        framenet_path: Directory containing FrameNet frame XML files.
        event_emb_vocab: Vocabulary with a ``get_index(name, default)``
            method; -1 marks an unseen frame.

    Returns:
        Dict mapping frame vocabulary index to its list of frame-element
        names.
    """
    ns = {'fn': 'http://framenet.icsi.berkeley.edu'}
    frame_slots = {}
    num_unseen = 0
    for fname in os.listdir(framenet_path):
        if not fname.endswith('.xml'):
            continue
        with open(os.path.join(framenet_path, fname)) as frame_data:
            root = ET.parse(frame_data).getroot()
            frame = root.attrib['name']
            fid = event_emb_vocab.get_index(frame, None)
            fes = [node.attrib['name'].lower()
                   for node in root.findall('fn:FE', ns)]
            if fid == -1:
                num_unseen += 1
            else:
                frame_slots[fid] = fes

    logging.info(f"Loaded {len(frame_slots)} frames, {num_unseen} frames are "
                 f"not seen in the parsed dataset.")

    return frame_slots
def load_nombank_dep_map(nombank_map_path, typed_event_vocab):
    """Parse the NomBank noun-to-verb mapping file.

    Each non-comment line holds a noun, its verb form, and up to five
    slot fillers (arg0..arg4). Lines starting with ``#`` are skipped.

    Args:
        nombank_map_path: Path to the whitespace-separated mapping file.
        typed_event_vocab: Vocabulary providing ``get_pred_rep`` to build
            the predicate key.

    Returns:
        Dict keyed by predicate representation, each value holding the
        verb, the noun, and a dict of the populated slots.
    """
    slot_names = ['arg0', 'arg1', 'arg2', 'arg3', 'arg4']
    nombank_map = {}
    with open(nombank_map_path) as map_file:
        for line in map_file:
            # Comment lines are not mapping entries.
            if line.startswith('#'):
                continue
            fields = line.strip().split()
            noun, verb = fields[:2]
            pred = typed_event_vocab.get_pred_rep(
                {'predicate': noun, 'verb_form': verb})
            # zip truncates, so missing trailing slots are simply absent.
            nombank_map[pred] = {
                'verb': verb,
                'noun': noun,
                'slots': dict(zip(slot_names, fields[2:])),
            }

    logging.info("Loaded Nombank frame mapping.")

    return nombank_map
| [
"logging.getLogger",
"os.listdir",
"event.arguments.prepare.event_vocab.EmbbedingVocab",
"event.arguments.prepare.hash_cloze_data.HashParam",
"xml.etree.ElementTree.parse",
"event.arguments.prepare.event_vocab.TypedEventVocab",
"event.arguments.prepare.event_vocab.EmbbedingVocab.with_extras",
"os.path... | [((443, 470), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (460, 470), False, 'import logging\n'), ((6098, 6123), 'os.listdir', 'os.listdir', (['framenet_path'], {}), '(framenet_path)\n', (6108, 6123), False, 'import os\n'), ((7600, 7645), 'logging.info', 'logging.info', (['"""Loaded Nombank frame mapping."""'], {}), "('Loaded Nombank frame mapping.')\n", (7612, 7645), False, 'import logging\n'), ((1236, 1270), 'numpy.load', 'np.load', (['self.event_embedding_path'], {}), '(self.event_embedding_path)\n', (1243, 1270), True, 'import numpy as np\n'), ((1301, 1334), 'numpy.load', 'np.load', (['self.word_embedding_path'], {}), '(self.word_embedding_path)\n', (1308, 1334), True, 'import numpy as np\n'), ((1417, 1466), 'event.arguments.prepare.event_vocab.EmbbedingVocab.with_extras', 'EmbbedingVocab.with_extras', (['self.event_vocab_path'], {}), '(self.event_vocab_path)\n', (1443, 1466), False, 'from event.arguments.prepare.event_vocab import EmbbedingVocab\n'), ((1549, 1591), 'event.arguments.prepare.event_vocab.EmbbedingVocab', 'EmbbedingVocab', (['self.word_vocab_path', '(True)'], {}), '(self.word_vocab_path, True)\n', (1563, 1591), False, 'from event.arguments.prepare.event_vocab import EmbbedingVocab\n'), ((1891, 1928), 'event.arguments.prepare.event_vocab.TypedEventVocab', 'TypedEventVocab', (['self.raw_lookup_path'], {}), '(self.raw_lookup_path)\n', (1906, 1928), False, 'from event.arguments.prepare.event_vocab import TypedEventVocab\n'), ((2016, 2035), 'event.arguments.prepare.hash_cloze_data.HashParam', 'HashParam', ([], {}), '(**kwargs)\n', (2025, 2035), False, 'from event.arguments.prepare.hash_cloze_data import HashParam\n'), ((2065, 2089), 'event.arguments.prepare.hash_cloze_data.SlotHandler', 'SlotHandler', (['hash_params'], {}), '(hash_params)\n', (2076, 2089), False, 'from event.arguments.prepare.hash_cloze_data import SlotHandler\n'), ((563, 594), 'traitlets.Unicode', 'Unicode', ([], {'help': '"""Raw corpus 
name"""'}), "(help='Raw corpus name')\n", (570, 594), False, 'from traitlets import Int, List, Unicode\n'), ((639, 675), 'traitlets.Unicode', 'Unicode', ([], {'help': '"""Event Embedding path"""'}), "(help='Event Embedding path')\n", (646, 675), False, 'from traitlets import Int, List, Unicode\n'), ((719, 754), 'traitlets.Unicode', 'Unicode', ([], {'help': '"""Word Embedding path"""'}), "(help='Word Embedding path')\n", (726, 754), False, 'from traitlets import Int, List, Unicode\n'), ((796, 823), 'traitlets.Unicode', 'Unicode', ([], {'help': '"""Event Vocab"""'}), "(help='Event Vocab')\n", (803, 823), False, 'from traitlets import Int, List, Unicode\n'), ((863, 889), 'traitlets.Unicode', 'Unicode', ([], {'help': '"""Word Vocab"""'}), "(help='Word Vocab')\n", (870, 889), False, 'from traitlets import Int, List, Unicode\n'), ((930, 963), 'traitlets.Unicode', 'Unicode', ([], {'help': '"""Raw Lookup Vocab."""'}), "(help='Raw Lookup Vocab.')\n", (937, 963), False, 'from traitlets import Int, List, Unicode\n'), ((1004, 1065), 'traitlets.Int', 'Int', ([], {'help': '"""The min vocab cutoff threshold."""', 'default_value': '(50)'}), "(help='The min vocab cutoff threshold.', default_value=50)\n", (1007, 1065), False, 'from traitlets import Int, List, Unicode\n'), ((6284, 6304), 'xml.etree.ElementTree.parse', 'ET.parse', (['frame_data'], {}), '(frame_data)\n', (6292, 6304), True, 'import xml.etree.ElementTree as ET\n'), ((6209, 6248), 'os.path.join', 'os.path.join', (['framenet_path', 'frame_file'], {}), '(framenet_path, frame_file)\n', (6221, 6248), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Load the picture and report its basic array properties.
img = plt.imread('images/profile.jpg')
print(np.shape(img))
print(type(img))
print(img.dtype)

# Display the picture.
plt.imshow(img)
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.shape",
"matplotlib.pyplot.imread",
"matplotlib.pyplot.show"
] | [((56, 88), 'matplotlib.pyplot.imread', 'plt.imread', (['"""images/profile.jpg"""'], {}), "('images/profile.jpg')\n", (66, 88), True, 'import matplotlib.pyplot as plt\n'), ((148, 161), 'matplotlib.pyplot.imshow', 'plt.imshow', (['A'], {}), '(A)\n', (158, 161), True, 'import matplotlib.pyplot as plt\n'), ((162, 172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (170, 172), True, 'import matplotlib.pyplot as plt\n'), ((105, 116), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (113, 116), True, 'import numpy as np\n')] |
import numpy as np
import taichi as ti
if ti.has_pytorch():
import torch
@ti.torch_test
def test_torch_ad():
    """Differentiate through a Taichi kernel wrapped as a custom
    torch.autograd.Function (CPU tensors)."""
    n = 32
    # Taichi fields for kernel input/output; needs_grad=True makes Taichi
    # allocate matching gradient fields (x.grad / y.grad).
    x = ti.field(ti.f32, shape=n, needs_grad=True)
    y = ti.field(ti.f32, shape=n, needs_grad=True)

    @ti.kernel
    def torch_kernel():
        for i in range(n):
            # Do whatever complex operations here
            y[n - i - 1] = x[i] * x[i]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            # Copy the torch tensor into the Taichi field, run the kernel,
            # then copy the result back out as a torch tensor.
            x.from_torch(inp)
            torch_kernel()
            outp = y.to_torch()
            return outp

        @staticmethod
        def backward(ctx, outp_grad):
            # Reset Taichi gradients, seed them with the incoming torch
            # gradient, and run the kernel's reverse-mode pass.
            ti.clear_all_gradients()
            y.grad.from_torch(outp_grad)
            torch_kernel.grad()
            inp_grad = x.grad.to_torch()
            return inp_grad

    sqr = Sqr.apply
    for i in range(10):
        X = torch.tensor(2 * np.ones((n, ), dtype=np.float32),
                         requires_grad=True)
        sqr(X).sum().backward()
        ret = X.grad.cpu().numpy()
        for j in range(n):
            # d/dx of x^2 at x=2 is 4 for every element.
            assert ret[j] == 4
@ti.torch_test
def test_torch_ad_gpu():
    """Same round-trip autograd test as test_torch_ad, but with the torch
    tensors placed on the first CUDA device."""
    # Skip silently when no GPU is present.
    if not torch.cuda.is_available():
        return

    device = torch.device('cuda:0')
    n = 32
    x = ti.field(ti.f32, shape=n, needs_grad=True)
    y = ti.field(ti.f32, shape=n, needs_grad=True)

    @ti.kernel
    def torch_kernel():
        for i in range(n):
            # Do whatever complex operations here
            y[n - i - 1] = x[i] * x[i]

    class Sqr(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            # Field round-trip; the output tensor is created on the GPU.
            x.from_torch(inp)
            torch_kernel()
            outp = y.to_torch(device=device)
            return outp

        @staticmethod
        def backward(ctx, outp_grad):
            # Seed Taichi gradients from torch and run the reverse pass.
            ti.clear_all_gradients()
            y.grad.from_torch(outp_grad)
            torch_kernel.grad()
            inp_grad = x.grad.to_torch(device=device)
            return inp_grad

    sqr = Sqr.apply
    for i in range(10):
        X = torch.tensor(2 * np.ones((n, ), dtype=np.float32),
                         requires_grad=True,
                         device=device)
        sqr(X).sum().backward()
        ret = X.grad.cpu().numpy()
        for j in range(n):
            # Gradient of sum(x^2) at x=2 is 4 everywhere.
            assert ret[j] == 4
| [
"taichi.has_pytorch",
"numpy.ones",
"taichi.field",
"taichi.clear_all_gradients",
"torch.cuda.is_available",
"torch.device"
] | [((44, 60), 'taichi.has_pytorch', 'ti.has_pytorch', ([], {}), '()\n', (58, 60), True, 'import taichi as ti\n'), ((137, 179), 'taichi.field', 'ti.field', (['ti.f32'], {'shape': 'n', 'needs_grad': '(True)'}), '(ti.f32, shape=n, needs_grad=True)\n', (145, 179), True, 'import taichi as ti\n'), ((188, 230), 'taichi.field', 'ti.field', (['ti.f32'], {'shape': 'n', 'needs_grad': '(True)'}), '(ti.f32, shape=n, needs_grad=True)\n', (196, 230), True, 'import taichi as ti\n'), ((1221, 1243), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1233, 1243), False, 'import torch\n'), ((1264, 1306), 'taichi.field', 'ti.field', (['ti.f32'], {'shape': 'n', 'needs_grad': '(True)'}), '(ti.f32, shape=n, needs_grad=True)\n', (1272, 1306), True, 'import taichi as ti\n'), ((1315, 1357), 'taichi.field', 'ti.field', (['ti.f32'], {'shape': 'n', 'needs_grad': '(True)'}), '(ti.f32, shape=n, needs_grad=True)\n', (1323, 1357), True, 'import taichi as ti\n'), ((1165, 1190), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1188, 1190), False, 'import torch\n'), ((667, 691), 'taichi.clear_all_gradients', 'ti.clear_all_gradients', ([], {}), '()\n', (689, 691), True, 'import taichi as ti\n'), ((1807, 1831), 'taichi.clear_all_gradients', 'ti.clear_all_gradients', ([], {}), '()\n', (1829, 1831), True, 'import taichi as ti\n'), ((908, 939), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'np.float32'}), '((n,), dtype=np.float32)\n', (915, 939), True, 'import numpy as np\n'), ((2061, 2092), 'numpy.ones', 'np.ones', (['(n,)'], {'dtype': 'np.float32'}), '((n,), dtype=np.float32)\n', (2068, 2092), True, 'import numpy as np\n')] |
import contextlib
import joblib
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
from sklearn.model_selection import LeaveOneOut, KFold, LeavePOut
from sklearn import linear_model
from tqdm import tqdm
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
    """Context manager to patch joblib to report into tqdm progress bar given as argument"""
    class PatchedCallback(joblib.parallel.BatchCompletionCallBack):
        def __call__(self, *args, **kwargs):
            # Advance the bar by one completed batch before delegating.
            tqdm_object.update(n=self.batch_size)
            return super().__call__(*args, **kwargs)

    original_callback = joblib.parallel.BatchCompletionCallBack
    joblib.parallel.BatchCompletionCallBack = PatchedCallback
    try:
        yield tqdm_object
    finally:
        # Restore joblib's callback and close the bar even on error.
        joblib.parallel.BatchCompletionCallBack = original_callback
        tqdm_object.close()
def get_out_of_set_accuracy(test, train, x, y):
    """Fit an ordinary-least-squares model (with intercept) on *train* and
    score it on *test*.

    Returns True when the position of the largest predicted value matches
    the position of the largest true value in the held-out set.
    """
    design_train = np.concatenate((np.ones((len(train), 1)), train[x]), 1)
    beta = np.linalg.lstsq(design_train, train[y], rcond=-1)[0]

    design_test = np.concatenate((np.ones((len(test), 1)), test[x]), 1)
    predicted = design_test @ beta
    return np.argmax(predicted) == np.argmax(test[y])
def get_cv2_accuracy(data, x, y, pbar=True):
    """Leave-two-out cross-validated accuracy of the linear decoder.

    Every pair of rows is held out once; the remaining rows fit the model
    and the held-out pair is scored with get_out_of_set_accuracy. Returns
    the mean hit rate over all folds.
    """
    splitter = LeavePOut(2)
    n_splits = splitter.get_n_splits(data)
    hits = np.zeros(n_splits)

    fold_iter = enumerate(splitter.split(data))
    if pbar:
        fold_iter = tqdm(fold_iter, total=n_splits)
    for fold, (train_idx, test_idx) in fold_iter:
        hits[fold] = get_out_of_set_accuracy(
            data.iloc[test_idx], data.iloc[train_idx], x, y)
    return np.mean(hits)
def get_null(data, x, y, n=250, n_jobs=7):
    """Build a permutation null distribution of leave-two-out accuracies.

    The outcome column(s) *y* are shuffled relative to the features *x*
    and the cross-validated accuracy is recomputed, ``n`` times in
    parallel.

    Parameters
    ----------
    data : pandas.DataFrame
        Table holding both feature and outcome columns.
    x, y :
        Feature and outcome column selectors, passed through to
        ``get_cv2_accuracy``.
    n : int
        Number of permutations.
    n_jobs : int
        Number of parallel joblib workers.

    Returns
    -------
    list
        The ``n`` accuracies obtained under permuted labels.
    """
    def shuffle_and_get_cv2_accuracy():
        # Shuffle only the outcome values; the features keep their order.
        shuffled_data = data.copy()
        shuffled_data[y] = shuffled_data[y].sample(frac=1.).values
        return get_cv2_accuracy(shuffled_data, x, y, pbar=False)

    # NOTE: the previous `null_accuracies = np.zeros(n)` preallocation was
    # dead code — Parallel returns a fresh list which is what we return.
    with tqdm_joblib(tqdm(desc="Null distro", total=n)):
        null_accuracies = Parallel(n_jobs=n_jobs)(
            delayed(shuffle_and_get_cv2_accuracy)() for _ in range(n))
    return null_accuracies
| [
"numpy.mean",
"sklearn.model_selection.LeavePOut",
"tqdm.tqdm",
"numpy.argmax",
"joblib.Parallel",
"numpy.zeros",
"numpy.linalg.lstsq",
"numpy.concatenate",
"joblib.delayed"
] | [((1099, 1145), 'numpy.concatenate', 'np.concatenate', (['(intercept_train, train[x])', '(1)'], {}), '((intercept_train, train[x]), 1)\n', (1113, 1145), True, 'import numpy as np\n'), ((1265, 1309), 'numpy.concatenate', 'np.concatenate', (['(intercept_test, test[x])', '(1)'], {}), '((intercept_test, test[x]), 1)\n', (1279, 1309), True, 'import numpy as np\n'), ((1457, 1469), 'sklearn.model_selection.LeavePOut', 'LeavePOut', (['(2)'], {}), '(2)\n', (1466, 1469), False, 'from sklearn.model_selection import LeaveOneOut, KFold, LeavePOut\n'), ((1527, 1545), 'numpy.zeros', 'np.zeros', (['n_splits'], {}), '(n_splits)\n', (1535, 1545), True, 'import numpy as np\n'), ((1802, 1818), 'numpy.mean', 'np.mean', (['correct'], {}), '(correct)\n', (1809, 1818), True, 'import numpy as np\n'), ((1887, 1898), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1895, 1898), True, 'import numpy as np\n'), ((1158, 1202), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['x_train', 'train[y]'], {'rcond': '(-1)'}), '(x_train, train[y], rcond=-1)\n', (1173, 1202), True, 'import numpy as np\n'), ((1352, 1372), 'numpy.argmax', 'np.argmax', (['predicted'], {}), '(predicted)\n', (1361, 1372), True, 'import numpy as np\n'), ((1376, 1394), 'numpy.argmax', 'np.argmax', (['test[y]'], {}), '(test[y])\n', (1385, 1394), True, 'import numpy as np\n'), ((1623, 1651), 'tqdm.tqdm', 'tqdm', (['splits'], {'total': 'n_splits'}), '(splits, total=n_splits)\n', (1627, 1651), False, 'from tqdm import tqdm\n'), ((2130, 2163), 'tqdm.tqdm', 'tqdm', ([], {'desc': '"""Null distro"""', 'total': 'n'}), "(desc='Null distro', total=n)\n", (2134, 2163), False, 'from tqdm import tqdm\n'), ((2208, 2231), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (2216, 2231), False, 'from joblib import Parallel, delayed\n'), ((2245, 2282), 'joblib.delayed', 'delayed', (['shuffle_and_get_cv2_accuracy'], {}), '(shuffle_and_get_cv2_accuracy)\n', (2252, 2282), False, 'from joblib import Parallel, delayed\n')] |
"""
Cross validation of the hyperparameters
"""
import csv
import numpy as np
import torch
import os
from argparse import ArgumentParser
from itertools import product
from torch.optim import Adam, SGD
from torch.utils.data import DataLoader
from datasets import CrossValidationDataset, CytomineDataset
from evaluate import evaluate
from losses import Loss
from metrics import IoU, DiceCoefficient, HausdorffDistance
from model import NuClick
from train import train, validate
# Run on the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def parse_arguments():
    """
    Parse the arguments of the program.

    Return
    ------
    args : class argparse.Namespace
        The parsed arguments.
    """
    parser = ArgumentParser(
        # Typo fixed in the user-facing help text: "Cros" -> "Cross".
        description="Cross validation for the hyperparameters."
    )
    parser.add_argument(
        '--bs',
        dest='batch_size',
        type=int,
        default=16,
        help="The batch size for the dataset."
    )
    parser.add_argument(
        '--epochs',
        type=int,
        default=100,
        help="Number of epochs to train the model."
    )
    parser.add_argument(
        '--data',
        help="Path to the dataset."
    )
    parser.add_argument(
        '--dest',
        default='./',
        help="The path to save the CSV results."
    )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_arguments()

    # Reproducibility
    torch.manual_seed(0)
    np.random.seed(0)

    # Load the dataset: every sub-directory except 'test' is one fold.
    dirnames = next(os.walk(args.data))[1]
    dirnames.remove('test')
    dirnames = [os.path.join(args.data, dirname) for dirname in dirnames]
    # Build (train folds, validation fold) pairs: each fold is held out once.
    cross = []
    for dirname in dirnames:
        folds = dirnames.copy()
        folds.remove(dirname)
        cross.append((folds, dirname))

    # Create the test set
    # NOTE(review): testloader is created but never used in the loop below.
    test_data = CytomineDataset(os.path.join(args.data, 'test'))
    testloader = DataLoader(test_data, args.batch_size, shuffle=True)

    # Loss function and metrics
    criterion = Loss()

    # Statistics: CSV columns written once per hyper-parameter combination.
    header = ['epoch', 'train_mean_loss', 'train_std_loss', 'val_mean_loss',
              'val_std_loss', 'iou', 'dice', 'haus']
    data_name = os.path.basename(os.path.normpath(args.data))

    # Hyper-parameters to cross-validate (optimizer x learning rate x
    # weight decay).
    optimizers = [Adam, SGD]
    lrs = [1.0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5]
    wds = [1e-1, 1e-2, 1e-3]

    # The cross-validation
    for index, (folds, val) in enumerate(cross):
        print(f'Fold {index}')

        # Create the train and validation sets for this combination of folds
        train_data = CrossValidationDataset(folds)
        trainloader = DataLoader(train_data, args.batch_size, shuffle=True)
        val_data = CytomineDataset(val)
        valloader = DataLoader(val_data, args.batch_size, shuffle=True)

        for (optim, lr, wd) in product(optimizers, lrs, wds):
            print()
            print(f'{optim.__name__:>4}, lr: {lr:>6}, wd: {wd:>5}')
            print('-' * 27)

            # Statistics: create the CSV with only the header row; epochs
            # append below.
            csv_name = f'cv-{data_name}-{optim.__name__}-lr={lr}-wd={wd}.csv'
            stat_path = os.path.join(args.dest, csv_name)
            with open(stat_path, 'w', newline='') as file:
                writer = csv.DictWriter(file, fieldnames=header)
                writer.writeheader()

            # Create the model (fresh weights for every combination)
            model = NuClick().to(device)
            # Create the optimizer
            optimizer = optim(model.parameters(), lr=lr, weight_decay=wd)

            print(" epoch train_loss val_loss iou dice haus")
            print("------ ----------- ---------- ------- ------- --------")
            # Training the model
            for epoch in range(args.epochs):
                # Train the model for one epoch
                train_losses = train(model, trainloader, criterion, optimizer)
                # Perform the validation test on the model
                val_losses = validate(model, valloader, criterion)
                # Perform the validation test on the metrics
                iou = evaluate(model, valloader, IoU())
                dice = evaluate(model, valloader, DiceCoefficient())
                haus = evaluate(model, valloader, HausdorffDistance())

                # Statistics: append one row of metrics per epoch.
                with open(stat_path, 'a', newline='') as file:
                    csv.writer(file).writerow([
                        epoch,
                        np.mean(train_losses),
                        np.std(train_losses),
                        np.mean(val_losses),
                        np.std(val_losses),
                        iou,
                        dice,
                        haus
                    ])
                print(
                    f"{epoch:>6}", ' ',
                    f"{np.mean(train_losses):>10.4f} ",
                    f"{np.mean(val_losses):>10.4f}", ' ',
                    f"{iou:>5.4f}", ' ',
                    f"{dice:>5.4f}", ' ',
                    f"{haus:>7.4f}"
                )
| [
"csv.DictWriter",
"torch.cuda.is_available",
"os.walk",
"numpy.mean",
"argparse.ArgumentParser",
"train.validate",
"metrics.DiceCoefficient",
"itertools.product",
"os.path.normpath",
"numpy.random.seed",
"model.NuClick",
"csv.writer",
"numpy.std",
"torch.manual_seed",
"train.train",
"m... | [((762, 832), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Cros validation for the hyperparameters."""'}), "(description='Cros validation for the hyperparameters.')\n", (776, 832), False, 'from argparse import ArgumentParser\n'), ((1471, 1491), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (1488, 1491), False, 'import torch\n'), ((1496, 1513), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1510, 1513), True, 'import numpy as np\n'), ((1939, 1991), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data', 'args.batch_size'], {'shuffle': '(True)'}), '(test_data, args.batch_size, shuffle=True)\n', (1949, 1991), False, 'from torch.utils.data import DataLoader\n'), ((2041, 2047), 'losses.Loss', 'Loss', ([], {}), '()\n', (2045, 2047), False, 'from losses import Loss\n'), ((540, 565), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (563, 565), False, 'import torch\n'), ((1625, 1657), 'os.path.join', 'os.path.join', (['args.data', 'dirname'], {}), '(args.data, dirname)\n', (1637, 1657), False, 'import os\n'), ((1889, 1920), 'os.path.join', 'os.path.join', (['args.data', '"""test"""'], {}), "(args.data, 'test')\n", (1901, 1920), False, 'import os\n'), ((2229, 2256), 'os.path.normpath', 'os.path.normpath', (['args.data'], {}), '(args.data)\n', (2245, 2256), False, 'import os\n'), ((2595, 2624), 'datasets.CrossValidationDataset', 'CrossValidationDataset', (['folds'], {}), '(folds)\n', (2617, 2624), False, 'from datasets import CrossValidationDataset, CytomineDataset\n'), ((2647, 2700), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data', 'args.batch_size'], {'shuffle': '(True)'}), '(train_data, args.batch_size, shuffle=True)\n', (2657, 2700), False, 'from torch.utils.data import DataLoader\n'), ((2720, 2740), 'datasets.CytomineDataset', 'CytomineDataset', (['val'], {}), '(val)\n', (2735, 2740), False, 'from datasets import CrossValidationDataset, CytomineDataset\n'), 
((2761, 2812), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data', 'args.batch_size'], {'shuffle': '(True)'}), '(val_data, args.batch_size, shuffle=True)\n', (2771, 2812), False, 'from torch.utils.data import DataLoader\n'), ((2845, 2874), 'itertools.product', 'product', (['optimizers', 'lrs', 'wds'], {}), '(optimizers, lrs, wds)\n', (2852, 2874), False, 'from itertools import product\n'), ((1558, 1576), 'os.walk', 'os.walk', (['args.data'], {}), '(args.data)\n', (1565, 1576), False, 'import os\n'), ((3120, 3153), 'os.path.join', 'os.path.join', (['args.dest', 'csv_name'], {}), '(args.dest, csv_name)\n', (3132, 3153), False, 'import os\n'), ((3238, 3277), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': 'header'}), '(file, fieldnames=header)\n', (3252, 3277), False, 'import csv\n'), ((3819, 3866), 'train.train', 'train', (['model', 'trainloader', 'criterion', 'optimizer'], {}), '(model, trainloader, criterion, optimizer)\n', (3824, 3866), False, 'from train import train, validate\n'), ((3956, 3993), 'train.validate', 'validate', (['model', 'valloader', 'criterion'], {}), '(model, valloader, criterion)\n', (3964, 3993), False, 'from train import train, validate\n'), ((3367, 3376), 'model.NuClick', 'NuClick', ([], {}), '()\n', (3374, 3376), False, 'from model import NuClick\n'), ((4105, 4110), 'metrics.IoU', 'IoU', ([], {}), '()\n', (4108, 4110), False, 'from metrics import IoU, DiceCoefficient, HausdorffDistance\n'), ((4162, 4179), 'metrics.DiceCoefficient', 'DiceCoefficient', ([], {}), '()\n', (4177, 4179), False, 'from metrics import IoU, DiceCoefficient, HausdorffDistance\n'), ((4231, 4250), 'metrics.HausdorffDistance', 'HausdorffDistance', ([], {}), '()\n', (4248, 4250), False, 'from metrics import IoU, DiceCoefficient, HausdorffDistance\n'), ((4365, 4381), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (4375, 4381), False, 'import csv\n'), ((4448, 4469), 'numpy.mean', 'np.mean', (['train_losses'], {}), '(train_losses)\n', (4455, 
4469), True, 'import numpy as np\n'), ((4495, 4515), 'numpy.std', 'np.std', (['train_losses'], {}), '(train_losses)\n', (4501, 4515), True, 'import numpy as np\n'), ((4541, 4560), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (4548, 4560), True, 'import numpy as np\n'), ((4586, 4604), 'numpy.std', 'np.std', (['val_losses'], {}), '(val_losses)\n', (4592, 4604), True, 'import numpy as np\n'), ((4804, 4825), 'numpy.mean', 'np.mean', (['train_losses'], {}), '(train_losses)\n', (4811, 4825), True, 'import numpy as np\n'), ((4860, 4879), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (4867, 4879), True, 'import numpy as np\n')] |
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
# Plot 2^x against x! on [0, 5] and save the figure.
# scipy.misc.factorial was deprecated in SciPy 1.0 and removed in 1.3;
# the function lives in scipy.special now.
from scipy.special import factorial

x = np.linspace(0, 5, 100)
y1 = np.power(2, x)
y2 = factorial(x)

plt.plot(x, y1)
plt.plot(x, y2)

plt.grid(True)
plt.savefig('../../img/question_4_plots/g.png')
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.power",
"matplotlib.pyplot.plot",
"numpy.linspace"
] | [((74, 96), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (85, 96), True, 'import numpy as np\n'), ((102, 116), 'numpy.power', 'np.power', (['(2)', 'x'], {}), '(2, x)\n', (110, 116), True, 'import numpy as np\n'), ((147, 162), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y1'], {}), '(x, y1)\n', (155, 162), True, 'import matplotlib.pyplot as plt\n'), ((163, 178), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (171, 178), True, 'import matplotlib.pyplot as plt\n'), ((180, 194), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (188, 194), True, 'import matplotlib.pyplot as plt\n'), ((195, 242), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../../img/question_4_plots/g.png"""'], {}), "('../../img/question_4_plots/g.png')\n", (206, 242), True, 'import matplotlib.pyplot as plt\n')] |
import torch
from importlib import reload
# import os; os.chdir("/home/wamsterd/local_scratch/git/CauseEffectPairs/")
from torch.autograd import Variable
from torch.optim import SGD
import matplotlib.pyplot as plt
import numpy as np
import train; reload(train)
import model.net as net; reload(net)
torch.manual_seed(123456)
n_units = int(10e4)
a, b, c, d = 1, 2, -.5, 1.2
x = Variable(torch.Tensor(np.linspace(-2, 2, n_units).reshape(n_units, 1)), requires_grad = True)
y = a + b * x + c * (x**2) + d * (x**3)
# plot data
plt.scatter(x.detach().numpy(), y.detach().numpy())
plt.show()
model = net.TwoLayerNet(1, 40, 1)
# model = net.ThreeLayerNet(1, 20, 20, 1)
# for param in model.parameters():
# print(param)
# param0 = param
# loss_fn = net.loss_fn
optimizer = SGD(model.parameters(), lr=0.001)#, momentum=0.9)#, weight_decay=.1)
num_epochs = 1000
# train(model, num_epochs, x, y, net.loss_fn, optimizer)
train.train(model, num_epochs, x, y,
torch.nn.MSELoss(),
optimizer,
gradient_clip = (-10, 10))
pred = model.forward(x)
print("mean mse:", torch.nn.MSELoss()(pred, x).data)
plt.scatter(x.detach().numpy(), y.detach().numpy())
plt.scatter(x.detach().numpy(), pred.detach().numpy(), c = "red")
plt.show()
| [
"torch.manual_seed",
"torch.nn.MSELoss",
"model.net.TwoLayerNet",
"numpy.linspace",
"importlib.reload",
"matplotlib.pyplot.show"
] | [((248, 261), 'importlib.reload', 'reload', (['train'], {}), '(train)\n', (254, 261), False, 'from importlib import reload\n'), ((287, 298), 'importlib.reload', 'reload', (['net'], {}), '(net)\n', (293, 298), False, 'from importlib import reload\n'), ((300, 325), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (317, 325), False, 'import torch\n'), ((578, 588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (586, 588), True, 'import matplotlib.pyplot as plt\n'), ((598, 623), 'model.net.TwoLayerNet', 'net.TwoLayerNet', (['(1)', '(40)', '(1)'], {}), '(1, 40, 1)\n', (613, 623), True, 'import model.net as net\n'), ((1240, 1250), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1248, 1250), True, 'import matplotlib.pyplot as plt\n'), ((971, 989), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (987, 989), False, 'import torch\n'), ((1087, 1105), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1103, 1105), False, 'import torch\n'), ((400, 427), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'n_units'], {}), '(-2, 2, n_units)\n', (411, 427), True, 'import numpy as np\n')] |
import warnings
import numpy as np
import numpy.testing as npt
import matplotlib
import matplotlib.mlab as mlab
import nitime.timeseries as ts
import nitime.analysis as nta
import platform
# Some tests might require python version 2.5 or above:
if float(platform.python_version()[:3]) < 2.5:
old_python = True
else:
old_python = False
# Matplotlib older than 0.99 will have some issues with the normalization of t
if float(matplotlib.__version__[:3]) < 0.99:
w_s = "You have a relatively old version of Matplotlib. "
w_s += " Estimation of the PSD DC component might not be as expected"
w_s += " Consider updating Matplotlib: http://matplotlib.sourceforge.net/"
warnings.warn(w_s, Warning)
old_mpl = True
else:
old_mpl = False
def test_CoherenceAnalyzer():
methods = (None,
{"this_method": 'welch', "NFFT": 256},
{"this_method": 'multi_taper_csd'},
{"this_method": 'periodogram_csd', "NFFT": 256})
Fs = np.pi
t = np.arange(1024)
x = np.sin(10 * t) + np.random.rand(t.shape[-1])
y = np.sin(10 * t) + np.random.rand(t.shape[-1])
# Third time-series used for calculation of partial coherence:
z = np.sin(10 * t)
T = ts.TimeSeries(np.vstack([x, y, z]), sampling_rate=np.pi)
n_series = T.shape[0]
for unwrap in [True, False]:
for method in methods:
C = nta.CoherenceAnalyzer(T, method, unwrap_phases=unwrap)
if method is None:
# This is the default behavior (grab the NFFT from the number
# of frequencies):
npt.assert_equal(C.coherence.shape, (n_series, n_series,
C.frequencies.shape[0]))
elif (method['this_method'] == 'welch' or
method['this_method'] == 'periodogram_csd'):
npt.assert_equal(C.coherence.shape, (n_series, n_series,
method['NFFT'] // 2 + 1))
else:
npt.assert_equal(C.coherence.shape, (n_series, n_series,
len(t) // 2 + 1))
# Coherence symmetry:
npt.assert_equal(C.coherence[0, 1], C.coherence[1, 0])
# Phase/delay asymmetry:
npt.assert_equal(C.phase[0, 1], -1 * C.phase[1, 0])
# The very first one is a nan, test from second and onwards:
npt.assert_almost_equal(C.delay[0, 1][1:], -1 * C.delay[1, 0][1:])
if method is not None and method['this_method'] == 'welch':
S = nta.SpectralAnalyzer(T, method)
npt.assert_almost_equal(S.cpsd[0], C.frequencies)
npt.assert_almost_equal(S.cpsd[1], C.spectrum)
# Test that partial coherence runs through and has the right number
# of dimensions:
npt.assert_equal(len(C.coherence_partial.shape), 4)
@npt.dec.skipif(old_mpl)
def test_SparseCoherenceAnalyzer():
Fs = np.pi
t = np.arange(256)
x = np.sin(10 * t) + np.random.rand(t.shape[-1])
y = np.sin(10 * t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x, y]), sampling_rate=Fs)
C1 = nta.SparseCoherenceAnalyzer(T, ij=((0, 1), (1, 0)))
C2 = nta.CoherenceAnalyzer(T)
# Coherence symmetry:
npt.assert_equal(np.abs(C1.coherence[0, 1]), np.abs(C1.coherence[1, 0]))
npt.assert_equal(np.abs(C1.coherency[0, 1]), np.abs(C1.coherency[1, 0]))
# Make sure you get the same answers as you would from the standard
# CoherenceAnalyzer:
npt.assert_almost_equal(C2.coherence[0, 1], C1.coherence[0, 1])
# This is the PSD (for the first time-series in the object):
npt.assert_almost_equal(C2.spectrum[0, 0], C1.spectrum[0])
# And the second (for good measure):
npt.assert_almost_equal(C2.spectrum[1, 1], C1.spectrum[1])
# The relative phases should be equal
npt.assert_almost_equal(C2.phase[0, 1], C1.relative_phases[0, 1])
# But not the absolute phases (which have the same shape):
npt.assert_equal(C1.phases[0].shape, C1.relative_phases[0, 1].shape)
# The delay is equal:
npt.assert_almost_equal(C2.delay[0, 1], C1.delay[0, 1])
# Make sure that you would get an error if you provided a method other than
# 'welch':
npt.assert_raises(ValueError, nta.SparseCoherenceAnalyzer, T,
method=dict(this_method='foo'))
def test_MTCoherenceAnalyzer():
"""Test the functionality of the multi-taper spectral coherence """
Fs = np.pi
t = np.arange(256)
x = np.sin(10 * t) + np.random.rand(t.shape[-1])
y = np.sin(10 * t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x, y]), sampling_rate=Fs)
n_series = T.shape[0]
NFFT = t.shape[0] // 2 + 1
for adaptive in [True, False]:
C = nta.MTCoherenceAnalyzer(T, adaptive=adaptive)
npt.assert_equal(C.frequencies.shape[0], NFFT)
npt.assert_equal(C.coherence.shape, (n_series, n_series, NFFT))
npt.assert_equal(C.confidence_interval.shape, (n_series, n_series,
NFFT))
@npt.dec.skipif(old_python)
def test_warn_short_tseries():
"""
A warning is provided when the time-series is shorter than
the NFFT + n_overlap.
The implementation of this test is based on this:
http://docs.python.org/library/warnings.html#testing-warnings
"""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
# The following should throw a warning, because 70 is smaller than the
# default NFFT=64 + n_overlap=32:
nta.CoherenceAnalyzer(ts.TimeSeries(np.random.rand(2, 70),
sampling_rate=1))
# Verify some things
npt.assert_equal(len(w), 1)
def test_SeedCoherenceAnalyzer():
""" Test the SeedCoherenceAnalyzer """
methods = (None,
{"this_method": 'welch', "NFFT": 256},
{"this_method": 'multi_taper_csd'},
{"this_method": 'periodogram_csd', "NFFT": 256})
Fs = np.pi
t = np.arange(256)
seed1 = np.sin(10 * t) + np.random.rand(t.shape[-1])
seed2 = np.sin(10 * t) + np.random.rand(t.shape[-1])
target = np.sin(10 * t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([seed1, target]), sampling_rate=Fs)
T_seed1 = ts.TimeSeries(seed1, sampling_rate=Fs)
T_seed2 = ts.TimeSeries(np.vstack([seed1, seed2]), sampling_rate=Fs)
T_target = ts.TimeSeries(np.vstack([seed1, target]), sampling_rate=Fs)
for this_method in methods:
if this_method is None or this_method['this_method'] == 'welch':
C1 = nta.CoherenceAnalyzer(T, method=this_method)
C2 = nta.SeedCoherenceAnalyzer(T_seed1, T_target,
method=this_method)
C3 = nta.SeedCoherenceAnalyzer(T_seed2, T_target,
method=this_method)
npt.assert_almost_equal(C1.coherence[0, 1], C2.coherence[1])
npt.assert_almost_equal(C2.coherence[1], C3.coherence[0, 1])
npt.assert_almost_equal(C1.phase[0, 1], C2.relative_phases[1])
npt.assert_almost_equal(C1.delay[0, 1], C2.delay[1])
else:
npt.assert_raises(ValueError, nta.SeedCoherenceAnalyzer, T_seed1,
T_target, this_method)
def test_SeedCoherenceAnalyzer_same_Fs():
"""
Providing two time-series with different sampling rates throws an error
"""
Fs1 = np.pi
Fs2 = 2 * np.pi
t = np.arange(256)
T1 = ts.TimeSeries(np.random.rand(t.shape[-1]),
sampling_rate=Fs1)
T2 = ts.TimeSeries(np.random.rand(t.shape[-1]),
sampling_rate=Fs2)
npt.assert_raises(ValueError, nta.SeedCoherenceAnalyzer, T1, T2)
| [
"numpy.testing.assert_equal",
"numpy.random.rand",
"nitime.analysis.MTCoherenceAnalyzer",
"numpy.testing.assert_raises",
"numpy.sin",
"numpy.arange",
"nitime.analysis.CoherenceAnalyzer",
"nitime.analysis.SeedCoherenceAnalyzer",
"numpy.testing.assert_almost_equal",
"numpy.vstack",
"warnings.simpl... | [((2952, 2975), 'numpy.testing.dec.skipif', 'npt.dec.skipif', (['old_mpl'], {}), '(old_mpl)\n', (2966, 2975), True, 'import numpy.testing as npt\n'), ((5194, 5220), 'numpy.testing.dec.skipif', 'npt.dec.skipif', (['old_python'], {}), '(old_python)\n', (5208, 5220), True, 'import numpy.testing as npt\n'), ((694, 721), 'warnings.warn', 'warnings.warn', (['w_s', 'Warning'], {}), '(w_s, Warning)\n', (707, 721), False, 'import warnings\n'), ((1000, 1015), 'numpy.arange', 'np.arange', (['(1024)'], {}), '(1024)\n', (1009, 1015), True, 'import numpy as np\n'), ((1197, 1211), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (1203, 1211), True, 'import numpy as np\n'), ((3035, 3049), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (3044, 3049), True, 'import numpy as np\n'), ((3224, 3275), 'nitime.analysis.SparseCoherenceAnalyzer', 'nta.SparseCoherenceAnalyzer', (['T'], {'ij': '((0, 1), (1, 0))'}), '(T, ij=((0, 1), (1, 0)))\n', (3251, 3275), True, 'import nitime.analysis as nta\n'), ((3285, 3309), 'nitime.analysis.CoherenceAnalyzer', 'nta.CoherenceAnalyzer', (['T'], {}), '(T)\n', (3306, 3309), True, 'import nitime.analysis as nta\n'), ((3594, 3657), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C2.coherence[0, 1]', 'C1.coherence[0, 1]'], {}), '(C2.coherence[0, 1], C1.coherence[0, 1])\n', (3617, 3657), True, 'import numpy.testing as npt\n'), ((3727, 3785), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C2.spectrum[0, 0]', 'C1.spectrum[0]'], {}), '(C2.spectrum[0, 0], C1.spectrum[0])\n', (3750, 3785), True, 'import numpy.testing as npt\n'), ((3831, 3889), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C2.spectrum[1, 1]', 'C1.spectrum[1]'], {}), '(C2.spectrum[1, 1], C1.spectrum[1])\n', (3854, 3889), True, 'import numpy.testing as npt\n'), ((3937, 4002), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C2.phase[0, 1]', 'C1.relative_phases[0, 1]'], {}), '(C2.phase[0, 
1], C1.relative_phases[0, 1])\n', (3960, 4002), True, 'import numpy.testing as npt\n'), ((4070, 4138), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C1.phases[0].shape', 'C1.relative_phases[0, 1].shape'], {}), '(C1.phases[0].shape, C1.relative_phases[0, 1].shape)\n', (4086, 4138), True, 'import numpy.testing as npt\n'), ((4170, 4225), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C2.delay[0, 1]', 'C1.delay[0, 1]'], {}), '(C2.delay[0, 1], C1.delay[0, 1])\n', (4193, 4225), True, 'import numpy.testing as npt\n'), ((4597, 4611), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (4606, 4611), True, 'import numpy as np\n'), ((6251, 6265), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (6260, 6265), True, 'import numpy as np\n'), ((6520, 6558), 'nitime.timeseries.TimeSeries', 'ts.TimeSeries', (['seed1'], {'sampling_rate': 'Fs'}), '(seed1, sampling_rate=Fs)\n', (6533, 6558), True, 'import nitime.timeseries as ts\n'), ((7740, 7754), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (7749, 7754), True, 'import numpy as np\n'), ((7950, 8014), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError', 'nta.SeedCoherenceAnalyzer', 'T1', 'T2'], {}), '(ValueError, nta.SeedCoherenceAnalyzer, T1, T2)\n', (7967, 8014), True, 'import numpy.testing as npt\n'), ((1024, 1038), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (1030, 1038), True, 'import numpy as np\n'), ((1041, 1068), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (1055, 1068), True, 'import numpy as np\n'), ((1077, 1091), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (1083, 1091), True, 'import numpy as np\n'), ((1094, 1121), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (1108, 1121), True, 'import numpy as np\n'), ((1234, 1254), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (1243, 1254), True, 'import numpy as np\n'), ((3058, 3072), 'numpy.sin', 
'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (3064, 3072), True, 'import numpy as np\n'), ((3075, 3102), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (3089, 3102), True, 'import numpy as np\n'), ((3111, 3125), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (3117, 3125), True, 'import numpy as np\n'), ((3128, 3155), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (3142, 3155), True, 'import numpy as np\n'), ((3178, 3195), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (3187, 3195), True, 'import numpy as np\n'), ((3358, 3384), 'numpy.abs', 'np.abs', (['C1.coherence[0, 1]'], {}), '(C1.coherence[0, 1])\n', (3364, 3384), True, 'import numpy as np\n'), ((3386, 3412), 'numpy.abs', 'np.abs', (['C1.coherence[1, 0]'], {}), '(C1.coherence[1, 0])\n', (3392, 3412), True, 'import numpy as np\n'), ((3435, 3461), 'numpy.abs', 'np.abs', (['C1.coherency[0, 1]'], {}), '(C1.coherency[0, 1])\n', (3441, 3461), True, 'import numpy as np\n'), ((3463, 3489), 'numpy.abs', 'np.abs', (['C1.coherency[1, 0]'], {}), '(C1.coherency[1, 0])\n', (3469, 3489), True, 'import numpy as np\n'), ((4620, 4634), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (4626, 4634), True, 'import numpy as np\n'), ((4637, 4664), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (4651, 4664), True, 'import numpy as np\n'), ((4673, 4687), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (4679, 4687), True, 'import numpy as np\n'), ((4690, 4717), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (4704, 4717), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (4749, 4757), True, 'import numpy as np\n'), ((4881, 4926), 'nitime.analysis.MTCoherenceAnalyzer', 'nta.MTCoherenceAnalyzer', (['T'], {'adaptive': 'adaptive'}), '(T, adaptive=adaptive)\n', (4904, 4926), True, 'import nitime.analysis as 
nta\n'), ((4935, 4981), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.frequencies.shape[0]', 'NFFT'], {}), '(C.frequencies.shape[0], NFFT)\n', (4951, 4981), True, 'import numpy.testing as npt\n'), ((4990, 5053), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.coherence.shape', '(n_series, n_series, NFFT)'], {}), '(C.coherence.shape, (n_series, n_series, NFFT))\n', (5006, 5053), True, 'import numpy.testing as npt\n'), ((5062, 5135), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.confidence_interval.shape', '(n_series, n_series, NFFT)'], {}), '(C.confidence_interval.shape, (n_series, n_series, NFFT))\n', (5078, 5135), True, 'import numpy.testing as npt\n'), ((5490, 5526), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (5513, 5526), False, 'import warnings\n'), ((5594, 5625), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (5615, 5625), False, 'import warnings\n'), ((6278, 6292), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (6284, 6292), True, 'import numpy as np\n'), ((6295, 6322), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (6309, 6322), True, 'import numpy as np\n'), ((6335, 6349), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (6341, 6349), True, 'import numpy as np\n'), ((6352, 6379), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (6366, 6379), True, 'import numpy as np\n'), ((6393, 6407), 'numpy.sin', 'np.sin', (['(10 * t)'], {}), '(10 * t)\n', (6399, 6407), True, 'import numpy as np\n'), ((6410, 6437), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (6424, 6437), True, 'import numpy as np\n'), ((6460, 6486), 'numpy.vstack', 'np.vstack', (['[seed1, target]'], {}), '([seed1, target])\n', (6469, 6486), True, 'import numpy as np\n'), ((6587, 6612), 'numpy.vstack', 'np.vstack', (['[seed1, seed2]'], {}), '([seed1, seed2])\n', 
(6596, 6612), True, 'import numpy as np\n'), ((6661, 6687), 'numpy.vstack', 'np.vstack', (['[seed1, target]'], {}), '([seed1, target])\n', (6670, 6687), True, 'import numpy as np\n'), ((7779, 7806), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (7793, 7806), True, 'import numpy as np\n'), ((7874, 7901), 'numpy.random.rand', 'np.random.rand', (['t.shape[-1]'], {}), '(t.shape[-1])\n', (7888, 7901), True, 'import numpy as np\n'), ((259, 284), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (282, 284), False, 'import platform\n'), ((1383, 1437), 'nitime.analysis.CoherenceAnalyzer', 'nta.CoherenceAnalyzer', (['T', 'method'], {'unwrap_phases': 'unwrap'}), '(T, method, unwrap_phases=unwrap)\n', (1404, 1437), True, 'import nitime.analysis as nta\n'), ((2212, 2266), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.coherence[0, 1]', 'C.coherence[1, 0]'], {}), '(C.coherence[0, 1], C.coherence[1, 0])\n', (2228, 2266), True, 'import numpy.testing as npt\n'), ((2317, 2368), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.phase[0, 1]', '(-1 * C.phase[1, 0])'], {}), '(C.phase[0, 1], -1 * C.phase[1, 0])\n', (2333, 2368), True, 'import numpy.testing as npt\n'), ((2455, 2521), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C.delay[0, 1][1:]', '(-1 * C.delay[1, 0][1:])'], {}), '(C.delay[0, 1][1:], -1 * C.delay[1, 0][1:])\n', (2478, 2521), True, 'import numpy.testing as npt\n'), ((6829, 6873), 'nitime.analysis.CoherenceAnalyzer', 'nta.CoherenceAnalyzer', (['T'], {'method': 'this_method'}), '(T, method=this_method)\n', (6850, 6873), True, 'import nitime.analysis as nta\n'), ((6891, 6955), 'nitime.analysis.SeedCoherenceAnalyzer', 'nta.SeedCoherenceAnalyzer', (['T_seed1', 'T_target'], {'method': 'this_method'}), '(T_seed1, T_target, method=this_method)\n', (6916, 6955), True, 'import nitime.analysis as nta\n'), ((7016, 7080), 'nitime.analysis.SeedCoherenceAnalyzer', 'nta.SeedCoherenceAnalyzer', 
(['T_seed2', 'T_target'], {'method': 'this_method'}), '(T_seed2, T_target, method=this_method)\n', (7041, 7080), True, 'import nitime.analysis as nta\n'), ((7137, 7197), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C1.coherence[0, 1]', 'C2.coherence[1]'], {}), '(C1.coherence[0, 1], C2.coherence[1])\n', (7160, 7197), True, 'import numpy.testing as npt\n'), ((7210, 7270), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C2.coherence[1]', 'C3.coherence[0, 1]'], {}), '(C2.coherence[1], C3.coherence[0, 1])\n', (7233, 7270), True, 'import numpy.testing as npt\n'), ((7283, 7345), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C1.phase[0, 1]', 'C2.relative_phases[1]'], {}), '(C1.phase[0, 1], C2.relative_phases[1])\n', (7306, 7345), True, 'import numpy.testing as npt\n'), ((7358, 7410), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['C1.delay[0, 1]', 'C2.delay[1]'], {}), '(C1.delay[0, 1], C2.delay[1])\n', (7381, 7410), True, 'import numpy.testing as npt\n'), ((7438, 7530), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError', 'nta.SeedCoherenceAnalyzer', 'T_seed1', 'T_target', 'this_method'], {}), '(ValueError, nta.SeedCoherenceAnalyzer, T_seed1, T_target,\n this_method)\n', (7455, 7530), True, 'import numpy.testing as npt\n'), ((1598, 1684), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.coherence.shape', '(n_series, n_series, C.frequencies.shape[0])'], {}), '(C.coherence.shape, (n_series, n_series, C.frequencies.\n shape[0]))\n', (1614, 1684), True, 'import numpy.testing as npt\n'), ((2615, 2646), 'nitime.analysis.SpectralAnalyzer', 'nta.SpectralAnalyzer', (['T', 'method'], {}), '(T, method)\n', (2635, 2646), True, 'import nitime.analysis as nta\n'), ((2663, 2712), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['S.cpsd[0]', 'C.frequencies'], {}), '(S.cpsd[0], C.frequencies)\n', (2686, 2712), True, 'import numpy.testing as npt\n'), ((2729, 2775), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['S.cpsd[1]', 'C.spectrum'], {}), '(S.cpsd[1], C.spectrum)\n', (2752, 2775), True, 'import numpy.testing as npt\n'), ((5820, 5841), 'numpy.random.rand', 'np.random.rand', (['(2)', '(70)'], {}), '(2, 70)\n', (5834, 5841), True, 'import numpy as np\n'), ((1867, 1954), 'numpy.testing.assert_equal', 'npt.assert_equal', (['C.coherence.shape', "(n_series, n_series, method['NFFT'] // 2 + 1)"], {}), "(C.coherence.shape, (n_series, n_series, method['NFFT'] // \n 2 + 1))\n", (1883, 1954), True, 'import numpy.testing as npt\n')] |
import numpy as np
import pandas as pd
df = pd.read_csv('data/Merged Dataset after smoothing.csv',sep=',')
print("Number of data points: %d \n" % df.shape[0])
print("Number of defaults:")
counts = df.MD_EARN_WNE_P6.value_counts()
print (counts)
# from ggplot import *
# ggplot(df,aes("DEFAULT")) + geom_histogram(binwidth=1) + xlab("Defaulted?") + ylab("Number of people")
# Drop string field and id field
df.drop('INSTNM', axis=1, inplace=True)
df.drop('UNITID', axis=1, inplace=True)
df.drop('CITY', axis=1, inplace=True)
df.drop('STABBR', axis=1, inplace=True)
print("The", df.shape[1], "features (and their data types) are: \n ", df.dtypes, "\n")
# Partition the features from the class to predict
df_X = df[df.columns[df.columns != 'MD_EARN_WNE_P6']].copy()
df_y = df['MD_EARN_WNE_P6'].copy()
# df['DEFAULT'].value_counts().plot(kind = 'bar', title = 'Distribution of classes')
from sklearn.model_selection import train_test_split
# (random_state): we use a fixed random seed so we get the same results every time.
X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.2, random_state=0)
print ("\nNumber of training instances: ", len(X_train), "\nNumber of test instances: ", len(X_test))
print("\nDataset description: \n", X_train.describe())
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
# with sklearn
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
y_pred_train = regr.predict(X_train)
print(y_train.values)
print(y_pred_train)
print(np.diff(y_train.values, y_pred_train))
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.diff",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression"
] | [((45, 108), 'pandas.read_csv', 'pd.read_csv', (['"""data/Merged Dataset after smoothing.csv"""'], {'sep': '""","""'}), "('data/Merged Dataset after smoothing.csv', sep=',')\n", (56, 108), True, 'import pandas as pd\n'), ((1067, 1126), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df_X', 'df_y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(df_X, df_y, test_size=0.2, random_state=0)\n', (1083, 1126), False, 'from sklearn.model_selection import train_test_split\n'), ((1402, 1433), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1431, 1433), False, 'from sklearn import linear_model\n'), ((1888, 1925), 'numpy.diff', 'np.diff', (['y_train.values', 'y_pred_train'], {}), '(y_train.values, y_pred_train)\n', (1895, 1925), True, 'import numpy as np\n'), ((1657, 1691), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1675, 1691), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((1776, 1800), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1784, 1800), False, 'from sklearn.metrics import mean_squared_error, r2_score\n')] |
from __future__ import print_function
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import logging
import numpy as np
from time import time
import utils as U
import codecs
from optimizers import get_optimizer
from model import create_model
import keras.backend as K
from keras.preprocessing import sequence
import reader as dataset
from tqdm import tqdm
import pandas as pd
import json
from nltk.corpus import wordnet as wn
from collections import Counter
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class Node(object):
"""Class represents the node objects, which are displayed in the JSON file.
Args:
id: String that specifies the topic label
group: Integer that specifies the color of the node
occurrences: String that specifies the number of topic occurrences
words: Lists of representative words
sentences: List of representative sentences
"""
def __init__(self, id, group, occurrences, words, sentences):
self.id = id
self.group = group
self.occurrences = occurrences
self.words = words
self.sentences = sentences
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=2)
class Link(object):
"""Class represents the link objects, which are displayed in the JSON file.
Args:
source: String that specifies the topic label of the first node, the link is connected to
target: String that specifies the topic label of the second node, the link is connected to
value: Float that specifies the similarity (correlation) between source and target
"""
def __init__(self, source, target, value):
self.source = source
self.target = target
self.value = value
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=2)
class TopWord(object):
"""Class represents top words that are displayed in the JSON file
Args:
rank: Integer that specifies the rank in the word list (e.g., 1 --> Most representative word)
word: Unicode that specifies the word
similarity: String that specifies the similarity (correlation) to the topic embedding
"""
def __init__(self, rank, word, similarity):
self.rank = rank
self.word = word
self.similarity = similarity
class TopSentence(object):
"""Class represents top sentences that are displayed in the JSON file
Args:
rank: Integer that specifies the rank in the sentence list (e.g., 1 --> Most representative sentence)
sentence: Unicode that specifies the sentence
"""
def __init__(self, rank, sentence):
self.rank = rank
self.sentence = sentence
class Train(object):
"""Class used to train the model and generate relevant topic information
Args:
args: Argparse instance that contains the relevant parameters
logger: Logger instance
out_dir: String that contains the path to the output directory
"""
def __init__(self, args, logger, out_dir):
self.args = args
self.logger = logger
self.out_dir = out_dir
self.vocab, train_x, test_x, self.overall_maxlen = dataset.get_data(self.args.domain,
vocab_size=self.args.vocab_size,
maxlen=self.args.maxlen)
self.train_x = sequence.pad_sequences(train_x, maxlen=self.overall_maxlen)
self.test_x = sequence.pad_sequences(test_x, maxlen=self.overall_maxlen)
self.vis_path = self.out_dir + "/visualization"
U.mkdir_p(self.vis_path)
def sentence_batch_generator(self, data, batch_size):
""" Generates batches based on the data.
Args:
data: Numpy array of the data
batch_size: Integer that specifies the batch size (e.g. 64)
Returns:
"""
n_batch = len(data) / batch_size
batch_count = 0
np.random.shuffle(data)
while True:
if batch_count == n_batch:
np.random.shuffle(data)
batch_count = 0
batch = data[batch_count * batch_size: (batch_count + 1) * batch_size]
batch_count += 1
yield batch
def negative_batch_generator(self, data, batch_size, neg_size):
"""Generates negative batches based on the data.
Args:
data: Numpy array of the data
batch_size: Integer that specifies the batch size (e.g. 64)
neg_size: Integer that specifies the number of negative instances
Returns:
"""
data_len = data.shape[0]
dim = data.shape[1]
while True:
indices = np.random.choice(data_len, batch_size * neg_size)
samples = data[indices].reshape(batch_size, neg_size, dim)
yield samples
def write_topics(self, word_emb, topic_emb, epoch, vocab_inv):
"""Writes relevant topic information with similar words to .log file for each epoch.
Args:
word_emb: Numpy array that contains the word embeddings
topic_emb: Numpy array that contains the topic embeddings
epoch: Integer that specifies the current epoch
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
Returns:
"""
# In final epoch, write in main directory
if epoch == self.args.epochs:
topic_file = codecs.open(self.out_dir + '/topics.log', 'w', 'utf-8')
# In other epochs, write in subdirectory
else:
topic_file = codecs.open(self.out_dir + '/topics/topic_epoch_' + str(epoch) + '.log', 'w', 'utf-8')
# Get the most similar words for every topic
for topic in range(self.args.num_topics):
desc = topic_emb[topic]
sims = word_emb.dot(desc.T)
ordered_words = np.argsort(sims)[::-1]
found_words = 0
desc_list = []
# Save most similar words until enough words are found
for word in ordered_words:
if found_words == self.args.labeling_num_words:
break
elif vocab_inv[word] != "<unk>":
# Save word and associated similarity
desc_list.append(vocab_inv[word] + "|" + str(sims[word]))
found_words += 1
# Write most similar words to file
topic_file.write('Topic %d:\n' % topic)
topic_file.write(' '.join(desc_list) + '\n\n')
# Returns a dataframe containing the most similar words for every topic and a list containing the topic names
def get_similar_words(self, model, vocab_inv):
"""
Args:
model: Keras model object
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
Returns:
topic_labels: Lists that contains the topic names (Based on selecting the most similar word)
word_df: DataFrame that contains the most similar words of every topic
"""
# Get all word and topic embeddings
word_emb = K.get_value(model.get_layer('word_emb').embeddings)
word_emb = word_emb / np.linalg.norm(word_emb, axis=-1, keepdims=True)
topic_emb = K.get_value(model.get_layer('topic_emb').W)
topic_emb = topic_emb / np.linalg.norm(topic_emb, axis=-1, keepdims=True)
word_df = pd.DataFrame(columns=['topic', 'rank', 'word', "similarity"])
topic_labels = []
# Iterate through every topic and calculate the most similar words
for topic in range(self.args.num_topics):
desc = topic_emb[topic]
sims = word_emb.dot(desc.T)
ordered_words = np.argsort(sims)[::-1]
found_words = 0
# Calculate topic labels
for word in ordered_words:
if vocab_inv[word] != "<unk>" and vocab_inv[word] not in topic_labels:
topic_labels.append(vocab_inv[word])
break
# Calculate most similar words and save them in word_df
for word in ordered_words:
if found_words == self.args.labeling_num_words:
break
elif vocab_inv[word] != "<unk>":
word_df.loc[len(word_df)] = (
topic_labels[topic], found_words + 1, vocab_inv[word], str(round(sims[word], 2)))
found_words += 1
return topic_labels, word_df
# Returns a dataframe containing the most similar sentences for every topic
    def get_similar_sentences(self, topic_labels, topic_probs):
        """Selects the most similar sentences for every topic.
        Args:
            topic_labels: List that contains the topic labels
            topic_probs: Numpy array that contains the probability for every sentence-topic combination
        Returns:
            sentence_df: DataFrame that contains the most similar sentences for every topic
        """
        train_sen_file = codecs.open('./datasets/' + self.args.domain + '/train.txt', 'r', 'utf-8')
        sentences = []
        # Read in all sentences that are in the input data (one whitespace-tokenised sentence per line)
        for line in train_sen_file:
            words = line.strip().split()
            sentences.append(words)
        # Per-topic columns sorted so that row r holds the rank-r sentence:
        # max_indices are sentence indices, max_probs the matching probabilities.
        max_indices = np.argsort(topic_probs, axis=0)[::-1]
        max_probs = np.sort(topic_probs, axis=0)[::-1]
        sentence_df = pd.DataFrame(columns=['topic', 'rank', 'sentence'])
        similar_sentences = codecs.open(self.out_dir + '/similar_sentences', 'w', 'utf-8')
        # Iterate through the topics and get most similar sentences
        for topic_ind in range(self.args.num_topics):
            similar_sentences.write("Topic " + str(topic_ind) + ": " + str(topic_labels[topic_ind]) + "\n")
            curr_ind_col = max_indices[:, topic_ind]
            curr_prob_col = max_probs[:, topic_ind]
            # Write the most similar sentences to a file and save them to the sentence_df DataFrame
            for rank in range(self.args.num_sentences):
                similar_sentences.write(' '.join(sentences[curr_ind_col[rank]]) + " --> Probability: "
                                        + str(curr_prob_col[rank]) + "\n")
                # One row per sentence: (topic label, 1-based rank, sentence text)
                sentence_df.loc[len(sentence_df)] = (
                    str(topic_labels[topic_ind]), rank + 1, ' '.join(sentences[curr_ind_col[rank]]))
            similar_sentences.write("\n")
        return sentence_df
    def get_json_objects(self, model, vocab_inv, topic_probs):
        """Retrieves the nodes and links that should be saved in the JSON file for the visualization.
        Args:
            model: Keras model object
            vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
            topic_probs: Numpy array that contains the probability for every sentence-topic combination
        Returns:
            nodes: List that contains all the node objects that should be shown in the visualization
            links: List that contains all the link objects that should be shown in the visualization
        """
        topic_labels, word_df = self.get_similar_words(model, vocab_inv)
        sentences_df = self.get_similar_sentences(topic_labels, topic_probs)
        # One column per topic (named after the similarity-based label), one row per sentence
        df = pd.DataFrame(topic_probs, columns=topic_labels)
        # Dominant topic for every sentence
        predict_labels = df.idxmax(axis=1)
        # Pairwise topic correlation; used below as link strength in the graph
        corr_df = df.corr(method="pearson")
        topic_occ = []
        # Calculate the topic occurrences (# of sentences whose dominant topic is this label)
        for topic_label in topic_labels:
            topic_occ.append((predict_labels == topic_label).sum())
        nodes = []
        links = []
        # Specify the ranks for the most similar words and sentences based on the parameters
        top_word_ranks = [i for i in range(1, self.args.num_words + 1)]
        top_sen_ranks = [i for i in range(1, self.args.num_sentences + 1)]
        # Replace the similarity-based labels with hypernym-based ones.
        # NOTE: word_df/sentences_df still carry the old labels, which is why the
        # filters below keep matching on corr_df.columns (the old names) while the
        # new topic_labels are only used for the node names.
        topic_labels = self.calculate_initial_labels(word_df)
        # Iterate through all topics and get the most similar words and sentences
        for i in range(corr_df.shape[1]):
            top_words = word_df[word_df["topic"] == str(corr_df.columns[i])].word[0:len(top_word_ranks)].values
            top_word_similarities = word_df[word_df["topic"] == str(corr_df.columns[i])].similarity[
                                    0:len(top_word_ranks)].values
            top_sentences = sentences_df[sentences_df["topic"] == str(corr_df.columns[i])].sentence.values
            word_objects = []
            sentence_objects = []
            # Create word and sentence objects and append them to the nodes and links lists
            for word_ind in range(len(top_words)):
                word_objects.append(
                    TopWord(top_word_ranks[word_ind], top_words[word_ind], top_word_similarities[word_ind]))
            for sen_ind in range(len(top_sentences)):
                sentence_objects.append(TopSentence(top_sen_ranks[sen_ind], top_sentences[sen_ind]))
            nodes.append(Node(str(topic_labels[i]), i, str(topic_occ[i]), word_objects, sentence_objects))
            # Link every topic pair exactly once (j < i) with the rounded correlation
            for j in range(0, i):
                links.append(Link(nodes[i].id, nodes[j].id, corr_df.iloc[i, j].round(2)))
        return nodes, links
    def calculate_initial_labels(self, word_df):
        """Calculates the topic labels based on the number of shared hypernyms. If no shared hypernym is detected, the most similar word is used instead.
        Args:
            word_df: DataFrame that contains the most similar words of every topic
        Returns:
            topic_labels: List that contains the topic labels
        """
        topic_word_lists = []
        topic_labels = []
        curr_topic = 0
        num_hypernyms = 0
        hypernym_file = codecs.open(self.out_dir + '/topic_labels.log', 'w', 'utf-8')
        metric_file = codecs.open(self.out_dir + '/metrics.log', 'a', 'utf-8')
        metric_comparison_file = codecs.open('./code/output_dir/' + self.args.domain + '/metrics.log', 'a', 'utf-8')
        # Iterate through all the topics and append the most similar words.
        # word_df holds labeling_num_words consecutive rows per topic; column 2 is 'word'.
        for curr_ind in range(self.args.num_topics):
            topic_word_lists.append(
                word_df.iloc[curr_topic * self.args.labeling_num_words: self.args.labeling_num_words * (curr_topic + 1),
                2].values)
            curr_topic += 1
        # Go through the most similar words of every topic
        for topic_li in topic_word_lists:
            overall_hypernym_li = []
            path_distance = 0
            # Iterate through the words
            for word in topic_li:
                try:
                    # Hypernym path of the word's first synset, reversed so the most
                    # specific hypernym comes first (root last -> root first becomes last)
                    inv_hypernym_path = wn.synsets(str(word))[0].hypernym_paths()[0][::-1]
                except:
                    # NOTE(review): bare except - words without a WordNet synset are skipped silently
                    continue
                specific_hypernym_li = []
                # Iterate through the hypernym path and only consider the path where distance <= distance to root hypernym
                for entry in inv_hypernym_path:
                    # NOTE(review): invariant inside this loop - could be hoisted
                    max_path_len = len(inv_hypernym_path) / 2
                    # Save hypernyms for every topic in a specific list
                    if path_distance < max_path_len:
                        # str(entry) looks like "Synset('dog.n.01')"; [8:-7] keeps the
                        # lemma part - presumably strips wrapper + POS/sense suffix, TODO confirm
                        specific_hypernym_li.append(str(entry)[8:-7])
                    path_distance += 1
                path_distance = 0
                # Save hypernyms of one topic in a large list that contains all hypernyms
                overall_hypernym_li.append(specific_hypernym_li)
            common_hypernyms = []
            # Index and index2 are the lists that contain the hypernyms for the given topic number (e.g. index=1 --> Hypernyms for topic 1)
            for index in range(len(overall_hypernym_li) - 1):
                for index2 in range(index + 1, len(overall_hypernym_li)):
                    hypernym_found = False
                    # Iterate over all hypernyms
                    for entry in overall_hypernym_li[index]:
                        for entry2 in overall_hypernym_li[index2]:
                            # Save the hypernym if two different words are compared and no lower hypernym was already found
                            if entry == entry2 and hypernym_found is False:
                                common_hypernyms.append(entry)
                                hypernym_found = True
                                break
                            else:
                                continue
            # If no hypernyms are found, use the most similar word
            if len(common_hypernyms) == 0:
                top_word = self.get_top_word(topic_li, topic_labels)
            # If hypernyms are found, use the most frequent shared hypernym that is not already taken
            else:
                top_word = self.get_top_hypernym(topic_li, topic_labels, Counter(common_hypernyms).most_common())
                num_hypernyms += sum(Counter(common_hypernyms).values())
            topic_labels.append(top_word)
            hypernym_file.write('Topic %s:' % (top_word) + "\n")
            hypernym_file.write(' - Common hypernyms: %s' % (Counter(common_hypernyms).most_common()) + "\n")
            hypernym_file.write(' - Similar words: %s' % (topic_li) + "\n" + "\n")
        # Write information to multiple logging files
        avg_num_hypernyms = float("{0:.2f}".format(num_hypernyms / float(self.args.num_topics)))
        hypernyms_per_word = float("{0:.2f}".format(avg_num_hypernyms / float(self.args.labeling_num_words)))
        hypernym_file.write('Hypernyms per Word: %s' % (hypernyms_per_word) + "\n")
        hypernym_file.write('Average number of hypernyms: %s' % (avg_num_hypernyms) + "\n")
        hypernym_file.write('Number of hypernyms found: %s' % num_hypernyms + "\n")
        metric_file.write('Hypernyms per Word: %s' % (hypernyms_per_word) + "\n")
        metric_file.write('Average number of hypernyms: %s' % (avg_num_hypernyms) + "\n")
        metric_file.write('Number of hypernyms found: %s' % num_hypernyms + "\n" + "\n")
        metric_comparison_file.write('Hypernyms per Word: %s' % (hypernyms_per_word) + "\n")
        metric_comparison_file.write('Average number of hypernyms: %s' % (avg_num_hypernyms) + "\n")
        metric_comparison_file.write('Number of hypernyms found: %s' % num_hypernyms + "\n" + "\n")
        return topic_labels
def get_top_word(self, topic_li, topic_labels):
"""Retrieves the most similar word and sets it as label.
Args:
topic_li: Numpy array that contains the most similar words
topic_labels: List that contains the previous topic labels (Required because we do not want duplicate topic labels)
Returns:
"""
# Iterate through most similar words and take first one that is not already used and unequal to <unk>
for word in topic_li:
if word != "<unk>" and word not in topic_labels:
return word
# If every shared hypernyms is already used as label and every similar word is already used, use the generic name "topic_" + index instead
return "topic_" + str(len(topic_labels))
def get_top_hypernym(self, topic_li, topic_labels, common_hypernyms):
"""Retrives the most commonly shared lowest hypernym.
Args:
topic_li: Numpy array that contains the most similar words
topic_labels: List that contains the previous topic labels (Required because we do not want duplicate topic labels)
common_hypernyms: List that contains the shared hypernyms as (entry, occurrence) tuples
Returns:
"""
# Iterate through the common hypernyms and use the most frequent one that is not already used as label
for common_hypernym in common_hypernyms:
if common_hypernym[0] not in topic_labels:
return common_hypernym[0]
# If all shared hypernyms are already used as label for another topic, use the top word instead
return self.get_top_word(topic_li, topic_labels)
def write_json(self, model, vocab_inv, topic_probs):
"""Writes all relevant topic information to a JSON file so that it can be imported in the visualization and labeling tool.
Args:
model: Keras model object
vocab_inv: Dictionary that maps the index of every word in the vocab file to the corresponding word (In descending order based on occurrences)
topic_probs: Numpy array that contains the probability for every sentence-topic combination
Returns:
"""
nodes, links = self.get_json_objects(model, vocab_inv, topic_probs)
self.logger.info('Writing .json file...')
# Create a String that contains all the information in a .json format
node_str = '{ "nodes": ['
link_str = ' "links": ['
for node in nodes[:-1]:
node_str += node.to_json() + ","
node_str += nodes[-1].to_json() + " ],"
for link in links[:-1]:
link_str += link.to_json() + ","
link_str += links[-1].to_json() + " ] }"
json_str = node_str + link_str
with open(self.vis_path + "/topic_information.json", "w") as f:
f.write(json_str)
self.logger.info('.json written successfully')
    def build_model(self):
        """Creates the model object, which is used to calculate topics, similar words, similar sentences, topic occurrences, and topic similarities
        Returns:
            model: Keras model object
        """
        optimizer = get_optimizer(self.args)
        self.logger.info('Building model')
        self.logger.info('  Number of training examples: %d', len(self.train_x))
        self.logger.info('  Length of vocab: %d', len(self.vocab))
        # Custom loss: ignores y_true and just averages the model output
        # (the model presumably emits the max-margin loss itself - see create_model)
        def max_margin_loss(y_true, y_pred):
            return K.mean(y_pred)
        model = create_model(self.args, self.overall_maxlen, self.vocab)
        # Freeze the word embedding layer
        model.get_layer('word_emb').trainable = False
        # Check option to fix clusters instead of training them
        if self.args.fix_clusters == "yes":
            model.get_layer('topic_emb').trainable = False
        model.compile(optimizer=optimizer, loss=max_margin_loss, metrics=[max_margin_loss])
        return model
    def train_model(self, model):
        """Train the model based on the hyperparameters defined.
        Args:
            model: Keras model object that is returned after calling Train.build_model()
        Returns:
        """
        # Invert the vocab: index -> word (needed when reporting topics/words)
        vocab_inv = {}
        for w, ind in self.vocab.items():
            vocab_inv[ind] = w
        # Endless generators that yield positive sentence batches and negative samples
        sen_gen = self.sentence_batch_generator(self.train_x, self.args.batch_size)
        neg_gen = self.negative_batch_generator(self.train_x, self.args.batch_size, self.args.neg_size)
        # NOTE(review): Python 2 style code (xrange / generator .next() below);
        # this division must be integer division for xrange() to accept it.
        batches_per_epoch = len(self.train_x) / self.args.batch_size
        # batches_per_epoch = 1000
        self.logger.info("Batches per epoch: %d", batches_per_epoch)
        self.logger.info(
            '--------------------------------------------------------------------------------------------------------------------------')
        min_loss = float('inf')
        loss_li = []
        for ii in xrange(self.args.epochs):
            t0 = time()
            loss, max_margin_loss = 0., 0.
            for b in tqdm(xrange(batches_per_epoch)):
                sen_input = sen_gen.next()
                neg_input = neg_gen.next()
                # Dummy ones as labels: the custom loss only averages the model output
                batch_loss, batch_max_margin_loss = model.train_on_batch([sen_input, neg_input],
                                                                         np.ones((self.args.batch_size, 1)))
                # Running per-epoch averages
                loss += batch_loss / batches_per_epoch
                max_margin_loss += batch_max_margin_loss / batches_per_epoch
            tr_time = time() - t0
            self.logger.info('Epoch %d, train: %is' % (ii + 1, tr_time))
            self.logger.info('  Total loss: %.4f, max_margin_loss: %.4f, ortho_reg: %.4f' % (
                loss, max_margin_loss, loss - max_margin_loss))
            # Only snapshot weights/topics when the epoch loss improved
            if loss < min_loss:
                self.logger.info('  Loss < min_loss')
                min_loss = loss
                word_emb = K.get_value(model.get_layer('word_emb').embeddings)
                topic_emb = K.get_value(model.get_layer('topic_emb').W)
                # L2-normalise so dot products equal cosine similarity in write_topics
                word_emb = word_emb / np.linalg.norm(word_emb, axis=-1, keepdims=True)
                topic_emb = topic_emb / np.linalg.norm(topic_emb, axis=-1, keepdims=True)
                model.save_weights(self.out_dir + '/model_param')
                self.write_topics(word_emb, topic_emb, ii + 1, vocab_inv)
                training_detail_file = codecs.open(self.out_dir + '/training_details.log', 'a', 'utf-8')
                training_detail_file.write('Epoch %d, train: %is' % (ii + 1, tr_time) + "\n")
                training_detail_file.write('Total loss: %.4f, max_margin_loss: %.4f, ortho_reg: %.4f' % (
                    loss, max_margin_loss, loss - max_margin_loss) + "\n")
                loss_li.append(float("{0:.4f}".format(loss)))
            else:
                self.logger.info('  Loss > min_loss')
                loss_li.append(float("{0:.4f}".format(loss)))
            # In Final Epoch
            if ii + 1 == self.args.epochs:
                self.logger.info('Training finished')
                self.logger.info('Calculating most representative topic sentences...')
                # Inference function: padded sentences -> (attention weights, topic probabilities)
                test_fn = K.function([model.get_layer('sentence_input').input, K.learning_phase()],
                                     [model.get_layer('att_weights').output, model.get_layer('p_t').output])
                # If argument is not given explicitly by the user calculate good default value (One batch + One batch per 5000 entries)
                if self.args.probability_batches == 0:
                    num_probability_batches = 1 + len(self.train_x) / 5000
                    self.logger.info('Using %s probability batches...', num_probability_batches)
                else:
                    num_probability_batches = self.args.probability_batches
                # Run inference in batches to avoid out-of-memory errors
                split_inputs = np.array_split(self.train_x, num_probability_batches)
                _, topic_probs = test_fn([split_inputs[0], 0])
                for split_input in split_inputs[1:]:
                    _, curr_topic_prob = test_fn([split_input, 0])
                    topic_probs = np.append(topic_probs, curr_topic_prob, axis=0)
                self.logger.info('Most representative sentences calculated successfully')
                self.write_json(model, vocab_inv, topic_probs)
                self.save_model_loss(loss_li)
                # os.system(
                #     "python ./code/coherence_score.py -f ./code/output_dir/organic_food_preprocessed/" + self.args.conf + "/topics.log -c ./preprocessed_data/organic_food_preprocessed/train.txt -o ./code/output_dir/organic_food_preprocessed/" + self.args.conf)
def save_model_loss(self, loss_li):
"""Creates plots of the training loss and saves them as .png and .pdf files.
Args:
loss_li: List that contains the model loss for every epoch
Returns:
"""
metric_file = codecs.open(self.out_dir + '/metrics.log', 'a', 'utf-8')
metric_comparison_file = codecs.open('./code/output_dir/' + self.args.domain + '/metrics.log', 'a', 'utf-8')
metric_file.write('Final loss: %s' % (loss_li[-1]) + "\n")
metric_file.write('Loss development: %s' % (loss_li) + "\n" + "\n")
metric_comparison_file.write('Final loss: %s' % (loss_li[-1]) + "\n")
metric_comparison_file.write('Loss development: %s' % (loss_li) + "\n" + "\n")
epoch_li = [epoch for epoch in range(1, self.args.epochs + 1)]
fig, ax = plt.subplots(figsize=(16, 8))
ax.set_xlabel("Epoch", fontsize=18, weight="bold")
ax.set_ylabel("Loss", fontsize=18, weight="bold")
ax.set_title('Model loss', fontsize=20, weight="bold")
plt.plot(epoch_li, loss_li)
plt.savefig(self.out_dir + "/model_loss.pdf", format="pdf")
plt.savefig(self.out_dir + "/model_loss.png", format="png")
def main():
    """Command line entry point: parses the arguments, builds the model and trains it."""
    logging.basicConfig(
        filename='out.log',
        level=logging.INFO,
        format='%(asctime)s %(levelname)s %(message)s')
    logger = logging.getLogger(__name__)
    parser = argparse.ArgumentParser()
    parser.add_argument("--domain", dest="domain", type=str, metavar='<str>', required=True,
                        help="domain of the corpus")
    parser.add_argument("--conf", dest="conf", type=str, metavar='<str>', required=True,
                        help="Train configuration for the given domain")
    parser.add_argument("--emb-path", dest="emb_path", type=str, metavar='<str>', required=True,
                        help="The path to the word embedding file")
    parser.add_argument("--num-topics", dest="num_topics", type=int, metavar='<int>', default=20,
                        help="The number of topics specified that are calculated by the model (default=20)")
    parser.add_argument("--vocab-size", dest="vocab_size", type=int, metavar='<int>', default=9000,
                        help="Vocab size. '0' means no limit (default=9000)")
    parser.add_argument("--num-words", dest="num_words", type=int, metavar='<int>', default=10,
                        help="Number of most similar words displayed for each topic")
    parser.add_argument("--num-sentences", dest="num_sentences", type=int, metavar='<int>', default=10,
                        help="Number of most similar sentences displayed for each topic")
    parser.add_argument("--labeling-num-words", dest="labeling_num_words", type=int, metavar='<int>', default=25,
                        help="Number of most similar words used to generate the labels")
    parser.add_argument("--batch-size", dest="batch_size", type=int, metavar='<int>', default=64,
                        help="Batch size used for training (default=64)")
    parser.add_argument("--epochs", dest="epochs", type=int, metavar='<int>', default=20,
                        help="Number of epochs (default=20)")
    parser.add_argument("--neg-size", dest="neg_size", type=int, metavar='<int>', default=20,
                        help="Number of negative instances (default=20)")
    parser.add_argument("--maxlen", dest="maxlen", type=int, metavar='<int>', default=0,
                        help="Maximum allowed number of words during training. '0' means no limit (default=0)")
    parser.add_argument("--algorithm", dest="algorithm", type=str, metavar='<str>', default='adam',
                        help="Optimization algorithm (rmsprop|sgd|adagrad|adadelta|adam|adamax) (default=adam)")
    parser.add_argument("--fix-clusters", dest="fix_clusters", type=str, metavar='<str>', default="no",
                        help="Fix initial clusters (yes or no)")
    parser.add_argument("--ortho-reg", dest="ortho_reg", type=float, metavar='<float>', default=0.1,
                        help="The weight of orthogonal regularization (default=0.1)")
    parser.add_argument("--probability-batches", dest="probability_batches", type=int, metavar='<int>', default=0,
                        help="Calculation of topic probabilities is split into batches to avoid out of memory error."
                             "If an out of memory error or bus error occurs, increase this value.")
    parser.add_argument("--emb-dim", dest="emb_dim", type=int, metavar='<int>', default=300,
                        help="Embeddings dimension (default=300)")
    parser.add_argument("--emb-type", dest="emb_type", type=str, metavar='<str>', default="glove_finetuned",
                        help="The type of word vectors to use")
    args = parser.parse_args()
    # All artefacts of this run live under ./code/output_dir/<domain>/<conf>
    out_dir = './code/output_dir/' + args.domain + '/' + args.conf
    U.mkdir_p(out_dir)
    U.mkdir_p(out_dir + "/topics")
    U.print_args(args, out_dir + '/train_params')
    U.print_args(args, out_dir + '/metrics.log')
    # Basic sanity checks on the parsed arguments
    assert args.algorithm in {'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'}, "Invalid algorithm argument"
    assert args.fix_clusters in {'yes', 'no'}, "Invalid fix_clusters argument"
    assert args.labeling_num_words >= args.num_words, "Number of words used to generate labels must be >= Number of words displayed in visualization"
    # Fixed seed for reproducibility
    np.random.seed(1234)
    trainer = Train(args, logger, out_dir)
    model = trainer.build_model()
    trainer.train_model(model)
if __name__ == "__main__":
    main()
| [
"logging.getLogger",
"utils.mkdir_p",
"keras.backend.learning_phase",
"numpy.argsort",
"numpy.array_split",
"numpy.linalg.norm",
"keras.preprocessing.sequence.pad_sequences",
"argparse.ArgumentParser",
"numpy.sort",
"json.dumps",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"pandas.DataFra... | [((588, 609), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (602, 609), False, 'import matplotlib\n'), ((29012, 29124), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""out.log"""', 'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(filename='out.log', level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (29031, 29124), False, 'import logging\n'), ((29158, 29185), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (29175, 29185), False, 'import logging\n'), ((29200, 29225), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (29223, 29225), False, 'import argparse\n'), ((32702, 32720), 'utils.mkdir_p', 'U.mkdir_p', (['out_dir'], {}), '(out_dir)\n', (32711, 32720), True, 'import utils as U\n'), ((32725, 32755), 'utils.mkdir_p', 'U.mkdir_p', (["(out_dir + '/topics')"], {}), "(out_dir + '/topics')\n", (32734, 32755), True, 'import utils as U\n'), ((32760, 32805), 'utils.print_args', 'U.print_args', (['args', "(out_dir + '/train_params')"], {}), "(args, out_dir + '/train_params')\n", (32772, 32805), True, 'import utils as U\n'), ((32810, 32854), 'utils.print_args', 'U.print_args', (['args', "(out_dir + '/metrics.log')"], {}), "(args, out_dir + '/metrics.log')\n", (32822, 32854), True, 'import utils as U\n'), ((33209, 33229), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (33223, 33229), True, 'import numpy as np\n'), ((1327, 1399), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda o: o.__dict__)', 'sort_keys': '(True)', 'indent': '(2)'}), '(self, default=lambda o: o.__dict__, sort_keys=True, indent=2)\n', (1337, 1399), False, 'import json\n'), ((2026, 2098), 'json.dumps', 'json.dumps', (['self'], {'default': '(lambda o: o.__dict__)', 'sort_keys': '(True)', 'indent': '(2)'}), '(self, default=lambda o: o.__dict__, sort_keys=True, indent=2)\n', (2036, 2098), False, 'import 
json\n'), ((3539, 3636), 'reader.get_data', 'dataset.get_data', (['self.args.domain'], {'vocab_size': 'self.args.vocab_size', 'maxlen': 'self.args.maxlen'}), '(self.args.domain, vocab_size=self.args.vocab_size, maxlen=\n self.args.maxlen)\n', (3555, 3636), True, 'import reader as dataset\n'), ((3807, 3866), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['train_x'], {'maxlen': 'self.overall_maxlen'}), '(train_x, maxlen=self.overall_maxlen)\n', (3829, 3866), False, 'from keras.preprocessing import sequence\n'), ((3889, 3947), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['test_x'], {'maxlen': 'self.overall_maxlen'}), '(test_x, maxlen=self.overall_maxlen)\n', (3911, 3947), False, 'from keras.preprocessing import sequence\n'), ((4012, 4036), 'utils.mkdir_p', 'U.mkdir_p', (['self.vis_path'], {}), '(self.vis_path)\n', (4021, 4036), True, 'import utils as U\n'), ((4378, 4401), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (4395, 4401), True, 'import numpy as np\n'), ((8020, 8081), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['topic', 'rank', 'word', 'similarity']"}), "(columns=['topic', 'rank', 'word', 'similarity'])\n", (8032, 8081), True, 'import pandas as pd\n'), ((9651, 9725), 'codecs.open', 'codecs.open', (["('./datasets/' + self.args.domain + '/train.txt')", '"""r"""', '"""utf-8"""'], {}), "('./datasets/' + self.args.domain + '/train.txt', 'r', 'utf-8')\n", (9662, 9725), False, 'import codecs\n'), ((10132, 10183), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['topic', 'rank', 'sentence']"}), "(columns=['topic', 'rank', 'sentence'])\n", (10144, 10183), True, 'import pandas as pd\n'), ((10212, 10274), 'codecs.open', 'codecs.open', (["(self.out_dir + '/similar_sentences')", '"""w"""', '"""utf-8"""'], {}), "(self.out_dir + '/similar_sentences', 'w', 'utf-8')\n", (10223, 10274), False, 'import codecs\n'), ((12047, 12094), 'pandas.DataFrame', 'pd.DataFrame', 
(['topic_probs'], {'columns': 'topic_labels'}), '(topic_probs, columns=topic_labels)\n', (12059, 12094), True, 'import pandas as pd\n'), ((14539, 14600), 'codecs.open', 'codecs.open', (["(self.out_dir + '/topic_labels.log')", '"""w"""', '"""utf-8"""'], {}), "(self.out_dir + '/topic_labels.log', 'w', 'utf-8')\n", (14550, 14600), False, 'import codecs\n'), ((14623, 14679), 'codecs.open', 'codecs.open', (["(self.out_dir + '/metrics.log')", '"""a"""', '"""utf-8"""'], {}), "(self.out_dir + '/metrics.log', 'a', 'utf-8')\n", (14634, 14679), False, 'import codecs\n'), ((14713, 14800), 'codecs.open', 'codecs.open', (["('./code/output_dir/' + self.args.domain + '/metrics.log')", '"""a"""', '"""utf-8"""'], {}), "('./code/output_dir/' + self.args.domain + '/metrics.log', 'a',\n 'utf-8')\n", (14724, 14800), False, 'import codecs\n'), ((22348, 22372), 'optimizers.get_optimizer', 'get_optimizer', (['self.args'], {}), '(self.args)\n', (22361, 22372), False, 'from optimizers import get_optimizer\n'), ((22664, 22720), 'model.create_model', 'create_model', (['self.args', 'self.overall_maxlen', 'self.vocab'], {}), '(self.args, self.overall_maxlen, self.vocab)\n', (22676, 22720), False, 'from model import create_model\n'), ((28040, 28096), 'codecs.open', 'codecs.open', (["(self.out_dir + '/metrics.log')", '"""a"""', '"""utf-8"""'], {}), "(self.out_dir + '/metrics.log', 'a', 'utf-8')\n", (28051, 28096), False, 'import codecs\n'), ((28130, 28217), 'codecs.open', 'codecs.open', (["('./code/output_dir/' + self.args.domain + '/metrics.log')", '"""a"""', '"""utf-8"""'], {}), "('./code/output_dir/' + self.args.domain + '/metrics.log', 'a',\n 'utf-8')\n", (28141, 28217), False, 'import codecs\n'), ((28612, 28641), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (28624, 28641), True, 'import matplotlib.pyplot as plt\n'), ((28830, 28857), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_li', 'loss_li'], {}), '(epoch_li, loss_li)\n', (28838, 
28857), True, 'import matplotlib.pyplot as plt\n'), ((28866, 28925), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.out_dir + '/model_loss.pdf')"], {'format': '"""pdf"""'}), "(self.out_dir + '/model_loss.pdf', format='pdf')\n", (28877, 28925), True, 'import matplotlib.pyplot as plt\n'), ((28934, 28993), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.out_dir + '/model_loss.png')"], {'format': '"""png"""'}), "(self.out_dir + '/model_loss.png', format='png')\n", (28945, 28993), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5188), 'numpy.random.choice', 'np.random.choice', (['data_len', '(batch_size * neg_size)'], {}), '(data_len, batch_size * neg_size)\n', (5155, 5188), True, 'import numpy as np\n'), ((5960, 6015), 'codecs.open', 'codecs.open', (["(self.out_dir + '/topics.log')", '"""w"""', '"""utf-8"""'], {}), "(self.out_dir + '/topics.log', 'w', 'utf-8')\n", (5971, 6015), False, 'import codecs\n'), ((7806, 7854), 'numpy.linalg.norm', 'np.linalg.norm', (['word_emb'], {'axis': '(-1)', 'keepdims': '(True)'}), '(word_emb, axis=-1, keepdims=True)\n', (7820, 7854), True, 'import numpy as np\n'), ((7951, 8000), 'numpy.linalg.norm', 'np.linalg.norm', (['topic_emb'], {'axis': '(-1)', 'keepdims': '(True)'}), '(topic_emb, axis=-1, keepdims=True)\n', (7965, 8000), True, 'import numpy as np\n'), ((10016, 10047), 'numpy.argsort', 'np.argsort', (['topic_probs'], {'axis': '(0)'}), '(topic_probs, axis=0)\n', (10026, 10047), True, 'import numpy as np\n'), ((10074, 10102), 'numpy.sort', 'np.sort', (['topic_probs'], {'axis': '(0)'}), '(topic_probs, axis=0)\n', (10081, 10102), True, 'import numpy as np\n'), ((22632, 22646), 'keras.backend.mean', 'K.mean', (['y_pred'], {}), '(y_pred)\n', (22638, 22646), True, 'import keras.backend as K\n'), ((24076, 24082), 'time.time', 'time', ([], {}), '()\n', (24080, 24082), False, 'from time import time\n'), ((4478, 4501), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (4495, 4501), True, 'import numpy as 
np\n'), ((6399, 6415), 'numpy.argsort', 'np.argsort', (['sims'], {}), '(sims)\n', (6409, 6415), True, 'import numpy as np\n'), ((8338, 8354), 'numpy.argsort', 'np.argsort', (['sims'], {}), '(sims)\n', (8348, 8354), True, 'import numpy as np\n'), ((24630, 24636), 'time.time', 'time', ([], {}), '()\n', (24634, 24636), False, 'from time import time\n'), ((25505, 25570), 'codecs.open', 'codecs.open', (["(self.out_dir + '/training_details.log')", '"""a"""', '"""utf-8"""'], {}), "(self.out_dir + '/training_details.log', 'a', 'utf-8')\n", (25516, 25570), False, 'import codecs\n'), ((26963, 27016), 'numpy.array_split', 'np.array_split', (['self.train_x', 'num_probability_batches'], {}), '(self.train_x, num_probability_batches)\n', (26977, 27016), True, 'import numpy as np\n'), ((24438, 24472), 'numpy.ones', 'np.ones', (['(self.args.batch_size, 1)'], {}), '((self.args.batch_size, 1))\n', (24445, 24472), True, 'import numpy as np\n'), ((25185, 25233), 'numpy.linalg.norm', 'np.linalg.norm', (['word_emb'], {'axis': '(-1)', 'keepdims': '(True)'}), '(word_emb, axis=-1, keepdims=True)\n', (25199, 25233), True, 'import numpy as np\n'), ((25274, 25323), 'numpy.linalg.norm', 'np.linalg.norm', (['topic_emb'], {'axis': '(-1)', 'keepdims': '(True)'}), '(topic_emb, axis=-1, keepdims=True)\n', (25288, 25323), True, 'import numpy as np\n'), ((27236, 27283), 'numpy.append', 'np.append', (['topic_probs', 'curr_topic_prob'], {'axis': '(0)'}), '(topic_probs, curr_topic_prob, axis=0)\n', (27245, 27283), True, 'import numpy as np\n'), ((26337, 26355), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (26353, 26355), True, 'import keras.backend as K\n'), ((17606, 17631), 'collections.Counter', 'Counter', (['common_hypernyms'], {}), '(common_hypernyms)\n', (17613, 17631), False, 'from collections import Counter\n'), ((17684, 17709), 'collections.Counter', 'Counter', (['common_hypernyms'], {}), '(common_hypernyms)\n', (17691, 17709), False, 'from collections import Counter\n'), 
((17891, 17916), 'collections.Counter', 'Counter', (['common_hypernyms'], {}), '(common_hypernyms)\n', (17898, 17916), False, 'from collections import Counter\n')] |
import torch
from models.auxiliaries.physics_model_interface import PhysicsModel
from data.base_dataset import BaseDataset
import scipy.io as io
import numpy as np
from torch import from_numpy, empty
from util.util import normalize
class RegCycleGANDataset(BaseDataset):
def initialize(self, opt, phase):
self.phase = phase
self.opt = opt
self.roi = opt.roi
self.root = opt.dataroot
self.physics_model: PhysicsModel = opt.physics_model
self.empty_tensor = empty(0)
# Select relevant part of dataset
if opt.representation == 'real':
channel_index = slice(0,1)
elif opt.representation == 'imag':
channel_index = slice(1,2)
else:
channel_index = slice(None, None)
if phase == 'train':
self.selection = slice(0, opt.val_offset)
elif phase == 'val':
self.selection = slice(opt.val_offset, opt.test_offset)
else:
self.selection = slice(opt.test_offset, None)
# Load dataset from .mat file
all_data = io.loadmat(opt.dataroot)
self.dataset = np.array(all_data[opt.dataname]).astype(float)
self.innit_length(self.dataset.shape[-1])
self.dataset = self.dataset[self.selection, channel_index, self.roi]
if self.opt.representation == 'mag':
self.dataset = np.expand_dims(np.sqrt(self.dataset[:,0,:]**2 + self.dataset[:,1,:]**2), 0)
self.dataset = from_numpy(normalize(self.dataset))
self.A_size = len(self.dataset)
# Load labels from .mat file
self.labels = []
# if self.phase != 'test':
for label_name in self.physics_model.get_label_names():
if not label_name in all_data:
print('WARNING: ' + label_name + ' not found in dataroot!')
continue
self.labels.append(all_data[label_name])
self.num_labels = len(self.labels)
self.labels = from_numpy(np.transpose(np.concatenate(self.labels, 0)))
self.label_sampler = self.labels[self.selection]
# Either use random or fixed labels
# if self.opt.useAlabels:
# permutation = np.random.permutation(self.A_size)
# self.B_sampler = lambda ind: self.label_sampler[permutation[ind]]
# else:
self.B_sampler = self.generate_B_sample
def generate_B_sample(self, index = None):
param = torch.rand((1, self.num_labels))
return self.physics_model.param_to_quantity(param).squeeze(0)
def innit_length(self, full_length):
self.opt.full_data_length = full_length
self.opt.data_length = len(range(0, full_length)[self.roi])
def __getitem__(self, index):
sample: dict = {
'A': self.dataset[index % self.A_size],
'label_A': self.label_sampler[index % self.A_size]
}
# if self.phase != 'test':
# sample['label_A'] = self.label_sampler[index % self.A_size]
if self.phase == 'train':
sample['B'] = self.B_sampler(index)
return sample
    def __len__(self):
        # Dataset length equals the number of spectra in the selected split.
        return self.A_size
def name(self):
return 'Reg-CycleGAN-Dataset' | [
"numpy.sqrt",
"util.util.normalize",
"scipy.io.loadmat",
"numpy.array",
"numpy.concatenate",
"torch.empty",
"torch.rand"
] | [((510, 518), 'torch.empty', 'empty', (['(0)'], {}), '(0)\n', (515, 518), False, 'from torch import from_numpy, empty\n'), ((1102, 1126), 'scipy.io.loadmat', 'io.loadmat', (['opt.dataroot'], {}), '(opt.dataroot)\n', (1112, 1126), True, 'import scipy.io as io\n'), ((2459, 2491), 'torch.rand', 'torch.rand', (['(1, self.num_labels)'], {}), '((1, self.num_labels))\n', (2469, 2491), False, 'import torch\n'), ((1506, 1529), 'util.util.normalize', 'normalize', (['self.dataset'], {}), '(self.dataset)\n', (1515, 1529), False, 'from util.util import normalize\n'), ((1150, 1182), 'numpy.array', 'np.array', (['all_data[opt.dataname]'], {}), '(all_data[opt.dataname])\n', (1158, 1182), True, 'import numpy as np\n'), ((1411, 1475), 'numpy.sqrt', 'np.sqrt', (['(self.dataset[:, 0, :] ** 2 + self.dataset[:, 1, :] ** 2)'], {}), '(self.dataset[:, 0, :] ** 2 + self.dataset[:, 1, :] ** 2)\n', (1418, 1475), True, 'import numpy as np\n'), ((2019, 2049), 'numpy.concatenate', 'np.concatenate', (['self.labels', '(0)'], {}), '(self.labels, 0)\n', (2033, 2049), True, 'import numpy as np\n')] |
import taichi as ti
import numpy as np
# 3x3 cross-shaped kernel, used both as the seed image and the convolution kernel.
A = np.array([
    [0, 1, 0],
    [1, 0, 1],
    [0, 1, 0],
])
def conv(A, B):
    """Full 2-D convolution of A (m x n) with kernel B (s x t).

    Returns an array of shape (m + s - 1, n + t - 1) with A's dtype,
    identical to the naive quadruple-loop definition
    C[i+k, j+l] += A[i, j] * B[k, l].
    """
    m, n = A.shape
    s, t = B.shape
    C = np.zeros((m + s - 1, n + t - 1), dtype=A.dtype)
    # Accumulate one shifted copy of A per kernel element: s*t vectorized
    # updates instead of m*n*s*t Python-level scalar updates.
    for k in range(s):
        for l in range(t):
            C[k:k + m, l:l + n] += A * B[k, l]
    return C
# Iterate the convolution: B starts as the kernel itself and is convolved
# with A three more times, printing the growing pattern at each step.
B = A
print(B)
B = conv(B, A)
print(B)
B = conv(B, A)
print(B)
B = conv(B, A)
print(B)
| [
"numpy.array",
"numpy.zeros"
] | [((45, 88), 'numpy.array', 'np.array', (['[[0, 1, 0], [1, 0, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, 0, 1], [0, 1, 0]])\n', (53, 88), True, 'import numpy as np\n'), ((156, 203), 'numpy.zeros', 'np.zeros', (['(m + s - 1, n + t - 1)'], {'dtype': 'A.dtype'}), '((m + s - 1, n + t - 1), dtype=A.dtype)\n', (164, 203), True, 'import numpy as np\n')] |
"""
Questão 2 do laboratorio 7: Interpolação por MMQ pela seria de Fourier(exponencial)
"""
import numpy as np
from math import pi, sin
import matplotlib.pyplot as plt
def sistemaAumentado(x, y, dim):
    """Build the normal equations (A, b) of a degree-(dim-1) least-squares fit.

    A[i][j] = sum_k x_k^(i+j)  (symmetric dim x dim matrix, float)
    b[i]    = sum_k y_k * x_k^i (Python list)

    Args:
        x, y: sample coordinates (equal length m).
        dim: number of polynomial coefficients (degree + 1).

    Returns:
        (A, b) ready for np.linalg.solve.
    """
    A = np.empty((dim, dim))
    # Power sums S_i = sum(x_k^i). A[i][j] needs exponents up to 2*dim - 2;
    # the original computed only dim + 2 sums, which raised IndexError for dim > 3.
    soma = []
    for i in range(2 * dim - 1):
        soma.append(sum(xk ** i for xk in x))
    # Fill the symmetric matrix from the power sums.
    for i in range(dim):
        for j in range(i, dim):
            A[i, j] = soma[i + j]
            if i != j:
                A[j, i] = A[i, j]
    # Right-hand side: moments of y against increasing powers of x.
    b = []
    for i in range(dim):
        b.append(sum(yk * xk ** i for xk, yk in zip(x, y)))
    return A, b
# Half-wave rectified sine: zero on [-T/2, 0), Im*sin(2*pi*t/T) on [0, T/2].
T = 2*pi
Im = 1
x = np.linspace(-T/2, T/2, 30)
y = np.array([])
for i in x:
    if (-T/2) <= i < 0:
        y = np.append(y, 0)
    else:
        y = np.append(y, Im*sin((2*pi/T)*i))
# Dense grid for plotting the fitted polynomial.
n_x = np.arange(x[0], x[-1], 0.001)
# Solve the 3x3 normal equations, then reverse the coefficients because
# np.poly1d expects highest power first.
A, b = sistemaAumentado(x, y, 3)
aprox = np.linalg.solve(A, b)
aprox = aprox[::-1]
p = np.poly1d(aprox)
plt.plot(x, y, 'ro', label='Pontos marcados')
plt.plot(n_x, p(n_x), 'r-', label='MMQ')
plt.title('MMQ')
plt.xlabel('X')
plt.ylabel('Y')
plt.style.use('ggplot')
plt.tight_layout()
plt.legend(loc='best')
plt.grid()
plt.show()
| [
"numpy.linalg.solve",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.empty",
"matplotlib.pyplot.tight_layout",
"nump... | [((768, 798), 'numpy.linspace', 'np.linspace', (['(-T / 2)', '(T / 2)', '(30)'], {}), '(-T / 2, T / 2, 30)\n', (779, 798), True, 'import numpy as np\n'), ((799, 811), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (807, 811), True, 'import numpy as np\n'), ((937, 966), 'numpy.arange', 'np.arange', (['x[0]', 'x[-1]', '(0.001)'], {}), '(x[0], x[-1], 0.001)\n', (946, 966), True, 'import numpy as np\n'), ((1008, 1029), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (1023, 1029), True, 'import numpy as np\n'), ((1054, 1070), 'numpy.poly1d', 'np.poly1d', (['aprox'], {}), '(aprox)\n', (1063, 1070), True, 'import numpy as np\n'), ((1071, 1116), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ro"""'], {'label': '"""Pontos marcados"""'}), "(x, y, 'ro', label='Pontos marcados')\n", (1079, 1116), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1174), 'matplotlib.pyplot.title', 'plt.title', (['"""MMQ"""'], {}), "('MMQ')\n", (1167, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1190), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1185, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1206), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1201, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1230), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1220, 1230), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1249), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1247, 1249), True, 'import matplotlib.pyplot as plt\n'), ((1250, 1272), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1260, 1272), True, 'import matplotlib.pyplot as plt\n'), ((1273, 1283), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1281, 1283), True, 'import matplotlib.pyplot as plt\n'), ((1284, 1294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1292, 
1294), True, 'import matplotlib.pyplot as plt\n'), ((226, 246), 'numpy.empty', 'np.empty', (['(dim, dim)'], {}), '((dim, dim))\n', (234, 246), True, 'import numpy as np\n'), ((255, 268), 'numpy.empty', 'np.empty', (['dim'], {}), '(dim)\n', (263, 268), True, 'import numpy as np\n'), ((860, 875), 'numpy.append', 'np.append', (['y', '(0)'], {}), '(y, 0)\n', (869, 875), True, 'import numpy as np\n'), ((914, 933), 'math.sin', 'sin', (['(2 * pi / T * i)'], {}), '(2 * pi / T * i)\n', (917, 933), False, 'from math import pi, sin\n')] |
"""
Perceptual decision-making task, loosely based on the random dot motion
discrimination task.
Response of neurons in the lateral intraparietal area during a combined visual
discrimination reaction time task.
<NAME> & <NAME>, JNS 2002.
http://www.jneurosci.org/content/22/21/9475.abstract
Reaction-time version.
"""
from __future__ import division
import numpy as np
from pycog import tasktools
#-----------------------------------------------------------------------------------------
# Network structure
#-----------------------------------------------------------------------------------------
Nin = 3    # inputs: two evidence channels + start cue
N = 100    # recurrent units
Nout = 2   # outputs: one per choice
# E/I: split units into excitatory/inhibitory populations
ei, EXC, INH = tasktools.generate_ei(N)
# Start cue input channel index
START = 2
#-----------------------------------------------------------------------------------------
# Output connectivity
#-----------------------------------------------------------------------------------------
# Readout only from excitatory units.
Cout = np.zeros((Nout, N))
Cout[:,EXC] = 1
#-----------------------------------------------------------------------------------------
# Task structure
#-----------------------------------------------------------------------------------------
cohs = [1, 2, 4, 8, 16]   # motion coherences (%)
in_outs = [1, -1]         # stimulus direction (in vs. out of response field)
nconditions = len(cohs)*len(in_outs)
pcatch = 5/(nconditions + 1)  # probability of a catch (no-stimulus) trial
SCALE = 3.2
def scale(coh):
    """Map a coherence percentage onto the network's input amplitude."""
    return 0.5 * (1 + SCALE * coh / 100)
def generate_trial(rng, dt, params):
    """Generate one trial: time axis, epochs, inputs, and (optionally) targets.

    Args:
        rng: random number generator (numpy-like).
        dt: integration time step in ms.
        params: dict with at least 'name' ('gradient', 'test' or 'validation');
            may fix 'catch', 'coh', 'in_out', or supply 'minibatch_index'.

    Returns:
        dict with keys 't', 'epochs', 'info', 'inputs' and, when
        params['target_output'] is set, 'outputs' and 'mask'.
    """
    #-------------------------------------------------------------------------------------
    # Select task condition
    #-------------------------------------------------------------------------------------
    catch_trial = False
    if params['name'] in ['gradient', 'test']:
        # Random condition (catch with probability pcatch unless overridden).
        if params.get('catch', rng.rand() < pcatch):
            catch_trial = True
        else:
            coh = params.get('coh', rng.choice(cohs))
            in_out = params.get('in_out', rng.choice(in_outs))
    elif params['name'] == 'validation':
        # Deterministic sweep: one catch trial, then every (coh, in_out) pair.
        b = params['minibatch_index'] % (nconditions + 1)
        if b == 0:
            catch_trial = True
        else:
            k0, k1 = tasktools.unravel_index(b-1, (len(cohs), len(in_outs)))
            coh = cohs[k0]
            in_out = in_outs[k1]
    else:
        raise ValueError("Unknown trial type.")
    #-------------------------------------------------------------------------------------
    # Epochs
    #-------------------------------------------------------------------------------------
    if catch_trial:
        epochs = {'T': 2000}
    else:
        if params['name'] == 'test':
            fixation = 300
            stimulus = 1500
        else:
            fixation = 100
            stimulus = 800
        # Responses in the first `no_reward` ms of the stimulus are not rewarded.
        no_reward = 300
        T = fixation + stimulus
        epochs = {
            'fixation': (0, fixation),
            'stimulus': (fixation, T),
            'decision': (fixation + no_reward, T)
            }
        epochs['T'] = T
    #-------------------------------------------------------------------------------------
    # Trial info
    #-------------------------------------------------------------------------------------
    t, e = tasktools.get_epochs_idx(dt, epochs) # Time, task epochs in discrete time
    trial = {'t': t, 'epochs': epochs}         # Trial
    if catch_trial:
        trial['info'] = {}
    else:
        # Correct choice: output unit 0 for in_out > 0, else unit 1.
        if in_out > 0:
            choice = 0
        else:
            choice = 1
        # Trial info
        trial['info'] = {'coh': coh, 'in_out': in_out, 'choice': choice}
    #-------------------------------------------------------------------------------------
    # Inputs
    #-------------------------------------------------------------------------------------
    X = np.zeros((len(t), Nin))
    if not catch_trial:
        # Stimulus: evidence for the correct choice scaled up, against scaled down.
        X[e['stimulus'],choice]   = scale(+coh)
        X[e['stimulus'],1-choice] = scale(-coh)
        # Start cue
        X[e['stimulus'],START] = 1
    trial['inputs'] = X
    #-------------------------------------------------------------------------------------
    # Target output
    #-------------------------------------------------------------------------------------
    if params.get('target_output', False):
        Y = np.zeros((len(t), Nout)) # Output matrix
        M = np.zeros_like(Y)         # Mask matrix
        # Hold values
        hi = 1.2
        lo = 0.2
        if catch_trial:
            Y[:] = lo
            M[:] = 1
        else:
            # Fixation
            Y[e['fixation'],:] = lo
            # Decision
            Y[e['decision'],choice]   = hi
            Y[e['decision'],1-choice] = lo
            # Only care about fixation and decision periods
            M[e['fixation']+e['decision'],:] = 1
        # Outputs and mask
        trial['outputs'] = Y
        trial['mask']    = M
    #-------------------------------------------------------------------------------------
    return trial
# Performance measure
performance = tasktools.performance_2afc
# Termination criterion
TARGET_PERFORMANCE = 85
def terminate(pcorrect_history):
    """Stop training once the mean of the last 5 performance values beats the target."""
    recent = pcorrect_history[-5:]
    return np.mean(recent) > TARGET_PERFORMANCE
# Validation dataset: 100 trials for each condition plus the catch condition.
n_validation = 100*(nconditions + 1)
| [
"numpy.mean",
"pycog.tasktools.generate_ei",
"numpy.zeros",
"pycog.tasktools.get_epochs_idx",
"numpy.zeros_like"
] | [((666, 690), 'pycog.tasktools.generate_ei', 'tasktools.generate_ei', (['N'], {}), '(N)\n', (687, 690), False, 'from pycog import tasktools\n'), ((927, 946), 'numpy.zeros', 'np.zeros', (['(Nout, N)'], {}), '((Nout, N))\n', (935, 946), True, 'import numpy as np\n'), ((3131, 3167), 'pycog.tasktools.get_epochs_idx', 'tasktools.get_epochs_idx', (['dt', 'epochs'], {}), '(dt, epochs)\n', (3155, 3167), False, 'from pycog import tasktools\n'), ((4283, 4299), 'numpy.zeros_like', 'np.zeros_like', (['Y'], {}), '(Y)\n', (4296, 4299), True, 'import numpy as np\n'), ((5093, 5123), 'numpy.mean', 'np.mean', (['pcorrect_history[-5:]'], {}), '(pcorrect_history[-5:])\n', (5100, 5123), True, 'import numpy as np\n')] |
import numpy as np
def differential_evolution(fobj, bounds, mut=0.8, crossprob=0.7, popsize=30, gens=1000, mode='best/1'):
# Gets number of parameters (length of genome vector)
num_params = len(bounds)
# Initializes the population genomes with values drawn from uniform distribution in the range [0,1]
pop = np.random.rand(popsize, num_params)
# Gets the boundaries for each parameter to scale the population genomes
min_b, max_b = np.asarray(bounds).T
# Scales the population genomes from the range [0,1] to the range specified by the parameter boundaries
diff = np.fabs(min_b - max_b)
pop_scaled = min_b + pop * diff
# Evaluates fitness for each individual in the population by calculating the objective to minimize
unfitness = np.asarray([fobj(ind) for ind in pop_scaled])
# Gets the best individual of the population
best_idx = np.argmin(unfitness)
best = pop_scaled[best_idx]
for i in range(gens):
print('Best unfitness in generation %d: %f' % (i + 1, unfitness[best_idx]))
# For each individual:
for j in range(popsize):
# Selects three individuals from the population different than himself(no jerking off) for reproduction
if mode == 'best/1':
idxs = [idx for idx in range(popsize) if (idx != j and idx != best_idx)]
a = best
b, c = pop[np.random.choice(idxs, 2, replace=False)]
mutant = np.clip(a + mut * (b - c), 0, 1)
elif mode == 'best/2':
idxs = [idx for idx in range(popsize) if (idx != j and idx != best_idx)]
a = best
b, c, d, e = pop[np.random.choice(idxs, 4, replace=False)]
# Generates a mutant by applying the differential mutation (and clips to keep in range [0,1])
mutant = np.clip(a + mut * (b - c + d - e), 0, 1)
elif mode == 'rand/1':
idxs = [idx for idx in range(popsize) if idx != j]
a, b, c = pop[np.random.choice(idxs, 3, replace=False)]
# Generates a mutant by applying the differential mutation (and clips to keep in range [0,1])
mutant = np.clip(a + mut * (b - c), 0, 1)
elif mode == 'rand/2':
idxs = [idx for idx in range(popsize) if idx != j]
a, b, c, d, e = pop[np.random.choice(idxs, 5, replace=False)]
# Generates a mutant by applying the differential mutation (and clips to keep in range [0,1])
mutant = np.clip(a + mut * (b - c + d - e), 0, 1)
# Selects parameters of the individual to crossover with the mutant with the probability of crossover
cross_points = np.random.rand(num_params) < crossprob
# If some parameter results to need crossover ...
if not np.any(cross_points):
# selects the index of that parameter for crossover
cross_points[np.random.randint(0, num_params)] = True
# The parameters of the individual's genome that require crossover gets changed for those of the mutant,
# producing a new individual
trial = np.where(cross_points, mutant, pop[j])
# Scales the genome of the new individual from the range [0,1] to the range specified by the parameter
# boundaries
trial_denorm = min_b + trial * diff
# Evaluates fitness of new individual
f = fobj(trial_denorm)
# If better than the previous one, keeps the new one
if f < unfitness[j]:
unfitness[j] = f
pop[j] = trial
# If better than the best one so far, updates the record
if f < unfitness[best_idx]:
best_idx = j
best = trial_denorm
yield best, unfitness[best_idx]
| [
"numpy.clip",
"numpy.fabs",
"numpy.random.rand",
"numpy.where",
"numpy.random.choice",
"numpy.asarray",
"numpy.any",
"numpy.random.randint",
"numpy.argmin"
] | [((326, 361), 'numpy.random.rand', 'np.random.rand', (['popsize', 'num_params'], {}), '(popsize, num_params)\n', (340, 361), True, 'import numpy as np\n'), ((598, 620), 'numpy.fabs', 'np.fabs', (['(min_b - max_b)'], {}), '(min_b - max_b)\n', (605, 620), True, 'import numpy as np\n'), ((886, 906), 'numpy.argmin', 'np.argmin', (['unfitness'], {}), '(unfitness)\n', (895, 906), True, 'import numpy as np\n'), ((458, 476), 'numpy.asarray', 'np.asarray', (['bounds'], {}), '(bounds)\n', (468, 476), True, 'import numpy as np\n'), ((3200, 3238), 'numpy.where', 'np.where', (['cross_points', 'mutant', 'pop[j]'], {}), '(cross_points, mutant, pop[j])\n', (3208, 3238), True, 'import numpy as np\n'), ((1470, 1502), 'numpy.clip', 'np.clip', (['(a + mut * (b - c))', '(0)', '(1)'], {}), '(a + mut * (b - c), 0, 1)\n', (1477, 1502), True, 'import numpy as np\n'), ((2742, 2768), 'numpy.random.rand', 'np.random.rand', (['num_params'], {}), '(num_params)\n', (2756, 2768), True, 'import numpy as np\n'), ((2862, 2882), 'numpy.any', 'np.any', (['cross_points'], {}), '(cross_points)\n', (2868, 2882), True, 'import numpy as np\n'), ((1403, 1443), 'numpy.random.choice', 'np.random.choice', (['idxs', '(2)'], {'replace': '(False)'}), '(idxs, 2, replace=False)\n', (1419, 1443), True, 'import numpy as np\n'), ((1862, 1902), 'numpy.clip', 'np.clip', (['(a + mut * (b - c + d - e))', '(0)', '(1)'], {}), '(a + mut * (b - c + d - e), 0, 1)\n', (1869, 1902), True, 'import numpy as np\n'), ((2981, 3013), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_params'], {}), '(0, num_params)\n', (2998, 3013), True, 'import numpy as np\n'), ((1685, 1725), 'numpy.random.choice', 'np.random.choice', (['idxs', '(4)'], {'replace': '(False)'}), '(idxs, 4, replace=False)\n', (1701, 1725), True, 'import numpy as np\n'), ((2212, 2244), 'numpy.clip', 'np.clip', (['(a + mut * (b - c))', '(0)', '(1)'], {}), '(a + mut * (b - c), 0, 1)\n', (2219, 2244), True, 'import numpy as np\n'), ((2035, 2075), 
'numpy.random.choice', 'np.random.choice', (['idxs', '(3)'], {'replace': '(False)'}), '(idxs, 3, replace=False)\n', (2051, 2075), True, 'import numpy as np\n'), ((2560, 2600), 'numpy.clip', 'np.clip', (['(a + mut * (b - c + d - e))', '(0)', '(1)'], {}), '(a + mut * (b - c + d - e), 0, 1)\n', (2567, 2600), True, 'import numpy as np\n'), ((2383, 2423), 'numpy.random.choice', 'np.random.choice', (['idxs', '(5)'], {'replace': '(False)'}), '(idxs, 5, replace=False)\n', (2399, 2423), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import requests # Coleta de conteúdo em Webpage
from requests.exceptions import HTTPError
from bs4 import BeautifulSoup as bs # Scraping webpages
from time import sleep
import json
import re #biblioteca para trabalhar com regular expressions - regex
import string
import unidecode
import nltk
#nltk.download('punkt')
#nltk.download('stopwords')
from nltk.stem import SnowballStemmer
from nltk.tokenize import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from operator import itemgetter
def cleanhtml(raw_html):
    """Remove HTML tags (and '&' followed by '.', '*' or '?') from raw_html."""
    markup = re.compile('<.*?>|&[.*?]')
    return re.sub(markup, '', raw_html)
#remove todas as pontuações e retorna lista de palavras
def clean_text (text):
    """Lowercase, strip punctuation and newlines, and transliterate to ASCII."""
    no_punct = text.translate(str.maketrans('', '', string.punctuation))
    flattened = no_punct.replace('\n', ' ').strip().lower()
    return unidecode.unidecode(flattened)
def read_stackoverflow_overview(tags=[], tab='Frequent', pages=5):
    """Scrape Stack Overflow question-list pages and return the soup selections.

    NOTE(review): `tags=[]` is a mutable default argument; harmless here since
    it is only read, but a None default would be safer.

    Args:
        tags: tag names combined with OR in the URL.
        tab: Stack Overflow listing tab (e.g. 'Frequent', 'Newest').
        pages: number of result pages to fetch.

    Returns:
        List (one entry per fetched page) of '.question-summary' soup elements.
    """
    link = 'https://stackoverflow.com/questions'
    selector='question-summary'
    if tags:
        # Build the /tagged/a+or+b URL segment.
        tags_link = '/tagged/'
        pre=''
        for t in tags:
            tags_link += str(pre) + t
            pre = '+or+'
        link += tags_link
    link += '?tab='+tab
    questions_text = ''
    soup_selection = []
    for page in range(1,pages+1):
        page_link = '&page='+str(page)
        try:
            request = requests.get(link+page_link)
            request.raise_for_status()
            try:
                soup = bs(request.text, 'html.parser')
                soup_selection.append(soup.select('.'+selector))
            except: print ("Could not transform to soup object by selecting ",selector)
        except HTTPError:
            print ("Could not download page ", page)
        # Be polite to the server between page requests.
        sleep(0.05)
    return soup_selection
def questions_overview(questions_overview_raw):
    """Parse scraped question-summary soups into a pandas DataFrame.

    Args:
        questions_overview_raw: output of read_stackoverflow_overview — a list
            of lists of '.question-summary' soup elements.

    Returns:
        DataFrame with columns title, link, summary, vote_count, answers_count,
        views, tags, plus empty full_question/best_answer placeholders to be
        filled later by read_question_detail.
    """
    questions_overview = { 'questions':[]}
    for soups in questions_overview_raw:
        for soup in soups:
            title = soup.select_one('.question-hyperlink').getText()
            link = 'https://stackoverflow.com'+soup.select_one('.question-hyperlink').get('href')
            summary = soup.select_one('.excerpt').getText()
            vote_count = soup.select_one('.vote-count-post').getText()
            answers_count = soup.select_one('.answered-accepted')
            # Unanswered questions have no '.answered-accepted' element.
            answers_count = re.sub('\D','',answers_count.getText('')) if answers_count else '0'
            # The views count lives in the title attribute, e.g. "1,234 views".
            views = re.sub('views','',soup.select_one('.views').attrs['title'])
            views = re.sub(',','',views)
            tags = []
            for tag in soup.select('.post-tag'): tags.append(tag.getText())
            questions_overview['questions'].append({
                'title': title,
                'link': link,
                'summary': summary,
                'vote_count': int(vote_count),
                'answers_count': int(answers_count),
                'views': int(views),
                'tags': tags,
                'full_question': '',
                'best_answer': '',
            })
    questions_df = pd.DataFrame(questions_overview['questions'])
    return questions_df
def read_question_detail(questions_df):
    """Fetch each question page and fill full_question/best_answer in place.

    NOTE(review): `questions_df['full_question'][idx] = ...` is pandas chained
    assignment — it triggers SettingWithCopyWarning and may silently not write;
    `questions_df.at[idx, 'full_question'] = ...` would be the safe form.

    Returns:
        The same DataFrame (also mutated in place).
    """
    idx = 0
    for link in questions_df['link']:
        question = []
        answer = []
        try:
            request = requests.get(link)
            request.raise_for_status()
            try:
                soup = bs(request.text, 'html.parser')
                questions_df['full_question'][idx] = soup.find("div", {"id": "question"}).select_one('.post-text').getText()
                # First '.post-text' under #answers is taken as the best answer.
                questions_df['best_answer'][idx] = soup.find("div", {"id": "answers"}).select_one('.post-text').getText()
            except:
                print ("Could not transform to soup object by selecting")
        except HTTPError:
            print ("Could not download page")
        idx += 1
        # Be polite to the server between requests.
        sleep(0.05)
    return questions_df
def stackoverflow_vocabulary(questions_df):
    """Tokenize, stem and count the corpus built from title + question + answer.

    Args:
        questions_df: DataFrame with title, full_question and best_answer columns.

    Returns:
        (vocabulary, docs_stem_words): dict mapping stem -> total occurrence
        count across all documents, and the per-document lists of stems.
    """
    docs_stem_words = []
    vocabulary = {}
    stop_words = stopwords.words('english')
    #stop_words.append(['could', 'would', 'might', 'can', 'should'])
    snowball_stemmer = SnowballStemmer("english")
    for index in range(len(questions_df)):
        text = questions_df['title'][index] + questions_df['full_question'][index] + questions_df['best_answer'][index]
        tokentext = word_tokenize(clean_text(text))
        # Keep stems of tokens longer than 2 chars that are not stop words.
        stem_words = [snowball_stemmer.stem(word) for word in tokentext if not word in stop_words and len(word) > 2 and word not in string.punctuation]
        docs_stem_words.append(stem_words)
        # Initialize the vocabulary without repeated words.
        for word in stem_words:
            vocabulary[word] = 0
    # Count each word's occurrences across all documents.
    for words in docs_stem_words:
        for word in words:
            vocabulary[word] += 1
    return vocabulary, docs_stem_words
#Criar índice invertido para viabilizar buscas
def create_InvertedIndex(vocabulary, docs_stem_words):
    """Build an inverted index and serialize it to stackoverflow_InvertedIndex.json.

    Each term maps to a list of [doc_index, frequency] postings sorted by
    descending frequency.

    NOTE(review): this is O(|vocabulary| * total tokens) — a single pass with a
    per-document Counter would be much cheaper. Also the output file handle is
    never closed; a `with open(...)` would be safer.
    """
    invertedList = dict()
    for term in vocabulary:
        invertedList[term] = list()
        index = 0
        for stem_words in docs_stem_words:
            frequencia = 0
            for word in stem_words:
                if word == term:
                    frequencia += 1
            if frequencia > 0:
                invertedList[term].append([index, frequencia])
            index += 1
        invertedList[term].sort(key=itemgetter(1), reverse=True)
    # Serialize data into file:
    json.dump(invertedList, open( "stackoverflow_InvertedIndex.json", 'w' ) )
    return
def simple_stemming_docs(documents):
    """Clean, tokenize and stem a list of documents into one flat list of stems.

    Applies the same filtering as stackoverflow_vocabulary: drops stop words,
    tokens of length <= 2, and punctuation tokens.
    """
    snowball_stemmer = SnowballStemmer("english")
    stop_words = stopwords.words('english')
    #stop_words.append('could', 'would', 'might', 'can', 'should')
    # Flatten all documents' token lists into a single list.
    tokens = sum([word_tokenize(clean_text(document)) for document in documents], [])
    stem_words = [snowball_stemmer.stem(word) for word in tokens if not word in stop_words and len(word) > 2 and word not in string.punctuation]
    return stem_words
def simple_lookup_query(query, invertedList):
    """Map each stemmed query term to the document indices in its posting list.

    Terms absent from the index set the sentinel key 'missingTerm' to None.
    """
    docs_index = {}
    for term in simple_stemming_docs([query]):
        if term in invertedList.keys():
            docs_index[term] = [posting[0] for posting in invertedList[term]]
        else:
            docs_index['missingTerm'] = None
    return docs_index
def make_clickable(val):
    """Wrap val in an HTML anchor whose href and text are both val."""
    return f'<a href="{val}">{val}</a>'
#dataframe com várias colunas
def make_clickable2(val):
# target _blank to open new window
return '<a target="_blank" href="{}">{}</a>'.format(val, val)
def print_search_result(docs_index, questions_df, operator='OR', num_results = 5):
    """Combine per-term posting lists with AND/OR and build a result DataFrame.

    NOTE(review): the combination logic looks fragile — `responseSet` mixes a
    flat list (single-term case) with a list of lists (appended pairwise
    unions/intersections), relying on np.unique to flatten; pairwise
    combination of consecutive term lists is also not a true n-way AND.
    `num_results` is accepted but never used. Verify before reuse.
    """
    resultList=[lista[1] for lista in docs_index.items()]
    responseSet = []
    # A missing term empties an AND query; under OR it is simply dropped.
    if operator == 'AND' and 'missingTerm' in docs_index.keys():
        resultList = []
    elif 'missingTerm' in docs_index.keys():
        resultList.remove(None)
    if len(resultList) == 1:
        responseSet = resultList[0]
    # Combine consecutive term lists pairwise (intersection/union of sets).
    for i in range(len(resultList)-1):
        # AND operator
        if operator == 'AND':
            responseSet.append(list(set(resultList[i]).intersection(resultList[i+1])))
        else:
            # OR operator
            responseSet.append(list(set(resultList[i]).union(resultList[i+1])))
    print("Foram encontrados ", len(np.unique(responseSet)), " perguntas no stackoverflow compatíveis com a sua busca. Veja as respostas mais relevantes:\n")
    # Assemble the result DataFrame.
    results = pd.DataFrame(columns=['title','link','summary'])
    #results = pd.DataFrame(columns=['title','link','summary'], index=range(len(responseSet)))
    idx=0
    for doc_idx in np.unique(responseSet):
        results.at[idx,'title'] = questions_df['title'][doc_idx]
        results.at[idx,'link'] = questions_df['link'][doc_idx]
        results.at[idx,'summary'] = questions_df['summary'][doc_idx]
        #results['title'].iloc[idx] = questions_df['title'][doc_idx]
        #results.title[idx] = questions_df['title'][doc_idx]
        #results['link'].iloc[idx] = questions_df['link'][doc_idx]
        #results.link[idx] = questions_df['link'][doc_idx]
        #results['summary'].iloc[idx] = questions_df['summary'][doc_idx]
        #results.summary[idx] = questions_df['summary'][doc_idx]
        idx += 1
    #results['link'] = results['link'].apply(make_clickable2)
    return results #.style.format({'link': make_clickable2})
def clean_tags (text):
    """Normalize a comma/space separated tag string into a list of ASCII tags."""
    lowered = text.lower().translate(str.maketrans('', '', string.punctuation))
    collapsed = " ".join(lowered.replace(',', ' ').strip().split())
    return unidecode.unidecode(collapsed).split(' ')
def remove_invalid_tags(tags, stack_tags):
    """Return the tags that are recognized Stack Overflow tags, order preserved.

    Args:
        tags: candidate tag strings.
        stack_tags: collection of valid tag names.

    Returns:
        New list of the elements of `tags` present in `stack_tags`
        (duplicates kept, original order kept).
    """
    # Set membership is O(1); the original scanned the list once per tag and
    # used a list comprehension purely for its append side effect.
    valid = set(stack_tags)
    return [tag for tag in tags if tag in valid]
def read_stackoverflow_tags():
    """Load the list of valid Stack Overflow tag names from stackoverflow_tags.csv.

    Expects a 'tagName' column; returns its values as a plain list.
    """
    stack_tags = pd.read_csv('stackoverflow_tags.csv')
    stack_tags = stack_tags['tagName'].values.tolist()
    return stack_tags
def refresh_stackoverflow(tags=['python','pandas'],tab='Frequent',pages=2):
    """Scrape questions, build the vocabulary and rewrite the inverted index.

    NOTE(review): the `tab` and `pages` parameters are accepted but the inner
    call hard-codes tab='Frequent', pages=2, so they are effectively ignored.
    Mutable default `tags=['python','pandas']` is read-only here but fragile.

    Returns:
        DataFrame of scraped questions with full question/answer text.
    """
    questions_overview_raw = read_stackoverflow_overview(tags,tab='Frequent',pages=2)
    questions_df = questions_overview(questions_overview_raw)
    questions_df = read_question_detail(questions_df)
    vocabulary, docs_stem_words = stackoverflow_vocabulary(questions_df)
    create_InvertedIndex(vocabulary, docs_stem_words)
    return questions_df
'''
if __name__ == '__main__':
questions_df = refresh_stackoverflow(tags=['tell','flutter'],tab='Frequent',pages=2)
invertedList = json.load( open( "stackoverflow_InvertedIndex.json" ) )
searchTerms = input("Digite os termos de busca: ")
operator = input("Deseja buscar documentos que contenha todos os termos da busca? (S/N)")
if operator.lower() == 's': operator = 'AND'
else: operator = 'OR'
docs_index = simple_lookup_query(searchTerms,invertedList)
result = print_search_result(docs_index,questions_df,operator)'''
'''tell me about flutter
Casos com erro
how can I create an user interface?
how can I create an user interface flutter django'''
| [
"nltk.stem.SnowballStemmer",
"nltk.corpus.stopwords.words",
"numpy.unique",
"re.compile",
"pandas.read_csv",
"time.sleep",
"requests.get",
"bs4.BeautifulSoup",
"operator.itemgetter",
"unidecode.unidecode",
"pandas.DataFrame",
"re.sub"
] | [((608, 634), 're.compile', 're.compile', (['"""<.*?>|&[.*?]"""'], {}), "('<.*?>|&[.*?]')\n", (618, 634), False, 'import re\n'), ((649, 677), 're.sub', 're.sub', (['cleanr', '""""""', 'raw_html'], {}), "(cleanr, '', raw_html)\n", (655, 677), False, 'import re\n'), ((990, 1015), 'unidecode.unidecode', 'unidecode.unidecode', (['text'], {}), '(text)\n', (1009, 1015), False, 'import unidecode\n'), ((2985, 3030), 'pandas.DataFrame', 'pd.DataFrame', (["questions_overview['questions']"], {}), "(questions_overview['questions'])\n", (2997, 3030), True, 'import pandas as pd\n'), ((3852, 3878), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (3867, 3878), False, 'from nltk.corpus import stopwords\n'), ((3971, 3997), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (3986, 3997), False, 'from nltk.stem import SnowballStemmer\n'), ((5500, 5526), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (5515, 5526), False, 'from nltk.stem import SnowballStemmer\n'), ((5544, 5570), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (5559, 5570), False, 'from nltk.corpus import stopwords\n'), ((7397, 7447), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['title', 'link', 'summary']"}), "(columns=['title', 'link', 'summary'])\n", (7409, 7447), True, 'import pandas as pd\n'), ((7571, 7593), 'numpy.unique', 'np.unique', (['responseSet'], {}), '(responseSet)\n', (7580, 7593), True, 'import numpy as np\n'), ((8598, 8623), 'unidecode.unidecode', 'unidecode.unidecode', (['text'], {}), '(text)\n', (8617, 8623), False, 'import unidecode\n'), ((8868, 8905), 'pandas.read_csv', 'pd.read_csv', (['"""stackoverflow_tags.csv"""'], {}), "('stackoverflow_tags.csv')\n", (8879, 8905), True, 'import pandas as pd\n'), ((1816, 1827), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (1821, 1827), False, 'from time import sleep\n'), 
((3710, 3721), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (3715, 3721), False, 'from time import sleep\n'), ((1483, 1513), 'requests.get', 'requests.get', (['(link + page_link)'], {}), '(link + page_link)\n', (1495, 1513), False, 'import requests\n'), ((2520, 2542), 're.sub', 're.sub', (['""","""', '""""""', 'views'], {}), "(',', '', views)\n", (2526, 2542), False, 'import re\n'), ((3205, 3223), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (3217, 3223), False, 'import requests\n'), ((7237, 7259), 'numpy.unique', 'np.unique', (['responseSet'], {}), '(responseSet)\n', (7246, 7259), True, 'import numpy as np\n'), ((1571, 1602), 'bs4.BeautifulSoup', 'bs', (['request.text', '"""html.parser"""'], {}), "(request.text, 'html.parser')\n", (1573, 1602), True, 'from bs4 import BeautifulSoup as bs\n'), ((3283, 3314), 'bs4.BeautifulSoup', 'bs', (['request.text', '"""html.parser"""'], {}), "(request.text, 'html.parser')\n", (3285, 3314), True, 'from bs4 import BeautifulSoup as bs\n'), ((5287, 5300), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (5297, 5300), False, 'from operator import itemgetter\n')] |
import numpy as np
from astropy.io import fits
from astropy.table import Table
from specutils import Spectrum1D, SpectrumList
def create_spectrum_hdu(data_len):
    """Build a minimal EXTRACT1D BinTableHDU with random spectral columns.

    Args:
        data_len: number of rows (spectral samples).
    """
    columns = np.random.random((data_len, 3))
    table = Table(data=columns, names=['WAVELENGTH', 'FLUX', 'ERROR'])
    hdu = fits.BinTableHDU(table, name='EXTRACT1D')
    # Unit keywords for the three columns, as in real JWST products.
    for key, unit in (('TUNIT1', 'um'), ('TUNIT2', 'mJy'), ('TUNIT3', 'mJy')):
        hdu.header[key] = unit
    return hdu
def test_jwst_loader(tmpdir):
    """SpectrumList.read(format='JWST') loads one Spectrum1D per EXTRACT1D HDU."""
    tmpfile = str(tmpdir.join('jwst.fits'))
    hdulist = fits.HDUList()
    # Make sure the file has a primary HDU
    hdulist.append(fits.PrimaryHDU())
    # Add several BinTableHDUs that contain spectral data
    hdulist.append(create_spectrum_hdu(100))
    hdulist.append(create_spectrum_hdu(120))
    hdulist.append(create_spectrum_hdu(110))
    # JWST data product will always contain an ASDF header which is a BinTable
    hdulist.append(fits.BinTableHDU(name='ASDF'))
    hdulist.writeto(tmpfile)
    data = SpectrumList.read(tmpfile, format='JWST')
    # The ASDF extension must be skipped: only the 3 EXTRACT1D HDUs load.
    assert len(data) == 3
    for item in data:
        assert isinstance(item, Spectrum1D)
    assert data[0].shape == (100,)
    assert data[1].shape == (120,)
    assert data[2].shape == (110,)
| [
"specutils.SpectrumList.read",
"astropy.io.fits.HDUList",
"astropy.table.Table",
"numpy.random.random",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.BinTableHDU"
] | [((234, 265), 'numpy.random.random', 'np.random.random', (['(data_len, 3)'], {}), '((data_len, 3))\n', (250, 265), True, 'import numpy as np\n'), ((278, 333), 'astropy.table.Table', 'Table', ([], {'data': 'data', 'names': "['WAVELENGTH', 'FLUX', 'ERROR']"}), "(data=data, names=['WAVELENGTH', 'FLUX', 'ERROR'])\n", (283, 333), False, 'from astropy.table import Table\n'), ((345, 386), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['table'], {'name': '"""EXTRACT1D"""'}), "(table, name='EXTRACT1D')\n", (361, 386), False, 'from astropy.io import fits\n'), ((593, 607), 'astropy.io.fits.HDUList', 'fits.HDUList', ([], {}), '()\n', (605, 607), False, 'from astropy.io import fits\n'), ((1052, 1093), 'specutils.SpectrumList.read', 'SpectrumList.read', (['tmpfile'], {'format': '"""JWST"""'}), "(tmpfile, format='JWST')\n", (1069, 1093), False, 'from specutils import Spectrum1D, SpectrumList\n'), ((670, 687), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (685, 687), False, 'from astropy.io import fits\n'), ((980, 1009), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', ([], {'name': '"""ASDF"""'}), "(name='ASDF')\n", (996, 1009), False, 'from astropy.io import fits\n')] |
import numpy as np
from jina.drivers.helper import extract_docs, array2pb
from jina.proto import jina_pb2
def test_extract_docs():
    """A document without an embedding is reported as bad; once an
    embedding is attached, extraction succeeds and round-trips the array."""
    doc = jina_pb2.Document()
    # No embedding yet: extraction must flag the doc and yield no contents.
    contents, doc_pointers, bad_ids = extract_docs([doc], embedding=True)
    assert contents is None
    assert len(bad_ids) > 0
    # Attach a random embedding and extract again.
    emb = np.random.random([2, 2])
    doc.embedding.CopyFrom(array2pb(emb))
    contents, doc_pointers, bad_ids = extract_docs([doc], embedding=True)
    assert len(bad_ids) == 0
    np.testing.assert_equal(contents[0], emb)
| [
"jina.drivers.helper.extract_docs",
"jina.proto.jina_pb2.Document",
"numpy.testing.assert_equal",
"numpy.random.random",
"jina.drivers.helper.array2pb"
] | [((142, 161), 'jina.proto.jina_pb2.Document', 'jina_pb2.Document', ([], {}), '()\n', (159, 161), False, 'from jina.proto import jina_pb2\n'), ((201, 234), 'jina.drivers.helper.extract_docs', 'extract_docs', (['[d]'], {'embedding': '(True)'}), '([d], embedding=True)\n', (213, 234), False, 'from jina.drivers.helper import extract_docs, array2pb\n'), ((306, 330), 'numpy.random.random', 'np.random.random', (['[2, 2]'], {}), '([2, 2])\n', (322, 330), True, 'import numpy as np\n'), ((409, 442), 'jina.drivers.helper.extract_docs', 'extract_docs', (['[d]'], {'embedding': '(True)'}), '([d], embedding=True)\n', (421, 442), False, 'from jina.drivers.helper import extract_docs, array2pb\n'), ((480, 521), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['contents[0]', 'vec'], {}), '(contents[0], vec)\n', (503, 521), True, 'import numpy as np\n'), ((356, 369), 'jina.drivers.helper.array2pb', 'array2pb', (['vec'], {}), '(vec)\n', (364, 369), False, 'from jina.drivers.helper import extract_docs, array2pb\n')] |
import numpy as np
from pybasicbayes.distributions import AutoRegression, DiagonalRegression, Regression
def get_empirical_ar_params(train_datas, params):
    """
    Estimate the parameters of an AR observation model
    by fitting a single AR model to the entire dataset.

    train_datas: non-empty list of 2D arrays (time x dimension)
    params: dict with prior hyperparameters nu_0, S_0, M_0, K_0, affine
    Returns: dict of hyperparameters with S_0 and M_0 replaced by values
        derived from the maximum-likelihood AR fit.
    """
    assert isinstance(train_datas, list) and len(train_datas) > 0
    datadimension = train_datas[0].shape[1]
    # nu_0 must exceed dim + 1 so the inverse-Wishart mean below is defined
    assert params["nu_0"] > datadimension + 1
    # Initialize the observation parameters
    obs_params = dict(nu_0=params["nu_0"],
                      S_0=params['S_0'],
                      M_0=params['M_0'],
                      K_0=params['K_0'],
                      affine=params['affine'])
    # Fit an AR model to the entire dataset
    obs_distn = AutoRegression(**obs_params)
    obs_distn.max_likelihood(train_datas)
    # Use the inferred noise covariance as the prior mean
    # E_{IW}[S] = S_0 / (nu_0 - datadimension - 1)
    obs_params["S_0"] = obs_distn.sigma * (params["nu_0"] - datadimension - 1)
    obs_params["M_0"] = obs_distn.A.copy()
    return obs_params
def expected_hmm_logprob(pi_0, trans_matrix, stats):
    """
    Expected log prior of an HMM state sequence under q(z).

    :param pi_0: initial state distribution
    :param trans_matrix: transition matrix
    :param stats: tuple (E[z_t], sum_t E[z_t z_{t+1}^T], ...)
    :return: E_{q(z)} [ log p(z) ]
    """
    E_z, sum_E_ztztp1T, _ = stats
    T, K = E_z.shape
    assert sum_E_ztztp1T.shape == (K, K)
    # Initial-state term plus expected transition counts weighted by log probs.
    initial_term = E_z[0].dot(np.log(pi_0))
    transition_term = np.sum(np.log(trans_matrix) * sum_E_ztztp1T)
    return initial_term + transition_term
def hmm_entropy(params, stats):
    """
    Entropy of the variational HMM posterior q(z), computed as
    H[q] = log_Z - E_q[log ptilde(z)].
    """
    log_transmatrix, log_pi_0, aBl, _ = params
    E_z, sum_E_ztztp1T, log_Z = stats
    T, K = E_z.shape
    assert aBl.shape == (T, K)
    assert sum_E_ztztp1T.shape == (K, K)
    assert log_transmatrix.shape == (K, K)
    # Expected unnormalized log joint under q(z).
    expected_log_joint = (
        np.sum(E_z[0] * log_pi_0)
        + np.sum(E_z * aBl)
        + np.sum(sum_E_ztztp1T * log_transmatrix)
    )
    return log_Z - expected_log_joint
def expected_gaussian_logprob(mu, sigma, stats):
    """
    Expected Gaussian log density E_q[log N(x | mu, sigma)] computed from
    expected sufficient statistics (E[x], E[x x^T], n). Returns a scalar
    for 1D stats or a per-sample vector when stats carry a leading axis.
    """
    dim = mu.shape[0]
    precision = np.linalg.inv(sigma)
    linear = precision.dot(mu)
    quad = mu.dot(precision).dot(mu.T)
    logdet_precision = np.linalg.slogdet(precision)[1]
    x, xxT, n = stats
    # Choose contractions that match scalar vs batched statistics.
    if x.ndim == 1:
        vec_contract, mat_contract = 'i,i->', 'ij,ij->'
    else:
        vec_contract, mat_contract = 'i,ni->n', 'ij,nij->n'
    out = -0.5 * np.einsum(mat_contract, precision, xxT)
    out = out + np.einsum(vec_contract, linear, x)
    out = out - n / 2. * quad
    out = out - dim / 2. * np.log(2 * np.pi) + n / 2. * logdet_precision
    return out
def expected_regression_log_prob(regression, stats):
    """Dispatch on the regression type: diagonal-noise regressions use
    expected_diag_regression_log_prob, dense ones use
    expected_dense_regression_log_prob. Anything else is an error."""
    if isinstance(regression, DiagonalRegression):
        return expected_diag_regression_log_prob(
            regression.A, regression.sigmasq_flat, stats)
    if isinstance(regression, Regression):
        return expected_dense_regression_log_prob(
            regression.A, regression.sigma, stats)
    raise Exception("Unrecognized regression object! {}".format(regression))
def expected_dense_regression_log_prob(A, Sigma, stats):
    """
    Expected log likelihood E_q[log p(y | x)] for y ~ N(Ax, Sigma),
    computed from expected sufficient statistics.

    :param A: regression matrix
    :param Sigma: observation covariance
    :param stats: tuple whose last four entries are
        (E[yy.T], E[yx.T], E[xx.T], n)
    :return: scalar, or per-sample vector when the stats are batched
    """
    yyT, yxT, xxT, n = stats[-4:]
    # Batched stats carry a leading sample axis; pick the matching contraction.
    contract = 'ij,nij->n' if yyT.ndim == 3 else 'ij,ij->'
    dim = A.shape[0]
    Sigma_inv = np.linalg.inv(Sigma)
    Sigma_inv_A = Sigma_inv.dot(A)
    quad_form = A.T.dot(Sigma_inv_A)
    # Quadratic, cross, and output terms of the Gaussian log density.
    result = -0.5 * np.einsum(contract, quad_form, xxT)
    result = result + np.einsum(contract, Sigma_inv_A, yxT)
    result = result - 0.5 * np.einsum(contract, Sigma_inv, yyT)
    result = result - dim / 2 * np.log(2 * np.pi) \
        + n / 2. * np.linalg.slogdet(Sigma_inv)[1]
    return result
def expected_diag_regression_log_prob(A, sigmasq, stats):
    """
    Expected log likelihood of p(y | x) where
    y_{n,d} ~ N(a_d^T x_n, sigma_d^2)
    and expectation is wrt q(y,x). We only need expected
    sufficient statistics E[y^2], E[yx.T], E[xx.T], and a mask of
    observed output entries.
    :param A: (D_out, D_in) regression matrix
    :param sigmasq: (D_out,) diagonal observation variances
    :param stats: tuple whose last four entries are
        (E[y^2], E[yx.T], E[xx.T], mask)
    :return: (T,) array of per-timestep values of E[log p(y | x)]
    """
    D_out, D_in = A.shape
    assert sigmasq.shape == (D_out,)
    ysq, yxT, xxT, mask = stats[-4:]
    T = ysq.shape[0]
    assert ysq.shape == (T, D_out)
    assert yxT.shape == (T, D_out, D_in)
    # xxT has different shapes depending on whether or not data is missing
    # with missing data, it is (T, Dout, Din, Din) since we need to mask
    # off certain xxT pairs. If there's no mask, it's always the same for
    # every output dimension. To make it easy, we just broadcast along
    # the Dout dimension for the no-missing-data case.
    if xxT.shape == (T, D_in, D_in):
        xxT = xxT[:,None,:,:]
    else:
        assert xxT.shape == (T, D_out, D_in, D_in)
    assert mask.shape == (T, D_out)
    # Per-output-dimension outer products a_d a_d^T, stacked to (D_out, D_in, D_in)
    AAT = np.array([np.outer(a, a) for a in A])
    # n[t] = number of observed output dimensions at time t
    n = mask.sum(1)
    # Information-form (natural) parameters for each output dimension
    J_node = AAT[None, :, :, :] / sigmasq[None, :, None, None]
    h_node = (mask / sigmasq)[:,:,None] * A[None, :, :]
    # Quadratic, linear, and constant pieces of the Gaussian log density
    out = -1 / 2. * np.sum(J_node * xxT, axis=(1, 2, 3))
    out += np.sum(h_node * yxT, axis=(1, 2))
    out += -1 / 2. * np.sum(mask / sigmasq * ysq, axis=1)
    out += -n / 2. * np.log(2 * np.pi)
    out += -1 / 2. * np.sum(mask * np.log(sigmasq), axis=1)
    assert out.shape == (T,)
    return out
def lds_entropy(info_params, stats):
    """
    Entropy of the variational LDS factor q(x): accumulates the negative
    entropy -H = E_q[log q(x)] from information-form potentials and
    expected sufficient statistics, then negates it.

    info_params: tuple of initial, pairwise, and node potentials in
        information form plus their log normalizers
    stats: tuple (lds normalizer, E[x], Var[x], E[x_{t+1} x_t^T])
    Returns: scalar entropy
    """
    # Extract the info params that make up the variational factor
    J_init, h_init, log_Z_init, \
        J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair, \
        J_node, h_node, log_Z_node = info_params
    # Extract the expected sufficient statistics
    _lds_normalizer, E_x, Var_x, E_xtp1_xtT = stats
    # Second moments E[x x^T] = Var[x] + E[x] E[x]^T, per timestep
    E_x_xT = Var_x + E_x[:, :, None] * E_x[:, None, :]
    contract = 'tij,tji->'
    # Initial potential ('nep' accumulates the negative entropy)
    nep = -1. / 2 * np.sum(J_init * E_x_xT[0])
    nep += h_init.dot(E_x[0])
    nep += log_Z_init
    # Pair potentials
    nep += -1. / 2 * np.einsum(contract, J_pair_22, E_x_xT[1:])
    nep += - np.einsum(contract, np.swapaxes(J_pair_21, 1, 2), E_xtp1_xtT)
    nep += -1. / 2 * np.einsum(contract, J_pair_11, E_x_xT[:-1])
    nep += np.sum(h_pair_1 * E_x[:-1])
    nep += np.sum(h_pair_2 * E_x[1:])
    nep += np.sum(log_Z_pair)
    # Node potentials -- with single emission, J_node is 2D
    nep += -1. / 2 * np.einsum(
        'tij,tji->' if J_node.ndim == 3 else 'ij,tji->', J_node, E_x_xT)
    nep += np.sum(h_node * E_x)
    nep += np.sum(log_Z_node)
    # Normalizer
    nep += -_lds_normalizer
    return -nep
def symmetric_blk_tridiagonal_logdet(diagonal_array, off_diagonal_array):
    """
    Log-determinant of a symmetric block-tridiagonal matrix, built densely.

    diagonal_array: (T, n, n) diagonal blocks
    off_diagonal_array: (T-1, n, n) lower off-diagonal blocks; their
        transposes fill the upper off-diagonal
    Returns: log |J| via slogdet of the assembled (T*n, T*n) matrix
    """
    num_blocks = len(diagonal_array)
    blk = diagonal_array.shape[1]
    full = np.zeros((num_blocks * blk, num_blocks * blk))
    # Place the diagonal blocks.
    for t in range(num_blocks):
        rows = slice(t * blk, (t + 1) * blk)
        full[rows, rows] = diagonal_array[t]
    # Place the off-diagonal blocks symmetrically.
    for t in range(num_blocks - 1):
        rows = slice(t * blk, (t + 1) * blk)
        cols = slice((t + 1) * blk, (t + 2) * blk)
        full[rows, cols] += off_diagonal_array[t].T
        full[cols, rows] += off_diagonal_array[t]
    return np.linalg.slogdet(full)[1]
def test_lds_entropy(info_params):
    """
    Direct (testing) computation of the LDS variational entropy: assembles
    the full block-tridiagonal precision of q(x) and applies the Gaussian
    entropy formula H = -1/2 log|J| + T*D/2 * (1 + log(2*pi)).
    Relies on symmetric_blk_tridiagonal_logdet defined in this module.
    """
    # Extract the info params that make up the variational factor
    J_init, h_init, log_Z_init, \
        J_pair_11, J_pair_21, J_pair_22, h_pair_1, h_pair_2, log_Z_pair, \
        J_node, h_node, log_Z_node = info_params
    T, D = h_node.shape
    # Compute the variational entropy by constructing the full Gaussian params.
    diagonal_array = J_node.copy()
    diagonal_array[0] += J_init
    diagonal_array[:-1] += J_pair_11
    diagonal_array[1:] += J_pair_22
    off_diagonal_array = J_pair_21.copy()
    ve = -1. / 2 * symmetric_blk_tridiagonal_logdet(diagonal_array, off_diagonal_array)
    ve += 1. / 2 * T * D * (1 + np.log(2 * np.pi))
    return ve
def gaussian_map_estimation(stats, gaussian):
    """
    Set the Gaussian's mu and sigma to their MAP values given expected
    sufficient statistics and its normal-inverse-Wishart prior.

    stats: tuple (sum E[x], sum E[x x^T], count n)
    gaussian: object with prior attributes mu_0, sigma_0, kappa_0, nu_0;
        its mu and sigma attributes are overwritten
    Raises: Exception when effective observations are fewer than D or xxT
        is numerically rank-deficient
    NOTE(review): the += lines below mutate the caller's x and xxT arrays
    in place -- callers must not reuse stats afterwards.
    """
    D = gaussian.D
    x, xxT, n = stats
    # Add "pseudocounts" from the prior
    mu_0, sigma_0, kappa_0, nu_0 = \
        gaussian.mu_0, gaussian.sigma_0, gaussian.kappa_0, gaussian.nu_0
    xxT += sigma_0 + kappa_0 * np.outer(mu_0, mu_0)
    x += kappa_0 * mu_0
    n += nu_0 + 2 + D
    # SVD is necessary to check if the max likelihood solution is
    # degenerate, which can happen in the EM algorithm
    if n < D or (np.linalg.svd(xxT, compute_uv=False) > 1e-6).sum() < D:
        raise Exception("Can't to MAP when effective observations < D")
    # Set the MAP params
    gaussian.mu = x / n
    gaussian.sigma = xxT / n - np.outer(gaussian.mu, gaussian.mu)
def regression_map_estimation(stats, regression):
    """
    Set the regression's parameters to their MAP values given expected
    sufficient statistics.

    stats: expected sufficient statistics; for a dense Regression it is a
        tuple (E[yy.T], E[yx.T], E[xx.T], n)
    regression: DiagonalRegression or dense Regression; its A (and sigma
        for the dense case) attributes are overwritten
    """
    D_out = regression.D_out
    # Add prior and likelihood statistics
    if isinstance(regression, DiagonalRegression):
        # NOTE(review): presumably max_likelihood with precomputed stats
        # incorporates the prior internally -- confirm in pybasicbayes.
        regression.max_likelihood(data=None, stats=stats)
    else:
        # Elementwise-sum the stats tuple with the prior natural parameters
        sum_tuples = lambda lst: list(map(sum, zip(*lst)))
        yyT, yxT, xxT, n = sum_tuples([stats, regression.natural_hypparam])
        A = np.linalg.solve(xxT, yxT.T).T
        sigma = (yyT - A.dot(yxT.T)) / n
        # Make sure sigma is symmetric
        symmetrize = lambda A: (A + A.T) / 2.
        # Tiny diagonal jitter guards against numerical asymmetry/singularity
        sigma = 1e-10 * np.eye(D_out) + symmetrize(sigma)
        regression.A = A
        regression.sigma = sigma
def gaussian_logprior(gaussian):
    """
    Log density of the Gaussian's parameters (mu, sigma) under its
    normal-inverse-Wishart prior:
    log IW(sigma | sigma_0, nu_0) + log N(mu | mu_0, sigma / kappa_0).
    """
    D = gaussian.D
    mu, sigma = gaussian.mu, gaussian.sigma
    mu_0, sigma_0, kappa_0, nu_0 = \
        gaussian.mu_0, gaussian.sigma_0, gaussian.kappa_0, gaussian.nu_0
    # Inverse Wishart IW(sigma | sigma_0, nu_0)
    from pybasicbayes.util.stats import invwishart_log_partitionfunction
    lp = invwishart_log_partitionfunction(sigma_0, nu_0)
    lp += -(nu_0 + D + 1) / 2.0 * np.linalg.slogdet(sigma)[1]
    # trace(sigma^{-1} sigma_0) via a linear solve instead of an explicit inverse
    lp += -0.5 * np.trace(np.linalg.solve(sigma, sigma_0))
    # Normal N(mu | mu_0, Sigma / kappa_0)
    from scipy.linalg import solve_triangular
    S_chol = np.linalg.cholesky(sigma / kappa_0)
    x = solve_triangular(S_chol, mu - mu_0, lower=True)
    # Gaussian log density via the Cholesky factor:
    # -1/2 ||L^{-1}(mu - mu_0)||^2 - D/2 log(2 pi) - log|L|
    lp += -1. / 2. * np.dot(x, x) \
        - D / 2 * np.log(2 * np.pi) \
        - np.log(S_chol.diagonal()).sum()
    return lp
def regression_logprior(regression):
    """Log prior density of a regression object's parameters, dispatched
    on the regression type. Unknown types fall through and return None
    (preserving the original behavior)."""
    if isinstance(regression, DiagonalRegression):
        return diag_regression_logprior(regression)
    if isinstance(regression, Regression):
        return dense_regression_logprior(regression)
def diag_regression_logprior(regression):
    """
    Log prior density of a DiagonalRegression's parameters: each row a_d
    of A gets an independent Gaussian prior with covariance J_0^{-1} and
    mean J_0^{-1} h_0, and each precision 1/sigma_d^2 gets a
    Gamma(alpha_0, beta_0) prior.
    """
    from scipy.stats import multivariate_normal, gamma
    A = regression.A
    sigmasq = regression.sigmasq_flat
    J, h, alpha, beta = \
        regression.J_0, regression.h_0, regression.alpha_0, regression.beta_0
    # Convert information form (J, h) to mean/covariance form
    Sigma = np.linalg.inv(J)
    mu = Sigma.dot(h)
    lp = 0
    for d in range(regression.D_out):
        lp += multivariate_normal(mu, Sigma).logpdf(A[d])
        # scipy's gamma is parameterized by scale, hence scale = 1 / beta
        lp += gamma(alpha, scale=1./beta).logpdf(1. / sigmasq[d])
    return lp
def dense_regression_logprior(regression):
    """
    Log prior density of a dense Regression's parameters (A, sigma) under
    its matrix-normal-inverse-Wishart prior, evaluated in natural-parameter
    form as <natural params, sufficient stats> minus the log partition
    function.
    """
    A = regression.A
    Sigmainv = np.linalg.inv(regression.sigma)
    Sigmainv_A = Sigmainv.dot(A)
    AT_Sigmainv_A = A.T.dot(Sigmainv_A)
    logdetSigmainv = np.linalg.slogdet(Sigmainv)[1]
    # NOTE(review): this rebinds A to the first natural hyperparameter,
    # shadowing the regression matrix read above -- confusing but the
    # products involving the original A were already computed.
    A, B, C, d = regression.natural_hypparam
    bilinear_term = -1./2 * np.trace(A.dot(Sigmainv)) \
        + np.trace(B.T.dot(Sigmainv_A)) \
        - 1./2 * np.trace(C.dot(AT_Sigmainv_A)) \
        + 1./2 * d * logdetSigmainv
    # log normalizer term
    from pybasicbayes.util.stats import mniw_log_partitionfunction
    Z = mniw_log_partitionfunction(
        *regression._natural_to_standard(regression.natural_hypparam))
    return bilinear_term - Z
| [
"scipy.stats.multivariate_normal",
"numpy.log",
"numpy.einsum",
"numpy.arange",
"numpy.dot",
"pybasicbayes.util.stats.invwishart_log_partitionfunction",
"scipy.linalg.solve_triangular",
"numpy.eye",
"numpy.linalg.slogdet",
"numpy.outer",
"numpy.linalg.svd",
"numpy.linalg.cholesky",
"numpy.li... | [((761, 789), 'pybasicbayes.distributions.AutoRegression', 'AutoRegression', ([], {}), '(**obs_params)\n', (775, 789), False, 'from pybasicbayes.distributions import AutoRegression, DiagonalRegression, Regression\n'), ((1846, 1871), 'numpy.sum', 'np.sum', (['(E_z[0] * log_pi_0)'], {}), '(E_z[0] * log_pi_0)\n', (1852, 1871), True, 'import numpy as np\n'), ((1891, 1908), 'numpy.sum', 'np.sum', (['(E_z * aBl)'], {}), '(E_z * aBl)\n', (1897, 1908), True, 'import numpy as np\n'), ((1928, 1967), 'numpy.sum', 'np.sum', (['(sum_E_ztztp1T * log_transmatrix)'], {}), '(sum_E_ztztp1T * log_transmatrix)\n', (1934, 1967), True, 'import numpy as np\n'), ((2097, 2117), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma'], {}), '(sigma)\n', (2110, 2117), True, 'import numpy as np\n'), ((2373, 2392), 'numpy.einsum', 'np.einsum', (['c1', 'h', 'x'], {}), '(c1, h, x)\n', (2382, 2392), True, 'import numpy as np\n'), ((3549, 3569), 'numpy.linalg.inv', 'np.linalg.inv', (['Sigma'], {}), '(Sigma)\n', (3562, 3569), True, 'import numpy as np\n'), ((3677, 3706), 'numpy.einsum', 'np.einsum', (['contract', 'SiA', 'yxT'], {}), '(contract, SiA, yxT)\n', (3686, 3706), True, 'import numpy as np\n'), ((5331, 5364), 'numpy.sum', 'np.sum', (['(h_node * yxT)'], {'axis': '(1, 2)'}), '(h_node * yxT, axis=(1, 2))\n', (5337, 5364), True, 'import numpy as np\n'), ((6368, 6395), 'numpy.sum', 'np.sum', (['(h_pair_1 * E_x[:-1])'], {}), '(h_pair_1 * E_x[:-1])\n', (6374, 6395), True, 'import numpy as np\n'), ((6407, 6433), 'numpy.sum', 'np.sum', (['(h_pair_2 * E_x[1:])'], {}), '(h_pair_2 * E_x[1:])\n', (6413, 6433), True, 'import numpy as np\n'), ((6445, 6463), 'numpy.sum', 'np.sum', (['log_Z_pair'], {}), '(log_Z_pair)\n', (6451, 6463), True, 'import numpy as np\n'), ((6641, 6661), 'numpy.sum', 'np.sum', (['(h_node * E_x)'], {}), '(h_node * E_x)\n', (6647, 6661), True, 'import numpy as np\n'), ((6673, 6691), 'numpy.sum', 'np.sum', (['log_Z_node'], {}), '(log_Z_node)\n', (6679, 6691), True, 'import numpy 
as np\n'), ((6899, 6923), 'numpy.zeros', 'np.zeros', (['(T * n, T * n)'], {}), '((T * n, T * n))\n', (6907, 6923), True, 'import numpy as np\n'), ((6937, 6949), 'numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (6946, 6949), True, 'import numpy as np\n'), ((7030, 7046), 'numpy.arange', 'np.arange', (['(T - 1)'], {}), '(T - 1)\n', (7039, 7046), True, 'import numpy as np\n'), ((9662, 9709), 'pybasicbayes.util.stats.invwishart_log_partitionfunction', 'invwishart_log_partitionfunction', (['sigma_0', 'nu_0'], {}), '(sigma_0, nu_0)\n', (9694, 9709), False, 'from pybasicbayes.util.stats import invwishart_log_partitionfunction\n'), ((9934, 9969), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['(sigma / kappa_0)'], {}), '(sigma / kappa_0)\n', (9952, 9969), True, 'import numpy as np\n'), ((9978, 10025), 'scipy.linalg.solve_triangular', 'solve_triangular', (['S_chol', '(mu - mu_0)'], {'lower': '(True)'}), '(S_chol, mu - mu_0, lower=True)\n', (9994, 10025), False, 'from scipy.linalg import solve_triangular\n'), ((10675, 10691), 'numpy.linalg.inv', 'np.linalg.inv', (['J'], {}), '(J)\n', (10688, 10691), True, 'import numpy as np\n'), ((10983, 11014), 'numpy.linalg.inv', 'np.linalg.inv', (['regression.sigma'], {}), '(regression.sigma)\n', (10996, 11014), True, 'import numpy as np\n'), ((1487, 1499), 'numpy.log', 'np.log', (['pi_0'], {}), '(pi_0)\n', (1493, 1499), True, 'import numpy as np\n'), ((2183, 2203), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['J'], {}), '(J)\n', (2200, 2203), True, 'import numpy as np\n'), ((2340, 2361), 'numpy.einsum', 'np.einsum', (['c2', 'J', 'xxT'], {}), '(c2, J, xxT)\n', (2349, 2361), True, 'import numpy as np\n'), ((3635, 3665), 'numpy.einsum', 'np.einsum', (['contract', 'ASiA', 'xxT'], {}), '(contract, ASiA, xxT)\n', (3644, 3665), True, 'import numpy as np\n'), ((3728, 3756), 'numpy.einsum', 'np.einsum', (['contract', 'Si', 'yyT'], {}), '(contract, Si, yyT)\n', (3737, 3756), True, 'import numpy as np\n'), ((5283, 5319), 'numpy.sum', 
'np.sum', (['(J_node * xxT)'], {'axis': '(1, 2, 3)'}), '(J_node * xxT, axis=(1, 2, 3))\n', (5289, 5319), True, 'import numpy as np\n'), ((5386, 5422), 'numpy.sum', 'np.sum', (['(mask / sigmasq * ysq)'], {'axis': '(1)'}), '(mask / sigmasq * ysq, axis=1)\n', (5392, 5422), True, 'import numpy as np\n'), ((5444, 5461), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (5450, 5461), True, 'import numpy as np\n'), ((6051, 6077), 'numpy.sum', 'np.sum', (['(J_init * E_x_xT[0])'], {}), '(J_init * E_x_xT[0])\n', (6057, 6077), True, 'import numpy as np\n'), ((6174, 6216), 'numpy.einsum', 'np.einsum', (['contract', 'J_pair_22', 'E_x_xT[1:]'], {}), '(contract, J_pair_22, E_x_xT[1:])\n', (6183, 6216), True, 'import numpy as np\n'), ((6313, 6356), 'numpy.einsum', 'np.einsum', (['contract', 'J_pair_11', 'E_x_xT[:-1]'], {}), '(contract, J_pair_11, E_x_xT[:-1])\n', (6322, 6356), True, 'import numpy as np\n'), ((6546, 6620), 'numpy.einsum', 'np.einsum', (["('tij,tji->' if J_node.ndim == 3 else 'ij,tji->')", 'J_node', 'E_x_xT'], {}), "('tij,tji->' if J_node.ndim == 3 else 'ij,tji->', J_node, E_x_xT)\n", (6555, 6620), True, 'import numpy as np\n'), ((7217, 7237), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['J'], {}), '(J)\n', (7234, 7237), True, 'import numpy as np\n'), ((8622, 8656), 'numpy.outer', 'np.outer', (['gaussian.mu', 'gaussian.mu'], {}), '(gaussian.mu, gaussian.mu)\n', (8630, 8656), True, 'import numpy as np\n'), ((11109, 11136), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['Sigmainv'], {}), '(Sigmainv)\n', (11126, 11136), True, 'import numpy as np\n'), ((1535, 1555), 'numpy.log', 'np.log', (['trans_matrix'], {}), '(trans_matrix)\n', (1541, 1555), True, 'import numpy as np\n'), ((2442, 2459), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2448, 2459), True, 'import numpy as np\n'), ((3777, 3794), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3783, 3794), True, 'import numpy as np\n'), ((5094, 5108), 'numpy.outer', 
'np.outer', (['a', 'a'], {}), '(a, a)\n', (5102, 5108), True, 'import numpy as np\n'), ((6250, 6278), 'numpy.swapaxes', 'np.swapaxes', (['J_pair_21', '(1)', '(2)'], {}), '(J_pair_21, 1, 2)\n', (6261, 6278), True, 'import numpy as np\n'), ((7902, 7919), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (7908, 7919), True, 'import numpy as np\n'), ((8207, 8227), 'numpy.outer', 'np.outer', (['mu_0', 'mu_0'], {}), '(mu_0, mu_0)\n', (8215, 8227), True, 'import numpy as np\n'), ((9048, 9075), 'numpy.linalg.solve', 'np.linalg.solve', (['xxT', 'yxT.T'], {}), '(xxT, yxT.T)\n', (9063, 9075), True, 'import numpy as np\n'), ((9744, 9768), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['sigma'], {}), '(sigma)\n', (9761, 9768), True, 'import numpy as np\n'), ((9798, 9829), 'numpy.linalg.solve', 'np.linalg.solve', (['sigma', 'sigma_0'], {}), '(sigma, sigma_0)\n', (9813, 9829), True, 'import numpy as np\n'), ((3806, 3827), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['Si'], {}), '(Si)\n', (3823, 3827), True, 'import numpy as np\n'), ((5497, 5512), 'numpy.log', 'np.log', (['sigmasq'], {}), '(sigmasq)\n', (5503, 5512), True, 'import numpy as np\n'), ((9229, 9242), 'numpy.eye', 'np.eye', (['D_out'], {}), '(D_out)\n', (9235, 9242), True, 'import numpy as np\n'), ((10047, 10059), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (10053, 10059), True, 'import numpy as np\n'), ((10082, 10099), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (10088, 10099), True, 'import numpy as np\n'), ((10778, 10808), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu', 'Sigma'], {}), '(mu, Sigma)\n', (10797, 10808), False, 'from scipy.stats import multivariate_normal, gamma\n'), ((10836, 10866), 'scipy.stats.gamma', 'gamma', (['alpha'], {'scale': '(1.0 / beta)'}), '(alpha, scale=1.0 / beta)\n', (10841, 10866), False, 'from scipy.stats import multivariate_normal, gamma\n'), ((8413, 8449), 'numpy.linalg.svd', 'np.linalg.svd', (['xxT'], {'compute_uv': 
'(False)'}), '(xxT, compute_uv=False)\n', (8426, 8449), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper file containing activation functions
"""
import numpy as np
def sigmoid(x):
    """Element-wise logistic sigmoid of the input.

    Params:
        x: Array for which the sigmoid is to be calculated
    Returns:
        ndarray: 1 / (1 + exp(-x)) applied element-wise
    """
    return 1.0 / (np.exp(-x) + 1.0)
def delta_sigmoid(x):
    """Element-wise derivative of the logistic sigmoid.

    Params:
        x: Array for which the sigmoid derivative is to be calculated
    Returns:
        ndarray: sigmoid(x) * (1 - sigmoid(x)) element-wise
    """
    # Inline the sigmoid so the value is computed once and reused.
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)
def softmax(x):
    """Softmax over axis 0 of the input.

    Params:
        x: Array of scores (axis_0 is the feature dimension,
           axis_1 is the n_samples dimension)
    Returns:
        ndarray: Softmax of the input, columns summing to 1
    """
    # Shift by the column max for numerical stability before exponentiating.
    shifted = x - np.max(x, axis=0)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=0)
def relu(x):
    """Element-wise rectified linear unit.

    Params:
        x: Array for which ReLU is to be calculated
    Returns:
        ndarray: x with every negative entry replaced by 0
    """
    return np.maximum(0, x)
def delta_relu(x):
    """Element-wise derivative of the ReLU activation.

    Params:
        x: Array for which the ReLU derivative is to be calculated
    Returns:
        ndarray: float32 array, 1 where x > 0 and 0 elsewhere
    """
    return (x > 0).astype(np.float32)
def linear(x):
    """Identity activation: returns the input unchanged.

    Params:
        x: Array for which the linear activation is to be calculated
    Returns:
        ndarray: the input itself
    """
    return x
def delta_linear(x):
    """Derivative of the identity activation: all ones.

    Params:
        x: Array whose shape the derivative should match
    Returns:
        ndarray: float32 ones with the same shape as x
    """
    return np.ones_like(x, dtype=np.float32)
def activation_function(x, type="linear"):
    """Helper that applies the named activation to the input.

    Params:
        x: Array for which the activation is to be calculated
        type: Name of the activation function
              (can be linear, sigmoid, relu, softmax);
              note the parameter shadows the builtin ``type`` but is kept
              for interface compatibility
    Returns:
        ndarray: Activation of the input
    Raises:
        ValueError: if ``type`` names an unknown activation
    """
    dispatch = {
        "linear": linear,
        "sigmoid": sigmoid,
        "relu": relu,
        "softmax": softmax,
    }
    try:
        fn = dispatch[type]
    except KeyError:
        raise ValueError('Invalid activation type entered')
    return fn(x)
def activation_derivative(x, name="linear"):
    """Helper for calculating the activation derivative of the input.

    Params:
        x: Array for which the activation derivative is to be calculated
        name: Type of the activation derivative function
            (can be linear, sigmoid, relu). Unlike activation_function,
            "softmax" is NOT supported here: its derivative is a Jacobian,
            not an element-wise array, so requesting it raises ValueError.
            (The previous docstring incorrectly advertised softmax.)
    Returns:
        ndarray: Activation derivative of the input
    Raises:
        ValueError: if name is not one of the supported activations
    """
    if (name == "linear"):
        return delta_linear(x)
    elif (name == "sigmoid"):
        return delta_sigmoid(x)
    elif (name == "relu"):
        return delta_relu(x)
    else:
        raise ValueError('Invalid activation type entered')
| [
"numpy.greater",
"numpy.ones",
"numpy.max",
"numpy.exp",
"numpy.maximum"
] | [((1261, 1277), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (1271, 1277), True, 'import numpy as np\n'), ((368, 378), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (374, 378), True, 'import numpy as np\n'), ((991, 1008), 'numpy.max', 'np.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (997, 1008), True, 'import numpy as np\n'), ((1508, 1524), 'numpy.greater', 'np.greater', (['x', '(0)'], {}), '(x, 0)\n', (1518, 1524), True, 'import numpy as np\n'), ((2053, 2069), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (2060, 2069), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""This file contains code for use with "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2013 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
1, Original link -
refer to https://github.com/AllenDowney/ThinkBayes/blob/master/code/redline_data.py
2, As http://developer.mbta.com/lib/rthr/red.csv and http://developer.mbta.com/lib/rthr/red.json become invalid any more, I have to rewrite the logic.
https://api-v3.mbta.com/docs/swagger/index.html and https://www.mbta.com/developers/v3-api provide REST interface for querying the real schedule.
"""
import csv
import json
import numpy
import os
import sys
import redis
# import urllib3
from urllib import request
from io import TextIOWrapper
from datetime import datetime, time, timedelta
from time import sleep
class Redis(object):
    """Provides access to a Redis instance (originally Redis To Go).

    Connection settings come from the class attributes below; the password
    is read from the REDIS_AUTH environment variable.
    """
    host = 'localhost'  # Redis server hostname
    port = 6379  # default Redis port
    def __init__(self):
        # Fail fast (exit the process) if the password is not configured.
        try:
            password = os.environ['REDIS_AUTH']
        except KeyError:
            print('Environment variable REDIS_AUTH is not set.')
            sys.exit()
        self.r = redis.StrictRedis(host=self.host,
                               port=self.port,
                               password=password,
                               db=0)
    def WriteTrainSpotting(self, timestamp, tripid, seconds, live=True):
        """Writes a trainspotting event to the database.
        timestamp: int seconds since epoch
        tripid: string unique id
        seconds: int how many seconds away the train is
        live: boolean, whether to actually write the data
        """
        dt = datetime.fromtimestamp(timestamp)
        day = dt.date().isoformat()
        print(dt, tripid, seconds, timestamp)
        if live:
            # Schema: 'days' is the set of observed days; each day maps to
            # its trip ids; each trip is a sorted set of observations.
            self.r.sadd('days', day)
            self.r.sadd(day, tripid)
            # NOTE(review): zadd(key, score, member) is the redis-py 2.x
            # calling convention; redis-py >= 3.0 requires
            # zadd(key, {member: score}) -- verify the installed client.
            self.r.zadd(tripid, seconds, timestamp)
    def FindArrivals(self, start_hour=16, end_hour=18):
        """For each trip, find the best estimate of the arrival time.
        start_hour: int 0-24, beginning of observation window
        end_hour: int 0-24, end of window
        Returns: map from string day to unsorted list of arrival datetimes
        """
        days = self.r.smembers('days')
        print(days)
        start_time = time(hour=start_hour)
        end_time = time(hour=end_hour)
        arrival_map = {}
        for day in days:
            tripids = self.r.smembers(day)
            for tripid in tripids:
                pred_dt = self.GetPredictedArrival(tripid)
                pred_time = pred_dt.time()
                # Keep only arrivals inside the observation window.
                if start_time < pred_time < end_time:
                    arrival_map.setdefault(day, []).append(pred_dt)
        return arrival_map
    def GetPredictedArrival(self, tripid):
        """Gets the best predicted arrival time for a given trip.
        tripid: string TripID like R98313D88
        """
        # Earliest stored observation: (timestamp, seconds-to-arrival score).
        pair = self.r.zrange(tripid, 0, 1, withscores=True)
        timestamp, seconds = pair[0]
        # Predicted arrival = observation time + predicted wait.
        pred_ts = float(timestamp) + seconds
        pred_dt = datetime.fromtimestamp(pred_ts)
        return pred_dt
class TrainSpotting(object):
    """Represents one observation of a train."""

    def __init__(self, t):
        # t is one CSV row from the feed; pick out the fields we use.
        self.timestamp = int(t[0])  # seconds since epoch
        self.tripid = t[2]          # unique trip identifier
        self.seconds = int(t[6])    # predicted seconds until arrival
def ReadCsv(url = 'http://developer.mbta.com/lib/rthr/red.csv'):
    """Reads data from the red line.
    Keeps only rows observed at Kendall/MIT for Braintree-bound trains.
    NOTE(review): the default URL is no longer served by the MBTA (see the
    module docstring); callers should pass a working feed URL.
    Returns: list of TrainSpotting objects
    """
    # urllib3.disable_warnings()
    # http = urllib3.PoolManager()
    # fp = http.request('GET', url)
    # Wrap the binary HTTP response so csv.reader receives text lines.
    fp = TextIOWrapper(request.urlopen(url))
    reader = csv.reader(fp)
    tss = []
    for t in reader:
        print(t)
        # t[5] is presumably the stop name, t[3] the destination -- verify
        # against the feed schema.
        if t[5] != 'Kendall/MIT': continue
        if t[3] != 'Braintree': continue
        ts = TrainSpotting(t)
        tss.append(ts)
    fp.close()
    return tss
def ReadJson():
    """Fetches and prints the red line JSON feed.
    NOTE(review): this URL is no longer served by the MBTA (see the module
    docstring); also note this only prints the parsed object and returns
    None.
    """
    url = 'http://developer.mbta.com/lib/rthr/red.json'
    # urllib3.disable_warnings()
    # http = urllib3.PoolManager()
    # json_text = http.request('GET', url)
    json_text = request.urlopen(url).read()
    json_obj = json.loads(json_text)
    print(json_obj)
def ReadAndStore(red):
    """Fetch the current red line feed and record every train spotting.

    red: Redis object used for storage
    """
    for spotting in ReadCsv():
        red.WriteTrainSpotting(spotting.timestamp, spotting.tripid,
                               spotting.seconds)
def Loop(red, start_time, end_time, delay=60):
    """Collects data from start_time until end_time.

    red: Redis object to store data
    start_time: datetime
    end_time: datetime
    delay: time to sleep between collections, in seconds
    """
    # Wait until the collection window opens.
    if datetime.now() < start_time:
        remaining = start_time - datetime.now()
        print('Sleeping', remaining)
        sleep(remaining.total_seconds())
    # Poll the feed until the window closes.
    while datetime.now() < end_time:
        print('Collecting')
        ReadAndStore(red)
        sleep(delay)
def TodayAt(hour):
    """Makes a datetime object with today's date at the given whole hour.

    hour: int 0-24
    """
    return datetime.combine(datetime.now(), time(hour=hour))
def YesterdayAt(hour):
    """Makes a datetime object with yesterday's date at the given whole hour.

    hour: int 0-24
    """
    target_day = datetime.now() - timedelta(days=1)
    return datetime.combine(target_day, time(hour=hour))
def GetInterarrivals(arrival_map):
    """Finds all interarrival times in the arrival map.

    arrival_map: map from string day to unsorted list of arrival datetimes

    Returns: list of float interarrival times in seconds
    """
    interarrival_seconds = []
    # Bug fix: dict.iteritems() does not exist in Python 3 and this file
    # otherwise targets Python 3 (print(), urllib.request) -- use items().
    for day, arrivals in sorted(arrival_map.items()):
        print(day, len(arrivals))
        # Sort in place so consecutive differences are true interarrivals.
        arrivals.sort()
        diffs = numpy.diff(arrivals)
        diffs = [diff.total_seconds() for diff in diffs]
        interarrival_seconds.extend(diffs)
    return interarrival_seconds
def main(script, command='collect'):
    """Entry point.

    script: script name (sys.argv[0])
    command: 'collect' polls the feed into Redis between yesterday 4pm and
        today 6pm; 'report' prints the collected interarrival times.
    """
    red = Redis()
    if command == 'collect':
        # start = TodayAt(16)
        # end = TodayAt(18)
        start = YesterdayAt(16)
        # print("start: ", start)
        end = TodayAt(18)
        # print("end: ", end)
        print(start, end)
        Loop(red, start, end)
    elif command == 'report':
        arrival_map = red.FindArrivals()
        interarrivals = GetInterarrivals(arrival_map)
        print(repr(interarrivals))
if __name__ == '__main__':
    # Forward command-line args: main(script_name[, command]).
    main(*sys.argv)
"json.loads",
"datetime.datetime.fromtimestamp",
"datetime.time",
"numpy.diff",
"time.sleep",
"datetime.datetime.now",
"redis.StrictRedis",
"sys.exit",
"datetime.timedelta",
"csv.reader",
"urllib.request.urlopen"
] | [((3707, 3721), 'csv.reader', 'csv.reader', (['fp'], {}), '(fp)\n', (3717, 3721), False, 'import csv\n'), ((4195, 4216), 'json.loads', 'json.loads', (['json_text'], {}), '(json_text)\n', (4205, 4216), False, 'import json\n'), ((5096, 5110), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5108, 5110), False, 'from datetime import datetime, time, timedelta\n'), ((5295, 5309), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5307, 5309), False, 'from datetime import datetime, time, timedelta\n'), ((1170, 1244), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'self.host', 'port': 'self.port', 'password': 'password', 'db': '(0)'}), '(host=self.host, port=self.port, password=password, db=0)\n', (1187, 1244), False, 'import redis\n'), ((1697, 1730), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['timestamp'], {}), '(timestamp)\n', (1719, 1730), False, 'from datetime import datetime, time, timedelta\n'), ((2357, 2378), 'datetime.time', 'time', ([], {'hour': 'start_hour'}), '(hour=start_hour)\n', (2361, 2378), False, 'from datetime import datetime, time, timedelta\n'), ((2398, 2417), 'datetime.time', 'time', ([], {'hour': 'end_hour'}), '(hour=end_hour)\n', (2402, 2417), False, 'from datetime import datetime, time, timedelta\n'), ((3129, 3160), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['pred_ts'], {}), '(pred_ts)\n', (3151, 3160), False, 'from datetime import datetime, time, timedelta\n'), ((3672, 3692), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (3687, 3692), False, 'from urllib import request\n'), ((4716, 4730), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4728, 4730), False, 'from datetime import datetime, time, timedelta\n'), ((4867, 4881), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4879, 4881), False, 'from datetime import datetime, time, timedelta\n'), ((4956, 4968), 'time.sleep', 'sleep', (['delay'], {}), '(delay)\n', (4961, 4968), False, 
'from time import sleep\n'), ((5144, 5159), 'datetime.time', 'time', ([], {'hour': 'hour'}), '(hour=hour)\n', (5148, 5159), False, 'from datetime import datetime, time, timedelta\n'), ((5332, 5349), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5341, 5349), False, 'from datetime import datetime, time, timedelta\n'), ((5389, 5404), 'datetime.time', 'time', ([], {'hour': 'hour'}), '(hour=hour)\n', (5393, 5404), False, 'from datetime import datetime, time, timedelta\n'), ((5801, 5821), 'numpy.diff', 'numpy.diff', (['arrivals'], {}), '(arrivals)\n', (5811, 5821), False, 'import numpy\n'), ((4152, 4172), 'urllib.request.urlopen', 'request.urlopen', (['url'], {}), '(url)\n', (4167, 4172), False, 'from urllib import request\n'), ((4773, 4787), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4785, 4787), False, 'from datetime import datetime, time, timedelta\n'), ((1133, 1143), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1141, 1143), False, 'import sys\n')] |
#!/usr/bin/env python
# coding: utf-8
# demo
"""
Author: <NAME>
Email: <EMAIL>
Create_Date: 2019/05/21
"""
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
torch.backends.cudnn.deterministic = True
torch.manual_seed(123)
import os, argparse, sys
import numpy as np
import glob
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import warnings
warnings.filterwarnings("ignore")
from PIL import Image
sys.path.append('models')
import DepthNet
# =======================
# demo
# =======================
def demo(net, args):
    """Run depth inference on every image in ``args.data_dir``.

    For each image: resize to the network input size, normalize with the
    ImageNet statistics, forward through ``net``, min-max normalize the
    predicted depth to [0, 255], resize back to the original resolution and
    save a colormapped PNG into ``args.result_dir``.

    Arguments:
        net: the (CUDA, DataParallel-wrapped) depth estimation network.
        args: parsed CLI namespace with `data_dir`, `result_dir`, `img_size`.
    """
    data_dir = args.data_dir
    img_transform = transforms.Compose([
        transforms.ToTensor(),
        # ImageNet mean/std normalization expected by the pretrained backbone
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    for im in os.listdir(data_dir):
        im_dir = os.path.join(data_dir, im)
        print('Processing img: {}'.format(im_dir))
        # Read image; remember the original size so the predicted depth map
        # can be resized back at the end.
        img = Image.open(im_dir).convert('RGB')
        ori_width, ori_height = img.size
        int_width = args.img_size[0]
        int_height = args.img_size[1]
        # Image.LANCZOS is the same filter as Image.ANTIALIAS, which was
        # removed in Pillow 10.
        img = img.resize((int_width, int_height), Image.LANCZOS)
        tensor_img = img_transform(img)
        # forward -- torch.autograd.Variable(..., volatile=True) is
        # deprecated since PyTorch 0.4; torch.no_grad() replaces it.
        with torch.no_grad():
            input_img = tensor_img.cuda().unsqueeze(0)
            output = net(input_img)
        # Min-max normalize the depth map to [0, 255] for visualization
        depth = output.squeeze().cpu().data.numpy()
        min_d, max_d = depth.min(), depth.max()
        if max_d > min_d:
            depth_norm = (depth - min_d) / (max_d - min_d) * 255
        else:
            # Constant depth map: avoid division by zero
            depth_norm = np.zeros_like(depth)
        depth_norm = depth_norm.astype(np.uint8)
        image_pil = Image.fromarray(depth_norm)
        output_dir = os.path.join(args.result_dir, im)
        image_pil = image_pil.resize((ori_width, ori_height), Image.BILINEAR)
        plt.imsave(output_dir, np.asarray(image_pil), cmap='inferno')
if __name__ == '__main__':
    # Command-line entry point: parse options, load the checkpoint and run
    # the demo over all images in --data_dir.
    parser = argparse.ArgumentParser(description='MRDP Testing/Evaluation')
    # NOTE(review): `type=list` on argparse splits a string argument into
    # characters; the default [448, 448] only works because the flag is
    # never passed on the command line -- verify before exposing this flag.
    parser.add_argument('--img_size', default=[448, 448], type=list, help='Image size of network input')
    parser.add_argument('--data_dir', default='examples', type=str, help='Data path')
    parser.add_argument('--result_dir', default='demo_results', type=str, help='Directory for saving results, default: demo_results')
    parser.add_argument('--gpu_id', default=0, type=int, help='GPU id, default:0')
    args = parser.parse_args()
    args.checkpoint = 'model.pth.tar'
    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)
    gpu_id = args.gpu_id
    # NOTE(review): torch.cuda.device(...) used as a bare call (not a
    # context manager) has no effect; device_ids=[0] below is what actually
    # pins the GPU -- confirm intent.
    torch.cuda.device(gpu_id)
    net = DepthNet.DepthNet()
    net = torch.nn.DataParallel(net, device_ids=[0]).cuda()
    checkpoint = torch.load(args.checkpoint)
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    print('Begin to test ...')
    with torch.no_grad():
        demo(net, args)
    print('Finished!')
| [
"matplotlib.pyplot.switch_backend",
"sys.path.append",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"torch.cuda.device",
"numpy.asarray",
"torchvision.transforms.ToTensor",
"DepthNet.DepthNet",
"torchvision.transforms.Normalize",
"warnings.filterwarnings",
"torch.manual_seed",
... | [((271, 293), 'torch.manual_seed', 'torch.manual_seed', (['(123)'], {}), '(123)\n', (288, 293), False, 'import torch\n'), ((383, 408), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (401, 408), True, 'import matplotlib.pyplot as plt\n'), ((425, 458), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (448, 458), False, 'import warnings\n'), ((482, 507), 'sys.path.append', 'sys.path.append', (['"""models"""'], {}), "('models')\n", (497, 507), False, 'import os, argparse, sys\n'), ((803, 823), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (813, 823), False, 'import os, argparse, sys\n'), ((1902, 1964), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MRDP Testing/Evaluation"""'}), "(description='MRDP Testing/Evaluation')\n", (1925, 1964), False, 'import os, argparse, sys\n'), ((2555, 2580), 'torch.cuda.device', 'torch.cuda.device', (['gpu_id'], {}), '(gpu_id)\n', (2572, 2580), False, 'import torch\n'), ((2592, 2611), 'DepthNet.DepthNet', 'DepthNet.DepthNet', ([], {}), '()\n', (2609, 2611), False, 'import DepthNet\n'), ((2689, 2716), 'torch.load', 'torch.load', (['args.checkpoint'], {}), '(args.checkpoint)\n', (2699, 2716), False, 'import torch\n'), ((842, 868), 'os.path.join', 'os.path.join', (['data_dir', 'im'], {}), '(data_dir, im)\n', (854, 868), False, 'import os, argparse, sys\n'), ((1627, 1654), 'PIL.Image.fromarray', 'Image.fromarray', (['depth_norm'], {}), '(depth_norm)\n', (1642, 1654), False, 'from PIL import Image\n'), ((1677, 1710), 'os.path.join', 'os.path.join', (['args.result_dir', 'im'], {}), '(args.result_dir, im)\n', (1689, 1710), False, 'import os, argparse, sys\n'), ((2455, 2486), 'os.path.exists', 'os.path.exists', (['args.result_dir'], {}), '(args.result_dir)\n', (2469, 2486), False, 'import os, argparse, sys\n'), ((2496, 2524), 'os.makedirs', 'os.makedirs', (['args.result_dir'], {}), 
'(args.result_dir)\n', (2507, 2524), False, 'import os, argparse, sys\n'), ((2823, 2838), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2836, 2838), False, 'import torch\n'), ((683, 704), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (702, 704), True, 'import torchvision.transforms as transforms\n'), ((714, 780), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (734, 780), True, 'import torchvision.transforms as transforms\n'), ((1820, 1841), 'numpy.asarray', 'np.asarray', (['image_pil'], {}), '(image_pil)\n', (1830, 1841), True, 'import numpy as np\n'), ((2622, 2664), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {'device_ids': '[0]'}), '(net, device_ids=[0])\n', (2643, 2664), False, 'import torch\n'), ((956, 974), 'PIL.Image.open', 'Image.open', (['im_dir'], {}), '(im_dir)\n', (966, 974), False, 'from PIL import Image\n')] |
import os
import numpy as np
import torch
import stanza
import re
from tqdm import tqdm
from pytorch_pretrained_bert import BertModel, BertTokenizer
from text.dependency_relations import deprel_labels_to_id
def read_lexicon(lex_path):
    """Parse a whitespace-separated pronunciation lexicon file.

    Each line is ``WORD PHONE1 PHONE2 ...``. Words are lowercased; when a
    word appears more than once, the first pronunciation wins.

    Arguments:
        lex_path (str): path to the lexicon file.

    Returns:
        dict: lowercased word -> list of phone strings.
    """
    lexicon = {}
    with open(lex_path) as fh:
        for raw_line in fh:
            fields = re.split(r"\s+", raw_line.strip("\n"))
            key = fields[0].lower()
            # setdefault keeps the first pronunciation seen for a word
            lexicon.setdefault(key, fields[1:])
    return lexicon
# Mapping from full-width (Chinese) punctuation to its ASCII equivalent,
# used when converting Mandarin text to phone sequences.
_puncts_dict = {",":",", "。":".", "?":"?", "!":"!"}
# Grapheme-to-phoneme lexicons, loaded once at import time (relative paths:
# the script must be run from the repository root).
lexicon_en = read_lexicon("lexicon/librispeech-lexicon.txt")
lexicon_py = read_lexicon("lexicon/pinyin-lexicon-r.txt")
def get_text(tokenizer, bert, nlp, lang, phone_text, raw_text):
    """Compute all text-side features for one utterance.

    Runs BERT over the raw text, averages subword embeddings into word
    embeddings, dependency-parses the sentence with stanza, and converts
    the phone text into a phoneme string plus a phone-to-word index map.

    Arguments:
        tokenizer: BERT tokenizer.
        bert: BERT model (layer-11 hidden states are used).
        nlp: stanza pipeline for dependency parsing.
        lang (str): "zh" for Mandarin, anything else for English.
        phone_text (str): text to convert to phones.
        raw_text (str): raw sentence fed to BERT / the parser.

    Returns:
        tuple: (phones, word_emb, phone2word_idx, nodes_list1, nodes_list2,
        deprels_id).
    """
    pieces = tokenizer.tokenize(raw_text)
    piece_ids = torch.tensor([tokenizer.convert_tokens_to_ids(pieces)])
    bert.eval()
    with torch.no_grad():
        # layer-11 hidden states of the single sentence in the batch
        hidden = bert(piece_ids)[0][11][0]
    words, nodes_list1, nodes_list2, deprels_id = get_dependencyparsing(nlp, raw_text)
    # print(words, nodes_list1, nodes_list2, deprels_id)
    word_emb = get_word_embedding(pieces, words, hidden)
    parse = preprocess_mandarin if lang == 'zh' else preprocess_english
    phone, phone2word_idx = parse(phone_text, words)
    return phone, word_emb, phone2word_idx, nodes_list1, nodes_list2, deprels_id
def preprocess_english(text, refer_words):
    """Convert English text to a phone sequence aligned to `refer_words`.

    Tokens are looked up in the LibriSpeech lexicon; punctuation passes
    through as-is. Consumed characters are accumulated and compared against
    `refer_words` so that every phone is tagged with the index of the word
    it belongs to.

    Arguments:
        text (str): text to phonemize.
        refer_words (list): word sequence the phones must align to.

    Returns:
        tuple: ("{P1 P2 ...}" phone string, list of word indices, one per
        phone).
    """
    phones = []
    phone2word_idx = []
    tokens = re.split(r"([,:;.()\-\?\!\s+])", text)
    word_idx = 0
    pending = ''
    for token in tokens:
        if token in (" ", ""):
            continue
        lowered = token.lower()
        if lowered in lexicon_en:
            token_phones = lexicon_en[lowered]
            phones.extend(token_phones)
            phone2word_idx.extend([word_idx] * len(token_phones))
        elif token in list(",:;.()?!-"):
            phones.append(token)
            phone2word_idx.append(word_idx)
        # Accumulate characters until they spell the current reference word,
        # then advance to the next word.
        pending += token
        if refer_words[word_idx] == pending:
            word_idx += 1
            pending = ''
    assert word_idx == len(refer_words)
    assert len(phones) == len(phone2word_idx)
    return "{" + " ".join(phones) + "}", phone2word_idx
def preprocess_mandarin(text, refer_words):
    """Convert pinyin text to a phone sequence aligned to `refer_words`.

    Each pinyin syllable is looked up in the pinyin lexicon and every
    resulting phone is tagged with the index of the word it belongs to.
    Chinese punctuation in `refer_words` is mapped to its ASCII equivalent
    and emitted as a single "phone".

    Arguments:
        text (str): pinyin syllable sequence to phonemize.
        refer_words (list): word/character sequence the phones must align to.

    Returns:
        tuple: ("{P1 P2 ...}" phone string, list of word indices, one per
        phone).
    """
    phones = []
    phone2word_idx = []
    pinyins = re.split(r"([,./\-\?\!\s+])", text)
    index = 0       # position in refer_words currently being filled
    tmpcnt = 0      # syllables consumed so far for refer_words[index]
    for p in pinyins:
        if p in lexicon_py:
            phones += lexicon_py[p]
            phone2word_idx.extend([index] * len(lexicon_py[p]))
            tmpcnt += 1
            # A multi-character word consumes one syllable per character;
            # advance only once the whole word is covered.
            if tmpcnt == len(refer_words[index]):
                index += 1
                tmpcnt = 0
        elif index < len(refer_words) and refer_words[index] in list(",。?!"):
            # Punctuation is not in the pinyin text; emit it from refer_words
            phones.append(_puncts_dict[refer_words[index]])
            phone2word_idx.extend([index])
            index += 1
        else:
            continue
    assert index == len(refer_words)
    assert len(phones) == len(phone2word_idx)
    phones = "{" + " ".join(phones) + "}"
    # print("Raw Text Sequence: {}".format(text))
    # print("Phoneme Sequence: {}".format(phones))
    # print("Phone_word_id: ", phone2word_idx)
    return phones, phone2word_idx
def get_dependencyparsing(nlp, sen):
    """Dependency-parse a sentence and return its words and edge lists.

    Arguments:
        nlp: stanza pipeline producing `doc.sentences[*].words` with
            `.text`, `.head` (1-based, 0 = root) and `.deprel`.
        sen (str): the sentence to parse.

    Returns:
        tuple: (words, List1, List2, deprels_id) where List1[i] is a child
        word index, List2[i] its 0-based head index, and deprels_id[i] the
        numeric dependency-relation label; root edges are excluded.
    """
    doc = nlp(sen)
    words = []
    heads = []
    deprels = []
    for sent in doc.sentences:
        for word in sent.words:
            words.append(word.text)
            heads.append(word.head)
            deprels.append(word.deprel)
    # Build the dependency edges in a single pass, skipping the root
    # (head == 0). Heads are 1-based, so subtract 1 for a 0-based index.
    List1, List2, deprels_id = [], [], []
    for child, (head, rel) in enumerate(zip(heads, deprels)):
        if head != 0:
            List1.append(child)
            List2.append(head - 1)
            deprels_id.append(deprel_labels_to_id[rel])
    return words, List1, List2, deprels_id
def get_word_embedding(subwords, words, bert_emb):
    """Average subword embeddings into one embedding per word.

    Subword pieces are concatenated (stripping WordPiece "#" markers and
    mapping "[UNK]" to a single "_") until their combined length matches
    the current word; the embeddings of that span are then mean-pooled.

    Arguments:
        subwords (list[str]): BERT subword tokens.
        words (list[str]): target word sequence.
        bert_emb (Tensor): per-subword embeddings, shape (len(subwords), D).

    Returns:
        Tensor: per-word embeddings, shape (len(words), D).
    """
    word_emb = torch.zeros(len(words), bert_emb.shape[1])
    word_idx = 0
    span_start = 0
    pieces_so_far = ""
    for pos, piece in enumerate(subwords):
        pieces_so_far += piece.replace("#", "").replace("[UNK]", "_")
        if len(pieces_so_far) == len(words[word_idx]):
            # Span [span_start, pos] covers the current word: mean-pool it
            word_emb[word_idx] = torch.mean(bert_emb[span_start:pos + 1], axis=0)
            word_idx += 1
            pieces_so_far = ""
            span_start = pos + 1
    assert word_idx == len(words)
    return word_emb
def generate_english(filepath, outpath, datapath):
    """Precompute text features for an English dataset manifest.

    Reads `filepath` (lines of ``index|speaker|phone_text|raw_text``), and
    for each entry saves word embeddings, phone-to-word indices and the
    dependency graph as .npy files under `datapath`, then writes a new
    manifest with the phonemized text to `outpath`.

    NOTE(review): near-duplicate of `generate_chinese` apart from the
    language/model paths -- candidates for a shared helper.
    """
    print("Processing {}...".format(filepath))
    # stanza.download("en", "path")
    # Hard-coded pretrained-model paths; adjust for your environment.
    nlp = stanza.Pipeline("en", "/apdcephfs/private_yatsenzhou/pretrained/stanza/stanza_en")
    # https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz
    # https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
    bert = BertModel.from_pretrained("/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-uncased")
    tokenizer = BertTokenizer.from_pretrained("/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-uncased/bert-base-uncased-vocab.txt")
    os.makedirs((os.path.join(datapath, "wordemb")), exist_ok=True)
    os.makedirs((os.path.join(datapath, "p2widx")), exist_ok=True)
    os.makedirs((os.path.join(datapath, "depgraph")), exist_ok=True)
    out = []
    with open(filepath, "r", encoding='utf-8') as f:
        for line in tqdm(f):
            parts = line.strip().strip('\ufeff').split('|')  # remove BOM
            index, speaker, phone_text, raw_text = parts[0], parts[1], parts[2], parts[3]
            phone, word_emb, phone2word_idx, nodes_list1, nodes_list2, deprels_id = get_text(tokenizer, bert, nlp, "en", phone_text, raw_text)
            out.append(index + "|" + speaker + "|" + phone + "|" + phone_text + "\n")
            # Persist each feature as a per-utterance .npy file
            wordemb_filename = "{}-wordemb-{}.npy".format(speaker, index)
            np.save(os.path.join(datapath, "wordemb", wordemb_filename), word_emb.cpu().detach().numpy())
            p2widx_filename = "{}-p2widx-{}.npy".format(speaker, index)
            np.save(os.path.join(datapath, "p2widx", p2widx_filename), np.asarray(phone2word_idx))
            depgraph_filename = "{}-depgraph-{}.npy".format(speaker, index)
            np.save(os.path.join(datapath, "depgraph", depgraph_filename), np.asarray([nodes_list1, nodes_list2, deprels_id]))
    with open(outpath, "w", encoding="utf-8") as f:
        f.write("".join(out))
def generate_chinese(filepath, outpath, datapath):
    """Precompute text features for a Mandarin dataset manifest.

    Reads `filepath` (lines of ``index|speaker|phone_text|raw_text``), and
    for each entry saves word embeddings, phone-to-word indices and the
    dependency graph as .npy files under `datapath`, then writes a new
    manifest with the phonemized text to `outpath`.
    """
    print("Processing {}...".format(filepath))
    # stanza.download("zh", "path")
    # Hard-coded pretrained-model paths; adjust for your environment.
    nlp = stanza.Pipeline("zh", "/apdcephfs/private_yatsenzhou/pretrained/stanza/stanza_zh")
    # https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz
    # https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt
    bert = BertModel.from_pretrained("/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-chinese")
    tokenizer = BertTokenizer.from_pretrained("/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-chinese/bert-base-chinese-vocab.txt")
    os.makedirs((os.path.join(datapath, "wordemb")), exist_ok=True)
    os.makedirs((os.path.join(datapath, "p2widx")), exist_ok=True)
    os.makedirs((os.path.join(datapath, "depgraph")), exist_ok=True)
    out = []
    with open(filepath, "r", encoding='utf-8') as f:
        for line in tqdm(f):
            parts = line.strip().strip('\ufeff').split('|')  # remove BOM
            index, speaker, phone_text, raw_text = parts[0], parts[1], parts[2], parts[3]
            phone, word_emb, phone2word_idx, nodes_list1, nodes_list2, deprels_id = get_text(tokenizer, bert, nlp, "zh", phone_text, raw_text)
            out.append(index + "|" + speaker + "|" + phone + "|" + phone_text + "\n")
            # Persist each feature as a per-utterance .npy file
            wordemb_filename = "{}-wordemb-{}.npy".format(speaker, index)
            np.save(os.path.join(datapath, "wordemb", wordemb_filename), word_emb.cpu().detach().numpy())
            p2widx_filename = "{}-p2widx-{}.npy".format(speaker, index)
            np.save(os.path.join(datapath, "p2widx", p2widx_filename), np.asarray(phone2word_idx))
            depgraph_filename = "{}-depgraph-{}.npy".format(speaker, index)
            np.save(os.path.join(datapath, "depgraph", depgraph_filename), np.asarray([nodes_list1, nodes_list2, deprels_id]))
    with open(outpath, "w", encoding="utf-8") as f:
        f.write("".join(out))
if __name__ == '__main__':
    # Batch entry point: precompute features for the DataBaker (Mandarin)
    # and LJSpeech (English) train/val/test splits. Paths are hard-coded.
    generate_chinese("preprocessed_data/DataBaker/train_grapheme.txt", "preprocessed_data/DataBaker/train.txt", "/data/training_data/preprocessed_data/DataBaker_16k/")
    generate_chinese("preprocessed_data/DataBaker/val_grapheme.txt", "preprocessed_data/DataBaker/val.txt", "/data/training_data/preprocessed_data/DataBaker_16k/")
    generate_chinese("preprocessed_data/DataBaker/test_grapheme.txt", "preprocessed_data/DataBaker/test.txt", "/data/training_data/preprocessed_data/DataBaker_16k/")
    generate_english("preprocessed_data/LJSpeech/train_grapheme.txt", "preprocessed_data/LJSpeech/train.txt", "/data/training_data/preprocessed_data/LJSpeech/")
    generate_english("preprocessed_data/LJSpeech/val_grapheme.txt","preprocessed_data/LJSpeech/val.txt", "/data/training_data/preprocessed_data/LJSpeech/")
    generate_english("preprocessed_data/LJSpeech/test_grapheme.txt", "preprocessed_data/LJSpeech/test.txt", "/data/training_data/preprocessed_data/LJSpeech/")
| [
"re.split",
"pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"pytorch_pretrained_bert.BertModel.from_pretrained",
"torch.mean",
"tqdm.tqdm",
"os.path.join",
"numpy.asarray",
"torch.no_grad",
"stanza.Pipeline"
] | [((1533, 1574), 're.split', 're.split', (['"""([,:;.()\\\\-\\\\?\\\\!\\\\s+])"""', 'text'], {}), "('([,:;.()\\\\-\\\\?\\\\!\\\\s+])', text)\n", (1541, 1574), False, 'import re\n'), ((2441, 2479), 're.split', 're.split', (['"""([,./\\\\-\\\\?\\\\!\\\\s+])"""', 'text'], {}), "('([,./\\\\-\\\\?\\\\!\\\\s+])', text)\n", (2449, 2479), False, 'import re\n'), ((4704, 4790), 'stanza.Pipeline', 'stanza.Pipeline', (['"""en"""', '"""/apdcephfs/private_yatsenzhou/pretrained/stanza/stanza_en"""'], {}), "('en',\n '/apdcephfs/private_yatsenzhou/pretrained/stanza/stanza_en')\n", (4719, 4790), False, 'import stanza\n'), ((4967, 5064), 'pytorch_pretrained_bert.BertModel.from_pretrained', 'BertModel.from_pretrained', (['"""/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-uncased"""'], {}), "(\n '/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-uncased')\n", (4992, 5064), False, 'from pytorch_pretrained_bert import BertModel, BertTokenizer\n'), ((5076, 5210), 'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-uncased/bert-base-uncased-vocab.txt"""'], {}), "(\n '/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-uncased/bert-base-uncased-vocab.txt'\n )\n", (5105, 5210), False, 'from pytorch_pretrained_bert import BertModel, BertTokenizer\n'), ((6682, 6768), 'stanza.Pipeline', 'stanza.Pipeline', (['"""zh"""', '"""/apdcephfs/private_yatsenzhou/pretrained/stanza/stanza_zh"""'], {}), "('zh',\n '/apdcephfs/private_yatsenzhou/pretrained/stanza/stanza_zh')\n", (6697, 6768), False, 'import stanza\n'), ((6945, 7042), 'pytorch_pretrained_bert.BertModel.from_pretrained', 'BertModel.from_pretrained', (['"""/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-chinese"""'], {}), "(\n '/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-chinese')\n", (6970, 7042), False, 'from pytorch_pretrained_bert import BertModel, BertTokenizer\n'), ((7054, 7188), 
'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-chinese/bert-base-chinese-vocab.txt"""'], {}), "(\n '/apdcephfs/private_yatsenzhou/pretrained/bert/bert-base-chinese/bert-base-chinese-vocab.txt'\n )\n", (7083, 7188), False, 'from pytorch_pretrained_bert import BertModel, BertTokenizer\n'), ((911, 926), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (924, 926), False, 'import torch\n'), ((5219, 5252), 'os.path.join', 'os.path.join', (['datapath', '"""wordemb"""'], {}), "(datapath, 'wordemb')\n", (5231, 5252), False, 'import os\n'), ((5287, 5319), 'os.path.join', 'os.path.join', (['datapath', '"""p2widx"""'], {}), "(datapath, 'p2widx')\n", (5299, 5319), False, 'import os\n'), ((5354, 5388), 'os.path.join', 'os.path.join', (['datapath', '"""depgraph"""'], {}), "(datapath, 'depgraph')\n", (5366, 5388), False, 'import os\n'), ((5493, 5500), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (5497, 5500), False, 'from tqdm import tqdm\n'), ((7197, 7230), 'os.path.join', 'os.path.join', (['datapath', '"""wordemb"""'], {}), "(datapath, 'wordemb')\n", (7209, 7230), False, 'import os\n'), ((7265, 7297), 'os.path.join', 'os.path.join', (['datapath', '"""p2widx"""'], {}), "(datapath, 'p2widx')\n", (7277, 7297), False, 'import os\n'), ((7332, 7366), 'os.path.join', 'os.path.join', (['datapath', '"""depgraph"""'], {}), "(datapath, 'depgraph')\n", (7344, 7366), False, 'import os\n'), ((7471, 7478), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (7475, 7478), False, 'from tqdm import tqdm\n'), ((4395, 4437), 'torch.mean', 'torch.mean', (['bert_emb[last_i:i + 1]'], {'axis': '(0)'}), '(bert_emb[last_i:i + 1], axis=0)\n', (4405, 4437), False, 'import torch\n'), ((5991, 6042), 'os.path.join', 'os.path.join', (['datapath', '"""wordemb"""', 'wordemb_filename'], {}), "(datapath, 'wordemb', wordemb_filename)\n", (6003, 6042), False, 'import os\n'), ((6170, 6219), 'os.path.join', 'os.path.join', 
(['datapath', '"""p2widx"""', 'p2widx_filename'], {}), "(datapath, 'p2widx', p2widx_filename)\n", (6182, 6219), False, 'import os\n'), ((6221, 6247), 'numpy.asarray', 'np.asarray', (['phone2word_idx'], {}), '(phone2word_idx)\n', (6231, 6247), True, 'import numpy as np\n'), ((6346, 6399), 'os.path.join', 'os.path.join', (['datapath', '"""depgraph"""', 'depgraph_filename'], {}), "(datapath, 'depgraph', depgraph_filename)\n", (6358, 6399), False, 'import os\n'), ((6401, 6451), 'numpy.asarray', 'np.asarray', (['[nodes_list1, nodes_list2, deprels_id]'], {}), '([nodes_list1, nodes_list2, deprels_id])\n', (6411, 6451), True, 'import numpy as np\n'), ((7969, 8020), 'os.path.join', 'os.path.join', (['datapath', '"""wordemb"""', 'wordemb_filename'], {}), "(datapath, 'wordemb', wordemb_filename)\n", (7981, 8020), False, 'import os\n'), ((8148, 8197), 'os.path.join', 'os.path.join', (['datapath', '"""p2widx"""', 'p2widx_filename'], {}), "(datapath, 'p2widx', p2widx_filename)\n", (8160, 8197), False, 'import os\n'), ((8199, 8225), 'numpy.asarray', 'np.asarray', (['phone2word_idx'], {}), '(phone2word_idx)\n', (8209, 8225), True, 'import numpy as np\n'), ((8324, 8377), 'os.path.join', 'os.path.join', (['datapath', '"""depgraph"""', 'depgraph_filename'], {}), "(datapath, 'depgraph', depgraph_filename)\n", (8336, 8377), False, 'import os\n'), ((8379, 8429), 'numpy.asarray', 'np.asarray', (['[nodes_list1, nodes_list2, deprels_id]'], {}), '([nodes_list1, nodes_list2, deprels_id])\n', (8389, 8429), True, 'import numpy as np\n')] |
import time
import numpy as np
from typing import List, Dict
from .base import BaseInstrument
from zhinst.toolkit.control.node_tree import Parameter
from zhinst.toolkit.interface import LoggerModule
_logger = LoggerModule(__name__)
# Human-readable value mappings for DAQ module node parameters
# (integer node value -> label). Used when building the Parameter
# attributes in `DAQModule._setup`.
MAPPINGS = {
    "edge": {1: "rising", 2: "falling", 3: "both"},
    "eventcount_mode": {0: "sample", 1: "increment"},
    "fft_window": {
        0: "rectangular",
        1: "hann",
        2: "hamming",
        3: "blackman",
        16: "exponential",
        17: "cosine",
        # NOTE(review): duplicate key 17 below silently overwrites "cosine"
        # with "sine" at dict construction time -- confirm which label
        # value 17 should carry and remove the other entry.
        17: "sine",
        18: "cosine_squared",
    },
    "grid_direction": {0: "forward", 1: "reverse", 2: "bidirectional"},
    "grid_mode": {1: "nearest", 2: "linear", 4: "exact"},
    "save_fileformat": {0: "matlab", 1: "csv", 2: "zview", 3: "sxm", 4: "hdf5"},
    "type": {
        0: "continuous",
        1: "edge",
        2: "dio",
        3: "pulse",
        4: "tracking",
        5: "change",
        6: "hardware",
        7: "tracking_pulse",
        8: "eventcount",
    },
}
class DAQModule:
"""Implements a :class:`Data Acquisition Module` for Lock-In instruments.
The data acquisition module is a powerful tool that builds on top of LabOne.
It allows for triggered acquisition of multiple data streams on an
equidistant temporal grid. For more information on how to use the DAQ
Module, have a look at the LabOne Programming Manual.
This base class is overwritten by device specific DAQ classes with
additional signal sources and types. After setup, the nodetree of the module
is retrieved from the API and added to the DAQModule object attributes as
`zhinst-toolkit` :class:`Parameters`.
In a typical measurement using the DAQ Module one would first configure its
trigger and grid settings.
>>> # configure a measurement
>>> mfli.daq.fft_window("rectangular")
>>> mfli.daq.type("continuous")
>>> mfli.daq.grid_cols(512)
>>> mfli.daq.grid_rows(10)
The signal streams that are available for acquisition can be listed using
`signals_list(...)`.
>>> # list available signal sources ...
>>> mf.daq.signals_list()
['auxin0', 'demod0', 'demod1', 'imp0']
>>> # ... and according singal types
>>> mf.daq.signals_list("demod1")
['x', 'y', 'r', 'xiy', 'theta', 'frequency', 'auxin0', 'auxin1', 'dio']
To specify which signals should be acquired during the measurement, they
need to be added to the measurement. This is done with the
`signals_add(...)` method. Note that the return value is a string with the
exact node path that will be subscribed to during the measurement. The
string can be used later as a key in the `results` dictionary.
>>> # add signals to the measurement
>>> mf.daq.signals_clear()
>>> signal1 = mf.daq.signals_add("demod1", "r") # specify signal_source and signal_type
>>> signal2 = mf.daq.signals_add("demod1", "theta")
>>> signal3 = mf.daq.signals_add("demod1", "xiy", fft=True)
The measurement is started ...
>>> # start the measurement
>>> mf.daq.measure()
subscribed to: /dev3337/demods/0/sample.r.avg
subscribed to: /dev3337/demods/0/sample.theta.avg
subscribed to: /dev3337/demods/0/sample.xiy.fft.abs.avg
Progress: 0.0%
Progress: 40.0%
...
... and afterwards the results can be found in the `results` attribute of
the :class:`DAQModule`. The values in the dictionary are of type
:class:`DAQResults`.
>>> # retrieve the measurement results
>>> result1 = mf.daq.results[signal1]
>>> result2 = mf.daq.results[signal2]
>>> result3 = mf.daq.results[signal3]
>>> ...
>>> result1
<zhinst.toolkit.control.drivers.base.daq.DAQResult object at 0x0000023B8467D588>
path: /dev3337/demods/0/sample.xiy.fft.abs.avg
value: (10, 511)
frequency: (511,)
See below for details on
:class:`zhinst.toolkit.control.drivers.base.daq.DAQResult`.
Attributes:
signals (list): A list of node strings of signals that are added to the
measurement and will be subscribed to before data acquisition.
results (dict): A dictionary with signal strings as keys and
:class:`zhinst.toolkit.control.drivers.base.daq.DAQResult` objects
as values that hold all the data of the measurement result.
"""
    def __init__(self, parent: BaseInstrument, clk_rate: float = 60e6) -> None:
        """Initialize the DAQ module wrapper.

        Arguments:
            parent (BaseInstrument): The instrument this DAQ module belongs
                to; used for the device serial and streaming-node discovery.

        Keyword Arguments:
            clk_rate (float): Instrument clock rate in Hz, forwarded to the
                DAQResult objects for timestamp conversion. (default: 60e6)
        """
        self._parent = parent
        self._module = None  # set by `_setup()` once connected to LabOne
        self._signals = []  # node paths subscribed to on `measure()`
        self._results = {}  # signal node -> DAQResult after a measurement
        self._clk_rate = clk_rate
        # the `streaming_nodes` are used as all available signal sources for the data acquisition
        self._signal_sources = self._parent._get_streamingnodes()
        # Per signal-source-prefix mapping of user-facing type names to
        # node path suffixes (see `_parse_signal_type`).
        # NOTE(review): "auxin1" maps to ".Auxin0" (and "auxin2" to
        # ".Auxin1") -- looks like a deliberate 1-based-to-0-based shift,
        # but confirm against the device node naming.
        self._signal_types = {
            "auxin": {"auxin1": ".Auxin0", "auxin2": ".Auxin1"},
            "demod": {
                "x": ".X",
                "y": ".Y",
                "r": ".R",
                "xiy": ".xiy",
                "theta": ".Theta",
                "frequency": ".Frequency",
                "auxin0": ".AuxIn0",
                "auxin1": ".AuxIn1",
                "dio": ".Dio",
            },
            "imp": {
                "real": ".RealZ",
                "imag": ".ImagZ",
                "abs": ".AbsZ",
                "phase": ".PhaseZ",
                "frequency": ".Frequency",
                "param0": ".Param0",
                "param1": ".Param1",
            },
            "cnt": {"": ".Value"},
            "pid": {"": ""},
        }
        # Trigger mappings are populated by device-specific subclasses.
        self._trigger_signals = {}
        self._trigger_types = {}
def _setup(self) -> None:
self._module = self._parent._controller.connection.daq_module
# add all parameters from nodetree
nodetree = self._module.get_nodetree("*")
for k, v in nodetree.items():
name = k[1:].replace("/", "_")
mapping = MAPPINGS[name] if name in MAPPINGS.keys() else None
setattr(self, name, Parameter(self, v, device=self, mapping=mapping))
self._init_settings()
def _set(self, *args, **kwargs):
if kwargs.get("sync", False):
_logger.warning(
"The daq module does not support the `sync` flag."
)
if self._module is None:
_logger.error(
"This DAQ is not connected to a dataAcquisitionModule!",
_logger.ExceptionTypes.ToolkitConnectionError,
)
return self._module.set(*args, device=self._parent.serial)
def _get(self, *args, valueonly: bool = True):
if self._module is None:
_logger.error(
"This DAQ is not connected to a dataAcquisitionModule!",
_logger.ExceptionTypes.ToolkitConnectionError,
)
data = self._module.get(*args, device=self._parent.serial)
return list(data.values())[0][0] if valueonly else data
def _init_settings(self):
self._set("preview", 1)
self._set("historylength", 10)
self._set("bandwidth", 0)
self._set("hysteresis", 0.01)
self._set("level", 0.1)
self._set("clearhistory", 1)
self._set("bandwidth", 0)
def trigger_list(self, source=None) -> List:
"""Returns a list of all the available signal sources for triggering.
Keyword Arguments:
source (str): specifies the signal source to return signal types
(default: None)
Returns:
Returns all available trigger sources by default. If the keyword is
specified with one of the trigger sources, all the available trigger
types for the trigger source are returned.
"""
sources = list(self._trigger_signals.keys())
if source is None:
return sources
else:
for signal in self._trigger_types.keys():
if signal in source:
return list(self._trigger_types[signal].keys())
def trigger(self, trigger_source: str, trigger_type: str) -> None:
"""Sets the trigger signal of the *DAQ Module*.
This method can be used to specify the signal used to trigger the data
acquisition. Use the method `trigger_list()` to see the available
trigger signal sources and types.The trigger node can also be set
directly using the module Parameter `triggernode`.
Arguments:
trigger_source (str): A string that specifies the source of the
trigger signal, e.g. "demod0".
trigger_trype (str): A string that specifies the type of the
trigger signal, e.g. "trigin1".
"""
trigger_node = self._parse_trigger(trigger_source, trigger_type)
self._set("/triggernode", trigger_node)
print(f"set trigger node to '{trigger_node}'")
def signals_list(self, source=None) -> List:
"""Returns a list of all the available signal sources for data acquisition.
Keyword Arguments:
source (str): specifies the signal source to return signal types
(default: None)
Returns:
Returns all available signal sources by default. If the keyword is
specified with one of the signal sources, all the available signal
types for the signal source are returned.
"""
sources = list(self._signal_sources.keys())
if source is None:
return sources
else:
for signal in self._signal_types.keys():
if signal in source:
return list(self._signal_types[signal].keys())
else:
return sources
def signals_add(
self,
signal_source: str,
signal_type: str = "",
operation: str = "avg",
fft: bool = False,
complex_selector: str = "abs",
) -> str:
"""Add a signal to the signals list to be subscribed to during measurement.
The specified signal is added to the property *signals* list. On
`measure()`, the *DAQ Module* subscribes to all the signal nodes in the
list.
Arguments:
signal_source (str): The source of the signal, e.g. 'demod0'. See
`signals_list()` for available signals.
Keyword Arguments:
signal_type (str): The type of the signal. Depends on the given
source, e.g. for demod signals the types'X', 'Y', 'R', 'Theta',
... are available. See `signals_list({signal source})` for
available signal types. (default: "")
operation (str): The operation that is performed on the acquired
signal, e.g. the average of data points ('avg'), the standard
deviation of the signal ('std') or single points ('replace').
(default: "avg")
fft (bool): A flag to enable the fourier transform (FFT) of the
acquired signal. (default: False)
complex_selector (str): If the FFT is enabled, this selects the
complex value of the result, e.g. 'abs', 'phase', 'real',
'imag'. (default: "abs")
Returns:
A string with the exact signal node that will be acquired during the
measurement. It can be used as a key in the `results` dict to
retrieve the measurement result corresponding to this signal, e.g.
>>> signal = mfli.daq.signal_add("demod0", "r")
/dev3337/demods/0/sample.r.avg
>>> mfli.daq.measure()
>>> ...
>>> result = mfli.daq.results[signal]
"""
signal_node = self._parse_signals(
signal_source, signal_type, operation, fft, complex_selector
)
if signal_node not in self.signals:
self._signals.append(signal_node)
return signal_node
def signals_clear(self) -> None:
"""Resets the signals list."""
self._signals = []
    def measure(self, verbose: bool = True, timeout: float = 20) -> None:
        """Perform the measurement.

        Starts a measurement and stores the result in `daq.results`. This
        method subscribes to all the paths previously added to
        `daq.signals`, starts the measurement, waits until it is finished
        and then reads back the result.

        Keyword Arguments:
            verbose (bool): A flag to enable or disable console output
                during the measurement. (default: True)
            timeout (float): The measurement will be stopped after the
                timeout. The value is given in seconds. (default: 20)

        Raises:
            TimeoutError: if the measurement is not completed before
                timeout.
        """
        # One-shot acquisition with a fresh history
        self._set("endless", 0)
        self._set("clearhistory", 1)
        for path in self.signals:
            self._module.subscribe(path)
            if verbose:
                print(f"subscribed to: {path}")
        self._module.execute()
        tik = time.time()
        # Poll every 0.5 s until the module reports completion or the
        # timeout elapses (in which case the logger raises TimeoutError).
        while not self._module.finished():
            if verbose:
                print(f"Progress: {(self._module.progress()[0] * 100):.1f}%")
            time.sleep(0.5)
            tok = time.time()
            if tok - tik > timeout:
                _logger.error(
                    f"DAQ Module: Measurement timed out!",
                    _logger.ExceptionTypes.TimeoutError,
                )
        if verbose:
            print("Finished")
        # Read back, stop the module, unsubscribe everything and convert
        # the raw dict into DAQResult objects.
        result = self._module.read(flat=True)
        self._module.finish()
        self._module.unsubscribe("*")
        self._get_result_from_dict(result)
def _parse_signals(
self,
signal_source: str,
signal_type: str,
operation: str,
fft: bool,
complex_selector: str,
) -> str:
signal_node = "/" + self._parent.serial
signal_node += self._parse_signal_source(signal_source)
signal_node += self._parse_signal_type(signal_type, signal_source)
signal_node += self._parse_fft(fft, complex_selector)
signal_node += self._parse_operation(operation)
return signal_node.lower()
def _parse_signal_source(self, source: str) -> str:
source = source.lower()
if source not in self._signal_sources:
_logger.error(
f"Signal source must be in {self._signal_sources.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return self._signal_sources[source]
def _parse_signal_type(self, signal_type: str, signal_source: str) -> str:
signal_source = signal_source.lower()
signal_type = signal_type.lower()
types = {}
for signal in self._signal_types.keys():
if signal in signal_source:
types = self._signal_types[signal]
if signal_type not in types.keys():
_logger.error(
f"Signal type must be in {types.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return types[signal_type]
def _parse_operation(self, operation: str) -> str:
operations = ["replace", "avg", "std"]
if operation not in operations:
_logger.error(
f"Operation must be in {operations}",
_logger.ExceptionTypes.ToolkitError,
)
if operation == "replace":
operation = ""
return f".{operation}"
def _parse_fft(self, fft: bool, selector: str) -> str:
if fft:
selectors = ["real", "imag", "abs", "phase"]
if selector not in selectors:
_logger.error(
f"Operation must be in {selectors}",
_logger.ExceptionTypes.ToolkitError,
)
return f".fft.{selector}"
else:
return ""
def _parse_trigger(self, trigger_source: str, trigger_type: str) -> str:
trigger_node = "/" + self._parent.serial
trigger_node += self._parse_trigger_source(trigger_source)
trigger_node += self._parse_trigger_type(trigger_source, trigger_type)
return trigger_node
def _parse_trigger_source(self, source: str) -> str:
source = source.lower()
sources = self._trigger_signals
if source not in sources:
_logger.error(
f"Signal source must be in {sources.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return sources[source]
def _parse_trigger_type(self, trigger_source: str, trigger_type: str) -> str:
trigger_source = trigger_source.lower()
trigger_type = trigger_type.lower()
types = {}
for signal in self._trigger_types.keys():
if signal in trigger_source:
types = self._trigger_types[signal]
if trigger_type.lower() not in types.keys():
_logger.error(
f"Signal type must be in {types.keys()}",
_logger.ExceptionTypes.ToolkitError,
)
return types[trigger_type]
    def _get_result_from_dict(self, result: Dict):
        """Populate `self._results` with a `DAQResult` per subscribed signal.

        Args:
            result (Dict): Flat dictionary as returned by
                `self._module.read(flat=True)`, mapping lowercase node paths
                to lists of result structures.
        """
        self._results = {}
        for node in self.signals:
            node = node.lower()
            if node not in result.keys():
                _logger.error(
                    f"The signal {node} is not in {list(result.keys())}",
                    _logger.ExceptionTypes.ToolkitError,
                )
            # Only the first result entry for each node is wrapped; the clock
            # rate is forwarded so DAQResult can convert timestamps to seconds.
            self._results[node] = DAQResult(
                node, result[node][0], clk_rate=self._clk_rate
            )
def __repr__(self):
s = super().__repr__()
s += "\n\nsignals:\n"
for signal in self.signals:
s += f" - '{signal}'\n"
s += "parameters:\n"
for key, value in self.__dict__.items():
if isinstance(value, Parameter):
s += f" - {key}\n"
return s
    @property
    def signals(self):
        """List of signal nodes the module subscribes to for a measurement."""
        return self._signals
    @property
    def results(self):
        """Dict mapping subscribed signal nodes to `DAQResult` objects."""
        return self._results
class DAQResult:
    """A wrapper class around the result of a DAQ Module measurement.
    The Data Acquisition Result class holds all measurement information returned
    from the API. The attribute `value` is a two-dimensional numpy array with
    the measured data along the measured grid. Depending on whether the time
    trace or the FFT of a signal was acquired, either the `time` of `frequency`
    attribute holds a 1D numpy array with the correct axis values calculated
    from the measurement grid.
    >>> signal = mf.daq.signals_add("demod1", "r")
    >>> mf.daq.measure()
    ...
    >>> result = mf.daq.results[signal]
    >>> result
    <zhinst.toolkit.control.drivers.base.daq.DAQResult object at 0x0000023B8467D588>
    path:        /dev3337/demods/0/sample.r.avg
    value:       (10, 511)
    time:        (511,)
    >>> result.header
    {'systemtime': array([1585136936490779], dtype=uint64),
     'createdtimestamp': array([548560038356], dtype=uint64),
     'changedtimestamp': array([548669852116], dtype=uint64),
     'flags': array([1977], dtype=uint32),
     ...
    >>> plt.imshow(result.value, extent=[result.time[0], result.time[-1], 0, result.shape[0]])
    Attributes:
        value (array): A 2D numpy array with the measurement result.
        shape (tuple): A tuple with the shape of the acquired data which
            corresponds to the according grid settings.
        time (array): A 1D numpy array containing the time axis of the
            measurement in seconds. Calculated from the returned timestamps
            using the DAC clock rate. If the result is a Fourier transform this
            value is `None`.
        frequency (array): A 1D numpy array with the frequency values for FFT
            measurements in Hertz. If the signal is not a FFT this value is
            `None` The frequency grid is calculated from the grid settings. If
            the "xiy" complex signal of the demodulator data stream is acquired,
            the frequency spectrum is symmetric around 0 Hz, otherwise it is
            positive.
        header (dict): A dictionary containing all information about the
            measurement settings.
    """

    def __init__(self, path: str, result_dict: Dict, clk_rate: float = 60e6) -> None:
        self._path = path
        self._clk_rate = clk_rate
        # A path containing "fft" marks a frequency-domain result.
        self._is_fft = "fft" in self._path
        self._result_dict = result_dict
        self._header = self._result_dict.get("header", {})
        self._value = self._result_dict.get("value")
        # BUG FIX: the unused axis was previously initialized as
        # `self._frequencies`, leaving `self._frequency` undefined for
        # time-domain results, so the `frequency` property (and `__repr__`)
        # raised AttributeError instead of returning None.
        self._time = None
        self._frequency = None
        if not self._is_fft:
            self._time = self._calculate_time()
        else:
            self._frequency = self._calculate_freqs()

    @property
    def value(self):
        """2D numpy array with the measurement result."""
        return self._value

    @property
    def header(self):
        """Dictionary with all information about the measurement settings."""
        return self._header

    @property
    def time(self):
        """Time axis in seconds, or `None` for FFT results."""
        return self._time

    @property
    def frequency(self):
        """Frequency axis in Hertz, or `None` for time-domain results."""
        return self._frequency

    @property
    def shape(self):
        """Shape of the acquired data array."""
        return self._value.shape

    def _calculate_time(self):
        """Convert the returned timestamps to seconds relative to the start.

        (Renamed from the misspelled `_claculate_time`; the method is private
        and only called from `__init__`.)
        """
        timestamp = self._result_dict["timestamp"]
        return (timestamp[0] - timestamp[0][0]) / self._clk_rate

    def _calculate_freqs(self):
        """Compute the frequency grid from the header's grid settings."""
        bin_count = len(self.value[0])
        bin_resolution = self.header["gridcoldelta"]
        frequencies = np.arange(bin_count)
        bandwidth = bin_resolution * len(frequencies)
        frequencies = frequencies * bin_resolution
        if "xiy" in self._path:
            # The complex "xiy" signal yields a spectrum symmetric around 0 Hz.
            frequencies = frequencies - bandwidth / 2.0 + bin_resolution / 2.0
        return frequencies

    def __repr__(self):
        s = super().__repr__()
        s += "\n\n"
        s += f"path:        {self._path}\n"
        s += f"value:       {self._value.shape}\n"
        if self._is_fft:
            s += f"frequency:   {self._frequency.shape}\n"
        else:
            s += f"time:        {self._time.shape}\n"
        return s
| [
"zhinst.toolkit.control.node_tree.Parameter",
"time.sleep",
"zhinst.toolkit.interface.LoggerModule",
"time.time",
"numpy.arange"
] | [((211, 233), 'zhinst.toolkit.interface.LoggerModule', 'LoggerModule', (['__name__'], {}), '(__name__)\n', (223, 233), False, 'from zhinst.toolkit.interface import LoggerModule\n'), ((13252, 13263), 'time.time', 'time.time', ([], {}), '()\n', (13261, 13263), False, 'import time\n'), ((21735, 21755), 'numpy.arange', 'np.arange', (['bin_count'], {}), '(bin_count)\n', (21744, 21755), True, 'import numpy as np\n'), ((13421, 13436), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13431, 13436), False, 'import time\n'), ((13455, 13466), 'time.time', 'time.time', ([], {}), '()\n', (13464, 13466), False, 'import time\n'), ((6125, 6173), 'zhinst.toolkit.control.node_tree.Parameter', 'Parameter', (['self', 'v'], {'device': 'self', 'mapping': 'mapping'}), '(self, v, device=self, mapping=mapping)\n', (6134, 6173), False, 'from zhinst.toolkit.control.node_tree import Parameter\n')] |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The NormalInverseGaussian distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import inverse_gaussian
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'NormalInverseGaussian',
]
def _log1px2(x):
  """Computes `log(1 + x**2)` elementwise, stably for large `|x|`."""
  # For |x| <= 1/sqrt(eps), `x**2` is representable and `log1p` is accurate.
  # Beyond that, `1 + x**2` rounds to `x**2` anyway, so use 2*log(|x|)
  # directly to avoid overflow in the square.
  return tf.where(
      tf.abs(x) * np.sqrt(np.finfo(
          dtype_util.as_numpy_dtype(x.dtype)).eps) <= 1.,
      tf.math.log1p(x**2.),
      2 * tf.math.log(tf.math.abs(x)))
class NormalInverseGaussian(distribution.AutoCompositeTensorDistribution):
  """Normal Inverse Gaussian distribution.
  The [Normal-inverse Gaussian distribution]
  (https://en.wikipedia.org/wiki/Normal-inverse_Gaussian_distribution)
  is parameterized by a `loc`, `tailweight`, `skewness` and `scale` parameter.
  #### Mathematical Details
  The name of this distribution refers to it being a variance mean mixture.
  In other words if `x` is sampled via:
  ```none
  z ~ InverseGaussian(1 / gamma, 1.)
  x ~ Normal(loc + skewness * z, scale * z)
  ```
  then `x ~ NormalInverseGaussian(loc, scale, tailweight, skewness)`.
  where `gamma = sqrt(tailweight ** 2 - skewness ** 2)`.
  The probability density function (pdf) is,
  ```none
  pdf(x; mu, sigma, alpha, beta) = [alpha * sigma * K1(alpha * g)] / (pi * g)
      exp(sigma * gamma + beta * (x - loc))
  ```
  where
  * `loc = mu`
  * `tailweight = alpha`
  * `skewness = beta`
  * `scale = sigma`
  * `g = sqrt(sigma ** 2 + (x - mu) ** 2)`
  * `gamma = sqrt(alpha ** 2 - beta ** 2)`
  * `K1(x)` is the modified Bessel function of the second kind with
  order parameter 1.
  The support of the distribution is defined on `(-infinity, infinity)`.
  Mapping to R and Python scipy's parameterization:
  * R: GeneralizedHyperbolic.NIG
  - mu = loc
  - delta = scale
  - alpha = tailweight
  - beta = skewness
  * Python: scipy.stats.norminvgauss
  - a = tailweight
  - b = skewness
  - loc = loc
  - Note that `scipy.stats.norminvgauss` implements the distribution as a
  location-scale family. However, in the original paper, and other
  implementations (such as R) do not implement it this way. Thus the
  `scale` parameters here and scipy don't match unless `scale = 1`.
  Warning: As mentioned above, this distribution is __not__ a location-scale
  family. Specifically:
  ```none
  NIG(loc, scale, alpha, beta) != loc + scale * NIG(0, 1, alpha, beta).
  ```
  """
  def __init__(self,
               loc,
               scale,
               tailweight,
               skewness,
               validate_args=False,
               allow_nan_stats=True,
               name='NormalInverseGaussian'):
    """Constructs Normal-inverse Gaussian distribution.
    Args:
      loc: Floating point `Tensor`, the location params of the distribution(s).
      scale: Positive floating point `Tensor`, the scale params of the
        distribution(s).
      tailweight: Positive floating point `Tensor`, the tailweight params of the
        distribution(s). Expect `|tailweight| >= |skewness|`.
      skewness: Floating point `Tensor`, the skewness params of the
        distribution(s). Expect `|tailweight| >= |skewness|`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
        Default value: `False` (i.e. do not validate args).
      allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
        (e.g., mean, mode, variance) use the value "`NaN`" to indicate the
        result is undefined. When `False`, an exception is raised if one or
        more of the statistic's batch members are undefined.
        Default value: `True`.
      name: Python `str` name prefixed to Ops created by this class.
        Default value: 'NormalInverseGaussian'.
    """
    parameters = dict(locals())
    with tf.name_scope(name):
      # All four parameters are promoted to a single common floating dtype.
      dtype = dtype_util.common_dtype([loc, scale, tailweight, skewness],
                                      dtype_hint=tf.float32)
      self._loc = tensor_util.convert_nonref_to_tensor(
          loc, dtype=dtype, name='loc')
      self._scale = tensor_util.convert_nonref_to_tensor(
          scale, dtype=dtype, name='scale')
      self._tailweight = tensor_util.convert_nonref_to_tensor(
          tailweight, dtype=dtype, name='tailweight')
      self._skewness = tensor_util.convert_nonref_to_tensor(
          skewness, dtype=dtype, name='skewness')
      super(NormalInverseGaussian, self).__init__(
          dtype=dtype,
          reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          name=name)
  @classmethod
  def _parameter_properties(cls, dtype, num_classes=None):
    # pylint: disable=g-long-lambda
    return dict(
        loc=parameter_properties.ParameterProperties(),
        scale=parameter_properties.ParameterProperties(
            default_constraining_bijector_fn=(
                lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
        tailweight=parameter_properties.ParameterProperties(
            default_constraining_bijector_fn=(
                lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
        # TODO(b/169874884): Support decoupled parameterization.
        skewness=parameter_properties.ParameterProperties(
            default_constraining_bijector_fn=parameter_properties
            .BIJECTOR_NOT_IMPLEMENTED))
    # pylint: enable=g-long-lambda
  @property
  def loc(self):
    """Location parameter."""
    return self._loc
  @property
  def scale(self):
    """Scale parameter."""
    return self._scale
  @property
  def tailweight(self):
    """Tailweight parameter."""
    return self._tailweight
  @property
  def skewness(self):
    """Skewness parameter."""
    return self._skewness
  def _event_shape_tensor(self):
    # Scalar event shape: each draw is a single real number.
    return tf.constant([], dtype=tf.int32)
  def _event_shape(self):
    return tf.TensorShape([])
  def _sample_n(self, n, seed=None):
    loc = tf.convert_to_tensor(self.loc)
    scale = tf.convert_to_tensor(self.scale)
    tailweight = tf.convert_to_tensor(self.tailweight)
    skewness = tf.convert_to_tensor(self.skewness)
    ig_seed, normal_seed = samplers.split_seed(
        seed, salt='normal_inverse_gaussian')
    batch_shape = self._batch_shape_tensor(
        loc=loc,
        scale=scale,
        tailweight=tailweight,
        skewness=skewness)
    # w = gamma = sqrt(tailweight**2 - skewness**2), computed as
    # tailweight * sqrt(1 - (skewness / tailweight)**2) for stability.
    w = tailweight * tf.math.exp(0.5 * tf.math.log1p(
        -tf.math.square(skewness / tailweight)))
    w = tf.broadcast_to(w, batch_shape)
    ig_samples = inverse_gaussian.InverseGaussian(
        scale / w, tf.math.square(scale)).sample(n, seed=ig_seed)
    sample_shape = ps.concat([[n], batch_shape], axis=0)
    normal_samples = samplers.normal(
        shape=ps.convert_to_shape_tensor(sample_shape),
        mean=0., stddev=1., dtype=self.dtype, seed=normal_seed)
    # Variance-mean mixture: x = loc + skewness * z + sqrt(z) * eps,
    # with z the inverse Gaussian sample and eps standard normal.
    return (loc + tf.math.sqrt(ig_samples) * (
        skewness * tf.math.sqrt(ig_samples) + normal_samples))
  def _log_prob(self, x):
    loc = tf.convert_to_tensor(self.loc)
    scale = tf.convert_to_tensor(self.scale)
    skewness = tf.convert_to_tensor(self.skewness)
    tailweight = tf.convert_to_tensor(self.tailweight)
    numpy_dtype = dtype_util.as_numpy_dtype(self.dtype)
    y = (x - loc) / scale
    # z = log(1 + y**2), so exp(0.5 * z) = sqrt(1 + y**2) = g / scale.
    z = _log1px2(y)
    # w = gamma = sqrt(tailweight**2 - skewness**2) (stable form).
    w = tailweight * tf.math.exp(0.5 * tf.math.log1p(
        -tf.math.square(skewness / tailweight)))
    # `log_bessel_kve` is the log of the exponentially-scaled Bessel K, hence
    # its argument `tailweight * scale * exp(0.5 * z)` is subtracted back out.
    log_unnormalized_prob = (
        tfp_math.log_bessel_kve(
            numpy_dtype(1.), tailweight * scale * tf.math.exp(0.5 * z)) -
        0.5 * z - tailweight * scale * tf.math.exp(0.5 * z))
    log_unnormalized_prob = log_unnormalized_prob + scale * skewness * y
    log_normalization = np.log(np.pi) - scale * w - tf.math.log(tailweight)
    return log_unnormalized_prob - log_normalization
  def _mean(self):
    loc = tf.convert_to_tensor(self.loc)
    scale = tf.convert_to_tensor(self.scale)
    skewness = tf.convert_to_tensor(self.skewness)
    tailweight = tf.convert_to_tensor(self.tailweight)
    # w = gamma = sqrt(tailweight**2 - skewness**2) (stable form).
    w = tailweight * tf.math.exp(0.5 * tf.math.log1p(
        -tf.math.square(skewness / tailweight)))
    return loc + (skewness * scale / w)
  def _variance(self):
    scale = tf.convert_to_tensor(self.scale)
    skewness = tf.convert_to_tensor(self.skewness)
    tailweight = tf.convert_to_tensor(self.tailweight)
    scale = tf.broadcast_to(
        scale, self._batch_shape_tensor(
            scale=scale, tailweight=tailweight, skewness=skewness))
    # w = gamma = sqrt(tailweight**2 - skewness**2) (stable form).
    w = tailweight * tf.math.exp(0.5 * tf.math.log1p(
        -tf.math.square(skewness / tailweight)))
    return scale * tf.math.square(tailweight) / w ** 3
  def _default_event_space_bijector(self):
    # Support is the whole real line, so no constraining transform is needed.
    return identity_bijector.Identity()
  def _parameter_control_dependencies(self, is_init):
    if not self.validate_args:
      return []
    assertions = []
    tailweight_is_ref = tensor_util.is_ref(self.tailweight)
    tailweight = tf.convert_to_tensor(self.tailweight)
    # Each assertion is only added at the appropriate time (init for
    # non-ref/constant parameters, call time for mutable refs).
    if (is_init != tailweight_is_ref and
        is_init != tensor_util.is_ref(self.skewness)):
      assertions.append(assert_util.assert_less(
          tf.math.abs(self.skewness),
          tailweight,
          message='Expect `tailweight > |skewness|`'))
    if is_init != tensor_util.is_ref(self.scale):
      assertions.append(assert_util.assert_positive(
          self.scale,
          message='Argument `scale` must be positive.'))
    if is_init != tailweight_is_ref:
      assertions.append(assert_util.assert_positive(
          tailweight,
          message='Argument `tailweight` must be positive.'))
    return assertions
| [
"tensorflow_probability.python.internal.tensor_util.convert_nonref_to_tensor",
"numpy.log",
"tensorflow.compat.v2.math.exp",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow_probability.python.internal.prefer_static.concat",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.math.abs",
"tenso... | [((1924, 1947), 'tensorflow.compat.v2.math.log1p', 'tf.math.log1p', (['(x ** 2.0)'], {}), '(x ** 2.0)\n', (1937, 1947), True, 'import tensorflow.compat.v2 as tf\n'), ((7619, 7650), 'tensorflow.compat.v2.constant', 'tf.constant', (['[]'], {'dtype': 'tf.int32'}), '([], dtype=tf.int32)\n', (7630, 7650), True, 'import tensorflow.compat.v2 as tf\n'), ((7689, 7707), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (7703, 7707), True, 'import tensorflow.compat.v2 as tf\n'), ((7756, 7786), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.loc'], {}), '(self.loc)\n', (7776, 7786), True, 'import tensorflow.compat.v2 as tf\n'), ((7799, 7831), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.scale'], {}), '(self.scale)\n', (7819, 7831), True, 'import tensorflow.compat.v2 as tf\n'), ((7849, 7886), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.tailweight'], {}), '(self.tailweight)\n', (7869, 7886), True, 'import tensorflow.compat.v2 as tf\n'), ((7902, 7937), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.skewness'], {}), '(self.skewness)\n', (7922, 7937), True, 'import tensorflow.compat.v2 as tf\n'), ((7965, 8022), 'tensorflow_probability.python.internal.samplers.split_seed', 'samplers.split_seed', (['seed'], {'salt': '"""normal_inverse_gaussian"""'}), "(seed, salt='normal_inverse_gaussian')\n", (7984, 8022), False, 'from tensorflow_probability.python.internal import samplers\n'), ((8283, 8314), 'tensorflow.compat.v2.broadcast_to', 'tf.broadcast_to', (['w', 'batch_shape'], {}), '(w, batch_shape)\n', (8298, 8314), True, 'import tensorflow.compat.v2 as tf\n'), ((8452, 8489), 'tensorflow_probability.python.internal.prefer_static.concat', 'ps.concat', (['[[n], batch_shape]'], {'axis': '(0)'}), '([[n], batch_shape], axis=0)\n', (8461, 8489), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((8795, 8825), 
'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.loc'], {}), '(self.loc)\n', (8815, 8825), True, 'import tensorflow.compat.v2 as tf\n'), ((8838, 8870), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.scale'], {}), '(self.scale)\n', (8858, 8870), True, 'import tensorflow.compat.v2 as tf\n'), ((8886, 8921), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.skewness'], {}), '(self.skewness)\n', (8906, 8921), True, 'import tensorflow.compat.v2 as tf\n'), ((8939, 8976), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.tailweight'], {}), '(self.tailweight)\n', (8959, 8976), True, 'import tensorflow.compat.v2 as tf\n'), ((8995, 9032), 'tensorflow_probability.python.internal.dtype_util.as_numpy_dtype', 'dtype_util.as_numpy_dtype', (['self.dtype'], {}), '(self.dtype)\n', (9020, 9032), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((9612, 9642), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.loc'], {}), '(self.loc)\n', (9632, 9642), True, 'import tensorflow.compat.v2 as tf\n'), ((9655, 9687), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.scale'], {}), '(self.scale)\n', (9675, 9687), True, 'import tensorflow.compat.v2 as tf\n'), ((9703, 9738), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.skewness'], {}), '(self.skewness)\n', (9723, 9738), True, 'import tensorflow.compat.v2 as tf\n'), ((9756, 9793), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.tailweight'], {}), '(self.tailweight)\n', (9776, 9793), True, 'import tensorflow.compat.v2 as tf\n'), ((9973, 10005), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.scale'], {}), '(self.scale)\n', (9993, 10005), True, 'import tensorflow.compat.v2 as tf\n'), ((10021, 10056), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.skewness'], {}), 
'(self.skewness)\n', (10041, 10056), True, 'import tensorflow.compat.v2 as tf\n'), ((10074, 10111), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.tailweight'], {}), '(self.tailweight)\n', (10094, 10111), True, 'import tensorflow.compat.v2 as tf\n'), ((10463, 10491), 'tensorflow_probability.python.bijectors.identity.Identity', 'identity_bijector.Identity', ([], {}), '()\n', (10489, 10491), True, 'from tensorflow_probability.python.bijectors import identity as identity_bijector\n'), ((10638, 10673), 'tensorflow_probability.python.internal.tensor_util.is_ref', 'tensor_util.is_ref', (['self.tailweight'], {}), '(self.tailweight)\n', (10656, 10673), False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((10691, 10728), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['self.tailweight'], {}), '(self.tailweight)\n', (10711, 10728), True, 'import tensorflow.compat.v2 as tf\n'), ((5532, 5551), 'tensorflow.compat.v2.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (5545, 5551), True, 'import tensorflow.compat.v2 as tf\n'), ((5567, 5654), 'tensorflow_probability.python.internal.dtype_util.common_dtype', 'dtype_util.common_dtype', (['[loc, scale, tailweight, skewness]'], {'dtype_hint': 'tf.float32'}), '([loc, scale, tailweight, skewness], dtype_hint=tf.\n float32)\n', (5590, 5654), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((5706, 5772), 'tensorflow_probability.python.internal.tensor_util.convert_nonref_to_tensor', 'tensor_util.convert_nonref_to_tensor', (['loc'], {'dtype': 'dtype', 'name': '"""loc"""'}), "(loc, dtype=dtype, name='loc')\n", (5742, 5772), False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((5804, 5874), 'tensorflow_probability.python.internal.tensor_util.convert_nonref_to_tensor', 'tensor_util.convert_nonref_to_tensor', (['scale'], {'dtype': 'dtype', 'name': '"""scale"""'}), "(scale, dtype=dtype, name='scale')\n", (5840, 5874), 
False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((5911, 5996), 'tensorflow_probability.python.internal.tensor_util.convert_nonref_to_tensor', 'tensor_util.convert_nonref_to_tensor', (['tailweight'], {'dtype': 'dtype', 'name': '"""tailweight"""'}), "(tailweight, dtype=dtype, name='tailweight'\n )\n", (5947, 5996), False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((6026, 6102), 'tensorflow_probability.python.internal.tensor_util.convert_nonref_to_tensor', 'tensor_util.convert_nonref_to_tensor', (['skewness'], {'dtype': 'dtype', 'name': '"""skewness"""'}), "(skewness, dtype=dtype, name='skewness')\n", (6062, 6102), False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((9505, 9528), 'tensorflow.compat.v2.math.log', 'tf.math.log', (['tailweight'], {}), '(tailweight)\n', (9516, 9528), True, 'import tensorflow.compat.v2 as tf\n'), ((11007, 11037), 'tensorflow_probability.python.internal.tensor_util.is_ref', 'tensor_util.is_ref', (['self.scale'], {}), '(self.scale)\n', (11025, 11037), False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((1830, 1839), 'tensorflow.compat.v2.abs', 'tf.abs', (['x'], {}), '(x)\n', (1836, 1839), True, 'import tensorflow.compat.v2 as tf\n'), ((1968, 1982), 'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['x'], {}), '(x)\n', (1979, 1982), True, 'import tensorflow.compat.v2 as tf\n'), ((6541, 6583), 'tensorflow_probability.python.internal.parameter_properties.ParameterProperties', 'parameter_properties.ParameterProperties', ([], {}), '()\n', (6581, 6583), False, 'from tensorflow_probability.python.internal import parameter_properties\n'), ((7040, 7165), 'tensorflow_probability.python.internal.parameter_properties.ParameterProperties', 'parameter_properties.ParameterProperties', ([], {'default_constraining_bijector_fn': 'parameter_properties.BIJECTOR_NOT_IMPLEMENTED'}), '(default_constraining_bijector_fn=\n 
parameter_properties.BIJECTOR_NOT_IMPLEMENTED)\n', (7080, 7165), False, 'from tensorflow_probability.python.internal import parameter_properties\n'), ((8542, 8582), 'tensorflow_probability.python.internal.prefer_static.convert_to_shape_tensor', 'ps.convert_to_shape_tensor', (['sample_shape'], {}), '(sample_shape)\n', (8568, 8582), True, 'from tensorflow_probability.python.internal import prefer_static as ps\n'), ((8666, 8690), 'tensorflow.compat.v2.math.sqrt', 'tf.math.sqrt', (['ig_samples'], {}), '(ig_samples)\n', (8678, 8690), True, 'import tensorflow.compat.v2 as tf\n'), ((9358, 9378), 'tensorflow.compat.v2.math.exp', 'tf.math.exp', (['(0.5 * z)'], {}), '(0.5 * z)\n', (9369, 9378), True, 'import tensorflow.compat.v2 as tf\n'), ((9477, 9490), 'numpy.log', 'np.log', (['np.pi'], {}), '(np.pi)\n', (9483, 9490), True, 'import numpy as np\n'), ((10372, 10398), 'tensorflow.compat.v2.math.square', 'tf.math.square', (['tailweight'], {}), '(tailweight)\n', (10386, 10398), True, 'import tensorflow.compat.v2 as tf\n'), ((10789, 10822), 'tensorflow_probability.python.internal.tensor_util.is_ref', 'tensor_util.is_ref', (['self.skewness'], {}), '(self.skewness)\n', (10807, 10822), False, 'from tensorflow_probability.python.internal import tensor_util\n'), ((11063, 11153), 'tensorflow_probability.python.internal.assert_util.assert_positive', 'assert_util.assert_positive', (['self.scale'], {'message': '"""Argument `scale` must be positive."""'}), "(self.scale, message=\n 'Argument `scale` must be positive.')\n", (11090, 11153), False, 'from tensorflow_probability.python.internal import assert_util\n'), ((11232, 11327), 'tensorflow_probability.python.internal.assert_util.assert_positive', 'assert_util.assert_positive', (['tailweight'], {'message': '"""Argument `tailweight` must be positive."""'}), "(tailweight, message=\n 'Argument `tailweight` must be positive.')\n", (11259, 11327), False, 'from tensorflow_probability.python.internal import assert_util\n'), ((8385, 8406), 
'tensorflow.compat.v2.math.square', 'tf.math.square', (['scale'], {}), '(scale)\n', (8399, 8406), True, 'import tensorflow.compat.v2 as tf\n'), ((10884, 10910), 'tensorflow.compat.v2.math.abs', 'tf.math.abs', (['self.skewness'], {}), '(self.skewness)\n', (10895, 10910), True, 'import tensorflow.compat.v2 as tf\n'), ((8714, 8738), 'tensorflow.compat.v2.math.sqrt', 'tf.math.sqrt', (['ig_samples'], {}), '(ig_samples)\n', (8726, 8738), True, 'import tensorflow.compat.v2 as tf\n'), ((9295, 9315), 'tensorflow.compat.v2.math.exp', 'tf.math.exp', (['(0.5 * z)'], {}), '(0.5 * z)\n', (9306, 9315), True, 'import tensorflow.compat.v2 as tf\n'), ((1870, 1904), 'tensorflow_probability.python.internal.dtype_util.as_numpy_dtype', 'dtype_util.as_numpy_dtype', (['x.dtype'], {}), '(x.dtype)\n', (1895, 1904), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((8235, 8272), 'tensorflow.compat.v2.math.square', 'tf.math.square', (['(skewness / tailweight)'], {}), '(skewness / tailweight)\n', (8249, 8272), True, 'import tensorflow.compat.v2 as tf\n'), ((9142, 9179), 'tensorflow.compat.v2.math.square', 'tf.math.square', (['(skewness / tailweight)'], {}), '(skewness / tailweight)\n', (9156, 9179), True, 'import tensorflow.compat.v2 as tf\n'), ((9857, 9894), 'tensorflow.compat.v2.math.square', 'tf.math.square', (['(skewness / tailweight)'], {}), '(skewness / tailweight)\n', (9871, 9894), True, 'import tensorflow.compat.v2 as tf\n'), ((10313, 10350), 'tensorflow.compat.v2.math.square', 'tf.math.square', (['(skewness / tailweight)'], {}), '(skewness / tailweight)\n', (10327, 10350), True, 'import tensorflow.compat.v2 as tf\n'), ((6743, 6764), 'tensorflow_probability.python.internal.dtype_util.eps', 'dtype_util.eps', (['dtype'], {}), '(dtype)\n', (6757, 6764), False, 'from tensorflow_probability.python.internal import dtype_util\n'), ((6932, 6953), 'tensorflow_probability.python.internal.dtype_util.eps', 'dtype_util.eps', (['dtype'], {}), '(dtype)\n', (6946, 6953), 
False, 'from tensorflow_probability.python.internal import dtype_util\n')] |
"""
Model inference/embeddings tests.
All of these tests are designed to be run manually via::
pytest tests/intensive/model_tests.py -s -k test_<name>
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import unittest
import numpy as np
import fiftyone as fo
import fiftyone.zoo as foz
def test_apply_model():
    """Smoke-test applying classification and detection zoo models."""
    view = foz.load_zoo_dataset("quickstart").take(50)
    classifier = foz.load_zoo_model("inception-v3-imagenet-torch")
    view.apply_model(classifier, "predictions1", batch_size=8)
    print(view.count_values("predictions1.label"))
    detector = foz.load_zoo_model("ssd-mobilenet-v1-coco-tf")
    view.apply_model(detector, "predictions2")
    print(view.count_values("predictions2.detections.label"))
def test_compute_embeddings():
    """Compute embeddings with and without batching for manual comparison."""
    view = foz.load_zoo_dataset("quickstart").take(50)
    model = foz.load_zoo_model("mobilenet-v2-imagenet-tf1")
    # Unbatched: in-memory result vs. values stored on the samples.
    embeddings1a = view.compute_embeddings(model)
    view.compute_embeddings(model, embeddings_field="embeddings1")
    embeddings1b = np.stack(view.values("embeddings1"))
    # embeddings1a and embeddings1b should match
    # Batched: same comparison with batch_size=8.
    embeddings2a = view.compute_embeddings(model, batch_size=8)
    view.compute_embeddings(model, embeddings_field="embeddings2", batch_size=8)
    embeddings2b = np.stack(view.values("embeddings2"))
    # embeddings2a and embeddings2b should match
def test_compute_patch_embeddings():
    """Compute patch embeddings with and without batching for comparison.

    Embeds the ``ground_truth`` patches of 50 quickstart samples, both
    in-memory and stored on the samples, for manual comparison.
    """
    dataset = foz.load_zoo_dataset("quickstart")
    view = dataset.take(50)
    model = foz.load_zoo_model("mobilenet-v2-imagenet-tf1")
    patch_embeddings1a = view.compute_patch_embeddings(model, "ground_truth")
    view.compute_patch_embeddings(
        model, "ground_truth", embeddings_field="patch_embeddings1"
    )
    patch_embeddings1b = {
        _id: e
        for _id, e in zip(view.values("id"), view.values("patch_embeddings1"))
    }
    # patch_embeddings1a and patch_embeddings1b should match
    patch_embeddings2a = view.compute_patch_embeddings(
        model, "ground_truth", batch_size=8
    )
    # BUG FIX: `batch_size=8` was missing here, so "patch_embeddings2" was
    # computed unbatched and the comparison never exercised the batched code
    # path (cf. `test_compute_embeddings`, which batches both calls).
    view.compute_patch_embeddings(
        model, "ground_truth", embeddings_field="patch_embeddings2", batch_size=8
    )
    patch_embeddings2b = {
        _id: e
        for _id, e in zip(view.values("id"), view.values("patch_embeddings2"))
    }
    # patch_embeddings2a and patch_embeddings2b should match
def test_apply_model_frames():
    """Smoke-test applying zoo models to the frames of a video dataset."""
    view = foz.load_zoo_dataset("quickstart-video").take(2)
    classifier = foz.load_zoo_model("inception-v3-imagenet-torch")
    view.apply_model(classifier, "predictions1", batch_size=8)
    print(view.count_values("frames.predictions1.label"))
    detector = foz.load_zoo_model("ssd-mobilenet-v1-coco-tf")
    view.apply_model(detector, "predictions2")
    print(view.count_values("frames.predictions2.detections.label"))
def test_compute_embeddings_frames():
    """Compute frame embeddings with and without batching for comparison."""
    view = foz.load_zoo_dataset("quickstart-video").take(2)
    model = foz.load_zoo_model("mobilenet-v2-imagenet-tf1")
    # Unbatched: in-memory result vs. per-frame values stored on the samples.
    embeddings1a = view.compute_embeddings(model)
    view.compute_embeddings(model, embeddings_field="embeddings1")
    embeddings1b = {
        sample_id: np.stack(frame_embeddings)
        for sample_id, frame_embeddings in zip(
            view.values("id"), view.values("frames.embeddings1")
        )
    }
    # embeddings1a and embeddings1b should match
    # Batched: same comparison with batch_size=8.
    embeddings2a = view.compute_embeddings(model, batch_size=8)
    view.compute_embeddings(model, embeddings_field="embeddings2", batch_size=8)
    embeddings2b = {
        sample_id: np.stack(frame_embeddings)
        for sample_id, frame_embeddings in zip(
            view.values("id"), view.values("frames.embeddings2")
        )
    }
    # embeddings2a and embeddings2b should match
def test_compute_patch_embeddings_frames():
    """Compute patch embeddings on video frames via both the return-value and
    field-based APIs; the results of each pair of calls should match.
    """
    dataset = foz.load_zoo_dataset("quickstart-video")
    view = dataset.take(2)
    model = foz.load_zoo_model("mobilenet-v2-imagenet-tf1")
    patch_embeddings1a = view.compute_patch_embeddings(
        model, "ground_truth_detections"
    )
    view.compute_patch_embeddings(
        model, "ground_truth_detections", embeddings_field="patch_embeddings1"
    )
    # rebuild {sample id: {frame number: patches}} from the stored field
    patch_embeddings1b = {
        _id: {fn: p for fn, p in enumerate(e, 1)}
        for _id, e in zip(
            view.values("id"), view.values("frames.patch_embeddings1")
        )
    }
    # patch_embeddings1a and patch_embeddings1b should match
    patch_embeddings2a = view.compute_patch_embeddings(
        model, "ground_truth_detections", batch_size=8
    )
    # pass batch_size here too so both batched code paths are exercised,
    # mirroring test_compute_embeddings_frames
    view.compute_patch_embeddings(
        model,
        "ground_truth_detections",
        embeddings_field="patch_embeddings2",
        batch_size=8,
    )
    patch_embeddings2b = {
        _id: {fn: p for fn, p in enumerate(e, 1)}
        for _id, e in zip(
            view.values("id"), view.values("frames.patch_embeddings2")
        )
    }
    # patch_embeddings2a and patch_embeddings2b should match
def test_apply_model_skip_failures():
    """Apply models to a dataset of missing images; failures should be
    skipped rather than aborting inference.
    """
    dataset = fo.Dataset()
    dataset.add_samples(
        [fo.Sample(filepath=f"non-existent{i}.png") for i in range(1, 5)]
    )
    # (zoo model, predictions field, batch size) covering torch data-loader
    # single/batch paths and TF single/batch inference paths
    runs = [
        ("inception-v3-imagenet-torch", "predictions1", None),
        ("inception-v3-imagenet-torch", "predictions2", 2),
        ("ssd-mobilenet-v1-coco-tf", "predictions3", None),
        ("resnet-v2-50-imagenet-tf1", "predictions4", 2),
    ]
    for zoo_name, field, batch_size in runs:
        model = foz.load_zoo_model(zoo_name)
        if batch_size is None:
            dataset.apply_model(model, field)
        else:
            dataset.apply_model(model, field, batch_size=batch_size)
def test_compute_embeddings_skip_failures():
    """Compute embeddings on a dataset of missing images; failures should be
    skipped rather than aborting inference.
    """
    dataset = fo.Dataset()
    dataset.add_samples(
        [fo.Sample(filepath=f"non-existent{i}.png") for i in range(1, 5)]
    )
    # (zoo model, batch size) covering torch data-loader single/batch paths
    # and TF batch inference
    for zoo_name, batch_size in [
        ("inception-v3-imagenet-torch", None),
        ("inception-v3-imagenet-torch", 2),
        ("resnet-v2-50-imagenet-tf1", 2),
    ]:
        model = foz.load_zoo_model(zoo_name)
        if batch_size is None:
            dataset.compute_embeddings(model)
        else:
            dataset.compute_embeddings(model, batch_size=batch_size)
def test_compute_patch_embeddings_skip_failures():
    """Compute patch embeddings on missing images; failures should be skipped
    rather than aborting inference.
    """
    dataset = fo.Dataset()
    dataset.add_samples(
        [fo.Sample(filepath=f"non-existent{i}.png") for i in range(1, 5)]
    )
    # attach a patch to embed on every (missing) sample
    for sample in dataset:
        sample["ground_truth"] = fo.Detections(
            detections=[fo.Detection(bounding_box=[0.1, 0.1, 0.8, 0.8])]
        )
        sample.save()
    # (zoo model, batch size) covering torch data-loader single/batch paths
    # and TF batch inference
    for zoo_name, batch_size in [
        ("inception-v3-imagenet-torch", None),
        ("inception-v3-imagenet-torch", 2),
        ("resnet-v2-50-imagenet-tf1", 2),
    ]:
        model = foz.load_zoo_model(zoo_name)
        if batch_size is None:
            dataset.compute_patch_embeddings(model, "ground_truth")
        else:
            dataset.compute_patch_embeddings(
                model, "ground_truth", batch_size=batch_size
            )
def test_apply_model_frames_skip_failures():
    """Apply models to missing videos; failures should be skipped rather than
    aborting inference.
    """
    dataset = fo.Dataset()
    dataset.add_samples(
        [fo.Sample(filepath=f"non-existent{i}.mp4") for i in range(1, 5)]
    )
    # (zoo model, predictions field, batch size) covering torch data-loader
    # single/batch paths and TF single/batch inference paths
    runs = [
        ("inception-v3-imagenet-torch", "predictions1", None),
        ("inception-v3-imagenet-torch", "predictions2", 2),
        ("ssd-mobilenet-v1-coco-tf", "predictions3", None),
        ("resnet-v2-50-imagenet-tf1", "predictions4", 2),
    ]
    for zoo_name, field, batch_size in runs:
        model = foz.load_zoo_model(zoo_name)
        if batch_size is None:
            dataset.apply_model(model, field)
        else:
            dataset.apply_model(model, field, batch_size=batch_size)
def test_compute_embeddings_frames_skip_failures():
    """Compute embeddings on missing videos; failures should be skipped
    rather than aborting inference.
    """
    dataset = fo.Dataset()
    dataset.add_samples(
        [fo.Sample(filepath=f"non-existent{i}.mp4") for i in range(1, 5)]
    )
    # (zoo model, batch size) covering torch data-loader single/batch paths
    # and TF batch inference
    for zoo_name, batch_size in [
        ("inception-v3-imagenet-torch", None),
        ("inception-v3-imagenet-torch", 2),
        ("resnet-v2-50-imagenet-tf1", 2),
    ]:
        model = foz.load_zoo_model(zoo_name)
        if batch_size is None:
            dataset.compute_embeddings(model)
        else:
            dataset.compute_embeddings(model, batch_size=batch_size)
def test_compute_patch_embeddings_frames_skip_failures():
    """Compute frame patch embeddings on missing videos; failures should be
    skipped rather than aborting inference.
    """
    dataset = fo.Dataset()
    dataset.add_samples(
        [fo.Sample(filepath=f"non-existent{i}.mp4") for i in range(1, 5)]
    )
    # attach a patch to embed on the first frame of every (missing) video
    for sample in dataset:
        frame = sample.frames[1]
        frame["ground_truth"] = fo.Detections(
            detections=[fo.Detection(bounding_box=[0.1, 0.1, 0.8, 0.8])]
        )
        sample.save()
    # (zoo model, batch size) covering torch data-loader single/batch paths
    # and TF batch inference
    for zoo_name, batch_size in [
        ("inception-v3-imagenet-torch", None),
        ("inception-v3-imagenet-torch", 2),
        ("resnet-v2-50-imagenet-tf1", 2),
    ]:
        model = foz.load_zoo_model(zoo_name)
        if batch_size is None:
            dataset.compute_patch_embeddings(model, "ground_truth")
        else:
            dataset.compute_patch_embeddings(
                model, "ground_truth", batch_size=batch_size
            )
if __name__ == "__main__":
    # show progress bars while the tests run, and use verbose test output
    fo.config.show_progress_bars = True
    unittest.main(verbosity=2)
| [
"fiftyone.Dataset",
"fiftyone.zoo.load_zoo_dataset",
"fiftyone.Detection",
"numpy.stack",
"fiftyone.zoo.load_zoo_model",
"fiftyone.Sample",
"unittest.main"
] | [((367, 401), 'fiftyone.zoo.load_zoo_dataset', 'foz.load_zoo_dataset', (['"""quickstart"""'], {}), "('quickstart')\n", (387, 401), True, 'import fiftyone.zoo as foz\n'), ((443, 492), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (461, 492), True, 'import fiftyone.zoo as foz\n'), ((615, 661), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""ssd-mobilenet-v1-coco-tf"""'], {}), "('ssd-mobilenet-v1-coco-tf')\n", (633, 661), True, 'import fiftyone.zoo as foz\n'), ((815, 849), 'fiftyone.zoo.load_zoo_dataset', 'foz.load_zoo_dataset', (['"""quickstart"""'], {}), "('quickstart')\n", (835, 849), True, 'import fiftyone.zoo as foz\n'), ((891, 938), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""mobilenet-v2-imagenet-tf1"""'], {}), "('mobilenet-v2-imagenet-tf1')\n", (909, 938), True, 'import fiftyone.zoo as foz\n'), ((1484, 1518), 'fiftyone.zoo.load_zoo_dataset', 'foz.load_zoo_dataset', (['"""quickstart"""'], {}), "('quickstart')\n", (1504, 1518), True, 'import fiftyone.zoo as foz\n'), ((1560, 1607), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""mobilenet-v2-imagenet-tf1"""'], {}), "('mobilenet-v2-imagenet-tf1')\n", (1578, 1607), True, 'import fiftyone.zoo as foz\n'), ((2439, 2479), 'fiftyone.zoo.load_zoo_dataset', 'foz.load_zoo_dataset', (['"""quickstart-video"""'], {}), "('quickstart-video')\n", (2459, 2479), True, 'import fiftyone.zoo as foz\n'), ((2520, 2569), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (2538, 2569), True, 'import fiftyone.zoo as foz\n'), ((2699, 2745), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""ssd-mobilenet-v1-coco-tf"""'], {}), "('ssd-mobilenet-v1-coco-tf')\n", (2717, 2745), True, 'import fiftyone.zoo as foz\n'), ((2913, 2953), 'fiftyone.zoo.load_zoo_dataset', 'foz.load_zoo_dataset', (['"""quickstart-video"""'], {}), 
"('quickstart-video')\n", (2933, 2953), True, 'import fiftyone.zoo as foz\n'), ((2994, 3041), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""mobilenet-v2-imagenet-tf1"""'], {}), "('mobilenet-v2-imagenet-tf1')\n", (3012, 3041), True, 'import fiftyone.zoo as foz\n'), ((3746, 3786), 'fiftyone.zoo.load_zoo_dataset', 'foz.load_zoo_dataset', (['"""quickstart-video"""'], {}), "('quickstart-video')\n", (3766, 3786), True, 'import fiftyone.zoo as foz\n'), ((3827, 3874), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""mobilenet-v2-imagenet-tf1"""'], {}), "('mobilenet-v2-imagenet-tf1')\n", (3845, 3874), True, 'import fiftyone.zoo as foz\n'), ((4899, 4911), 'fiftyone.Dataset', 'fo.Dataset', ([], {}), '()\n', (4909, 4911), True, 'import fiftyone as fo\n'), ((5229, 5278), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (5247, 5278), True, 'import fiftyone.zoo as foz\n'), ((5373, 5422), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (5391, 5422), True, 'import fiftyone.zoo as foz\n'), ((5524, 5570), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""ssd-mobilenet-v1-coco-tf"""'], {}), "('ssd-mobilenet-v1-coco-tf')\n", (5542, 5570), True, 'import fiftyone.zoo as foz\n'), ((5657, 5704), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""resnet-v2-50-imagenet-tf1"""'], {}), "('resnet-v2-50-imagenet-tf1')\n", (5675, 5704), True, 'import fiftyone.zoo as foz\n'), ((5827, 5839), 'fiftyone.Dataset', 'fo.Dataset', ([], {}), '()\n', (5837, 5839), True, 'import fiftyone as fo\n'), ((6157, 6206), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (6175, 6206), True, 'import fiftyone.zoo as foz\n'), ((6292, 6341), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', 
(['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (6310, 6341), True, 'import fiftyone.zoo as foz\n'), ((6433, 6480), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""resnet-v2-50-imagenet-tf1"""'], {}), "('resnet-v2-50-imagenet-tf1')\n", (6451, 6480), True, 'import fiftyone.zoo as foz\n'), ((6600, 6612), 'fiftyone.Dataset', 'fo.Dataset', ([], {}), '()\n', (6610, 6612), True, 'import fiftyone as fo\n'), ((7111, 7160), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (7129, 7160), True, 'import fiftyone.zoo as foz\n'), ((7268, 7317), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (7286, 7317), True, 'import fiftyone.zoo as foz\n'), ((7431, 7478), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""resnet-v2-50-imagenet-tf1"""'], {}), "('resnet-v2-50-imagenet-tf1')\n", (7449, 7478), True, 'import fiftyone.zoo as foz\n'), ((7614, 7626), 'fiftyone.Dataset', 'fo.Dataset', ([], {}), '()\n', (7624, 7626), True, 'import fiftyone as fo\n'), ((7944, 7993), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (7962, 7993), True, 'import fiftyone.zoo as foz\n'), ((8088, 8137), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (8106, 8137), True, 'import fiftyone.zoo as foz\n'), ((8239, 8285), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""ssd-mobilenet-v1-coco-tf"""'], {}), "('ssd-mobilenet-v1-coco-tf')\n", (8257, 8285), True, 'import fiftyone.zoo as foz\n'), ((8372, 8419), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""resnet-v2-50-imagenet-tf1"""'], {}), "('resnet-v2-50-imagenet-tf1')\n", (8390, 8419), True, 'import fiftyone.zoo as foz\n'), ((8549, 8561), 
'fiftyone.Dataset', 'fo.Dataset', ([], {}), '()\n', (8559, 8561), True, 'import fiftyone as fo\n'), ((8879, 8928), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (8897, 8928), True, 'import fiftyone.zoo as foz\n'), ((9014, 9063), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (9032, 9063), True, 'import fiftyone.zoo as foz\n'), ((9155, 9202), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""resnet-v2-50-imagenet-tf1"""'], {}), "('resnet-v2-50-imagenet-tf1')\n", (9173, 9202), True, 'import fiftyone.zoo as foz\n'), ((9329, 9341), 'fiftyone.Dataset', 'fo.Dataset', ([], {}), '()\n', (9339, 9341), True, 'import fiftyone as fo\n'), ((9872, 9921), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (9890, 9921), True, 'import fiftyone.zoo as foz\n'), ((10029, 10078), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""inception-v3-imagenet-torch"""'], {}), "('inception-v3-imagenet-torch')\n", (10047, 10078), True, 'import fiftyone.zoo as foz\n'), ((10192, 10239), 'fiftyone.zoo.load_zoo_model', 'foz.load_zoo_model', (['"""resnet-v2-50-imagenet-tf1"""'], {}), "('resnet-v2-50-imagenet-tf1')\n", (10210, 10239), True, 'import fiftyone.zoo as foz\n'), ((10387, 10413), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10400, 10413), False, 'import unittest\n'), ((3195, 3206), 'numpy.stack', 'np.stack', (['e'], {}), '(e)\n', (3203, 3206), True, 'import numpy as np\n'), ((3538, 3549), 'numpy.stack', 'np.stack', (['e'], {}), '(e)\n', (3546, 3549), True, 'import numpy as np\n'), ((4959, 4998), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent1.png"""'}), "(filepath='non-existent1.png')\n", (4968, 4998), True, 'import fiftyone as fo\n'), ((5012, 5051), 
'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent2.png"""'}), "(filepath='non-existent2.png')\n", (5021, 5051), True, 'import fiftyone as fo\n'), ((5065, 5104), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent3.png"""'}), "(filepath='non-existent3.png')\n", (5074, 5104), True, 'import fiftyone as fo\n'), ((5118, 5157), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent4.png"""'}), "(filepath='non-existent4.png')\n", (5127, 5157), True, 'import fiftyone as fo\n'), ((5887, 5926), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent1.png"""'}), "(filepath='non-existent1.png')\n", (5896, 5926), True, 'import fiftyone as fo\n'), ((5940, 5979), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent2.png"""'}), "(filepath='non-existent2.png')\n", (5949, 5979), True, 'import fiftyone as fo\n'), ((5993, 6032), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent3.png"""'}), "(filepath='non-existent3.png')\n", (6002, 6032), True, 'import fiftyone as fo\n'), ((6046, 6085), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent4.png"""'}), "(filepath='non-existent4.png')\n", (6055, 6085), True, 'import fiftyone as fo\n'), ((6660, 6699), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent1.png"""'}), "(filepath='non-existent1.png')\n", (6669, 6699), True, 'import fiftyone as fo\n'), ((6713, 6752), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent2.png"""'}), "(filepath='non-existent2.png')\n", (6722, 6752), True, 'import fiftyone as fo\n'), ((6766, 6805), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent3.png"""'}), "(filepath='non-existent3.png')\n", (6775, 6805), True, 'import fiftyone as fo\n'), ((6819, 6858), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent4.png"""'}), "(filepath='non-existent4.png')\n", (6828, 6858), True, 'import fiftyone as fo\n'), ((7674, 7713), 'fiftyone.Sample', 'fo.Sample', ([], 
{'filepath': '"""non-existent1.mp4"""'}), "(filepath='non-existent1.mp4')\n", (7683, 7713), True, 'import fiftyone as fo\n'), ((7727, 7766), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent2.mp4"""'}), "(filepath='non-existent2.mp4')\n", (7736, 7766), True, 'import fiftyone as fo\n'), ((7780, 7819), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent3.mp4"""'}), "(filepath='non-existent3.mp4')\n", (7789, 7819), True, 'import fiftyone as fo\n'), ((7833, 7872), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent4.mp4"""'}), "(filepath='non-existent4.mp4')\n", (7842, 7872), True, 'import fiftyone as fo\n'), ((8609, 8648), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent1.mp4"""'}), "(filepath='non-existent1.mp4')\n", (8618, 8648), True, 'import fiftyone as fo\n'), ((8662, 8701), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent2.mp4"""'}), "(filepath='non-existent2.mp4')\n", (8671, 8701), True, 'import fiftyone as fo\n'), ((8715, 8754), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent3.mp4"""'}), "(filepath='non-existent3.mp4')\n", (8724, 8754), True, 'import fiftyone as fo\n'), ((8768, 8807), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent4.mp4"""'}), "(filepath='non-existent4.mp4')\n", (8777, 8807), True, 'import fiftyone as fo\n'), ((9389, 9428), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent1.mp4"""'}), "(filepath='non-existent1.mp4')\n", (9398, 9428), True, 'import fiftyone as fo\n'), ((9442, 9481), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent2.mp4"""'}), "(filepath='non-existent2.mp4')\n", (9451, 9481), True, 'import fiftyone as fo\n'), ((9495, 9534), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent3.mp4"""'}), "(filepath='non-existent3.mp4')\n", (9504, 9534), True, 'import fiftyone as fo\n'), ((9548, 9587), 'fiftyone.Sample', 'fo.Sample', ([], {'filepath': '"""non-existent4.mp4"""'}), 
"(filepath='non-existent4.mp4')\n", (9557, 9587), True, 'import fiftyone as fo\n'), ((6976, 7023), 'fiftyone.Detection', 'fo.Detection', ([], {'bounding_box': '[0.1, 0.1, 0.8, 0.8]'}), '(bounding_box=[0.1, 0.1, 0.8, 0.8])\n', (6988, 7023), True, 'import fiftyone as fo\n'), ((9737, 9784), 'fiftyone.Detection', 'fo.Detection', ([], {'bounding_box': '[0.1, 0.1, 0.8, 0.8]'}), '(bounding_box=[0.1, 0.1, 0.8, 0.8])\n', (9749, 9784), True, 'import fiftyone as fo\n')] |
# Count beans in an image: threshold to a binary mask, erode so touching
# beans separate, then count connected components (label 0 is background).
import numpy as np
import cv2

image = cv2.imread('beans.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
cv2.imshow('Thresholded', mask)
cv2.waitKey(0)  # press any key to continue...

# erosion with a large structuring element separates touching blobs
structuring_element = np.ones((19, 19), np.uint8)
mask = cv2.erode(mask, structuring_element)
cv2.imshow('After Erosion', mask)
cv2.waitKey(0)  # press any key to continue...

num_labels, labels = cv2.connectedComponents(mask)
cv2.putText(mask, f'There are {num_labels - 1} beans!', (20, 40),
            cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)
cv2.imshow('Num', mask)
cv2.waitKey(0)
| [
"numpy.ones",
"cv2.threshold",
"cv2.erode",
"cv2.imshow",
"cv2.putText",
"cv2.connectedComponents",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread"
] | [((35, 58), 'cv2.imread', 'cv2.imread', (['"""beans.jpg"""'], {}), "('beans.jpg')\n", (45, 58), False, 'import cv2\n'), ((63, 98), 'cv2.cvtColor', 'cv2.cvtColor', (['I', 'cv2.COLOR_BGR2GRAY'], {}), '(I, cv2.COLOR_BGR2GRAY)\n', (75, 98), False, 'import cv2\n'), ((108, 153), 'cv2.threshold', 'cv2.threshold', (['G', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(G, 127, 255, cv2.THRESH_BINARY)\n', (121, 153), False, 'import cv2\n'), ((152, 180), 'cv2.imshow', 'cv2.imshow', (['"""Thresholded"""', 'T'], {}), "('Thresholded', T)\n", (162, 180), False, 'import cv2\n'), ((181, 195), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (192, 195), False, 'import cv2\n'), ((249, 276), 'numpy.ones', 'np.ones', (['(19, 19)', 'np.uint8'], {}), '((19, 19), np.uint8)\n', (256, 276), True, 'import numpy as np\n'), ((279, 299), 'cv2.erode', 'cv2.erode', (['T', 'kernel'], {}), '(T, kernel)\n', (288, 299), False, 'import cv2\n'), ((299, 329), 'cv2.imshow', 'cv2.imshow', (['"""After Erosion"""', 'T'], {}), "('After Erosion', T)\n", (309, 329), False, 'import cv2\n'), ((330, 344), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (341, 344), False, 'import cv2\n'), ((383, 409), 'cv2.connectedComponents', 'cv2.connectedComponents', (['T'], {}), '(T)\n', (406, 409), False, 'import cv2\n'), ((445, 519), 'cv2.putText', 'cv2.putText', (['T', "('There are %d beans!' % (n - 1))", '(20, 40)', 'font', '(1)', '(255)', '(2)'], {}), "(T, 'There are %d beans!' % (n - 1), (20, 40), font, 1, 255, 2)\n", (456, 519), False, 'import cv2\n'), ((512, 532), 'cv2.imshow', 'cv2.imshow', (['"""Num"""', 'T'], {}), "('Num', T)\n", (522, 532), False, 'import cv2\n'), ((533, 547), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (544, 547), False, 'import cv2\n')] |
r"""
Concentration of the eigenvalues
================================
The eigenvalues of the graph Laplacian concentrates to the same value as the
graph becomes full.
"""
import numpy as np
from matplotlib import pyplot as plt
import pygsp as pg
n_neighbors = [1, 2, 5, 8]
fig, axes = plt.subplots(3, len(n_neighbors), figsize=(15, 8))
for k, ax in zip(n_neighbors, axes.T):
graph = pg.graphs.Ring(17, k=k)
graph.compute_fourier_basis()
graph.plot(graph.U[:, 1], ax=ax[0])
ax[0].axis('equal')
ax[1].spy(graph.W)
ax[2].plot(graph.e, '.')
ax[2].set_title('k={}'.format(k))
#graph.set_coordinates('line1D')
#graph.plot(graph.U[:, :4], ax=ax[3], title='')
# Check that the DFT matrix is an eigenbasis of the Laplacian.
U = np.fft.fft(np.identity(graph.n_vertices))
LambdaM = (graph.L.todense().dot(U)) / (U + 1e-15)
# Eigenvalues should be real.
assert np.all(np.abs(np.imag(LambdaM)) < 1e-10)
LambdaM = np.real(LambdaM)
# Check that the eigenvectors are really eigenvectors of the laplacian.
Lambda = np.mean(LambdaM, axis=0)
assert np.all(np.abs(LambdaM - Lambda) < 1e-10)
fig.tight_layout()
| [
"numpy.identity",
"numpy.abs",
"numpy.mean",
"numpy.real",
"pygsp.graphs.Ring",
"numpy.imag"
] | [((393, 416), 'pygsp.graphs.Ring', 'pg.graphs.Ring', (['(17)'], {'k': 'k'}), '(17, k=k)\n', (407, 416), True, 'import pygsp as pg\n'), ((967, 983), 'numpy.real', 'np.real', (['LambdaM'], {}), '(LambdaM)\n', (974, 983), True, 'import numpy as np\n'), ((1073, 1097), 'numpy.mean', 'np.mean', (['LambdaM'], {'axis': '(0)'}), '(LambdaM, axis=0)\n', (1080, 1097), True, 'import numpy as np\n'), ((781, 810), 'numpy.identity', 'np.identity', (['graph.n_vertices'], {}), '(graph.n_vertices)\n', (792, 810), True, 'import numpy as np\n'), ((1116, 1140), 'numpy.abs', 'np.abs', (['(LambdaM - Lambda)'], {}), '(LambdaM - Lambda)\n', (1122, 1140), True, 'import numpy as np\n'), ((926, 942), 'numpy.imag', 'np.imag', (['LambdaM'], {}), '(LambdaM)\n', (933, 942), True, 'import numpy as np\n')] |
# coding=utf-8
import os
import numpy as np
from collections import OrderedDict
from md_utils.md_common import (InvalidDataError, warning)
# Constants #
# error template: comma-separated missing atom IDs, timestep, file name
MISSING_ATOMS_MSG = "Could not find lines for atoms ({}) in timestep {} in file: {}"
# section markers that introduce blocks in a LAMMPS dump file
TSTEP_LINE = 'ITEM: TIMESTEP'
NUM_ATOM_LINE = 'ITEM: NUMBER OF ATOMS'
BOX_LINE = 'ITEM: BOX'
ATOMS_LINE = 'ITEM: ATOMS'
# Logic #
def find_atom_data(lammps_f, atom_ids):
    """Searches the given file location for atom data for the given IDs.

    :param lammps_f: The LAMMPS dump file to search.
    :param atom_ids: The set of atom IDs to collect.
    :return: tuple of (nested dict of found atoms keyed by timestep then atom
        ID, dict of PBC box dimensions keyed by timestep)
    :raises: InvalidDataError If the file is missing atom data or is otherwise malformed.
    """
    tstep_atoms = OrderedDict()
    tstep_box = {}
    atom_count = len(atom_ids)
    empty_dims = np.full(3, np.nan)
    with open(lammps_f) as lfh:
        file_name = os.path.basename(lammps_f)
        tstep_id = None
        box_dim = np.copy(empty_dims)
        tstep_val = "(no value)"
        for line in lfh:
            if line.startswith(TSTEP_LINE):
                try:
                    tstep_val = next(lfh).strip()
                    tstep_id = int(tstep_val)
                except ValueError as e:
                    raise InvalidDataError("Invalid timestep value {}: {}".format(tstep_val, e))
            elif line.startswith(NUM_ATOM_LINE):
                # atom count is not needed, so just skip its value line
                next(lfh)
            elif line.startswith(BOX_LINE):
                try:
                    # each box line holds (lo, hi) bounds for one dimension
                    for coord_id in range(len(box_dim)):
                        box_vals = list(map(float, next(lfh).strip().split()))
                        if len(box_vals) == 2:
                            box_dim[coord_id] = box_vals[1] - box_vals[0]
                except (ValueError, KeyError) as e:
                    raise InvalidDataError("Invalid PBC value read on timestep {}: {}".format(tstep_val, e))
            elif tstep_id is not None:
                atom_lines = find_atom_lines(lfh, atom_ids, tstep_id, file_name)
                if len(atom_lines) != atom_count:
                    try:
                        missing_atoms_err(atom_ids, atom_lines, tstep_id, file_name)
                    except InvalidDataError as e:
                        warning(e)
                        warning("Skipping timestep and continuing.")
                else:
                    tstep_atoms[tstep_id] = atom_lines
                    tstep_box[tstep_id] = box_dim
                tstep_id = None
                # BUG FIX: reset to a *fresh* array. The previous code
                # assigned `empty_dims` itself, so every later timestep shared
                # (and mutated) the same array, silently overwriting the box
                # dimensions stored for earlier timesteps.
                box_dim = np.copy(empty_dims)
    return tstep_atoms, tstep_box
def find_atom_lines(lfh, atom_ids, tstep_id, file_name):
    """Collects the atom data for the given IDs, returning a dict keyed by
    atom ID with each value formatted as a six-element list containing:

    * Molecule ID (int)
    * Atom type (int)
    * Charge (float)
    * X (float)
    * Y (float)
    * Z (float)

    :param lfh: A file handle for a LAMMPS file, positioned at the atom lines.
    :param atom_ids: The set of atom IDs to collect.
    :param tstep_id: The ID for the current time step.
    :param file_name: The file name (basename) of the LAMMPS file, used for
        error reporting.
    :return: A dict of parsed atom values keyed by atom ID (int).
    :raises: InvalidDataError If the time step section is missing atom data
        or is otherwise malformed.
    """
    collected = {}
    wanted_count = len(atom_ids)
    for raw_line in lfh:
        fields = raw_line.split()
        if len(fields) == 7 and int(fields[0]) in atom_ids:
            # first three columns are ints (ID, molecule ID, type); the last
            # four are floats (charge, x, y, z)
            # noinspection PyTypeChecker
            parsed = [int(val) for val in fields[:3]] + [float(val) for val in fields[-4:]]
            collected[parsed[0]] = parsed[1:]
            if len(collected) == wanted_count:
                # all requested atoms found; stop before consuming more lines
                return collected
        elif raw_line.startswith(TSTEP_LINE):
            # reached the next timestep without finding every requested atom
            missing_atoms_err(atom_ids, collected, tstep_id, file_name)
    return collected
# Exception Creators #
def missing_atoms_err(atom_ids, found_atoms, tstep_id, file_name):
    """Creates and raises an exception when the function is unable to find
    atom data for all of the requested IDs.

    :param atom_ids: The set of atom IDs that were requested.
    :param found_atoms: The dict of atoms actually found, keyed by atom ID.
    :param tstep_id: The time step ID where the atom data was missing.
    :param file_name: The file name where the atom data was missing.
    :raises: InvalidDataError Describing the missing atom data.
    """
    # sort so the reported IDs are deterministic (set order is arbitrary)
    missing = map(str, sorted(atom_ids.difference(found_atoms.keys())))
    raise InvalidDataError(MISSING_ATOMS_MSG.format(",".join(missing),
                                                    tstep_id, file_name))
| [
"numpy.copy",
"collections.OrderedDict",
"md_utils.md_common.warning",
"os.path.basename",
"numpy.full"
] | [((810, 823), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (821, 823), False, 'from collections import OrderedDict\n'), ((891, 909), 'numpy.full', 'np.full', (['(3)', 'np.nan'], {}), '(3, np.nan)\n', (898, 909), True, 'import numpy as np\n'), ((963, 989), 'os.path.basename', 'os.path.basename', (['lammps_f'], {}), '(lammps_f)\n', (979, 989), False, 'import os\n'), ((1032, 1051), 'numpy.copy', 'np.copy', (['empty_dims'], {}), '(empty_dims)\n', (1039, 1051), True, 'import numpy as np\n'), ((2414, 2424), 'md_utils.md_common.warning', 'warning', (['e'], {}), '(e)\n', (2421, 2424), False, 'from md_utils.md_common import InvalidDataError, warning\n'), ((2449, 2493), 'md_utils.md_common.warning', 'warning', (['"""Skipping timestep and continuing."""'], {}), "('Skipping timestep and continuing.')\n", (2456, 2493), False, 'from md_utils.md_common import InvalidDataError, warning\n')] |
"""
Base classes for representing signals.
"""
import logging
from copy import deepcopy
from functools import partial
import numpy as np
import xarray as xr
from ..models.model import Model
from scipy.signal import butter, detrend, get_window, hilbert
from scipy.signal import resample as scipy_resample
from scipy.signal import sosfiltfilt
NC_EXT = ".nc"
def scipy_iir_filter_data(x, sfreq, l_freq, h_freq, l_trans_bandwidth=None, h_trans_bandwidth=None, **kwargs):
    """
    Custom, scipy based filtering function with basic butterworth filter,
    applied forwards and backwards (zero phase distortion).

    :param x: data to be filtered, time is the last axis
    :type x: np.ndarray
    :param sfreq: sampling frequency of the data in Hz
    :type sfreq: float
    :param l_freq: frequency below which to filter the data in Hz
    :type l_freq: float|None
    :param h_freq: frequency above which to filter the data in Hz
    :type h_freq: float|None
    :param l_trans_bandwidth: keeping for compatibility with mne
    :type l_trans_bandwidth: None
    :param h_trans_bandwidth: keeping for compatibility with mne
    :type h_trans_bandwidth: None
    :param kwargs: may contain `order` (int, default 8): Butterworth order
    :return: filtered data
    :rtype: np.ndarray
    :raises ValueError: if both `l_freq` and `h_freq` are None, or equal
    """
    if l_freq is None and h_freq is None:
        # previously this fell through and crashed with a NameError on `Wn`;
        # fail with an explicit, catchable error instead
        raise ValueError("At least one of `l_freq` and `h_freq` must be given")
    nyq = 0.5 * sfreq
    if l_freq is not None and h_freq is not None:
        # band filter: bandpass when l_freq < h_freq, bandstop otherwise;
        # equal frequencies previously left `btype` unbound
        if l_freq == h_freq:
            raise ValueError("`l_freq` and `h_freq` must differ for a band filter")
        btype = "bandpass" if l_freq < h_freq else "bandstop"
        Wn = [l_freq / nyq, h_freq / nyq]
    elif l_freq is not None:
        # only a lower cutoff: high-pass filter
        Wn = l_freq / nyq
        btype = "highpass"
    else:
        # only an upper cutoff: low-pass filter
        Wn = h_freq / nyq
        btype = "lowpass"
    # design as second-order sections for numerical stability
    sos = butter(N=kwargs.pop("order", 8), Wn=Wn, btype=btype, output="sos")
    # forward-backward filtering for zero phase distortion
    return sosfiltfilt(sos, x, axis=-1)
class Signal:
    """
    Base class representing a signal: an `xr.DataArray` with a time dimension
    plus descriptive metadata and a record of processing steps.
    """

    # human-readable metadata describing the signal
    name = ""
    label = ""
    signal_type = ""
    unit = ""
    description = ""
    # attributes carried over to derived signals (see `__finalize__`) and
    # written to file attributes on save
    _copy_attributes = [
        "name",
        "label",
        "signal_type",
        "unit",
        "description",
        "process_steps",
    ]
    # attribute-name prefix under which processing steps are serialized
    PROCESS_STEPS_KEY = "process_steps"
    @classmethod
    def from_model_output(cls, model, group="", time_in_ms=True):
        """
        Initial Signal from modelling output.

        :param model: simulation model whose output to wrap; must be a `Model`
        :param group: which output group of the model to use
        :type group: str
        :param time_in_ms: whether the model's time dimension is in
            milliseconds (converted to seconds by the constructor)
        :type time_in_ms: bool
        :return: new signal wrapping the model's output
        """
        assert isinstance(model, Model)
        return cls(model.xr(group=group), time_in_ms=time_in_ms)
    @classmethod
    def from_file(cls, filename):
        """
        Load signal from saved netCDF file and restore its metadata
        attributes, including the ordered list of processing steps.

        :param filename: filename for the Signal; the ".nc" extension is
            appended when missing
        :type filename: str
        :return: loaded signal
        """
        if not filename.endswith(NC_EXT):
            filename += NC_EXT
        # load NC file
        xarray = xr.load_dataarray(filename)
        # init class
        signal = cls(xarray)
        # if nc file has attributes, copy them to the signal instance
        process_steps = []
        if xarray.attrs:
            for k, v in xarray.attrs.items():
                if cls.PROCESS_STEPS_KEY in k:
                    # keys look like "process_steps_<idx>"; recover the index
                    # and rebuild the steps list in the original order
                    idx = int(k[len(cls.PROCESS_STEPS_KEY) + 1 :])
                    process_steps.insert(idx, v)
                else:
                    setattr(signal, k, v)
        if len(process_steps) > 0:
            setattr(signal, cls.PROCESS_STEPS_KEY, process_steps)
        else:
            logging.warning("No metadata found, setting empty...")
        return signal
def __init__(self, data, time_in_ms=False):
"""
:param data: data for the signal, assumes time dimension with time in seconds
:type data: xr.DataArray
:param time_in_ms: whether time dimension is in ms
:type time_in_ms: bool
"""
assert isinstance(data, xr.DataArray)
data = deepcopy(data)
assert "time" in data.dims, "DataArray must have time axis"
if time_in_ms:
data["time"] = data["time"] / 1000.0
data["time"] = np.around(data["time"], 6)
self.data = data
# compute dt and sampling frequency
self.dt = np.around(np.diff(data.time).mean(), 6)
self.sampling_frequency = 1.0 / self.dt
self.process_steps = list()
def __str__(self):
"""
String representation.
"""
return (
f"{self.name} representing {self.signal_type} signal with unit of "
f"{self.unit} with user-provided description: `{self.description}`. "
f"Shape of the signal is {self.shape} with dimensions {self.data.dims}."
)
def __repr__(self):
"""
Representation.
"""
return self.__str__()
def __eq__(self, other):
"""
Comparison operator.
:param other: other `Signal` to compare with
:type other: `Signal`
:return: whether two `Signals` are the same
:rtype: bool
"""
assert isinstance(other, Signal)
# assert data are the same
try:
xr.testing.assert_allclose(self.data, other.data)
eq = True
except AssertionError:
eq = False
# check attributes, but if not equal, only warn the user
for attr in self._copy_attributes:
if getattr(self, attr) != getattr(other, attr):
logging.warning(f"`{attr}` not equal between signals.")
return eq
    def __getitem__(self, pos):
        """
        Select a single entry along the "output" dimension.

        :param pos: label of the output to select
        :return: new signal containing only the selected output, with the
            selection recorded in its processing steps
        """
        add_steps = [f"select `{pos}` output"]
        return self.__constructor__(self.data.sel(output=pos)).__finalize__(self, add_steps)
def __finalize__(self, other, add_steps=None):
"""
Copy attrbutes from other to self. Used when constructing class
instance with different data, but same metadata.
:param other: other instance of `Signal`
:type other: `Signal`
:param add_steps: add steps to preprocessing
:type add_steps: list|None
"""
assert isinstance(other, Signal)
for attr in self._copy_attributes:
setattr(self, attr, deepcopy(getattr(other, attr)))
if add_steps is not None:
self.process_steps += add_steps
return self
@property
def __constructor__(self):
"""
Return constructor, so that each child class would initiate a new
instance of the correct class, i.e. first in the method resolution
order.
"""
return self.__class__.mro()[0]
def _write_attrs_to_xr(self):
"""
Copy attributes to xarray before saving.
"""
# write attritubes to xarray
for attr in self._copy_attributes:
value = getattr(self, attr)
# if list need to unwrap
if isinstance(value, (list, tuple)):
for idx, val in enumerate(value):
self.data.attrs[f"{attr}_{idx}"] = val
else:
self.data.attrs[attr] = deepcopy(value)
def save(self, filename):
"""
Save signal.
:param filename: filename to save, currently saves to netCDF file, which is natively supported by xarray
:type filename: str
"""
self._write_attrs_to_xr()
if not filename.endswith(NC_EXT):
filename += NC_EXT
self.data.to_netcdf(filename)
def iterate(self, return_as="signal"):
"""
Return iterator over columns, so univariate measures can be computed
per column. Loops over tuples as (variable name, timeseries).
:param return_as: how to return columns: `xr` as xr.DataArray, `signal` as
instance of NeuroSignal with the same attributes as the mother signal
:type return_as: str
"""
if return_as == "xr":
yield from self.data.stack({"all": self.dims_not_time}).groupby("all")
elif return_as == "signal":
for name, column in self.data.stack({"all": self.dims_not_time}).groupby("all"):
yield name, self.__constructor__(column).__finalize__(self, [f"select {column.name}"])
else:
raise ValueError(f"Data type not understood: {return_as}")
def sel(self, sel_args, inplace=True):
"""
Subselect part of signal using pandas' `sel`, i.e. selecting by actual
physical index, hence time in seconds.
:param sel_args: arguments you'd give to df.sel[], i.e. slice of times
you want to select, in seconds as a len=2 list or tuple
:type sel_args: tuple|list
:param inplace: whether to do the operation in place or return
:type inplace: bool
"""
assert len(sel_args) == 2, "Must provide 2 arguments"
selected = self.data.sel(time=slice(sel_args[0], sel_args[1]))
add_steps = [f"select {sel_args[0] or 'x'}:{sel_args[1] or 'x'}s"]
if inplace:
self.data = selected
self.process_steps += add_steps
else:
return self.__constructor__(selected).__finalize__(self, add_steps)
    def isel(self, isel_args, inplace=True):
        """
        Subselect part of signal using xarray's `isel`, i.e. selecting by
        integer index along the time axis.
        :param isel_args: arguments you'd give to data.isel(), i.e. slice of
            indices you want to select, as a len=2 list or tuple
        :type isel_args: tuple|list
        :param inplace: whether to do the operation in place or return
        :type inplace: bool
        """
        assert len(isel_args) == 2, "Must provide 2 arguments"
        selected = self.data.isel(time=slice(isel_args[0], isel_args[1]))
        # translate index bounds into seconds for the bookkeeping string;
        # open bounds are rendered as "x"
        start = isel_args[0] * self.dt if isel_args[0] is not None else "x"
        end = isel_args[1] * self.dt if isel_args[1] is not None else "x"
        add_steps = [f"select {start}:{end}s"]
        if inplace:
            self.data = selected
            self.process_steps += add_steps
        else:
            return self.__constructor__(selected).__finalize__(self, add_steps)
    def sliding_window(self, length, step=1, window_function="boxcar", lengths_in_seconds=False):
        """
        Return iterator over sliding windows with windowing function applied.
        Each window has length `length` and each is translated by `step` steps.
        For no windowing function use "boxcar". If the last window would have
        the same length as other, it is omitted, i.e. last window does not have
        to end with the final timeseries point!
        :param length: length of the window, can be index or time in seconds,
            see `lengths_in_seconds`
        :type length: int|float
        :param step: how much to translate window in the temporal sense, can be
            index or time in seconds, see `lengths_in_seconds`
        :type step: int|float
        :param window_function: windowing function to use, this is passed to
            `get_window()`; see `scipy.signal.windows.get_window` documentation
        :type window_function: str|tuple|float
        :param lengths_in_seconds: if True, `length` and `step` are interpreted
            in seconds, if False they are indices
        :type lengths_in_seconds: bool
        :yield: generator with windowed Signals
        """
        # convert second-based lengths to sample counts
        if lengths_in_seconds:
            length = int(length / self.dt)
            step = int(step / self.dt)
        assert (
            length < self.data.time.shape[0]
        ), f"Length must be smaller than time span of the timeseries: {self.data.time.shape[0]}"
        assert step <= length, "Step cannot be larger than length, some part of timeseries would be omitted!"
        # rolling start index of the current window
        current_idx = 0
        add_steps = f"{str(window_function)} window: "
        # taper applied multiplicatively to every window
        windowing_function = get_window(window_function, Nx=length)
        while current_idx <= (self.data.time.shape[0] - length):
            yield self.__constructor__(
                self.data.isel(time=slice(current_idx, current_idx + length)) * windowing_function
            ).__finalize__(self, [add_steps + f"{current_idx}:{current_idx + length}"])
            current_idx += step
    @property
    def shape(self):
        """
        Return shape of the data. Time axis is the first one.
        """
        # delegates straight to the wrapped DataArray
        return self.data.shape
@property
def dims_not_time(self):
"""
Return list of dimensions that are not time.
"""
return [dim for dim in self.data.dims if dim != "time"]
@property
def coords_not_time(self):
"""
Return dict with all coordinates except time.
"""
return {k: v.values for k, v in self.data.coords.items() if k != "time"}
    @property
    def start_time(self):
        """
        Return starting time of the signal, i.e. the first entry of the time
        axis (in seconds).
        """
        return self.data.time.values[0]
    @property
    def end_time(self):
        """
        Return ending time of the signal, i.e. the last entry of the time
        axis (in seconds).
        """
        return self.data.time.values[-1]
    @property
    def preprocessing_steps(self):
        """
        Return preprocessing steps done on the data, joined into a single
        " -> "-separated string.
        """
        return " -> ".join(self.process_steps)
    def pad(
        self, how_much, in_seconds=False, padding_type="constant", side="both", inplace=True, **kwargs,
    ):
        """
        Pad signal by `how_much` on given side of given type.
        :param how_much: how much we should pad, can be time points, or seconds,
            see `in_seconds`
        :type how_much: float|int
        :param in_seconds: whether `how_much` is in seconds, if False, it is
            number of time points
        :type in_seconds: bool
        :param padding_type: how to pad the signal, see `np.pad` documentation
        :type padding_type: str
        :param side: which side to pad - "before", "after", or "both"
        :type side: str
        :param inplace: whether to do the operation in place or return
        :type inplace: bool
        :kwargs: passed to `np.pad`
        """
        # convert seconds to a whole number of samples
        if in_seconds:
            how_much = int(np.around(how_much / self.dt))
        # build pad widths and extrapolated time stamps per side
        if side == "before":
            pad_width = (how_much, 0)
            pad_times = np.arange(-how_much, 0) * self.dt + self.data.time.values[0]
            new_times = np.concatenate([pad_times, self.data.time.values], axis=0)
        elif side == "after":
            pad_width = (0, how_much)
            pad_times = np.arange(1, how_much + 1) * self.dt + self.data.time.values[-1]
            new_times = np.concatenate([self.data.time.values, pad_times], axis=0)
        elif side == "both":
            pad_width = (how_much, how_much)
            pad_before = np.arange(-how_much, 0) * self.dt + self.data.time.values[0]
            pad_after = np.arange(1, how_much + 1) * self.dt + self.data.time.values[-1]
            new_times = np.concatenate([pad_before, self.data.time.values, pad_after], axis=0)
            # reword the side for the bookkeeping string below
            side += " sides"
        else:
            raise ValueError(f"Unknown padding side: {side}")
        # add padding for other axes than time - zeroes
        pad_width = [(0, 0)] * len(self.dims_not_time) + [pad_width]
        padded = np.pad(self.data.values, pad_width, mode=padding_type, **kwargs)
        # to dataframe
        padded = xr.DataArray(padded, dims=self.data.dims, coords={**self.coords_not_time, "time": new_times})
        add_steps = [f"{how_much * self.dt}s {padding_type} {side} padding"]
        if inplace:
            self.data = padded
            self.process_steps += add_steps
        else:
            return self.__constructor__(padded).__finalize__(self, add_steps)
def normalize(self, std=False, inplace=True):
"""
De-mean the timeseries. Optionally also standardise.
:param std: normalize by std, i.e. to unit variance
:type std: bool
:param inplace: whether to do the operation in place or return
:type inplace: bool
"""
def norm_func(x, dim):
demeaned = x - x.mean(dim=dim)
if std:
return demeaned / x.std(dim=dim)
else:
return demeaned
normalized = norm_func(self.data, dim="time")
add_steps = ["normalize", "standardize"] if std else ["normalize"]
if inplace:
self.data = normalized
self.process_steps += add_steps
else:
return self.__constructor__(normalized).__finalize__(self, add_steps)
def resample(self, to_frequency, inplace=True):
"""
Resample signal to target frequency.
:param to_frequency: target frequency of the signal, in Hz
:type to_frequency: float
:param inplace: whether to do the operation in place or return
:type inplace: bool
"""
to_frequency = float(to_frequency)
try:
from mne.filter import resample
resample_func = partial(
resample, up=to_frequency, down=self.sampling_frequency, npad="auto", axis=-1, pad="edge"
)
except ImportError:
logging.warning("`mne` module not found, falling back to basic scipy's function")
def resample_func(x):
return scipy_resample(
x,
num=int(round((to_frequency / self.sampling_frequency) * self.data.shape[-1])),
axis=-1,
window="boxcar",
)
resampled = resample_func(self.data.values)
# construct new times
new_times = (np.arange(resampled.shape[-1], dtype=np.float) / to_frequency) + self.data.time.values[0]
# to dataframe
resampled = xr.DataArray(resampled, dims=self.data.dims, coords={**self.coords_not_time, "time": new_times})
add_steps = [f"resample to {to_frequency}Hz"]
if inplace:
self.data = resampled
self.sampling_frequency = to_frequency
self.dt = np.around(np.diff(resampled.time).mean(), 6)
self.process_steps += add_steps
else:
return self.__constructor__(resampled).__finalize__(self, add_steps)
def hilbert_transform(self, return_as="complex", inplace=True):
"""
Perform hilbert transform on the signal resulting in analytic signal.
:param return_as: what to return
`complex` will compute only analytical signal
`amplitude` will compute amplitude, hence abs(H(x))
`phase_wrapped` will compute phase, hence angle(H(x)), in -pi,pi
`phase_unwrapped` will compute phase in a continuous sense, hence
monotonic
:param inplace: whether to do the operation in place or return
:type inplace: bool
"""
analytic = hilbert(self.data, axis=-1)
if return_as == "amplitude":
analytic = np.abs(analytic)
add_steps = ["Hilbert - amplitude"]
elif return_as == "phase_unwrapped":
analytic = np.unwrap(np.angle(analytic))
add_steps = ["Hilbert - unwrapped phase"]
elif return_as == "phase_wrapped":
analytic = np.angle(analytic)
add_steps = ["Hilbert - wrapped phase"]
elif return_as == "complex":
add_steps = ["Hilbert - complex"]
else:
raise ValueError(f"Do not know how to return: {return_as}")
analytic = xr.DataArray(analytic, dims=self.data.dims, coords=self.data.coords)
if inplace:
self.data = analytic
self.process_steps += add_steps
else:
return self.__constructor__(analytic).__finalize__(self, add_steps)
def detrend(self, segments=None, inplace=True):
"""
Linearly detrend signal. If segments are given, detrending will be
performed in each part.
:param segments: segments for detrening, if None will detrend whole
signal, given as indices of the time array
:type segments: list|None
:param inplace: whether to do the operation in place or return
:type inplace: bool
"""
segments = segments or 0
detrended = detrend(self.data, type="linear", bp=segments, axis=-1)
detrended = xr.DataArray(detrended, dims=self.data.dims, coords=self.data.coords)
segments_text = f" with segments: {segments}" if segments != 0 else ""
add_steps = [f"detrend{segments_text}"]
if inplace:
self.data = detrended
self.process_steps += add_steps
else:
return self.__constructor__(detrended).__finalize__(self, add_steps)
    def filter(
        self, low_freq, high_freq, l_trans_bandwidth="auto", h_trans_bandwidth="auto", inplace=True, **kwargs,
    ):
        """
        Filter data. Can be:
            low-pass (low_freq is None, high_freq is not None),
            high-pass (high_freq is None, low_freq is not None),
            band-pass (l_freq < h_freq),
            band-stop (l_freq > h_freq) filter type
        :param low_freq: frequency below which to filter the data
        :type low_freq: float|None
        :param high_freq: frequency above which to filter the data
        :type high_freq: float|None
        :param l_trans_bandwidth: transition band width for low frequency
        :type l_trans_bandwidth: float|str
        :param h_trans_bandwidth: transition band width for high frequency
        :type h_trans_bandwidth: float|str
        :param inplace: whether to do the operation in place or return
        :type inplace: bool
        :**kwargs: possible keywords to `mne.filter.create_filter`:
            `filter_length`="auto",
            `method`="fir",
            `iir_params`=None
            `phase`="zero",
            `fir_window`="hamming",
            `fir_design`="firwin"
        """
        # prefer mne's filtering; fall back to the project's scipy-based
        # helper when mne is not installed
        try:
            from mne.filter import filter_data
        except ImportError:
            logging.warning("`mne` module not found, falling back to basic scipy's function")
            filter_data = scipy_iir_filter_data
        filtered = filter_data(
            self.data.values,  # times has to be the last axis
            sfreq=self.sampling_frequency,
            l_freq=low_freq,
            h_freq=high_freq,
            l_trans_bandwidth=l_trans_bandwidth,
            h_trans_bandwidth=h_trans_bandwidth,
            **kwargs,
        )
        add_steps = [f"filter: low {low_freq or 'x'}Hz - high {high_freq or 'x'}Hz"]
        # to dataframe
        filtered = xr.DataArray(filtered, dims=self.data.dims, coords=self.data.coords)
        if inplace:
            self.data = filtered
            self.process_steps += add_steps
        else:
            return self.__constructor__(filtered).__finalize__(self, add_steps)
    def functional_connectivity(self, fc_function=np.corrcoef):
        """
        Compute and return functional connectivity from the data.
        :param fc_function: function which to use for FC computation, should
            take 2D array as space x time and convert it to space x space with
            desired measure
        """
        # FC needs at least two spatial timeseries to relate
        if len(self.data["space"]) <= 1:
            logging.error("Cannot compute functional connectivity from one timeseries.")
            return None
        if self.data.ndim == 3:
            assert callable(fc_function)
            fcs = []
            # one FC matrix per entry of the `output` dimension
            for output in self.data["output"]:
                current_slice = self.data.sel({"output": output})
                assert current_slice.ndim == 2
                fcs.append(fc_function(current_slice.values))
            # NOTE(review): "space" appears twice in dims; recent xarray
            # versions reject repeated dimension names — verify downstream use
            return xr.DataArray(
                np.array(fcs),
                dims=["output", "space", "space"],
                coords={"output": self.data.coords["output"], "space": self.data.coords["space"]},
            )
        if self.data.ndim == 2:
            return xr.DataArray(
                fc_function(self.data.values), dims=["space", "space"], coords={"space": self.data.coords["space"]},
            )
def apply(self, func, inplace=True):
"""
Apply func for each timeseries.
:param func: function to be applied for each 1D timeseries
:type func: callable
:param inplace: whether to do the operation in place or return
:type inplace: bool
"""
assert callable(func)
try:
# this will work for element-wise function that does not reduces dimensions
processed = xr.apply_ufunc(func, self.data, input_core_dims=[["time"]], output_core_dims=[["time"]])
add_steps = [f"apply `{func.__name__}` function over time dim"]
if inplace:
self.data = processed
self.process_steps += add_steps
else:
return self.__constructor__(processed).__finalize__(self, add_steps)
except ValueError:
# this works for functions that reduce time dimension
processed = xr.apply_ufunc(func, self.data, input_core_dims=[["time"]])
logging.warning(
f"Shape changed after operation! Old shape: {self.shape}, new "
f"shape: {processed.shape}; Cannot cast to Signal class, "
"returing as `xr.DataArray`"
)
return processed
class VoltageSignal(Signal):
    """Signal subclass: population mean membrane potential, in mV."""
    name = "Population mean membrane potential"
    label = "V"
    signal_type = "voltage"
    unit = "mV"
class RatesSignal(Signal):
    """Signal subclass: population firing rate, in Hz."""
    name = "Population firing rate"
    label = "q"
    signal_type = "rate"
    unit = "Hz"
class BOLDSignal(Signal):
    """Signal subclass: blood oxygen level-dependent signal, in %."""
    name = "Population blood oxygen level-dependent signal"
    label = "BOLD"
    signal_type = "bold"
    unit = "%"
| [
"mne.filter.filter_data",
"scipy.signal.detrend",
"numpy.array",
"copy.deepcopy",
"xarray.apply_ufunc",
"logging.error",
"scipy.signal.get_window",
"numpy.arange",
"xarray.testing.assert_allclose",
"numpy.diff",
"scipy.signal.sosfiltfilt",
"numpy.concatenate",
"numpy.abs",
"logging.warning... | [((1864, 1892), 'scipy.signal.sosfiltfilt', 'sosfiltfilt', (['sos', 'x'], {'axis': '(-1)'}), '(sos, x, axis=-1)\n', (1875, 1892), False, 'from scipy.signal import sosfiltfilt\n'), ((2748, 2775), 'xarray.load_dataarray', 'xr.load_dataarray', (['filename'], {}), '(filename)\n', (2765, 2775), True, 'import xarray as xr\n'), ((3761, 3775), 'copy.deepcopy', 'deepcopy', (['data'], {}), '(data)\n', (3769, 3775), False, 'from copy import deepcopy\n'), ((3939, 3965), 'numpy.around', 'np.around', (["data['time']", '(6)'], {}), "(data['time'], 6)\n", (3948, 3965), True, 'import numpy as np\n'), ((11772, 11810), 'scipy.signal.get_window', 'get_window', (['window_function'], {'Nx': 'length'}), '(window_function, Nx=length)\n', (11782, 11810), False, 'from scipy.signal import butter, detrend, get_window, hilbert\n'), ((15132, 15196), 'numpy.pad', 'np.pad', (['self.data.values', 'pad_width'], {'mode': 'padding_type'}), '(self.data.values, pad_width, mode=padding_type, **kwargs)\n', (15138, 15196), True, 'import numpy as np\n'), ((15237, 15334), 'xarray.DataArray', 'xr.DataArray', (['padded'], {'dims': 'self.data.dims', 'coords': "{**self.coords_not_time, 'time': new_times}"}), "(padded, dims=self.data.dims, coords={**self.coords_not_time,\n 'time': new_times})\n", (15249, 15334), True, 'import xarray as xr\n'), ((17655, 17755), 'xarray.DataArray', 'xr.DataArray', (['resampled'], {'dims': 'self.data.dims', 'coords': "{**self.coords_not_time, 'time': new_times}"}), "(resampled, dims=self.data.dims, coords={**self.coords_not_time,\n 'time': new_times})\n", (17667, 17755), True, 'import xarray as xr\n'), ((18751, 18778), 'scipy.signal.hilbert', 'hilbert', (['self.data'], {'axis': '(-1)'}), '(self.data, axis=-1)\n', (18758, 18778), False, 'from scipy.signal import butter, detrend, get_window, hilbert\n'), ((19382, 19450), 'xarray.DataArray', 'xr.DataArray', (['analytic'], {'dims': 'self.data.dims', 'coords': 'self.data.coords'}), '(analytic, dims=self.data.dims, 
coords=self.data.coords)\n', (19394, 19450), True, 'import xarray as xr\n'), ((20144, 20199), 'scipy.signal.detrend', 'detrend', (['self.data'], {'type': '"""linear"""', 'bp': 'segments', 'axis': '(-1)'}), "(self.data, type='linear', bp=segments, axis=-1)\n", (20151, 20199), False, 'from scipy.signal import butter, detrend, get_window, hilbert\n'), ((20220, 20289), 'xarray.DataArray', 'xr.DataArray', (['detrended'], {'dims': 'self.data.dims', 'coords': 'self.data.coords'}), '(detrended, dims=self.data.dims, coords=self.data.coords)\n', (20232, 20289), True, 'import xarray as xr\n'), ((22070, 22258), 'mne.filter.filter_data', 'filter_data', (['self.data.values'], {'sfreq': 'self.sampling_frequency', 'l_freq': 'low_freq', 'h_freq': 'high_freq', 'l_trans_bandwidth': 'l_trans_bandwidth', 'h_trans_bandwidth': 'h_trans_bandwidth'}), '(self.data.values, sfreq=self.sampling_frequency, l_freq=\n low_freq, h_freq=high_freq, l_trans_bandwidth=l_trans_bandwidth,\n h_trans_bandwidth=h_trans_bandwidth, **kwargs)\n', (22081, 22258), False, 'from mne.filter import filter_data\n'), ((22505, 22573), 'xarray.DataArray', 'xr.DataArray', (['filtered'], {'dims': 'self.data.dims', 'coords': 'self.data.coords'}), '(filtered, dims=self.data.dims, coords=self.data.coords)\n', (22517, 22573), True, 'import xarray as xr\n'), ((3341, 3395), 'logging.warning', 'logging.warning', (['"""No metadata found, setting empty..."""'], {}), "('No metadata found, setting empty...')\n", (3356, 3395), False, 'import logging\n'), ((4974, 5023), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (5000, 5023), True, 'import xarray as xr\n'), ((14242, 14300), 'numpy.concatenate', 'np.concatenate', (['[pad_times, self.data.time.values]'], {'axis': '(0)'}), '([pad_times, self.data.time.values], axis=0)\n', (14256, 14300), True, 'import numpy as np\n'), ((16886, 16989), 'functools.partial', 'partial', (['resample'], {'up': 'to_frequency', 
'down': 'self.sampling_frequency', 'npad': '"""auto"""', 'axis': '(-1)', 'pad': '"""edge"""'}), "(resample, up=to_frequency, down=self.sampling_frequency, npad=\n 'auto', axis=-1, pad='edge')\n", (16893, 16989), False, 'from functools import partial\n'), ((18839, 18855), 'numpy.abs', 'np.abs', (['analytic'], {}), '(analytic)\n', (18845, 18855), True, 'import numpy as np\n'), ((23158, 23234), 'logging.error', 'logging.error', (['"""Cannot compute functional connectivity from one timeseries."""'], {}), "('Cannot compute functional connectivity from one timeseries.')\n", (23171, 23234), False, 'import logging\n'), ((24457, 24549), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['func', 'self.data'], {'input_core_dims': "[['time']]", 'output_core_dims': "[['time']]"}), "(func, self.data, input_core_dims=[['time']],\n output_core_dims=[['time']])\n", (24471, 24549), True, 'import xarray as xr\n'), ((5284, 5339), 'logging.warning', 'logging.warning', (['f"""`{attr}` not equal between signals."""'], {}), "(f'`{attr}` not equal between signals.')\n", (5299, 5339), False, 'import logging\n'), ((6975, 6990), 'copy.deepcopy', 'deepcopy', (['value'], {}), '(value)\n', (6983, 6990), False, 'from copy import deepcopy\n'), ((14035, 14064), 'numpy.around', 'np.around', (['(how_much / self.dt)'], {}), '(how_much / self.dt)\n', (14044, 14064), True, 'import numpy as np\n'), ((14482, 14540), 'numpy.concatenate', 'np.concatenate', (['[self.data.time.values, pad_times]'], {'axis': '(0)'}), '([self.data.time.values, pad_times], axis=0)\n', (14496, 14540), True, 'import numpy as np\n'), ((17055, 17141), 'logging.warning', 'logging.warning', (['"""`mne` module not found, falling back to basic scipy\'s function"""'], {}), '(\n "`mne` module not found, falling back to basic scipy\'s function")\n', (17070, 17141), False, 'import logging\n'), ((17522, 17568), 'numpy.arange', 'np.arange', (['resampled.shape[-1]'], {'dtype': 'np.float'}), '(resampled.shape[-1], dtype=np.float)\n', (17531, 17568), 
True, 'import numpy as np\n'), ((21920, 22006), 'logging.warning', 'logging.warning', (['"""`mne` module not found, falling back to basic scipy\'s function"""'], {}), '(\n "`mne` module not found, falling back to basic scipy\'s function")\n', (21935, 22006), False, 'import logging\n'), ((23625, 23638), 'numpy.array', 'np.array', (['fcs'], {}), '(fcs)\n', (23633, 23638), True, 'import numpy as np\n'), ((24952, 25011), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['func', 'self.data'], {'input_core_dims': "[['time']]"}), "(func, self.data, input_core_dims=[['time']])\n", (24966, 25011), True, 'import xarray as xr\n'), ((25024, 25195), 'logging.warning', 'logging.warning', (['f"""Shape changed after operation! Old shape: {self.shape}, new shape: {processed.shape}; Cannot cast to Signal class, returing as `xr.DataArray`"""'], {}), "(\n f'Shape changed after operation! Old shape: {self.shape}, new shape: {processed.shape}; Cannot cast to Signal class, returing as `xr.DataArray`'\n )\n", (25039, 25195), False, 'import logging\n'), ((4063, 4081), 'numpy.diff', 'np.diff', (['data.time'], {}), '(data.time)\n', (4070, 4081), True, 'import numpy as np\n'), ((14157, 14180), 'numpy.arange', 'np.arange', (['(-how_much)', '(0)'], {}), '(-how_much, 0)\n', (14166, 14180), True, 'import numpy as np\n'), ((14814, 14884), 'numpy.concatenate', 'np.concatenate', (['[pad_before, self.data.time.values, pad_after]'], {'axis': '(0)'}), '([pad_before, self.data.time.values, pad_after], axis=0)\n', (14828, 14884), True, 'import numpy as np\n'), ((18982, 19000), 'numpy.angle', 'np.angle', (['analytic'], {}), '(analytic)\n', (18990, 19000), True, 'import numpy as np\n'), ((19122, 19140), 'numpy.angle', 'np.angle', (['analytic'], {}), '(analytic)\n', (19130, 19140), True, 'import numpy as np\n'), ((14393, 14419), 'numpy.arange', 'np.arange', (['(1)', '(how_much + 1)'], {}), '(1, how_much + 1)\n', (14402, 14419), True, 'import numpy as np\n'), ((17943, 17966), 'numpy.diff', 'np.diff', 
(['resampled.time'], {}), '(resampled.time)\n', (17950, 17966), True, 'import numpy as np\n'), ((14640, 14663), 'numpy.arange', 'np.arange', (['(-how_much)', '(0)'], {}), '(-how_much, 0)\n', (14649, 14663), True, 'import numpy as np\n'), ((14725, 14751), 'numpy.arange', 'np.arange', (['(1)', '(how_much + 1)'], {}), '(1, how_much + 1)\n', (14734, 14751), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import numpy as np
from pyscf import lib
def polariz_inter_ave(mf, gto, tddft, comega):
    """
    Average interacting polarizability from a converged TDDFT calculation.

    :param mf: mean-field object carrying mo_occ, mo_coeff and mo_energy
    :param gto: molecule/integral provider supplying 'int1e_r' dipole integrals
    :param tddft: TDDFT object with excitation amplitudes `xy` and energies `e`
    :param comega: array of complex frequencies at which to evaluate response
    :return: complex polarizability, one entry per frequency
    """
    gto.set_common_orig((0.0, 0.0, 0.0))
    ao_dip = gto.intor_symmetric('int1e_r', comp=3)
    occidx = np.where(mf.mo_occ == 2)[0]
    viridx = np.where(mf.mo_occ == 0)[0]
    mo_coeff = mf.mo_coeff
    mo_energy = mf.mo_energy  # kept for parity with the non-interacting variant
    orbv = mo_coeff[:, viridx]
    orbo = mo_coeff[:, occidx]
    # transition dipoles in the (virtual, occupied) MO block, flattened
    vo_dip = np.einsum('cmb,bn->cmn', np.einsum('am,cab->cmb', orbv, ao_dip), orbo)
    vo_dip = vo_dip.reshape((3, vo_dip.size // 3))
    p = np.zeros(len(comega), dtype=np.complex128)
    for (x, y), e in zip(tddft.xy, tddft.e):
        dip = np.dot(vo_dip, np.sqrt(2.0) * (x + y)[0])  # Normalization ?
        osc_strength = (2.0 / 3.0) * (dip * dip).sum()
        for iw, w in enumerate(comega):
            p[iw] += osc_strength * (1.0 / (w - e) - 1.0 / (w + e))
    return p
def polariz_nonin_ave(mf, gto, comega):
    """
    Average non-interacting (independent-particle) polarizability.

    Builds occupied->virtual transition dipoles from the AO dipole integrals
    and sums the response of every single-particle transition.

    :param mf: converged mean-field object (needs mo_occ, mo_coeff, mo_energy)
    :param gto: molecule/integral provider supplying 'int1e_r' dipole integrals
    :param comega: array of complex frequencies at which to evaluate response
    :return: complex polarizability, one entry per frequency
    """
    gto.set_common_orig((0.0, 0.0, 0.0))
    ao_dip = gto.intor_symmetric('int1e_r', comp=3)
    occidx = np.where(mf.mo_occ == 2)[0]
    viridx = np.where(mf.mo_occ == 0)[0]
    mo_coeff = mf.mo_coeff
    mo_energy = mf.mo_energy
    orbv, orbo = mo_coeff[:, viridx], mo_coeff[:, occidx]
    vo_dip = np.einsum('cmb,bn->cmn', np.einsum('am,cab->cmb', orbv, ao_dip), orbo)
    vo_dip = vo_dip.reshape((3, vo_dip.size // 3))
    p = np.zeros(len(comega), dtype=np.complex128)
    # Orbital-energy differences e_a - e_i, raveled in the same (vir, occ)
    # order as the columns of vo_dip. The previous version zipped the
    # flattened dipoles against *rows* of the (nvir, nocc) energy matrix and
    # used e[0], which truncated the loop at nvir transitions and paired each
    # dipole with the gap to the first occupied orbital only.
    eai = (mo_energy[viridx][:, None] - mo_energy[occidx][None, :]).ravel()
    for dip, e in zip(vo_dip.T, eai):
        osc_strength = (2.0 / 3.0) * (dip * dip).sum()
        for iw, w in enumerate(comega):
            p[iw] += osc_strength * (1.0 / (w - e) - 1.0 / (w + e))
    return p
| [
"numpy.where",
"numpy.sqrt",
"numpy.einsum",
"pyscf.lib.direct_sum"
] | [((1388, 1451), 'pyscf.lib.direct_sum', 'lib.direct_sum', (['"""a-i->ai"""', 'mo_energy[viridx]', 'mo_energy[occidx]'], {}), "('a-i->ai', mo_energy[viridx], mo_energy[occidx])\n", (1402, 1451), False, 'from pyscf import lib\n'), ((235, 259), 'numpy.where', 'np.where', (['(mf.mo_occ == 2)'], {}), '(mf.mo_occ == 2)\n', (243, 259), True, 'import numpy as np\n'), ((272, 296), 'numpy.where', 'np.where', (['(mf.mo_occ == 0)'], {}), '(mf.mo_occ == 0)\n', (280, 296), True, 'import numpy as np\n'), ((439, 477), 'numpy.einsum', 'np.einsum', (['"""am,cab->cmb"""', 'orbv', 'ao_dip'], {}), "('am,cab->cmb', orbv, ao_dip)\n", (448, 477), True, 'import numpy as np\n'), ((1029, 1053), 'numpy.where', 'np.where', (['(mf.mo_occ == 2)'], {}), '(mf.mo_occ == 2)\n', (1037, 1053), True, 'import numpy as np\n'), ((1066, 1090), 'numpy.where', 'np.where', (['(mf.mo_occ == 0)'], {}), '(mf.mo_occ == 0)\n', (1074, 1090), True, 'import numpy as np\n'), ((1233, 1271), 'numpy.einsum', 'np.einsum', (['"""am,cab->cmb"""', 'orbv', 'ao_dip'], {}), "('am,cab->cmb', orbv, ao_dip)\n", (1242, 1271), True, 'import numpy as np\n'), ((704, 716), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (711, 716), True, 'import numpy as np\n')] |
import glob
import os
import sys
import time
import cv2
import numpy as np
import png
from ip_basic import depth_map_utils
from ip_basic import vis_utils
def main():
    """Batch depth completion over a folder of sparse KITTI depth maps.

    Reads uint16 depth images, fills them with the selected ip_basic method,
    and saves results to the 'outputs' folder (or shows the process).
    """
    ##############################
    # Options
    ##############################
    # Validation set
    input_depth_dir = os.path.expanduser(
        '~/Kitti/depth/depth_selection/val_selection_cropped/velodyne_raw')
    data_split = 'val'
    # Test set
    # input_depth_dir = os.path.expanduser(
    #     '~/Kitti/depth/depth_selection/test_depth_completion_anonymous/velodyne_raw')
    # data_split = 'test'
    # Fast fill with Gaussian blur @90Hz (paper result)
    fill_type = 'fast'
    extrapolate = True
    blur_type = 'gaussian'
    # Fast Fill with bilateral blur, no extrapolation @87Hz (recommended)
    # fill_type = 'fast'
    # extrapolate = False
    # blur_type = 'bilateral'
    # Multi-scale dilations with extra noise removal, no extrapolation @ 30Hz
    # fill_type = 'multiscale'
    # extrapolate = False
    # blur_type = 'bilateral'
    # Save output to disk or show process
    save_output = True
    ##############################
    # Processing
    ##############################
    if save_output:
        # Save to Disk
        show_process = False
        save_depth_maps = True
    else:
        # 'fast' fill has no intermediate stages to display
        if fill_type == 'fast':
            raise ValueError('"fast" fill does not support show_process')
        # Show Process
        show_process = True
        save_depth_maps = False
    # Create output folder
    this_file_path = os.path.dirname(os.path.realpath(__file__))
    outputs_dir = this_file_path + '/outputs'
    os.makedirs(outputs_dir, exist_ok=True)
    output_folder_prefix = 'depth_' + data_split
    # find the highest existing run index so a fresh numbered folder is used
    output_list = sorted(os.listdir(outputs_dir))
    if len(output_list) > 0:
        split_folders = [folder for folder in output_list
                         if folder.startswith(output_folder_prefix)]
        if len(split_folders) > 0:
            last_output_folder = split_folders[-1]
            last_output_index = int(last_output_folder.split('_')[-1])
        else:
            last_output_index = -1
    else:
        last_output_index = -1
    output_depth_dir = outputs_dir + '/{}_{:03d}'.format(
        output_folder_prefix, last_output_index + 1)
    if save_output:
        if not os.path.exists(output_depth_dir):
            os.makedirs(output_depth_dir)
        else:
            raise FileExistsError('Already exists!')
    print('Output dir:', output_depth_dir)
    # Get images in sorted order
    images_to_use = sorted(glob.glob(input_depth_dir + '/*'))
    # Rolling average array of times for time estimation
    avg_time_arr_length = 10
    last_fill_times = np.repeat([1.0], avg_time_arr_length)
    last_total_times = np.repeat([1.0], avg_time_arr_length)
    num_images = len(images_to_use)
    for i in range(num_images):
        depth_image_path = images_to_use[i]
        # Calculate average time with last n fill times
        avg_fill_time = np.mean(last_fill_times)
        avg_total_time = np.mean(last_total_times)
        # Show progress
        sys.stdout.write('\rProcessing {} / {}, '
                         'Avg Fill Time: {:.5f}s, '
                         'Avg Total Time: {:.5f}s, '
                         'Est Time Remaining: {:.3f}s'.format(
                             i, num_images - 1, avg_fill_time, avg_total_time,
                             avg_total_time * (num_images - i)))
        sys.stdout.flush()
        # Start timing
        start_total_time = time.time()
        # Load depth projections from uint16 image
        depth_image = cv2.imread(depth_image_path, cv2.IMREAD_ANYDEPTH)
        # KITTI stores depth scaled by 256 in uint16 pngs
        projected_depths = np.float32(depth_image / 256.0)
        # Fill in
        start_fill_time = time.time()
        if fill_type == 'fast':
            final_depths = depth_map_utils.fill_in_fast(
                projected_depths, extrapolate=extrapolate, blur_type=blur_type)
        elif fill_type == 'multiscale':
            final_depths, process_dict = depth_map_utils.fill_in_multiscale(
                projected_depths, extrapolate=extrapolate, blur_type=blur_type,
                show_process=show_process)
        else:
            raise ValueError('Invalid fill_type {}'.format(fill_type))
        end_fill_time = time.time()
        # Display images from process_dict
        if fill_type == 'multiscale' and show_process:
            img_size = (570, 165)
            x_start = 80
            y_start = 50
            x_offset = img_size[0]
            y_offset = img_size[1]
            x_padding = 0
            y_padding = 28
            img_x = x_start
            img_y = y_start
            max_x = 1900
            row_idx = 0
            for key, value in process_dict.items():
                image_jet = cv2.applyColorMap(
                    np.uint8(value / np.amax(value) * 255),
                    cv2.COLORMAP_JET)
                vis_utils.cv2_show_image(
                    key, image_jet,
                    img_size, (img_x, img_y))
                # advance the tile position, wrapping to a new row when the
                # next tile would run off the screen
                img_x += x_offset + x_padding
                if (img_x + x_offset + x_padding) > max_x:
                    img_x = x_start
                    row_idx += 1
                    img_y = y_start + row_idx * (y_offset + y_padding)
                # Save process images
                cv2.imwrite('process/' + key + '.png', image_jet)
            cv2.waitKey()
        # Save depth images to disk
        if save_depth_maps:
            depth_image_file_name = os.path.split(depth_image_path)[1]
            # Save depth map to a uint16 png (same format as disparity maps)
            file_path = output_depth_dir + '/' + depth_image_file_name
            with open(file_path, 'wb') as f:
                depth_image = (final_depths * 256).astype(np.uint16)
                # pypng is used because cv2 cannot save uint16 format images
                writer = png.Writer(width=depth_image.shape[1],
                                    height=depth_image.shape[0],
                                    bitdepth=16,
                                    greyscale=True)
                writer.write(f, depth_image)
        end_total_time = time.time()
        # Update fill times
        last_fill_times = np.roll(last_fill_times, -1)
        last_fill_times[-1] = end_fill_time - start_fill_time
        # Update total times
        last_total_times = np.roll(last_total_times, -1)
        last_total_times[-1] = end_total_time - start_total_time
# Script entry point: run the full depth-completion pipeline defined above.
if __name__ == "__main__":
    main()
| [
"ip_basic.depth_map_utils.fill_in_multiscale",
"png.Writer",
"numpy.mean",
"os.path.exists",
"os.listdir",
"numpy.repeat",
"os.path.split",
"sys.stdout.flush",
"cv2.waitKey",
"glob.glob",
"os.path.expanduser",
"ip_basic.vis_utils.cv2_show_image",
"ip_basic.depth_map_utils.fill_in_fast",
"t... | [((359, 450), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Kitti/depth/depth_selection/val_selection_cropped/velodyne_raw"""'], {}), "(\n '~/Kitti/depth/depth_selection/val_selection_cropped/velodyne_raw')\n", (377, 450), False, 'import os\n'), ((1704, 1743), 'os.makedirs', 'os.makedirs', (['outputs_dir'], {'exist_ok': '(True)'}), '(outputs_dir, exist_ok=True)\n', (1715, 1743), False, 'import os\n'), ((2789, 2826), 'numpy.repeat', 'np.repeat', (['[1.0]', 'avg_time_arr_length'], {}), '([1.0], avg_time_arr_length)\n', (2798, 2826), True, 'import numpy as np\n'), ((2850, 2887), 'numpy.repeat', 'np.repeat', (['[1.0]', 'avg_time_arr_length'], {}), '([1.0], avg_time_arr_length)\n', (2859, 2887), True, 'import numpy as np\n'), ((1626, 1652), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1642, 1652), False, 'import os\n'), ((1819, 1842), 'os.listdir', 'os.listdir', (['outputs_dir'], {}), '(outputs_dir)\n', (1829, 1842), False, 'import os\n'), ((2645, 2678), 'glob.glob', 'glob.glob', (["(input_depth_dir + '/*')"], {}), "(input_depth_dir + '/*')\n", (2654, 2678), False, 'import glob\n'), ((3083, 3107), 'numpy.mean', 'np.mean', (['last_fill_times'], {}), '(last_fill_times)\n', (3090, 3107), True, 'import numpy as np\n'), ((3133, 3158), 'numpy.mean', 'np.mean', (['last_total_times'], {}), '(last_total_times)\n', (3140, 3158), True, 'import numpy as np\n'), ((3554, 3572), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3570, 3572), False, 'import sys\n'), ((3624, 3635), 'time.time', 'time.time', ([], {}), '()\n', (3633, 3635), False, 'import time\n'), ((3710, 3759), 'cv2.imread', 'cv2.imread', (['depth_image_path', 'cv2.IMREAD_ANYDEPTH'], {}), '(depth_image_path, cv2.IMREAD_ANYDEPTH)\n', (3720, 3759), False, 'import cv2\n'), ((3787, 3818), 'numpy.float32', 'np.float32', (['(depth_image / 256.0)'], {}), '(depth_image / 256.0)\n', (3797, 3818), True, 'import numpy as np\n'), ((3864, 3875), 'time.time', 'time.time', ([], 
{}), '()\n', (3873, 3875), False, 'import time\n'), ((4394, 4405), 'time.time', 'time.time', ([], {}), '()\n', (4403, 4405), False, 'import time\n'), ((6294, 6305), 'time.time', 'time.time', ([], {}), '()\n', (6303, 6305), False, 'import time\n'), ((6361, 6389), 'numpy.roll', 'np.roll', (['last_fill_times', '(-1)'], {}), '(last_fill_times, -1)\n', (6368, 6389), True, 'import numpy as np\n'), ((6509, 6538), 'numpy.roll', 'np.roll', (['last_total_times', '(-1)'], {}), '(last_total_times, -1)\n', (6516, 6538), True, 'import numpy as np\n'), ((2394, 2426), 'os.path.exists', 'os.path.exists', (['output_depth_dir'], {}), '(output_depth_dir)\n', (2408, 2426), False, 'import os\n'), ((2440, 2469), 'os.makedirs', 'os.makedirs', (['output_depth_dir'], {}), '(output_depth_dir)\n', (2451, 2469), False, 'import os\n'), ((3935, 4031), 'ip_basic.depth_map_utils.fill_in_fast', 'depth_map_utils.fill_in_fast', (['projected_depths'], {'extrapolate': 'extrapolate', 'blur_type': 'blur_type'}), '(projected_depths, extrapolate=extrapolate,\n blur_type=blur_type)\n', (3963, 4031), False, 'from ip_basic import depth_map_utils\n'), ((5502, 5515), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (5513, 5515), False, 'import cv2\n'), ((4126, 4256), 'ip_basic.depth_map_utils.fill_in_multiscale', 'depth_map_utils.fill_in_multiscale', (['projected_depths'], {'extrapolate': 'extrapolate', 'blur_type': 'blur_type', 'show_process': 'show_process'}), '(projected_depths, extrapolate=\n extrapolate, blur_type=blur_type, show_process=show_process)\n', (4160, 4256), False, 'from ip_basic import depth_map_utils\n'), ((5034, 5100), 'ip_basic.vis_utils.cv2_show_image', 'vis_utils.cv2_show_image', (['key', 'image_jet', 'img_size', '(img_x, img_y)'], {}), '(key, image_jet, img_size, (img_x, img_y))\n', (5058, 5100), False, 'from ip_basic import vis_utils\n'), ((5439, 5488), 'cv2.imwrite', 'cv2.imwrite', (["('process/' + key + '.png')", 'image_jet'], {}), "('process/' + key + '.png', image_jet)\n", (5450, 
5488), False, 'import cv2\n'), ((5617, 5648), 'os.path.split', 'os.path.split', (['depth_image_path'], {}), '(depth_image_path)\n', (5630, 5648), False, 'import os\n'), ((6018, 6118), 'png.Writer', 'png.Writer', ([], {'width': 'depth_image.shape[1]', 'height': 'depth_image.shape[0]', 'bitdepth': '(16)', 'greyscale': '(True)'}), '(width=depth_image.shape[1], height=depth_image.shape[0],\n bitdepth=16, greyscale=True)\n', (6028, 6118), False, 'import png\n'), ((4957, 4971), 'numpy.amax', 'np.amax', (['value'], {}), '(value)\n', (4964, 4971), True, 'import numpy as np\n')] |
import numpy as np
from itertools import product
class Board(np.ndarray):
    """Conway's Game of Life board as an ndarray subclass.

    Cells are 1 (alive) or 0 (dead). Neighbour counting uses ``np.roll``,
    so the board wraps around at its edges (toroidal topology).
    """

    def update(self):
        """Advance the board by one generation, in place.

        Both rule masks are derived from the pre-update state; this matches
        applying the birth rule first, because a newly born cell always has
        exactly 3 neighbours and therefore can never satisfy the death rule.
        """
        counts = self.get_neighbours()
        birth = (self == 0) & (counts == 3)
        death = (self == 1) & ((counts < 2) | (counts > 3))
        self[birth] = 1
        self[death] = 0

    def get_neighbours(self):
        """Return the per-cell count of live 8-connected neighbours (wrapping)."""
        total = np.zeros(self.shape, dtype=int)
        for d_row in (-1, 0, 1):
            for d_col in (-1, 0, 1):
                if (d_row, d_col) == (0, 0):
                    continue  # a cell is not its own neighbour
                total += np.roll(self, (d_row, d_col), (0, 1))
        return total

    @classmethod
    def from_int_data(cls, board_data):
        """Build a Board from nested 0/1 integer data."""
        return np.array(board_data, dtype=int).view(cls)

    @classmethod
    def from_string_data(cls, *lines):
        """Build a Board from strings; any non-space character is a live cell."""
        rows = []
        for line in lines:
            rows.append(tuple(0 if char == ' ' else 1 for char in line))
        return cls.from_int_data(tuple(rows))

    @classmethod
    def from_zeros(cls, rows, cols):
        """Build an all-dead Board with the given dimensions."""
        return np.zeros((rows, cols), dtype=int).view(cls)
| [
"numpy.array",
"numpy.zeros",
"itertools.product",
"numpy.roll"
] | [((310, 341), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'int'}), '(self.shape, dtype=int)\n', (318, 341), True, 'import numpy as np\n'), ((366, 395), 'itertools.product', 'product', (['[-1, 0, 1]'], {'repeat': '(2)'}), '([-1, 0, 1], repeat=2)\n', (373, 395), False, 'from itertools import product\n'), ((588, 619), 'numpy.array', 'np.array', (['board_data'], {'dtype': 'int'}), '(board_data, dtype=int)\n', (596, 619), True, 'import numpy as np\n'), ((916, 949), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {'dtype': 'int'}), '((rows, cols), dtype=int)\n', (924, 949), True, 'import numpy as np\n'), ((460, 493), 'numpy.roll', 'np.roll', (['self', '(row, col)', '(0, 1)'], {}), '(self, (row, col), (0, 1))\n', (467, 493), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random as rn
# -------------------------------- Creating sin-data -------------------------------
def true_fun(x):
    """Ground-truth signal to regress: cos(1.5 * pi * x)."""
    return np.cos(np.pi * 1.5 * x)
# Fix the RNG seed so the sampled training set is reproducible run-to-run.
np.random.seed(42)
n_samples = 50
# 50 points uniform on [0, 1), sorted so line plots render left-to-right.
x_train = np.sort(np.random.rand(n_samples))
# Noisy targets: true signal plus Gaussian noise with std 0.1.
y_train = true_fun(x_train) + np.random.randn(n_samples) * 0.1
# --------------------- Build and compile neural net -------------------------------
# Small fully-connected net: 1 input -> 16 tanh units -> 1 tanh output.
# NOTE(review): a tanh output saturates at [-1, 1] — presumably acceptable
# since the target signal is a cosine; verify if the target range changes.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(16, input_shape=([1, ]), activation='tanh'),
    tf.keras.layers.Dense(1, activation='tanh')
])
model.summary()
# Predictions before training (random weights) — printed for comparison.
print(model.predict(x_train))
model.compile(loss='mse',
              optimizer=tf.keras.optimizers.Adam(0.001))
# --------------------- training and results of neural net --------------------------
model.fit(x_train, y_train, epochs=500)
# Evaluate the fitted model on an evenly spaced grid over [0, 1].
x_pred = tf.linspace(0.0, 1, n_samples)
y_pred = model.predict(x_pred)
# Print the output layer's weight matrix for inspection.
print(model.layers[1].kernel)
# Plot noisy training samples against the network's prediction curve.
plt.scatter(x_train, y_train,
            edgecolor='b', s=20, label="Samples")
plt.plot(x_pred, y_pred, color='k', label='Predictions')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
print("done")
| [
"tensorflow.linspace",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dense",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.cos",
"numpy.random.randn",
"matplotlib.py... | [((237, 255), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (251, 255), True, 'import numpy as np\n'), ((895, 925), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(1)', 'n_samples'], {}), '(0.0, 1, n_samples)\n', (906, 925), True, 'import tensorflow as tf\n'), ((988, 1055), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_train', 'y_train'], {'edgecolor': '"""b"""', 's': '(20)', 'label': '"""Samples"""'}), "(x_train, y_train, edgecolor='b', s=20, label='Samples')\n", (999, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1124), 'matplotlib.pyplot.plot', 'plt.plot', (['x_pred', 'y_pred'], {'color': '"""k"""', 'label': '"""Predictions"""'}), "(x_pred, y_pred, color='k', label='Predictions')\n", (1076, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1140), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1135, 1140), True, 'import matplotlib.pyplot as plt\n'), ((1141, 1156), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (1151, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1169), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1167, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1170, 1180), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1178, 1180), True, 'import matplotlib.pyplot as plt\n'), ((211, 234), 'numpy.cos', 'np.cos', (['(1.5 * np.pi * x)'], {}), '(1.5 * np.pi * x)\n', (217, 234), True, 'import numpy as np\n'), ((289, 314), 'numpy.random.rand', 'np.random.rand', (['n_samples'], {}), '(n_samples)\n', (303, 314), True, 'import numpy as np\n'), ((346, 372), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (361, 372), True, 'import numpy as np\n'), ((508, 569), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(16)'], {'input_shape': '[1]', 'activation': '"""tanh"""'}), "(16, input_shape=[1], activation='tanh')\n", (529, 569), True, 'import tensorflow as tf\n'), ((579, 
622), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""tanh"""'}), "(1, activation='tanh')\n", (600, 622), True, 'import tensorflow as tf\n'), ((723, 754), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (747, 754), True, 'import tensorflow as tf\n')] |
from i3Deep import utils
import os
from evaluate import evaluate
import numpy as np
from skimage.segmentation.random_walker_segmentation import random_walker
from tqdm import tqdm
import torchio
import torch
def compute_predictions(image_path, mask_path, gt_path, save_path, nr_modalities, class_labels, resize=True, beta=10):
    """Segment each case with a random walker seeded by its sparse mask, save the
    predictions, and evaluate them against ground truth.

    Args:
        image_path: Directory of input images; only every ``nr_modalities``-th file
            is used (one image per case when modalities are consecutive files).
        mask_path: Directory of seed masks, one per case.
        gt_path: Ground-truth directory, forwarded to ``evaluate``.
        save_path: Directory where predicted masks are written.
        nr_modalities: Number of modality files per case in ``image_path``.
        class_labels: Label definitions forwarded to ``evaluate``.
        resize: If True, downsample oversized volumes in-plane to ``target_shape``
            before the walker runs, then upsample the prediction back.
        beta: Random-walker penalization coefficient (higher = more edge-sensitive).

    Returns:
        The result object returned by ``evaluate`` for the saved predictions.
    """
    image_filenames = utils.load_filenames(image_path)[::nr_modalities]
    mask_filenames = utils.load_filenames(mask_path)
    target_shape = (256, 256, 200)  # in-plane cap; z-extent is preserved
    for i in tqdm(range(len(image_filenames))):
        image, affine, spacing, header = utils.load_nifty(image_filenames[i])
        mask, _, _, _ = utils.load_nifty(mask_filenames[i])
        # BUGFIX: reset per case. Previously a single flag (and original_shape) set
        # outside the loop leaked across iterations, so a small image following a
        # large one was wrongly "restored" to the large image's shape.
        is_resized = False
        original_shape = image.shape
        if resize and image.size > np.prod(target_shape):
            is_resized = True
            print("Resized: ", os.path.basename(image_filenames[i]))
            image = utils.interpolate(image, (target_shape[0], target_shape[1], original_shape[2]))
            mask = utils.interpolate(mask, (target_shape[0], target_shape[1], original_shape[2]), mask=True)
        image = utils.normalize(image)
        labels = np.unique(mask)
        # Shift every label up by one (descending order avoids collisions) so no
        # voxel is 0: the random walker treats 0 as "unlabeled".
        for label in np.flip(labels):
            mask[mask == label] = label + 1
        mask = mask.astype(np.uint8)
        mask = random_walker(data=image, labels=mask, beta=beta, mode='cg_mg')
        # Undo the +1 shift (ascending order is collision-free in this direction).
        for label in labels:
            mask[mask == label + 1] = label
        if is_resized:
            mask = utils.interpolate(mask, original_shape, mask=True)
        utils.save_nifty(save_path + os.path.basename(mask_filenames[i][:-12] + ".nii.gz"), mask, affine, spacing, header, is_mask=True)
    results = evaluate(gt_path, save_path, class_labels)
    return results
# def compute_predictions(image_path, mask_path, gt_path, save_path):
# image_filenames = utils.load_filenames(image_path)
# mask_filenames = utils.load_filenames(mask_path)
#
# for i in tqdm(range(len(image_filenames))):
# _, affine, spacing, header = utils.load_nifty(mask_filenames[i])
# subject = torchio.Subject(image=torchio.ScalarImage(image_filenames[i]), mask=torchio.LabelMap(mask_filenames[i]))
# sampler = torchio.inference.GridSampler(subject, patch_size=(20, 20, 10), padding_mode='edge')
# aggregator = torchio.inference.GridAggregator(sampler)
# for patch in sampler:
# image = patch["image"][torchio.DATA].numpy()[0]
# image = utils.normalize(image)
# mask = patch["mask"][torchio.DATA].numpy()[0]
# location = torch.tensor(patch[torchio.LOCATION]).unsqueeze(0)
# if not(image.max() <= 0 or mask.max() == 0):
# # image[image < 0] = 0
# mask = mask.astype(np.int32)
# mask = random_walker(data=image, labels=mask, mode='cg_j')
# mask = torch.tensor(mask).unsqueeze(0).unsqueeze(0)
# aggregator.add_batch(mask, location)
# mask = aggregator.get_output_tensor()
# utils.save_nifty(save_path + os.path.basename(mask_filenames[i]), mask, affine, spacing, header, is_mask=True)
# mean_dice_score, median_dice_score = evaluate(gt_path, save_path)
# return mean_dice_score, median_dice_score | [
"numpy.flip",
"numpy.prod",
"numpy.unique",
"os.path.basename",
"skimage.segmentation.random_walker_segmentation.random_walker",
"i3Deep.utils.load_nifty",
"evaluate.evaluate",
"i3Deep.utils.load_filenames",
"i3Deep.utils.interpolate",
"i3Deep.utils.normalize"
] | [((432, 463), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['mask_path'], {}), '(mask_path)\n', (452, 463), False, 'from i3Deep import utils\n'), ((1785, 1827), 'evaluate.evaluate', 'evaluate', (['gt_path', 'save_path', 'class_labels'], {}), '(gt_path, save_path, class_labels)\n', (1793, 1827), False, 'from evaluate import evaluate\n'), ((360, 392), 'i3Deep.utils.load_filenames', 'utils.load_filenames', (['image_path'], {}), '(image_path)\n', (380, 392), False, 'from i3Deep import utils\n'), ((636, 672), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (652, 672), False, 'from i3Deep import utils\n'), ((698, 733), 'i3Deep.utils.load_nifty', 'utils.load_nifty', (['mask_filenames[i]'], {}), '(mask_filenames[i])\n', (714, 733), False, 'from i3Deep import utils\n'), ((1164, 1186), 'i3Deep.utils.normalize', 'utils.normalize', (['image'], {}), '(image)\n', (1179, 1186), False, 'from i3Deep import utils\n'), ((1205, 1220), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (1214, 1220), True, 'import numpy as np\n'), ((1282, 1297), 'numpy.flip', 'np.flip', (['labels'], {}), '(labels)\n', (1289, 1297), True, 'import numpy as np\n'), ((1398, 1461), 'skimage.segmentation.random_walker_segmentation.random_walker', 'random_walker', ([], {'data': 'image', 'labels': 'mask', 'beta': 'beta', 'mode': '"""cg_mg"""'}), "(data=image, labels=mask, beta=beta, mode='cg_mg')\n", (1411, 1461), False, 'from skimage.segmentation.random_walker_segmentation import random_walker\n'), ((957, 1036), 'i3Deep.utils.interpolate', 'utils.interpolate', (['image', '(target_shape[0], target_shape[1], original_shape[2])'], {}), '(image, (target_shape[0], target_shape[1], original_shape[2]))\n', (974, 1036), False, 'from i3Deep import utils\n'), ((1057, 1151), 'i3Deep.utils.interpolate', 'utils.interpolate', (['mask', '(target_shape[0], target_shape[1], original_shape[2])'], {'mask': '(True)'}), '(mask, (target_shape[0], 
target_shape[1], original_shape[2\n ]), mask=True)\n', (1074, 1151), False, 'from i3Deep import utils\n'), ((1581, 1631), 'i3Deep.utils.interpolate', 'utils.interpolate', (['mask', 'original_shape'], {'mask': '(True)'}), '(mask, original_shape, mask=True)\n', (1598, 1631), False, 'from i3Deep import utils\n'), ((770, 791), 'numpy.prod', 'np.prod', (['target_shape'], {}), '(target_shape)\n', (777, 791), True, 'import numpy as np\n'), ((856, 892), 'os.path.basename', 'os.path.basename', (['image_filenames[i]'], {}), '(image_filenames[i])\n', (872, 892), False, 'import os\n'), ((1670, 1723), 'os.path.basename', 'os.path.basename', (["(mask_filenames[i][:-12] + '.nii.gz')"], {}), "(mask_filenames[i][:-12] + '.nii.gz')\n", (1686, 1723), False, 'import os\n')] |
import cv2 as cv
import numpy as np
import utilities
def empty(a):
    """No-op trackbar callback; cv2 requires one, but values are polled in the loop."""
    return None
# Window of HSV threshold sliders; their values are polled every frame below.
cv.namedWindow("Trackbars")
cv.resizeWindow("Trackbars", 640, 240)
# OpenCV hue range is 0-179; saturation and value are 0-255.
cv.createTrackbar("Hue Min", "Trackbars", 55, 179,empty)
cv.createTrackbar("Hue Max", "Trackbars", 155, 179,empty)
cv.createTrackbar("Sat Min", "Trackbars", 21, 255,empty)
cv.createTrackbar("Sat Max", "Trackbars", 255, 255,empty)
cv.createTrackbar("Val Min", "Trackbars", 0, 255,empty)
cv.createTrackbar("Val Max", "Trackbars", 255, 255,empty)
# Config Webcam
frameWidth = 1920
frameHeight = 1080
cap = cv.VideoCapture(0)
# Property ids: 3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT,
# 10 = CAP_PROP_BRIGHTNESS.
cap.set(3, frameWidth)
cap.set(4, frameHeight)
cap.set(10, 150) #brightness
# Live HSV color-masking loop: read a frame, threshold it by the current
# slider values, and show original/HSV/mask/result side by side.
while True:
    success, img = cap.read()
    imgHSV = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    # Poll current slider positions each frame.
    h_min = cv.getTrackbarPos("Hue Min", "Trackbars")
    h_max = cv.getTrackbarPos("Hue Max", "Trackbars")
    s_min = cv.getTrackbarPos("Sat Min", "Trackbars")
    s_max = cv.getTrackbarPos("Sat Max", "Trackbars")
    v_min = cv.getTrackbarPos("Val Min", "Trackbars")
    v_max = cv.getTrackbarPos("Val Max", "Trackbars")
    # print(h_min, h_max, s_min, s_max, v_min, v_max)
    lower = np.array([h_min, s_min, v_min])
    upper = np.array([h_max, s_max, v_max])
    # Binary mask of pixels inside the selected HSV range.
    mask = cv.inRange(imgHSV, lower, upper)
    imgResult = cv.bitwise_and(img, img, mask=mask)
    imgStack = utilities.stackImages(0.5, ([img, imgHSV], [mask, imgResult]))
    cv.imshow("stacked", imgStack)
    # Quit on 'q'. Note: in Python '&' binds tighter than '==', so this is
    # (waitKey(1) & 0xFF) == ord('q') as intended.
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
| [
"cv2.resizeWindow",
"cv2.inRange",
"cv2.bitwise_and",
"cv2.imshow",
"numpy.array",
"utilities.stackImages",
"cv2.waitKey",
"cv2.getTrackbarPos",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.createTrackbar",
"cv2.namedWindow"
] | [((78, 105), 'cv2.namedWindow', 'cv.namedWindow', (['"""Trackbars"""'], {}), "('Trackbars')\n", (92, 105), True, 'import cv2 as cv\n'), ((106, 144), 'cv2.resizeWindow', 'cv.resizeWindow', (['"""Trackbars"""', '(640)', '(240)'], {}), "('Trackbars', 640, 240)\n", (121, 144), True, 'import cv2 as cv\n'), ((145, 202), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""Hue Min"""', '"""Trackbars"""', '(55)', '(179)', 'empty'], {}), "('Hue Min', 'Trackbars', 55, 179, empty)\n", (162, 202), True, 'import cv2 as cv\n'), ((202, 260), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""Hue Max"""', '"""Trackbars"""', '(155)', '(179)', 'empty'], {}), "('Hue Max', 'Trackbars', 155, 179, empty)\n", (219, 260), True, 'import cv2 as cv\n'), ((260, 317), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""Sat Min"""', '"""Trackbars"""', '(21)', '(255)', 'empty'], {}), "('Sat Min', 'Trackbars', 21, 255, empty)\n", (277, 317), True, 'import cv2 as cv\n'), ((317, 375), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""Sat Max"""', '"""Trackbars"""', '(255)', '(255)', 'empty'], {}), "('Sat Max', 'Trackbars', 255, 255, empty)\n", (334, 375), True, 'import cv2 as cv\n'), ((375, 431), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""Val Min"""', '"""Trackbars"""', '(0)', '(255)', 'empty'], {}), "('Val Min', 'Trackbars', 0, 255, empty)\n", (392, 431), True, 'import cv2 as cv\n'), ((431, 489), 'cv2.createTrackbar', 'cv.createTrackbar', (['"""Val Max"""', '"""Trackbars"""', '(255)', '(255)', 'empty'], {}), "('Val Max', 'Trackbars', 255, 255, empty)\n", (448, 489), True, 'import cv2 as cv\n'), ((550, 568), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (565, 568), True, 'import cv2 as cv\n'), ((704, 738), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2HSV'], {}), '(img, cv.COLOR_BGR2HSV)\n', (715, 738), True, 'import cv2 as cv\n'), ((752, 793), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""Hue Min"""', '"""Trackbars"""'], {}), "('Hue Min', 'Trackbars')\n", (769, 793), 
True, 'import cv2 as cv\n'), ((806, 847), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""Hue Max"""', '"""Trackbars"""'], {}), "('Hue Max', 'Trackbars')\n", (823, 847), True, 'import cv2 as cv\n'), ((860, 901), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""Sat Min"""', '"""Trackbars"""'], {}), "('Sat Min', 'Trackbars')\n", (877, 901), True, 'import cv2 as cv\n'), ((914, 955), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""Sat Max"""', '"""Trackbars"""'], {}), "('Sat Max', 'Trackbars')\n", (931, 955), True, 'import cv2 as cv\n'), ((968, 1009), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""Val Min"""', '"""Trackbars"""'], {}), "('Val Min', 'Trackbars')\n", (985, 1009), True, 'import cv2 as cv\n'), ((1022, 1063), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""Val Max"""', '"""Trackbars"""'], {}), "('Val Max', 'Trackbars')\n", (1039, 1063), True, 'import cv2 as cv\n'), ((1132, 1163), 'numpy.array', 'np.array', (['[h_min, s_min, v_min]'], {}), '([h_min, s_min, v_min])\n', (1140, 1163), True, 'import numpy as np\n'), ((1176, 1207), 'numpy.array', 'np.array', (['[h_max, s_max, v_max]'], {}), '([h_max, s_max, v_max])\n', (1184, 1207), True, 'import numpy as np\n'), ((1219, 1251), 'cv2.inRange', 'cv.inRange', (['imgHSV', 'lower', 'upper'], {}), '(imgHSV, lower, upper)\n', (1229, 1251), True, 'import cv2 as cv\n'), ((1269, 1304), 'cv2.bitwise_and', 'cv.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (1283, 1304), True, 'import cv2 as cv\n'), ((1321, 1383), 'utilities.stackImages', 'utilities.stackImages', (['(0.5)', '([img, imgHSV], [mask, imgResult])'], {}), '(0.5, ([img, imgHSV], [mask, imgResult]))\n', (1342, 1383), False, 'import utilities\n'), ((1388, 1418), 'cv2.imshow', 'cv.imshow', (['"""stacked"""', 'imgStack'], {}), "('stacked', imgStack)\n", (1397, 1418), True, 'import cv2 as cv\n'), ((1427, 1440), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (1437, 1440), True, 'import cv2 as cv\n')] |
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
import sys
import warnings
import math
from tqdm import tqdm
from collections import namedtuple
import keras.backend as K
from keras.utils import Sequence
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from csbdeep.models import BaseModel
from csbdeep.utils.tf import CARETensorBoard
from csbdeep.utils import _raise, backend_channels_last, axes_check_and_normalize, axes_dict, load_json, save_json
from csbdeep.internals.predict import tile_iterator
from csbdeep.data import Resizer
from ..utils import _is_power_of_2, optimize_threshold
# TODO: support (optional) classification of objects?
# TODO: helper function to check if receptive field of cnn is sufficient for object sizes in GT
def generic_masked_loss(mask, loss, weights=1, norm_by_mask=True, reg_weight=0, reg_penalty=K.abs):
def _loss(y_true, y_pred):
actual_loss = K.mean(mask * weights * loss(y_true, y_pred), axis=-1)
norm_mask = (K.mean(mask) + K.epsilon()) if norm_by_mask else 1
if reg_weight > 0:
reg_loss = K.mean((1-mask) * reg_penalty(y_pred), axis=-1)
return actual_loss / norm_mask + reg_weight * reg_loss
else:
return actual_loss / norm_mask
return _loss
def masked_loss(mask, penalty, reg_weight, norm_by_mask):
loss = lambda y_true, y_pred: penalty(y_true - y_pred)
return generic_masked_loss(mask, loss, reg_weight=reg_weight, norm_by_mask=norm_by_mask)
# TODO: should we use norm_by_mask=True in the loss or only in a metric?
# previous 2D behavior was norm_by_mask=False
# same question for reg_weight? use 1e-4 (as in 3D) or 0 (as in 2D)?
def masked_loss_mae(mask, reg_weight=0, norm_by_mask=True):
return masked_loss(mask, K.abs, reg_weight=reg_weight, norm_by_mask=norm_by_mask)
def masked_loss_mse(mask, reg_weight=0, norm_by_mask=True):
return masked_loss(mask, K.square, reg_weight=reg_weight, norm_by_mask=norm_by_mask)
def masked_metric_mae(mask):
def relevant_mae(y_true, y_pred):
return masked_loss(mask, K.abs, reg_weight=0, norm_by_mask=True)(y_true, y_pred)
return relevant_mae
def masked_metric_mse(mask):
def relevant_mse(y_true, y_pred):
return masked_loss(mask, K.square, reg_weight=0, norm_by_mask=True)(y_true, y_pred)
return relevant_mse
def kld(y_true, y_pred):
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
return K.mean(K.binary_crossentropy(y_true, y_pred) - K.binary_crossentropy(y_true, y_true), axis=-1)
class StarDistDataBase(Sequence):
def __init__(self, X, Y, n_rays, grid, batch_size, patch_size, use_gpu=False, maxfilter_cache=True, maxfilter_patch_size=None, augmenter=None):
X = [x.astype(np.float32, copy=False) for x in X]
# Y = [y.astype(np.uint16, copy=False) for y in Y]
self.X, self.Y = X, Y
self.batch_size = batch_size
self.n_rays = n_rays
self.patch_size = patch_size
self.ss_grid = (slice(None),) + tuple(slice(0, None, g) for g in grid)
self.perm = np.random.permutation(len(self.X))
self.use_gpu = bool(use_gpu)
if augmenter is None:
augmenter = lambda *args: args
callable(augmenter) or _raise(ValueError("augmenter must be None or callable"))
self.augmenter = augmenter
if self.use_gpu:
from gputools import max_filter
self.max_filter = lambda y, patch_size: max_filter(y.astype(np.float32), patch_size)
else:
from scipy.ndimage.filters import maximum_filter
self.max_filter = lambda y, patch_size: maximum_filter(y, patch_size, mode='constant')
self.maxfilter_patch_size = maxfilter_patch_size if maxfilter_patch_size is not None else self.patch_size
if maxfilter_cache:
self.R = [self.no_background_patches((x,y)) for x,y in zip(self.X,self.Y)]
else:
self.R = None
def __len__(self):
return int(np.ceil(len(self.X) / float(self.batch_size)))
def on_epoch_end(self):
self.perm = np.random.permutation(len(self.X))
def no_background_patches(self, arrays, *args):
x, y = arrays
return self.max_filter(y, self.maxfilter_patch_size) > 0
def no_background_patches_cached(self, k):
if self.R is None:
return self.no_background_patches
else:
return lambda *args: self.R[k]
class StarDistBase(BaseModel):
def __init__(self, config, name=None, basedir='.'):
super().__init__(config=config, name=name, basedir=basedir)
threshs = dict(prob=None, nms=None)
if basedir is not None:
try:
threshs = load_json(str(self.logdir / 'thresholds.json'))
print("Loading thresholds from 'thresholds.json'.")
if threshs.get('prob') is None or not (0 < threshs.get('prob') < 1):
print("- Invalid 'prob' threshold (%s), using default value." % str(threshs.get('prob')))
threshs['prob'] = None
if threshs.get('nms') is None or not (0 < threshs.get('nms') < 1):
print("- Invalid 'nms' threshold (%s), using default value." % str(threshs.get('nms')))
threshs['nms'] = None
except FileNotFoundError:
if config is None and len(tuple(self.logdir.glob('*.h5'))) > 0:
print("Couldn't load thresholds from 'thresholds.json', using default values. "
"(Call 'optimize_thresholds' to change that.)")
self.thresholds = dict (
prob = 0.5 if threshs['prob'] is None else threshs['prob'],
nms = 0.4 if threshs['nms'] is None else threshs['nms'],
)
print("Using default values: prob_thresh={prob:g}, nms_thresh={nms:g}.".format(prob=self.thresholds.prob, nms=self.thresholds.nms))
@property
def thresholds(self):
return self._thresholds
@thresholds.setter
def thresholds(self, d):
self._thresholds = namedtuple('Thresholds',d.keys())(*d.values())
def prepare_for_training(self, optimizer=None):
"""Prepare for neural network training.
Compiles the model and creates
`Keras Callbacks <https://keras.io/callbacks/>`_ to be used for training.
Note that this method will be implicitly called once by :func:`train`
(with default arguments) if not done so explicitly beforehand.
Parameters
----------
optimizer : obj or None
Instance of a `Keras Optimizer <https://keras.io/optimizers/>`_ to be used for training.
If ``None`` (default), uses ``Adam`` with the learning rate specified in ``config``.
"""
if optimizer is None:
optimizer = Adam(lr=self.config.train_learning_rate)
input_mask = self.keras_model.inputs[1] # second input layer is mask for dist loss
dist_loss = {'mse': masked_loss_mse, 'mae': masked_loss_mae}[self.config.train_dist_loss](input_mask, reg_weight=self.config.train_background_reg)
prob_loss = 'binary_crossentropy'
self.keras_model.compile(optimizer, loss=[prob_loss, dist_loss],
loss_weights = list(self.config.train_loss_weights),
metrics={'prob': kld, 'dist': [masked_metric_mae(input_mask),masked_metric_mse(input_mask)]})
self.callbacks = []
if self.basedir is not None:
self.callbacks += self._checkpoint_callbacks()
if self.config.train_tensorboard:
# self.callbacks.append(TensorBoard(log_dir=str(self.logdir), write_graph=False))
self.callbacks.append(CARETensorBoard(log_dir=str(self.logdir), prefix_with_timestamp=False, n_images=3, write_images=True, prob_out=False))
if self.config.train_reduce_lr is not None:
rlrop_params = self.config.train_reduce_lr
if 'verbose' not in rlrop_params:
rlrop_params['verbose'] = True
self.callbacks.append(ReduceLROnPlateau(**rlrop_params))
self._model_prepared = True
def predict(self, img, axes=None, normalizer=None, n_tiles=None, show_tile_progress=True, **predict_kwargs):
"""Predict.
Parameters
----------
img : :class:`numpy.ndarray`
Input image
axes : str or None
Axes of the input ``img``.
``None`` denotes that axes of img are the same as denoted in the config.
normalizer : :class:`csbdeep.data.Normalizer` or None
(Optional) normalization of input image before prediction.
Note that the default (``None``) assumes ``img`` to be already normalized.
n_tiles : iterable or None
Out of memory (OOM) errors can occur if the input image is too large.
To avoid this problem, the input image is broken up into (overlapping) tiles
that are processed independently and re-assembled.
This parameter denotes a tuple of the number of tiles for every image axis (see ``axes``).
``None`` denotes that no tiling should be used.
show_tile_progress: bool
Whether to show progress during tiled prediction.
predict_kwargs: dict
Keyword arguments for ``predict`` function of Keras model.
Returns
-------
(:class:`numpy.ndarray`,:class:`numpy.ndarray`)
Returns the tuple (`prob`, `dist`) of per-pixel object probabilities and star-convex polygon/polyhedra distances.
"""
if n_tiles is None:
n_tiles = [1]*img.ndim
try:
n_tiles = tuple(n_tiles)
img.ndim == len(n_tiles) or _raise(TypeError())
except TypeError:
raise ValueError("n_tiles must be an iterable of length %d" % img.ndim)
all(np.isscalar(t) and 1<=t and int(t)==t for t in n_tiles) or _raise(
ValueError("all values of n_tiles must be integer values >= 1"))
n_tiles = tuple(map(int,n_tiles))
if axes is None:
axes = self.config.axes
assert 'C' in axes
if img.ndim == len(axes)-1 and self.config.n_channel_in == 1:
# img has no dedicated channel axis, but 'C' always part of config axes
axes = axes.replace('C','')
axes = axes_check_and_normalize(axes,img.ndim)
axes_net = self.config.axes
_permute_axes = self._make_permute_axes(axes, axes_net)
x = _permute_axes(img) # x has axes_net semantics
channel = axes_dict(axes_net)['C']
self.config.n_channel_in == x.shape[channel] or _raise(ValueError())
axes_net_div_by = self._axes_div_by(axes_net)
grid = tuple(self.config.grid)
len(grid) == len(axes_net)-1 or _raise(ValueError())
grid_dict = dict(zip(axes_net.replace('C',''),grid))
normalizer = self._check_normalizer_resizer(normalizer, None)[0]
resizer = StarDistPadAndCropResizer(grid=grid_dict)
x = normalizer.before(x, axes_net)
x = resizer.before(x, axes_net, axes_net_div_by)
def predict_direct(tile):
sh = list(tile.shape); sh[channel] = 1; dummy = np.empty(sh,np.float32)
prob, dist = self.keras_model.predict([tile[np.newaxis],dummy[np.newaxis]], **predict_kwargs)
return prob[0], dist[0]
if np.prod(n_tiles) > 1:
tiling_axes = axes_net.replace('C','') # axes eligible for tiling
x_tiling_axis = tuple(axes_dict(axes_net)[a] for a in tiling_axes) # numerical axis ids for x
axes_net_tile_overlaps = self._axes_tile_overlap(axes_net)
# hack: permute tiling axis in the same way as img -> x was permuted
n_tiles = _permute_axes(np.empty(n_tiles,np.bool)).shape
(all(n_tiles[i] == 1 for i in range(x.ndim) if i not in x_tiling_axis) or
_raise(ValueError("entry of n_tiles > 1 only allowed for axes '%s'" % tiling_axes)))
sh = [s//grid_dict.get(a,1) for a,s in zip(axes_net,x.shape)]
sh[channel] = 1; prob = np.empty(sh,np.float32)
sh[channel] = self.config.n_rays; dist = np.empty(sh,np.float32)
n_block_overlaps = [int(np.ceil(overlap/blocksize)) for overlap, blocksize
in zip(axes_net_tile_overlaps, axes_net_div_by)]
for tile, s_src, s_dst in tqdm(tile_iterator(x, n_tiles, block_sizes=axes_net_div_by, n_block_overlaps=n_block_overlaps),
disable=(not show_tile_progress), total=np.prod(n_tiles)):
prob_tile, dist_tile = predict_direct(tile)
# account for grid
s_src = [slice(s.start//grid_dict.get(a,1),s.stop//grid_dict.get(a,1)) for s,a in zip(s_src,axes_net)]
s_dst = [slice(s.start//grid_dict.get(a,1),s.stop//grid_dict.get(a,1)) for s,a in zip(s_dst,axes_net)]
# prob and dist have different channel dimensionality than image x
s_src[channel] = slice(None)
s_dst[channel] = slice(None)
s_src, s_dst = tuple(s_src), tuple(s_dst)
# print(s_src,s_dst)
prob[s_dst] = prob_tile[s_src]
dist[s_dst] = dist_tile[s_src]
else:
prob, dist = predict_direct(x)
prob = resizer.after(prob, axes_net)
dist = resizer.after(dist, axes_net)
dist = np.maximum(1e-3, dist) # avoid small/negative dist values to prevent problems with Qhull
prob = np.take(prob,0,axis=channel)
dist = np.moveaxis(dist,channel,-1)
return prob, dist
def predict_instances(self, img, axes=None, normalizer=None, prob_thresh=None, nms_thresh=None,
n_tiles=None, show_tile_progress=True, predict_kwargs=None, nms_kwargs=None):
"""Predict instance segmentation from input image.
Parameters
----------
img : :class:`numpy.ndarray`
Input image
axes : str or None
Axes of the input ``img``.
``None`` denotes that axes of img are the same as denoted in the config.
normalizer : :class:`csbdeep.data.Normalizer` or None
(Optional) normalization of input image before prediction.
Note that the default (``None``) assumes ``img`` to be already normalized.
prob_thresh : float or None
Consider only object candidates from pixels with predicted object probability
above this threshold (also see `optimize_thresholds`).
nms_thresh : float or None
Perform non-maximum suppression that considers two objects to be the same
when their area/surface overlap exceeds this threshold (also see `optimize_thresholds`).
n_tiles : iterable or None
Out of memory (OOM) errors can occur if the input image is too large.
To avoid this problem, the input image is broken up into (overlapping) tiles
that are processed independently and re-assembled.
This parameter denotes a tuple of the number of tiles for every image axis (see ``axes``).
``None`` denotes that no tiling should be used.
show_tile_progress: bool
Whether to show progress during tiled prediction.
predict_kwargs: dict
Keyword arguments for ``predict`` function of Keras model.
nms_kwargs: dict
Keyword arguments for non-maximum suppression.
Returns
-------
(:class:`numpy.ndarray`, dict)
Returns a tuple of the label instances image and also
a dictionary with the details (coordinates, etc.) of all remaining polygons/polyhedra.
"""
if predict_kwargs is None:
predict_kwargs = {}
if nms_kwargs is None:
nms_kwargs = {}
prob, dist = self.predict(img, axes=axes, normalizer=normalizer, n_tiles=n_tiles, show_tile_progress=show_tile_progress, **predict_kwargs)
return self._instances_from_prediction(img.shape, prob, dist, prob_thresh=prob_thresh, nms_thresh=nms_thresh, **nms_kwargs)
def optimize_thresholds(self, X_val, Y_val, nms_threshs=[0.3,0.4,0.5], iou_threshs=[0.3,0.5,0.7], predict_kwargs=None, optimize_kwargs=None):
"""Optimize two thresholds (probability, NMS overlap) necessary for predicting object instances.
Note that the default thresholds yield good results in many cases, but optimizing
the thresholds for a particular dataset can further improve performance.
The optimized thresholds are automatically used for all further predictions
and also written to the model directory.
See ``utils.optimize_threshold`` for details and possible choices for ``optimize_kwargs``.
Parameters
----------
X_val : list of ndarray
(Validation) input images (must be normalized) to use for threshold tuning.
Y_val : list of ndarray
(Validation) label images to use for threshold tuning.
nms_threshs : list of float
List of overlap thresholds to be considered for NMS.
For each value in this list, optimization is run to find a corresponding prob_thresh value.
iou_threshs : list of float
List of intersection over union (IOU) thresholds for which
the (average) matching performance is considered to tune the thresholds.
predict_kwargs: dict
Keyword arguments for ``predict`` function of this class.
predict_kwargs: dict
Keyword arguments for ``utils.optimize_threshold`` function.
"""
if predict_kwargs is None:
predict_kwargs = {}
if optimize_kwargs is None:
optimize_kwargs = {}
Yhat_val = [self.predict(x, **predict_kwargs) for x in X_val]
opt_prob_thresh, opt_measure, opt_nms_thresh = None, -np.inf, None
for _opt_nms_thresh in nms_threshs:
_opt_prob_thresh, _opt_measure = optimize_threshold(Y_val, Yhat_val, model=self, nms_thresh=_opt_nms_thresh, iou_threshs=iou_threshs, **optimize_kwargs)
if _opt_measure > opt_measure:
opt_prob_thresh, opt_measure, opt_nms_thresh = _opt_prob_thresh, _opt_measure, _opt_nms_thresh
opt_threshs = dict(prob=opt_prob_thresh, nms=opt_nms_thresh)
self.thresholds = opt_threshs
print(end='', file=sys.stderr, flush=True)
print("Using optimized values: prob_thresh={prob:g}, nms_thresh={nms:g}.".format(prob=self.thresholds.prob, nms=self.thresholds.nms))
if self.basedir is not None:
print("Saving to 'thresholds.json'.")
save_json(opt_threshs, str(self.logdir / 'thresholds.json'))
    def _compute_receptive_field(self, img_size=None):
        """Empirically estimate the network's receptive field per spatial axis.

        Probes the Keras model with a single-pixel impulse image and a zero
        image; pixels where the two outputs differ mark the receptive field.
        Returns a list of (before, after) extents around the image center,
        one tuple per spatial axis.
        """
        # TODO: good enough?
        from scipy.ndimage import zoom
        if img_size is None:
            img_size = tuple(g*(128 if self.config.n_dim==2 else 64) for g in self.config.grid)
        if np.isscalar(img_size):
            img_size = (img_size,) * self.config.n_dim
        img_size = tuple(img_size)
        # print(img_size)
        assert all(_is_power_of_2(s) for s in img_size)
        # place the impulse at the center of the probe image
        mid = tuple(s//2 for s in img_size)
        x = np.zeros((1,)+img_size+(1,), dtype=np.float32)
        z = np.zeros_like(x)
        x[(0,)+mid+(0,)] = 1
        # model takes two inputs (image + dummy); take first output channel
        y = self.keras_model.predict([x,x])[0][0,...,0]
        y0 = self.keras_model.predict([z,z])[0][0,...,0]
        # infer the output grid (downsampling factor) from the shape ratio
        grid = tuple((np.array(x.shape[1:-1])/np.array(y.shape)).astype(int))
        assert grid == self.config.grid
        # upsample outputs back to input resolution before comparing
        y = zoom(y, grid,order=0)
        y0 = zoom(y0,grid,order=0)
        # any pixel affected by the impulse lies within the receptive field
        ind = np.where(np.abs(y-y0)>0)
        return [(m-np.min(i), np.max(i)-m) for (m,i) in zip(mid,ind)]
def _axes_tile_overlap(self, query_axes):
query_axes = axes_check_and_normalize(query_axes)
try:
self._tile_overlap
except AttributeError:
self._tile_overlap = self._compute_receptive_field()
overlap = dict(zip(
self.config.axes.replace('C',''),
tuple(max(rf) for rf in self._tile_overlap)
))
return tuple(overlap.get(a,0) for a in query_axes)
class StarDistPadAndCropResizer(Resizer):
    """Resizer that pads the input so every axis is divisible as required,
    and crops network outputs back, accounting for the prediction grid
    (outputs may be downsampled by ``grid`` relative to the input).
    """
    # TODO: check correctness
    def __init__(self, grid, mode='reflect', **kwargs):
        # grid: dict mapping axis letter -> downsampling factor of the output
        # mode/kwargs are forwarded to np.pad
        assert isinstance(grid, dict)
        self.mode = mode
        self.grid = grid
        self.kwargs = kwargs
    def before(self, x, axes, axes_div_by):
        """Pad ``x`` (at the end of each axis) so each axis size is divisible
        by the corresponding entry of ``axes_div_by``; remembers the padding
        and padded shape for ``after``."""
        # each required divisor must itself be a multiple of the grid factor
        assert all(a%g==0 for g,a in zip((self.grid.get(a,1) for a in axes), axes_div_by))
        axes = axes_check_and_normalize(axes,x.ndim)
        def _split(v):
            return 0, v # only pad at the end
        self.pad = {
            a : _split((div_n-s%div_n)%div_n)
            for a, div_n, s in zip(axes, axes_div_by, x.shape)
        }
        x_pad = np.pad(x, tuple(self.pad[a] for a in axes), mode=self.mode, **self.kwargs)
        self.padded_shape = dict(zip(axes,x_pad.shape))
        if 'C' in self.padded_shape: del self.padded_shape['C']
        return x_pad
    def after(self, x, axes):
        """Crop a (possibly grid-downsampled) output ``x`` back to the
        region corresponding to the original, un-padded input."""
        # axes can include 'C', which may not have been present in before()
        axes = axes_check_and_normalize(axes,x.ndim)
        # sanity check: x times the grid factor must reproduce the padded shape
        assert all(s_pad == s * g for s,s_pad,g in zip(x.shape,
                   (self.padded_shape.get(a,_s) for a,_s in zip(axes,x.shape)),
                   (self.grid.get(a,1) for a in axes)))
        # print(self.padded_shape)
        # print(self.pad)
        # print(self.grid)
        # remove floor(pad/grid) entries from the end of each axis;
        # slice(0, None) (i.e. no crop) when the padding is below one grid cell
        crop = tuple (
            slice(0, -(math.floor(p[1]/g)) if p[1]>=g else None)
            for p,g in zip((self.pad.get(a,(0,0)) for a in axes),(self.grid.get(a,1) for a in axes))
        )
        # print(crop)
        return x[crop]
| [
"numpy.prod",
"math.floor",
"numpy.array",
"csbdeep.internals.predict.tile_iterator",
"numpy.moveaxis",
"scipy.ndimage.zoom",
"numpy.isscalar",
"numpy.max",
"numpy.take",
"numpy.empty",
"numpy.min",
"keras.backend.epsilon",
"numpy.maximum",
"keras.optimizers.Adam",
"numpy.abs",
"numpy.... | [((2490, 2501), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2499, 2501), True, 'import keras.backend as K\n'), ((2534, 2545), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2543, 2545), True, 'import keras.backend as K\n'), ((10599, 10639), 'csbdeep.utils.axes_check_and_normalize', 'axes_check_and_normalize', (['axes', 'img.ndim'], {}), '(axes, img.ndim)\n', (10623, 10639), False, 'from csbdeep.utils import _raise, backend_channels_last, axes_check_and_normalize, axes_dict, load_json, save_json\n'), ((13753, 13776), 'numpy.maximum', 'np.maximum', (['(0.001)', 'dist'], {}), '(0.001, dist)\n', (13763, 13776), True, 'import numpy as np\n'), ((13858, 13888), 'numpy.take', 'np.take', (['prob', '(0)'], {'axis': 'channel'}), '(prob, 0, axis=channel)\n', (13865, 13888), True, 'import numpy as np\n'), ((13902, 13932), 'numpy.moveaxis', 'np.moveaxis', (['dist', 'channel', '(-1)'], {}), '(dist, channel, -1)\n', (13913, 13932), True, 'import numpy as np\n'), ((19372, 19393), 'numpy.isscalar', 'np.isscalar', (['img_size'], {}), '(img_size)\n', (19383, 19393), True, 'import numpy as np\n'), ((19623, 19673), 'numpy.zeros', 'np.zeros', (['((1,) + img_size + (1,))'], {'dtype': 'np.float32'}), '((1,) + img_size + (1,), dtype=np.float32)\n', (19631, 19673), True, 'import numpy as np\n'), ((19682, 19698), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (19695, 19698), True, 'import numpy as np\n'), ((19973, 19995), 'scipy.ndimage.zoom', 'zoom', (['y', 'grid'], {'order': '(0)'}), '(y, grid, order=0)\n', (19977, 19995), False, 'from scipy.ndimage import zoom\n'), ((20008, 20031), 'scipy.ndimage.zoom', 'zoom', (['y0', 'grid'], {'order': '(0)'}), '(y0, grid, order=0)\n', (20012, 20031), False, 'from scipy.ndimage import zoom\n'), ((20208, 20244), 'csbdeep.utils.axes_check_and_normalize', 'axes_check_and_normalize', (['query_axes'], {}), '(query_axes)\n', (20232, 20244), False, 'from csbdeep.utils import _raise, backend_channels_last, 
axes_check_and_normalize, axes_dict, load_json, save_json\n'), ((20986, 21024), 'csbdeep.utils.axes_check_and_normalize', 'axes_check_and_normalize', (['axes', 'x.ndim'], {}), '(axes, x.ndim)\n', (21010, 21024), False, 'from csbdeep.utils import _raise, backend_channels_last, axes_check_and_normalize, axes_dict, load_json, save_json\n'), ((21588, 21626), 'csbdeep.utils.axes_check_and_normalize', 'axes_check_and_normalize', (['axes', 'x.ndim'], {}), '(axes, x.ndim)\n', (21612, 21626), False, 'from csbdeep.utils import _raise, backend_channels_last, axes_check_and_normalize, axes_dict, load_json, save_json\n'), ((2568, 2605), 'keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2589, 2605), True, 'import keras.backend as K\n'), ((2608, 2645), 'keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y_true', 'y_true'], {}), '(y_true, y_true)\n', (2629, 2645), True, 'import keras.backend as K\n'), ((6966, 7006), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.config.train_learning_rate'}), '(lr=self.config.train_learning_rate)\n', (6970, 7006), False, 'from keras.optimizers import Adam\n'), ((10817, 10836), 'csbdeep.utils.axes_dict', 'axes_dict', (['axes_net'], {}), '(axes_net)\n', (10826, 10836), False, 'from csbdeep.utils import _raise, backend_channels_last, axes_check_and_normalize, axes_dict, load_json, save_json\n'), ((11465, 11489), 'numpy.empty', 'np.empty', (['sh', 'np.float32'], {}), '(sh, np.float32)\n', (11473, 11489), True, 'import numpy as np\n'), ((11643, 11659), 'numpy.prod', 'np.prod', (['n_tiles'], {}), '(n_tiles)\n', (11650, 11659), True, 'import numpy as np\n'), ((12387, 12411), 'numpy.empty', 'np.empty', (['sh', 'np.float32'], {}), '(sh, np.float32)\n', (12395, 12411), True, 'import numpy as np\n'), ((12464, 12488), 'numpy.empty', 'np.empty', (['sh', 'np.float32'], {}), '(sh, np.float32)\n', (12472, 12488), True, 'import numpy as np\n'), ((1072, 1084), 'keras.backend.mean', 
'K.mean', (['mask'], {}), '(mask)\n', (1078, 1084), True, 'import keras.backend as K\n'), ((1087, 1098), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1096, 1098), True, 'import keras.backend as K\n'), ((3756, 3802), 'scipy.ndimage.filters.maximum_filter', 'maximum_filter', (['y', 'patch_size'], {'mode': '"""constant"""'}), "(y, patch_size, mode='constant')\n", (3770, 3802), False, 'from scipy.ndimage.filters import maximum_filter\n'), ((8266, 8299), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {}), '(**rlrop_params)\n', (8283, 8299), False, 'from keras.callbacks import ReduceLROnPlateau, TensorBoard\n'), ((12701, 12795), 'csbdeep.internals.predict.tile_iterator', 'tile_iterator', (['x', 'n_tiles'], {'block_sizes': 'axes_net_div_by', 'n_block_overlaps': 'n_block_overlaps'}), '(x, n_tiles, block_sizes=axes_net_div_by, n_block_overlaps=\n n_block_overlaps)\n', (12714, 12795), False, 'from csbdeep.internals.predict import tile_iterator\n'), ((20053, 20067), 'numpy.abs', 'np.abs', (['(y - y0)'], {}), '(y - y0)\n', (20059, 20067), True, 'import numpy as np\n'), ((12039, 12065), 'numpy.empty', 'np.empty', (['n_tiles', 'np.bool'], {}), '(n_tiles, np.bool)\n', (12047, 12065), True, 'import numpy as np\n'), ((12525, 12553), 'numpy.ceil', 'np.ceil', (['(overlap / blocksize)'], {}), '(overlap / blocksize)\n', (12532, 12553), True, 'import numpy as np\n'), ((12875, 12891), 'numpy.prod', 'np.prod', (['n_tiles'], {}), '(n_tiles)\n', (12882, 12891), True, 'import numpy as np\n'), ((20088, 20097), 'numpy.min', 'np.min', (['i'], {}), '(i)\n', (20094, 20097), True, 'import numpy as np\n'), ((20099, 20108), 'numpy.max', 'np.max', (['i'], {}), '(i)\n', (20105, 20108), True, 'import numpy as np\n'), ((10094, 10108), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (10105, 10108), True, 'import numpy as np\n'), ((11779, 11798), 'csbdeep.utils.axes_dict', 'axes_dict', (['axes_net'], {}), '(axes_net)\n', (11788, 11798), False, 'from csbdeep.utils import 
_raise, backend_channels_last, axes_check_and_normalize, axes_dict, load_json, save_json\n'), ((19864, 19887), 'numpy.array', 'np.array', (['x.shape[1:-1]'], {}), '(x.shape[1:-1])\n', (19872, 19887), True, 'import numpy as np\n'), ((19888, 19905), 'numpy.array', 'np.array', (['y.shape'], {}), '(y.shape)\n', (19896, 19905), True, 'import numpy as np\n'), ((22032, 22052), 'math.floor', 'math.floor', (['(p[1] / g)'], {}), '(p[1] / g)\n', (22042, 22052), False, 'import math\n')] |
'''
Author: <NAME>
Implementation of Personalized Ranking Adaptation(PRA).
'https://dl.acm.org/citation.cfm?id=3087993.3088031'
Using Popularity Version and mean-std meature.
'''
import random
import numpy as np
def usr_samples(userid, user_items, n_samples=15):
	'''
	Sample up to ``n_samples`` items from a user's interaction history.
	(The original docstring claimed 10 while the code sampled 15; the
	sample size is now an explicit parameter defaulting to 15.)

	Input :
		userid : key into ``user_items``
		user_items : dict mapping user id -> list of past items
		n_samples : maximum number of items to draw (default 15)
	Output :
		a list of min(len(past_items), n_samples) items, sampled without replacement
	'''
	past_items = user_items[userid]
	# random.sample draws without replacement, so cap at the history length
	usr_pra_samples = random.sample(past_items, min(len(past_items), n_samples))
	return usr_pra_samples
def pra_score(items_list, item_popularity):
	'''
	Input :
		items_list : a list of items, one of {rec_list, history_sampled_list}
		item_popularity : dict mapping item -> [popularity, LongTail(bool)];
			entries that are not 2-element lists fall back to popularity 1
	Output :
		score = pop_mean + pop_std
	'''
	# len==2 : [pop, LongTail(bool)] ; anything else gets default popularity 1
	list_pop = [item_popularity[i][0] if len(item_popularity[i])==2 else 1 for i in items_list]
	# mean + std is already a scalar; the original's np.sum wrapper was redundant
	pra_score = np.mean(list_pop) + np.std(list_pop)
	return pra_score
def pra_reranking(full_list, item_idx, top_n, u_pra_samples, item_popularity):
	'''
	Algorithm described in 'https://dl.acm.org/citation.cfm?id=3087993.3088031'

	Greedily swaps items between the top-n recommendation list and a pool of
	runner-up candidates so that the popularity statistics (mean + std) of the
	recommendation list approach those of the user's sampled history.
	Returns the reranked items and their indices within ``item_idx``.
	'''
	# per-top_n configuration: how many greedy steps to run and how many
	# runner-up candidates to consider for swapping
	conf_setting = {1: {'max_step': 1 , 'ex_len': 1},
	                5: {'max_step': 1, 'ex_len': 1}, # ex_len : 5, 10 ,25
	                10: {'max_step': 1, 'ex_len': 1},} # max_step: 5, 20, 25
	conf = conf_setting[top_n]
	# Get the top n indices
	arg_index = np.argsort(-full_list) # sort from Great to Small values
	rec_items = item_idx[arg_index[:top_n]]
	exc_items = item_idx[arg_index[top_n: top_n+conf['ex_len']]]
	# target popularity score derived from the user's own history sample
	u_pra = pra_score(u_pra_samples, item_popularity)
	# rec_items_list = np.append(rec_items_list, rec_items) # for computing converage
	for _ in range(conf['max_step']):
		rec_score = pra_score(rec_items, item_popularity)
		# e_base: current distance between list score and user target score
		e_base = np.abs(rec_score - u_pra)
		e_delta = 0
		# in reverse order
		for i in range(len(rec_items)-1, -1, -1):
			for j in range(len(exc_items)):
				# tentatively replace position i with candidate j and re-score
				temp_rec = rec_items.copy()
				temp_rec[i] = exc_items[j]
				e_temp = np.abs(pra_score(temp_rec, item_popularity) - u_pra)
				if (e_base-e_temp) > e_delta:
					# remember the swap with the largest improvement so far
					I_best = i
					J_best = j
					e_delta = e_base - e_temp
		# greedily swap only ONCE in each step
		if e_delta > 0:
			rec_items[I_best], exc_items[J_best] = exc_items[J_best], rec_items[I_best]
		else:
			break
	# for computing HR and NDCG
	pra_rec_index = [item_idx.tolist().index(item) for item in rec_items]
	return rec_items, pra_rec_index
| [
"numpy.argsort",
"numpy.abs",
"numpy.mean",
"numpy.std"
] | [((1556, 1578), 'numpy.argsort', 'np.argsort', (['(-full_list)'], {}), '(-full_list)\n', (1566, 1578), True, 'import numpy as np\n'), ((1989, 2014), 'numpy.abs', 'np.abs', (['(rec_score - u_pra)'], {}), '(rec_score - u_pra)\n', (1995, 2014), True, 'import numpy as np\n'), ((1021, 1038), 'numpy.mean', 'np.mean', (['list_pop'], {}), '(list_pop)\n', (1028, 1038), True, 'import numpy as np\n'), ((1041, 1057), 'numpy.std', 'np.std', (['list_pop'], {}), '(list_pop)\n', (1047, 1057), True, 'import numpy as np\n')] |
'''
Created on 19 Mar 2022
@author: ucacsjj
'''
# This grid stores the value function for each state. It's defined to be a
# real number in all cases, so we specialise it here. In addition, it
# automatically creates a policy the policy is a grid the same size as the array.
# The
import random
import numpy as np
from grid_search.grid import Grid
from .policy_grid import PolicyGrid
class QGrid(Grid):
    """A grid of Q-values (state-action values).

    For every cell (x, y) the grid stores ``num_actions`` Q-values. When a
    policy grid and a value-function grid are attached (``_policy`` / ``_v``),
    they are updated greedily each time a Q-value is set.
    """
    def __init__(self, name, environment_map, num_actions, set_random = False):
        """Create a Q-grid matching the dimensions of ``environment_map``.

        ``set_random`` is accepted for interface compatibility but unused:
        a zero initial value is assumed to be arbitrary enough.
        """
        Grid.__init__(self, name, \
                      environment_map.width(), environment_map.height())
        self._num_actions = num_actions
        self._policy = None
        self._v = None
        # Set random isn't used here; we assume a zero value is arbitrary enough
        self._q_values = np.zeros((self._width, self._height, num_actions))

    def policy(self):
        """Return the attached policy grid (or None)."""
        return self._policy

    def value_function(self):
        """Return the attached value-function grid (or None)."""
        return self._v

    def num_actions(self):
        """Return the number of actions per state."""
        return self._num_actions

    def set_value(self, x, y, a, Q):
        """Store Q for (x, y, a); if a policy and value function are attached,
        greedily update them from the new action values at this cell."""
        self._q_values[x, y, a] = Q
        # Only update the derived grids when both are attached.
        # (The original re-checked each for None inside this branch,
        # but those inner checks were always true and have been removed.)
        if self._policy is not None and self._v is not None:
            action_values = self._q_values[x, y, :]
            # Multiple actions may share the maximal Q value; break ties
            # uniformly at random. Note horrible subscripting from np.where.
            max_actions = (np.where(action_values == np.amax(action_values)))[0]
            max_action = max_actions[random.choice(range(max_actions.size))]
            self._policy.set_action(x, y, max_action)
            # Set the current value function
            self._v.set_value(x, y, action_values[max_action])

    def value(self, x, y, a):
        """Return the stored Q-value for (x, y, a)."""
        return self._q_values[x, y, a]

    def values_of_actions(self, x, y):
        """Return the vector of Q-values for all actions at (x, y)."""
        return self._q_values[x, y, :]

    def show(self):
        """Print the Q-values for each action as a text grid."""
        # Print out the policy as a string. Note we have to reverse y because
        # y=0 is at the origin and so we need to print top-to-bottom
        for a in range(0, self._num_actions):
            print(f'Action={a}:')
            for y in reversed(range(self._height)):
                line_string = "{:.3f}".format(self._q_values[0,y,a])
                for x in range(1,self._width):
                    line_string += str(" ") + "{:.3f}".format(self._q_values[x,y,a])
                print(line_string)
            print('========================================================')
| [
"numpy.zeros",
"numpy.amax"
] | [((945, 995), 'numpy.zeros', 'np.zeros', (['(self._width, self._height, num_actions)'], {}), '((self._width, self._height, num_actions))\n', (953, 995), True, 'import numpy as np\n'), ((1675, 1697), 'numpy.amax', 'np.amax', (['action_values'], {}), '(action_values)\n', (1682, 1697), True, 'import numpy as np\n')] |
from dataclasses import dataclass
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Dict, List
import matplotlib.pyplot as plt
import numpy as np
from myfitnesspal.exercise import Exercise
from myfitnesspal.meal import Meal
from . import styles
@dataclass
class MaterializedDay:
    """
    A class to hold the properties from myfitnesspal that we are working with.
    """
    username: str  # myfitnesspal account this day belongs to
    # NOTE(review): with `from datetime import datetime`, `datetime.date` here
    # resolves to the *method* datetime.date, not the `date` type — presumably
    # `date` was intended; verify before relying on this annotation.
    date: datetime.date
    meals: List[Meal]  # myfitnesspal meal objects logged for the day
    exercises: List[Exercise]  # myfitnesspal exercise entries for the day
    goals: Dict[str, float]  # nutrition goals, keyed by nutrient name
    notes: Dict  # currently python-myfitnesspal only scrapes food notes
    water: float  # logged water intake
    measurements: Dict[str, float]  # body measurements, keyed by measurement name
@dataclass
class Style:
    """Color palette for the rendered report (values are color strings,
    e.g. hex codes, taken from ``styles.COLOR_PALETTES``)."""
    # background shades (darkest to lightest)
    bg0: str
    bg1: str
    bg2: str
    # foreground shades
    fg0: str
    fg1: str
    fg2: str
    # text shades
    text0: str
    text1: str
    text2: str
    # accent colors (links, highlights)
    accent0: str
    accent1: str
    # neutral grays
    gray0: str
    gray1: str
    # status colors
    warning: str
    error: str
@dataclass
class User:
    """Recipient of a progress report."""
    username: str  # myfitnesspal username (used for display)
    email: str  # destination address for the report e-mail
class ProgressReport:
    """Assembles the data, styling and chart attachment for a MyFitnessPaw
    progress-report e-mail rendered from a jinja2 template."""

    template_name: str = "mfp_progress_report.jinja2"
    email_from: str = "Lisko Home Automation"

    def __init__(
        self,
        user: User,
        report_data,
        report_style_name: str = "default",
    ):
        """
        Parameters
        ----------
        user:
            Report recipient (provides username and e-mail address).
        report_data:
            dict with keys ``data_table`` (rows of daily nutrition data),
            ``starting_date`` (ISO date string), ``end_goal`` and optionally
            ``num_rows_report_tbl``.
        report_style_name:
            Key into ``styles.COLOR_PALETTES``.
        """
        self.user = user
        self.data = report_data.get("data_table", None)
        self.period_start_date = report_data.get("starting_date", None)
        # first field of the last table row is the day number
        self.current_day_number = self.data[-1][0]
        self.email_subject = (
            f"MyfitnessPaw Progress Report (Day {self.current_day_number})"
        )
        self.email_to = user.email
        self.end_goal = report_data.get("end_goal", None)
        self.num_rows_report_tbl = report_data.get("num_rows_report_tbl", 7)
        style_pallete = styles.COLOR_PALETTES.get(report_style_name)
        self.style = Style(**style_pallete)
        # render the progress bar chart eagerly so it can be attached
        self.attachments = [self._render_progress_bar_chart()]

    @property
    def period_start_date(self):
        """Report period start as a ``datetime``."""
        return self._period_start_date

    @period_start_date.setter
    def period_start_date(self, value: str):
        # incoming value must be an ISO `YYYY-MM-DD` string
        self._period_start_date = datetime.strptime(value, "%Y-%m-%d")

    def get_template_data_dict(self):
        """Build the context dict consumed by the jinja2 template."""
        current_day_number = self.data[-1][0]  # first field in last table row
        title = f"MyFitnessPaw Progress Report (Day {current_day_number})"
        user = f"{self.user.username}".capitalize()
        today = datetime.now().strftime("%d %b %Y")
        generated_ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        nutrition_tbl_header = [
            "day",
            "date",
            "cal target",
            "deficit target",
            "deficit actual",
            "running deficit",
        ]
        nutrition_tbl_data = self._prepare_nutrition_table()
        return {
            "title": title,
            "user": user,
            "today": today,
            "nutrition_tbl_header": nutrition_tbl_header,
            "nutrition_tbl_data": nutrition_tbl_data,
            "generated_ts": generated_ts,
        }

    def get_template_style_dict(self):
        """Map the color palette onto the template's named style slots."""
        return {
            "title_bg_color": self.style.fg1,
            "title_text_color": self.style.text2,
            "article_bg_color": self.style.bg0,
            "article_text_color": self.style.text2,
            "table_border_color": self.style.fg1,
            "table_bg_header": self.style.bg2,
            "table_bg_color1": self.style.bg1,
            "table_bg_color2": self.style.bg2,
            "table_text_color": self.style.text2,
            "footer_bg_color": self.style.text2,
            "footer_text_color": self.style.text0,
            "footer_link_color": self.style.accent0,
        }

    def _render_progress_bar_chart(self):
        """Render yesterday's deficit progress as a stacked horizontal bar
        chart PNG (tmp/temp.png) and return the path to the generated file."""
        nutrition_tbl_data = self._prepare_nutrition_table()
        yesterday_tbl_row = nutrition_tbl_data[-1]
        current_date = yesterday_tbl_row[1]
        deficit_actual = yesterday_tbl_row[4]
        deficit_accumulated = yesterday_tbl_row[5]
        if deficit_actual < 0:
            # yesterday was a surplus: show it in warning color and roll the
            # surplus back into the remaining amount
            deficit_remaining = (
                self.end_goal - deficit_accumulated + abs(deficit_actual)
            )
            current_date_data = (
                (
                    deficit_accumulated - abs(deficit_actual),
                    abs(deficit_actual),
                    deficit_remaining + deficit_actual,
                ),
                "warning",
            )
        else:
            deficit_remaining = self.end_goal - deficit_accumulated - deficit_actual
            current_date_data = (
                (
                    deficit_accumulated - deficit_actual,
                    deficit_actual,
                    deficit_remaining,
                ),
                "accent0",
            )
        chart_data = {current_date: current_date_data}
        color = list(chart_data.values())[0][1]
        vals = tuple(chart_data.values())[0][0]
        # segments: accumulated (gray1), yesterday (accent/warning), remaining (gray0)
        category_colors = [
            self.style.gray1,
            self.style.warning if color == "warning" else self.style.accent0,
            self.style.gray0,
        ]
        labels = list(chart_data.keys())
        data = np.array(list(vals))
        data_cum = data.cumsum()
        fig = plt.figure(figsize=(5.5, 0.7))
        ax = fig.add_subplot(111)
        fig.set_facecolor("#00000000")  # transparent background
        ax.set_axis_off()
        ax.set_ymargin(0.5)
        ax.set_xlim(0, np.sum(data, axis=0).max())
        goals_bar = ax.barh(  # noqa
            labels,
            width=data,
            left=data_cum[:] - data,
            color=category_colors,
        )
        our_dir = Path().absolute()
        chart_dir = our_dir.joinpath(Path("tmp"))
        chart_dir.mkdir(exist_ok=True)
        chart_file = chart_dir.joinpath(Path("temp.png"))
        plt.savefig(chart_file)
        # release the figure explicitly — pyplot keeps figures alive otherwise,
        # leaking memory when several reports are generated in one process
        plt.close(fig)
        return chart_file

    def _prepare_nutrition_table(self):
        """Return the last ``num_rows_report_tbl`` rows that have an actual
        deficit value, or an empty list when yesterday has no usable data."""
        yesterday_str = (date.today() - timedelta(days=1)).strftime("%d-%b-%Y")
        # row[4] is the deficit actual for yesterday
        # we skip days where actual deficit is NULL when we prepare the table
        report_window_data = [row for row in self.data if row[4] is not None]
        # if report starts from today or yesterday has no entered info:
        if not report_window_data or report_window_data[-1][1] != yesterday_str:
            # consistent empty *list* (the populated path also returns a list;
            # the original returned {} here)
            return []
        nutrition_tbl_data = report_window_data[(self.num_rows_report_tbl * -1) :]
        return nutrition_tbl_data

    def render(self):
        """Not yet implemented."""
        pass
| [
"matplotlib.pyplot.savefig",
"pathlib.Path",
"datetime.datetime.strptime",
"datetime.timedelta",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.sum",
"datetime.date.today"
] | [((2083, 2119), 'datetime.datetime.strptime', 'datetime.strptime', (['value', '"""%Y-%m-%d"""'], {}), "(value, '%Y-%m-%d')\n", (2100, 2119), False, 'from datetime import date, datetime, timedelta\n'), ((5185, 5215), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.5, 0.7)'}), '(figsize=(5.5, 0.7))\n', (5195, 5215), True, 'import matplotlib.pyplot as plt\n'), ((5751, 5774), 'matplotlib.pyplot.savefig', 'plt.savefig', (['chart_file'], {}), '(chart_file)\n', (5762, 5774), True, 'import matplotlib.pyplot as plt\n'), ((5632, 5643), 'pathlib.Path', 'Path', (['"""tmp"""'], {}), "('tmp')\n", (5636, 5643), False, 'from pathlib import Path\n'), ((5724, 5740), 'pathlib.Path', 'Path', (['"""temp.png"""'], {}), "('temp.png')\n", (5728, 5740), False, 'from pathlib import Path\n'), ((2381, 2395), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2393, 2395), False, 'from datetime import date, datetime, timedelta\n'), ((2440, 2454), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2452, 2454), False, 'from datetime import date, datetime, timedelta\n'), ((5577, 5583), 'pathlib.Path', 'Path', ([], {}), '()\n', (5581, 5583), False, 'from pathlib import Path\n'), ((5367, 5387), 'numpy.sum', 'np.sum', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (5373, 5387), True, 'import numpy as np\n'), ((5867, 5879), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5877, 5879), False, 'from datetime import date, datetime, timedelta\n'), ((5882, 5899), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5891, 5899), False, 'from datetime import date, datetime, timedelta\n')] |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.convert.to_list
def test():
    """Register a ufunc handler for 'categorical' arrays that projects plain
    indexed (non-option) arrays before applying the ufunc, then check that
    arithmetic works on a categorical array and still fails on strings."""

    def _apply_ufunc(ufunc, method, inputs, kwargs):
        def _maybe_project(x):
            # Project indexed, non-option highlevel arrays to flat content,
            # preserving their behavior registry.
            if (
                isinstance(x, ak._v2.highlevel.Array)
                and x.layout.is_IndexedType
                and not x.layout.is_OptionType
            ):
                return ak._v2.highlevel.Array(
                    x.layout.project(), behavior=ak._v2._util.behavior_of(x)
                )
            return x

        projected = [_maybe_project(x) for x in inputs]
        return getattr(ufunc, method)(*projected, **kwargs)

    ak._v2.behavior[np.ufunc, "categorical"] = _apply_ufunc

    index = ak._v2.index.Index64(np.array([0, 1, 2, 1, 3, 1, 4]))
    content = ak._v2.contents.NumpyArray(np.array([321, 1.1, 123, 999, 2]))
    layout = ak._v2.contents.IndexedArray(
        index, content, parameters={"__array__": "categorical"}
    )
    categorical = ak._v2.highlevel.Array(layout)
    assert to_list(categorical * 10) == [3210, 11, 1230, 11, 9990, 11, 20]

    strings = ak._v2.highlevel.Array(["HAL"])
    with pytest.raises(TypeError):
        strings + 1
| [
"numpy.array",
"awkward._v2._util.behavior_of",
"pytest.raises",
"awkward._v2.highlevel.Array"
] | [((1309, 1340), 'awkward._v2.highlevel.Array', 'ak._v2.highlevel.Array', (["['HAL']"], {}), "(['HAL'])\n", (1331, 1340), True, 'import awkward as ak\n'), ((1350, 1374), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1363, 1374), False, 'import pytest\n'), ((1049, 1080), 'numpy.array', 'np.array', (['[0, 1, 2, 1, 3, 1, 4]'], {}), '([0, 1, 2, 1, 3, 1, 4])\n', (1057, 1080), True, 'import numpy as np\n'), ((1122, 1155), 'numpy.array', 'np.array', (['[321, 1.1, 123, 999, 2]'], {}), '([321, 1.1, 123, 999, 2])\n', (1130, 1155), True, 'import numpy as np\n'), ((695, 722), 'awkward._v2._util.behavior_of', 'ak._v2._util.behavior_of', (['x'], {}), '(x)\n', (719, 722), True, 'import awkward as ak\n')] |
"""Line-profile a full quantum GAN training run.

Builds a synthetic training set, wires up a classical discriminator and a
quantum generator, and profiles ``Quantum_GAN.train`` with line_profiler.
"""
from line_profiler import LineProfiler
import numpy as np

from quantumGAN.discriminator import ClassicalDiscriminator
from quantumGAN.performance_testing.performance_qgan import Quantum_GAN
from quantumGAN.quantum_generator import QuantumGenerator

# Number of data qubits for the quantum generator.
num_qubits: int = 3
# Set number of training epochs
num_epochs = 20
# Batch size
batch_size = 10

# Build a synthetic training set: each sample pairs a 4-dim "real" datapoint
# (derived from a random scalar) with a small random phase vector used as
# noise input for the generator.
# (the original assigned a dead `i = 1` inside this loop; removed)
train_data = []
for _ in range(800):
	x2 = np.random.uniform(.55, .46, (2,))
	fake_datapoint = np.random.uniform(-np.pi * .01, np.pi * .01, (num_qubits,))
	real_datapoint = np.array([x2[0], 0, x2[0], 0])
	train_data.append((real_datapoint, fake_datapoint))

discriminator = ClassicalDiscriminator(sizes=[4, 16, 8, 1],
                                       type_loss="minimax"  # ,functions=["relu", "relu", "sigmoid" ]
                                       )
generator = QuantumGenerator(num_qubits=num_qubits,
                             generator_circuit=None,
                             num_qubits_ancilla=1,
                             shots=4096)
quantum_gan = Quantum_GAN(generator, discriminator)
print("!432423489723974234324234")
print(num_epochs)

# Configure the training run before profiling it.
quantum_gan.num_epochs = num_epochs
quantum_gan.training_data = train_data
quantum_gan.batch_size = batch_size
quantum_gan.generator_lr = .1
quantum_gan.discriminator_lr = .1

# Profile the train() call line-by-line and dump the stats to stdout.
lp = LineProfiler()
lp_wrapper_total = lp(quantum_gan.train)
lp_wrapper_total()
lp.print_stats()
| [
"quantumGAN.performance_testing.performance_qgan.Quantum_GAN",
"quantumGAN.quantum_generator.QuantumGenerator",
"numpy.array",
"numpy.random.uniform",
"quantumGAN.discriminator.ClassicalDiscriminator",
"line_profiler.LineProfiler"
] | [((631, 695), 'quantumGAN.discriminator.ClassicalDiscriminator', 'ClassicalDiscriminator', ([], {'sizes': '[4, 16, 8, 1]', 'type_loss': '"""minimax"""'}), "(sizes=[4, 16, 8, 1], type_loss='minimax')\n", (653, 695), False, 'from quantumGAN.discriminator import ClassicalDiscriminator\n'), ((830, 931), 'quantumGAN.quantum_generator.QuantumGenerator', 'QuantumGenerator', ([], {'num_qubits': 'num_qubits', 'generator_circuit': 'None', 'num_qubits_ancilla': '(1)', 'shots': '(4096)'}), '(num_qubits=num_qubits, generator_circuit=None,\n num_qubits_ancilla=1, shots=4096)\n', (846, 931), False, 'from quantumGAN.quantum_generator import QuantumGenerator\n'), ((1030, 1067), 'quantumGAN.performance_testing.performance_qgan.Quantum_GAN', 'Quantum_GAN', (['generator', 'discriminator'], {}), '(generator, discriminator)\n', (1041, 1067), False, 'from quantumGAN.performance_testing.performance_qgan import Quantum_GAN\n'), ((1303, 1317), 'line_profiler.LineProfiler', 'LineProfiler', ([], {}), '()\n', (1315, 1317), False, 'from line_profiler import LineProfiler\n'), ((400, 435), 'numpy.random.uniform', 'np.random.uniform', (['(0.55)', '(0.46)', '(2,)'], {}), '(0.55, 0.46, (2,))\n', (417, 435), True, 'import numpy as np\n'), ((452, 513), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi * 0.01)', '(np.pi * 0.01)', '(num_qubits,)'], {}), '(-np.pi * 0.01, np.pi * 0.01, (num_qubits,))\n', (469, 513), True, 'import numpy as np\n'), ((530, 560), 'numpy.array', 'np.array', (['[x2[0], 0, x2[0], 0]'], {}), '([x2[0], 0, x2[0], 0])\n', (538, 560), True, 'import numpy as np\n')] |
# Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import copy
import inspect
import pickle
from glob import glob
from datetime import datetime
import numpy as np
from ..exception import ScoreException
from ..utils.common import intersects, clip
class Recommender:
    """Generic class for a recommender model. All recommendation models should inherit from this class.

    Parameters
    ----------------
    name: str, required
        The name of the recommender model

    trainable: boolean, optional, default: True
        When False, the model is not trainable

    verbose: boolean, optional, default: False
        When True, some running logs are displayed
    """

    def __init__(self, name, trainable=True, verbose=False):
        self.name = name
        self.trainable = trainable
        self.verbose = verbose

        self.train_set = None
        self.val_set = None

        # attributes to be ignored when being saved
        self.ignored_attrs = ["train_set", "val_set"]

    def reset_info(self):
        """Reset the early-stopping bookkeeping before a new training run."""
        # fix: np.Inf alias was removed in NumPy 2.0; np.inf is the canonical name
        self.best_value = -np.inf
        self.best_epoch = 0
        self.current_epoch = 0
        self.stopped_epoch = 0
        self.wait = 0

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            # skip data attributes (e.g., train_set) that should not be copied
            if k in self.ignored_attrs:
                continue
            setattr(result, k, copy.deepcopy(v))
        return result

    @classmethod
    def _get_init_params(cls):
        """Get initial parameters from the model constructor"""
        init = getattr(cls.__init__, "deprecated_original", cls.__init__)
        if init is object.__init__:
            # no explicit constructor to introspect
            return []
        init_signature = inspect.signature(init)
        parameters = [p for p in init_signature.parameters.values() if p.name != "self"]
        return sorted([p.name for p in parameters])

    def clone(self, new_params=None):
        """Clone an instance of the model object.

        Parameters
        ----------
        new_params: dict, optional, default: None
            New parameters for the cloned instance.

        Returns
        -------
        object: :obj:`cornac.models.Recommender`
        """
        new_params = {} if new_params is None else new_params
        init_params = {}
        for name in self._get_init_params():
            # prefer a user-supplied value, otherwise deep-copy the current one
            init_params[name] = new_params.get(name, copy.deepcopy(getattr(self, name)))
        return self.__class__(**init_params)

    def save(self, save_dir=None):
        """Save a recommender model to the filesystem.

        Parameters
        ----------
        save_dir: str, default: None
            Path to a directory for the model to be stored.

        Returns
        -------
        model_file : str
            Path to the model file stored on the filesystem.
        """
        if save_dir is None:
            return

        model_dir = os.path.join(save_dir, self.name)
        os.makedirs(model_dir, exist_ok=True)
        timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
        model_file = os.path.join(model_dir, "{}.pkl".format(timestamp))

        saved_model = copy.deepcopy(self)
        # fix: use a context manager so the file handle is always closed
        with open(model_file, "wb") as f:
            pickle.dump(saved_model, f, protocol=pickle.HIGHEST_PROTOCOL)
        if self.verbose:
            print("{} model is saved to {}".format(self.name, model_file))

        return model_file

    @staticmethod
    def load(model_path, trainable=False):
        """Load a recommender model from the filesystem.

        Parameters
        ----------
        model_path: str, required
            Path to a file or directory where the model is stored. If a directory is
            provided, the latest model will be loaded.

        trainable: boolean, optional, default: False
            Set it to True if you would like to finetune the model. By default,
            the model parameters are assumed to be fixed after being loaded.

        Returns
        -------
        self : object
        """
        if os.path.isdir(model_path):
            # pick the lexicographically latest timestamped checkpoint
            model_file = sorted(glob("{}/*.pkl".format(model_path)))[-1]
        else:
            model_file = model_path

        # fix: use a context manager so the file handle is always closed
        with open(model_file, "rb") as f:
            model = pickle.load(f)
        model.trainable = trainable
        model.load_from = model_file  # for further loading
        return model

    def fit(self, train_set, val_set=None):
        """Fit the model to observations.

        Parameters
        ----------
        train_set: :obj:`cornac.data.Dataset`, required
            User-Item preference data as well as additional modalities.

        val_set: :obj:`cornac.data.Dataset`, optional, default: None
            User-Item preference data for model selection purposes (e.g., early stopping).

        Returns
        -------
        self : object
        """
        self.reset_info()
        self.train_set = train_set.reset()
        self.val_set = None if val_set is None else val_set.reset()
        return self

    def score(self, user_idx, item_idx=None):
        """Predict the scores/ratings of a user for an item.

        Parameters
        ----------
        user_idx: int, required
            The index of the user for whom to perform score prediction.

        item_idx: int, optional, default: None
            The index of the item for that to perform score prediction.
            If None, scores for all known items will be returned.

        Returns
        -------
        res : A scalar or a Numpy array
            Relative scores that the user gives to the item or to all known items
        """
        raise NotImplementedError("The algorithm is not able to make score prediction!")

    def default_score(self):
        """Overwrite this function if your algorithm has special treatment for cold-start problem
        """
        return self.train_set.global_mean

    def rate(self, user_idx, item_idx, clipping=True):
        """Give a rating score between pair of user and item

        Parameters
        ----------
        user_idx: int, required
            The index of the user for whom to perform item raking.

        item_idx: int, required
            The index of the item to be rated by the user.

        clipping: bool, default: True
            Whether to clip the predicted rating value.

        Returns
        -------
        A scalar
            A rating score of the user for the item
        """
        try:
            rating_pred = self.score(user_idx, item_idx)
        except ScoreException:
            # fall back to the global mean for cold-start cases
            rating_pred = self.default_score()

        if clipping:
            rating_pred = clip(
                values=rating_pred,
                lower_bound=self.train_set.min_rating,
                upper_bound=self.train_set.max_rating,
            )

        return rating_pred

    def rank(self, user_idx, item_indices=None):
        """Rank all test items for a given user.

        Parameters
        ----------
        user_idx: int, required
            The index of the user for whom to perform item raking.

        item_indices: 1d array, optional, default: None
            A list of candidate item indices to be ranked by the user.
            If `None`, list of ranked known item indices and their scores will be returned.
            ASSUMPTION: list of item indices are continuous from 0 to len(item_indices).

        Returns
        -------
        Tuple of `item_rank`, and `item_scores`. The order of values
        in item_scores are corresponding to the order of their ids in item_ids
        """
        # obtain item scores from the model
        try:
            known_item_scores = self.score(user_idx)
        except ScoreException:
            known_item_scores = (
                np.ones(self.train_set.total_items) * self.default_score()
            )

        # check if the returned scores also cover unknown items
        # if not, all unknown items will be given the MIN score
        if len(known_item_scores) == self.train_set.total_items:
            all_item_scores = known_item_scores
        else:
            all_item_scores = np.ones(self.train_set.total_items) * np.min(
                known_item_scores
            )
            all_item_scores[: self.train_set.num_items] = known_item_scores

        # rank items based on their scores
        if item_indices is None:
            item_scores = all_item_scores[: self.train_set.num_items]
            item_rank = item_scores.argsort()[::-1]
        else:
            # NOTE: relies on the ASSUMPTION above — item_indices span [0, len(item_indices))
            item_scores = all_item_scores[: len(item_indices)]
            item_rank = item_scores.argsort()[::-1]
            item_scores = item_scores[item_indices]

        return item_rank, item_scores

    def monitor_value(self):
        """Calculating monitored value used for early stopping on validation set (`val_set`).
        This function will be called by `early_stop()` function.
        Note: `val_set` could be `None` thus it needs to be checked before usage.

        Returns
        -------
        :raise NotImplementedError
        """
        raise NotImplementedError()

    def early_stop(self, min_delta=0.0, patience=0):
        """Check if training should be stopped when validation loss has stopped improving.

        Parameters
        ----------
        min_delta: float, optional, default: 0.
            The minimum increase in monitored value on validation set to be considered as improvement,
            i.e. an increment of less than `min_delta` will count as no improvement.

        patience: int, optional, default: 0
            Number of epochs with no improvement after which training should be stopped.

        Returns
        -------
        res : bool
            Return `True` if model training should be stopped (no improvement on validation set),
            otherwise return `False`.
        """
        self.current_epoch += 1
        current_value = self.monitor_value()
        if current_value is None:
            return False

        if np.greater_equal(current_value - self.best_value, min_delta):
            # improvement: remember it and reset the patience counter
            self.best_value = current_value
            self.best_epoch = self.current_epoch
            self.wait = 0
        else:
            self.wait += 1
            if self.wait >= patience:
                self.stopped_epoch = self.current_epoch

        if self.stopped_epoch > 0:
            print("Early stopping:")
            print(
                "- best epoch = {}, stopped epoch = {}".format(
                    self.best_epoch, self.stopped_epoch
                )
            )
            print(
                "- best monitored value = {:.6f} (delta = {:.6f})".format(
                    self.best_value, current_value - self.best_value
                )
            )
            return True

        return False
| [
"numpy.ones",
"os.makedirs",
"os.path.join",
"inspect.signature",
"numpy.min",
"datetime.datetime.now",
"os.path.isdir",
"copy.deepcopy",
"numpy.greater_equal"
] | [((2266, 2289), 'inspect.signature', 'inspect.signature', (['init'], {}), '(init)\n', (2283, 2289), False, 'import inspect\n'), ((3450, 3483), 'os.path.join', 'os.path.join', (['save_dir', 'self.name'], {}), '(save_dir, self.name)\n', (3462, 3483), False, 'import os\n'), ((3492, 3529), 'os.makedirs', 'os.makedirs', (['model_dir'], {'exist_ok': '(True)'}), '(model_dir, exist_ok=True)\n', (3503, 3529), False, 'import os\n'), ((3694, 3713), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3707, 3713), False, 'import copy\n'), ((4586, 4611), 'os.path.isdir', 'os.path.isdir', (['model_path'], {}), '(model_path)\n', (4599, 4611), False, 'import os\n'), ((10549, 10609), 'numpy.greater_equal', 'np.greater_equal', (['(current_value - self.best_value)', 'min_delta'], {}), '(current_value - self.best_value, min_delta)\n', (10565, 10609), True, 'import numpy as np\n'), ((1955, 1971), 'copy.deepcopy', 'copy.deepcopy', (['v'], {}), '(v)\n', (1968, 1971), False, 'import copy\n'), ((3550, 3564), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3562, 3564), False, 'from datetime import datetime\n'), ((8671, 8706), 'numpy.ones', 'np.ones', (['self.train_set.total_items'], {}), '(self.train_set.total_items)\n', (8678, 8706), True, 'import numpy as np\n'), ((8709, 8734), 'numpy.min', 'np.min', (['known_item_scores'], {}), '(known_item_scores)\n', (8715, 8734), True, 'import numpy as np\n'), ((8312, 8347), 'numpy.ones', 'np.ones', (['self.train_set.total_items'], {}), '(self.train_set.total_items)\n', (8319, 8347), True, 'import numpy as np\n')] |
import os
from pickle import FALSE
import sys
import numpy as np
from collections import Iterable
import importlib
import open3d as o3d
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Categorical
from baselines import *
# Make the project-local classifier model definitions importable regardless
# of the current working directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'model/classifier'))
class PointCloudAttack(object):
    def __init__(self, args):
        """Shape-invariant Adversarial Attack for 3D Point Clouds.

        Args:
            args: parsed argument namespace carrying device, eps, normal,
                step_size, num_class, max_steps, top5_attack, model names,
                the attack method and the (optional) defense method.
        """
        self.args = args
        self.device = args.device
        self.eps = args.eps
        self.normal = args.normal
        self.step_size = args.step_size
        self.num_class = args.num_class
        self.max_steps = args.max_steps
        self.top5_attack = args.top5_attack

        # exactly one of transfer-attack / query-attack must be specified
        assert args.transfer_attack_method is None or args.query_attack_method is None
        assert not args.transfer_attack_method is None or not args.query_attack_method is None
        self.attack_method = args.transfer_attack_method if args.query_attack_method is None else args.query_attack_method

        self.build_models()
        self.defense_method = args.defense_method
        if not args.defense_method is None:
            self.pre_head = self.get_defense_head(args.defense_method)

    def build_models(self):
        """Build white-box surrogate model and black-box target model.
        """
        # load white-box surrogate models
        MODEL = importlib.import_module(self.args.surrogate_model)
        wb_classifier = MODEL.get_model(
            self.num_class,
            normal_channel=self.normal
        )
        wb_classifier = wb_classifier.to(self.device)
        # load black-box target models
        MODEL = importlib.import_module(self.args.target_model)
        classifier = MODEL.get_model(
            self.num_class,
            normal_channel=self.normal
        )
        classifier = classifier.to(self.args.device)
        # load model weights
        wb_classifier = self.load_models(wb_classifier, self.args.surrogate_model)
        classifier = self.load_models(classifier, self.args.target_model)
        # set eval
        self.wb_classifier = wb_classifier.eval()
        self.classifier = classifier.eval()

    def load_models(self, classifier, model_name):
        """Load model weights from ./checkpoint/<dataset>/<model_name>.{pth,t7,tar}.

        Args:
            classifier (nn.Module): network to receive the weights.
            model_name (str): checkpoint base name.
        """
        model_path = os.path.join('./checkpoint/' + self.args.dataset, model_name)
        if os.path.exists(model_path + '.pth'):
            checkpoint = torch.load(model_path + '.pth')
        elif os.path.exists(model_path + '.t7'):
            checkpoint = torch.load(model_path + '.t7')
        elif os.path.exists(model_path + '.tar'):
            checkpoint = torch.load(model_path + '.tar')
        else:
            raise NotImplementedError

        try:
            if 'model_state_dict' in checkpoint:
                classifier.load_state_dict(checkpoint['model_state_dict'])
            elif 'model_state' in checkpoint:
                classifier.load_state_dict(checkpoint['model_state'])
            else:
                classifier.load_state_dict(checkpoint)
        # fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt
        except Exception:
            # checkpoint was saved from an nn.DataParallel model; retry wrapped
            classifier = nn.DataParallel(classifier)
            classifier.load_state_dict(checkpoint)
        return classifier

    def CWLoss(self, logits, target, kappa=0, tar=False, num_classes=40):
        """Carlini & Wagner attack loss.

        Args:
            logits (torch.cuda.FloatTensor): the predicted logits, [1, num_classes].
            target (torch.cuda.LongTensor): the label for points, [1].
            kappa (float): confidence margin.
            tar (bool): targeted attack when True.
            num_classes (int): number of classes in the classifier.
        """
        target = torch.ones(logits.size(0)).type(torch.cuda.FloatTensor).mul(target.float())
        target_one_hot = Variable(torch.eye(num_classes).type(torch.cuda.FloatTensor)[target.long()].cuda())

        real = torch.sum(target_one_hot*logits, 1)
        if not self.top5_attack:
            ### top-1 attack
            other = torch.max((1-target_one_hot)*logits - (target_one_hot*10000), 1)[0]
        else:
            ### top-5 attack
            other = torch.topk((1-target_one_hot)*logits - (target_one_hot*10000), 5)[0][:, 4]
        kappa = torch.zeros_like(other).fill_(kappa)

        if tar:
            return torch.sum(torch.max(other-real, kappa))
        else:
            return torch.sum(torch.max(real-other, kappa))

    def run(self, points, target):
        """Main attack method.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 6].
            target (torch.cuda.LongTensor): the label for points, [1].
        """
        if self.attack_method == 'ifgm_ours':
            return self.shape_invariant_ifgm(points, target)
        elif self.attack_method == 'simba':
            return self.simba_attack(points, target)
        elif self.attack_method == 'simbapp':
            return self.simbapp_attack(points, target)
        elif self.attack_method == 'ours':
            return self.shape_invariant_query_attack(points, target)
        else:
            # fix: the original evaluated `NotImplementedError` without raising,
            # silently returning None for unknown attack methods
            raise NotImplementedError

    def get_defense_head(self, method):
        """Set the pre-processing based defense module.

        Args:
            method (str): defense method name.
        """
        if method == 'sor':
            pre_head = SORDefense(k=2, alpha=1.1)
        elif method == 'srs':
            pre_head = SRSDefense(drop_num=500)
        elif method == 'dupnet':
            pre_head = DUPNet(sor_k=2, sor_alpha=1.1, npoint=1024, up_ratio=4)
        else:
            raise NotImplementedError
        return pre_head

    def get_normal_vector(self, points):
        """Calculate the normal vector.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 3].
        """
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points.squeeze(0).detach().cpu().numpy())
        pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamKNN(knn=20))
        normal_vec = torch.FloatTensor(pcd.normals).cuda().unsqueeze(0)
        return normal_vec

    def get_spin_axis_matrix(self, normal_vec):
        """Calculate the spin-axis matrix.

        Args:
            normal_vec (torch.cuda.FloatTensor): the normal vectors for all N points, [1, N, 3].
        """
        _, N, _ = normal_vec.shape
        x = normal_vec[:,:,0]  # [1, N]
        y = normal_vec[:,:,1]  # [1, N]
        z = normal_vec[:,:,2]  # [1, N]
        assert abs(normal_vec).max() <= 1
        u = torch.zeros(1, N, 3, 3).cuda()
        denominator = torch.sqrt(1-z**2)  # \sqrt{1-z^2}, [1, N]
        u[:,:,0,0] = y / denominator
        u[:,:,0,1] = - x / denominator
        u[:,:,0,2] = 0.
        u[:,:,1,0] = x * z / denominator
        u[:,:,1,1] = y * z / denominator
        u[:,:,1,2] = - denominator
        u[:,:,2] = normal_vec
        # revision for |z| = 1, boundary case (denominator above is ~0).
        pos = torch.where(abs(z ** 2 - 1) < 1e-4)[1]
        u[:,pos,0,0] = 1 / np.sqrt(2)
        u[:,pos,0,1] = - 1 / np.sqrt(2)
        u[:,pos,0,2] = 0.
        u[:,pos,1,0] = z[:,pos] / np.sqrt(2)
        u[:,pos,1,1] = z[:,pos] / np.sqrt(2)
        u[:,pos,1,2] = 0.
        u[:,pos,2,0] = 0.
        u[:,pos,2,1] = 0.
        u[:,pos,2,2] = z[:,pos]
        return u.data

    def get_transformed_point_cloud(self, points, normal_vec):
        """Transform the point cloud into the tangent-plane coordinate frame.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 3].
            normal_vec (torch.cuda.FloatTensor): the normal vectors for all N points, [1, N, 3].
        """
        intercept = torch.mul(points, normal_vec).sum(-1, keepdim=True)  # P \cdot N, [1, N, 1]
        spin_axis_matrix = self.get_spin_axis_matrix(normal_vec)  # U, [1, N, 3, 3]
        translation_matrix = torch.mul(intercept, normal_vec).data  # (P \cdot N) N, [1, N, 3]
        new_points = points + translation_matrix  # P + (P \cdot N) N, [1, N, 3]
        new_points = new_points.unsqueeze(-1)  # P + (P \cdot N) N, [1, N, 3, 1]
        new_points = torch.matmul(spin_axis_matrix, new_points)  # P' = U (P + (P \cdot N) N), [1, N, 3, 1]
        new_points = new_points.squeeze(-1).data  # P', [1, N, 3]
        return new_points, spin_axis_matrix, translation_matrix

    def get_original_point_cloud(self, new_points, spin_axis_matrix, translation_matrix):
        """Invert the tangent-plane transformation back to input space.

        Args:
            new_points (torch.cuda.FloatTensor): the transformed point cloud with N points, [1, N, 3].
            spin_axis_matrix (torch.cuda.FloatTensor): the rotate matrix for transformation, [1, N, 3, 3].
            translation_matrix (torch.cuda.FloatTensor): the offset matrix for transformation, [1, N, 3, 3].
        """
        inputs = torch.matmul(spin_axis_matrix.transpose(-1, -2), new_points.unsqueeze(-1))  # U^T P', [1, N, 3, 1]
        inputs = inputs - translation_matrix.unsqueeze(-1)  # P = U^T P' - (P \cdot N) N, [1, N, 3, 1]
        inputs = inputs.squeeze(-1)  # P, [1, N, 3]
        return inputs

    def shape_invariant_ifgm(self, points, target):
        """Black-box I-FGSM based on shape-invariant sensitivity maps.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 6].
            target (torch.cuda.LongTensor): the label for points, [1].
        """
        normal_vec = points[:,:,-3:].data  # N, [1, N, 3]
        normal_vec = normal_vec / torch.sqrt(torch.sum(normal_vec ** 2, dim=-1, keepdim=True))  # N, [1, N, 3]
        points = points[:,:,:3].data  # P, [1, N, 3]
        ori_points = points.data
        clip_func = ClipPointsLinf(budget=self.eps)

        for i in range(self.max_steps):
            # P -> P', detach()
            new_points, spin_axis_matrix, translation_matrix = self.get_transformed_point_cloud(points, normal_vec)
            new_points = new_points.detach()
            new_points.requires_grad = True
            # P' -> P
            points = self.get_original_point_cloud(new_points, spin_axis_matrix, translation_matrix)
            points = points.transpose(1, 2)  # P, [1, 3, N]
            # get white-box gradients
            if not self.defense_method is None:
                logits = self.wb_classifier(self.pre_head(points))
            else:
                logits = self.wb_classifier(points)
            loss = self.CWLoss(logits, target, kappa=0., tar=False, num_classes=self.num_class)
            self.wb_classifier.zero_grad()
            loss.backward()
            grad = new_points.grad.data  # g, [1, N, 3]
            grad[:,:,2] = 0.  # no perturbation along the normal direction

            # update P', P and N — L2-normalized step
            norm = torch.sum(grad ** 2, dim=[1, 2]) ** 0.5
            new_points = new_points - self.step_size * np.sqrt(3*1024) * grad / (norm[:, None, None] + 1e-9)
            points = self.get_original_point_cloud(new_points, spin_axis_matrix, translation_matrix)  # P, [1, N, 3]
            points = clip_func(points, ori_points)
            normal_vec = self.get_normal_vector(points)  # N, [1, N, 3]

        with torch.no_grad():
            adv_points = points.data
            if not self.defense_method is None:
                adv_logits = self.classifier(self.pre_head(points.transpose(1, 2).detach()))
            else:
                adv_logits = self.classifier(points.transpose(1, 2).detach())
            adv_target = adv_logits.data.max(1)[1]

        if self.top5_attack:
            target_top_5 = adv_logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1

        del normal_vec, grad, new_points, spin_axis_matrix, translation_matrix
        return adv_points, adv_target, (adv_logits.data.max(1)[1] != target).sum().item()

    def simba_attack(self, points, target):
        """Black-box query-based SimBA attack.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 6].
            target (torch.cuda.LongTensor): the label for points, [1].
        """
        points = points[:,:,:3].data  # P, [1, N, 3]

        # initialization
        query_costs = 0
        with torch.no_grad():
            points = points.transpose(1, 2)
            if not self.defense_method is None:
                adv_logits = self.classifier(self.pre_head(points.detach()))
            else:
                adv_logits = self.classifier(points)
            adv_target = adv_logits.max(1)[1]
            query_costs += 1
        # if categorized wrong
        if self.top5_attack:
            target_top_5 = adv_logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1
        if adv_target != target:
            return points.transpose(1, 2), adv_target, query_costs

        # constructing a randomly shuffled list of (channel, point) coordinates
        basis_list = []
        for j in range(points.shape[2]):
            for i in range(3):
                basis_list.append((i, j))
        basis_list = np.array(basis_list)
        np.random.shuffle(basis_list)

        # query loop
        i = 0
        best_loss = -999.
        while best_loss < 0 and i < len(basis_list):
            channel, idx = basis_list[i]
            for eps in {self.step_size, -self.step_size}:
                pert = torch.zeros_like(points).cuda()  # \delta, [1, 3, N]
                pert[:,channel,idx] += eps
                inputs = points + pert
                with torch.no_grad():
                    if not self.defense_method is None:
                        logits = self.classifier(self.pre_head(inputs.detach()))
                    else:
                        logits = self.classifier(inputs.detach())  # [1, num_class]
                    query_costs += 1
                loss = self.CWLoss(logits, target, kappa=-999., tar=True, num_classes=self.num_class)
                if loss.item() > best_loss:
                    best_loss = loss.item()
                    points = points + pert
                    adv_target = logits.max(1)[1]
                    break
            i += 1

        adv_points = points.transpose(1, 2).data
        if self.top5_attack:
            target_top_5 = logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1
        # fix: the original executed `del grad` here, but no `grad` variable is
        # ever defined in this method, raising NameError on every completed attack
        return adv_points, adv_target, query_costs

    def simbapp_attack(self, points, target):
        """Black-box query-based SimBA++ attack.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 6].
            target (torch.cuda.LongTensor): the label for points, [1].
        """
        points = points[:,:,:3].data  # P, [1, N, 3]

        # initialization
        query_costs = 0
        with torch.no_grad():
            points = points.transpose(1, 2)
            if not self.defense_method is None:
                adv_logits = self.classifier(self.pre_head(points.detach()))
            else:
                adv_logits = self.classifier(points)
            adv_target = adv_logits.max(1)[1]
            query_costs += 1
        # if categorized wrong
        if self.top5_attack:
            target_top_5 = adv_logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1
        if adv_target != target:
            return points.transpose(1, 2), adv_target, query_costs

        # get white-box gradients to bias the coordinate sampling
        points = points.detach()
        points.requires_grad = True
        logits = self.wb_classifier(points)
        loss = self.CWLoss(logits, target, kappa=-999., tar=True, num_classes=self.num_class)
        self.wb_classifier.zero_grad()
        loss.backward()
        grad = points.grad.data  # g, [1, 3, N]
        grad = abs(grad).reshape(-1)

        # query loop: sample coordinates proportionally to |gradient|
        i = 0
        best_loss = -999.
        while best_loss < 0 and i < grad.shape[0]:
            m = Categorical(grad)
            choice = m.sample()
            channel = int(choice % 3)
            idx = int(choice // 3)
            for eps in {self.step_size, -self.step_size}:
                pert = torch.zeros_like(points).cuda()  # \delta, [1, 3, N]
                pert[:,channel,idx] += (eps + 0.1*torch.randn(1).cuda())
                inputs = points + pert
                with torch.no_grad():
                    if not self.defense_method is None:
                        logits = self.classifier(self.pre_head(inputs.detach()))
                    else:
                        logits = self.classifier(inputs.detach())  # [1, num_class]
                    query_costs += 1
                loss = self.CWLoss(logits, target, kappa=-999., tar=True, num_classes=self.num_class)
                if loss.item() > best_loss:
                    best_loss = loss.item()
                    points = points + pert
                    adv_target = logits.max(1)[1]
                    break
            i += 1

        adv_points = points.transpose(1, 2).data
        if self.top5_attack:
            target_top_5 = logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1
        del grad, m
        return adv_points, adv_target, query_costs

    def shape_invariant_query_attack(self, points, target):
        """Black-box query-based attack based on point-cloud sensitivity maps.

        Args:
            points (torch.cuda.FloatTensor): the point cloud with N points, [1, N, 6].
            target (torch.cuda.LongTensor): the label for points, [1].
        """
        normal_vec = points[:,:,-3:].data  # N, [1, N, 3]
        normal_vec = normal_vec / torch.sqrt(torch.sum(normal_vec ** 2, dim=-1, keepdim=True))  # N, [1, N, 3]
        points = points[:,:,:3].data  # P, [1, N, 3]
        ori_points = points.data

        # initialization
        query_costs = 0
        with torch.no_grad():
            points = points.transpose(1, 2)
            if not self.defense_method is None:
                adv_logits = self.classifier(self.pre_head(points.detach()))
            else:
                adv_logits = self.classifier(points)
            adv_target = adv_logits.max(1)[1]
            query_costs += 1
        # if categorized wrong
        if self.top5_attack:
            target_top_5 = adv_logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1
        if adv_target != target:
            return points.transpose(1, 2), adv_target, query_costs

        # P -> P', detach()
        points = points.transpose(1, 2)
        new_points, spin_axis_matrix, translation_matrix = self.get_transformed_point_cloud(points.detach(), normal_vec)
        new_points = new_points.detach()
        new_points.requires_grad = True
        # P' -> P
        inputs = self.get_original_point_cloud(new_points, spin_axis_matrix, translation_matrix)
        inputs = torch.min(torch.max(inputs, ori_points - self.eps), ori_points + self.eps)
        inputs = inputs.transpose(1, 2)  # P, [1, 3, N]
        # get white-box gradients
        logits = self.wb_classifier(inputs)
        loss = self.CWLoss(logits, target, kappa=-999., tar=True, num_classes=self.num_class)
        self.wb_classifier.zero_grad()
        loss.backward()
        grad = new_points.grad.data  # g, [1, N, 3]
        grad[:,:,2] = 0.
        new_points.requires_grad = False

        # build the sensitivity map and rank points by it (descending)
        rankings = torch.sqrt(grad[:,:,0] ** 2 + grad[:,:,1] ** 2)  # \sqrt{g_{x'}^2+g_{y'}^2}, [1, N]
        directions = grad / (rankings.unsqueeze(-1)+1e-16)  # (g_{x'}/r,g_{y'}/r,0), [1, N, 3]
        point_list = []
        for i in range(points.size(1)):
            point_list.append((i, directions[:,i,:], rankings[:,i].item()))
        sorted_point_list = sorted(point_list, key=lambda c: c[2], reverse=True)

        # query loop
        i = 0
        best_loss = -999.
        while best_loss < 0 and i < len(sorted_point_list):
            idx, direction, _ = sorted_point_list[i]
            for eps in {self.step_size, -self.step_size}:
                pert = torch.zeros_like(new_points).cuda()
                pert[:,idx,:] += eps * direction
                inputs = new_points + pert
                inputs = torch.matmul(spin_axis_matrix.transpose(-1, -2), inputs.unsqueeze(-1))  # U^T P', [1, N, 3, 1]
                inputs = inputs - translation_matrix.unsqueeze(-1)  # P = U^T P' - (P \cdot N) N, [1, N, 3, 1]
                inputs = inputs.squeeze(-1).transpose(1, 2)  # P, [1, 3, N]
                with torch.no_grad():
                    if not self.defense_method is None:
                        logits = self.classifier(self.pre_head(inputs.detach()))
                    else:
                        logits = self.classifier(inputs.detach())  # [1, num_class]
                    query_costs += 1
                loss = self.CWLoss(logits, target, kappa=-999., tar=True, num_classes=self.num_class)
                if loss.item() > best_loss:
                    best_loss = loss.item()
                    new_points = new_points + pert
                    adv_target = logits.max(1)[1]
                    break
            i += 1

        adv_points = inputs.transpose(1, 2).data
        if self.top5_attack:
            target_top_5 = logits.topk(5)[1]
            if target in target_top_5:
                adv_target = target
            else:
                adv_target = -1
        del grad
        return adv_points, adv_target, query_costs
| [
"torch.mul",
"numpy.sqrt",
"torch.distributions.Categorical",
"torch.max",
"torch.sqrt",
"numpy.array",
"torch.sum",
"os.path.exists",
"torch.eye",
"torch.matmul",
"torch.zeros_like",
"torch.randn",
"importlib.import_module",
"torch.topk",
"torch.load",
"open3d.geometry.KDTreeSearchPar... | [((382, 407), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (397, 407), False, 'import os\n'), ((447, 489), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""model/classifier"""'], {}), "(ROOT_DIR, 'model/classifier')\n", (459, 489), False, 'import os\n'), ((1624, 1674), 'importlib.import_module', 'importlib.import_module', (['self.args.surrogate_model'], {}), '(self.args.surrogate_model)\n', (1647, 1674), False, 'import importlib\n'), ((1909, 1956), 'importlib.import_module', 'importlib.import_module', (['self.args.target_model'], {}), '(self.args.target_model)\n', (1932, 1956), False, 'import importlib\n'), ((2597, 2658), 'os.path.join', 'os.path.join', (["('./checkpoint/' + self.args.dataset)", 'model_name'], {}), "('./checkpoint/' + self.args.dataset, model_name)\n", (2609, 2658), False, 'import os\n'), ((2671, 2706), 'os.path.exists', 'os.path.exists', (["(model_path + '.pth')"], {}), "(model_path + '.pth')\n", (2685, 2706), False, 'import os\n'), ((4053, 4090), 'torch.sum', 'torch.sum', (['(target_one_hot * logits)', '(1)'], {}), '(target_one_hot * logits, 1)\n', (4062, 4090), False, 'import torch\n'), ((6088, 6113), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (6111, 6113), True, 'import open3d as o3d\n'), ((6882, 6904), 'torch.sqrt', 'torch.sqrt', (['(1 - z ** 2)'], {}), '(1 - z ** 2)\n', (6892, 6904), False, 'import torch\n'), ((8404, 8446), 'torch.matmul', 'torch.matmul', (['spin_axis_matrix', 'new_points'], {}), '(spin_axis_matrix, new_points)\n', (8416, 8446), False, 'import torch\n'), ((13830, 13850), 'numpy.array', 'np.array', (['basis_list'], {}), '(basis_list)\n', (13838, 13850), True, 'import numpy as np\n'), ((13860, 13889), 'numpy.random.shuffle', 'np.random.shuffle', (['basis_list'], {}), '(basis_list)\n', (13877, 13889), True, 'import numpy as np\n'), ((21095, 21146), 'torch.sqrt', 'torch.sqrt', (['(grad[:, :, 0] ** 2 + grad[:, :, 1] ** 2)'], {}), 
'(grad[:, :, 0] ** 2 + grad[:, :, 1] ** 2)\n', (21105, 21146), False, 'import torch\n'), ((2734, 2765), 'torch.load', 'torch.load', (["(model_path + '.pth')"], {}), "(model_path + '.pth')\n", (2744, 2765), False, 'import torch\n'), ((2780, 2814), 'os.path.exists', 'os.path.exists', (["(model_path + '.t7')"], {}), "(model_path + '.t7')\n", (2794, 2814), False, 'import os\n'), ((7308, 7318), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7315, 7318), True, 'import numpy as np\n'), ((7349, 7359), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7356, 7359), True, 'import numpy as np\n'), ((7422, 7432), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7429, 7432), True, 'import numpy as np\n'), ((7468, 7478), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7475, 7478), True, 'import numpy as np\n'), ((8154, 8186), 'torch.mul', 'torch.mul', (['intercept', 'normal_vec'], {}), '(intercept, normal_vec)\n', (8163, 8186), False, 'import torch\n'), ((11757, 11772), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11770, 11772), False, 'import torch\n'), ((12946, 12961), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12959, 12961), False, 'import torch\n'), ((15795, 15810), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15808, 15810), False, 'import torch\n'), ((17335, 17352), 'torch.distributions.Categorical', 'Categorical', (['grad'], {}), '(grad)\n', (17346, 17352), False, 'from torch.distributions import Categorical\n'), ((19484, 19499), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19497, 19499), False, 'import torch\n'), ((20590, 20630), 'torch.max', 'torch.max', (['inputs', '(ori_points - self.eps)'], {}), '(inputs, ori_points - self.eps)\n', (20599, 20630), False, 'import torch\n'), ((2842, 2872), 'torch.load', 'torch.load', (["(model_path + '.t7')"], {}), "(model_path + '.t7')\n", (2852, 2872), False, 'import torch\n'), ((2887, 2922), 'os.path.exists', 'os.path.exists', (["(model_path + '.tar')"], {}), "(model_path + '.tar')\n", (2901, 
2922), False, 'import os\n'), ((3414, 3441), 'torch.nn.DataParallel', 'nn.DataParallel', (['classifier'], {}), '(classifier)\n', (3429, 3441), True, 'import torch.nn as nn\n'), ((4174, 4242), 'torch.max', 'torch.max', (['((1 - target_one_hot) * logits - target_one_hot * 10000)', '(1)'], {}), '((1 - target_one_hot) * logits - target_one_hot * 10000, 1)\n', (4183, 4242), False, 'import torch\n'), ((4400, 4423), 'torch.zeros_like', 'torch.zeros_like', (['other'], {}), '(other)\n', (4416, 4423), False, 'import torch\n'), ((4486, 4516), 'torch.max', 'torch.max', (['(other - real)', 'kappa'], {}), '(other - real, kappa)\n', (4495, 4516), False, 'import torch\n'), ((4562, 4592), 'torch.max', 'torch.max', (['(real - other)', 'kappa'], {}), '(real - other, kappa)\n', (4571, 4592), False, 'import torch\n'), ((6248, 6289), 'open3d.geometry.KDTreeSearchParamKNN', 'o3d.geometry.KDTreeSearchParamKNN', ([], {'knn': '(20)'}), '(knn=20)\n', (6281, 6289), True, 'import open3d as o3d\n'), ((6828, 6851), 'torch.zeros', 'torch.zeros', (['(1)', 'N', '(3)', '(3)'], {}), '(1, N, 3, 3)\n', (6839, 6851), False, 'import torch\n'), ((7965, 7994), 'torch.mul', 'torch.mul', (['points', 'normal_vec'], {}), '(points, normal_vec)\n', (7974, 7994), False, 'import torch\n'), ((9829, 9877), 'torch.sum', 'torch.sum', (['(normal_vec ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(normal_vec ** 2, dim=-1, keepdim=True)\n', (9838, 9877), False, 'import torch\n'), ((11236, 11268), 'torch.sum', 'torch.sum', (['(grad ** 2)'], {'dim': '[1, 2]'}), '(grad ** 2, dim=[1, 2])\n', (11245, 11268), False, 'import torch\n'), ((19267, 19315), 'torch.sum', 'torch.sum', (['(normal_vec ** 2)'], {'dim': '(-1)', 'keepdim': '(True)'}), '(normal_vec ** 2, dim=-1, keepdim=True)\n', (19276, 19315), False, 'import torch\n'), ((2950, 2981), 'torch.load', 'torch.load', (["(model_path + '.tar')"], {}), "(model_path + '.tar')\n", (2960, 2981), False, 'import torch\n'), ((4308, 4377), 'torch.topk', 'torch.topk', (['((1 - 
target_one_hot) * logits - target_one_hot * 10000)', '(5)'], {}), '((1 - target_one_hot) * logits - target_one_hot * 10000, 5)\n', (4318, 4377), False, 'import torch\n'), ((14293, 14308), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14306, 14308), False, 'import torch\n'), ((17732, 17747), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17745, 17747), False, 'import torch\n'), ((22337, 22352), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22350, 22352), False, 'import torch\n'), ((6313, 6343), 'torch.FloatTensor', 'torch.FloatTensor', (['pcd.normals'], {}), '(pcd.normals)\n', (6330, 6343), False, 'import torch\n'), ((14135, 14159), 'torch.zeros_like', 'torch.zeros_like', (['points'], {}), '(points)\n', (14151, 14159), False, 'import torch\n'), ((17544, 17568), 'torch.zeros_like', 'torch.zeros_like', (['points'], {}), '(points)\n', (17560, 17568), False, 'import torch\n'), ((21823, 21851), 'torch.zeros_like', 'torch.zeros_like', (['new_points'], {}), '(new_points)\n', (21839, 21851), False, 'import torch\n'), ((11332, 11349), 'numpy.sqrt', 'np.sqrt', (['(3 * 1024)'], {}), '(3 * 1024)\n', (11339, 11349), True, 'import numpy as np\n'), ((3960, 3982), 'torch.eye', 'torch.eye', (['num_classes'], {}), '(num_classes)\n', (3969, 3982), False, 'import torch\n'), ((17647, 17661), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (17658, 17661), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 11360
# datetime: 2021/3/11 18:58
import matplotlib.pyplot as plt
import numpy as np
import scipy
from sklearn.datasets import make_moons, make_regression
class LDA:
    """Linear Discriminant Analysis: project data onto the directions that
    maximise between-class versus within-class scatter, found by solving
    the generalized eigenproblem Sb v = lambda Sw v.
    """
    def __init__(self, k):
        """
        :param k: reduced dimension R^d -> R^k
        """
        self.reduced_dimension = k
    def make_data(self, regression=False):
        """Create a toy two-class dataset in self.X_data / self.Y_data.

        regression=True: two shifted regression-style point clouds (labels 1/0).
        regression=False: the sklearn two-moons dataset.
        """
        if regression:
            x, y = make_regression(n_samples=50, n_features=1,
                                   n_targets=1, noise=1.5, random_state=1, bias=0)
            # Use the regression target as a second coordinate so each sample is 2-D.
            y = np.array([y]).T
            print(y.shape)
            self.X_data = np.concatenate([x, y], axis=1)
            # Second class: the same cloud shifted by (-15, -13).
            new_X_data = self.X_data - np.array([[15, 13]])
            self.X_data = np.concatenate([self.X_data, new_X_data], axis=0)
            # self.X_data = self.X_data - np.mean(self.X_data, 0)
            y = np.array([1 for _ in range(50)])
            new_y = np.array([0 for _ in range(50)])
            self.Y_data = np.concatenate([y, new_y], axis=0)
            print(self.X_data.shape)
        else:
            self.X_data, self.Y_data = make_moons(100, noise=.04, random_state=0)
    def cal_within(self):
        """
        calculate within-class scatter matrix
        :return: Sw
        """
        class_name = np.unique(self.Y_data)
        class_num = len(np.unique(self.Y_data))
        attribute_dimension = self.X_data.shape[1]
        Sw = np.zeros([attribute_dimension, attribute_dimension])
        for i in range(class_num):
            # Rows belonging to class i.
            xi = self.X_data[np.where(self.Y_data == class_name[i])[0], :]
            miui = np.array([np.mean(xi, axis=0)])
            print(xi.shape)
            # Centered scatter via the identity sum (x-m)(x-m)^T = X^T X - n m m^T.
            Sw += xi.T @ xi
            Sw -= xi.shape[0] * miui.T @ miui
        return Sw
    def cal_between(self, Sw):
        """
        calculate class-between scatter matrix
        Sb = St - Sw
        :return: Sb
        """
        # Total scatter St, centered with the global mean.
        St = self.X_data.T @ self.X_data
        miu = np.array([np.mean(self.X_data, axis=0)])
        St = St - self.X_data.shape[0] * miu.T @ miu
        Sb = St - Sw
        # miu0 = np.array([np.mean(self.X_data[np.where(self.Y_data == 0)[0], :], axis=0)])
        # miu1 = np.array([np.mean(self.X_data[np.where(self.Y_data == 1)[0], :], axis=0)])
        # print("miu0.shape = ", miu0.shape)
        # Sb = (miu0 - miu1).T @(miu0 - miu1)
        return Sb
    def LDA(self):
        """Solve Sb v = lambda Sw v, keep the top-k eigenvectors as the
        projection matrix, and project the data into self.new_mat."""
        Sw = self.cal_within()
        Sb = self.cal_between(Sw)
        eigval, eig_vec = scipy.linalg.eig(Sb, Sw)
        eigval = eigval.real
        # Indices of the k largest (real parts of the) eigenvalues.
        index_sorted = np.argsort(eigval)[-self.reduced_dimension:]
        self.Projective_matrix = eig_vec[:, index_sorted]
        # NOTE(review): np.mat is deprecated in modern NumPy; np.asarray would do.
        mat = np.mat(self.X_data).T
        self.new_mat = self.Projective_matrix.T @ mat
    def result(self, regression=False):
        """Scatter-plot the original points plus their 1-D projections
        (shifted sideways by translate_vector) and draw the projection
        direction as an arrow."""
        # Offset perpendicular to the projection direction so the projected
        # points do not overlap the original data.
        if regression == False:
            translate_vector = [- self.Projective_matrix[1, 0] * 2, self.Projective_matrix[0, 0] * 2]
        else:
            translate_vector = [- self.Projective_matrix[1, 0] * 400, self.Projective_matrix[0, 0] * 400]
        for i in range(self.new_mat.shape[1]):
            # Back-project each 1-D coordinate into the 2-D plane.
            if self.Y_data[i] == 1:
                plt.scatter(self.new_mat[0, i] * self.Projective_matrix[0, 0] + translate_vector[0],
                            self.new_mat[0, i] * self.Projective_matrix[1, 0] + translate_vector[1], c='r')
            else:
                plt.scatter(self.new_mat[0, i] * self.Projective_matrix[0, 0] + translate_vector[0],
                            self.new_mat[0, i] * self.Projective_matrix[1, 0] + translate_vector[1], c='b')
        one_index = np.where(self.Y_data == 1)
        zero_index = np.where(self.Y_data == 0)
        plt.scatter(self.X_data[one_index, 0], self.X_data[one_index, 1], c='r')
        plt.scatter(self.X_data[zero_index, 0], self.X_data[zero_index, 1], c='b')
        ax = plt.gca()
        ax.axis("equal")
        ax.annotate("",
                    xy=(translate_vector[0], translate_vector[1]),
                    xytext=(0, 0),
                    arrowprops=dict(arrowstyle="->", color="k"))
        if regression:
            plt.text(translate_vector[0] * 0.9, translate_vector[1] * 0.95 + 10,
                     r'projection', fontdict={'size': 8, 'color': 'k'})
        else:
            plt.text(translate_vector[0] * 0.95 + 0.05, translate_vector[1] * 0.95,
                     r'projection', fontdict={'size': 8, 'color': 'k'})
        plt.show()
if __name__ == "__main__":
    # Reduce the two-moons dataset to one dimension and visualise the result.
    model = LDA(1)
    model.make_data()   # pass True for the regression-style dataset
    model.LDA()
    model.result()      # pass True when plotting the regression-style dataset
| [
"numpy.mat",
"matplotlib.pyplot.text",
"sklearn.datasets.make_regression",
"numpy.mean",
"numpy.unique",
"numpy.where",
"matplotlib.pyplot.gca",
"sklearn.datasets.make_moons",
"scipy.linalg.eig",
"numpy.zeros",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.scatter",
"numpy.concatenate... | [((1368, 1390), 'numpy.unique', 'np.unique', (['self.Y_data'], {}), '(self.Y_data)\n', (1377, 1390), True, 'import numpy as np\n'), ((1506, 1558), 'numpy.zeros', 'np.zeros', (['[attribute_dimension, attribute_dimension]'], {}), '([attribute_dimension, attribute_dimension])\n', (1514, 1558), True, 'import numpy as np\n'), ((2586, 2610), 'scipy.linalg.eig', 'scipy.linalg.eig', (['Sb', 'Sw'], {}), '(Sb, Sw)\n', (2602, 2610), False, 'import scipy\n'), ((3711, 3737), 'numpy.where', 'np.where', (['(self.Y_data == 1)'], {}), '(self.Y_data == 1)\n', (3719, 3737), True, 'import numpy as np\n'), ((3760, 3786), 'numpy.where', 'np.where', (['(self.Y_data == 0)'], {}), '(self.Y_data == 0)\n', (3768, 3786), True, 'import numpy as np\n'), ((3796, 3868), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X_data[one_index, 0]', 'self.X_data[one_index, 1]'], {'c': '"""r"""'}), "(self.X_data[one_index, 0], self.X_data[one_index, 1], c='r')\n", (3807, 3868), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3952), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X_data[zero_index, 0]', 'self.X_data[zero_index, 1]'], {'c': '"""b"""'}), "(self.X_data[zero_index, 0], self.X_data[zero_index, 1], c='b')\n", (3889, 3952), True, 'import matplotlib.pyplot as plt\n'), ((3967, 3976), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3974, 3976), True, 'import matplotlib.pyplot as plt\n'), ((4559, 4569), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4567, 4569), True, 'import matplotlib.pyplot as plt\n'), ((467, 562), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_samples': '(50)', 'n_features': '(1)', 'n_targets': '(1)', 'noise': '(1.5)', 'random_state': '(1)', 'bias': '(0)'}), '(n_samples=50, n_features=1, n_targets=1, noise=1.5,\n random_state=1, bias=0)\n', (482, 562), False, 'from sklearn.datasets import make_moons, make_regression\n'), ((683, 713), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {'axis': '(1)'}), '([x, y], 
axis=1)\n', (697, 713), True, 'import numpy as np\n'), ((804, 853), 'numpy.concatenate', 'np.concatenate', (['[self.X_data, new_X_data]'], {'axis': '(0)'}), '([self.X_data, new_X_data], axis=0)\n', (818, 853), True, 'import numpy as np\n'), ((1052, 1086), 'numpy.concatenate', 'np.concatenate', (['[y, new_y]'], {'axis': '(0)'}), '([y, new_y], axis=0)\n', (1066, 1086), True, 'import numpy as np\n'), ((1180, 1223), 'sklearn.datasets.make_moons', 'make_moons', (['(100)'], {'noise': '(0.04)', 'random_state': '(0)'}), '(100, noise=0.04, random_state=0)\n', (1190, 1223), False, 'from sklearn.datasets import make_moons, make_regression\n'), ((1416, 1438), 'numpy.unique', 'np.unique', (['self.Y_data'], {}), '(self.Y_data)\n', (1425, 1438), True, 'import numpy as np\n'), ((2665, 2683), 'numpy.argsort', 'np.argsort', (['eigval'], {}), '(eigval)\n', (2675, 2683), True, 'import numpy as np\n'), ((2784, 2803), 'numpy.mat', 'np.mat', (['self.X_data'], {}), '(self.X_data)\n', (2790, 2803), True, 'import numpy as np\n'), ((4235, 4357), 'matplotlib.pyplot.text', 'plt.text', (['(translate_vector[0] * 0.9)', '(translate_vector[1] * 0.95 + 10)', '"""projection"""'], {'fontdict': "{'size': 8, 'color': 'k'}"}), "(translate_vector[0] * 0.9, translate_vector[1] * 0.95 + 10,\n 'projection', fontdict={'size': 8, 'color': 'k'})\n", (4243, 4357), True, 'import matplotlib.pyplot as plt\n'), ((4405, 4530), 'matplotlib.pyplot.text', 'plt.text', (['(translate_vector[0] * 0.95 + 0.05)', '(translate_vector[1] * 0.95)', '"""projection"""'], {'fontdict': "{'size': 8, 'color': 'k'}"}), "(translate_vector[0] * 0.95 + 0.05, translate_vector[1] * 0.95,\n 'projection', fontdict={'size': 8, 'color': 'k'})\n", (4413, 4530), True, 'import matplotlib.pyplot as plt\n'), ((612, 625), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (620, 625), True, 'import numpy as np\n'), ((754, 774), 'numpy.array', 'np.array', (['[[15, 13]]'], {}), '([[15, 13]])\n', (762, 774), True, 'import numpy as np\n'), ((2065, 
2093), 'numpy.mean', 'np.mean', (['self.X_data'], {'axis': '(0)'}), '(self.X_data, axis=0)\n', (2072, 2093), True, 'import numpy as np\n'), ((3266, 3438), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(self.new_mat[0, i] * self.Projective_matrix[0, 0] + translate_vector[0])', '(self.new_mat[0, i] * self.Projective_matrix[1, 0] + translate_vector[1])'], {'c': '"""r"""'}), "(self.new_mat[0, i] * self.Projective_matrix[0, 0] +\n translate_vector[0], self.new_mat[0, i] * self.Projective_matrix[1, 0] +\n translate_vector[1], c='r')\n", (3277, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3496, 3668), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(self.new_mat[0, i] * self.Projective_matrix[0, 0] + translate_vector[0])', '(self.new_mat[0, i] * self.Projective_matrix[1, 0] + translate_vector[1])'], {'c': '"""b"""'}), "(self.new_mat[0, i] * self.Projective_matrix[0, 0] +\n translate_vector[0], self.new_mat[0, i] * self.Projective_matrix[1, 0] +\n translate_vector[1], c='b')\n", (3507, 3668), True, 'import matplotlib.pyplot as plt\n'), ((1701, 1720), 'numpy.mean', 'np.mean', (['xi'], {'axis': '(0)'}), '(xi, axis=0)\n', (1708, 1720), True, 'import numpy as np\n'), ((1625, 1663), 'numpy.where', 'np.where', (['(self.Y_data == class_name[i])'], {}), '(self.Y_data == class_name[i])\n', (1633, 1663), True, 'import numpy as np\n')] |
"""Tests for utility functions."""
import pytest
from pytest import approx
import numpy as np
from bdm import BDM
from bdm.utils import get_reduced_shape, get_reduced_idx, slice_dataset
from bdm.utils import make_min_data, make_max_data
@pytest.mark.parametrize('x,shape,shift,size_only,expected', [
    (np.ones((50, 10)), (4, 4), 0, False, (13, 3)),
    (np.ones((50, 10)), (4, 4), 0, True, 39),
    (np.ones((50, 10)), (4, 4), 1, False, (47, 7)),
    (np.ones((4, 8)), (4, 4), 0, False, (1, 2)),
    (np.ones((4, 8)), (4, 4), 1, False, (1, 5))
])
def test_get_reduced_shape(x, shape, shift, size_only, expected):
    """Reduced shape of `x` under box `shape` matches the expected value."""
    assert get_reduced_shape(x, shape, shift=shift, size_only=size_only) == expected
@pytest.mark.parametrize('i,shape,expected', [
    (0, (2, 2, 2), (0, 0, 0)),
    (1, (2, 2, 2), (0, 0, 1)),
    (2, (2, 2, 2), (0, 1, 0)),
    (3, (2, 2, 2), (0, 1, 1)),
    (4, (2, 2, 2), (1, 0, 0)),
    (5, (2, 2, 2), (1, 0, 1)),
    (6, (2, 2, 2), (1, 1, 0)),
    (7, (2, 2, 2), (1, 1, 1)),
    (0, (1, 2), (0, 0)),
    (1, (1, 2), (0, 1))
])
def test_get_reduced_idx(i, shape, expected):
    """Flat index `i` unravels to the expected multi-index for `shape`."""
    assert get_reduced_idx(i, shape) == expected
@pytest.mark.parametrize('X,shape,shift,expected', [
    (np.ones((10, 5)), (5, 5), 0, (np.ones((5, 5)), np.ones((5, 5)))),
    (np.ones((6, 6)), (5, 5), 1, (np.ones((5, 5)), np.ones((5, 5)), np.ones((5, 5)), np.ones((5, 5))))
])
def test_slice_dataset(X, shape, shift, expected):
    """Every slice produced from `X` equals the corresponding expected array."""
    produced = slice_dataset(X, shape, shift)
    for got, want in zip(produced, expected):
        assert np.array_equal(got, want)
@pytest.mark.parametrize('shape,expected', [
    ((5, 5), np.zeros((5, 5), dtype=int))
])
def test_make_min_data(shape, expected):
    """make_min_data returns an all-zero integer array of the requested shape."""
    assert np.array_equal(make_min_data(shape), expected)
@pytest.mark.parametrize('shape,part_shape,expected', [
    ((5, 5), (4, 4), 36.02279026553976),
    ((30,), (12,), 74.95847260483343)
])
def test_make_max_data(shape, part_shape, expected):
    """BDM of the max-complexity dataset matches a precomputed constant."""
    # pylint: disable=protected-access
    bdm = BDM(len(shape))
    max_data = make_max_data(shape, part_shape, bdm._ctm)
    assert bdm.bdm(max_data) == approx(expected)
| [
"pytest.approx",
"bdm.utils.get_reduced_idx",
"numpy.ones",
"bdm.utils.slice_dataset",
"bdm.utils.get_reduced_shape",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.array_equal",
"bdm.utils.make_min_data",
"bdm.utils.make_max_data"
] | [((725, 1046), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""i,shape,expected"""', '[(0, (2, 2, 2), (0, 0, 0)), (1, (2, 2, 2), (0, 0, 1)), (2, (2, 2, 2), (0, 1,\n 0)), (3, (2, 2, 2), (0, 1, 1)), (4, (2, 2, 2), (1, 0, 0)), (5, (2, 2, 2\n ), (1, 0, 1)), (6, (2, 2, 2), (1, 1, 0)), (7, (2, 2, 2), (1, 1, 1)), (0,\n (1, 2), (0, 0)), (1, (1, 2), (0, 1))]'], {}), "('i,shape,expected', [(0, (2, 2, 2), (0, 0, 0)), (1,\n (2, 2, 2), (0, 0, 1)), (2, (2, 2, 2), (0, 1, 0)), (3, (2, 2, 2), (0, 1,\n 1)), (4, (2, 2, 2), (1, 0, 0)), (5, (2, 2, 2), (1, 0, 1)), (6, (2, 2, 2\n ), (1, 1, 0)), (7, (2, 2, 2), (1, 1, 1)), (0, (1, 2), (0, 0)), (1, (1, \n 2), (0, 1))])\n", (748, 1046), False, 'import pytest\n'), ((1775, 1906), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape,part_shape,expected"""', '[((5, 5), (4, 4), 36.02279026553976), ((30,), (12,), 74.95847260483343)]'], {}), "('shape,part_shape,expected', [((5, 5), (4, 4), \n 36.02279026553976), ((30,), (12,), 74.95847260483343)])\n", (1798, 1906), False, 'import pytest\n'), ((631, 692), 'bdm.utils.get_reduced_shape', 'get_reduced_shape', (['x', 'shape'], {'shift': 'shift', 'size_only': 'size_only'}), '(x, shape, shift=shift, size_only=size_only)\n', (648, 692), False, 'from bdm.utils import get_reduced_shape, get_reduced_idx, slice_dataset\n'), ((1130, 1155), 'bdm.utils.get_reduced_idx', 'get_reduced_idx', (['i', 'shape'], {}), '(i, shape)\n', (1145, 1155), False, 'from bdm.utils import get_reduced_shape, get_reduced_idx, slice_dataset\n'), ((1708, 1728), 'bdm.utils.make_min_data', 'make_min_data', (['shape'], {}), '(shape)\n', (1721, 1728), False, 'from bdm.utils import make_min_data, make_max_data\n'), ((1740, 1772), 'numpy.array_equal', 'np.array_equal', (['output', 'expected'], {}), '(output, expected)\n', (1754, 1772), True, 'import numpy as np\n'), ((2043, 2085), 'bdm.utils.make_max_data', 'make_max_data', (['shape', 'part_shape', 'bdm._ctm'], {}), '(shape, part_shape, bdm._ctm)\n', (2056, 2085), 
False, 'from bdm.utils import make_min_data, make_max_data\n'), ((1484, 1514), 'bdm.utils.slice_dataset', 'slice_dataset', (['X', 'shape', 'shift'], {}), '(X, shape, shift)\n', (1497, 1514), False, 'from bdm.utils import get_reduced_shape, get_reduced_idx, slice_dataset\n'), ((1542, 1562), 'numpy.array_equal', 'np.array_equal', (['o', 'e'], {}), '(o, e)\n', (1556, 1562), True, 'import numpy as np\n'), ((2116, 2132), 'pytest.approx', 'approx', (['expected'], {}), '(expected)\n', (2122, 2132), False, 'from pytest import approx\n'), ((307, 324), 'numpy.ones', 'np.ones', (['(50, 10)'], {}), '((50, 10))\n', (314, 324), True, 'import numpy as np\n'), ((359, 376), 'numpy.ones', 'np.ones', (['(50, 10)'], {}), '((50, 10))\n', (366, 376), True, 'import numpy as np\n'), ((405, 422), 'numpy.ones', 'np.ones', (['(50, 10)'], {}), '((50, 10))\n', (412, 422), True, 'import numpy as np\n'), ((457, 472), 'numpy.ones', 'np.ones', (['(4, 8)'], {}), '((4, 8))\n', (464, 472), True, 'import numpy as np\n'), ((506, 521), 'numpy.ones', 'np.ones', (['(4, 8)'], {}), '((4, 8))\n', (513, 521), True, 'import numpy as np\n'), ((1245, 1261), 'numpy.ones', 'np.ones', (['(10, 5)'], {}), '((10, 5))\n', (1252, 1261), True, 'import numpy as np\n'), ((1316, 1331), 'numpy.ones', 'np.ones', (['(6, 6)'], {}), '((6, 6))\n', (1323, 1331), True, 'import numpy as np\n'), ((1622, 1649), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {'dtype': 'int'}), '((5, 5), dtype=int)\n', (1630, 1649), True, 'import numpy as np\n'), ((1275, 1290), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1282, 1290), True, 'import numpy as np\n'), ((1292, 1307), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1299, 1307), True, 'import numpy as np\n'), ((1345, 1360), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1352, 1360), True, 'import numpy as np\n'), ((1361, 1376), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1368, 1376), True, 'import numpy as np\n'), ((1377, 1392), 'numpy.ones', 
'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1384, 1392), True, 'import numpy as np\n'), ((1393, 1408), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1400, 1408), True, 'import numpy as np\n')] |
# Importing some useful/necessary packages
import numpy as np
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
import cv2
def leaf_image(image_id, target_length=160):
    """Load images/<image_id>.jpg and letterbox it onto a square canvas.

    image_id - index of the image in the images/ folder (1~1584)
    target_length - side length of the square output canvas
    Returns a (target_length x target_length) uint8 array with the image
    scaled to fit, preserving aspect ratio, and centered on a black border.
    """
    leaf_img = plt.imread('images/' + str(image_id) + '.jpg')
    height = leaf_img.shape[0]
    width = leaf_img.shape[1]
    canvas = np.zeros((target_length, target_length), np.uint8)
    if width >= height:
        # Landscape (or square): width fills the canvas, height is centered.
        new_w = target_length
        new_h = int((float(new_w) / width) * height)
        scaled = cv2.resize(leaf_img, (new_w, new_h), interpolation=cv2.INTER_AREA)
        top = int((target_length - new_h) / 2)
        canvas[top:top + new_h, :] = scaled
    else:
        # Portrait: height fills the canvas, width is centered.
        new_h = target_length
        new_w = int((float(new_h) / height) * width)
        scaled = cv2.resize(leaf_img, (new_w, new_h), interpolation=cv2.INTER_AREA)
        left = int((target_length - new_w) / 2)
        canvas[:, left:left + new_w] = scaled
    return canvas
def species_image(species, labels, train_raw, classes):
    """Build a horizontal strip image of all training leaves of one species.

    species - integer class index (0-98) or a species name present in `classes`
    labels - array of integer class labels aligned with train_raw rows
    train_raw - table with `id` and `species` columns
    classes - collection of known species names
    Returns (strip_image, label_info), or None after printing an error when
    `species` is neither a valid index nor a known name.
    """
    leaf_image_length = 160
    # First tile is a light-gray block that will carry the text label.
    img_target = 240 * np.ones([leaf_image_length, leaf_image_length * 2], np.uint8)
    label_info = ''
    # BUG FIX: the original `type(species) == int` check rejected numpy
    # integer labels (e.g. classifier predictions); accept np.integer too.
    if isinstance(species, (int, np.integer)) and species >= 0 and species < 99:
        images_index = np.where(labels == species)[0]
        label_info = str(species) + '-' + train_raw.species[images_index[0]]
    elif isinstance(species, str) and species in classes:
        images_index = np.where(train_raw.species == species)[0]
        label_info = str(images_index[0]) + '-' + species
    else:
        print ('Error: Please input a valid index or species name')
        return
    # Collect tiles first and concatenate once, instead of the original
    # O(n^2) np.append-in-a-loop pattern.
    tiles = [img_target]
    for image_index in images_index:
        image_id = train_raw.id[image_index]
        tiles.append(leaf_image(image_id))
    img_target = np.concatenate(tiles, axis=1)
    # Add information onto the first block
    cv2.putText(img_target, label_info, (10,90), cv2.FONT_HERSHEY_COMPLEX, 0.8, (100,170,0), 2)
    return img_target, label_info
def visualize_error(train_predictions, y_test):
    """Display each misclassified test leaf next to the wrongly-predicted
    species strip and the ground-truth species strip.

    train_predictions - predicted class indices for the test set
    y_test - true class indices for the test set
    NOTE(review): this function reads `train_raw`, `test_index`, `labels`
    and `classes` as globals that are not defined in this module -- the
    caller must define them before calling, otherwise this raises NameError.
    """
    # Review the images where mistakes occur
    error_indices = np.where(train_predictions != y_test)[0];
    print ('The error indices: ', error_indices)
    for err_index in error_indices[0:]:
        print ('Error index in the test set: ', err_index)
        # Map the test-set position back to an image id via the global split.
        err_img_index = train_raw.id[test_index[err_index]]
        print ('Ground truth species index: {}'.format(y_test[err_index]))
        print ('Wrong predicting species index: {}'.format(train_predictions[err_index]))
        plt.imshow(leaf_image(err_img_index, 160), cmap='gray'); plt.axis('off'); plt.show()
        # Strip of the species the model predicted, framed in red.
        wrong_pred_species_img, label_info = species_image(train_predictions[err_index], labels, train_raw, classes)
        fig = plt.figure(num=None, figsize=(16, 3), dpi=1200, facecolor='w', edgecolor='w',frameon=False,linewidth = 0)
        wrong_pred_species_img = cv2.cvtColor(wrong_pred_species_img,cv2.COLOR_GRAY2RGB)
        wrong_pred_species_img = cv2.copyMakeBorder(wrong_pred_species_img,10,10,10,10,cv2.BORDER_CONSTANT,value=[255,0,0])
        plt.imshow(wrong_pred_species_img, cmap='gray'); plt.axis('off'); plt.show()
        # Strip of the true species for comparison.
        ground_truth_species_img, label_info = species_image(y_test[err_index], labels, train_raw, classes)
        fig = plt.figure(num=None, figsize=(16, 3), dpi=1200, facecolor='w', edgecolor='w',frameon=False,linewidth = 0)
        plt.imshow(ground_truth_species_img, cmap='gray'); plt.axis('off'); plt.show()
        print ('#'*50)
def prepData(train, test, ss_split, labels):
    """Split `train`/`labels` into train and test arrays.

    Uses the (last) split yielded by `ss_split.split(train, labels)`;
    `test` is accepted for interface compatibility but not used here.
    Returns (X_train, X_test, y_train, y_test).
    """
    for idx_train, idx_test in ss_split.split(train, labels):
        X_train = train.values[idx_train]
        X_test = train.values[idx_test]
        y_train = labels[idx_train]
        y_test = labels[idx_test]
    # Double check the data
    print (y_train.shape, y_test.shape)
    return X_train, X_test, y_train, y_test
| [
"matplotlib.pyplot.imshow",
"seaborn.set",
"numpy.ones",
"numpy.where",
"cv2.copyMakeBorder",
"matplotlib.pyplot.imread",
"cv2.putText",
"numpy.append",
"numpy.zeros",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"matplotlib.pyplot.axis",
"cv2.resize",
"matplotlib.pyplot.show"
] | [((105, 114), 'seaborn.set', 'sns.set', ([], {}), '()\n', (112, 114), True, 'import seaborn as sns\n'), ((429, 463), 'matplotlib.pyplot.imread', 'plt.imread', (["('images/' + image_name)"], {}), "('images/' + image_name)\n", (439, 463), True, 'import matplotlib.pyplot as plt\n'), ((607, 657), 'numpy.zeros', 'np.zeros', (['(target_length, target_length)', 'np.uint8'], {}), '((target_length, target_length), np.uint8)\n', (615, 657), True, 'import numpy as np\n'), ((2727, 2825), 'cv2.putText', 'cv2.putText', (['img_target', 'label_info', '(10, 90)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.8)', '(100, 170, 0)', '(2)'], {}), '(img_target, label_info, (10, 90), cv2.FONT_HERSHEY_COMPLEX, 0.8,\n (100, 170, 0), 2)\n', (2738, 2825), False, 'import cv2\n'), ((851, 943), 'cv2.resize', 'cv2.resize', (['leaf_img', '(scale_img_width, scale_img_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(leaf_img, (scale_img_width, scale_img_height), interpolation=cv2\n .INTER_AREA)\n', (861, 943), False, 'import cv2\n'), ((1291, 1383), 'cv2.resize', 'cv2.resize', (['leaf_img', '(scale_img_width, scale_img_height)'], {'interpolation': 'cv2.INTER_AREA'}), '(leaf_img, (scale_img_width, scale_img_height), interpolation=cv2\n .INTER_AREA)\n', (1301, 1383), False, 'import cv2\n'), ((1837, 1898), 'numpy.ones', 'np.ones', (['[leaf_image_length, leaf_image_length * 2]', 'np.uint8'], {}), '([leaf_image_length, leaf_image_length * 2], np.uint8)\n', (1844, 1898), True, 'import numpy as np\n'), ((2639, 2678), 'numpy.append', 'np.append', (['img_target', 'leaf_img'], {'axis': '(1)'}), '(img_target, leaf_img, axis=1)\n', (2648, 2678), True, 'import numpy as np\n'), ((2968, 3005), 'numpy.where', 'np.where', (['(train_predictions != y_test)'], {}), '(train_predictions != y_test)\n', (2976, 3005), True, 'import numpy as np\n'), ((3452, 3467), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3460, 3467), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3479), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (3477, 3479), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 3)', 'dpi': '(1200)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'frameon': '(False)', 'linewidth': '(0)'}), "(num=None, figsize=(16, 3), dpi=1200, facecolor='w', edgecolor=\n 'w', frameon=False, linewidth=0)\n", (3622, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3751, 3807), 'cv2.cvtColor', 'cv2.cvtColor', (['wrong_pred_species_img', 'cv2.COLOR_GRAY2RGB'], {}), '(wrong_pred_species_img, cv2.COLOR_GRAY2RGB)\n', (3763, 3807), False, 'import cv2\n'), ((3840, 3943), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['wrong_pred_species_img', '(10)', '(10)', '(10)', '(10)', 'cv2.BORDER_CONSTANT'], {'value': '[255, 0, 0]'}), '(wrong_pred_species_img, 10, 10, 10, 10, cv2.\n BORDER_CONSTANT, value=[255, 0, 0])\n', (3858, 3943), False, 'import cv2\n'), ((3940, 3987), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wrong_pred_species_img'], {'cmap': '"""gray"""'}), "(wrong_pred_species_img, cmap='gray')\n", (3950, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3989, 4004), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3997, 4004), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4016), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4014, 4016), True, 'import matplotlib.pyplot as plt\n'), ((4140, 4250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(16, 3)', 'dpi': '(1200)', 'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'frameon': '(False)', 'linewidth': '(0)'}), "(num=None, figsize=(16, 3), dpi=1200, facecolor='w', edgecolor=\n 'w', frameon=False, linewidth=0)\n", (4150, 4250), True, 'import matplotlib.pyplot as plt\n'), ((4254, 4303), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ground_truth_species_img'], {'cmap': '"""gray"""'}), "(ground_truth_species_img, cmap='gray')\n", (4264, 4303), True, 'import 
matplotlib.pyplot as plt\n'), ((4305, 4320), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4313, 4320), True, 'import matplotlib.pyplot as plt\n'), ((4322, 4332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4330, 4332), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2149), 'numpy.where', 'np.where', (['(labels == species)'], {}), '(labels == species)\n', (2130, 2149), True, 'import numpy as np\n'), ((2303, 2341), 'numpy.where', 'np.where', (['(train_raw.species == species)'], {}), '(train_raw.species == species)\n', (2311, 2341), True, 'import numpy as np\n')] |
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import numpy as np
# Python 2 also has `long`; on Python 3 referencing it raises NameError,
# leaving the two-element tuple in place.
numeric_types = (int, float)
try:
    numeric_types = (int, float, long)
except NameError:
    pass
class SimpleVectorPlotter(object):
"""Plots vector data represented as lists of coordinates."""
# _graphics = {}
    def __init__(self, interactive, ticks=False, figsize=None, limits=None):
        """Construct a new SimpleVectorPlotter.
        interactive - boolean flag denoting interactive mode
        ticks - boolean flag denoting whether to show axis tickmarks
        figsize - optional figure size
        limits - optional geographic limits (x_min, x_max, y_min, y_max)
        """
        # if figsize:
        #     plt.figure(num=1, figsize=figsize)
        plt.figure(num=1, figsize=figsize)
        self.interactive = interactive
        self.ticks = ticks
        # Interactive mode redraws after each plotting call; otherwise the
        # caller must invoke draw() explicitly.
        if interactive:
            plt.ion()
        else:
            plt.ioff()
        if limits is not None:
            self.set_limits(*limits)
        if not ticks:
            self.no_ticks()
        # Equal aspect so geographic shapes are not distorted.
        plt.axis('equal')
        # name -> list of matplotlib artists for each plotted layer.
        self._graphics = {}
        self._init_colors()
def adjust_markers(self):
figsize = plt.gcf().get_size_inches()
r = min(figsize[0] / 8, figsize[1] / 6)
mpl.rcParams['lines.markersize'] = 6 * r
mpl.rcParams['lines.markeredgewidth'] = 0.5 * r
mpl.rcParams['lines.linewidth'] = r
mpl.rcParams['patch.linewidth'] = r
    def adjust_markersize(self, size):
        """Return a marker size scaled to the current figure dimensions.
        NOTE(review): the `size` parameter is unused -- the return value is
        always 6 * scale; presumably it was meant to factor in `size`.
        """
        figsize = plt.gcf().get_size_inches()
        r = min(figsize[0] / 8, figsize[1] / 6)
        return 6 * r
def axis_on(self, on):
"""Turn the axes and labels on or off."""
if on:
plt.axis('on')
else:
plt.axis('off')
def clear(self):
"""Clear the plot area."""
plt.cla()
self._graphics = {}
if not self.ticks:
self.no_ticks()
def close(self):
"""Close the plot."""
self.clear()
plt.close()
def draw(self):
"""Draw a non-interactive plot."""
plt.show()
    def hide(self, name):
        """Hide the layer with the given name."""
        try:
            graphics = self._graphics[name]
            graphic_type = type(graphics[0])
            # Lines live in axes.lines, filled shapes in axes.patches;
            # detach each artist from whichever list holds it.
            # NOTE(review): plt.axes() may create a new Axes on recent
            # matplotlib; plt.gca() looks intended -- confirm.
            if graphic_type is mpl.lines.Line2D:
                for graphic in graphics:
                    plt.axes().lines.remove(graphic)
            elif graphic_type is mpl.patches.Polygon or graphic_type is mpl.patches.PathPatch:
                for graphic in graphics:
                    plt.axes().patches.remove(graphic)
            else:
                raise RuntimeError('{} not supported'.format(graphic_type))
        # KeyError: unknown layer name; ValueError: artist already detached.
        except (KeyError, ValueError):
            pass
def plot_line(self, data, symbol='', name='', **kwargs):
"""Plot a line.
data - list of (x, y) tuples
symbol - optional pyplot symbol to draw the line with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
graphics = self._plot_line(data, symbol, **kwargs)
self._set_graphics(graphics, name, symbol or kwargs)
def plot_multiline(self, data, symbol='', name='', **kwargs):
"""Plot a multiline.
data - list of lines, each of which is a list of (x, y) tuples
symbol - optional pyplot symbol to draw the lines with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = symbol or kwargs
symbol = symbol or self._line_symbol()
graphics = self._plot_multiline(data, symbol, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_multipoint(self, data, symbol='', name='', **kwargs):
"""Plot a multipoint.
data - list of (x, y) tuples
symbol - optional pyplot symbol to draw the points with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = symbol or kwargs
symbol = symbol or self._point_symbol()
graphics = self._plot_multipoint(data, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_multipolygon(self, data, color='', name='', **kwargs):
"""Plot a multipolygon.
data - list of polygons, each of which is a list of rings, each of
which is a list of (x, y) tuples
color - optional pyplot color to draw the polygons with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = bool(color or kwargs)
if not ('facecolor' in kwargs or 'fc' in kwargs):
kwargs['fc'] = color or self._next_color()
graphics = self._plot_multipolygon(data, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_point(self, data, symbol='', name='', **kwargs):
"""Plot a point.
data - (x, y) tuple
symbol - optional pyplot symbol to draw the point with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = symbol or kwargs
symbol = symbol or self._point_symbol()
graphics = self._plot_point(data, symbol, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def plot_polygon(self, data, color='', name='', **kwargs):
"""Plot a polygon.
data - list of rings, each of which is a list of (x, y) tuples
color - optional pyplot color to draw the polygon with
name - optional name to assign to layer so can access it later
kwargs - optional pyplot drawing parameters
"""
has_symbol = bool(color or kwargs)
if not ('facecolor' in kwargs or 'fc' in kwargs):
kwargs['fc'] = color or self._next_color()
graphics = self._plot_polygon(data, **kwargs)
self._set_graphics(graphics, name, has_symbol)
def remove(self, name):
"""Remove a layer with the given name."""
try:
self.hide(name)
del self._graphics[name]
except KeyError:
pass
def save(self, fn, dpi=300):
plt.savefig(fn, dpi=dpi, bbox_inches='tight', pad_inches=0.02)
def set_limits(self, x_min, x_max, y_min, y_max):
"""Set geographic limits for plotting."""
self.x_lim = x_min, x_max
self.y_lim = y_min, y_max
self._set_limits()
    def show(self, name):
        """Show the layer with the given name."""
        try:
            graphics = self._graphics[name]
            graphic_type = type(graphics[0])
            # Mirror of hide(): re-attach each cached artist to the axes
            # list it belongs to (lines vs patches).
            if graphic_type is mpl.lines.Line2D:
                for graphic in graphics:
                    plt.axes().add_line(graphic)
            elif graphic_type is mpl.patches.Polygon or graphic_type is mpl.patches.PathPatch:
                for graphic in graphics:
                    plt.axes().add_patch(graphic)
            else:
                raise RuntimeError('{} not supported'.format(graphic_type))
        # Unknown layer names are silently ignored.
        except KeyError:
            pass
def no_ticks(self):
plt.gca().get_xaxis().set_ticks([])
plt.gca().get_yaxis().set_ticks([])
def zoom(self, factor):
"""Zoom in or out by a percentage; negative is out."""
x_min, x_max, y_min, y_max = plt.axis()
x_delta = (x_max - x_min) * factor / 100
y_delta = (y_max - y_min) * factor / 100
plt.axis((x_min + x_delta, x_max - x_delta,
y_min + y_delta, y_max - y_delta))
def _clockwise(self, data):
"""Determine if points are in clockwise order."""
total = 0
x1, y1 = data[0]
for x, y in data[1:]:
total += (x - x1) * (y + y1)
x1, y1 = x, y
x, y = data[0]
total += (x - x1) * (y + y1)
return total > 0
def _codes(self, data):
"""Get a list of codes for creating a new PathPatch."""
codes = np.ones(len(data), dtype=np.int) * Path.LINETO
codes[0] = Path.MOVETO
return codes
    def _init_colors(self):
        # Matplotlib >= 1.5 replaced the old color_cycle rcParam with
        # axes.prop_cycle; pick the color-rotation strategy accordingly.
        # NOTE(review): lexicographic string comparison of versions is
        # fragile -- confirm it holds for the versions in use.
        if mpl.__version__ >= '1.5':
            self.colors = list(mpl.rcParams['axes.prop_cycle'])
            self.current_color = -1
            self._next_color = self._next_color_new
        else:
            self._next_color = self._next_color_old
def _line_symbol(self):
"""Get a default line symbol."""
return self._next_color() + '-'
    def _next_color_old(self):
        """Get the next color in the rotation."""
        # Pre-1.5 matplotlib: pull from the axes' internal color cycle.
        # NOTE(review): _get_lines.color_cycle is a private matplotlib
        # attribute that no longer exists on modern versions.
        return next(plt.gca()._get_lines.color_cycle)
def _next_color_new(self):
"""Get the next color in the rotation."""
self.current_color += 1
if self.current_color >= len(self.colors):
self.current_color = 0
return self.colors[self.current_color]['color']
def _order_vertices(self, data, clockwise=True):
"""Order vertices in clockwise or counter-clockwise order."""
self._clockwise(data) != clockwise or data.reverse()
if data[0] != data[-1]:
data.append(data[0])
return data
def _plot_line(self, data, symbol, **kwargs):
        """Plot a single line and return its list of graphics."""
        xs, ys = zip(*data)
        return plt.plot(xs, ys, symbol, **kwargs)
def _plot_multiline(self, data, symbol, **kwargs):
        """Plot every part of a multiline; return the combined graphics."""
        graphics = []
        for part in data:
            graphics.extend(self._plot_line(part, symbol, **kwargs))
        return graphics
def _plot_multipoint(self, data, symbol, **kwargs):
        """Plot every point of a multipoint; return the combined graphics."""
        graphics = []
        for pt in data:
            graphics.extend(self._plot_point(pt, symbol, **kwargs))
        return graphics
def _plot_multipolygon(self, data, **kwargs):
        """Plot every polygon of a multipolygon; return the combined graphics."""
        graphics = []
        for polygon in data:
            graphics.extend(self._plot_polygon(polygon, **kwargs))
        return graphics
def _plot_point(self, data, symbol, **kwargs):
        """Plot a single point and return its list of graphics."""
        x, y = data[0], data[1]
        return plt.plot(x, y, symbol, **kwargs)
def _plot_polygon(self, data, **kwargs):
        """Plot a polygon (outer ring plus holes) as one PathPatch."""
        # Outer ring clockwise, holes counter-clockwise, per Path convention.
        rings = [self._order_vertices(data[0], True)]
        rings.extend(self._order_vertices(hole, False) for hole in data[1:])
        vertices = np.concatenate([np.asarray(r) for r in rings])
        codes = np.concatenate([self._codes(r) for r in rings])
        patch = PathPatch(Path(vertices, codes), **kwargs)
        plt.axes().add_patch(patch)
        return [patch]
def _point_symbol(self):
"""Get a default point symbol."""
return self._next_color() + 'o'
def _same_type(self, graphic1, graphic2):
        """Tell whether two graphics are similar enough to share styling.

        Polygons of the same class always match; other graphics match when
        their vertex counts are equal, or when both have more than one
        vertex.
        """
        if type(graphic1) is not type(graphic2):
            return False
        if type(graphic1) is mpl.patches.Polygon:
            return True
        n1 = len(graphic1.get_xdata())
        n2 = len(graphic2.get_xdata())
        return n1 == n2 or (n1 > 1 and n2 > 1)
def _set_graphics(self, graphics, name, has_symbol):
        """Add graphics to plot."""
        # Fall back to a numeric layer name when none was given.
        name = name or len(self._graphics)
        if name in self._graphics:
            self.hide(name)
        # Reuse the previous layer's styling when no explicit symbol was
        # supplied and the graphic types are compatible.
        # NOTE(review): when *name* is new and has_symbol is False, the
        # self._graphics[name] lookup below raises KeyError -- confirm
        # callers always pass has_symbol=True for brand-new layers.
        if not has_symbol and self._same_type(graphics[0], self._graphics[name][0]):
            styled_graphic = self._graphics[name][0]
            for graphic in graphics:
                graphic.update_from(styled_graphic)
        self._graphics[name] = graphics
        plt.axis('equal')
def _set_limits(self):
        """Apply the stored x/y limits and keep an equal aspect ratio."""
        x_lo, x_hi = self.x_lim
        y_lo, y_hi = self.y_lim
        plt.xlim(x_lo, x_hi)
        plt.ylim(y_lo, y_hi)
        plt.axes().set_aspect('equal')
| [
"matplotlib.path.Path",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.ion",
"matplotlib.pypl... | [((844, 878), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': '(1)', 'figsize': 'figsize'}), '(num=1, figsize=figsize)\n', (854, 878), True, 'import matplotlib.pyplot as plt\n'), ((1154, 1171), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (1162, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1929, 1938), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1936, 1938), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2114), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2112, 2114), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2195, 2197), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6572), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fn'], {'dpi': 'dpi', 'bbox_inches': '"""tight"""', 'pad_inches': '(0.02)'}), "(fn, dpi=dpi, bbox_inches='tight', pad_inches=0.02)\n", (6521, 6572), True, 'import matplotlib.pyplot as plt\n'), ((7655, 7665), 'matplotlib.pyplot.axis', 'plt.axis', ([], {}), '()\n', (7663, 7665), True, 'import matplotlib.pyplot as plt\n'), ((7772, 7850), 'matplotlib.pyplot.axis', 'plt.axis', (['(x_min + x_delta, x_max - x_delta, y_min + y_delta, y_max - y_delta)'], {}), '((x_min + x_delta, x_max - x_delta, y_min + y_delta, y_max - y_delta))\n', (7780, 7850), True, 'import matplotlib.pyplot as plt\n'), ((9541, 9573), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'symbol'], {}), '(x, y, symbol, **kwargs)\n', (9549, 9573), True, 'import matplotlib.pyplot as plt\n'), ((10339, 10383), 'matplotlib.pyplot.plot', 'plt.plot', (['data[0]', 'data[1]', 'symbol'], {}), '(data[0], data[1], symbol, **kwargs)\n', (10347, 10383), True, 'import matplotlib.pyplot as plt\n'), ((11936, 11953), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (11944, 11953), True, 'import matplotlib.pyplot as plt\n'), ((12021, 12042), 'matplotlib.pyplot.xlim', 'plt.xlim', (['*self.x_lim'], {}), 
'(*self.x_lim)\n', (12029, 12042), True, 'import matplotlib.pyplot as plt\n'), ((12051, 12072), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*self.y_lim'], {}), '(*self.y_lim)\n', (12059, 12072), True, 'import matplotlib.pyplot as plt\n'), ((981, 990), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (988, 990), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1027), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1025, 1027), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1821), 'matplotlib.pyplot.axis', 'plt.axis', (['"""on"""'], {}), "('on')\n", (1815, 1821), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1863), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1856, 1863), True, 'import matplotlib.pyplot as plt\n'), ((10806, 10827), 'matplotlib.path.Path', 'Path', (['vertices', 'codes'], {}), '(vertices, codes)\n', (10810, 10827), False, 'from matplotlib.path import Path\n'), ((1277, 1286), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1284, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1604, 1613), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1611, 1613), True, 'import matplotlib.pyplot as plt\n'), ((10847, 10857), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (10855, 10857), True, 'import matplotlib.pyplot as plt\n'), ((12081, 12091), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (12089, 12091), True, 'import matplotlib.pyplot as plt\n'), ((8889, 8898), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8896, 8898), True, 'import matplotlib.pyplot as plt\n'), ((10627, 10644), 'numpy.asarray', 'np.asarray', (['outer'], {}), '(outer)\n', (10637, 10644), True, 'import numpy as np\n'), ((10649, 10662), 'numpy.asarray', 'np.asarray', (['i'], {}), '(i)\n', (10659, 10662), True, 'import numpy as np\n'), ((7446, 7455), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7453, 7455), True, 'import matplotlib.pyplot as plt\n'), ((7490, 7499), 'matplotlib.pyplot.gca', 
'plt.gca', ([], {}), '()\n', (7497, 7499), True, 'import matplotlib.pyplot as plt\n'), ((7062, 7072), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (7070, 7072), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2497), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2495, 2497), True, 'import matplotlib.pyplot as plt\n'), ((7247, 7257), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (7255, 7257), True, 'import matplotlib.pyplot as plt\n'), ((2676, 2686), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2684, 2686), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from simulator.network_simulator.bbr import BBR
from simulator.network_simulator.cubic import Cubic
from simulator.trace import Trace
def parse_args():
    """Build the CLI parser and return the recognized arguments.

    Unknown command-line options are silently ignored
    (``parse_known_args``).
    """
    parser = argparse.ArgumentParser("Plot validation curv.")
    parser.add_argument(
        '--log-file', type=str, nargs="+", required=True,
        help="path to a testing log file.")
    parser.add_argument(
        '--save-dir', type=str, default=None,
        help="path to save.")
    known_args, _unknown = parser.parse_known_args()
    return known_args
def main():
    """Plot each training log's validation-reward curve against BBR/Cubic baselines."""
    args = parse_args()
    bbr = BBR(False)
    cubic = Cubic(False)
    # Collect up to 20 validation traces (missing files are skipped) and a
    # matching output directory for each.
    validation_traces = []
    save_dirs = []
    for i in range(20):
        trace_file = os.path.join(args.save_dir, 'validation_traces', "trace_{}.json".format(i))
        if not os.path.exists(trace_file):
            continue
        validation_traces.append(Trace.load_from_file(trace_file))
        save_dir = os.path.join(args.save_dir, 'validation_traces',"trace_{}".format(i))
        os.makedirs(save_dir, exist_ok=True)
        save_dirs.append(save_dir)
    # Baseline rewards (MI-level component only) on the same traces.
    bbr_trace_rewards = bbr.test_on_traces(validation_traces, save_dirs, False)
    cubic_trace_rewards = cubic.test_on_traces(validation_traces, save_dirs, False)
    bbr_rewards = [mi_level_reward for mi_level_reward, _ in bbr_trace_rewards]
    cubic_rewards = [mi_level_reward for mi_level_reward, _ in cubic_trace_rewards]
    for log_file in args.log_file:
        plt.figure()
        # Model name is the parent directory of the log file.
        model_name = log_file.split('/')[-2]
        plt.title(model_name)
        df = pd.read_csv(log_file, sep='\t')
        # Best checkpoint = the step with the highest mean validation reward.
        best_step = int(df['num_timesteps'][df['mean_validation_reward'].argmax()])
        t_used = df['tot_t_used(min)'][df['mean_validation_reward'].argmax()]
        best_reward = df['mean_validation_reward'].max()
        best_model_path = os.path.join(os.path.dirname(log_file), "model_step_{}.ckpt.meta".format(best_step))
        plt.plot(df['num_timesteps'], df['mean_validation_reward'],
                 'o-', label="best_reward: {:.2f}, best step: {}, used {:.2f}min".format(best_reward, int(best_step), t_used))
        # Horizontal baselines for the rule-based schemes.
        plt.axhline(y=np.mean(bbr_rewards), c='r', label='BBR')
        plt.axhline(y=np.mean(cubic_rewards), c='k', label='Cubic')
        plt.xlabel('Num steps')
        plt.ylabel('Validation Reward')
        plt.legend()
        assert os.path.exists(best_model_path)
        print(best_model_path.replace(".meta", ""))
        if args.save_dir:
            os.makedirs(args.save_dir, exist_ok=True)
            plt.savefig(os.path.join(args.save_dir,
                                     '{}_val_curve.png'.format(model_name)))
        plt.close()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"numpy.mean",
"simulator.network_simulator.bbr.BBR",
"argparse.ArgumentParser",
"os.makedirs",
"matplotlib.use",
"pandas.read_csv",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel",
"simulator.trace.Trace.load_from_file",
"matplotlib.pyplot.close",
"os.path.dirname",
... | [((44, 65), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (58, 65), False, 'import matplotlib\n'), ((354, 402), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Plot validation curv."""'], {}), "('Plot validation curv.')\n", (377, 402), False, 'import argparse\n'), ((756, 766), 'simulator.network_simulator.bbr.BBR', 'BBR', (['(False)'], {}), '(False)\n', (759, 766), False, 'from simulator.network_simulator.bbr import BBR\n'), ((779, 791), 'simulator.network_simulator.cubic.Cubic', 'Cubic', (['(False)'], {}), '(False)\n', (784, 791), False, 'from simulator.network_simulator.cubic import Cubic\n'), ((1189, 1225), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (1200, 1225), False, 'import os\n'), ((1633, 1645), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1643, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1720), 'matplotlib.pyplot.title', 'plt.title', (['model_name'], {}), '(model_name)\n', (1708, 1720), True, 'import matplotlib.pyplot as plt\n'), ((1734, 1765), 'pandas.read_csv', 'pd.read_csv', (['log_file'], {'sep': '"""\t"""'}), "(log_file, sep='\\t')\n", (1745, 1765), True, 'import pandas as pd\n'), ((2432, 2455), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Num steps"""'], {}), "('Num steps')\n", (2442, 2455), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2495), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation Reward"""'], {}), "('Validation Reward')\n", (2474, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2504, 2516), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2514, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2532, 2563), 'os.path.exists', 'os.path.exists', (['best_model_path'], {}), '(best_model_path)\n', (2546, 2563), False, 'import os\n'), ((2833, 2844), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2842, 2844), True, 'import matplotlib.pyplot as plt\n'), ((975, 1001), 
'os.path.exists', 'os.path.exists', (['trace_file'], {}), '(trace_file)\n', (989, 1001), False, 'import os\n'), ((1057, 1089), 'simulator.trace.Trace.load_from_file', 'Trace.load_from_file', (['trace_file'], {}), '(trace_file)\n', (1077, 1089), False, 'from simulator.trace import Trace\n'), ((2024, 2049), 'os.path.dirname', 'os.path.dirname', (['log_file'], {}), '(log_file)\n', (2039, 2049), False, 'import os\n'), ((2654, 2695), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {'exist_ok': '(True)'}), '(args.save_dir, exist_ok=True)\n', (2665, 2695), False, 'import os\n'), ((2314, 2334), 'numpy.mean', 'np.mean', (['bbr_rewards'], {}), '(bbr_rewards)\n', (2321, 2334), True, 'import numpy as np\n'), ((2378, 2400), 'numpy.mean', 'np.mean', (['cubic_rewards'], {}), '(cubic_rewards)\n', (2385, 2400), True, 'import numpy as np\n')] |
from greenonbrown import green_on_brown
from imutils.video import count_frames, FileVideoStream
import numpy as np
import imutils
import glob
import cv2
import csv
import os
def frame_analysis(exgFile: str, exgsFile: str, hueFile: str, exhuFile: str, HDFile: str):
    """Interactively step through five synchronized videos side by side.

    Key bindings: q/w/e/r/v step the exg/exgs/hue/exhu/HD video forward one
    frame; a/s/d/f/b step the same videos back one frame; y saves the
    current exg/exgs/hue/exhu frames under images/frameGrabs; Esc quits.
    Frames already decoded are cached so stepping back needs no re-decode.
    """
    baseName = os.path.splitext(os.path.basename(exhuFile))[0]
    # Open each video and count its frames (override=True decodes the
    # whole file to get an exact count).
    exgVideo = cv2.VideoCapture(exgFile)
    print("[INFO] Loaded {}".format(exgFile))
    lenexg = count_frames(exgFile, override=True) - 1
    exgsVideo = cv2.VideoCapture(exgsFile)
    print("[INFO] Loaded {}".format(exgsFile))
    lenexgs = count_frames(exgsFile, override=True) - 1
    hueVideo = cv2.VideoCapture(hueFile)
    print("[INFO] Loaded {}".format(hueFile))
    lenhue = count_frames(hueFile, override=True) - 1
    exhuVideo = cv2.VideoCapture(exhuFile)
    print("[INFO] Loaded {}".format(exhuFile))
    lenexhu = count_frames(exhuFile, override=True) - 1
    videoHD = cv2.VideoCapture(HDFile)
    print("[INFO] Loaded {}".format(HDFile))
    lenHD = count_frames(HDFile, override=True) - 1
    # Current frame, cursor position, and decoded-frame cache per video.
    hdFrame = None
    exgFrame = None
    exgsFrame = None
    hueFrame = None
    exhuFrame = None
    hdframecount = 0
    exgframecount = 0
    exgsframecount = 0
    hueframecount = 0
    exhuframecount = 0
    hdFramesAll = []
    exgFramesAll = []
    exgsFramesAll = []
    hueFramesAll = []
    exhuFramesAll = []
    while True:
        k = cv2.waitKey(1) & 0xFF
        # Forward-step keys: decode a new frame (and cache it) when the
        # cursor is at the cache end, otherwise replay from the cache.
        if k == ord('v') or hdFrame is None:
            if hdframecount >= len(hdFramesAll):
                hdFrame = next(frame_processor(videoHD, 'hd'))
                hdFrame = imutils.resize(hdFrame, height=640)
                hdFrame = imutils.rotate(hdFrame, angle=180)
                hdframecount += 1
                hdFramesAll.append(hdFrame)
            else:
                hdFrame = hdFramesAll[hdframecount]
                hdframecount += 1
        if k == ord('q') or exgFrame is None:
            if exgframecount >= len(exgFramesAll):
                exgFrame = next(frame_processor(exgVideo, 'exg'))
                exgframecount += 1
                exgFramesAll.append(exgFrame)
            else:
                exgFrame = exgFramesAll[exgframecount]
                exgframecount += 1
        if k == ord('w') or exgsFrame is None:
            if exgsframecount >= len(exgsFramesAll):
                exgsFrame = next(frame_processor(exgsVideo, 'exgs'))
                exgsframecount += 1
                exgsFramesAll.append(exgsFrame)
            else:
                exgsFrame = exgsFramesAll[exgsframecount]
                exgsframecount += 1
        if k == ord('e') or hueFrame is None:
            if hueframecount >= len(hueFramesAll):
                hueFrame = next(frame_processor(hueVideo, 'hsv'))
                hueframecount += 1
                hueFramesAll.append(hueFrame)
            else:
                hueFrame = hueFramesAll[hueframecount]
                hueframecount += 1
        if k == ord('r') or exhuFrame is None:
            if exhuframecount >= len(exhuFramesAll):
                exhuFrame = next(frame_processor(exhuVideo, 'exhu'))
                exhuframecount += 1
                exhuFramesAll.append(exhuFrame)
            else:
                exhuFrame = exhuFramesAll[exhuframecount]
                exhuframecount += 1
        # Backward-step keys: move the cursor back one cached frame,
        # clamping at the start of the cache.
        if k == ord('b'):
            if hdframecount > 0:
                hdframecount -= 1
                hdFrame = hdFramesAll[hdframecount]
            else:
                hdFrame = hdFramesAll[hdframecount]
        if k == ord('a'):
            if exgframecount > 0:
                exgframecount -= 1
                exgFrame = exgFramesAll[exgframecount]
            else:
                exgFrame = exgFramesAll[exgframecount]
        if k == ord('s'):
            if exgsframecount > 0:
                exgsframecount -= 1
                exgsFrame = exgsFramesAll[exgsframecount]
            else:
                exgsFrame = exgsFramesAll[exgsframecount]
        if k == ord('d'):
            if hueframecount > 0:
                hueframecount -= 1
                hueFrame = hueFramesAll[hueframecount]
            else:
                hueFrame = hueFramesAll[hueframecount]
        if k == ord('f'):
            if exhuframecount > 0:
                exhuframecount -= 1
                exhuFrame = exhuFramesAll[exhuframecount]
            else:
                exhuFrame = exhuFramesAll[exhuframecount]
        # save current frames for the video comparison
        if k == ord('y'):
            cv2.imwrite('images/frameGrabs/{}_frame{}_exg.png'.format(baseName, exgframecount), exgFrame)
            cv2.imwrite('images/frameGrabs/{}_frame{}_exgs.png'.format(baseName, exgsframecount), exgsFrame)
            cv2.imwrite('images/frameGrabs/{}_frame{}_hue.png'.format(baseName, hueframecount), hueFrame)
            cv2.imwrite('images/frameGrabs/{}_frame{}_exhu.png'.format(baseName, exhuframecount), exhuFrame)
            print('[INFO] All frames written.')
        # write text on each video frame
        exgVis = exgFrame.copy()
        exgsVis = exgsFrame.copy()
        hueVis = hueFrame.copy()
        exhuVis = exhuFrame.copy()
        cv2.putText(exhuVis, 'exhu: {} / {}'.format(exhuframecount, lenexhu), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.putText(hueVis, 'hue: {} / {}'.format(hueframecount, lenhue), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.putText(exgsVis, 'exgs: {} / {}'.format(exgsframecount, lenexgs), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.putText(exgVis, 'exg: {} / {}'.format(exgframecount, lenexg), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.putText(hdFrame, 'HD: {} / {}'.format(hdframecount, lenHD), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        # stack the video frames
        topRow = np.hstack((exgVis, exgsVis))
        bottomRow = np.hstack((hueVis, exhuVis))
        combined = np.vstack((topRow, bottomRow))
        combined = np.hstack((combined, hdFrame))
        cv2.imshow('Output', combined)
        if k == 27:
            break
def frame_processor(videoFeed, videoName):
    """Yield one processed frame per iteration from *videoFeed*.

    For the 'hd' feed frames are yielded unchanged; every other feed is
    run through the green_on_brown detector using *videoName* as the
    algorithm name.  Once the feed is exhausted a black frame of the
    last-seen shape is substituted; Esc stops the feed.
    """
    frameShape = None
    while True:
        k = cv2.waitKey(1) & 0xFF
        ret, frame = videoFeed.read()
        if ret == False:
            # Feed exhausted: substitute a black frame of the cached shape.
            frame = np.zeros(frameShape, dtype='uint8')
        if frameShape is None:
            frameShape = frame.shape
        if videoName == "hd":
            yield frame
        else:
            cnts, boxes, weedCentres, imageOut = green_on_brown(frame, exgMin=29,
                                                                exgMax=200,
                                                                hueMin=30,
                                                                hueMax=92,
                                                                saturationMin=10,
                                                                saturationMax=250,
                                                                brightnessMin=60,
                                                                brightnessMax=250,
                                                                headless=False,
                                                                algorithm=videoName, minArea=10)
            yield imageOut
        if k == 27:
            # NOTE(review): callers pass cv2.VideoCapture objects, which
            # have no .stop() method (FileVideoStream does) -- confirm this
            # Esc path is ever reached with a VideoCapture feed.
            videoFeed.stop()
            break
import pandas as pd
def blur_analysis(directory):
    """Measure per-frame Laplacian-variance blurriness for every .mp4 in *directory*.

    For each video: records the mean and std of per-frame blurriness,
    appends 100 randomly sampled per-frame values (tagged with the field
    and algorithm parsed from the file name) to a running DataFrame, and
    rewrites the CSV after each video.  Returns a dict mapping video file
    name to [mean, std] (previously the result was only printed).
    """
    blurDict = {}
    df = pd.DataFrame(columns=['field', 'algorithm', 'blur'])
    for videoPath in glob.iglob(directory + '\*.mp4'):
        allframeBlur = []
        sampledframeBlur = []
        video = FileVideoStream(videoPath).start()
        frameCount = 0
        while True:
            frame = video.read()
            if video.stopped:
                # End of video: summarize and persist the samples.
                meanBlur = np.mean(allframeBlur)
                stdBlur = np.std(allframeBlur)
                vidName = os.path.basename(videoPath)
                # File name convention: <field>-...-<algorithm>.mp4
                fieldNameList = [vidName.split("-")[0] for i in range(100)]
                print(fieldNameList)
                algorithmNameList = [os.path.splitext(vidName.split("-")[2])[0] for i in range(100)]
                for i in range(100):
                    randint = np.random.randint(0, len(allframeBlur))
                    sampledframeBlur.append(allframeBlur[randint])
                df2 = pd.DataFrame(list(zip(fieldNameList, algorithmNameList, sampledframeBlur)), columns=['field', 'algorithm', 'blur'])
                print(df2)
                # Fix: DataFrame.append was deprecated in pandas 1.4 and
                # removed in 2.0; pd.concat reproduces its behavior
                # (indices are kept, matching the old append default).
                df = pd.concat([df, df2])
                print(df)
                df.to_csv(r"videos\blur\blurriness.csv")
                blurDict[vidName] = [meanBlur, stdBlur]
                break
            # Blurriness metric: variance of the Laplacian of the greyscale frame.
            greyscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            blurriness = cv2.Laplacian(greyscale, cv2.CV_64F).var()
            allframeBlur.append(blurriness)
            frameCount += 1
        print(vidName, ',', np.round(meanBlur, 2), ',', np.round(stdBlur, 2), ',', frameCount)
    print(blurDict)
    return blurDict
if __name__ == "__main__":
# RSilos 3 - DONE
# exgFile = r'videos/20210429-142930-HQ1-exg.avi'
# exgsFile = r'videos/20210429-143441-HQ1-exgs.avi'
# hueFile = r'videos/20210429-143559-HQ1-hue.avi'
# exhuFile = r'videos/20210429-143759-HQ1-exhu.avi'
# hdFile = r'videos/20210429_143950.mp4'
# # canola night 1
# exgFile = r'videos/20210429-174827-HQ2-exg.avi'
# exgsFile = r'videos/20210429-175001-HQ2-exgs.avi'
# hueFile = r'videos/20210429-175138-HQ2-hue.avi'
# exhuFile = r'videos/20210429-175307-HQ2-exhu.avi'
# hdFile = r'videos/20210429_175512.mp4'
# RWheat 1 - DONE
# exgFile = r'videos/20210429-145743-HQ1-exg.avi'
# exgsFile = r'videos/20210429-145942-HQ1-exgs.avi'
# hueFile = r'videos/20210429-150119-HQ1-hue.avi'
# exhuFile = r'videos/20210429-150254-HQ1-exhu.avi'
# hdFile = r'videos/20210429_150543.mp4'
# CSU Sheep 1 - DONE
# exgFile = r'videos/blur/CSUSheep1-HQ1-exg.mp4'
# exgsFile = r'videos/blur/CSUSheep1-HQ1-exgs.mp4'
# hueFile = r'videos/blur/CSUSheep1-HQ1-hue.mp4'
# exhuFile = r'videos/blur/CSUSheep1-HQ1-exhu.mp4'
# hdFile = r'videos/20210430_110451.mp4'
# DPI 3 - DONE
# exgFile = r'videos/blur/DPI3-HQ2-exg.mp4'
# exgsFile = r'videos/blur/DPI3-HQ1-exgs.mp4'
# hueFile = r'videos/blur/DPI3-HQ1-hue.mp4'
# exhuFile = r'videos/blur/DPI3-HQ1-exhu.mp4'
# hdFile = r'videos/20210430_094837.mp4'
# LD Day
# exgFile = r'videos/20210507-143847-HQ2-exg.avi'
# exgsFile = r'videos/20210507-144117-HQ2-exgs.avi'
# hueFile = r'videos/20210507-144241-HQ2-hue.avi'
# exhuFile = r'videos/20210507-144407-HQ2-exhu.avi'
# hdFile = r'videos/20210507_144808.mp4'
# LD Night
# exgFile = r'videos/20210506-184104-HQ2-exg.avi'
# exgsFile = r'videos/20210506-183237-HQ2-exgs.avi'
# hueFile = r'videos/20210506-183417-HQ2-hue.avi'
# exhuFile = r'videos/20210506-183601-HQ2-exhu.avi'
# hdFile = r'videos/20210506_183834.mp4'
# frame_analysis(exgFile=exgFile,
# exgsFile=exgsFile,
# hueFile=hueFile,
# exhuFile=exhuFile,
# HDFile=hdFile)
# blur analysis
directory = r"videos/blur"
blur_analysis(directory=directory) | [
"glob.iglob",
"numpy.hstack",
"cv2.imshow",
"numpy.mean",
"cv2.Laplacian",
"imutils.video.FileVideoStream",
"numpy.vstack",
"imutils.rotate",
"pandas.DataFrame",
"greenonbrown.green_on_brown",
"cv2.waitKey",
"numpy.round",
"cv2.cvtColor",
"numpy.std",
"imutils.resize",
"numpy.zeros",
... | [((345, 370), 'cv2.VideoCapture', 'cv2.VideoCapture', (['exgFile'], {}), '(exgFile)\n', (361, 370), False, 'import cv2\n'), ((488, 514), 'cv2.VideoCapture', 'cv2.VideoCapture', (['exgsFile'], {}), '(exgsFile)\n', (504, 514), False, 'import cv2\n'), ((634, 659), 'cv2.VideoCapture', 'cv2.VideoCapture', (['hueFile'], {}), '(hueFile)\n', (650, 659), False, 'import cv2\n'), ((777, 803), 'cv2.VideoCapture', 'cv2.VideoCapture', (['exhuFile'], {}), '(exhuFile)\n', (793, 803), False, 'import cv2\n'), ((922, 946), 'cv2.VideoCapture', 'cv2.VideoCapture', (['HDFile'], {}), '(HDFile)\n', (938, 946), False, 'import cv2\n'), ((7621, 7673), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['field', 'algorithm', 'blur']"}), "(columns=['field', 'algorithm', 'blur'])\n", (7633, 7673), True, 'import pandas as pd\n'), ((7695, 7728), 'glob.iglob', 'glob.iglob', (["(directory + '\\\\*.mp4')"], {}), "(directory + '\\\\*.mp4')\n", (7705, 7728), False, 'import glob\n'), ((430, 466), 'imutils.video.count_frames', 'count_frames', (['exgFile'], {'override': '(True)'}), '(exgFile, override=True)\n', (442, 466), False, 'from imutils.video import count_frames, FileVideoStream\n'), ((576, 613), 'imutils.video.count_frames', 'count_frames', (['exgsFile'], {'override': '(True)'}), '(exgsFile, override=True)\n', (588, 613), False, 'from imutils.video import count_frames, FileVideoStream\n'), ((719, 755), 'imutils.video.count_frames', 'count_frames', (['hueFile'], {'override': '(True)'}), '(hueFile, override=True)\n', (731, 755), False, 'from imutils.video import count_frames, FileVideoStream\n'), ((865, 902), 'imutils.video.count_frames', 'count_frames', (['exhuFile'], {'override': '(True)'}), '(exhuFile, override=True)\n', (877, 902), False, 'from imutils.video import count_frames, FileVideoStream\n'), ((1004, 1039), 'imutils.video.count_frames', 'count_frames', (['HDFile'], {'override': '(True)'}), '(HDFile, override=True)\n', (1016, 1039), False, 'from imutils.video import count_frames, 
FileVideoStream\n'), ((6000, 6028), 'numpy.hstack', 'np.hstack', (['(exgVis, exgsVis)'], {}), '((exgVis, exgsVis))\n', (6009, 6028), True, 'import numpy as np\n'), ((6049, 6077), 'numpy.hstack', 'np.hstack', (['(hueVis, exhuVis)'], {}), '((hueVis, exhuVis))\n', (6058, 6077), True, 'import numpy as np\n'), ((6097, 6127), 'numpy.vstack', 'np.vstack', (['(topRow, bottomRow)'], {}), '((topRow, bottomRow))\n', (6106, 6127), True, 'import numpy as np\n'), ((6147, 6177), 'numpy.hstack', 'np.hstack', (['(combined, hdFrame)'], {}), '((combined, hdFrame))\n', (6156, 6177), True, 'import numpy as np\n'), ((6187, 6217), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'combined'], {}), "('Output', combined)\n", (6197, 6217), False, 'import cv2\n'), ((298, 324), 'os.path.basename', 'os.path.basename', (['exhuFile'], {}), '(exhuFile)\n', (314, 324), False, 'import os\n'), ((1399, 1413), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1410, 1413), False, 'import cv2\n'), ((6351, 6365), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6362, 6365), False, 'import cv2\n'), ((6457, 6492), 'numpy.zeros', 'np.zeros', (['frameShape'], {'dtype': '"""uint8"""'}), "(frameShape, dtype='uint8')\n", (6465, 6492), True, 'import numpy as np\n'), ((6681, 6879), 'greenonbrown.green_on_brown', 'green_on_brown', (['frame'], {'exgMin': '(29)', 'exgMax': '(200)', 'hueMin': '(30)', 'hueMax': '(92)', 'saturationMin': '(10)', 'saturationMax': '(250)', 'brightnessMin': '(60)', 'brightnessMax': '(250)', 'headless': '(False)', 'algorithm': 'videoName', 'minArea': '(10)'}), '(frame, exgMin=29, exgMax=200, hueMin=30, hueMax=92,\n saturationMin=10, saturationMax=250, brightnessMin=60, brightnessMax=\n 250, headless=False, algorithm=videoName, minArea=10)\n', (6695, 6879), False, 'from greenonbrown import green_on_brown\n'), ((8868, 8907), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (8880, 8907), False, 'import cv2\n'), ((9077, 9098), 
'numpy.round', 'np.round', (['meanBlur', '(2)'], {}), '(meanBlur, 2)\n', (9085, 9098), True, 'import numpy as np\n'), ((9105, 9125), 'numpy.round', 'np.round', (['stdBlur', '(2)'], {}), '(stdBlur, 2)\n', (9113, 9125), True, 'import numpy as np\n'), ((1604, 1639), 'imutils.resize', 'imutils.resize', (['hdFrame'], {'height': '(640)'}), '(hdFrame, height=640)\n', (1618, 1639), False, 'import imutils\n'), ((1666, 1700), 'imutils.rotate', 'imutils.rotate', (['hdFrame'], {'angle': '(180)'}), '(hdFrame, angle=180)\n', (1680, 1700), False, 'import imutils\n'), ((7801, 7827), 'imutils.video.FileVideoStream', 'FileVideoStream', (['videoPath'], {}), '(videoPath)\n', (7816, 7827), False, 'from imutils.video import count_frames, FileVideoStream\n'), ((7969, 7990), 'numpy.mean', 'np.mean', (['allframeBlur'], {}), '(allframeBlur)\n', (7976, 7990), True, 'import numpy as np\n'), ((8017, 8037), 'numpy.std', 'np.std', (['allframeBlur'], {}), '(allframeBlur)\n', (8023, 8037), True, 'import numpy as np\n'), ((8064, 8091), 'os.path.basename', 'os.path.basename', (['videoPath'], {}), '(videoPath)\n', (8080, 8091), False, 'import os\n'), ((8933, 8969), 'cv2.Laplacian', 'cv2.Laplacian', (['greyscale', 'cv2.CV_64F'], {}), '(greyscale, cv2.CV_64F)\n', (8946, 8969), False, 'import cv2\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# # Simple Linear Regression (sLR) With scikit-learn (Example from lesson ML05)
# Powered by: Dr. <NAME>, DHBW Stuttgart(Germany); July 2020
# Following ideas from:
# "Linear Regression in Python" by <NAME>, 28.4.2020
# (see details: https://realpython.com/linear-regression-in-python/#what-is-regression))
#The example is from Lecture: "ML_Concept&Algorithm" (WS2020); Chapter ML5, Slide with title:
# "Calculation of the optimal Regression Line y = b0 + b1*x"
# Let’s start with the simplest case, which is simple linear regression.
# There are five basic steps when you’re implementing linear regression:
# 1. Import the packages and classes you need.
# 2. Provide data to work with and eventually do appropriate transformations.
# 3. Create a regression model and fit it with existing data.
# 4. Check the results of model fitting to know whether the model is satisfactory.
# 5. Apply the model for predictions.
# These steps are more or less general for most of the regression approaches and implementations.
# # Step 1: Import packages and classes
# The first step is to import the package numpy and the class LinearRegression from sklearn.linear_model:
# In[1]:
# Step 1: Import packages and classes
import numpy as np
from sklearn.linear_model import LinearRegression
# import time module
import time
# Now, you have all the functionalities you need to implement linear regression.
#
# The fundamental data type of NumPy is the array type called numpy.ndarray. The rest of this article uses the term array to refer to instances of the type numpy.ndarray.
#
# The class sklearn.linear_model.LinearRegression will be used to perform linear and polynomial regression and make predictions accordingly.
# # Step 2: Provide data
# The second step is defining data to work with. The inputs (regressors, 𝑥) and output (predictor, 𝑦) should be arrays
# (the instances of the class numpy.ndarray) or similar objects. This is the simplest way of providing data for regression:
# In[2]:
# Step 2: Provide data
x = np.array([ 1, 3, 2, 4, 5, 6, 7]).reshape((-1, 1))
y = np.array([ 2, 4, 6, 8, 12, 13, 15])
# Now, you have two arrays: the input x and output y. You should call .reshape() on x because this array is required to be two-dimensional, or to be more precise, to have one column and as many rows as necessary. That’s exactly what the argument (-1, 1) of .reshape() specifies.
# In[3]:
print ("This is how x and y look now:")
print("x=",x)
print("y=",y)
# As you can see, x has two dimensions, and x.shape is (7, 1), while y has only a single dimension, and y.shape is (7,).
# # Step 3: Create a model and fit it
#
# The next step is to create a linear regression model and fit it using the existing data.
# Let’s create an instance of the class LinearRegression, which will represent the regression model:
# In[4]:
model = LinearRegression()
# This statement creates the variable model as the instance of LinearRegression. You can provide several optional
# parameters to LinearRegression:
#
# ----> fit_intercept is a Boolean (True by default) that decides whether to calculate the intercept 𝑏₀ (True) or consider
# it equal to zero (False).
#
# ----> normalize is a Boolean (False by default) that decides whether to normalize the input variables (True) or not
# (False).
#
# ----> copy_X is a Boolean (True by default) that decides whether to copy (True) or overwrite the input variables (False).
#
# ----> n_jobs is an integer or None (default) and represents the number of jobs used in parallel computation. None
# usually means one job and -1 to use all processors.
#
# This example uses the default values of all parameters.
#
# It’s time to start using the model. First, you need to call .fit() on model:
#
#
# In[5]:
model.fit(x, y)
# With .fit(), you calculate the optimal values of the weights 𝑏₀ and 𝑏₁, using the existing input and output (x and y) as
# the arguments. In other words, .fit() fits the model. It returns self, which is the variable model itself. That’s why you
# can replace the last two statements with this one:
# In[6]:
# model = LinearRegression().fit(x, y)
# This statement does the same thing as the previous two. It’s just shorter.
# # Step 4: Get results
#
# Once you have your model fitted, you can get the results to check whether the model works satisfactorily and
# interpret it.
#
# You can obtain the coefficient of determination (𝑅²) with .score() called on model:
# In[7]:
r_sq = model.score(x, y)
print('coefficient of determination:', r_sq)
# When you’re applying .score(), the arguments are also the predictor x and regressor y, and the return value is 𝑅².
#
# The attributes of model are .intercept_, which represents the coefficient,𝑏₀ and .coef_, which represents 𝑏₁:
# In[8]:
print('intercept:', model.intercept_)
print('slope:', model.coef_)
# The code above illustrates how to get 𝑏₀ and 𝑏₁. You can notice that .intercept_ is a scalar, while .coef_ is an array.
#
# The value 𝑏₀ = -0.1429 (approximately) illustrates that your model predicts the response -0.1429 when 𝑥 is zero. The value 𝑏₁
# = 2.1786 means that the predicted response rises by 2.1786 when 𝑥 is increased by one.
#
# You should notice that you can provide y as a two-dimensional array as well. In this case, you’ll get a similar result.
# This is how it might look:
# In[9]:
new_model = LinearRegression().fit(x, y.reshape((-1, 1)))
print('intercept:', new_model.intercept_)
print('slope:', new_model.coef_)
# As you can see, this example is very similar to the previous one, but in this case, .intercept_ is a one-dimensional array with the single element 𝑏₀, and .coef_ is a two-dimensional array with the single element 𝑏₁.
# # Step 5: Predict response
#
# Once there is a satisfactory model, you can use it for predictions with either existing or new data.
#
# To obtain the predicted response, use .predict():
# In[10]:
y_pred = model.predict(x)
print('predicted response:', y_pred, sep='\n')
# When applying .predict(), you pass the regressor as the argument and get the corresponding predicted response.
#
# This is a nearly identical way to predict the response:
# In[11]:
y_pred = model.intercept_ + model.coef_ * x
print('predicted response:', y_pred, sep='\n')
# In this case, you multiply each element of x with model.coef_ and add model.intercept_ to the product.
#
# The output here differs from the previous example only in dimensions. The predicted response is now a twodimensional
# array, while in the previous case, it had one dimension.
#
# If you reduce the number of dimensions of x to one, these two approaches will yield the same result. You can do this
# by replacing x with x.reshape(-1), x.flatten(), or x.ravel() when multiplying it with model.coef_.
#
# In practice, regression models are oen applied for forecasts. This means that you can use fitted models to calculate
# the outputs based on some other, new inputs:
# x_new = np.arange(5).reshape((-1, 1))
# print(x_new)
# y_new = model.predict(x_new)
# print(y_new)
# Here .predict() is applied to the new regressor x_new and yields the response y_new. This example conveniently uses
# arange() from numpy to generate an array with the elements from 0 (inclusive) to 5 (exclusive), that is 0, 1, 2, 3, and 4.
#
# You can find more information about LinearRegression on the official documentation page.
# In[12]:
# print current date and time
print("date",time.strftime("%d.%m.%Y %H:%M:%S"))
print ("end")
| [
"numpy.array",
"time.strftime",
"sklearn.linear_model.LinearRegression"
] | [((2144, 2178), 'numpy.array', 'np.array', (['[2, 4, 6, 8, 12, 13, 15]'], {}), '([2, 4, 6, 8, 12, 13, 15])\n', (2152, 2178), True, 'import numpy as np\n'), ((2918, 2936), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2934, 2936), False, 'from sklearn.linear_model import LinearRegression\n'), ((7527, 7561), 'time.strftime', 'time.strftime', (['"""%d.%m.%Y %H:%M:%S"""'], {}), "('%d.%m.%Y %H:%M:%S')\n", (7540, 7561), False, 'import time\n'), ((2090, 2121), 'numpy.array', 'np.array', (['[1, 3, 2, 4, 5, 6, 7]'], {}), '([1, 3, 2, 4, 5, 6, 7])\n', (2098, 2121), True, 'import numpy as np\n'), ((5451, 5469), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5467, 5469), False, 'from sklearn.linear_model import LinearRegression\n')] |
'''
*****************************************************************************************
*
* ===============================================
* Nirikshak Bot (NB) Theme (eYRC 2020-21)
* ===============================================
*
* This script is to implement Task 1A - Part 1 of Nirikshak Bot (NB) Theme (eYRC 2020-21).
*
* This software is made available on an "AS IS WHERE IS BASIS".
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD project under National Mission on Education using ICT (NMEICT)
*
*****************************************************************************************
'''
# Team ID: 763 [ Team-ID ]
# Author List: <NAME>, <NAME>, <NAME>, <NAME> [ Names of team members worked on this file separated by Comma: Name1, Name2, ... ]
# Filename: task_1a_part1.py
# Functions: scan_image, isclose, quad
# [ Comma separated list of functions in this file ]
# Global variables: shapes
# [ List of global variables defined in this file ]
####################### IMPORT MODULES #######################
## You are not allowed to make any changes in this section. ##
## You have to implement this task with the three available ##
## modules for this task (numpy, opencv, os) ##
##############################################################
import cv2
import numpy as np
import os
##############################################################
# Global dictionary of shape details found in the image, keyed by shape
# name -> ['color', Area, cX, cY]; (re)filled and returned by scan_image()
shapes = {}
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
def isclose(a, b, rel_tol=1e-09, abs_tol=3.0):
    """Return True when a and b differ by at most the larger of the
    relative tolerance and abs_tol (defaults to 3, i.e. 3 pixels)."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance


def quad(approx):  # func to diff between square rhombus trapezium parallelogram quadrilateral
    """Classify a 4-vertex contour as 'Square', 'Rhombus', 'Rectangle',
    'Parallelogram', 'Trapezium' or generic 'Quadrilateral'.

    `approx` is the 4-point polygon from cv2.approxPolyDP; flattened it
    reads [x1, y1, x2, y2, x3, y3, x4, y4].
    """
    p = approx.ravel()
    x1, y1, x2, y2 = p[0], p[1], p[2], p[3]
    x3, y3, x4, y4 = p[4], p[5], p[6], p[7]
    # Slopes of the four edges (1-2, 2-3, 3-4, 4-1)
    m12 = (y2 - y1) / (x2 - x1)
    m23 = (y3 - y2) / (x3 - x2)
    m34 = (y4 - y3) / (x4 - x3)
    m41 = (y1 - y4) / (x1 - x4)
    # Edge lengths
    l12 = np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
    l23 = np.sqrt((y3 - y2) ** 2 + (x3 - x2) ** 2)
    l34 = np.sqrt((y4 - y3) ** 2 + (x4 - x3) ** 2)
    l41 = np.sqrt((y1 - y4) ** 2 + (x1 - x4) ** 2)
    # Diagonals
    d13 = np.sqrt((y3 - y1) ** 2 + (x3 - x1) ** 2)
    d24 = np.sqrt((y4 - y2) ** 2 + (x4 - x2) ** 2)
    all_sides_equal = (isclose(l12, l23) and isclose(l23, l34)
                       and isclose(l34, l41) and isclose(l12, l34)
                       and isclose(l23, l41) and isclose(l12, l41))
    if all_sides_equal:
        # Equal sides: the diagonal of a square is sqrt(2) * side
        return 'Square' if isclose(d13, np.sqrt(2) * l12) else 'Rhombus'
    if isclose(m12, m34) and isclose(m23, m41):
        # Both pairs of opposite edges parallel; right angles satisfy
        # Pythagoras on the diagonals -> rectangle
        if isclose(np.sqrt(l12 ** 2 + l23 ** 2), d13) and isclose(np.sqrt(l23 ** 2 + l34 ** 2), d24):
            return 'Rectangle'
        return 'Parallelogram'
    if isclose(m12, m34) or isclose(m23, m41):
        return 'Trapezium'
    return 'Quadrilateral'
##############################################################
def scan_image(img_file_path):
    """
    Purpose:
    ---
    this function takes file path of an image as an argument and returns dictionary
    containing details of colored (non-white) shapes in that image

    Input Arguments:
    ---
    `img_file_path` : [ str ]
        file path of image

    Returns:
    ---
    `shapes` : [ dictionary ]
        details of colored (non-white) shapes present in image at img_file_path
        { 'Shape' : ['color', Area, cX, cY] }
        NOTE: keys are shape names, so if two shapes of the same kind are
        present only the one processed last is kept.

    Example call:
    ---
    shapes = scan_image(img_file_path)
    """
    global shapes
    ############## ADD YOUR CODE HERE ##############
    img = cv2.imread(img_file_path)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Binarise: pixels brighter than 240 (near-white background) -> 255,
    # everything else (the coloured shapes) -> 0
    _, thresh = cv2.threshold(imgGray, 240, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Sort ascending by area so the largest contour (presumably the whole
    # image / background outline -- TODO confirm) can be skipped below
    contours = sorted(contours, key=cv2.contourArea)
    shapes = {}
    # Iterate from second-largest down to smallest (index len-1 excluded)
    for i in reversed(range(0, len(contours) - 1)):
        # Polygon approximation with 1% of the perimeter as tolerance
        approx = cv2.approxPolyDP(contours[i], 0.01 * cv2.arcLength(contours[i], True), True)
        area = cv2.contourArea(contours[i])
        # Centroid from image moments (assumes m00 != 0 for a real shape)
        m = cv2.moments(contours[i])
        cx = int(m['m10'] / m['m00'])
        cy = int(m['m01'] / m['m00'])
        b, g, r = img[cy, cx]  # because in open cv the image is stored in bgr format
        # Classify colour by the dominant channel at the centroid
        if r > max(g, b):
            cl = 'red'
        elif g > max(r, b):
            cl = 'green'
        elif b > max(r, g):
            cl = 'blue'
        else:
            cl = 'random'
        # Classify the shape by the number of approximated vertices
        if len(approx) == 3:
            shapes['Triangle'] = [cl, area, cx, cy]
        elif len(approx) == 4:
            shp = quad(approx)
            shapes[shp] = [cl, area, cx, cy]
        elif len(approx) == 5:
            shapes['Pentagon'] = [cl, area, cx, cy]
        elif len(approx) == 6:
            shapes['Hexagon'] = [cl, area, cx, cy]
        else:
            shapes['Circle'] = [cl, area, cx, cy]
    ##################################################
    return shapes
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: main
# Inputs: None
# Outputs: None
# Purpose: the function first takes 'Sample1.png' as input and runs scan_image function to find details
# of colored (non-white) shapes present in 'Sample1.png', it then asks the user whether
# to repeat the same on all images present in 'Samples' folder or not
if __name__ == '__main__':
    # Locate the 'Samples' folder relative to the current working directory
    curr_dir_path = os.getcwd()
    print('Currently working in ' + curr_dir_path)
    # path directory of images in 'Samples' folder
    img_dir_path = curr_dir_path + '/Samples/'
    # path to 'Sample1.png' image file
    file_num = 1
    img_file_path = img_dir_path + 'Sample' + str(file_num) + '.png'
    print('\n============================================')
    print('\nLooking for Sample' + str(file_num) + '.png')
    if os.path.exists('Samples/Sample' + str(file_num) + '.png'):
        print('\nFound Sample' + str(file_num) + '.png')
    else:
        print('\n[ERROR] Sample' + str(file_num) + '.png not found. Make sure "Samples" folder has the selected file.')
        exit()
    print('\n============================================')
    # Run scan_image on Sample1.png and sanity-check the returned type
    try:
        print('\nRunning scan_image function with ' + img_file_path + ' as an argument')
        shapes = scan_image(img_file_path)
        if type(shapes) is dict:
            print(shapes)
            print('\nOutput generated. Please verify.')
        else:
            print('\n[ERROR] scan_image function returned a ' + str(type(shapes)) + ' instead of a dictionary.\n')
            exit()
    except Exception:
        print('\n[ERROR] scan_image function is throwing an error. Please debug scan_image function')
        exit()
    print('\n============================================')
    # Optionally repeat the same check on every image in the 'Samples' folder
    choice = input('\nWant to run your script on all the images in Samples folder ? ==>> "y" or "n": ')
    if choice == 'y':
        file_count = 2
        for file_num in range(file_count):
            # path to image file
            img_file_path = img_dir_path + 'Sample' + str(file_num + 1) + '.png'
            print('\n============================================')
            print('\nLooking for Sample' + str(file_num + 1) + '.png')
            if os.path.exists('Samples/Sample' + str(file_num + 1) + '.png'):
                print('\nFound Sample' + str(file_num + 1) + '.png')
            else:
                print('\n[ERROR] Sample' + str(file_num + 1) + '.png not found. Make sure "Samples" folder has the '
                                                               'selected file.')
                exit()
            print('\n============================================')
            try:
                print('\nRunning scan_image function with ' + img_file_path + ' as an argument')
                shapes = scan_image(img_file_path)
                if type(shapes) is dict:
                    print(shapes)
                    print('\nOutput generated. Please verify.')
                else:
                    print(
                        '\n[ERROR] scan_image function returned a ' + str(type(shapes)) + ' instead of a dictionary.\n')
                    exit()
            except Exception:
                print('\n[ERROR] scan_image function is throwing an error. Please debug scan_image function')
                exit()
            print('\n============================================')
    else:
        print('')
| [
"numpy.sqrt",
"cv2.threshold",
"cv2.arcLength",
"os.getcwd",
"cv2.contourArea",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"cv2.imread"
] | [((2460, 2508), 'numpy.sqrt', 'np.sqrt', (['((x[3] - x[1]) ** 2 + (x[2] - x[0]) ** 2)'], {}), '((x[3] - x[1]) ** 2 + (x[2] - x[0]) ** 2)\n', (2467, 2508), True, 'import numpy as np\n'), ((2519, 2567), 'numpy.sqrt', 'np.sqrt', (['((x[5] - x[3]) ** 2 + (x[4] - x[2]) ** 2)'], {}), '((x[5] - x[3]) ** 2 + (x[4] - x[2]) ** 2)\n', (2526, 2567), True, 'import numpy as np\n'), ((2578, 2626), 'numpy.sqrt', 'np.sqrt', (['((x[7] - x[5]) ** 2 + (x[6] - x[4]) ** 2)'], {}), '((x[7] - x[5]) ** 2 + (x[6] - x[4]) ** 2)\n', (2585, 2626), True, 'import numpy as np\n'), ((2637, 2685), 'numpy.sqrt', 'np.sqrt', (['((x[1] - x[7]) ** 2 + (x[0] - x[6]) ** 2)'], {}), '((x[1] - x[7]) ** 2 + (x[0] - x[6]) ** 2)\n', (2644, 2685), True, 'import numpy as np\n'), ((2875, 2923), 'numpy.sqrt', 'np.sqrt', (['((x[5] - x[1]) ** 2 + (x[4] - x[0]) ** 2)'], {}), '((x[5] - x[1]) ** 2 + (x[4] - x[0]) ** 2)\n', (2882, 2923), True, 'import numpy as np\n'), ((2930, 2978), 'numpy.sqrt', 'np.sqrt', (['((x[7] - x[3]) ** 2 + (x[6] - x[2]) ** 2)'], {}), '((x[7] - x[3]) ** 2 + (x[6] - x[2]) ** 2)\n', (2937, 2978), True, 'import numpy as np\n'), ((2988, 3016), 'numpy.sqrt', 'np.sqrt', (['(l12 ** 2 + l23 ** 2)'], {}), '(l12 ** 2 + l23 ** 2)\n', (2995, 3016), True, 'import numpy as np\n'), ((3029, 3057), 'numpy.sqrt', 'np.sqrt', (['(l23 ** 2 + l34 ** 2)'], {}), '(l23 ** 2 + l34 ** 2)\n', (3036, 3057), True, 'import numpy as np\n'), ((4295, 4320), 'cv2.imread', 'cv2.imread', (['img_file_path'], {}), '(img_file_path)\n', (4305, 4320), False, 'import cv2\n'), ((4335, 4372), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (4347, 4372), False, 'import cv2\n'), ((4389, 4440), 'cv2.threshold', 'cv2.threshold', (['imgGray', '(240)', '(255)', 'cv2.THRESH_BINARY'], {}), '(imgGray, 240, 255, cv2.THRESH_BINARY)\n', (4402, 4440), False, 'import cv2\n'), ((4459, 4523), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), 
'(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (4475, 4523), False, 'import cv2\n'), ((6169, 6180), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6178, 6180), False, 'import os\n'), ((4755, 4783), 'cv2.contourArea', 'cv2.contourArea', (['contours[i]'], {}), '(contours[i])\n', (4770, 4783), False, 'import cv2\n'), ((4796, 4820), 'cv2.moments', 'cv2.moments', (['contours[i]'], {}), '(contours[i])\n', (4807, 4820), False, 'import cv2\n'), ((3144, 3154), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3151, 3154), True, 'import numpy as np\n'), ((4700, 4732), 'cv2.arcLength', 'cv2.arcLength', (['contours[i]', '(True)'], {}), '(contours[i], True)\n', (4713, 4732), False, 'import cv2\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import tensorflow as tf
import numpy as np
import gc
import pandas as pd
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.decomposition import PCA
from tensorflow import keras
# In[2]:
# Load the CIC-IDS-2018 day file (mixed dtypes -> low_memory=False)
df = pd.read_csv("../Dataset/02-03-2018.csv", low_memory=False)
# Drop rows with index labels 0 and 1 (presumably repeated header rows
# inside the CSV -- TODO confirm against the raw file)
df = df.drop([0, 1])
# Features: every column except the class label, cast to float.
# NOTE: np.float was removed in NumPy 1.24; the builtin float is the
# drop-in equivalent (np.float always aliased Python's float).
input_label = np.array(df.loc[:, df.columns != "Label"]).astype(float)
# Binary target: 0 for benign traffic, 1 for any attack class
output_label = [0 if o == "Benign" else 1 for o in np.array(df["Label"])]
# Scale each feature into [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(input_label)
input_label = scaler.transform(input_label)
# Shuffle samples and labels together
input_label, output_label = shuffle(input_label, output_label)
# PCA down to 18 components, then reshape for Conv1D input: (n, 18, 1)
pca = PCA(n_components=18)
pca.fit(input_label)
input_label = pca.transform(input_label).reshape(len(input_label), 18, 1)
# In[18]:
def createModel():
    """Build and compile the 1-D CNN used for binary flow classification.

    Input shape is (18, 1) -- the 18 PCA components of one flow; output is
    a 2-way softmax consumed by sparse_categorical_crossentropy.
    """
    model = keras.Sequential()
    model.add(keras.layers.Conv1D(filters=16, input_shape=(18, 1), kernel_size=3,
                                  padding="same", activation="relu", use_bias=True))
    model.add(keras.layers.MaxPool1D(pool_size=3))
    model.add(keras.layers.Conv1D(filters=8, kernel_size=3, padding="same",
                                  activation="relu", use_bias=True))
    model.add(keras.layers.MaxPool1D(pool_size=3))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(units=2, activation="softmax"))
    model.compile(optimizer=keras.optimizers.Adam(lr=0.00025),
                  loss="sparse_categorical_crossentropy",
                  metrics=['accuracy'])
    return model
# In[19]:
# Stratified 10-fold cross validation, seeded for reproducibility
skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)

confusion_matrixs = []
roc_curvs = []

# Train one model per fold; collect confusion matrix and ROC data
for i, (train, test) in enumerate(skf.split(input_label, output_label)):
    print("Modelo " + str(i))
    inputs = np.array(input_label)
    targets = np.array(output_label)
    inp_train, out_train = inputs[train], targets[train]
    inp_test, out_test = inputs[test], targets[test]
    model = createModel()
    model.fit(x=inp_train, y=out_train, validation_split=0.1, epochs=10,
              shuffle=True, verbose=2)
    # Hard class predictions from the softmax outputs
    res = np.array([np.argmax(probs) for probs in model.predict(inp_test)])
    confusion_matrixs.append(confusion_matrix(out_test, res))
    fpr, tpr, _ = roc_curve(out_test, res)
    roc_curvs.append([fpr, tpr, roc_auc_score(out_test, res)])
    print("\n\n")

# Report per-fold ROC curves and confusion matrices
for i in range(10):
    print("------------------------------------")
    print("Modelo " + str(i))
    print(roc_curvs[i])
    print(confusion_matrixs[i])
    print("------------------------------------")
| [
"pandas.read_csv",
"sklearn.decomposition.PCA",
"sklearn.utils.shuffle",
"numpy.argmax",
"sklearn.metrics.roc_auc_score",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"tensorflow.keras.layers.MaxPool1D",
"sklearn.metrics.roc_curve",
"tensorflow.keras.layers.Dense",
"tensorflow.keras... | [((608, 666), 'pandas.read_csv', 'pd.read_csv', (['"""../Dataset/02-03-2018.csv"""'], {'low_memory': '(False)'}), "('../Dataset/02-03-2018.csv', low_memory=False)\n", (619, 666), True, 'import pandas as pd\n'), ((817, 838), 'numpy.array', 'np.array', (["df['Label']"], {}), "(df['Label'])\n", (825, 838), True, 'import numpy as np\n'), ((985, 1019), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (997, 1019), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1128, 1162), 'sklearn.utils.shuffle', 'shuffle', (['input_label', 'output_label'], {}), '(input_label, output_label)\n', (1135, 1162), False, 'from sklearn.utils import shuffle\n'), ((1198, 1218), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(18)'}), '(n_components=18)\n', (1201, 1218), False, 'from sklearn.decomposition import PCA\n'), ((2046, 2104), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)', 'random_state': '(1)'}), '(n_splits=10, shuffle=True, random_state=1)\n', (2061, 2104), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((2736, 2760), 'sklearn.metrics.roc_curve', 'roc_curve', (['out_test', 'res'], {}), '(out_test, res)\n', (2745, 2760), False, 'from sklearn.metrics import roc_curve\n'), ((2772, 2800), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['out_test', 'res'], {}), '(out_test, res)\n', (2785, 2800), False, 'from sklearn.metrics import roc_auc_score\n'), ((729, 771), 'numpy.array', 'np.array', (["df.loc[:, df.columns != 'Label']"], {}), "(df.loc[:, df.columns != 'Label'])\n", (737, 771), True, 'import numpy as np\n'), ((2685, 2716), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['out_test', 'res'], {}), '(out_test, res)\n', (2701, 2716), False, 'from sklearn.metrics import confusion_matrix\n'), ((1442, 1564), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 
'(16)', 'input_shape': '(18, 1)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(True)'}), "(filters=16, input_shape=(18, 1), kernel_size=3, padding\n ='same', activation='relu', use_bias=True)\n", (1461, 1564), False, 'from tensorflow import keras\n'), ((1580, 1615), 'tensorflow.keras.layers.MaxPool1D', 'keras.layers.MaxPool1D', ([], {'pool_size': '(3)'}), '(pool_size=3)\n', (1602, 1615), False, 'from tensorflow import keras\n'), ((1627, 1727), 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(8)', 'kernel_size': '(3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(True)'}), "(filters=8, kernel_size=3, padding='same', activation=\n 'relu', use_bias=True)\n", (1646, 1727), False, 'from tensorflow import keras\n'), ((1742, 1777), 'tensorflow.keras.layers.MaxPool1D', 'keras.layers.MaxPool1D', ([], {'pool_size': '(3)'}), '(pool_size=3)\n', (1764, 1777), False, 'from tensorflow import keras\n'), ((1789, 1811), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (1809, 1811), False, 'from tensorflow import keras\n'), ((1821, 1870), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(2)', 'activation': '"""softmax"""'}), "(units=2, activation='softmax')\n", (1839, 1870), False, 'from tensorflow import keras\n'), ((1911, 1944), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.00025)'}), '(lr=0.00025)\n', (1932, 1944), False, 'from tensorflow import keras\n'), ((2305, 2326), 'numpy.array', 'np.array', (['input_label'], {}), '(input_label)\n', (2313, 2326), True, 'import numpy as np\n'), ((2335, 2357), 'numpy.array', 'np.array', (['output_label'], {}), '(output_label)\n', (2343, 2357), True, 'import numpy as np\n'), ((2390, 2411), 'numpy.array', 'np.array', (['input_label'], {}), '(input_label)\n', (2398, 2411), True, 'import numpy as np\n'), ((2419, 2441), 'numpy.array', 'np.array', (['output_label'], {}), 
'(output_label)\n', (2427, 2441), True, 'import numpy as np\n'), ((2602, 2617), 'numpy.argmax', 'np.argmax', (['resu'], {}), '(resu)\n', (2611, 2617), True, 'import numpy as np\n')] |
import logging
import re
from typing import Any, List, Optional, Union
import numpy as np
from skweak.aggregation import HMM as HMM_
from spacy.lang.en import English
from spacy.tokenizer import Tokenizer
from spacy.tokens import Span
from ..basemodel import BaseSeqModel
from ..dataset import BaseSeqDataset
from ..utils import set_seed
# Module-level logger (silenced to ERROR in HMM.fit when verbose=False)
logger = logging.getLogger(__name__)
# Sentinel label id for "labeling function abstains" (skweak convention;
# not referenced elsewhere in this file -- TODO confirm external use)
ABSTAIN = -1
def label_to_span(labels: List[str],
                  scheme: Optional[str] = 'BIO') -> dict:
    """
    Convert a token-level label sequence into labeled entity spans.

    :param labels: a list of token labels, e.g. ['B-PER', 'I-PER', 'O']
    :param scheme: labeling scheme, in ['BIO', 'BILOU'].
    :return: dict mapping (start, end) token index pairs (end exclusive)
        to the entity type.

    Fixes over the previous version:
    - BILOU: the closing 'L' token is now included in the span
      (previously the span ended *before* the 'L' token).
    - BILOU: a 'B' entity that never sees an 'L' no longer aborts the
      whole parse; the span is closed at the end of the sequence.
    - BIO: an unexpected prefix (not B/I/O) no longer causes an
      infinite loop; such tokens are skipped.
    """
    assert scheme in ['BIO', 'BILOU'], ValueError("unknown labeling scheme")
    labeled_spans = dict()
    n = len(labels)
    i = 0
    while i < n:
        prefix = labels[i][0]
        if scheme == 'BIO':
            if prefix == 'B':
                start = i
                lb = labels[i][2:]
                i += 1
                # consume the I-continuation tokens of this entity
                while i < n and labels[i][0] == 'I':
                    i += 1
                labeled_spans[(start, i)] = lb
                continue
            # 'O' tokens and stray 'I' tokens (no preceding 'B') fall
            # through and are skipped
        else:  # BILOU
            if prefix == 'U':
                # unit-length entity
                labeled_spans[(i, i + 1)] = labels[i][2:]
            elif prefix == 'B':
                start = i
                lb = labels[i][2:]
                i += 1
                # scan forward for the closing 'L' token
                while i < n and labels[i][0] != 'L':
                    i += 1
                # include the 'L' token in the span; if the sequence ends
                # without one, close the span at the end of the sequence
                end = i + 1 if i < n else n
                labeled_spans[(start, end)] = lb
                i = end
                continue
        i += 1
    return labeled_spans
class HMM(BaseSeqModel):
    """Hidden-Markov-Model label aggregator: wraps skweak's HMM to combine
    the outputs of several labeling functions into one token-level
    prediction per document."""
    def __init__(self,
                 n_epochs: Optional[int] = 50,
                 redundancy_factor: Optional[float] = 0.0,
                 **kwargs: Any):
        # n_epochs: number of HMM fitting iterations (passed as n_iter);
        # redundancy_factor: forwarded to skweak's HMM constructor
        super().__init__()
        self.hyperparas = {
            "n_epochs" : n_epochs,
            "redundancy_factor": redundancy_factor,
        }
        self.model = None

    def prepare_doc(self, corpus, weak_labels):
        # Build spaCy docs where span group '0'..str(n_lf-1) holds the
        # entity spans predicted by each labeling function.
        # corpus: list of token lists; weak_labels: per-text array indexed
        # as [token, labeling_function] with label ids (assumed -- TODO
        # confirm against BaseSeqDataset).
        nlp = English()
        # Whitespace-only tokenizer so spaCy tokens align 1:1 with the
        # pre-tokenized input (enforced by the assert below)
        nlp.tokenizer = Tokenizer(nlp.vocab, token_match=re.compile(r'\S').match)
        docs = []
        for text, weak_labels_i in zip(corpus, weak_labels):
            doc = nlp(' '.join(text))
            assert len(doc) == len(text)
            for i in range(self.n_lf):
                doc.spans[str(i)] = []
                weak_label = [self.id2label[ii] for ii in weak_labels_i[:, i]]
                for (start, end), label in label_to_span(weak_label).items():
                    span = Span(doc, start, end, label)
                    doc.spans[str(i)].append(span)
            docs.append(doc)
        return docs

    def fit(self,
            dataset_train: Union[BaseSeqDataset],
            verbose: Optional[bool] = False,
            seed: int = None,
            **kwargs: Any):
        # Fit the skweak HMM aggregator on the weak labels of dataset_train.
        if not verbose:
            logger.setLevel(logging.ERROR)
        # NOTE: `seed or ...` draws a random seed when seed is None OR 0
        seed = seed or np.random.randint(1e6)
        set_seed(seed)
        self._update_hyperparas(**kwargs)
        self.entity_types = dataset_train.entity_types
        self.id2label = dataset_train.id2label
        self.n_lf = dataset_train.n_lf
        docs = self.prepare_doc([item['text'] for item in dataset_train.examples], dataset_train.weak_labels)
        # with NoStdStreams(logger):
        hmm = HMM_("hmm", self.entity_types, redundancy_factor=self.hyperparas['redundancy_factor'])
        hmm.fit(docs, n_iter=self.hyperparas['n_epochs'])
        self.model = hmm

    def predict(self, dataset: BaseSeqDataset, **kwargs: Any):
        # Aggregate the weak labels of `dataset` with the fitted HMM;
        # returns one list of predicted label ids per document.
        model = self.model
        docs = self.prepare_doc([item['text'] for item in dataset.examples], dataset.weak_labels)
        preds = []
        for doc in docs:
            # Span groups from labeling functions that actually fired and
            # are eligible for aggregation (mirrors skweak's own filter)
            sources = [source for source in doc.spans if len(doc.spans[source]) > 0
                       and not doc.spans[source].attrs.get("aggregated", False)
                       and not doc.spans[source].attrs.get("avoid_in_aggregation", False)]
            if len(sources) > 0:
                df = model.get_observation_df(doc)
                # Running the actual aggregation
                agg_df = model._aggregate(df)
                # Converting back to token labels
                preds.append(agg_df.values.argmax(axis=1).tolist())
            else:
                # No labeling function fired on this doc: predict label id
                # 0 for every token (presumably the 'O' label -- confirm)
                preds.append([0] * len(doc))
        return preds
| [
"logging.getLogger",
"re.compile",
"spacy.lang.en.English",
"spacy.tokens.Span",
"numpy.random.randint",
"skweak.aggregation.HMM"
] | [((350, 377), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (367, 377), False, 'import logging\n'), ((2854, 2863), 'spacy.lang.en.English', 'English', ([], {}), '()\n', (2861, 2863), False, 'from spacy.lang.en import English\n'), ((4152, 4243), 'skweak.aggregation.HMM', 'HMM_', (['"""hmm"""', 'self.entity_types'], {'redundancy_factor': "self.hyperparas['redundancy_factor']"}), "('hmm', self.entity_types, redundancy_factor=self.hyperparas[\n 'redundancy_factor'])\n", (4156, 4243), True, 'from skweak.aggregation import HMM as HMM_\n'), ((3759, 3787), 'numpy.random.randint', 'np.random.randint', (['(1000000.0)'], {}), '(1000000.0)\n', (3776, 3787), True, 'import numpy as np\n'), ((2921, 2938), 're.compile', 're.compile', (['"""\\\\S"""'], {}), "('\\\\S')\n", (2931, 2938), False, 'import re\n'), ((3366, 3394), 'spacy.tokens.Span', 'Span', (['doc', 'start', 'end', 'label'], {}), '(doc, start, end, label)\n', (3370, 3394), False, 'from spacy.tokens import Span\n')] |
import os
import glob
import cmat
import pickle
import math
import collections
from collections import Counter
import numpy as np
import pandas as pd
import sklearn.model_selection
import src.featurizer
def replace_classes(y, replace_dict):
    """Map class labels in ``y`` according to ``replace_dict``.

    Returns ``y`` unchanged when ``replace_dict`` is empty or None;
    otherwise returns ``y.replace(replace_dict)`` (pandas semantics).
    """
    return y.replace(replace_dict) if replace_dict else y
def grid_search(args):
    """
    Wrapper around sklearn's ParameterGrid that also accepts a single
    dict (instead of a list of dicts) and scalar parameter values
    (instead of lists of candidates): both are wrapped in one-element
    lists before being handed to ParameterGrid.
    """
    if not isinstance(args, (list, tuple)):
        args = [args]
    normalized = []
    for spec in args:
        normalized.append({
            key: val if isinstance(val, (list, tuple)) else [val]
            for key, val in spec.items()
        })
    return sklearn.model_selection.ParameterGrid(normalized)
def cv_split(data, folds, randomize=0):
    """
    Generate cross-validation splits over the subjects (keys) of ``data``.

    Yields ``(fold_index, train_subjects, valid_subjects)`` tuples.
    A non-positive ``folds`` means leave-one-out; ``randomize`` > 0 seeds
    NumPy's RNG and shuffles the subject order first.

    Raises ValueError when more folds than subjects are requested.
    """
    if folds > len(data):
        raise ValueError(f'More folds than subjects provided {folds} > {len(data)}')
    if folds <= 0:
        # Leave-one-out: one fold per subject
        folds = len(data)
    subjects = list(data)
    if randomize > 0:
        np.random.seed(randomize)
        np.random.shuffle(subjects)
    # Number of validation subjects per fold (last fold may be smaller)
    step = int(np.ceil(len(data) / folds))
    for fold in range(folds):
        held_out = subjects[fold * step:(fold + 1) * step]
        training = [s for s in subjects if s not in held_out]
        yield fold, training, held_out
def get_multiset_cv_split(data, subject_groups, num_test,
                          num_valid=0, randomize=0):
    """
    Cross-validation splits that draw test subjects evenly from several
    subject groups.

    Each element of ``subject_groups`` lists the subject ids of one
    group; every fold takes num_test/len(groups) test subjects (and
    num_valid/len(groups) validation subjects, currently not yielded)
    from each group. Yields ``(fold, train_subjects, test_subjects)``.
    """
    # Partition the subjects of `data` by group, preserving data order
    groups = []
    for members in subject_groups:
        selected = [subject for subject in data if subject in members]
        if randomize > 0:
            # Seed is re-applied per group, as in the original design
            np.random.seed(randomize)
            np.random.shuffle(selected)
        groups.append(selected)
    n_groups = len(groups)
    step_val = num_valid // n_groups
    step_tst = num_test // n_groups
    # Number of complete folds limited by the smallest group
    n_folds = min(len(g) for g in groups) // ((num_test + num_valid) // n_groups)
    for fold in range(n_folds):
        test_subjects = []
        train_subjects = []
        offset = fold * (step_val + step_tst)
        for members in groups:
            val_part = members[offset:offset + step_val]
            tst_part = members[offset + step_val:offset + step_val + step_tst]
            excluded = set(val_part) | set(tst_part)
            train_subjects += [s for s in members if s not in excluded]
            test_subjects += tst_part
        yield fold, train_subjects, test_subjects
def read_chunked_data(file, batch_size, sequence_length):
    """Stream a csv file in chunks of ``batch_size * sequence_length`` rows.

    The first column is parsed as a datetime index. The trailing,
    incomplete chunk is truncated to a whole multiple of
    ``sequence_length`` and dropped entirely when nothing remains.
    """
    rows_per_chunk = batch_size * sequence_length
    reader = pd.read_csv(file,
                         index_col=0, parse_dates=[0],
                         chunksize=rows_per_chunk
                         )
    for chunk in reader:
        if len(chunk) == rows_per_chunk:
            yield chunk
            continue
        # Last, short chunk: keep only complete sequences, then stop
        trimmed = chunk[:len(chunk) - len(chunk) % sequence_length]
        if len(trimmed):
            yield trimmed
        break
def save_intermediate_cmat(path, filename, args, cmats):
    """Pickle ``[args, cmats]`` to ``path + filename``.

    Creates ``path`` if it does not exist yet.

    Parameters
    ----------
    path : str
        Target directory; it is concatenated directly with ``filename``,
        so it is expected to end with a path separator.
    filename : str
        Name of the pickle file to create.
    args : object
        Run configuration stored alongside the confusion matrices.
    cmats : object
        Confusion matrices (or any picklable result) to store.
    """
    # Save cmat object and args in pickle file:
    args_cmats = [args, cmats]
    if not os.path.exists(path):
        os.makedirs(path)
    # Context manager guarantees the handle is closed even if pickling
    # fails (the previous explicit open/close leaked it on error)
    with open(path + filename, 'wb') as filehandler:
        pickle.dump(args_cmats, filehandler)
def windowed_labels(
        labels,
        num_labels,
        frame_length,
        frame_step=None,
        pad_end=False,
        kind='density',
):
    """Segment a label array into fixed-length windows.

    With kind=None we are able to split the given labels
    array into batches.

    Parameters
    ----------
    labels : np.array
        Vector of non-negative integer labels (must be 1-d unless
        ``kind`` is None).
    num_labels : int
        Number of distinct classes; width of the per-window
        counts/density/one-hot vectors.
    frame_length : int
        Window size in samples.
    frame_step : int, optional
        Hop between windows; defaults to ``frame_length`` (no overlap).
    pad_end : bool, optional
        Keep the final shorter-than-``frame_length`` window if True.
    kind : str or None, optional
        'counts'  -> per-window label histograms
        'density' -> histograms normalised by window length
        'onehot'  -> one-hot vector of the majority label
        'argmax'  -> index of the majority label
        None      -> raw label chunks without aggregation

    Returns
    -------
    : np.array

    Raises
    ------
    ValueError
        For non-vector labels (when aggregating), negative labels, or an
        unknown ``kind``.
    """
    # Labels should be a single vector (int-likes) or kind has to be None
    labels = np.asarray(labels)
    if kind is not None and not labels.ndim == 1:
        raise ValueError('Labels must be a vector')
    if not (labels >= 0).all():
        raise ValueError('All labels must be >= 0')
    # Kind determines how labels in each window should be processed
    if kind not in {'counts', 'density', 'onehot', 'argmax', None}:
        raise ValueError('`kind` must be in {counts, density, onehot, argmax, None}')
    # Let frame_step default to one full frame_length
    frame_step = frame_length if frame_step is None else frame_step
    # Process labels with a sliding window.
    output = []
    for i in range(0, len(labels), frame_step):
        chunk = labels[i:i+frame_length]
        # Ignore incomplete end chunk unless padding is enabled
        if len(chunk) < frame_length and not pad_end:
            continue
        # Just append the chunk if kind is None
        if kind is None:
            output.append(chunk)
            continue
        # Count the occurrences of each label. minlength=num_labels (not
        # max(labels)) guarantees every window yields a vector of the
        # same width, even when a window lacks the highest label —
        # otherwise np.array(output) below would get ragged rows.
        counts = np.bincount(chunk, minlength=num_labels)
        # Then process based on kind
        if kind == 'counts':
            output.append(counts)
        elif kind == 'density':
            output.append(counts / len(chunk))
        elif kind == 'onehot':
            one_hot = np.zeros(num_labels)
            one_hot[np.argmax(counts)] = 1
            output.append(one_hot)
        elif kind == 'argmax':
            output.append(np.argmax(counts))
    return np.array(output)
def windowed_signals(
        signals,
        frame_length,
        frame_step=None,
        pad_end=False
):
    """Segment ``signals`` into windows of ``frame_length`` samples."""
    # Default to non-overlapping windows
    hop = frame_length if frame_step is None else frame_step
    segments = []
    for start in range(0, len(signals), hop):
        window = signals[start:start + frame_length]
        # A short trailing window is only kept when padding is enabled
        if pad_end or len(window) == frame_length:
            segments.append(window)
    return np.array(segments)
def unfold_windows(arr, window_size, window_shift,
                   overlap_kind='mean'):
    '''Reassemble (overlapping) windows into one flat 2-d array.

    Parameters
    ----------
    arr: np.array
        Either 2 or 3 dimensional; the first axis enumerates windows
    window_size: int
    window_shift: int
    overlap_kind: str, optional
        What to do with possible overlapping areas (default is 'mean').
        'sum' adds the values in the overlapping areas,
        'mean' computes the mean of the overlapping areas.

    Returns
    -------
    : np.arr
        2-dimensional array

    Raises
    ------
    NotImplementedError
        For any other ``overlap_kind``.
    '''
    if overlap_kind not in ('sum', 'mean'):
        raise NotImplementedError(f'overlap_kind {overlap_kind}')
    num_windows = arr.shape[0]
    out_rows = window_shift * num_windows + window_size - window_shift
    acc = np.zeros((out_rows, arr.shape[-1]))
    hits = np.zeros((out_rows, 1))
    for idx in range(num_windows):
        lo = idx * window_shift
        acc[lo:lo + window_size] += arr[idx]
        hits[lo:lo + window_size] += 1.0
    if overlap_kind == 'sum':
        return acc
    # 'mean': normalise each row by how many windows covered it
    return acc / hits
def get_existing_features(path, label_column, fold_nr):
    '''Features and labels extracted from csv files.

    Scans the fold directories below ``path`` for the one whose numeric
    suffix (after the last underscore) equals ``fold_nr`` and loads every
    csv file inside it.

    Parameters
    ----------
    path : str
        Directory containing fold sub-directories named ``*_<nr>``.
    label_column : str
        Name of the label column in each csv file.
    fold_nr : int
        Number of the fold to load.

    Returns
    -------
    data : dict
        Maps each csv file name to an ``(x, y)`` tuple, where ``x`` holds
        the feature columns (those whose name contains 'f') and ``y`` the
        label column. Empty when no matching fold directory exists.
    '''
    folds = os.listdir(path)
    # Initialise up front: without this, a missing fold directory made
    # the final ``return data`` raise UnboundLocalError
    data = {}
    for fold in folds:
        folder_nr = int(fold.split('_')[-1])
        if folder_nr != fold_nr:
            continue
        print('Get from folder: ', fold)
        data = {}
        files = os.listdir(path+'/'+fold)
        for f in files:
            df = pd.read_csv(path+'/'+fold+'/'+f)
            y = df[label_column]
            feature_columns = list(df.columns.drop(label_column))
            # Keep only feature columns (by convention named with an 'f')
            feature_columns = [fc for fc in feature_columns if 'f' in fc]
            x = df[feature_columns]
            data[f] = (x, y)
    return data
def load_dataset(dataset_paths, config):
    '''Loads the data and creates the features according to the config

    Parameters
    ----------
    dataset_paths: list of str
    config: src.config.Config

    Returns
    -------
    data: dict of (x,y) tuple
        x are the signal features
        y are the corresponding labels
    ground_truths: dict of np.array
        Due to feature creation, majority voting is applied, which reduces
        the number of labels in the training data. To allow proper testing,
        ground_truths contains the original labels for each sample without
        aggregation.
    '''
    ground_truths = {}
    data = {}
    # Read train data from csv files in configured directory
    for dataset_path in dataset_paths:
        subjects = {}
        print(f'Reading train data from {dataset_path}')
        # One DataFrame per csv file, keyed by its base file name
        for path in glob.glob(os.path.join(dataset_path, '*.csv')):
            subjects[os.path.basename(path)] = pd.read_csv(path)
        columns = config.SENSOR_COLUMNS
        # Grab data corresponding to column and compute features
        for subject, subject_data in subjects.items():
            print(f'Preprocessing: {subject}')
            x = subject_data[columns]
            y = subject_data[config.LABEL_COLUMN]
            # Replace classes with majority
            y = replace_classes(y, config.replace_classes)
            # Window the raw signals into fixed-length frames
            x = windowed_signals(
                x,
                config.SEQUENCE_LENGTH,
                config.FRAME_SHIFT
            )
            # Split original labels into subsets according to the frame_length
            # and frame_shift. This is later used for testing
            gt = windowed_labels(
                labels=y,
                num_labels=len(config.CLASSES),
                frame_length=config.SEQUENCE_LENGTH,
                frame_step=config.FRAME_SHIFT,
                pad_end=False,
                kind=None,
            ).reshape(-1)
            # Windowing and majority voting for training
            y = windowed_labels(
                labels=y,
                num_labels=len(config.CLASSES),
                frame_length=config.SEQUENCE_LENGTH,
                frame_step=config.FRAME_SHIFT,
                pad_end=False,
                kind='argmax',
            )
            # Generate features
            x = src.featurizer.Featurizer.get(config.FEATURES,
                                              x, columns,
                                              sample_rate=config.SAMPLE_RATE)
            # Transform np array to series
            y = pd.Series(y)
            # Add to set of preprocessed data
            data[subject] = (x, y)
            ground_truths[subject] = gt
    return data, ground_truths
| [
"pandas.Series",
"os.path.exists",
"os.listdir",
"pickle.dump",
"pandas.read_csv",
"os.makedirs",
"numpy.asarray",
"os.path.join",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"os.path.basename",
"numpy.random.shuffle"
] | [((3001, 3069), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '(0)', 'parse_dates': '[0]', 'chunksize': 'chunksize'}), '(file, index_col=0, parse_dates=[0], chunksize=chunksize)\n', (3012, 3069), True, 'import pandas as pd\n'), ((3598, 3634), 'pickle.dump', 'pickle.dump', (['args_cmats', 'filehandler'], {}), '(args_cmats, filehandler)\n', (3609, 3634), False, 'import pickle\n'), ((4110, 4128), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (4120, 4128), True, 'import numpy as np\n'), ((5596, 5612), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (5604, 5612), True, 'import numpy as np\n'), ((6219, 6235), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (6227, 6235), True, 'import numpy as np\n'), ((6896, 6913), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (6904, 6913), True, 'import numpy as np\n'), ((7547, 7563), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7557, 7563), False, 'import os\n'), ((1130, 1155), 'numpy.random.seed', 'np.random.seed', (['randomize'], {}), '(randomize)\n', (1144, 1155), True, 'import numpy as np\n'), ((1164, 1191), 'numpy.random.shuffle', 'np.random.shuffle', (['subjects'], {}), '(subjects)\n', (1181, 1191), True, 'import numpy as np\n'), ((3502, 3522), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3516, 3522), False, 'import os\n'), ((3532, 3549), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (3543, 3549), False, 'import os\n'), ((7761, 7790), 'os.listdir', 'os.listdir', (["(path + '/' + fold)"], {}), "(path + '/' + fold)\n", (7771, 7790), False, 'import os\n'), ((1927, 1952), 'numpy.random.seed', 'np.random.seed', (['randomize'], {}), '(randomize)\n', (1941, 1952), True, 'import numpy as np\n'), ((1965, 2006), 'numpy.random.shuffle', 'np.random.shuffle', (['corresponding_subjects'], {}), '(corresponding_subjects)\n', (1982, 2006), True, 'import numpy as np\n'), ((7119, 7144), 'numpy.zeros', 'np.zeros', (['(new_dim[0], 1)'], 
{}), '((new_dim[0], 1))\n', (7127, 7144), True, 'import numpy as np\n'), ((7828, 7868), 'pandas.read_csv', 'pd.read_csv', (["(path + '/' + fold + '/' + f)"], {}), "(path + '/' + fold + '/' + f)\n", (7839, 7868), True, 'import pandas as pd\n'), ((8990, 9025), 'os.path.join', 'os.path.join', (['dataset_path', '"""*.csv"""'], {}), "(dataset_path, '*.csv')\n", (9002, 9025), False, 'import os\n'), ((9075, 9092), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (9086, 9092), True, 'import pandas as pd\n'), ((10695, 10707), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (10704, 10707), True, 'import pandas as pd\n'), ((9049, 9071), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (9065, 9071), False, 'import os\n'), ((5410, 5430), 'numpy.zeros', 'np.zeros', (['num_labels'], {}), '(num_labels)\n', (5418, 5430), True, 'import numpy as np\n'), ((5451, 5468), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (5460, 5468), True, 'import numpy as np\n'), ((5566, 5583), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (5575, 5583), True, 'import numpy as np\n')] |
"""Configuration for MeerKAT observatory."""
from __future__ import division
from __future__ import absolute_import
import ephem
import json
import katpoint
import numpy
import os
from datetime import datetime, timedelta
from .simulate import user_logger, setobserver
from .targets import katpoint_target_string
# Resolve the array reference position. Prefer the site configuration
# served by katconf (a config directory or a node.conf file); fall back to
# a hard-coded MeerKAT reference location when katconf is unavailable or
# no usable configuration can be found.
try:
    import katconf
    # Set up configuration source
    _config_path = "/var/kat/config"
    _node_file = "/var/kat/node.conf"
    _settings = {}
    if os.path.isdir(_config_path):
        katconf.set_config(katconf.environ(override=_config_path))
    elif os.path.isfile(_node_file):
        # node.conf is JSON mapping setting names to values
        with open(_node_file, "r") as fh:
            _node_conf = json.loads(fh.read())
        for _key, _val in _node_conf.items():
            # Remove comments at the end of the line
            _val = _val.split("#", 1)[0]
            _settings[_key] = _val.strip()
        if _settings.get("configuri", False):
            katconf.set_config(katconf.environ(_node_conf["configuri"]))
        else:
            raise ValueError("Could not open node config file using configuri")
    else:
        raise ValueError("Could not open node config file")
except (ImportError, ValueError):
    # default reference position for MKAT array
    _ref_location = "ref, -30:42:39.8, 21:26:38.0, 1035.0, 0.0, , , 1.15"
    _node_config_available = False
else:
    # default reference position for MKAT array from katconf
    _ref_location = (
        katconf.ArrayConfig().array["array"]["name"]
        + ", "
        + katconf.ArrayConfig().array["array"]["position"]
    )
    _node_config_available = True
class Observatory(object):
    """Basic LST calculations using ephem."""
    def __init__(self, location=None, horizon=20.0, datetime=None):
        """Set up the observatory.

        Parameters
        ----------
        location: str, optional
            katpoint antenna description string; defaults to the
            module-level reference position
        horizon: float, optional
            minimum pointing angle in degrees
        datetime: optional
            observation date to set on the underlying ephem observer
            (defaults to the current time)
        """
        self.location = _ref_location
        self.node_config_available = _node_config_available
        if location is not None:
            self.location = location
        self.kat = self.get_location()
        self.observer = self.get_observer(horizon=horizon)
        if datetime is not None:
            self.observer.date = datetime
    def _midnight_(self):
        # Midnight (00:00:00.000000) at the start of the current local day
        return datetime.now().replace(hour=0,
                                       minute=0,
                                       second=0,
                                       microsecond=0)
    def _ephem_risetime_(self, ephem_target, lst=True):
        """Next rise time of the target, as LST (default) or ephem date.

        Falls back to one second past midnight for targets that never set
        (ephem.AlwaysUpError) or cannot be computed (AttributeError).
        """
        midnight_plus_one = ((self._midnight_() + timedelta(seconds=1))
                             .strftime("%H:%M:%S"))
        midnight_plus_one = ephem.hours(midnight_plus_one)
        try:
            rise_time = self.observer.next_rising(ephem_target)
        except ephem.AlwaysUpError:
            return midnight_plus_one
        except AttributeError:
            return midnight_plus_one
        if not lst:
            return rise_time
        # Evaluate the sidereal time at the moment of rising
        self.observer.date = rise_time
        return self.observer.sidereal_time()
    def _ephem_settime_(self, ephem_target, lst=True):
        """Next set time of the target, as LST (default) or ephem date.

        Falls back to one second before the next midnight for targets
        that never set (ephem.AlwaysUpError) or cannot be computed
        (AttributeError).
        """
        midnight = self._midnight_() + timedelta(days=1)
        midnight_minus_one = ((midnight - timedelta(seconds=1))
                              .strftime("%H:%M:%S"))
        midnight_minus_one = ephem.hours(midnight_minus_one)
        try:
            # Search for the set time starting from the next rise time
            rise_time = self.observer.next_rising(ephem_target)
            set_time = self.observer.next_setting(ephem_target, start=rise_time)
        except ephem.AlwaysUpError:
            return midnight_minus_one
        except AttributeError:
            return midnight_minus_one
        if not lst:
            return set_time
        # Evaluate the sidereal time at the moment of setting
        self.observer.date = set_time
        return self.observer.sidereal_time()
    def target_rise_and_set_times(self, target, lst=True):
        """Target rise and set LST times"""
        rise_lst = self._ephem_risetime_(target, lst=lst)
        set_lst = self._ephem_settime_(target, lst=lst)
        return rise_lst, set_lst
    def read_file_from_node_config(self, catalogue_file):
        """Read catalogue file from node config.

        Parameters
        ----------
        catalogue_file: file
            Catalogue of celestial objects that can be observed with
            the telescope system running on the current node

        Raises
        ------
        AttributeError
            If no node config source is available
        """
        if not self.node_config_available:
            raise AttributeError("Node config is not configured")
        else:
            err_msg = "Catalogue file does not exist in node config!"
            assert katconf.resource_exists(catalogue_file), err_msg
            return katconf.resource_template(catalogue_file)
    def get_location(self):
        """Get the default reference location.

        Calls the katpoint.Antenna object,
        a MeerKAT wrapper around the PyEphem.observer object
        """
        return katpoint.Antenna(self.location)
    def get_observer(self, horizon=20.0):
        """Get the MeerKAT observer object.

        The location and time of the telescope instance

        Parameters
        ----------
        horizon: float
            minimum pointing angle in degrees
        """
        observer = self.kat.observer
        # ephem expects the horizon in radians; date defaults to now
        observer.horizon = numpy.deg2rad(horizon)
        observer.date = ephem.now()
        return observer
    def set_target(self, target):
        """Set the target.

        MeerKAT Wrapper around a PyEphem.Body object, target is an object
        that can be pointed at by an antenna.

        Parameters
        ----------
        target: str
            A comma-separated description which contains parameters such as
            the target name, position, flux model.
        """
        target = katpoint.Target(target)
        # Compute the target's ephem body for the current observer state
        target.body.compute(self.observer)
        return target
    def get_target(self, target_item):
        """Obtain target description.

        Call to `set_target` methods described in this module

        Parameters
        ----------
        target_item: str
            Names and descriptions of target(s) which can be pointed at by an antenna
        """
        name, target_item = katpoint_target_string(target_item)
        return self.set_target(target_item)
    def lst2hours(self, ephem_lst):
        """Convert time format from ephem LST time to number of hours since epoch.

        Parameters
        ----------
        ephem_lst: datetime
            ephem LST datetime

        Returns
        -------
        str
            decimal hours formatted with three decimals
        """
        time_ = datetime.strptime("{}".format(ephem_lst), "%H:%M:%S.%f").time()
        # Fractional hours from hour/minute/second/microsecond components
        time_ = (
            time_.hour
            + (time_.minute / 60.0)
            + (time_.second + time_.microsecond / 1e6) / 3600.0
        )
        return "%.3f" % time_
    def start_obs(self, target_list, str_flag=False):
        """Start time of the observation.

        Call to `lst2hours` method described in this module for the
        starting time target of observation

        Parameters
        ----------
        target_list: list
            List of targets and information about their location, flux etc
        str_flag: bool
            Return the earliest rise time as a plain string instead of
            decimal hours
        """
        start_lst = []
        for target in target_list:
            target_ = self.get_target(target).body
            start_lst.append(self._ephem_risetime_(target_))
        # Earliest rise time over all targets
        start_lst = start_lst[numpy.asarray(start_lst, dtype=float).argmin()]
        if str_flag:
            return str(start_lst)
        return self.lst2hours(start_lst)
    def end_obs(self, target_list, str_flag=False):
        """End time of the observation.

        Call to `lst2hours` method described in this module for the
        end time target of observation

        Parameters
        ----------
        target_list: list
            List of targets and information about their location, flux etc
        str_flag: bool
            Return the latest set time as a plain string instead of
            decimal hours
        """
        end_lst = []
        for target in target_list:
            target_ = self.get_target(target).body
            end_lst.append(self._ephem_settime_(target_))
        # Latest set time over all targets
        end_lst = end_lst[numpy.asarray(end_lst, dtype=float).argmax()]
        if str_flag:
            return str(end_lst)
        return self.lst2hours(end_lst)
def collect_targets(kat, args):
    """Collect targets into katpoint catalogue.

    Parameters
    ----------
    kat: session kat container-like object
    args: list of str
        Catalogue file names, full target description strings, or names
        of targets to look up in the standard catalogue

    Returns
    -------
    catalogue: katpoint.Catalogue

    Raises
    ------
    ValueError
        If no known targets are found in the argument list
    """
    from_names = from_strings = from_catalogues = num_catalogues = 0
    catalogue = katpoint.Catalogue()
    catalogue.antenna = katpoint.Antenna(_ref_location)
    setobserver(catalogue.antenna.observer)
    for arg in args:
        try:
            # First assume the string is a catalogue file name
            count_before_add = len(catalogue)
            try:
                # Context manager closes the file handle even when
                # parsing fails (previously it was leaked)
                with open(arg) as catalogue_file:
                    catalogue.add(catalogue_file)
            except ValueError:
                msg = "Catalogue {} contains bad targets".format(arg)
                user_logger.warning(msg)
            from_catalogues += len(catalogue) - count_before_add
            num_catalogues += 1
        except IOError:
            # If the file failed to load,
            # assume it is a name or description string
            # With no comma in target string,
            # assume it's the name of a target
            # to be looked up in standard catalogue
            if arg.find(",") < 0:
                target = kat.sources[arg]
                if target is None:
                    msg = "Unknown target or catalogue {}, skipping it".format(arg)
                    user_logger.warning(msg)
                else:
                    catalogue.add(target)
                    from_names += 1
            else:
                # Assume the argument is a target description string
                try:
                    catalogue.add(arg)
                    from_strings += 1
                except ValueError as err:
                    msg = "Invalid target {}, skipping it [{}]".format(arg, err)
                    user_logger.warning(msg)
    if len(catalogue) == 0:
        raise ValueError("No known targets found in argument list")
    msg = (
        "Found {} target(s): {} from {} catalogue(s), {} from default catalogue and "
        "{} as target string(s)".format(
            len(catalogue), from_catalogues, num_catalogues, from_names, from_strings
        )
    )
    user_logger.info(msg)
    return catalogue
# -fin-
| [
"ephem.now",
"katpoint.Target",
"numpy.asarray",
"os.path.isfile",
"datetime.datetime.now",
"numpy.deg2rad",
"os.path.isdir",
"katconf.resource_template",
"ephem.hours",
"katpoint.Antenna",
"katconf.resource_exists",
"katconf.ArrayConfig",
"datetime.timedelta",
"katpoint.Catalogue",
"kat... | [((476, 503), 'os.path.isdir', 'os.path.isdir', (['_config_path'], {}), '(_config_path)\n', (489, 503), False, 'import os\n'), ((8248, 8268), 'katpoint.Catalogue', 'katpoint.Catalogue', ([], {}), '()\n', (8266, 8268), False, 'import katpoint\n'), ((8293, 8324), 'katpoint.Antenna', 'katpoint.Antenna', (['_ref_location'], {}), '(_ref_location)\n', (8309, 8324), False, 'import katpoint\n'), ((581, 607), 'os.path.isfile', 'os.path.isfile', (['_node_file'], {}), '(_node_file)\n', (595, 607), False, 'import os\n'), ((2528, 2558), 'ephem.hours', 'ephem.hours', (['midnight_plus_one'], {}), '(midnight_plus_one)\n', (2539, 2558), False, 'import ephem\n'), ((3170, 3201), 'ephem.hours', 'ephem.hours', (['midnight_minus_one'], {}), '(midnight_minus_one)\n', (3181, 3201), False, 'import ephem\n'), ((4736, 4767), 'katpoint.Antenna', 'katpoint.Antenna', (['self.location'], {}), '(self.location)\n', (4752, 4767), False, 'import katpoint\n'), ((5097, 5119), 'numpy.deg2rad', 'numpy.deg2rad', (['horizon'], {}), '(horizon)\n', (5110, 5119), False, 'import numpy\n'), ((5144, 5155), 'ephem.now', 'ephem.now', ([], {}), '()\n', (5153, 5155), False, 'import ephem\n'), ((5579, 5602), 'katpoint.Target', 'katpoint.Target', (['target'], {}), '(target)\n', (5594, 5602), False, 'import katpoint\n'), ((532, 570), 'katconf.environ', 'katconf.environ', ([], {'override': '_config_path'}), '(override=_config_path)\n', (547, 570), False, 'import katconf\n'), ((3006, 3023), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3015, 3023), False, 'from datetime import datetime, timedelta\n'), ((4417, 4456), 'katconf.resource_exists', 'katconf.resource_exists', (['catalogue_file'], {}), '(catalogue_file)\n', (4440, 4456), False, 'import katconf\n'), ((4485, 4526), 'katconf.resource_template', 'katconf.resource_template', (['catalogue_file'], {}), '(catalogue_file)\n', (4510, 4526), False, 'import katconf\n'), ((2139, 2153), 'datetime.datetime.now', 'datetime.now', ([], {}), 
'()\n', (2151, 2153), False, 'from datetime import datetime, timedelta\n'), ((958, 998), 'katconf.environ', 'katconf.environ', (["_node_conf['configuri']"], {}), "(_node_conf['configuri'])\n", (973, 998), False, 'import katconf\n'), ((1523, 1544), 'katconf.ArrayConfig', 'katconf.ArrayConfig', ([], {}), '()\n', (1542, 1544), False, 'import katconf\n'), ((2426, 2446), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (2435, 2446), False, 'from datetime import datetime, timedelta\n'), ((3066, 3086), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (3075, 3086), False, 'from datetime import datetime, timedelta\n'), ((7153, 7190), 'numpy.asarray', 'numpy.asarray', (['start_lst'], {'dtype': 'float'}), '(start_lst, dtype=float)\n', (7166, 7190), False, 'import numpy\n'), ((7860, 7895), 'numpy.asarray', 'numpy.asarray', (['end_lst'], {'dtype': 'float'}), '(end_lst, dtype=float)\n', (7873, 7895), False, 'import numpy\n'), ((1453, 1474), 'katconf.ArrayConfig', 'katconf.ArrayConfig', ([], {}), '()\n', (1472, 1474), False, 'import katconf\n')] |
# -*- coding: utf-8 -*-
"""
Absolute spectral radiance calibration
"""
# Module importation
import os
import time
import string
import deepdish
import h5py
import numpy as np
import skimage.measure
import matplotlib.pyplot as plt
# Other modules
import source.processing as proccessing
from source.geometric_rolloff import MatlabGeometricMengine
# Functions
def radiance_planck(wavelength, T):
    """
    Planck black body radiance distribution.

    :param wavelength: wavelength grid in nm (converted internally to m)
    :param T: black body temperature in K
    :return: tuple of (spectral radiance per nm, radiance normalised to
        unit area over the wavelength grid)
    """
    planck_h = 6.62607015e-34
    light_c = 299792458
    boltzmann_k = 1.380649e-23
    lam = wavelength * 1e-9
    boltz_term = np.exp((planck_h * light_c) / (lam * boltzmann_k * T))
    radiance = 1e-9 * ((2 * planck_h * light_c ** 2) / (lam ** 5)) * (1 / (boltz_term - 1))
    return radiance, radiance / replica_trapz(wavelength, radiance)
def replica_trapz(x, y):
    """
    Trapezoidal integration of ``y`` over the grid ``x``.

    :param x: 1-d sample grid
    :param y: 1-d samples (scalar result) or 2-d with the integration
        axis first (one result per column)
    :return: the trapezoid-rule integral along the first axis
    """
    dx = x[1:] - x[:-1]
    if y.ndim == 1:
        return np.sum((y[1:] + y[:-1]) * dx / 2, axis=0)
    return np.sum((y[1:] + y[:-1]) * dx[:, None] / 2, axis=0)
if __name__ == "__main__":
# Instance of ProcessImage
process = proccessing.ProcessImage()
# Instance of FigureFunctions
ff = proccessing.FigureFunctions()
# General path to all data
path = process.folder_choice("/Volumes/MYBOOK/data-i360-tests/")
path_i360 = os.path.dirname(os.path.dirname(__file__))
# Choice of camera
while True:
answer = input("Which lens do you want to analyze? (c/f): ")
if answer.lower() in ["c", "f"]:
break
if answer.lower() == "c":
impath = path + "/nofilter/lensclose"
imlist = process.imageslist(impath)
imlistdark = process.imageslist_dark(impath, prefix="AMB")
geocalib = deepdish.io.load(path_i360 + "/geometric-calibration/calibrationfiles/geometric-calibration-air.h5", "/lens-close/20190104_192404/")
srdata = h5py.File(path_i360 + "/relative-spectral-response/calibrationfiles/rsr_20200610.h5", "r")
srdata = srdata["lens-close"]
elif answer.lower() == "f":
impath = path + "/nofilter/lensfar"
imlist = process.imageslist(impath)
imlistdark = process.imageslist_dark(impath, prefix="AMB")
geocalib = deepdish.io.load(path_i360 + "/geometric-calibration/calibrationfiles/geometric-calibration-air.h5", "/lens-far/20190104_214037/")
srdata = h5py.File(path_i360 + "/relative-spectral-response/calibrationfiles/rsr_20200710.h5", "r")
srdata = srdata["lens-far"]
else:
raise ValueError("Not valid choice.")
# Open spectrometer data
spectro = proccessing.FlameSpectrometer(path)
spectro.calibration_coefficient("light") # Calibration for absolute spectrum
_, spectral_rad_15, spectral_rad_15_unc, cops_wl, cops_val = spectro.source_spectral_radiance("labsphere", [589, 589, 589], 0)
w_s, spectral_rad_35, _, _, _ = spectro.source_spectral_radiance("labsphere", [589, 589, 589], 1)
_, spectral_rad_45, _ , _, _ = spectro.source_spectral_radiance("labsphere", [589, 589, 589], 2)
condwl = (w_s <= 700) & (w_s >= 400)
spectral_rad_norm_15 = spectral_rad_15 / replica_trapz(w_s, spectral_rad_15)
_, rad_planck_norm = radiance_planck(w_s, 2796)
# Effective radiance in bands
wl_rsr = srdata["wavelength"][:]
rsr = srdata["rsr_peak_norm"][:]
spectral_rad_source = np.interp(wl_rsr, w_s, spectral_rad_15)
effective_rad = replica_trapz(wl_rsr, rsr * spectral_rad_source[:, None]) / replica_trapz(wl_rsr, rsr)
effective_lambda = replica_trapz(wl_rsr, rsr * wl_rsr[:, None]) / replica_trapz(wl_rsr, rsr)
# i360 camera data
whichim = {"c": "close", "f": "far"}
imstack, exp, gain, blevel = process.imagestack(imlist, whichim[answer.lower()])
ambstack, _, _, _ = process.imagestack(imlistdark, whichim[answer.lower()])
imstack -= ambstack.mean(axis=2)[:, :, None]
imstack = np.clip(imstack, 0, None)
im_dws = process.dwnsampling(imstack.mean(axis=2), "RGGB")
geo = {}
for i in geocalib["fp"].keys():
geo[i] = MatlabGeometricMengine(geocalib["fp"][i], geocalib["ierror"][i])
channel_correspondance = {0: "red", 1: "green", 2: "blue"}
zenith_max = 5.0
imdownsampling = 250
# Pre-allocation
plt.style.use("../../figurestyle.mplstyle")
dn_avg = np.empty(3)
dn_std = np.empty(3)
fig, ax = plt.subplots(1, 3, sharey=True, figsize=ff.set_size())
for i in range(im_dws.shape[2]):
im = im_dws[:, :, i]
curr_geo = geo[channel_correspondance[i]]
_, z, _ = curr_geo.angular_coordinates()
maskdegree = z <= zenith_max
dn_avg[i] = im[maskdegree].mean()
dn_std[i] = im[maskdegree].std()
print(im[maskdegree].shape)
# Figure
region = skimage.measure.regionprops(maskdegree.astype(int))
draw_circle = plt.Circle((region[0].centroid[1], region[0].centroid[0]), region[0].equivalent_diameter / 2,
fill=False, linestyle=":")
imsh = ax[i].imshow(im) # vmin=dn_avg[i]*0.9, vmax=dn_avg[i]*1.1
ax[i].plot(curr_geo.center[0], curr_geo.center[1], "r+")
ax[i].add_artist(draw_circle)
cb = fig.colorbar(imsh, ax=ax[i], orientation="vertical", fraction=0.046, pad=0.04)
cb.ax.set_title("$DN_{i}$", fontsize=10)
mask_sphere = (im >= dn_avg[i]*0.9) & (im <= dn_avg[i]*1.1)
region_sphere = skimage.measure.regionprops(mask_sphere.astype(int))
ax[i].set_xlim((int(region_sphere[0].centroid[1] - imdownsampling), int(region_sphere[0].centroid[1] + imdownsampling)))
ax[i].set_ylim((int(region_sphere[0].centroid[0] - imdownsampling), int(region_sphere[0].centroid[0] + imdownsampling)))
ax[i].text(-0.1, 1.1, "(" + string.ascii_lowercase[i] + ")", transform=ax[i].transAxes, size=11, weight='bold')
ax[i].set_xlabel("$x$ [px]")
ax[0].set_ylabel("$y$ [px]")
coeff = effective_rad / (dn_avg / (exp[0] * gain[0] / 100))
print(coeff)
cvf = np.array([2.397e-8, 8.460e-9, 1.362e-8])
# Uncertainty on calibration coefficient
unc_effective_rad = np.interp(effective_lambda, w_s, spectral_rad_15_unc)
unc_coeff = np.sqrt((unc_effective_rad) ** 2 + (dn_std / dn_avg) ** 2)
# Figures
fig1, ax1 = plt.subplots(1, 2)
ax1[0].plot(w_s[condwl], spectral_rad_15[condwl], label="1.5 cm")
ax1[0].plot(w_s[condwl], spectral_rad_35[condwl], label="3.5 cm")
ax1[0].plot(w_s[condwl], spectral_rad_45[condwl], label="4.5 cm")
ax1[0].plot(effective_lambda, effective_rad, "o")
ax1[0].set_xlabel("wavelength [nm]")
ax1[0].set_ylabel("$L_{source}$ [$\mathrm{W \cdot sr^{-1} \cdot m^{-2} \cdot nm^{-1}}$]")
ax1[0].legend(loc="best")
ax1[1].plot(w_s[condwl], spectral_rad_norm_15[condwl], label="1.5 cm")
ax1[1].plot(w_s[condwl], rad_planck_norm[condwl], label="Planck $T = 2796$ K")
ax1[1].set_xlabel("wavelength [nm]")
ax1[1].set_ylabel("normalized radiance [$\mathrm{nm^{-1}}$]")
ax1[1].legend(loc="best")
# Figure 2
fig2, ax2 = plt.subplots(1, 1, figsize=ff.set_size(fraction=0.7))
ax2.plot(w_s[condwl], spectral_rad_15[condwl], color="k", label="$L_{source}(\lambda)$")
ax2.fill_between(w_s[condwl], spectral_rad_15[condwl] * (1 - spectral_rad_15_unc[condwl]),
spectral_rad_15[condwl] * (1 + spectral_rad_15_unc[condwl]), color="gray", alpha=0.6)
ax2.plot(cops_wl, cops_val, color="k", marker="^", markersize=6, linestyle="None", markeredgecolor="k",
markerfacecolor="none", label="C-OPS at 589 nm")
ax2.errorbar(effective_lambda, effective_rad, xerr=replica_trapz(wl_rsr, rsr) / 2, color="k", marker="o",
markersize=5, linestyle="None", markeredgecolor="k", markerfacecolor="none",
label="$\overline{L}_{i, source}$")
ax2.set_yscale("log")
ax2.set_xlabel("Wavelength [nm]")
ax2.set_ylabel("$L~[\mathrm{W \cdot sr^{-1} \cdot m^{-2} \cdot nm^{-1}}]$")
ax2.legend(loc='lower right')
# Figure 3 - uncertainties of the Ocean Optic calibration source
fig3, ax4 = plt.subplots(1, 1, figsize=ff.set_size())
ax4.plot(spectro.oo_lamp_uncertainty[:, 0], spectro.oo_lamp_uncertainty[:, 1] * 100,)
ax4.set_xlabel("Wavelength [nm]")
ax4.set_ylabel("Uncertainty (k=1)[%]")
# Saving figure
fig.tight_layout()
fig1.tight_layout()
fig2.tight_layout()
# Saving results
save_answer = process.save_results()
if save_answer == "y":
filename = "absolute_radiance" + ".h5"
pathname = "calibrationfiles/" + filename
timestr = time.strftime("%Y%m%d", time.localtime(os.stat(imlist[0])[-1]))
correspond_optic = {"c": "close", "f": "far"}
if answer.lower() == "c":
process.create_hdf5_dataset(pathname, "lens-close/" + timestr, "cal-coefficients", coeff)
else:
process.create_hdf5_dataset(pathname, "lens-far/" + timestr, "cal-coefficients", coeff)
fig.savefig("figures/output_sphere_{}.pdf".format(correspond_optic[answer.lower()]), format="pdf", dpi=600, bbox_inches='tight')
fig2.savefig("figures/spectral_radiance_{}.pdf".format(correspond_optic[answer.lower()]), format="pdf", dpi=600, bbox_inches='tight')
plt.show()
| [
"numpy.clip",
"numpy.sqrt",
"numpy.array",
"source.processing.FigureFunctions",
"matplotlib.pyplot.style.use",
"numpy.exp",
"numpy.empty",
"matplotlib.pyplot.Circle",
"source.processing.ProcessImage",
"h5py.File",
"source.processing.FlameSpectrometer",
"os.path.dirname",
"numpy.interp",
"s... | [((613, 643), 'numpy.exp', 'np.exp', (['(h * c / (lamb * k * T))'], {}), '(h * c / (lamb * k * T))\n', (619, 643), True, 'import numpy as np\n'), ((1117, 1143), 'source.processing.ProcessImage', 'proccessing.ProcessImage', ([], {}), '()\n', (1141, 1143), True, 'import source.processing as proccessing\n'), ((1188, 1217), 'source.processing.FigureFunctions', 'proccessing.FigureFunctions', ([], {}), '()\n', (1215, 1217), True, 'import source.processing as proccessing\n'), ((2623, 2658), 'source.processing.FlameSpectrometer', 'proccessing.FlameSpectrometer', (['path'], {}), '(path)\n', (2652, 2658), True, 'import source.processing as proccessing\n'), ((3386, 3425), 'numpy.interp', 'np.interp', (['wl_rsr', 'w_s', 'spectral_rad_15'], {}), '(wl_rsr, w_s, spectral_rad_15)\n', (3395, 3425), True, 'import numpy as np\n'), ((3924, 3949), 'numpy.clip', 'np.clip', (['imstack', '(0)', 'None'], {}), '(imstack, 0, None)\n', (3931, 3949), True, 'import numpy as np\n'), ((4281, 4324), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""../../figurestyle.mplstyle"""'], {}), "('../../figurestyle.mplstyle')\n", (4294, 4324), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4350), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (4347, 4350), True, 'import numpy as np\n'), ((4364, 4375), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (4372, 4375), True, 'import numpy as np\n'), ((6047, 6089), 'numpy.array', 'np.array', (['[2.397e-08, 8.46e-09, 1.362e-08]'], {}), '([2.397e-08, 8.46e-09, 1.362e-08])\n', (6055, 6089), True, 'import numpy as np\n'), ((6158, 6211), 'numpy.interp', 'np.interp', (['effective_lambda', 'w_s', 'spectral_rad_15_unc'], {}), '(effective_lambda, w_s, spectral_rad_15_unc)\n', (6167, 6211), True, 'import numpy as np\n'), ((6228, 6284), 'numpy.sqrt', 'np.sqrt', (['(unc_effective_rad ** 2 + (dn_std / dn_avg) ** 2)'], {}), '(unc_effective_rad ** 2 + (dn_std / dn_avg) ** 2)\n', (6235, 6284), True, 'import numpy as np\n'), ((6318, 6336), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (6330, 6336), True, 'import matplotlib.pyplot as plt\n'), ((9313, 9323), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9321, 9323), True, 'import matplotlib.pyplot as plt\n'), ((922, 965), 'numpy.sum', 'np.sum', (['((y[1:] + y[:-1]) * diff / 2)'], {'axis': '(0)'}), '((y[1:] + y[:-1]) * diff / 2, axis=0)\n', (928, 965), True, 'import numpy as np\n'), ((989, 1041), 'numpy.sum', 'np.sum', (['((y[1:] + y[:-1]) * diff[:, None] / 2)'], {'axis': '(0)'}), '((y[1:] + y[:-1]) * diff[:, None] / 2, axis=0)\n', (995, 1041), True, 'import numpy as np\n'), ((1351, 1376), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1366, 1376), False, 'import os\n'), ((1756, 1896), 'deepdish.io.load', 'deepdish.io.load', (["(path_i360 +\n '/geometric-calibration/calibrationfiles/geometric-calibration-air.h5')", '"""/lens-close/20190104_192404/"""'], {}), "(path_i360 +\n '/geometric-calibration/calibrationfiles/geometric-calibration-air.h5',\n '/lens-close/20190104_192404/')\n", (1772, 1896), False, 'import deepdish\n'), ((1907, 2001), 'h5py.File', 'h5py.File', (["(path_i360 + '/relative-spectral-response/calibrationfiles/rsr_20200610.h5')", '"""r"""'], {}), "(path_i360 +\n '/relative-spectral-response/calibrationfiles/rsr_20200610.h5', 'r')\n", (1916, 2001), False, 'import h5py\n'), ((4080, 4144), 'source.geometric_rolloff.MatlabGeometricMengine', 'MatlabGeometricMengine', (["geocalib['fp'][i]", "geocalib['ierror'][i]"], {}), "(geocalib['fp'][i], geocalib['ierror'][i])\n", (4102, 4144), False, 'from source.geometric_rolloff import MatlabGeometricMengine\n'), ((4881, 5006), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(region[0].centroid[1], region[0].centroid[0])', '(region[0].equivalent_diameter / 2)'], {'fill': '(False)', 'linestyle': '""":"""'}), "((region[0].centroid[1], region[0].centroid[0]), region[0].\n equivalent_diameter / 2, fill=False, linestyle=':')\n", (4891, 
5006), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2384), 'deepdish.io.load', 'deepdish.io.load', (["(path_i360 +\n '/geometric-calibration/calibrationfiles/geometric-calibration-air.h5')", '"""/lens-far/20190104_214037/"""'], {}), "(path_i360 +\n '/geometric-calibration/calibrationfiles/geometric-calibration-air.h5',\n '/lens-far/20190104_214037/')\n", (2262, 2384), False, 'import deepdish\n'), ((2395, 2489), 'h5py.File', 'h5py.File', (["(path_i360 + '/relative-spectral-response/calibrationfiles/rsr_20200710.h5')", '"""r"""'], {}), "(path_i360 +\n '/relative-spectral-response/calibrationfiles/rsr_20200710.h5', 'r')\n", (2404, 2489), False, 'import h5py\n'), ((8697, 8715), 'os.stat', 'os.stat', (['imlist[0]'], {}), '(imlist[0])\n', (8704, 8715), False, 'import os\n')] |
# coding: utf-8
import datetime
import glob
import multiprocessing as mp
import os
import queue
import random
import threading
import keras.backend.tensorflow_backend as KTF
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.applications.resnet50 import preprocess_input
from keras.applications.resnet50 import ResNet50
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import TensorBoard
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import load_model
from keras.models import Model
from keras.models import model_from_json
from keras.preprocessing import image
# get a GPU session and reserve memory
def get_session(gpu_fraction=0.3):
    """Build a TensorFlow session that reserves only a fraction of GPU memory.

    gpu_fraction: share of the total GPU memory to allocate
        (e.g. 0.3 of a 6GB card ~ 2GB).
    Returns a ``tf.Session`` configured with that memory cap and, when the
    ``OMP_NUM_THREADS`` environment variable is set, a matching intra-op
    thread count.
    """
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        config = tf.ConfigProto(gpu_options=gpu_options,
                                intra_op_parallelism_threads=num_threads)
    else:
        config = tf.ConfigProto(gpu_options=gpu_options)
    return tf.Session(config=config)
# load an image from disk
def load_image(img_path):
    """Read an image from disk and turn it into a preprocessed batch of one.

    img_path: path to the image file.
    Returns a (1, 299, 299, 3) array ready for a ResNet-style model.
    """
    pil_img = image.load_img(img_path, target_size=(299, 299))
    batch = np.expand_dims(image.img_to_array(pil_img), axis=0)
    return preprocess_input(batch)
# use an image with a model to get features
def image_features(img, model, length=2048):
    """Run *model* on a prepared image batch and return its predictions.

    img: preprocessed input accepted by ``model.predict``.
    model: any object exposing a Keras-style ``predict`` method.
    length: kept for backward compatibility; the output size is determined
        by the model itself, so this value is unused.
    """
    # Fix: the original pre-allocated a (1, length) float16 buffer that was
    # never read or written; the prediction is simply returned directly.
    return model.predict(img)
def image_class_generator(image_classes_mapping, num_classes, batch_size=32):
    """Endlessly yield (images, one-hot labels) batches sampled at random.

    image_classes_mapping: dict mapping file path -> integer class index.
    num_classes: width of the one-hot label vectors.
    batch_size: number of files drawn (without replacement) per batch.
    """
    filenames = list(image_classes_mapping)
    while True:
        batch = random.sample(filenames, batch_size)
        images = [load_image(name) for name in batch]
        labels = []
        for name in batch:
            one_hot = np.zeros(num_classes)
            one_hot[image_classes_mapping[name]] = 1
            labels.append(one_hot)
        yield (np.array(images).squeeze(), np.array(labels))
def buffered_gen_mp(source_gen, buffer_size=2, num_processes=4):
    """
    Generator that runs a slow source generator in a separate process.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    num_processes: number of worker processes started against the same
        source generator.

    NOTE(review): every worker receives the *same* generator object and each
    worker puts its own ``None`` sentinel, while the consumer loop below stops
    at the *first* ``None`` it sees — with num_processes > 1 this looks like it
    can end iteration early and leave workers running; confirm intended
    behavior before reuse.
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    buffer = mp.Queue(maxsize=buffer_size - 1)
    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the
    # buffer.
    def _buffered_generation_process(source_gen, buffer):
        # Worker: drain the source generator into the shared queue, blocking
        # whenever the queue is full.
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None) # sentinel: signal the end of the iterator
        buffer.close() # unfortunately this does not suffice as a signal: if buffer.get()
        # was called and subsequently the buffer is closed, it will block
        # forever.
    for _ in range(num_processes):
        process = mp.Process(
            target=_buffered_generation_process, args=(source_gen, buffer))
        process.start()
    # Consume until the first sentinel arrives.
    for data in iter(buffer.get, None):
        yield data
def save_model_workaround(model, model_output_file, weights_output_file):
    """Persist a model as a JSON architecture file plus a separate weights file.

    model: object exposing Keras-style ``to_json()`` and ``save_weights(path)``.
    model_output_file: destination path for the JSON architecture.
    weights_output_file: destination path for the HDF5 weights.
    """
    print('saving model to {}'.format(model_output_file))
    print('saving weignts to {}'.format(weights_output_file))
    # Architecture goes to JSON ...
    serialized = model.to_json()
    with open(model_output_file, 'w') as handle:
        handle.write(serialized)
    # ... and the weights to a separate file.
    model.save_weights(weights_output_file)
def load_model_workaround(model_output_file, weights_output_file):
    """Rebuild a model from a JSON architecture file and a separate weights file.

    model_output_file: path to the JSON file written by save_model_workaround.
    weights_output_file: path to the matching weights file.
    Returns the reconstructed model with its weights loaded.
    """
    # Fix: the original used manual open()/close(), leaking the handle if
    # read() raised; a context manager guarantees the file is closed.
    with open(model_output_file, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(weights_output_file)
    return loaded_model
def prep_datasets(basepath):
    """Scan a dataset directory laid out as <basepath>/<class_number>/<image files>.

    basepath: root directory whose subdirectories are named with integer
        class labels.
    Returns a tuple (number_of_classes, set_of_class_numbers,
    {image_path: class_number}).
    """
    class_numbers = set()
    path_to_class = {}
    for entry in glob.glob(os.path.join(basepath, '*')):
        if not os.path.isdir(entry):
            continue
        label = int(os.path.basename(entry))
        class_numbers.add(label)
        for image_path in glob.glob(os.path.join(entry, '*')):
            path_to_class[image_path] = label
    print('basepath:{0},image_classes:{1}, number_images:{2}'.format(
        basepath, len(class_numbers), len(path_to_class)))
    return (len(class_numbers), class_numbers, path_to_class)
def make_callbacks(model_checkpoint_format_string, tensor_board_log_dir):
    """Assemble the list of Keras training callbacks.

    model_checkpoint_format_string: path pattern for checkpoint files, or
        None to skip checkpointing.
    tensor_board_log_dir: TensorBoard log directory, or None to skip it.
    A learning-rate scheduler and an early-stopping callback are always
    appended.
    """
    callbacks = []
    if model_checkpoint_format_string is not None:
        checkpoint = ModelCheckpoint(model_checkpoint_format_string,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=True,
                                     save_weights_only=False,
                                     mode='min',
                                     period=1)
        callbacks.append(checkpoint)
    if tensor_board_log_dir is not None:
        board = TensorBoard(log_dir=tensor_board_log_dir,
                            histogram_freq=0,
                            write_graph=True,
                            write_images=False)
        callbacks.append(board)
    callbacks.append(ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.1,
                                       patience=10,
                                       verbose=1,
                                       mode='auto',
                                       epsilon=0.0001,
                                       cooldown=0,
                                       min_lr=0))
    callbacks.append(EarlyStopping(monitor='val_acc',
                                   min_delta=0.003,
                                   patience=4,
                                   verbose=1,
                                   mode='max'))
    return callbacks
# # Start the session
# verbose = 0 -> nothing
#           1 -> progress bar
#           2 -> one line per epoch
def do_training(training_basepath,
                validation_basepath,
                model_output_file,
                weights_output_file,
                tensor_board_log_dir=None,
                model_checkpoint_format_string=None,
                batch_size=32,
                verbose=1,
                divisor=10):
    """Fine-tune a ResNet50 classifier on an image-folder dataset.

    Two phases: first only the new top layers are trained (base frozen),
    then every layer is unfrozen and training continues. The final model is
    saved via save_model_workaround.

    training_basepath / validation_basepath: directories laid out as
        <base>/<class_number>/<images>, consumed by prep_datasets.
    model_output_file / weights_output_file: destinations for the saved model.
    tensor_board_log_dir, model_checkpoint_format_string: optional callback
        configuration, forwarded to make_callbacks.
    batch_size: images per generator batch.
    verbose: Keras fit verbosity (0 nothing, 1 progress bar, 2 per-epoch).
    divisor: shrinks the per-epoch sample count to 1/divisor of the dataset.
    Returns the Keras History of the second (full) training phase.

    NOTE(review): uses legacy Keras 1.x argument names (samples_per_epoch,
    nb_epoch, nb_val_samples, Model(input=..., output=...)) — this code
    targets an old Keras version.
    """
    # # Start the sesion
    KTF.set_session(get_session(.95))
    # # get data ready to process
    num_training_classes, training_image_classes, training_image_class_mapping = prep_datasets(
        training_basepath)
    num_validation_classes, validation_image_classes, validation_image_class_mapping = prep_datasets(
        validation_basepath)
    # # Create a new model with the right number of targets and pretrain top
    base_model = ResNet50(weights='imagenet', include_top=False)
    # add a global spatial average pooling layer
    my_layer = base_model.output
    my_layer = GlobalAveragePooling2D()(my_layer)
    # let's add a fully-connected my_layer
    my_layer = Dense(1024, activation='relu')(my_layer)
    # brad said this would make things work better, but stuff would still be
    # messed up.
    my_layer = BatchNormalization()(my_layer)
    predictions = Dense(num_training_classes, activation='softmax')(my_layer)
    model = Model(input=base_model.input, output=predictions)
    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False
    # compile the model (should be done *after* setting layers to
    # non-trainable)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy', metrics=['accuracy'])
    callbacks = make_callbacks(
        model_checkpoint_format_string, tensor_board_log_dir)
    # Wrap the sampling generators in multiprocessing buffers so batch
    # preparation overlaps with GPU training.
    train_buffered_generator_mp = buffered_gen_mp(image_class_generator(training_image_class_mapping,
                                                                         num_training_classes,
                                                                         batch_size),
                                                   buffer_size=20,
                                                   num_processes=5)
    val_buffered_generator_mp = buffered_gen_mp(image_class_generator(validation_image_class_mapping,
                                                                       num_validation_classes,
                                                                       batch_size),
                                                 buffer_size=20,
                                                 num_processes=5)
    # # Train
    samples_per_epoch = int(len(training_image_class_mapping) / divisor)
    print('sample_per_epoch:', samples_per_epoch)
    start = datetime.datetime.time(datetime.datetime.now())
    print('started at {0}'.format(start))
    # Phase 1: train only the randomly-initialized top layers.
    fixed_history = model.fit_generator(train_buffered_generator_mp,
                                        samples_per_epoch=samples_per_epoch,
                                        nb_epoch=30,
                                        callbacks=callbacks,
                                        nb_val_samples=10000,
                                        validation_data=val_buffered_generator_mp,
                                        verbose=verbose)
    finish = datetime.datetime.time(datetime.datetime.now())
    print('finished at {0}'.format(finish))
    # # Make all layers trainable
    for layer in model.layers:
        try:
            if not layer.trainable:
                layer.trainable = True
        except:
            print('error layer is stuck:', layer.name)
    # Recompile so the unfrozen layers take effect.
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy', metrics=['accuracy'])
    # # Continue training
    callbacks = make_callbacks(
        model_checkpoint_format_string, tensor_board_log_dir)
    start = datetime.datetime.time(datetime.datetime.now())
    print('started at {0}'.format(start))
    samples_per_epoch = int(len(training_image_class_mapping) / divisor)
    # Phase 2: fine-tune the whole network.
    full_history = model.fit_generator(train_buffered_generator_mp,
                                       samples_per_epoch=samples_per_epoch,
                                       nb_epoch=250,
                                       callbacks=callbacks,
                                       nb_val_samples=10000,
                                       validation_data=val_buffered_generator_mp,
                                       verbose=verbose)
    finish = datetime.datetime.time(datetime.datetime.now())
    print('finished at {0}'.format(finish))
    save_model_workaround(model, model_output_file, weights_output_file)
    KTF.clear_session()
    return full_history
| [
"keras.preprocessing.image.img_to_array",
"multiprocessing.Process",
"keras.applications.resnet50.preprocess_input",
"numpy.array",
"keras.layers.Dense",
"tensorflow.GPUOptions",
"os.path.isdir",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.layers.GlobalAveragePooling2D",
"tenso... | [((980, 1013), 'os.environ.get', 'os.environ.get', (['"""OMP_NUM_THREADS"""'], {}), "('OMP_NUM_THREADS')\n", (994, 1013), False, 'import os\n'), ((1032, 1091), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_fraction'}), '(per_process_gpu_memory_fraction=gpu_fraction)\n', (1045, 1091), True, 'import tensorflow as tf\n'), ((1390, 1438), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(299, 299)'}), '(img_path, target_size=(299, 299))\n', (1404, 1438), False, 'from keras.preprocessing import image\n'), ((1447, 1470), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1465, 1470), False, 'from keras.preprocessing import image\n'), ((1479, 1504), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1493, 1504), True, 'import numpy as np\n'), ((1513, 1532), 'keras.applications.resnet50.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1529, 1532), False, 'from keras.applications.resnet50 import preprocess_input\n'), ((1652, 1691), 'numpy.zeros', 'np.zeros', (['(1, length)'], {'dtype': 'np.float16'}), '((1, length), dtype=np.float16)\n', (1660, 1691), True, 'import numpy as np\n'), ((2630, 2663), 'multiprocessing.Queue', 'mp.Queue', ([], {'maxsize': '(buffer_size - 1)'}), '(maxsize=buffer_size - 1)\n', (2638, 2663), True, 'import multiprocessing as mp\n'), ((4094, 4128), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (4109, 4128), False, 'from keras.models import model_from_json\n'), ((7318, 7365), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (7326, 7365), False, 'from keras.applications.resnet50 import ResNet50\n'), ((7832, 7881), 'keras.models.Model', 'Model', ([], {'input': 'base_model.input', 'output': 'predictions'}), 
'(input=base_model.input, output=predictions)\n', (7837, 7881), False, 'from keras.models import Model\n'), ((11282, 11301), 'keras.backend.tensorflow_backend.clear_session', 'KTF.clear_session', ([], {}), '()\n', (11299, 11301), True, 'import keras.backend.tensorflow_backend as KTF\n'), ((2048, 2085), 'random.sample', 'random.sample', (['image_list', 'batch_size'], {}), '(image_list, batch_size)\n', (2061, 2085), False, 'import random\n'), ((3268, 3342), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_buffered_generation_process', 'args': '(source_gen, buffer)'}), '(target=_buffered_generation_process, args=(source_gen, buffer))\n', (3278, 3342), True, 'import multiprocessing as mp\n'), ((4366, 4393), 'os.path.join', 'os.path.join', (['basepath', '"""*"""'], {}), "(basepath, '*')\n", (4378, 4393), False, 'import os\n'), ((4407, 4442), 'os.path.isdir', 'os.path.isdir', (['image_class_filepath'], {}), '(image_class_filepath)\n', (4420, 4442), False, 'import os\n'), ((5772, 5900), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(10)', 'verbose': '(1)', 'mode': '"""auto"""', 'epsilon': '(0.0001)', 'cooldown': '(0)', 'min_lr': '(0)'}), "(monitor='val_loss', factor=0.1, patience=10, verbose=1,\n mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)\n", (5789, 5900), False, 'from keras.callbacks import ReduceLROnPlateau\n'), ((6193, 6281), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0.003)', 'patience': '(4)', 'verbose': '(1)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0.003, patience=4, verbose=1,\n mode='max')\n", (6206, 6281), False, 'from keras.callbacks import EarlyStopping\n'), ((7464, 7488), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (7486, 7488), False, 'from keras.layers import GlobalAveragePooling2D\n'), ((7558, 7588), 'keras.layers.Dense', 'Dense', (['(1024)'], 
{'activation': '"""relu"""'}), "(1024, activation='relu')\n", (7563, 7588), False, 'from keras.layers import Dense\n'), ((7709, 7729), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (7727, 7729), False, 'from keras.layers.normalization import BatchNormalization\n'), ((7759, 7808), 'keras.layers.Dense', 'Dense', (['num_training_classes'], {'activation': '"""softmax"""'}), "(num_training_classes, activation='softmax')\n", (7764, 7808), False, 'from keras.layers import Dense\n'), ((9370, 9393), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9391, 9393), False, 'import datetime\n'), ((9936, 9959), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9957, 9959), False, 'import datetime\n'), ((10499, 10522), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10520, 10522), False, 'import datetime\n'), ((11134, 11157), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11155, 11157), False, 'import datetime\n'), ((2147, 2168), 'numpy.zeros', 'np.zeros', (['num_classes'], {}), '(num_classes)\n', (2155, 2168), True, 'import numpy as np\n'), ((5083, 5229), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_checkpoint_format_string'], {'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(model_checkpoint_format_string, monitor='loss', verbose=1,\n save_best_only=True, save_weights_only=False, mode='min', period=1)\n", (5098, 5229), False, 'from keras.callbacks import ModelCheckpoint\n'), ((5540, 5642), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'tensor_board_log_dir', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(False)'}), '(log_dir=tensor_board_log_dir, histogram_freq=0, write_graph=\n True, write_images=False)\n', (5551, 5642), False, 'from keras.callbacks import TensorBoard\n'), ((1146, 1232), 
'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'intra_op_parallelism_threads': 'num_threads'}), '(gpu_options=gpu_options, intra_op_parallelism_threads=\n num_threads)\n', (1160, 1232), True, 'import tensorflow as tf\n'), ((1285, 1324), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (1299, 1324), True, 'import tensorflow as tf\n'), ((2284, 2296), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (2292, 2296), True, 'import numpy as np\n'), ((4478, 4516), 'os.path.basename', 'os.path.basename', (['image_class_filepath'], {}), '(image_class_filepath)\n', (4494, 4516), False, 'import os\n'), ((4603, 4642), 'os.path.join', 'os.path.join', (['image_class_filepath', '"""*"""'], {}), "(image_class_filepath, '*')\n", (4615, 4642), False, 'import os\n'), ((2260, 2272), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (2268, 2272), True, 'import numpy as np\n')] |
import numpy as np
import warnings
import cv2
import time
def Gamma_correction(low_frequency, alpha=0.5):
    """Rescale the low-frequency component with a cosine-driven gamma curve.

    low_frequency: the low-frequency component of the image computed with
        the Shearlet transform.
    alpha: adjustment factor in (0, 1); typically between 0.4 and 0.6.
    Returns the corrected coefficients, mapped back to the original maximum.
    """
    hi = np.max(low_frequency)
    lo = np.min(low_frequency)
    normalized = (low_frequency - lo) / (hi - lo)
    # Gamma varies per pixel: brighter regions get a smaller exponent.
    gamma = 1.0 + alpha * np.cos(np.pi * normalized)
    return hi * normalized ** (1.0 / gamma)
def nonuniform_correction(low_frequancy):
    """Flatten non-uniform illumination using the ratio of two morphological openings.

    low_frequancy: 2-D low-frequency image component.
    Returns the corrected component (opening ratio times the input).
    """
    height, width = low_frequancy.shape
    longest_side = np.max([height, width])
    # Kernel sizes scale with the image; the large kernel is twice the small one.
    size_small = np.int64(longest_side / 150)
    size_large = np.int64(longest_side / 75)
    print('kernel1_size = {}, kernel2_size = {}'.format(size_small, size_large))
    kernel_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size_small, size_small))
    kernel_large = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size_large, size_large))
    opened_small = cv2.morphologyEx(low_frequancy, cv2.MORPH_OPEN, kernel_small)
    opened_large = cv2.morphologyEx(low_frequancy, cv2.MORPH_OPEN, kernel_large)
    return opened_large / opened_small * low_frequancy
def linearly_enhancing_details(alphas, numOfScales=2, window_scale=3):
    """Linearly enhance the high-frequency shearlet coefficients.

    :param alphas: high-frequency component of the image from the Shearlet
        transform; an i * j * kl array where i, j match the image height and
        width and kl depends on the number of scales and directions per
        scale (kl = 4 + 8 + 16 + ... for 1, 2, 3... scales).
    :param numOfScales: number of scales of high-frequency coefficients
        (expected to be 1, 2 or 3).
    :param window_scale: side length of the local averaging window.
    :return: the enhanced coefficients (same shape as ``alphas``), obtained
        from a guided-filter-like local linear model gamma = pm*alphas + qm.
    """
    # the gain factor of each scale
    if numOfScales > 3 or numOfScales < 1:
        print('Number of Scale mast be in the set of {1, 2, 3} !!!')
    gain_weight = np.array([0.9, 1.0, 1.1, 1.2, 1.5, 3.0])
    # Regularization constant damping the local linear coefficients.
    lam = 1000
    # calculate the gain factor vector W: scale i contributes 2**(i+1)
    # channels, each weighted by gain_weight[i].
    w_array = []
    for i in range(1, numOfScales + 1):
        w_array = np.concatenate((w_array, gain_weight[i] * np.ones(2 ** (i + 1))))
    betas = alphas * w_array
    a_times_b = alphas * betas
    # Box kernel for local means over window_scale x window_scale patches.
    kernel = np.ones((window_scale, window_scale))
    numerator_ab_part = 1.0 / (window_scale**2) * cv2.filter2D(a_times_b, -1, kernel=kernel)
    # NOTE(review): mu0/mu1 use the un-normalized box kernel (sums, not
    # means) while numerator_ab_part is normalized — confirm intended.
    mu0 = cv2.filter2D(alphas, -1, kernel=kernel)
    mu1 = cv2.filter2D(betas, -1, kernel=kernel)
    #mu1 = mu0 * w_array
    # fast method to calculate the local standard deviation (mean(img^2) - mean(img)^2)
    # instead of using the original method std = 1/n x sum(Xn - mean)
    sigma2 = cv2.filter2D(alphas**2, -1, kernel) - (cv2.filter2D(alphas, -1, kernel)) ** 2
    # Local linear model: slope pm and intercept qm per pixel.
    pm = (numerator_ab_part - mu0 * mu1) / (lam + sigma2)
    qm = mu1 - pm * mu0
    gammai = pm * alphas + qm
    return gammai
def normalization(image):
    """Linearly rescale *image* so its values span [0.0, 1.0]."""
    lowest = np.min(image)
    span = np.max(image) - lowest
    return (image - lowest) / span
# ===========================================================================================
# Shearlet Transformation stuff
# ===========================================================================================
from meyerShearlet import (meyerShearletSpect, meyeraux, kutyniokaux, gaussian)
def _defaultNumberOfScales(l):
numOfScales = int(np.floor(0.5 * np.log2(np.max(l))))
if numOfScales < 1:
raise ValueError('image too small!')
return numOfScales
def scalesShearsAndSpectra(shape, numOfScales=None,
                           realCoefficients=True, maxScale='max',
                           shearletSpect=meyerShearletSpect,
                           shearletArg=kutyniokaux, realReal=True,
                           fftshift_spectra=True):
    """ Compute the shearlet spectra of a given shape and number of scales.
    The number of scales and a boolean indicating real or complex shearlets
    are optional parameters.
    Parameters
    ----------
    shape : array-like
        dimensions of the image
    numOfScales : int
        number of scales
    realCoefficients : bool
        Controls whether real or complex shearlets are generated.
    shearletSpect : string or handle
        shearlet spectrum
    shearletArg : callable
        auxiliary function forwarded to the shearlet spectrum
    realReal : bool
        guarantee truly real shearlets
    maxScale : {'max', 'min'}, optional
        maximal or minimal finest scale
    fftshift_spectra : bool
        apply an inverse fftshift to the spectra before returning
    Returns
    -------
    Psi : ndarray
        Shearlets in the Fourier domain, shape ``shape + (num_shearlets,)``
        with the low-pass in channel 0.
    """
    if len(shape) != 2:
        raise ValueError("2D image dimensions required")
    if numOfScales is None:
        numOfScales = _defaultNumberOfScales(shape)
    # rectangular images
    if shape[1] != shape[0]:
        rectangular = True
    else:
        rectangular = False
    # for better symmetry each dimensions of the array should be odd
    shape = np.asarray(shape)
    shape_orig = shape.copy()
    shapem = np.mod(shape, 2) == 0  # True for even sized axes
    both_even = np.all(np.equal(shapem, False))
    both_odd = np.all(np.equal(shapem, True))
    # Bump even axes to odd; the output is cropped back to shape_orig below.
    shape[shapem] += 1
    if not realCoefficients:
        warnings.warn("Complex shearlet case may be buggy. Doesn't "
                      "currently give perfect reconstruction.")
    if not (both_even or both_odd):
        # for some reason reconstruction is not exact in this case, so don't
        # allow it for now.
        raise ValueError("Mixture of odd and even axis sizes is currently "
                         "unsupported.")
    # create meshgrid
    # largest value where psi_1 is equal to 1
    maxScale = maxScale.lower()
    if maxScale == 'max':
        X = 2**(2 * (numOfScales - 1) + 1)
    elif maxScale == 'min':
        X = 2**(2 * (numOfScales - 1))
    else:
        raise ValueError('Wrong option for maxScale, must be "max" or "min"')
    # Symmetric frequency axis from -X to X.
    xi_x_init = np.linspace(0, X, (shape[1] + 1) // 2)
    xi_x_init = np.concatenate((-xi_x_init[-1:0:-1], xi_x_init), axis=0)
    if rectangular:
        xi_y_init = np.linspace(0, X, (shape[0] + 1) // 2)
        xi_y_init = np.concatenate((-xi_y_init[-1:0:-1], xi_y_init), axis=0)
    else:
        xi_y_init = xi_x_init
    # create grid, from left to right, bottom to top
    [xi_x, xi_y] = np.meshgrid(xi_x_init, xi_y_init[::-1], indexing='xy')
    # cones
    C_hor = np.abs(xi_x) >= np.abs(xi_y)  # with diag
    C_ver = np.abs(xi_x) < np.abs(xi_y)
    # number of shears: |-2^j,...,0,...,2^j| = 2 * 2^j + 1
    # now: inner shears for both cones:
    # |-(2^j-1),...,0,...,2^j-1|
    # = 2 * (2^j - 1) + 1
    # = 2^(j+1) - 2 + 1 = 2^(j+1) - 1
    # outer scales: 2 ("one" for each cone)
    # shears for each scale: hor: 2^(j+1) - 1, ver: 2^(j+1) - 1, diag: 2
    # -> hor + ver + diag = 2*(2^(j+1) - 1) +2 = 2^(j + 2)
    # + 1 for low-pass
    shearsPerScale = 2**(np.arange(numOfScales) + 2)
    numOfAllShears = 1 + shearsPerScale.sum()
    # init
    Psi = np.zeros(tuple(shape) + (numOfAllShears, ))
    # frequency domain:
    # k 2^j 0 -2^j
    #
    # 4 3 2 -2^j
    # \ | /
    # (5)- x -1 0
    # / | \
    # 2^j
    #
    # [0:-1:-2^j][-2^j:1:2^j][2^j:-1:1] (not 0)
    # hor ver hor
    #
    # start with shear -2^j (insert in index 2^j+1 (with transposed
    # added)) then continue with increasing scale. Save to index 2^j+1 +- k,
    # if + k save transposed. If shear 0 is reached save -k starting from
    # the end (thus modulo). For + k just continue.
    #
    # then in time domain:
    #
    # 2 1 8
    # \ | /
    # 3- x -7
    # / | \
    # 4 5 6
    #
    # lowpass
    Psi[:, :, 0] = shearletSpect(xi_x, xi_y, np.NaN, np.NaN, realCoefficients,
                                 shearletArg, scaling_only=True)
    # loop for each scale
    for j in range(numOfScales):
        # starting index
        idx = 2**j
        start_index = 1 + shearsPerScale[:j].sum()
        shift = 1
        for k in range(-2**j, 2**j + 1):
            # shearlet spectrum
            P_hor = shearletSpect(xi_x, xi_y, 2**(-2 * j), k * 2**(-j),
                                  realCoefficients, shearletArg)
            if rectangular:
                P_ver = shearletSpect(xi_y, xi_x, 2**(-2 * j), k * 2**(-j),
                                      realCoefficients, shearletArg)
            else:
                # the matrix is supposed to be mirrored at the counter
                # diagonal
                # P_ver = fliplr(flipud(P_hor'))
                P_ver = np.rot90(P_hor, 2).T  # TODO: np.conj here too?
                if not realCoefficients:
                    # workaround to cover left-upper part
                    P_ver = np.rot90(P_ver, 2)
            if k == -2**j:
                # Outermost (diagonal) shear: combine both cones.
                Psi[:, :, start_index + idx] = P_hor * C_hor + P_ver * C_ver
            elif k == 2**j:
                Psi_idx = start_index + idx + shift
                Psi[:, :, Psi_idx] = P_hor * C_hor + P_ver * C_ver
            else:
                new_pos = np.mod(idx + 1 - shift, shearsPerScale[j]) - 1
                if(new_pos == -1):
                    new_pos = shearsPerScale[j] - 1
                Psi[:, :, start_index + new_pos] = P_hor
                Psi[:, :, start_index + idx + shift] = P_ver
            # update shift
            shift += 1
    # generate output with size shape_orig
    Psi = Psi[:shape_orig[0], :shape_orig[1], :]
    # modify spectra at finest scales to obtain really real shearlets
    # the modification has only to be done for dimensions with even length
    if realCoefficients and realReal and (shapem[0] or shapem[1]):
        idx_finest_scale = (1 + np.sum(shearsPerScale[:-1]))
        scale_idx = idx_finest_scale + np.concatenate(
            (np.arange(1, (idx_finest_scale + 1) / 2 + 1),
             np.arange((idx_finest_scale + 1) / 2 + 2, shearsPerScale[-1])),
            axis=0)
        # NOTE(review): np.int is removed in NumPy >= 1.24; this line needs
        # int or np.int64 on modern NumPy.
        scale_idx = scale_idx.astype(np.int)
        if shapem[0]:  # even number of rows -> modify first row:
            idx = slice(1, shape_orig[1])
            Psi[0, idx, scale_idx] = 1 / np.sqrt(2) * (
                Psi[0, idx, scale_idx] +
                Psi[0, shape_orig[1] - 1:0:-1, scale_idx])
        if shapem[1]:  # even number of columns -> modify first column:
            idx = slice(1, shape_orig[0])
            Psi[idx, 0, scale_idx] = 1 / np.sqrt(2) * (
                Psi[idx, 0, scale_idx] +
                Psi[shape_orig[0] - 1:0:-1, 0, scale_idx])
    if fftshift_spectra:
        # Note: changed to ifftshift so roundtrip tests pass for odd sized
        # arrays
        Psi = np.fft.ifftshift(Psi, axes=(0, 1))
    # Add the following two lines to calculate the spectra of the Shearlet to comprass the size of the .npy file
    #Psi[..., 1] = np.sum(Psi[..., 1:], axis=-1)
    #Psi = Psi[..., :2]
    return Psi
def fft2d(A):
    """Forward 2-D DFT of *A* as a complex (two-channel) OpenCV array."""
    flags = cv2.DFT_COMPLEX_OUTPUT
    return cv2.dft(A, flags=flags)
def ifft2d(A):
    """Inverse 2-D DFT of *A*, scaled and reduced to a real-valued array."""
    flags = cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT
    return cv2.idft(A, flags=flags)
def enhance_Psi(Psi, enhance_factors):
    """Weight each shearlet channel by its scale factor and sum over the last axis.

    Psi: spectra stack; channel 0 is the low-pass, then scale i occupies
        the next 2**(i+2) channels.
    enhance_factors: (low_pass_factor, per_scale_high_factors).
    Returns the weighted sum of all channels (shape ``Psi.shape[:-1]``).
    """
    print('enhance_factors = {}'.format(enhance_factors))
    assert len(enhance_factors) == 2, 'Ecpect the length of enhance_factors to be 2, but provided {}.' \
                                      ''.format(len(enhance_factors))
    assert isinstance(enhance_factors[1], (list, tuple)), 'Expect the type of enhance_factors[1] to be list or tuple' \
                                                          ', but provided {}.'.format(type(enhance_factors[1]))
    low_factor, high_factors = enhance_factors
    per_scale = [np.repeat(factor, 2 ** (i + 2))
                 for i, factor in enumerate(high_factors)]
    weights = np.concatenate([[low_factor]] + per_scale)
    return np.sum(Psi * weights, axis=-1)
def allinone(A, Psi=None, numOfScales=None, realCoefficients=True, maxScale='max', shearletSpect=meyerShearletSpect,
             realReal=True, enhance_factor=(1.05, (2.5, 2.5, 1.0))):
    """Filter *A* in the Fourier domain with the precomputed spectra *Psi*.

    Only ``A`` and ``Psi`` are used; the remaining parameters are kept for
    interface compatibility with the spectra-construction routines.
    """
    spectrum = fft2d(A)
    filtered = spectrum * Psi[..., np.newaxis]
    return ifft2d(filtered)
def allinone_original(A, Psi=None, numOfScales=None, realCoefficients=True, maxScale='max',
                      shearletSpect=meyerShearletSpect, realReal=True, enhance_factor=(1.05, 3.5)):
    """Enhance *A* by amplifying its low/high shearlet bands in the Fourier domain.

    Psi: spectra stack; channel 0 is the low-pass, channels 1: are summed
        into a single high-pass channel. Note: ``Psi`` is modified in place.
    enhance_factor: (low_gain, high_gain); high_gain may be a scalar or a
        sequence whose first element is used.
    Returns the enhanced image via the inverse DFT.
    """
    A_FFT = fft2d(A)
    # Collapse all high-frequency channels into channel 1.
    Psi[..., 1] = np.sum(Psi[..., 1:], axis=-1)
    Psi = Psi[..., :2]
    print('in allinone, enhancefactors = {}'.format(enhance_factor))
    low_gain = enhance_factor[0]
    high_gain = enhance_factor[1]
    # Bug fix: the default high gain is a plain float, but the original code
    # unconditionally did enhance_factor[1][0], raising TypeError whenever
    # the default argument was used. Accept both scalars and sequences.
    if isinstance(high_gain, (list, tuple)):
        high_gain = high_gain[0]
    Psi_low = Psi[..., 0] * low_gain
    Psi_high = Psi[..., 1] * high_gain
    A_FFT_real = A_FFT[..., 0]
    A_FFT_img = A_FFT[..., 1]
    # Apply both band gains to the real and imaginary parts and recombine.
    A_low_real = Psi_low * A_FFT_real
    A_low_img = Psi_low * A_FFT_img
    A_high_real = Psi_high * A_FFT_real
    A_high_img = Psi_high * A_FFT_img
    A_complex = np.stack((A_high_real + A_low_real, A_high_img + A_low_img), axis=-1)
    return ifft2d(A_complex)
| [
"cv2.idft",
"numpy.sqrt",
"cv2.filter2D",
"numpy.equal",
"numpy.array",
"numpy.rot90",
"numpy.mod",
"numpy.arange",
"cv2.dft",
"numpy.asarray",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"numpy.concatenate",
"numpy.min",
"warnings.warn",
"numpy.meshgrid",
"numpy.abs",
"numpy.o... | [((467, 488), 'numpy.max', 'np.max', (['low_frequency'], {}), '(low_frequency)\n', (473, 488), True, 'import numpy as np\n'), ((501, 522), 'numpy.min', 'np.min', (['low_frequency'], {}), '(low_frequency)\n', (507, 522), True, 'import numpy as np\n'), ((1059, 1133), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(kernel1_size, kernel1_size)'], {}), '(cv2.MORPH_ELLIPSE, (kernel1_size, kernel1_size))\n', (1084, 1133), False, 'import cv2\n'), ((1148, 1222), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(kernel2_size, kernel2_size)'], {}), '(cv2.MORPH_ELLIPSE, (kernel2_size, kernel2_size))\n', (1173, 1222), False, 'import cv2\n'), ((1234, 1290), 'cv2.morphologyEx', 'cv2.morphologyEx', (['low_frequancy', 'cv2.MORPH_OPEN', 'kernel1'], {}), '(low_frequancy, cv2.MORPH_OPEN, kernel1)\n', (1250, 1290), False, 'import cv2\n'), ((1302, 1358), 'cv2.morphologyEx', 'cv2.morphologyEx', (['low_frequancy', 'cv2.MORPH_OPEN', 'kernel2'], {}), '(low_frequancy, cv2.MORPH_OPEN, kernel2)\n', (1318, 1358), False, 'import cv2\n'), ((2167, 2207), 'numpy.array', 'np.array', (['[0.9, 1.0, 1.1, 1.2, 1.5, 3.0]'], {}), '([0.9, 1.0, 1.1, 1.2, 1.5, 3.0])\n', (2175, 2207), True, 'import numpy as np\n'), ((2480, 2517), 'numpy.ones', 'np.ones', (['(window_scale, window_scale)'], {}), '((window_scale, window_scale))\n', (2487, 2517), True, 'import numpy as np\n'), ((2621, 2660), 'cv2.filter2D', 'cv2.filter2D', (['alphas', '(-1)'], {'kernel': 'kernel'}), '(alphas, -1, kernel=kernel)\n', (2633, 2660), False, 'import cv2\n'), ((2671, 2709), 'cv2.filter2D', 'cv2.filter2D', (['betas', '(-1)'], {'kernel': 'kernel'}), '(betas, -1, kernel=kernel)\n', (2683, 2709), False, 'import cv2\n'), ((5250, 5267), 'numpy.asarray', 'np.asarray', (['shape'], {}), '(shape)\n', (5260, 5267), True, 'import numpy as np\n'), ((6243, 6281), 'numpy.linspace', 'np.linspace', (['(0)', 'X', '((shape[1] + 1) // 2)'], {}), '(0, X, (shape[1] + 1) // 
2)\n', (6254, 6281), True, 'import numpy as np\n'), ((6298, 6354), 'numpy.concatenate', 'np.concatenate', (['(-xi_x_init[-1:0:-1], xi_x_init)'], {'axis': '(0)'}), '((-xi_x_init[-1:0:-1], xi_x_init), axis=0)\n', (6312, 6354), True, 'import numpy as np\n'), ((6624, 6678), 'numpy.meshgrid', 'np.meshgrid', (['xi_x_init', 'xi_y_init[::-1]'], {'indexing': '"""xy"""'}), "(xi_x_init, xi_y_init[::-1], indexing='xy')\n", (6635, 6678), True, 'import numpy as np\n'), ((11247, 11287), 'cv2.dft', 'cv2.dft', (['A'], {'flags': 'cv2.DFT_COMPLEX_OUTPUT'}), '(A, flags=cv2.DFT_COMPLEX_OUTPUT)\n', (11254, 11287), False, 'import cv2\n'), ((11316, 11370), 'cv2.idft', 'cv2.idft', (['A'], {'flags': '(cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)'}), '(A, flags=cv2.DFT_SCALE | cv2.DFT_REAL_OUTPUT)\n', (11324, 11370), False, 'import cv2\n'), ((12084, 12149), 'numpy.concatenate', 'np.concatenate', (['[[enhance_factors[0]], enhance_factor_array_high]'], {}), '([[enhance_factors[0]], enhance_factor_array_high])\n', (12098, 12149), True, 'import numpy as np\n'), ((12201, 12221), 'numpy.sum', 'np.sum', (['Psi'], {'axis': '(-1)'}), '(Psi, axis=-1)\n', (12207, 12221), True, 'import numpy as np\n'), ((12847, 12876), 'numpy.sum', 'np.sum', (['Psi[..., 1:]'], {'axis': '(-1)'}), '(Psi[..., 1:], axis=-1)\n', (12853, 12876), True, 'import numpy as np\n'), ((13372, 13406), 'numpy.stack', 'np.stack', (['(A_real, A_img)'], {'axis': '(-1)'}), '((A_real, A_img), axis=-1)\n', (13380, 13406), True, 'import numpy as np\n'), ((2568, 2610), 'cv2.filter2D', 'cv2.filter2D', (['a_times_b', '(-1)'], {'kernel': 'kernel'}), '(a_times_b, -1, kernel=kernel)\n', (2580, 2610), False, 'import cv2\n'), ((2907, 2944), 'cv2.filter2D', 'cv2.filter2D', (['(alphas ** 2)', '(-1)', 'kernel'], {}), '(alphas ** 2, -1, kernel)\n', (2919, 2944), False, 'import cv2\n'), ((5311, 5327), 'numpy.mod', 'np.mod', (['shape', '(2)'], {}), '(shape, 2)\n', (5317, 5327), True, 'import numpy as np\n'), ((5384, 5407), 'numpy.equal', 'np.equal', (['shapem', 
'(False)'], {}), '(shapem, False)\n', (5392, 5407), True, 'import numpy as np\n'), ((5431, 5453), 'numpy.equal', 'np.equal', (['shapem', '(True)'], {}), '(shapem, True)\n', (5439, 5453), True, 'import numpy as np\n'), ((5516, 5626), 'warnings.warn', 'warnings.warn', (['"""Complex shearlet case may be buggy. Doesn\'t currently give perfect reconstruction."""'], {}), '(\n "Complex shearlet case may be buggy. Doesn\'t currently give perfect reconstruction."\n )\n', (5529, 5626), False, 'import warnings\n'), ((6395, 6433), 'numpy.linspace', 'np.linspace', (['(0)', 'X', '((shape[0] + 1) // 2)'], {}), '(0, X, (shape[0] + 1) // 2)\n', (6406, 6433), True, 'import numpy as np\n'), ((6454, 6510), 'numpy.concatenate', 'np.concatenate', (['(-xi_y_init[-1:0:-1], xi_y_init)'], {'axis': '(0)'}), '((-xi_y_init[-1:0:-1], xi_y_init), axis=0)\n', (6468, 6510), True, 'import numpy as np\n'), ((6704, 6716), 'numpy.abs', 'np.abs', (['xi_x'], {}), '(xi_x)\n', (6710, 6716), True, 'import numpy as np\n'), ((6720, 6732), 'numpy.abs', 'np.abs', (['xi_y'], {}), '(xi_y)\n', (6726, 6732), True, 'import numpy as np\n'), ((6758, 6770), 'numpy.abs', 'np.abs', (['xi_x'], {}), '(xi_x)\n', (6764, 6770), True, 'import numpy as np\n'), ((6773, 6785), 'numpy.abs', 'np.abs', (['xi_y'], {}), '(xi_y)\n', (6779, 6785), True, 'import numpy as np\n'), ((10983, 11017), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['Psi'], {'axes': '(0, 1)'}), '(Psi, axes=(0, 1))\n', (10999, 11017), True, 'import numpy as np\n'), ((630, 641), 'numpy.cos', 'np.cos', (['f_x'], {}), '(f_x)\n', (636, 641), True, 'import numpy as np\n'), ((864, 890), 'numpy.max', 'np.max', (['[shape_x, shape_y]'], {}), '([shape_x, shape_y])\n', (870, 890), True, 'import numpy as np\n'), ((926, 952), 'numpy.max', 'np.max', (['[shape_x, shape_y]'], {}), '([shape_x, shape_y])\n', (932, 952), True, 'import numpy as np\n'), ((2946, 2978), 'cv2.filter2D', 'cv2.filter2D', (['alphas', '(-1)', 'kernel'], {}), '(alphas, -1, kernel)\n', (2958, 2978), False, 
'import cv2\n'), ((3258, 3271), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (3264, 3271), True, 'import numpy as np\n'), ((3276, 3289), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (3282, 3289), True, 'import numpy as np\n'), ((3292, 3305), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (3298, 3305), True, 'import numpy as np\n'), ((7209, 7231), 'numpy.arange', 'np.arange', (['numOfScales'], {}), '(numOfScales)\n', (7218, 7231), True, 'import numpy as np\n'), ((10032, 10059), 'numpy.sum', 'np.sum', (['shearsPerScale[:-1]'], {}), '(shearsPerScale[:-1])\n', (10038, 10059), True, 'import numpy as np\n'), ((9068, 9086), 'numpy.rot90', 'np.rot90', (['P_ver', '(2)'], {}), '(P_ver, 2)\n', (9076, 9086), True, 'import numpy as np\n'), ((2381, 2402), 'numpy.ones', 'np.ones', (['(2 ** (i + 1))'], {}), '(2 ** (i + 1))\n', (2388, 2402), True, 'import numpy as np\n'), ((3716, 3725), 'numpy.max', 'np.max', (['l'], {}), '(l)\n', (3722, 3725), True, 'import numpy as np\n'), ((8905, 8923), 'numpy.rot90', 'np.rot90', (['P_hor', '(2)'], {}), '(P_hor, 2)\n', (8913, 8923), True, 'import numpy as np\n'), ((10129, 10173), 'numpy.arange', 'np.arange', (['(1)', '((idx_finest_scale + 1) / 2 + 1)'], {}), '(1, (idx_finest_scale + 1) / 2 + 1)\n', (10138, 10173), True, 'import numpy as np\n'), ((10188, 10249), 'numpy.arange', 'np.arange', (['((idx_finest_scale + 1) / 2 + 2)', 'shearsPerScale[-1]'], {}), '((idx_finest_scale + 1) / 2 + 2, shearsPerScale[-1])\n', (10197, 10249), True, 'import numpy as np\n'), ((10466, 10476), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10473, 10476), True, 'import numpy as np\n'), ((10736, 10746), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10743, 10746), True, 'import numpy as np\n'), ((9383, 9425), 'numpy.mod', 'np.mod', (['(idx + 1 - shift)', 'shearsPerScale[j]'], {}), '(idx + 1 - shift, shearsPerScale[j])\n', (9389, 9425), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image, ImageTk
import PySimpleGUI as sg
def adapta_imagem(img, shape, max_val=1):
    """Min-max normalize an image, reshape it, and scale it to the target range.

    Args:
        img: array of pixel values (any shape compatible with `shape`).
        shape: target shape of the returned array.
        max_val: upper bound of the output range. If > 1 the result is scaled
            to [0, max_val] and cast to uint8; otherwise floats in [0, 1]
            are returned.

    Returns:
        The normalized, reshaped image (uint8 when max_val > 1).
    """
    rng = img.max() - img.min()
    if rng == 0:
        # Constant image: avoid division by zero and return all zeros.
        img = np.zeros_like(img, dtype=float)
    else:
        img = (img - img.min()) / rng
    if max_val > 1:
        img *= max_val
        return img.reshape(shape).astype(np.uint8)
    else:
        return img.reshape(shape)
# Load the mean face and the eigenvectors produced by the offline PCA:
autovetores = np.load('eigenvectors.npy')
mean = np.load('mean.npy')
mean = (mean - mean.mean())/mean.std() # Standardizing the mean image
# Start assembling the GUI
sg.theme('Dark')
# Column with the sliders for the principal components
sliders_column = [
    [sg.Text("Componentes Principais")],
    [sg.Button("Aleatório", button_color='blue', size=(14, 1), key=("Button_Random")), sg.Button("Reset", button_color='green', size=(14, 1), key=("Button_Reset"))],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC1")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC2")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC3")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC4")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC5")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC6")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC7")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC8")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC9")],
    [sg.Slider(range=(-255, 255), default_value=0, resolution=5, orientation='h', size=(30, 10), key="PC10")]
]
# Column that displays the reconstructed image
image_column = [
    [sg.Text(text="Media + Componentes*Autovetores")],
    [sg.Image(key="IMAGE")]
]
# Full window layout
layout = [
    [
        sg.Column(sliders_column),
        sg.VSeparator(),
        sg.Column(image_column)
    ]
]
# Create the GUI window
window = sg.Window("EigenFaces - Cats", layout)
last_values = None
# Starting the window read loop
while True:
event, values = window.read(timeout=20)
if event == "Exit" or event == sg.WIN_CLOSED:
break
if values != last_values:
comps = np.array([[values[f"PC{i}"] for i in range(1,11)]])
img = comps@autovetores + mean
img = adapta_imagem(img, (64, 64, 3), 255)
img = Image.fromarray(img)
img = img.resize((128, 128))
img = ImageTk.PhotoImage(img)
window["IMAGE"].update(data=img)
last_values = values
if event == 'Button_Random':
for i in range(1,11):
window[f'PC{i}'].update(np.random.randint(-255, 255))
if event == 'Button_Reset':
for i in range(1,11):
window[f'PC{i}'].update(0) | [
"PIL.Image.fromarray",
"PySimpleGUI.Slider",
"PySimpleGUI.Column",
"PySimpleGUI.Text",
"PySimpleGUI.VSeparator",
"PySimpleGUI.Button",
"PySimpleGUI.theme",
"numpy.random.randint",
"PySimpleGUI.Image",
"numpy.load",
"PySimpleGUI.Window",
"PIL.ImageTk.PhotoImage"
] | [((472, 499), 'numpy.load', 'np.load', (['"""eigenvectors.npy"""'], {}), "('eigenvectors.npy')\n", (479, 499), True, 'import numpy as np\n'), ((508, 527), 'numpy.load', 'np.load', (['"""mean.npy"""'], {}), "('mean.npy')\n", (515, 527), True, 'import numpy as np\n'), ((625, 641), 'PySimpleGUI.theme', 'sg.theme', (['"""Dark"""'], {}), "('Dark')\n", (633, 641), True, 'import PySimpleGUI as sg\n'), ((2361, 2399), 'PySimpleGUI.Window', 'sg.Window', (['"""EigenFaces - Cats"""', 'layout'], {}), "('EigenFaces - Cats', layout)\n", (2370, 2399), True, 'import PySimpleGUI as sg\n'), ((733, 766), 'PySimpleGUI.Text', 'sg.Text', (['"""Componentes Principais"""'], {}), "('Componentes Principais')\n", (740, 766), True, 'import PySimpleGUI as sg\n'), ((775, 853), 'PySimpleGUI.Button', 'sg.Button', (['"""Aleatório"""'], {'button_color': '"""blue"""', 'size': '(14, 1)', 'key': '"""Button_Random"""'}), "('Aleatório', button_color='blue', size=(14, 1), key='Button_Random')\n", (784, 853), True, 'import PySimpleGUI as sg\n'), ((858, 932), 'PySimpleGUI.Button', 'sg.Button', (['"""Reset"""'], {'button_color': '"""green"""', 'size': '(14, 1)', 'key': '"""Button_Reset"""'}), "('Reset', button_color='green', size=(14, 1), key='Button_Reset')\n", (867, 932), True, 'import PySimpleGUI as sg\n'), ((942, 1048), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC1"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC1')\n", (951, 1048), True, 'import PySimpleGUI as sg\n'), ((1053, 1159), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC2"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC2')\n", (1062, 1159), True, 'import PySimpleGUI as sg\n'), ((1164, 1270), 
'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC3"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC3')\n", (1173, 1270), True, 'import PySimpleGUI as sg\n'), ((1275, 1381), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC4"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC4')\n", (1284, 1381), True, 'import PySimpleGUI as sg\n'), ((1386, 1492), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC5"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC5')\n", (1395, 1492), True, 'import PySimpleGUI as sg\n'), ((1497, 1603), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC6"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC6')\n", (1506, 1603), True, 'import PySimpleGUI as sg\n'), ((1608, 1714), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC7"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC7')\n", (1617, 1714), True, 'import PySimpleGUI as sg\n'), ((1719, 1825), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC8"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), 
key='PC8')\n", (1728, 1825), True, 'import PySimpleGUI as sg\n'), ((1830, 1936), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC9"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC9')\n", (1839, 1936), True, 'import PySimpleGUI as sg\n'), ((1941, 2048), 'PySimpleGUI.Slider', 'sg.Slider', ([], {'range': '(-255, 255)', 'default_value': '(0)', 'resolution': '(5)', 'orientation': '"""h"""', 'size': '(30, 10)', 'key': '"""PC10"""'}), "(range=(-255, 255), default_value=0, resolution=5, orientation='h',\n size=(30, 10), key='PC10')\n", (1950, 2048), True, 'import PySimpleGUI as sg\n'), ((2107, 2154), 'PySimpleGUI.Text', 'sg.Text', ([], {'text': '"""Media + Componentes*Autovetores"""'}), "(text='Media + Componentes*Autovetores')\n", (2114, 2154), True, 'import PySimpleGUI as sg\n'), ((2163, 2184), 'PySimpleGUI.Image', 'sg.Image', ([], {'key': '"""IMAGE"""'}), "(key='IMAGE')\n", (2171, 2184), True, 'import PySimpleGUI as sg\n'), ((2234, 2259), 'PySimpleGUI.Column', 'sg.Column', (['sliders_column'], {}), '(sliders_column)\n', (2243, 2259), True, 'import PySimpleGUI as sg\n'), ((2266, 2281), 'PySimpleGUI.VSeparator', 'sg.VSeparator', ([], {}), '()\n', (2279, 2281), True, 'import PySimpleGUI as sg\n'), ((2288, 2311), 'PySimpleGUI.Column', 'sg.Column', (['image_column'], {}), '(image_column)\n', (2297, 2311), True, 'import PySimpleGUI as sg\n'), ((2838, 2858), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (2853, 2858), False, 'from PIL import Image, ImageTk\n'), ((2932, 2955), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (2950, 2955), False, 'from PIL import Image, ImageTk\n'), ((3148, 3176), 'numpy.random.randint', 'np.random.randint', (['(-255)', '(255)'], {}), '(-255, 255)\n', (3165, 3176), True, 'import numpy as np\n')] |
from sklearn.neighbors import KDTree
from os.path import join, exists, dirname, abspath
import numpy as np
import pandas as pd
import os, sys, glob, pickle
import nibabel as nib
from multiprocessing import Process
import concurrent.futures
from tqdm import tqdm
from scipy import ndimage
import argparse
BASE_DIR = dirname(abspath(__file__))  # directory containing this script
ROOT_DIR = dirname(BASE_DIR)  # parent directory (repository root)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
from helper_ply import write_ply
from helper_tool import DataProcessing as DP
import time
# Volumes loaded per case; the last entry ('seg') is the label volume.
typeimg = ['t1ce','t1', 'flair', 't2', 'seg']
sub_grid_size = 0.01  # grid size used for point-cloud sub-sampling
out_format = '.ply'  # extension of the generated point-cloud files
parallel = False  # when True, cases are processed in a process pool
dataTraining = True  # True: load ground-truth 'seg'; False: load attention mask
n_point = 365000  # default number of points (overridden by --n_point)
def load_volume(ID):
    """Load one case as a (5, 240, 240, 155) array.

    Channels 0-3 hold the four intensity-normalized image modalities
    (t1ce, t1, flair, t2); channel 4 holds the label volume: the
    ground-truth segmentation when training, otherwise the attention mask.

    Args:
        ID: case identifier; files are read from <dataset_path>/<ID>/<ID>_<mod>.nii.gz

    Returns:
        np.ndarray of shape (5, 240, 240, 155).
    """

    def _normalize(volume):
        # Standardize intensities with the mean/std of the nonzero region,
        # then reset the (zero) background voxels back to zero.
        foreground = volume[volume > 0]
        normed = (volume - foreground.mean()) / foreground.std()
        normed[volume == 0] = 0
        return normed

    base = os.path.join(dataset_path, ID, ID)
    data = np.empty((5, 240, 240, 155))
    # Channels 0-3: the four image modalities, intensity-normalized.
    for channel, modality in enumerate(typeimg[:-1]):
        image = np.asanyarray(nib.load(base + '_' + modality + '.nii.gz').dataobj)
        data[channel] = _normalize(image)
    # Channel 4: ground-truth labels (training) or a precomputed binary mask.
    if dataTraining:
        seg = np.asanyarray(nib.load(base + '_' + typeimg[-1] + '.nii.gz').dataobj)
        seg[seg == 4] = 3  # collapse label value 4 onto 3
        data[4] = seg
    else:
        mask_file = os.path.join(attention_mask_path, ID + '.nii.gz')
        mask = np.asanyarray(nib.load(mask_file).dataobj).astype(np.uint8)
        data[4] = mask
    return data
def convert_pc2ply(volume,ID):
    """Convert a 5-channel volume to point-cloud files for case ``ID``.

    Writes, into the module-level output folders:
      - <sub_pc_folder>/<ID>_xyz_origin.npy : integer voxel coordinates
      - <original_pc_folder>/<ID>.ply       : full point cloud
      - <sub_pc_folder>/<ID>.ply            : grid-sub-sampled point cloud
      - <sub_pc_folder>/<ID>_KDTree.pkl     : KDTree built on the sub-sampled points
      - <sub_pc_folder>/<ID>_proj.pkl       : projection indices + labels

    Args:
        volume: array of shape (5, x, y, z); channels 0-3 are modality
            intensities, channel 4 is the per-voxel label.
        ID: case identifier used to name the output files.
    """
    channel,x_axis, y_axis, z_axis = volume.shape
    # Keep only voxels where at least one modality is nonzero (foreground);
    # each point is [x, y, z, t1ce, t1, flair, t2, label].
    data_list = [[x,y,z,volume[0][x][y][z],volume[1][x][y][z],volume[2][x][y][z],volume[3][x][y][z],volume[4][x][y][z]] for x in range(x_axis) for y in range(y_axis) for z in range(z_axis) if (volume[0][x][y][z] != 0 or volume[1][x][y][z] != 0 or volume[2][x][y][z] != 0 or volume[3][x][y][z] != 0)]
    pc_data = np.array(data_list)
    xyz_origin = pc_data[:,:3].astype(int)
    np.save(os.path.join(sub_pc_folder, ID+"_xyz_origin.npy"), xyz_origin)
    # Normalize coordinates to [0, 1] by dividing by the volume extent.
    xyz_min = np.array([x_axis,y_axis,z_axis])
    pc_data[:, 0:3] /= xyz_min
    xyz = pc_data[:, :3].astype(np.float32)
    colors = pc_data[:, 3:7].astype(np.float32)
    labels = pc_data[:,7].astype(np.uint8)
    (unique, counts) = np.unique(labels, return_counts=True)
    print(ID," n point ", len(labels),unique, counts )
    #write full ply
    write_ply(os.path.join(original_pc_folder, ID+out_format), (xyz, colors, labels), ['x', 'y', 'z', 't1ce', 't1', 'flair', 't2' ,'class'])
    # save sub_cloud and KDTree file
    sub_xyz, sub_colors, sub_labels = DP.grid_sub_sampling(xyz, colors, labels, sub_grid_size)
    #write sub ply
    write_ply(os.path.join(sub_pc_folder, ID+out_format), [sub_xyz, sub_colors, sub_labels], ['x', 'y', 'z', 't1ce', 't1', 'flair', 't2' ,'class'])
    kd_tree_file = os.path.join(sub_pc_folder, ID+ '_KDTree.pkl')
    search_tree = KDTree(sub_xyz)
    with open(kd_tree_file, 'wb') as f:
        pickle.dump(search_tree, f)
    # For each original point, the index of its nearest sub-sampled point.
    proj_idx = np.squeeze(search_tree.query(xyz, return_distance=False))
    proj_idx = proj_idx.astype(np.int32)
    proj_save = os.path.join(sub_pc_folder, ID+ '_proj.pkl')
    with open(proj_save, 'wb') as f:
        pickle.dump([proj_idx, labels], f)
def process_data_and_save(ID):
    """Load the 5-channel volume for one case and write its point-cloud files."""
    volume = load_volume(ID)
    convert_pc2ply(volume, ID)
if __name__ == '__main__':
    # Command-line interface: convert every case folder under --data_3D_path
    # into point-cloud files under --outPC_path.
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_point', type=int, default=365000, help='The number of points cloud ')
    parser.add_argument('--data_3D_path', type=str, default=0, help='Path to the 3D volume data')
    parser.add_argument('--outPC_path', type=str, default='train', help='Path of output points cloud data')
    FLAGS = parser.parse_args()
    # These names are read as module-level globals by load_volume/convert_pc2ply.
    dataset_path = FLAGS.data_3D_path
    outPC_path = FLAGS.outPC_path
    n_point = FLAGS.n_point
    original_pc_folder = os.path.join(outPC_path,"original_ply")
    sub_pc_folder = os.path.join(outPC_path,"input0.01")
    attention_mask_path = None # you can modify to path binary of the attention network during the inference process
    if not exists(original_pc_folder):
        os.makedirs(original_pc_folder)
    if not exists(sub_pc_folder):
        os.makedirs(sub_pc_folder)
    list_ID = os.listdir(dataset_path)
    if parallel:
        # Fan the cases out over a pool of worker processes.
        with concurrent.futures.ProcessPoolExecutor(50) as executor:
            tqdm(executor.map(process_data_and_save, list_ID), total=len(list_ID))
    else:
        for i,ID in enumerate(list_ID):
            process_data_and_save(ID)
| [
"os.path.exists",
"helper_tool.DataProcessing.grid_sub_sampling",
"os.listdir",
"pickle.dump",
"numpy.unique",
"argparse.ArgumentParser",
"os.makedirs",
"nibabel.load",
"os.path.join",
"sklearn.neighbors.KDTree",
"os.path.dirname",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"os.path.ab... | [((355, 372), 'os.path.dirname', 'dirname', (['BASE_DIR'], {}), '(BASE_DIR)\n', (362, 372), False, 'from os.path import join, exists, dirname, abspath\n'), ((374, 399), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (389, 399), False, 'import os, sys, glob, pickle\n'), ((400, 425), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (415, 425), False, 'import os, sys, glob, pickle\n'), ((325, 342), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (332, 342), False, 'from os.path import join, exists, dirname, abspath\n'), ((1322, 1356), 'os.path.join', 'os.path.join', (['dataset_path', 'ID', 'ID'], {}), '(dataset_path, ID, ID)\n', (1334, 1356), False, 'import os, sys, glob, pickle\n'), ((1377, 1405), 'numpy.empty', 'np.empty', (['(5, 240, 240, 155)'], {}), '((5, 240, 240, 155))\n', (1385, 1405), True, 'import numpy as np\n'), ((2501, 2520), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (2509, 2520), True, 'import numpy as np\n'), ((2658, 2692), 'numpy.array', 'np.array', (['[x_axis, y_axis, z_axis]'], {}), '([x_axis, y_axis, z_axis])\n', (2666, 2692), True, 'import numpy as np\n'), ((2884, 2921), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (2893, 2921), True, 'import numpy as np\n'), ((3217, 3273), 'helper_tool.DataProcessing.grid_sub_sampling', 'DP.grid_sub_sampling', (['xyz', 'colors', 'labels', 'sub_grid_size'], {}), '(xyz, colors, labels, sub_grid_size)\n', (3237, 3273), True, 'from helper_tool import DataProcessing as DP\n'), ((3463, 3510), 'os.path.join', 'os.path.join', (['sub_pc_folder', "(ID + '_KDTree.pkl')"], {}), "(sub_pc_folder, ID + '_KDTree.pkl')\n", (3475, 3510), False, 'import os, sys, glob, pickle\n'), ((3528, 3543), 'sklearn.neighbors.KDTree', 'KDTree', (['sub_xyz'], {}), '(sub_xyz)\n', (3534, 3543), False, 'from sklearn.neighbors import KDTree\n'), ((3751, 3796), 'os.path.join', 
'os.path.join', (['sub_pc_folder', "(ID + '_proj.pkl')"], {}), "(sub_pc_folder, ID + '_proj.pkl')\n", (3763, 3796), False, 'import os, sys, glob, pickle\n'), ((3993, 4018), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4016, 4018), False, 'import argparse\n'), ((4483, 4523), 'os.path.join', 'os.path.join', (['outPC_path', '"""original_ply"""'], {}), "(outPC_path, 'original_ply')\n", (4495, 4523), False, 'import os, sys, glob, pickle\n'), ((4545, 4582), 'os.path.join', 'os.path.join', (['outPC_path', '"""input0.01"""'], {}), "(outPC_path, 'input0.01')\n", (4557, 4582), False, 'import os, sys, glob, pickle\n'), ((4868, 4892), 'os.listdir', 'os.listdir', (['dataset_path'], {}), '(dataset_path)\n', (4878, 4892), False, 'import os, sys, glob, pickle\n'), ((1206, 1228), 'numpy.zeros', 'np.zeros', (['volume.shape'], {}), '(volume.shape)\n', (1214, 1228), True, 'import numpy as np\n'), ((1891, 1940), 'os.path.join', 'os.path.join', (['attention_mask_path', "(ID + '.nii.gz')"], {}), "(attention_mask_path, ID + '.nii.gz')\n", (1903, 1940), False, 'import os, sys, glob, pickle\n'), ((2576, 2627), 'os.path.join', 'os.path.join', (['sub_pc_folder', "(ID + '_xyz_origin.npy')"], {}), "(sub_pc_folder, ID + '_xyz_origin.npy')\n", (2588, 2627), False, 'import os, sys, glob, pickle\n'), ((3014, 3063), 'os.path.join', 'os.path.join', (['original_pc_folder', '(ID + out_format)'], {}), '(original_pc_folder, ID + out_format)\n', (3026, 3063), False, 'import os, sys, glob, pickle\n'), ((3309, 3353), 'os.path.join', 'os.path.join', (['sub_pc_folder', '(ID + out_format)'], {}), '(sub_pc_folder, ID + out_format)\n', (3321, 3353), False, 'import os, sys, glob, pickle\n'), ((3592, 3619), 'pickle.dump', 'pickle.dump', (['search_tree', 'f'], {}), '(search_tree, f)\n', (3603, 3619), False, 'import os, sys, glob, pickle\n'), ((3841, 3875), 'pickle.dump', 'pickle.dump', (['[proj_idx, labels]', 'f'], {}), '([proj_idx, labels], f)\n', (3852, 3875), False, 'import os, sys, 
glob, pickle\n'), ((4715, 4741), 'os.path.exists', 'exists', (['original_pc_folder'], {}), '(original_pc_folder)\n', (4721, 4741), False, 'from os.path import join, exists, dirname, abspath\n'), ((4751, 4782), 'os.makedirs', 'os.makedirs', (['original_pc_folder'], {}), '(original_pc_folder)\n', (4762, 4782), False, 'import os, sys, glob, pickle\n'), ((4795, 4816), 'os.path.exists', 'exists', (['sub_pc_folder'], {}), '(sub_pc_folder)\n', (4801, 4816), False, 'from os.path import join, exists, dirname, abspath\n'), ((4826, 4852), 'os.makedirs', 'os.makedirs', (['sub_pc_folder'], {}), '(sub_pc_folder)\n', (4837, 4852), False, 'import os, sys, glob, pickle\n'), ((1556, 1574), 'nibabel.load', 'nib.load', (['path_mod'], {}), '(path_mod)\n', (1564, 1574), True, 'import nibabel as nib\n'), ((1780, 1798), 'nibabel.load', 'nib.load', (['path_mod'], {}), '(path_mod)\n', (1788, 1798), True, 'import nibabel as nib\n'), ((1968, 1987), 'nibabel.load', 'nib.load', (['path_mask'], {}), '(path_mask)\n', (1976, 1987), True, 'import nibabel as nib\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# @Time : 2020/7/16 20:00
# @Author : <NAME>
import os
import math
import cv2
import subprocess
import numpy as np
from .pse import pse_cpp, get_num, get_points
# Compile the pse C++ extension in this package's directory via make;
# abort import of this module if the build fails.
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
if subprocess.call(['make', '-C', BASE_DIR]) != 0:  # non-zero exit status means the build failed
    raise RuntimeError('Cannot compile pse: {}'.format(BASE_DIR))
def order_clockwise(poly):
    """Sort the 4 corner vertices by angle about their centroid.

    In image coordinates (y pointing down) this yields a clockwise
    ordering, starting from the corner in the 2nd quadrant region.

    Args:
        poly: array of shape (4, 2) with the corner coordinates.

    Returns:
        The same points, reordered.
    """
    centroid = poly.mean(axis=0)
    offsets = poly - centroid
    angles = np.arctan2(offsets[:, 1], offsets[:, 0])
    return poly[np.argsort(angles)]
def order_rotate_rectangle(poly):
    """Fit a rotated rectangle to ``poly`` and return its ordered corners and angle.

    Args:
        poly: point set accepted by cv2.minAreaRect.

    Returns:
        tuple: (rbox, angle) where rbox is a (4, 2) array of corners starting
        from a canonical corner, and angle is in radians.
    """
    # Only used when geometry == 'RBOX':
    # rbox is clock-wise, with -90<angle<=0
    # more details of cv2.minAreaRect can be found in notebooks
    # NOTE(review): the assert below relies on the pre-4.5 OpenCV convention
    # where minAreaRect returns an angle in (-90, 0]; OpenCV >= 4.5 returns
    # (0, 90] and would trip this assert — confirm the pinned OpenCV version.
    rect = cv2.minAreaRect(poly)
    rbox = np.array(cv2.boxPoints(rect))
    angle = rect[-1]
    assert (angle <= 0)
    if angle < -45.:
        # Tall box: pick the topmost corner as the start and rotate the angle range.
        idx_start = np.argmin(rbox[:, 1])
        angle = -angle - 90.
    else: # [-45,0)
        # Wide box: pick the leftmost corner as the start.
        idx_start = np.argmin(rbox[:, 0])
        angle = -angle
    # Roll the corner list so it begins at the chosen start corner.
    rbox = rbox[[idx_start, (idx_start+1)%4, (idx_start+2)%4, (idx_start+3)%4]]
    angle = angle * np.pi / 180.
    return rbox, angle
def get_side_mask(score_map, label_pts, label_num):
    """Rasterize per-instance head/tail side masks from labeled text regions.

    Args:
        score_map: per-pixel score map (only used for mask shape here).
        label_pts: dict mapping label value -> [score, count, x0, y0, x1, y1, ...];
            entries from index 2 onward are point coordinates.
        label_num: number of connected-component labels.

    Returns:
        tuple: (head_mask, tail_mask, is_vert) where the masks are int32 maps
        filled with the owning label value, and is_vert[label] is 1 for
        vertical instances (h >= w) and 0 for horizontal ones.
    """
    # head_mask, tail_mask: utilizing info from score_map.
    head_mask = np.zeros_like(score_map, dtype=np.int32)
    tail_mask = np.zeros_like(score_map, dtype=np.int32)
    is_vert = np.array([0]*label_num, dtype=np.int32)
    for label_val, label_pt in label_pts.items():
        label_pt = label_pt[2:]  # drop the [score, count] prefix, keep coordinates
        points = np.array(label_pt, dtype=int).reshape(-1,2)
        rbox, angle = order_rotate_rectangle(points)
        p0, p1, p2, p3 = rbox
        w, h = np.linalg.norm(p0-p1), np.linalg.norm(p0-p3)
        # head, tail: an h-by-h (or w-by-w) square at each end of the long axis.
        if h<w:
            dir_v = (p1-p0)*h/w
            head = np.array([p0, p0+dir_v, p3+dir_v, p3], dtype=np.int32)
            tail = np.array([p1-dir_v, p1, p2, p2-dir_v], dtype=np.int32)
            is_vert[label_val] = 0 # horz.
        else:
            dir_v = (p3-p0)*w/h
            head = np.array([p0, p1, p1+dir_v, p0+dir_v], dtype=np.int32)
            tail = np.array([p3-dir_v, p2-dir_v, p2, p3], dtype=np.int32)
            is_vert[label_val] = 1 # vert.
        cv2.fillConvexPoly(head_mask, head, label_val)
        cv2.fillConvexPoly(tail_mask, tail, label_val)
    return head_mask, tail_mask, is_vert
def decode(score_map, geo_rbox_map, geo_quad_map, score_thresh=0.8, min_area=10, conf_thresh=0.8):
    """
    Distinguish text from background and reconstruct quads from the geometry maps.

    Args:
        score_map: per-pixel text/non-text score map.
        geo_rbox_map: rotated-box geometry map (currently unused; kept for API compatibility).
        geo_quad_map: quadrilateral geometry map consumed by pse_cpp.
        score_thresh: threshold used to binarize the score map into text kernels.
        min_area: connected components smaller than this are discarded.
        conf_thresh: minimum mean score / quad confidence required to keep an instance.

    Returns:
        np.ndarray of shape (K, 8): the kept quads, each with its 4 corners
        ordered clockwise (x0, y0, ..., x3, y3).
    """
    kernel = score_map > score_thresh  # shrunk text kernels
    label_num, label_map = cv2.connectedComponents(kernel.astype(np.uint8), connectivity=4)
    label_pts = get_points(label_map, score_map, label_num)
    head_mask, tail_mask, is_vert = get_side_mask(score_map, label_pts, label_num)
    quads_confs = pse_cpp(score_map, label_map, label_num, head_mask, tail_mask,
                          is_vert, geo_quad_map)
    # np.float was removed in NumPy 1.24; use an explicit dtype instead.
    quads_confs = np.asarray(quads_confs, dtype=np.float64)
    quads = quads_confs[:, :8]  # 4 corner points per instance
    confs = quads_confs[:, 8]   # per-instance confidence (previously mis-read from the sliced quads)
    # post-filtering
    keep = []
    for label_idx, label_pt in label_pts.items():
        score_i, count_i = label_pt[0], label_pt[1]
        if count_i < min_area or score_i < conf_thresh:
            continue
        if confs[label_idx] < conf_thresh:
            continue
        keep.append(label_idx)
        # order_clockwise expects (4, 2) points, not a flat 8-vector.
        quads[label_idx] = order_clockwise(quads[label_idx].reshape(4, 2)).reshape(8)
    return quads[keep]
#def predict(score_map, side_map, geo_map, scale=1, score_thresh=0.5, side_thresh=0.5, min_area=10):
# from .pse import pse_cpp, get_num, get_points
# score_map = np.squeeze(score_map)
# side_map = np.squeeze(side_map)
# geo_map = np.squeeze(geo_map)
# #print('score, sides, geos with shape:', score.shape, sides.shape, geos.shape)
# kernel = score_map > score_thresh
# sides = side_map > side_thresh
#
# label_num, label_map = cv2.connectedComponents(kernel.astype(np.uint8), connectivity=4)
# label_values = []
# label_sum = get_num(label_map, label_num)
# for label_idx in range(1, label_num):
# if label_sum[label_idx] < min_area:
# continue
# label_values.append(label_idx)
# quads = pse_cpp(label_map.astype(np.int32), sides.astype(np.uint8), geo_map.astype(np.float32), label_num, scale)
# quads = np.array(quads, dtype=np.float)
# for label_idx in range(1, label_num):
# if any(quads[label_idx][:4]):
# # TODO: re-guess the right
# pass
# if any(quads[label_idx][4:]):
# # TODO: re-guess the left
# pass
# quads = quads.reshape((-1, 4, 2))
# for label_idx in range(1, label_num):
# quads[label_idx] = order_clockwise(quads[label_idx])
# return quads
| [
"numpy.mean",
"cv2.boxPoints",
"os.path.realpath",
"cv2.minAreaRect",
"numpy.array",
"numpy.argsort",
"subprocess.call",
"math.atan2",
"numpy.linalg.norm",
"numpy.argmin",
"numpy.zeros_like",
"cv2.fillConvexPoly"
] | [((1497, 1523), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1513, 1523), False, 'import os\n'), ((1529, 1570), 'subprocess.call', 'subprocess.call', (["['make', '-C', BASE_DIR]"], {}), "(['make', '-C', BASE_DIR])\n", (1544, 1570), False, 'import subprocess\n'), ((1781, 1802), 'numpy.mean', 'np.mean', (['poly'], {'axis': '(0)'}), '(poly, axis=0)\n', (1788, 1802), True, 'import numpy as np\n'), ((2178, 2199), 'cv2.minAreaRect', 'cv2.minAreaRect', (['poly'], {}), '(poly)\n', (2193, 2199), False, 'import cv2\n'), ((2730, 2770), 'numpy.zeros_like', 'np.zeros_like', (['score_map'], {'dtype': 'np.int32'}), '(score_map, dtype=np.int32)\n', (2743, 2770), True, 'import numpy as np\n'), ((2787, 2827), 'numpy.zeros_like', 'np.zeros_like', (['score_map'], {'dtype': 'np.int32'}), '(score_map, dtype=np.int32)\n', (2800, 2827), True, 'import numpy as np\n'), ((2842, 2883), 'numpy.array', 'np.array', (['([0] * label_num)'], {'dtype': 'np.int32'}), '([0] * label_num, dtype=np.int32)\n', (2850, 2883), True, 'import numpy as np\n'), ((4575, 4606), 'numpy.array', 'np.array', (['quads'], {'dtype': 'np.float'}), '(quads, dtype=np.float)\n', (4583, 4606), True, 'import numpy as np\n'), ((1856, 1878), 'math.atan2', 'math.atan2', (['p[1]', 'p[0]'], {}), '(p[1], p[0])\n', (1866, 1878), False, 'import math\n'), ((1917, 1940), 'numpy.argsort', 'np.argsort', (['poly_angles'], {}), '(poly_angles)\n', (1927, 1940), True, 'import numpy as np\n'), ((2220, 2239), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (2233, 2239), False, 'import cv2\n'), ((2327, 2348), 'numpy.argmin', 'np.argmin', (['rbox[:, 1]'], {}), '(rbox[:, 1])\n', (2336, 2348), True, 'import numpy as np\n'), ((2419, 2440), 'numpy.argmin', 'np.argmin', (['rbox[:, 0]'], {}), '(rbox[:, 0])\n', (2428, 2440), True, 'import numpy as np\n'), ((3687, 3733), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['head_mask', 'head', 'label_val'], {}), '(head_mask, head, label_val)\n', (3705, 3733), 
False, 'import cv2\n'), ((3742, 3788), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['tail_mask', 'tail', 'label_val'], {}), '(tail_mask, tail, label_val)\n', (3760, 3788), False, 'import cv2\n'), ((3136, 3159), 'numpy.linalg.norm', 'np.linalg.norm', (['(p0 - p1)'], {}), '(p0 - p1)\n', (3150, 3159), True, 'import numpy as np\n'), ((3159, 3182), 'numpy.linalg.norm', 'np.linalg.norm', (['(p0 - p3)'], {}), '(p0 - p3)\n', (3173, 3182), True, 'import numpy as np\n'), ((3270, 3328), 'numpy.array', 'np.array', (['[p0, p0 + dir_v, p3 + dir_v, p3]'], {'dtype': 'np.int32'}), '([p0, p0 + dir_v, p3 + dir_v, p3], dtype=np.int32)\n', (3278, 3328), True, 'import numpy as np\n'), ((3344, 3402), 'numpy.array', 'np.array', (['[p1 - dir_v, p1, p2, p2 - dir_v]'], {'dtype': 'np.int32'}), '([p1 - dir_v, p1, p2, p2 - dir_v], dtype=np.int32)\n', (3352, 3402), True, 'import numpy as np\n'), ((3507, 3565), 'numpy.array', 'np.array', (['[p0, p1, p1 + dir_v, p0 + dir_v]'], {'dtype': 'np.int32'}), '([p0, p1, p1 + dir_v, p0 + dir_v], dtype=np.int32)\n', (3515, 3565), True, 'import numpy as np\n'), ((3581, 3639), 'numpy.array', 'np.array', (['[p3 - dir_v, p2 - dir_v, p2, p3]'], {'dtype': 'np.int32'}), '([p3 - dir_v, p2 - dir_v, p2, p3], dtype=np.int32)\n', (3589, 3639), True, 'import numpy as np\n'), ((2986, 3015), 'numpy.array', 'np.array', (['label_pt'], {'dtype': 'int'}), '(label_pt, dtype=int)\n', (2994, 3015), True, 'import numpy as np\n')] |
import argparse
import os
import sys
from types import ModuleType
from typing import Dict
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import SimpleITK as sitk
import tensorflow as tf
from tensorflow.keras.models import load_model
import PrognosAIs.Constants
import PrognosAIs.IO.utils as IO_utils
import PrognosAIs.Model.Losses
import PrognosAIs.Model.Parsers as ModelParsers
import PrognosAIs.Model.Metrics
import tensorflow_addons as tfa
from PrognosAIs.IO import ConfigLoader
from PrognosAIs.IO import DataGenerator
class Evaluator:
    def __init__(self, model_file: str, data_folder: str, config_file: str, output_folder: str) -> None:
        """Set up the evaluator: output folder, config, model and data generators.

        Args:
            model_file: Path of the saved Keras model.
            data_folder: Folder containing one sub-folder of samples per dataset split.
            config_file: Path of the PrognosAIs config file.
            output_folder: Folder under which the 'Results' directory is created.
        """
        self.EVALUATION_FOLDER = "Results"
        self.data_folder = data_folder
        self.model_file = model_file
        # Filled in later by the prediction/evaluation routines.
        self.predictions = None
        self.sample_predictions = None
        self.sample_labels = None
        self.sample_names = None
        self.output_folder = os.path.join(output_folder, self.EVALUATION_FOLDER)
        IO_utils.create_directory(self.output_folder)
        self.config = ConfigLoader.ConfigLoader(config_file)
        self.batch_size = self.config.get_batch_size()
        # No custom module is passed; load_model's built-in custom objects are used.
        self.model = self.load_model(self.model_file, None)
        self.init_model_parameters()
        self.data_generator = self.init_data_generators()
        self.dataset_names = list(self.data_generator.keys())
        # Feature metadata is taken from the first split only — presumably it is
        # identical across splits; verify against the data preparation step.
        self.sample_metadata = self.data_generator[self.dataset_names[0]].get_feature_metadata()
        self.label_data_generator = self.init_label_generators()
        self.image_output_labels = self.get_image_output_labels()
    @staticmethod
    def load_model(model_file: str, custom_module: ModuleType = None) -> tf.keras.Model:
        """
        Load the model, including potential custom losses.

        Args:
            model_file (str): Location of the model file
            custom_module (ModuleType): Custom module from which to load losses or metrics.
                NOTE(review): this parameter is currently unused — the custom
                objects below are hard-coded; confirm whether it should be wired in.

        Raises:
            error: If the model could not be loaded
            and the problem is not due to a missing loss or metric function.

        Returns:
            tf.keras.Model: The loaded model
        """
        # type hint for mypy
        # The custom metrics, losses and the AdamW optimizer are not registered
        # with Keras by default, so they must be passed via custom_objects.
        model = tf.keras.models.load_model(model_file, custom_objects={"MaskedAUC": PrognosAIs.Model.Metrics.MaskedAUC, "DICE": PrognosAIs.Model.Metrics.DICE, "MaskedSensitivity": PrognosAIs.Model.Metrics.MaskedSensitivity, "MaskedSpecificity": PrognosAIs.Model.Metrics.MaskedSpecificity, "AdamW": tfa.optimizers.AdamW, "MaskedCategoricalCrossentropy": PrognosAIs.Model.Losses.MaskedCategoricalCrossentropy, "DICE_loss": PrognosAIs.Model.Losses.DICE_loss})
        # NOTE(review): a stale comment here referred to a "fake fit" workaround
        # (tensorflow issue #37990) that is not present in the code below.
        return model
def _init_data_generators(self, labels_only: bool) -> dict:
"""
Initialize data generators for all sample folders.
Args:
labels_only (bool): Whether to only load labels
Returns:
dict: initalized data generators
"""
sub_folders = IO_utils.get_subdirectories(self.data_folder)
data_generators = {}
for i_sub_folder in sub_folders:
folder_name = IO_utils.get_root_name(i_sub_folder)
if (
folder_name == PrognosAIs.Constants.TRAIN_DS_NAME
and not self.config.get_evaluate_train_set()
):
continue
data_generators[folder_name] = DataGenerator.HDF5Generator(
i_sub_folder,
self.batch_size,
shuffle=self.config.get_shuffle_evaluation(),
drop_batch_remainder=False,
labels_only=labels_only,
)
return data_generators
def init_data_generators(self) -> dict:
"""
Initialize the data generators.
Returns:
dict: DataGenerator for each subfolder of samples
"""
return self._init_data_generators(False)
def init_label_generators(self) -> dict:
"""
Initialize the data generators which only give labels.
Returns:
dict: DataGenerator for each subfolder of samples
"""
return self._init_data_generators(False)
def init_model_parameters(self) -> None:
    """Cache the model's input/output names, shapes and class counts.

    Populates ``output_names``, ``number_of_outputs``, ``output_shapes``,
    ``output_classes``, ``one_hot_outputs``, ``input_names``,
    ``number_of_inputs`` and ``input_shapes`` on the instance.
    """
    model = self.model
    self.output_names = model.output_names
    self.number_of_outputs = len(self.output_names)
    # A single-output model reports a bare shape; normalize to a list
    if self.number_of_outputs == 1:
        self.output_shapes = [model.output_shape]
    else:
        self.output_shapes = model.output_shape
    self.output_classes = {}
    self.one_hot_outputs = {}
    for name, shape in zip(self.output_names, self.output_shapes):
        n_classes = shape[-1]
        self.output_classes[name] = n_classes
        # More than one class in the last dimension implies one-hot labels
        self.one_hot_outputs[name] = n_classes > 1
    self.input_names = model.input_names
    self.number_of_inputs = len(self.input_names)
    raw_input_shape = model.input_shape
    if isinstance(raw_input_shape, dict):
        self.input_shapes = list(raw_input_shape.values())
    elif self.number_of_inputs == 1:
        self.input_shapes = [raw_input_shape]
    else:
        self.input_shapes = raw_input_shape.values()
def get_image_output_labels(self) -> dict:
    """Match image-shaped outputs to the input they correspond to.

    An output is treated as an image when its shape has the same number of
    dimensions as some input and the same size in every dimension except
    the batch dimension and the final class/channel dimension.

    Returns:
        dict: Mapping of output name -> matching input name
    """
    image_outputs_labels = {}
    for out_name, out_shape in zip(self.output_names, self.output_shapes):
        for in_name, in_shape in zip(self.input_names, self.input_shapes):
            same_rank = len(in_shape) == len(out_shape)
            same_spatial_size = in_shape[1:-1] == out_shape[1:-1]
            if same_rank and same_spatial_size:
                image_outputs_labels[out_name] = in_name
    return image_outputs_labels
def _format_predictions(self, predictions: Union[list, np.ndarray]) -> dict:
"""
Format the predictions to match them with the output names
Args:
predictions (Union[list, np.ndarray]): The predictions from the model
Raises:
ValueError: If the predictions do not match with the expected output names
Returns:
dict: Output predictions matched with the output names
"""
if isinstance(predictions, np.ndarray):
# There is only one output in this case
predictions = [predictions]
if len(predictions) != len(self.output_names):
raise ValueError("The predictions do not match with the output names!")
out_predictions = {}
for i_output_name, i_prediction in zip(self.output_names, predictions):
out_predictions[i_output_name] = i_prediction
return out_predictions
def predict(self) -> dict:
    """
    Get predictions from the model for all samples of all data generators.

    The predictions are computed once and cached on the instance;
    subsequent calls return the cached result.

    Returns:
        dict: Predictions for the different outputs of the model for all samples,
            keyed first by generator (dataset) name and then by output name
    """
    if self.predictions is None:
        # We have not yet determined the predictions, first run
        self.predictions = {}
        predictions = {}
        for i_generator_name, i_generator in self.data_generator.items():
            # We go over all generators
            self.predictions[i_generator_name] = {}
            dataset = i_generator.get_tf_dataset()
            final_predictions = {}
            for i_output_name in self.output_names:
                final_predictions[i_output_name] = []
            for i_batch in dataset:
                # We have to go over the different predictions step by step
                # Otherwise will lead to memory leak
                # The first index in the batch is the sample (the second is the label)
                batch_prediction = self.model.predict_on_batch(i_batch[0])
                # Convert to list if we only have one output
                if isinstance(batch_prediction, np.ndarray):
                    batch_prediction = [batch_prediction]
                for i_output_name, i_prediction in zip(self.output_names, batch_prediction):
                    final_predictions[i_output_name].append(i_prediction)
            # We create one single list for all predictions that we got
            for i_output_name in self.output_names:
                final_predictions[i_output_name] = np.concatenate(
                    final_predictions[i_output_name], axis=0
                )
            predictions[i_generator_name] = final_predictions
        self.predictions = predictions
    return self.predictions
def patches_to_sample_image(
    self,
    datagenerator: PrognosAIs.IO.DataGenerator.HDF5Generator,
    filenames: list,
    output_name: str,
    predictions: np.ndarray,
    labels_are_one_hot: bool,
    label_combination_type: str,
) -> np.ndarray:
    """Reassemble per-patch predictions into one full-size sample image.

    Every patch prediction is placed back at its original location (taken
    from the patch metadata); overlapping regions are combined according to
    ``label_combination_type``:

    * ``"vote"``: patch predictions are rounded and the class with the most
      votes wins.
    * ``"average"``: probabilities are averaged over all patches covering a
      location before taking the argmax.

    Args:
        datagenerator (PrognosAIs.IO.DataGenerator.HDF5Generator): Generator
            providing the per-patch metadata
        filenames (list): Patch files belonging to this sample
        output_name (str): Name of the model output to reconstruct
        predictions (np.ndarray): Patch predictions, one per filename
        labels_are_one_hot (bool): Whether predictions are one-hot encoded
        label_combination_type (str): Either "vote" or "average"

    Raises:
        ValueError: If flat labels are combined with "average", or if an
            unknown combination type is given

    Returns:
        np.ndarray: The reconstructed sample image, transposed for SimpleITK
    """
    if not labels_are_one_hot and label_combination_type == "average":
        # BUG FIX: the two literals previously concatenated without a
        # separator, producing "...probability scorethus the labels..."
        err_msg = (
            "Predictions can only be combined when given as probability score, "
            "thus the labels must be one-hot encoded."
        )
        raise ValueError(err_msg)
    input_name = self.image_output_labels[output_name]
    image_size = self.sample_metadata[input_name]["original_size"]
    transpose_dims = np.arange(len(image_size) - 1, -1, -1)
    number_of_classes = self.output_classes[output_name]
    image_size = np.append(image_size, number_of_classes)
    original_image = np.zeros(image_size)
    number_of_hits = np.zeros(image_size)
    if isinstance(filenames, str):
        filenames = [filenames]
    # TODO REMOVE ONLY FOR IF NOT REALLY PATCHES
    if (
        len(predictions.shape) == len(original_image.shape)
        and predictions.shape[-1] == original_image.shape[-1]
    ):
        predictions = np.expand_dims(predictions, axis=0)
    for i_filename, i_prediction in zip(filenames, predictions):
        i_sample_metadata = datagenerator.get_feature_metadata_from_sample(i_filename)
        i_sample_metadata = i_sample_metadata[input_name]
        in_sample_index_start = np.copy(i_sample_metadata["index"])
        in_sample_index_end = in_sample_index_start + i_sample_metadata["size"]
        # Parts of the patch can be outside of the original image, because of padding
        # Thus here we take only the parts of the patch that are within the original image
        in_sample_index_start[in_sample_index_start < 0] = 0
        sample_indices = tuple(
            slice(*i) for i in zip(in_sample_index_start, in_sample_index_end)
        )
        patch_index_start = np.copy(i_sample_metadata["index"])
        patch_index_end = i_sample_metadata["size"]
        # We also need to cut out the part of the patch that is normally outside of the image
        # we do this here
        patch_index_start[patch_index_start > 0] = 0
        patch_index_start = -1 * patch_index_start
        patch_slices = tuple(slice(*i) for i in zip(patch_index_start, patch_index_end))
        if not labels_are_one_hot:
            # Flat class labels are expanded to one-hot so they can be summed
            i_prediction = i_prediction.astype(np.int32)
            i_prediction = np.eye(number_of_classes)[i_prediction]
        elif label_combination_type == "vote":
            i_prediction = np.round(i_prediction)
        original_image[sample_indices] += i_prediction[patch_slices]
        number_of_hits[sample_indices] += 1
    # Avoid division by zero for locations never covered by a patch
    number_of_hits[number_of_hits == 0] = 1
    if label_combination_type == "vote":
        original_image = np.argmax(original_image, axis=-1)
    elif label_combination_type == "average":
        original_image = np.argmax(np.round(original_image / number_of_hits), axis=-1)
    else:
        raise ValueError("Unknown combination type")
    # Need to transpose because of different indexing between numpy and simpleitk
    original_image = np.transpose(original_image, transpose_dims)
    return original_image
def image_array_to_sitk(self, image_array: np.ndarray, input_name: str) -> sitk.Image:
    """Convert a numpy array back to a SimpleITK image with its original geometry.

    The direction, origin and spacing stored in the sample metadata for the
    given input are restored on the new image.

    Args:
        image_array (np.ndarray): The image data
        input_name (str): Input whose stored metadata should be applied

    Returns:
        sitk.Image: The reconstructed image, cast to float32
    """
    metadata = self.sample_metadata[input_name]
    img = sitk.GetImageFromArray(image_array)
    img.SetDirection(metadata["original_direction"])
    img.SetOrigin(metadata["original_origin"])
    img.SetSpacing(metadata["original_spacing"])
    # Cast to a consistent pixel type to ensure the file loads properly later
    return sitk.Cast(img, sitk.sitkFloat32)
def _find_sample_names_from_patch_names(self, data_generator):
filenames = data_generator.sample_files
# Get the unique names of the files
sample_names = np.unique([i_file.split("_patch")[0] for i_file in filenames])
sample_indices = {}
for i_sample_name in sample_names:
sample_indices[i_sample_name] = np.squeeze(
np.argwhere(
[i_sample_name == i_filename.split("_patch")[0] for i_filename in filenames]
)
)
return sample_names, sample_indices
def get_sample_result_from_patch_results(self, patch_results):
    """Combine per-patch results into per-sample results for every dataset.

    For image-shaped outputs, the patches of each sample are stitched back
    into a full-size image via ``patches_to_sample_image``; for other
    outputs the per-sample list stays empty here.

    Args:
        patch_results (dict): Per-dataset dict of output name -> patch results

    Returns:
        tuple: Per-dataset sample names, and per-dataset combined results
    """
    sample_results = {}
    sample_names = {}
    for i_dataset_name, i_dataset_generator in self.data_generator.items():
        i_patch_results = patch_results[i_dataset_name]
        sample_results[i_dataset_name] = {}
        file_locations = np.asarray(i_dataset_generator.sample_locations)
        # Group the patch files of this dataset by the sample they belong to
        sample_names[i_dataset_name], sample_indices = self._find_sample_names_from_patch_names(
            i_dataset_generator
        )
        for i_output_name, i_output_prediction in i_patch_results.items():
            sample_results[i_dataset_name][i_output_name] = []
            if i_output_name in self.image_output_labels:
                for i_sample_name, i_sample_indices in sample_indices.items():
                    patches_from_sample_results = i_output_prediction[i_sample_indices]
                    # Stitch this sample's patch predictions into one image
                    sample_results[i_dataset_name][i_output_name].append(
                        self.patches_to_sample_image(
                            i_dataset_generator,
                            file_locations[i_sample_indices],
                            i_output_name,
                            patches_from_sample_results,
                            self.one_hot_outputs[i_output_name],
                            self.config.get_label_combination_type(),
                        )
                    )
        for i_key, i_value in sample_results[i_dataset_name].items():
            sample_results[i_dataset_name][i_key] = np.asarray(i_value)
    return sample_names, sample_results
def get_sample_predictions_from_patch_predictions(self):
    """Predict per patch, then combine the patch results per sample.

    Returns:
        tuple: Per-dataset sample names and the combined per-sample predictions
    """
    patch_level_predictions = self.predict()
    return self.get_sample_result_from_patch_results(patch_level_predictions)
@staticmethod
def one_hot_labels_to_flat_labels(labels: np.ndarray) -> np.ndarray:
flat_labels = np.argmax(labels, axis=-1)
flat_labels[labels[..., 0] == -1] = -1
return flat_labels
def make_dataframe(self, sample_names, predictions) -> pd.DataFrame:
    """Tabulate the class predictions of all non-image outputs.

    Image-shaped outputs are skipped; for every other output one column per
    class is created, named ``Prediction_<output>_class_<index>``.

    Args:
        sample_names: Names of the samples, one row per sample
        predictions: Mapping of output name -> prediction array

    Returns:
        pd.DataFrame: One row per sample with the per-class predictions
    """
    columns = ["Sample"]
    for output_name in self.output_names:
        if output_name in self.image_output_labels:
            continue
        columns.extend(
            "Prediction_" + output_name + "_class_" + str(class_index)
            for class_index in range(self.output_classes[output_name])
        )
    results_df = pd.DataFrame(columns=columns)
    results_df["Sample"] = sample_names
    for output_name, output_prediction in predictions.items():
        if output_name in self.image_output_labels:
            continue
        for class_index in range(self.output_classes[output_name]):
            column = "Prediction_" + output_name + "_class_" + str(class_index)
            results_df[column] = output_prediction[:, class_index]
    return results_df
def write_image_predictions_to_files(self, sample_names, predictions, labels_one_hot) -> None:
    """Write image-shaped predictions to NIfTI mask files, one per sample.

    Output files are named ``<sample>_mask.nii.gz`` in the output folder.
    Non-image outputs are ignored.

    Args:
        sample_names: Names of the samples, used to derive the file names
        predictions: Mapping of output name -> per-sample predictions
        labels_one_hot: Mapping of output name -> whether the predictions
            are one-hot encoded; may be None to skip the flattening step
    """
    for i_output_name, i_output_prediction in predictions.items():
        if i_output_name in self.image_output_labels:
            if labels_one_hot is not None and labels_one_hot[i_output_name]:
                # One-hot maps are flattened to class indices before saving
                i_output_prediction = self.one_hot_labels_to_flat_labels(i_output_prediction,)
            i_output_prediction_images = [
                self.image_array_to_sitk(
                    i_sample_output_prediction, self.image_output_labels[i_output_name]
                )
                for i_sample_output_prediction in i_output_prediction
            ]
            for i_pred_image, i_sample_name in zip(i_output_prediction_images, sample_names):
                out_file = os.path.join(
                    self.output_folder, i_sample_name.split(".")[0] + "_mask.nii.gz"
                )
                sitk.WriteImage(i_pred_image, out_file)
def write_predictions_to_file(self) -> None:
    """Write all predictions to disk: a CSV per dataset plus image masks.

    Class predictions go to ``genetic_histological_predictions.csv`` in the
    output folder; image predictions are combined per sample and written as
    NIfTI masks.
    """
    predictions = self.predict()
    for i_dataset_name, i_dataset_generator in self.data_generator.items():
        # NOTE(review): the CSV path is identical for every dataset, so a
        # later dataset overwrites the file of an earlier one — confirm
        # this is intended.
        out_file = os.path.join(self.output_folder, "genetic_histological_predictions.csv")
        i_prediction = predictions[i_dataset_name]
        results_df = self.make_dataframe(
            i_dataset_generator.sample_files,
            i_prediction,
        )
        results_df.to_csv(out_file, index=False)
        (
            sample_names,
            sample_predictions,
        ) = self.get_sample_predictions_from_patch_predictions()
        self.write_image_predictions_to_files(
            sample_names[i_dataset_name], sample_predictions[i_dataset_name], None,
        )
def evaluate(self):
    """Run the evaluation by writing all model predictions to file."""
    self.write_predictions_to_file()
| [
"tensorflow.keras.models.load_model",
"PrognosAIs.IO.ConfigLoader.ConfigLoader",
"numpy.asarray",
"PrognosAIs.IO.utils.get_root_name",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.round",
"numpy.eye",
"numpy.argmax",
"PrognosAIs.IO.utils.create_directory",
"SimpleITK.Cast",
"numpy.transpose... | [((959, 1010), 'os.path.join', 'os.path.join', (['output_folder', 'self.EVALUATION_FOLDER'], {}), '(output_folder, self.EVALUATION_FOLDER)\n', (971, 1010), False, 'import os\n'), ((1019, 1064), 'PrognosAIs.IO.utils.create_directory', 'IO_utils.create_directory', (['self.output_folder'], {}), '(self.output_folder)\n', (1044, 1064), True, 'import PrognosAIs.IO.utils as IO_utils\n'), ((1088, 1126), 'PrognosAIs.IO.ConfigLoader.ConfigLoader', 'ConfigLoader.ConfigLoader', (['config_file'], {}), '(config_file)\n', (1113, 1126), False, 'from PrognosAIs.IO import ConfigLoader\n'), ((2244, 2709), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_file'], {'custom_objects': "{'MaskedAUC': PrognosAIs.Model.Metrics.MaskedAUC, 'DICE': PrognosAIs.Model.\n Metrics.DICE, 'MaskedSensitivity': PrognosAIs.Model.Metrics.\n MaskedSensitivity, 'MaskedSpecificity': PrognosAIs.Model.Metrics.\n MaskedSpecificity, 'AdamW': tfa.optimizers.AdamW,\n 'MaskedCategoricalCrossentropy': PrognosAIs.Model.Losses.\n MaskedCategoricalCrossentropy, 'DICE_loss': PrognosAIs.Model.Losses.\n DICE_loss}"}), "(model_file, custom_objects={'MaskedAUC':\n PrognosAIs.Model.Metrics.MaskedAUC, 'DICE': PrognosAIs.Model.Metrics.\n DICE, 'MaskedSensitivity': PrognosAIs.Model.Metrics.MaskedSensitivity,\n 'MaskedSpecificity': PrognosAIs.Model.Metrics.MaskedSpecificity,\n 'AdamW': tfa.optimizers.AdamW, 'MaskedCategoricalCrossentropy':\n PrognosAIs.Model.Losses.MaskedCategoricalCrossentropy, 'DICE_loss':\n PrognosAIs.Model.Losses.DICE_loss})\n", (2270, 2709), True, 'import tensorflow as tf\n'), ((3176, 3221), 'PrognosAIs.IO.utils.get_subdirectories', 'IO_utils.get_subdirectories', (['self.data_folder'], {}), '(self.data_folder)\n', (3203, 3221), True, 'import PrognosAIs.IO.utils as IO_utils\n'), ((10312, 10352), 'numpy.append', 'np.append', (['image_size', 'number_of_classes'], {}), '(image_size, number_of_classes)\n', (10321, 10352), True, 'import numpy as np\n'), ((10379, 
10399), 'numpy.zeros', 'np.zeros', (['image_size'], {}), '(image_size)\n', (10387, 10399), True, 'import numpy as np\n'), ((10425, 10445), 'numpy.zeros', 'np.zeros', (['image_size'], {}), '(image_size)\n', (10433, 10445), True, 'import numpy as np\n'), ((12895, 12939), 'numpy.transpose', 'np.transpose', (['original_image', 'transpose_dims'], {}), '(original_image, transpose_dims)\n', (12907, 12939), True, 'import numpy as np\n'), ((13336, 13371), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image_array'], {}), '(image_array)\n', (13358, 13371), True, 'import SimpleITK as sitk\n'), ((13564, 13596), 'SimpleITK.Cast', 'sitk.Cast', (['img', 'sitk.sitkFloat32'], {}), '(img, sitk.sitkFloat32)\n', (13573, 13596), True, 'import SimpleITK as sitk\n'), ((16278, 16304), 'numpy.argmax', 'np.argmax', (['labels'], {'axis': '(-1)'}), '(labels, axis=-1)\n', (16287, 16304), True, 'import numpy as np\n'), ((16787, 16819), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df_columns'}), '(columns=df_columns)\n', (16799, 16819), True, 'import pandas as pd\n'), ((3318, 3354), 'PrognosAIs.IO.utils.get_root_name', 'IO_utils.get_root_name', (['i_sub_folder'], {}), '(i_sub_folder)\n', (3340, 3354), True, 'import PrognosAIs.IO.utils as IO_utils\n'), ((10756, 10791), 'numpy.expand_dims', 'np.expand_dims', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (10770, 10791), True, 'import numpy as np\n'), ((11052, 11087), 'numpy.copy', 'np.copy', (["i_sample_metadata['index']"], {}), "(i_sample_metadata['index'])\n", (11059, 11087), True, 'import numpy as np\n'), ((11589, 11624), 'numpy.copy', 'np.copy', (["i_sample_metadata['index']"], {}), "(i_sample_metadata['index'])\n", (11596, 11624), True, 'import numpy as np\n'), ((12536, 12570), 'numpy.argmax', 'np.argmax', (['original_image'], {'axis': '(-1)'}), '(original_image, axis=-1)\n', (12545, 12570), True, 'import numpy as np\n'), ((14534, 14582), 'numpy.asarray', 'np.asarray', 
(['i_dataset_generator.sample_locations'], {}), '(i_dataset_generator.sample_locations)\n', (14544, 14582), True, 'import numpy as np\n'), ((18482, 18554), 'os.path.join', 'os.path.join', (['self.output_folder', '"""genetic_histological_predictions.csv"""'], {}), "(self.output_folder, 'genetic_histological_predictions.csv')\n", (18494, 18554), False, 'import os\n'), ((9206, 9262), 'numpy.concatenate', 'np.concatenate', (['final_predictions[i_output_name]'], {'axis': '(0)'}), '(final_predictions[i_output_name], axis=0)\n', (9220, 9262), True, 'import numpy as np\n'), ((12146, 12171), 'numpy.eye', 'np.eye', (['number_of_classes'], {}), '(number_of_classes)\n', (12152, 12171), True, 'import numpy as np\n'), ((12268, 12290), 'numpy.round', 'np.round', (['i_prediction'], {}), '(i_prediction)\n', (12276, 12290), True, 'import numpy as np\n'), ((12660, 12701), 'numpy.round', 'np.round', (['(original_image / number_of_hits)'], {}), '(original_image / number_of_hits)\n', (12668, 12701), True, 'import numpy as np\n'), ((15820, 15839), 'numpy.asarray', 'np.asarray', (['i_value'], {}), '(i_value)\n', (15830, 15839), True, 'import numpy as np\n'), ((18251, 18290), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['i_pred_image', 'out_file'], {}), '(i_pred_image, out_file)\n', (18266, 18290), True, 'import SimpleITK as sitk\n')] |
#!usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
def plot_loss(loss_list):
    """Plot the training loss per step on a logarithmic y-axis."""
    steps = np.arange(len(loss_list))
    values = np.array(loss_list)
    plt.plot(steps, values)
    plt.yscale("log")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.show()
def plot_reward(reward_list):
    """Plot the obtained reward per step on a linear y-axis."""
    steps = np.arange(len(reward_list))
    values = np.array(reward_list)
    plt.plot(steps, values)
    # plt.yscale("log")
    plt.xlabel("step")
    plt.ylabel("reward")
    plt.show()
# Guard the script entry point so importing this module does not trigger
# file loading and plotting as a side effect.
if __name__ == "__main__":
    loss = np.loadtxt("loss_log.txt")
    # reward = np.loadtxt("reward_log.txt")
    plot_loss(loss)
    # plot_reward(reward)
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.loadtxt",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.show"
] | [((471, 497), 'numpy.loadtxt', 'np.loadtxt', (['"""loss_log.txt"""'], {}), "('loss_log.txt')\n", (481, 497), True, 'import numpy as np\n'), ((138, 157), 'numpy.array', 'np.array', (['loss_list'], {}), '(loss_list)\n', (146, 157), True, 'import numpy as np\n'), ((162, 176), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'l'], {}), '(t, l)\n', (170, 176), True, 'import matplotlib.pyplot as plt\n'), ((181, 198), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (191, 198), True, 'import matplotlib.pyplot as plt\n'), ((203, 221), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), "('step')\n", (213, 221), True, 'import matplotlib.pyplot as plt\n'), ((226, 244), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (236, 244), True, 'import matplotlib.pyplot as plt\n'), ((249, 259), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (257, 259), True, 'import matplotlib.pyplot as plt\n'), ((335, 356), 'numpy.array', 'np.array', (['reward_list'], {}), '(reward_list)\n', (343, 356), True, 'import numpy as np\n'), ((361, 375), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'l'], {}), '(t, l)\n', (369, 375), True, 'import matplotlib.pyplot as plt\n'), ((404, 422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""step"""'], {}), "('step')\n", (414, 422), True, 'import matplotlib.pyplot as plt\n'), ((427, 447), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reward"""'], {}), "('reward')\n", (437, 447), True, 'import matplotlib.pyplot as plt\n'), ((452, 462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (460, 462), True, 'import matplotlib.pyplot as plt\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.