code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from numpy.testing import assert_raises
from scipy.spatial import distance
from sklearn.utils.testing import assert_equal
from sklearn.cluster.dbscan_ import DBSCAN, dbscan
from .common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
# Number of clusters used when generating the shared synthetic data set.
n_clusters = 3
# Shared fixture: clustered sample data used by every test below.
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
    """Tests the DBSCAN algorithm with a precomputed distance matrix.

    Runs both the functional (``dbscan``) and estimator (``DBSCAN``)
    interfaces and checks that each recovers the expected number of clusters.
    """
    # Parameters chosen specifically for this task.
    eps = 0.15
    min_samples = 10
    # Compute pairwise distances, normalised to [0, 1].
    D = distance.squareform(distance.pdist(X))
    D /= np.max(D)
    # Functional interface.
    core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    # Number of clusters, ignoring noise (label -1) if present.
    # (Uses int(-1 in labels) for consistency with the other tests.)
    n_clusters_1 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_1, n_clusters)
    # Estimator interface must agree.
    db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
    labels = db.fit(D).labels_
    n_clusters_2 = len(set(labels)) - int(-1 in labels)
    assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
    """Tests the DBSCAN algorithm with a feature vector array."""
    # Parameters chosen specifically for this task.
    # Different eps to other test, because distance is not normalised.
    eps, min_samples, metric = 0.8, 10, 'euclidean'
    # Functional interface: cluster the raw feature matrix directly.
    core_samples, found_labels = dbscan(X, metric=metric, eps=eps,
                                        min_samples=min_samples)
    # Count clusters, discounting the noise label (-1) when present.
    assert_equal(len(set(found_labels)) - int(-1 in found_labels), n_clusters)
    # Estimator interface must find the same number of clusters.
    estimator = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
    found_labels = estimator.fit(X).labels_
    assert_equal(len(set(found_labels)) - int(-1 in found_labels), n_clusters)
def test_dbscan_callable():
    """Tests the DBSCAN algorithm with a callable metric."""
    # Parameters chosen specifically for this task.
    # Different eps to other test, because distance is not normalised.
    eps, min_samples = 0.8, 10
    # metric is the function reference, not the string key.
    metric = distance.euclidean
    # Functional interface with a ball tree for neighbour queries.
    core_samples, found_labels = dbscan(X, metric=metric, eps=eps,
                                        min_samples=min_samples,
                                        algorithm='ball_tree')
    # Count clusters, discounting the noise label (-1) when present.
    assert_equal(len(set(found_labels)) - int(-1 in found_labels), n_clusters)
    # Estimator interface must find the same number of clusters.
    estimator = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
                       algorithm='ball_tree')
    found_labels = estimator.fit(X).labels_
    assert_equal(len(set(found_labels)) - int(-1 in found_labels), n_clusters)
def test_dbscan_balltree():
    """Tests the DBSCAN algorithm with balltree for neighbor calculation."""
    eps, min_samples = 0.8, 10
    # Functional interface on a precomputed distance matrix.
    dist_matrix = pairwise_distances(X)
    core_samples, labels = dbscan(dist_matrix, metric="precomputed", eps=eps,
                                  min_samples=min_samples)
    assert_equal(len(set(labels)) - int(-1 in labels), n_clusters)
    # Estimator interface: every tree/metric configuration below must
    # recover the same number of clusters on the raw features.
    configurations = [
        dict(p=2.0, algorithm='ball_tree'),
        dict(p=2.0, algorithm='kd_tree'),
        dict(p=1.0, algorithm='ball_tree'),
        dict(leaf_size=20, algorithm='ball_tree'),
    ]
    for extra_kwargs in configurations:
        estimator = DBSCAN(eps=eps, min_samples=min_samples, **extra_kwargs)
        labels = estimator.fit(X).labels_
        assert_equal(len(set(labels)) - int(-1 in labels), n_clusters)
def test_dbscan_badargs():
    """Test bad argument values: these should all raise ValueErrors"""
    # Each keyword set is individually invalid for dbscan.
    bad_kwargs = [
        dict(eps=-1.0),
        dict(algorithm='blah'),
        dict(metric='blah'),
        dict(leaf_size=-1),
        dict(p=-1),
    ]
    for kwargs in bad_kwargs:
        assert_raises(ValueError, dbscan, X, **kwargs)
def test_pickle():
    """DBSCAN estimators must survive a pickle round-trip."""
    estimator = DBSCAN()
    restored = pickle.loads(pickle.dumps(estimator))
    # The round-tripped object must be of the same class.
    assert_equal(type(restored), estimator.__class__)
| [
"pickle.loads",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.testing.assert_raises",
"sklearn.cluster.dbscan_.dbscan",
"sklearn.utils.testing.assert_equal",
"numpy.max",
"scipy.spatial.distance.pdist",
"sklearn.cluster.dbscan_.DBSCAN",
"pickle.dumps"
] | [((689, 698), 'numpy.max', 'np.max', (['D'], {}), '(D)\n', (695, 698), True, 'import numpy as np\n'), ((747, 812), 'sklearn.cluster.dbscan_.dbscan', 'dbscan', (['D'], {'metric': '"""precomputed"""', 'eps': 'eps', 'min_samples': 'min_samples'}), "(D, metric='precomputed', eps=eps, min_samples=min_samples)\n", (753, 812), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((969, 1007), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_1', 'n_clusters'], {}), '(n_clusters_1, n_clusters)\n', (981, 1007), False, 'from sklearn.utils.testing import assert_equal\n'), ((1018, 1080), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'metric': '"""precomputed"""', 'eps': 'eps', 'min_samples': 'min_samples'}), "(metric='precomputed', eps=eps, min_samples=min_samples)\n", (1024, 1080), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((1173, 1211), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_2', 'n_clusters'], {}), '(n_clusters_2, n_clusters)\n', (1185, 1211), False, 'from sklearn.utils.testing import assert_equal\n'), ((1571, 1629), 'sklearn.cluster.dbscan_.dbscan', 'dbscan', (['X'], {'metric': 'metric', 'eps': 'eps', 'min_samples': 'min_samples'}), '(X, metric=metric, eps=eps, min_samples=min_samples)\n', (1577, 1629), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((1777, 1815), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_1', 'n_clusters'], {}), '(n_clusters_1, n_clusters)\n', (1789, 1815), False, 'from sklearn.utils.testing import assert_equal\n'), ((1826, 1881), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'metric': 'metric', 'eps': 'eps', 'min_samples': 'min_samples'}), '(metric=metric, eps=eps, min_samples=min_samples)\n', (1832, 1881), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((1974, 2012), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_2', 'n_clusters'], {}), '(n_clusters_2, n_clusters)\n', (1986, 2012), 
False, 'from sklearn.utils.testing import assert_equal\n'), ((2435, 2521), 'sklearn.cluster.dbscan_.dbscan', 'dbscan', (['X'], {'metric': 'metric', 'eps': 'eps', 'min_samples': 'min_samples', 'algorithm': '"""ball_tree"""'}), "(X, metric=metric, eps=eps, min_samples=min_samples, algorithm=\n 'ball_tree')\n", (2441, 2521), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((2698, 2736), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_1', 'n_clusters'], {}), '(n_clusters_1, n_clusters)\n', (2710, 2736), False, 'from sklearn.utils.testing import assert_equal\n'), ((2747, 2825), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'metric': 'metric', 'eps': 'eps', 'min_samples': 'min_samples', 'algorithm': '"""ball_tree"""'}), "(metric=metric, eps=eps, min_samples=min_samples, algorithm='ball_tree')\n", (2753, 2825), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((2934, 2972), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_2', 'n_clusters'], {}), '(n_clusters_2, n_clusters)\n', (2946, 2972), False, 'from sklearn.utils.testing import assert_equal\n'), ((3124, 3145), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (3142, 3145), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((3173, 3238), 'sklearn.cluster.dbscan_.dbscan', 'dbscan', (['D'], {'metric': '"""precomputed"""', 'eps': 'eps', 'min_samples': 'min_samples'}), "(D, metric='precomputed', eps=eps, min_samples=min_samples)\n", (3179, 3238), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((3386, 3424), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_1', 'n_clusters'], {}), '(n_clusters_1, n_clusters)\n', (3398, 3424), False, 'from sklearn.utils.testing import assert_equal\n'), ((3435, 3505), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'p': '(2.0)', 'eps': 'eps', 'min_samples': 'min_samples', 'algorithm': '"""ball_tree"""'}), "(p=2.0, eps=eps, 
min_samples=min_samples, algorithm='ball_tree')\n", (3441, 3505), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((3598, 3636), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_2', 'n_clusters'], {}), '(n_clusters_2, n_clusters)\n', (3610, 3636), False, 'from sklearn.utils.testing import assert_equal\n'), ((3647, 3715), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'p': '(2.0)', 'eps': 'eps', 'min_samples': 'min_samples', 'algorithm': '"""kd_tree"""'}), "(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')\n", (3653, 3715), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((3808, 3846), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_3', 'n_clusters'], {}), '(n_clusters_3, n_clusters)\n', (3820, 3846), False, 'from sklearn.utils.testing import assert_equal\n'), ((3857, 3927), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'p': '(1.0)', 'eps': 'eps', 'min_samples': 'min_samples', 'algorithm': '"""ball_tree"""'}), "(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')\n", (3863, 3927), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((4020, 4058), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_4', 'n_clusters'], {}), '(n_clusters_4, n_clusters)\n', (4032, 4058), False, 'from sklearn.utils.testing import assert_equal\n'), ((4069, 4146), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {'leaf_size': '(20)', 'eps': 'eps', 'min_samples': 'min_samples', 'algorithm': '"""ball_tree"""'}), "(leaf_size=20, eps=eps, min_samples=min_samples, algorithm='ball_tree')\n", (4075, 4146), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((4255, 4293), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['n_clusters_5', 'n_clusters'], {}), '(n_clusters_5, n_clusters)\n', (4267, 4293), False, 'from sklearn.utils.testing import assert_equal\n'), ((4398, 4444), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'dbscan', 
'X'], {'eps': '(-1.0)'}), '(ValueError, dbscan, X, eps=-1.0)\n', (4411, 4444), False, 'from numpy.testing import assert_raises\n'), ((4485, 4539), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'dbscan', 'X'], {'algorithm': '"""blah"""'}), "(ValueError, dbscan, X, algorithm='blah')\n", (4498, 4539), False, 'from numpy.testing import assert_raises\n'), ((4580, 4631), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'dbscan', 'X'], {'metric': '"""blah"""'}), "(ValueError, dbscan, X, metric='blah')\n", (4593, 4631), False, 'from numpy.testing import assert_raises\n'), ((4672, 4722), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'dbscan', 'X'], {'leaf_size': '(-1)'}), '(ValueError, dbscan, X, leaf_size=-1)\n', (4685, 4722), False, 'from numpy.testing import assert_raises\n'), ((4763, 4805), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'dbscan', 'X'], {'p': '(-1)'}), '(ValueError, dbscan, X, p=-1)\n', (4776, 4805), False, 'from numpy.testing import assert_raises\n'), ((4873, 4881), 'sklearn.cluster.dbscan_.DBSCAN', 'DBSCAN', ([], {}), '()\n', (4879, 4881), False, 'from sklearn.cluster.dbscan_ import DBSCAN, dbscan\n'), ((4890, 4907), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (4902, 4907), False, 'import pickle\n'), ((661, 678), 'scipy.spatial.distance.pdist', 'distance.pdist', (['X'], {}), '(X)\n', (675, 678), False, 'from scipy.spatial import distance\n'), ((4930, 4945), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (4942, 4945), False, 'import pickle\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ntm_interface.py: pytorch module implementing NTM interface to external memory."""
__author__ = "<NAME>"
import torch
import logging
import collections
import numpy as np
from torch.nn import Module
logger = logging.getLogger('NTM-Interface')
from miprometheus.utils.app_state import AppState
# Helper collection type: state of a single (read or write) head.
_HeadStateTuple = collections.namedtuple(
    'HeadStateTuple', ('attention', 'similarity', 'gate', 'shift'))
class HeadStateTuple(_HeadStateTuple):
    """
    Tuple storing the current/past state of a single head:
    final attention, content-based similarity, gate and shift kernel.
    """
    __slots__ = ()
# Helper collection type: state of the whole interface.
_InterfaceStateTuple = collections.namedtuple(
    'InterfaceStateTuple', ('read_heads', 'write_head',))
class InterfaceStateTuple(_InterfaceStateTuple):
    """
    Tuple storing the current/past interface state: a list of read-head
    state tuples and a single write-head state tuple.
    """
    __slots__ = ()
class NTMInterface(Module):
    """
    Interface between the NTM controller and the external memory: generates
    read/write head parameters from the controller hidden state and realizes
    content- and location-based addressing.
    """
    def __init__(self, params):
        """
        Constructor. Parses configuration and builds the linear layers that
        map the controller hidden state to read/write head parameters.

        :param params: Dictionary of parameters. Keys read below:
            ``controller.hidden_state_size``, ``memory.num_content_bits``,
            ``interface.shift_size`` (odd, > 0),
            ``interface.num_read_heads`` (>= 1) and optional
            ``interface.use_content_based_addressing`` (default True).
        """
        # Call constructor of base class.
        super(NTMInterface, self).__init__()
        # Parse parameters.
        # Get hidden state size.
        self.ctrl_hidden_state_size = params['controller']['hidden_state_size']
        # Get memory parameters.
        self.num_memory_content_bits = params['memory']['num_content_bits']
        # Get interface parameters.
        self.interface_shift_size = params['interface']['shift_size']
        # Shift kernel must be odd-sized so it has a well-defined centre.
        assert self.interface_shift_size % 2 != 0, 'Shift size must be an odd number'
        assert self.interface_shift_size > 0, 'Shift size must be > 0'
        self.interface_num_read_heads = params['interface']['num_read_heads']
        assert self.interface_num_read_heads >= 1, "NTM requires at least 1 read head (currently %r)" % self.interface_num_read_heads
        # Check if CBA should be used or not.
        self.use_content_based_addressing = params['interface'].get(
            'use_content_based_addressing', True)
        # -------------- READ HEADS -----------------#
        # Number/size of parameters of a single read head:
        if self.use_content_based_addressing:
            # key [MEMORY_CONTENT_BITS] + beta [1] + gate [1] + gamma [1] + shift kernel size [SHIFT_SIZE]
            # All read params = NUM_HEADS * above (but it's not important here)
            num_read_params = (self.num_memory_content_bits +
                               1 + 1 + 1 + self.interface_shift_size)
            # Dictionary with read parameters - used during slicing.
            self.read_param_locations = self.calculate_param_locations(
                {
                    'query_vector': self.num_memory_content_bits,
                    'beta': 1,
                    'gate': 1,
                    'shift': self.interface_shift_size,
                    'gamma': 1},
                "Read")
            assert num_read_params == self.read_param_locations[-1], "Last location must be equal to number of read params."
        else:
            # gamma [1] + shift kernel size [SHIFT_SIZE]
            # All read params = NUM_HEADS * above (but it's not important here)
            num_read_params = (1 + self.interface_shift_size)
            # Dictionary with read parameters - used during slicing.
            self.read_param_locations = self.calculate_param_locations({
                'shift': self.interface_shift_size, 'gamma': 1}, "Read")
            assert num_read_params == self.read_param_locations[-1], "Last location must be equal to number of read params."
        # Forward linear layers that generate parameters of read heads.
        # One independent linear layer per read head.
        self.hidden2read_list = torch.nn.ModuleList()
        for _ in range(self.interface_num_read_heads):
            self.hidden2read_list.append(torch.nn.Linear(
                self.ctrl_hidden_state_size, num_read_params))
        # -------------- WRITE HEAD -----------------#
        # Number/size of write parameters:
        if self.use_content_based_addressing:
            # key [MEMORY_BITS] + beta [1] + gate [1] + gamma [1] +
            # + shift kernel size [SHIFT_SIZE] + erase vector [MEMORY_CONTENT_BITS] + write vector[MEMORY_BITS]
            num_write_params = 3 * self.num_memory_content_bits + \
                1 + 1 + 1 + self.interface_shift_size
            # Write parameters - used during slicing.
            self.write_param_locations = self.calculate_param_locations(
                {
                    'query_vector': self.num_memory_content_bits,
                    'beta': 1,
                    'gate': 1,
                    'shift': self.interface_shift_size,
                    'gamma': 1,
                    'erase_vector': self.num_memory_content_bits,
                    'add_vector': self.num_memory_content_bits},
                "Write")
            assert num_write_params == self.write_param_locations[
                -1], "Last location must be equal to number of write params."
        else:
            # gamma [1] +
            # + shift kernel size [SHIFT_SIZE] + erase vector [MEMORY_CONTENT_BITS] + write vector[MEMORY_BITS]
            num_write_params = 2 * self.num_memory_content_bits + 1 + self.interface_shift_size
            # Write parameters - used during slicing.
            self.write_param_locations = self.calculate_param_locations(
                {'shift': self.interface_shift_size, 'gamma': 1,
                 'erase_vector': self.num_memory_content_bits,
                 'add_vector': self.num_memory_content_bits},
                "Write")
            assert num_write_params == self.write_param_locations[
                -1], "Last location must be equal to number of write params."
        # Forward linear layer that generates parameters of write heads.
        self.hidden2write_params = torch.nn.Linear(
            self.ctrl_hidden_state_size, num_write_params)
def init_state(self, batch_size, num_memory_addresses):
"""
Returns 'zero' (initial) state tuple.
:param batch_size: Size of the batch in given iteraction/epoch.
:param num_memory_addresses: Number of memory addresses.
:returns: Initial state tuple - object of InterfaceStateTuple class.
"""
dtype = AppState().dtype
# Add read head states - one for each read head.
read_state_tuples = []
# Initial attention weights [BATCH_SIZE x MEMORY_ADDRESSES x 1]
# Initialize attention: to address 0.
zh_attention = torch.zeros(
batch_size, num_memory_addresses, 1).type(dtype)
zh_attention[:, 0, 0] = 1
# Initialize gating: to previous attention (i.e. zero-hard).
init_gating = torch.ones(batch_size, 1, 1).type(dtype)
# Initialize shift - to zero.
init_shift = torch.zeros(
batch_size, self.interface_shift_size, 1).type(dtype)
init_shift[:, 1, 0] = 1
for i in range(self.interface_num_read_heads):
read_ht = HeadStateTuple(
zh_attention, zh_attention, init_gating, init_shift)
# Single read head tuple.
read_state_tuples.append(read_ht)
# Single write head tuple.
write_state_tuple = HeadStateTuple(
zh_attention, zh_attention, init_gating, init_shift)
# Return tuple.
interface_state = InterfaceStateTuple(
read_state_tuples, write_state_tuple)
return interface_state
    def forward(self, ctrl_hidden_state_BxH, prev_memory_BxAxC,
                prev_interface_state_tuple):
        """
        Interface forward function: updates all read/write head attentions,
        reads from and writes to the external memory.

        :param ctrl_hidden_state_BxH: a Tensor with controller hidden state of size [BATCH_SIZE x HIDDEN_SIZE]
        :param prev_memory_BxAxC: Previous state of the memory [BATCH_SIZE x MEMORY_ADDRESSES x CONTENT_BITS]
        :param prev_interface_state_tuple: Tuple containing previous read and write attention vectors.
        :returns: List of read vectors [BATCH_SIZE x CONTENT_SIZE], updated memory and state tuple (object of InterfaceStateTuple class).
        """
        # Unpack previous cell state - just to make sure that everything is ok...
        #(prev_read_attentions_BxAx1_H, prev_write_attention_BxAx1) = prev_interface_state_tuple
        # Unpack cell state.
        (prev_read_state_tuples, prev_write_state_tuple) = prev_interface_state_tuple
        (prev_write_attention_BxAx1, _, _, _) = prev_write_state_tuple
        (prev_read_attentions_BxAx1_H, _, _, _) = zip(*prev_read_state_tuples)
        # !! Execute single step !!
        # Read attentions
        read_attentions_BxAx1_H = []
        # List of read vectors - with two dimensions! [BATCH_SIZE x
        # CONTENT_SIZE]
        read_vectors_BxC_H = []
        # List of read tuples - for visualization.
        read_state_tuples = []
        # Read heads.
        for i in range(self.interface_num_read_heads):
            # Calculate parameters of a given read head.
            params_BxP = self.hidden2read_list[i](ctrl_hidden_state_BxH)
            if self.use_content_based_addressing:
                # Split the parameters.
                query_vector_BxC, beta_Bx1, gate_Bx1, shift_BxS, gamma_Bx1 = self.split_params(
                    params_BxP, self.read_param_locations)
                # Update the attention of a given read head.
                read_attention_BxAx1, read_state_tuple = self.update_attention(
                    query_vector_BxC, beta_Bx1, gate_Bx1, shift_BxS, gamma_Bx1,
                    prev_memory_BxAxC, prev_read_attentions_BxAx1_H[i])
            else:
                # Split the parameters.
                shift_BxS, gamma_Bx1 = self.split_params(
                    params_BxP, self.read_param_locations)
                # Update the attention of a given read head.
                # NOTE(review): '_' below is whatever the tuple unpacking
                # above bound last; the CBA arguments are ignored in this
                # branch, but passing explicit None would be clearer.
                read_attention_BxAx1, read_state_tuple = self.update_attention(
                    _, _, _, shift_BxS, gamma_Bx1, prev_memory_BxAxC, prev_read_attentions_BxAx1_H[i])
            # Read vector from memory [BATCH_SIZE x CONTENT_BITS].
            read_vector_BxC = self.read_from_memory(
                read_attention_BxAx1, prev_memory_BxAxC)
            # Save read attentions and vectors in a list.
            read_attentions_BxAx1_H.append(read_attention_BxAx1)
            read_vectors_BxC_H.append(read_vector_BxC)
            # We always collect tuples, as we are using e.g. attentions from
            # them.
            read_state_tuples.append(read_state_tuple)
        # Write head operation.
        # Calculate parameters of the write head.
        params_BxP = self.hidden2write_params(ctrl_hidden_state_BxH)
        if self.use_content_based_addressing:
            # Split the parameters.
            query_vector_BxC, beta_Bx1, gate_Bx1, shift_BxS, gamma_Bx1, erase_vector_BxC, add_vector_BxC = self.split_params(
                params_BxP, self.write_param_locations)
            # Update the attention of the write head.
            write_attention_BxAx1, write_state_tuple = self.update_attention(
                query_vector_BxC, beta_Bx1, gate_Bx1, shift_BxS, gamma_Bx1,
                prev_memory_BxAxC, prev_write_attention_BxAx1)
        else:
            # Split the parameters.
            shift_BxS, gamma_Bx1, erase_vector_BxC, add_vector_BxC = self.split_params(
                params_BxP, self.write_param_locations)
            # Update the attention of the write head.
            write_attention_BxAx1, write_state_tuple = self.update_attention(
                _, _, _, shift_BxS, gamma_Bx1, prev_memory_BxAxC, prev_write_attention_BxAx1)
        # Add 3rd dimensions where required and apply non-linear transformations.
        # Squash erase/add vectors to (0, 1) with a sigmoid.
        # (torch.nn.functional.sigmoid is deprecated in newer torch;
        # torch.sigmoid is the modern equivalent.)
        erase_vector_Bx1xC = torch.nn.functional.sigmoid(erase_vector_BxC).unsqueeze(1)
        add_vector_Bx1xC = torch.nn.functional.sigmoid(add_vector_BxC).unsqueeze(1)
        #logger.debug("write_attention_BxAx1 {}:\n {}".format(write_attention_BxAx1.size(), write_attention_BxAx1))
        # Update the memory.
        memory_BxAxC = self.update_memory(
            write_attention_BxAx1,
            erase_vector_Bx1xC,
            add_vector_Bx1xC,
            prev_memory_BxAxC)
        # Pack current cell state.
        interface_state_tuple = InterfaceStateTuple(
            read_state_tuples, write_state_tuple)
        # Return read vector, new memory state and state tuple.
        return read_vectors_BxC_H, memory_BxAxC, interface_state_tuple
def calculate_param_locations(self, param_sizes_dict, head_name):
"""
Calculates locations of parameters, that will subsequently be used
during parameter splitting.
:param param_sizes_dict: Dictionary containing parameters along with their sizes (in bits/units).
:param head_name: Name of head.
:returns: "Locations" of parameters.
"""
#logger.debug("{} param sizes dict:\n {}".format(head_name, param_sizes_dict))
# Create the parameter lengths and store their cumulative sum
lengths = np.fromiter(param_sizes_dict.values(), dtype=int)
# Store "parameter locations" for further usage.
param_locations = np.cumsum(
np.insert(lengths, 0, 0), dtype=int).tolist()
#logger.debug("{} param locations:\n {}".format(head_name, param_locations))
return param_locations
def split_params(self, params, locations):
"""
Split parameters into list on the basis of locations.
"""
param_splits = [params[..., locations[i]:locations[i + 1]]
for i in range(len(locations) - 1)]
#logger.debug("Splitted params:\n {}".format(param_splits))
return param_splits
    def update_attention(
            self,
            query_vector_BxC,
            beta_Bx1,
            gate_Bx1,
            shift_BxS,
            gamma_Bx1,
            prev_memory_BxAxC,
            prev_attention_BxAx1):
        """
        Updates the attention weights of a single head: optional content-based
        addressing followed by gating, then location-based addressing
        (shift + sharpening).

        :param query_vector_BxC: Query used for similarity calculation in content-based addressing [BATCH_SIZE x CONTENT_BITS] (ignored when CBA is disabled)
        :param beta_Bx1: Strength parameter used in content-based addressing (ignored when CBA is disabled).
        :param gate_Bx1: Raw gate parameter interpolating CBA attention with the previous attention (ignored when CBA is disabled).
        :param shift_BxS: Raw shift-kernel parameters [BATCH_SIZE x SHIFT_SIZE].
        :param gamma_Bx1: Raw sharpening parameter.
        :param prev_memory_BxAxC: tensor containing memory before update [BATCH_SIZE x MEMORY_ADDRESSES x CONTENT_BITS]
        :param prev_attention_BxAx1: previous attention vector [BATCH_SIZE x MEMORY_ADDRESSES x 1]
        :returns: attention vector of size [BATCH_SIZE x ADDRESS_SIZE x 1] and a HeadStateTuple (for visualization).
        """
        # Add 3rd dimensions where required and apply non-linear transformations.
        # Produce location-addressing params: softmax makes the shift kernel
        # a distribution over the SHIFT_SIZE positions.
        shift_BxSx1 = torch.nn.functional.softmax(shift_BxS, dim=1).unsqueeze(2)
        # Gamma - "oneplus" (softplus + 1 guarantees gamma >= 1).
        gamma_Bx1x1 = torch.nn.functional.softplus(gamma_Bx1).unsqueeze(2) + 1
        if self.use_content_based_addressing:
            # Add 3rd dimensions where required and apply non-linear transformations.
            # Produce content-addressing params.
            query_vector_Bx1xC = torch.nn.functional.sigmoid(query_vector_BxC).unsqueeze(1)
            # Beta: oneplus
            beta_Bx1x1 = torch.nn.functional.softplus(beta_Bx1).unsqueeze(2) + 1
            # Produce gating param.
            gate_Bx1x1 = torch.nn.functional.sigmoid(gate_Bx1).unsqueeze(2)
            # Content-based addressing.
            content_attention_BxAx1 = self.content_based_addressing(
                query_vector_Bx1xC, beta_Bx1x1, prev_memory_BxAxC)
            # Gating mechanism - choose between new attention from CBA or
            # attention from previous iteration. [BATCH_SIZE x ADDRESSES x 1].
            attention_after_gating_BxAx1 = gate_Bx1x1 * content_attention_BxAx1 + \
                (torch.ones_like(gate_Bx1x1) - gate_Bx1x1) * prev_attention_BxAx1
            #logger.debug("attention_after_gating_BxAx1 {}:\n {}".format(attention_after_gating_BxAx1.size(), attention_after_gating_BxAx1))
            # Location-based addressing.
            location_attention_BxAx1 = self.location_based_addressing(
                attention_after_gating_BxAx1, shift_BxSx1, gamma_Bx1x1)
        else:
            # Location-based addressing ONLY!
            location_attention_BxAx1 = self.location_based_addressing(
                prev_attention_BxAx1, shift_BxSx1, gamma_Bx1x1)
            # Zero placeholders so the returned tuple has a uniform shape.
            content_attention_BxAx1 = torch.zeros_like(
                location_attention_BxAx1)
            gate_Bx1x1 = torch.zeros_like(gamma_Bx1x1)
        #logger.warning("location_attention_BxAx1 {}:\n {}".format(location_attention_BxAx1.size(), location_attention_BxAx1))
        head_tuple = HeadStateTuple(
            location_attention_BxAx1,
            content_attention_BxAx1,
            gate_Bx1x1,
            shift_BxSx1)
        return location_attention_BxAx1, head_tuple
def content_based_addressing(
self, query_vector_Bx1xC, beta_Bx1x1, prev_memory_BxAxC):
"""
Computes content-based addressing. Uses query vectors for calculation
of similarity.
:param query_vector_Bx1xC: NTM "key" [BATCH_SIZE x 1 x CONTENT_BITS]
:param beta_Bx1x1: key strength [BATCH_SIZE x 1 x 1]
:param prev_memory_BxAxC: tensor containing memory before update [BATCH_SIZE x MEMORY_ADDRESSES x CONTENT_BITS]
:returns: attention of size [BATCH_SIZE x ADDRESS_SIZE x 1]
"""
# Normalize query batch - along content.
norm_query_vector_Bx1xC = torch.nn.functional.normalize(query_vector_Bx1xC, p=2, dim=2)
#logger.debug("norm_query_vector_Bx1xC {}:\n {}".format(norm_query_vector_Bx1xC.size(), norm_query_vector_Bx1xC))
# Normalize memory - along content.
norm_memory_BxAxC = torch.nn.functional.normalize(prev_memory_BxAxC, p=2, dim=2)
#logger.debug("norm_memory_BxAxC {}:\n {}".format(norm_memory_BxAxC.size(), norm_memory_BxAxC))
# Calculate cosine similarity [BATCH_SIZE x MEMORY_ADDRESSES x 1].
similarity_BxAx1 = torch.matmul(
norm_memory_BxAxC, torch.transpose(norm_query_vector_Bx1xC, 1, 2))
#logger.debug("similarity_BxAx1 {}:\n {}".format(similarity_BxAx1.size(), similarity_BxAx1))
# Element-wise multiplication [BATCH_SIZE x MEMORY_ADDRESSES x 1]
strengthtened_similarity_BxAx1 = torch.matmul(
similarity_BxAx1, beta_Bx1x1)
#logger.debug("strengthtened_similarity_BxAx1 {}:\n {}".format(strengthtened_similarity_BxAx1.size(), strengthtened_similarity_BxAx1))
# Calculate attention based on similarity along the "slot dimension"
# [BATCH_SIZE x MEMORY_ADDRESSES x 1].
attention_BxAx1 = torch.nn.functional.softmax(strengthtened_similarity_BxAx1, dim=1)
#logger.debug("attention_BxAx1 {}:\n {}".format(attention_BxAx1.size(), attention_BxAx1))
return attention_BxAx1
def location_based_addressing(
self, attention_BxAx1, shift_BxSx1, gamma_Bx1x1):
"""
Computes location-based addressing, i.e. shitfts the head and sharpens.
:param attention_BxAx1: Current attention [BATCH_SIZE x ADDRESS_SIZE x 1]
:param shift_BxSx1: soft shift maks (convolutional kernel) [BATCH_SIZE x SHIFT_SIZE x 1]
:param gamma_Bx1x1: sharpening factor [BATCH_SIZE x 1 x 1]
:returns: attention vector of size [BATCH_SIZE x ADDRESS_SIZE x 1]
"""
# 1. Perform circular convolution.
shifted_attention_BxAx1 = self.circular_convolution(
attention_BxAx1, shift_BxSx1)
# 2. Perform Sharpening.
sharpened_attention_BxAx1 = self.sharpening(
shifted_attention_BxAx1, gamma_Bx1x1)
return sharpened_attention_BxAx1
    def circular_convolution(self, attention_BxAx1, shift_BxSx1):
        """
        Performs circular convolution, i.e. shifts the attention according to
        the given shift vector (convolution mask), wrapping around the ends
        of the address space.

        :param attention_BxAx1: Current attention [BATCH_SIZE x ADDRESS_SIZE x 1]
        :param shift_BxSx1: soft shift mask (convolutional kernel) [BATCH_SIZE x SHIFT_SIZE x 1]
        :returns: attention vector of size [BATCH_SIZE x ADDRESS_SIZE x 1]
        """
        def circular_index(idx, num_addr):
            """
            Calculates the index, taking into consideration the number of
            addresses in memory.

            :param idx: index (single element)
            :param num_addr: number of addresses in memory
            """
            # Wrap negative indices / indices past the end around the ring.
            if idx < 0:
                return num_addr + idx
            elif idx >= num_addr:
                return idx - num_addr
            else:
                return idx
        # Check whether inputs are already on GPU or not.
        #dtype = torch.cuda.LongTensor if attention_BxAx1.is_cuda else torch.LongTensor
        dtype = AppState().LongTensor
        # Get number of memory addresses and batch size.
        batch_size = attention_BxAx1.size(0)
        num_addr = attention_BxAx1.size(1)
        shift_size = self.interface_shift_size
        #logger.debug("shift_BxSx1 {}: {}".format(shift_BxSx1, shift_BxSx1.size()))
        # Create an extended list of indices indicating what elements of the
        # sequence will be where: pad the address ring on both sides so a
        # plain (non-circular) convolution over it behaves circularly.
        ext_indices_tensor = torch.Tensor(
            [circular_index(shift, num_addr)
             for shift in range(
                -shift_size // 2 + 1, num_addr + shift_size // 2)]).type(dtype)
        #logger.debug("ext_indices {}:\n {}".format(ext_indices_tensor.size(), ext_indices_tensor))
        # Use indices for creation of an extended attention vector.
        ext_attention_BxEAx1 = torch.index_select(
            attention_BxAx1, dim=1, index=ext_indices_tensor)
        #logger.debug("ext_attention_BxEAx1 {}:\n {}".format(ext_attention_BxEAx1.size(), ext_attention_BxEAx1))
        # Transpose inputs to convolution (conv1d expects [N x C x L]).
        ext_att_trans_Bx1xEA = torch.transpose(ext_attention_BxEAx1, 1, 2)
        shift_trans_Bx1xS = torch.transpose(shift_BxSx1, 1, 2)
        # Perform convolution for every batch-filter pair.
        # NOTE(review): a per-sample Python loop over the batch; presumably
        # acceptable for small batches, but grouped conv1d could batch this.
        tmp_attention_list = []
        for b in range(batch_size):
            tmp_attention_list.append(torch.nn.functional.conv1d(ext_att_trans_Bx1xEA.narrow(
                0, b, 1), shift_trans_Bx1xS.narrow(0, b, 1)))
        # Concatenate list into a single tensor.
        shifted_attention_BxAx1 = torch.transpose(
            torch.cat(tmp_attention_list, dim=0), 1, 2)
        #logger.debug("shifted_attention_BxAx1 {}:\n {}".format(shifted_attention_BxAx1.size(), shifted_attention_BxAx1))
        return shifted_attention_BxAx1
def sharpening(self, attention_BxAx1, gamma_Bx1x1):
    """
    Performs attention sharpening.
    :param attention_BxAx1: Current attention [BATCH_SIZE x ADDRESS_SIZE x 1]
    :param gamma_Bx1x1: sharpening factor [BATCH_SIZE x 1 x 1]
    :returns: attention vector of size [BATCH_SIZE x ADDRESS_SIZE x 1]
    """
    # Raise attention to the power gamma (broadcast over addresses);
    # the epsilon keeps pow well-defined when an attention entry is exactly 0.
    sharpened_attention_BxAx1 = (attention_BxAx1 + 1e-12).pow(gamma_Bx1x1)
    # Re-normalize along the address dimension so each batch sums to 1.
    return torch.nn.functional.normalize(sharpened_attention_BxAx1, p=1, dim=1)
def read_from_memory(self, attention_BxAx1, memory_BxAxC):
    """
    Returns 2D tensor of size [BATCH_SIZE x CONTENT_BITS] storing vector
    read from memory given the attention.
    :param attention_BxAx1: Current attention [BATCH_SIZE x ADDRESS_SIZE x 1]
    :param memory_BxAxC: tensor containing memory [BATCH_SIZE x MEMORY_ADDRESSES x CONTENT_BITS]
    :returns: vector read from the memory [BATCH_SIZE x CONTENT_BITS]
    """
    # Batched matrix product: [B x 1 x A] @ [B x A x C] -> [B x 1 x C],
    # i.e. an attention-weighted sum over the memory addresses.
    attention_Bx1xA = attention_BxAx1.transpose(1, 2)
    read_vector_Bx1xC = attention_Bx1xA.matmul(memory_BxAxC)
    # Drop the singleton "row" dimension and return a 2D tensor [B x C].
    return read_vector_Bx1xC.squeeze(dim=1)
def update_memory(self, write_attention_BxAx1,
                  erase_vector_Bx1xC, add_vector_Bx1xC, prev_memory_BxAxC):
    """
    Returns 3D tensor of size [BATCH_SIZE x MEMORY_ADDRESSES x
    CONTENT_BITS] storing new content of the memory.
    :param write_attention_BxAx1: Current write attention [BATCH_SIZE x ADDRESS_SIZE x 1]
    :param erase_vector_Bx1xC: Erase vector [BATCH_SIZE x 1 x CONTENT_BITS]
    :param add_vector_Bx1xC: Add vector [BATCH_SIZE x 1 x CONTENT_BITS]
    :param prev_memory_BxAxC: tensor containing previous state of the memory [BATCH_SIZE x MEMORY_ADDRESSES x CONTENT_BITS]
    :returns: vector read from the memory [BATCH_SIZE x CONTENT_BITS]
    """
    # Outer products: [B x A x 1] @ [B x 1 x C] -> [B x A x C].
    erase_content_BxAxC = write_attention_BxAx1.matmul(erase_vector_Bx1xC)
    add_content_BxAxC = write_attention_BxAx1.matmul(add_vector_Bx1xC)
    # Erase first (keep the non-erased fraction of the old content),
    # then add the new content at the attended addresses.
    memory_BxAxC = prev_memory_BxAxC * (1 - erase_content_BxAxC) + add_content_BxAxC
    return memory_BxAxC
| [
"torch.cat",
"torch.nn.functional.sigmoid",
"torch.nn.functional.normalize",
"miprometheus.utils.app_state.AppState",
"torch.ones",
"numpy.insert",
"torch.nn.Linear",
"torch.zeros",
"torch.matmul",
"torch.zeros_like",
"torch.nn.ModuleList",
"torch.pow",
"torch.ones_like",
"torch.nn.functio... | [((847, 881), 'logging.getLogger', 'logging.getLogger', (['"""NTM-Interface"""'], {}), "('NTM-Interface')\n", (864, 881), False, 'import logging\n'), ((979, 1069), 'collections.namedtuple', 'collections.namedtuple', (['"""HeadStateTuple"""', "('attention', 'similarity', 'gate', 'shift')"], {}), "('HeadStateTuple', ('attention', 'similarity', 'gate',\n 'shift'))\n", (1001, 1069), False, 'import collections\n'), ((1270, 1345), 'collections.namedtuple', 'collections.namedtuple', (['"""InterfaceStateTuple"""', "('read_heads', 'write_head')"], {}), "('InterfaceStateTuple', ('read_heads', 'write_head'))\n", (1292, 1345), False, 'import collections\n'), ((4388, 4409), 'torch.nn.ModuleList', 'torch.nn.ModuleList', ([], {}), '()\n', (4407, 4409), False, 'import torch\n'), ((6541, 6603), 'torch.nn.Linear', 'torch.nn.Linear', (['self.ctrl_hidden_state_size', 'num_write_params'], {}), '(self.ctrl_hidden_state_size, num_write_params)\n', (6556, 6603), False, 'import torch\n'), ((18359, 18420), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['query_vector_Bx1xC'], {'p': '(2)', 'dim': '(2)'}), '(query_vector_Bx1xC, p=2, dim=2)\n', (18388, 18420), False, 'import torch\n'), ((18617, 18677), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['prev_memory_BxAxC'], {'p': '(2)', 'dim': '(2)'}), '(prev_memory_BxAxC, p=2, dim=2)\n', (18646, 18677), False, 'import torch\n'), ((19197, 19239), 'torch.matmul', 'torch.matmul', (['similarity_BxAx1', 'beta_Bx1x1'], {}), '(similarity_BxAx1, beta_Bx1x1)\n', (19209, 19239), False, 'import torch\n'), ((19548, 19614), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['strengthtened_similarity_BxAx1'], {'dim': '(1)'}), '(strengthtened_similarity_BxAx1, dim=1)\n', (19575, 19614), False, 'import torch\n'), ((22516, 22584), 'torch.index_select', 'torch.index_select', (['attention_BxAx1'], {'dim': '(1)', 'index': 'ext_indices_tensor'}), '(attention_BxAx1, dim=1, 
index=ext_indices_tensor)\n', (22534, 22584), False, 'import torch\n'), ((22787, 22830), 'torch.transpose', 'torch.transpose', (['ext_attention_BxEAx1', '(1)', '(2)'], {}), '(ext_attention_BxEAx1, 1, 2)\n', (22802, 22830), False, 'import torch\n'), ((22859, 22893), 'torch.transpose', 'torch.transpose', (['shift_BxSx1', '(1)', '(2)'], {}), '(shift_BxSx1, 1, 2)\n', (22874, 22893), False, 'import torch\n'), ((23890, 23937), 'torch.pow', 'torch.pow', (['(attention_BxAx1 + 1e-12)', 'gamma_Bx1x1'], {}), '(attention_BxAx1 + 1e-12, gamma_Bx1x1)\n', (23899, 23937), False, 'import torch\n'), ((24118, 24180), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['pow_attention_BxAx1'], {'p': '(1)', 'dim': '(1)'}), '(pow_attention_BxAx1, p=1, dim=1)\n', (24147, 24180), False, 'import torch\n'), ((26055, 26108), 'torch.matmul', 'torch.matmul', (['write_attention_BxAx1', 'add_vector_Bx1xC'], {}), '(write_attention_BxAx1, add_vector_Bx1xC)\n', (26067, 26108), False, 'import torch\n'), ((6980, 6990), 'miprometheus.utils.app_state.AppState', 'AppState', ([], {}), '()\n', (6988, 6990), False, 'from miprometheus.utils.app_state import AppState\n'), ((17258, 17300), 'torch.zeros_like', 'torch.zeros_like', (['location_attention_BxAx1'], {}), '(location_attention_BxAx1)\n', (17274, 17300), False, 'import torch\n'), ((17343, 17372), 'torch.zeros_like', 'torch.zeros_like', (['gamma_Bx1x1'], {}), '(gamma_Bx1x1)\n', (17359, 17372), False, 'import torch\n'), ((18931, 18977), 'torch.transpose', 'torch.transpose', (['norm_query_vector_Bx1xC', '(1)', '(2)'], {}), '(norm_query_vector_Bx1xC, 1, 2)\n', (18946, 18977), False, 'import torch\n'), ((21701, 21711), 'miprometheus.utils.app_state.AppState', 'AppState', ([], {}), '()\n', (21709, 21711), False, 'from miprometheus.utils.app_state import AppState\n'), ((23290, 23326), 'torch.cat', 'torch.cat', (['tmp_attention_list'], {'dim': '(0)'}), '(tmp_attention_list, dim=0)\n', (23299, 23326), False, 'import torch\n'), ((24861, 24899), 
'torch.transpose', 'torch.transpose', (['attention_BxAx1', '(1)', '(2)'], {}), '(attention_BxAx1, 1, 2)\n', (24876, 24899), False, 'import torch\n'), ((25929, 25984), 'torch.matmul', 'torch.matmul', (['write_attention_BxAx1', 'erase_vector_Bx1xC'], {}), '(write_attention_BxAx1, erase_vector_Bx1xC)\n', (25941, 25984), False, 'import torch\n'), ((4506, 4567), 'torch.nn.Linear', 'torch.nn.Linear', (['self.ctrl_hidden_state_size', 'num_read_params'], {}), '(self.ctrl_hidden_state_size, num_read_params)\n', (4521, 4567), False, 'import torch\n'), ((7229, 7277), 'torch.zeros', 'torch.zeros', (['batch_size', 'num_memory_addresses', '(1)'], {}), '(batch_size, num_memory_addresses, 1)\n', (7240, 7277), False, 'import torch\n'), ((7429, 7457), 'torch.ones', 'torch.ones', (['batch_size', '(1)', '(1)'], {}), '(batch_size, 1, 1)\n', (7439, 7457), False, 'import torch\n'), ((7530, 7583), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.interface_shift_size', '(1)'], {}), '(batch_size, self.interface_shift_size, 1)\n', (7541, 7583), False, 'import torch\n'), ((12531, 12576), 'torch.nn.functional.sigmoid', 'torch.nn.functional.sigmoid', (['erase_vector_BxC'], {}), '(erase_vector_BxC)\n', (12558, 12576), False, 'import torch\n'), ((12617, 12660), 'torch.nn.functional.sigmoid', 'torch.nn.functional.sigmoid', (['add_vector_BxC'], {}), '(add_vector_BxC)\n', (12644, 12660), False, 'import torch\n'), ((15539, 15584), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['shift_BxS'], {'dim': '(1)'}), '(shift_BxS, dim=1)\n', (15566, 15584), False, 'import torch\n'), ((13998, 14022), 'numpy.insert', 'np.insert', (['lengths', '(0)', '(0)'], {}), '(lengths, 0, 0)\n', (14007, 14022), True, 'import numpy as np\n'), ((15647, 15686), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['gamma_Bx1'], {}), '(gamma_Bx1)\n', (15675, 15686), False, 'import torch\n'), ((15919, 15964), 'torch.nn.functional.sigmoid', 'torch.nn.functional.sigmoid', (['query_vector_BxC'], {}), 
'(query_vector_BxC)\n', (15946, 15964), False, 'import torch\n'), ((16148, 16185), 'torch.nn.functional.sigmoid', 'torch.nn.functional.sigmoid', (['gate_Bx1'], {}), '(gate_Bx1)\n', (16175, 16185), False, 'import torch\n'), ((16031, 16069), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['beta_Bx1'], {}), '(beta_Bx1)\n', (16059, 16069), False, 'import torch\n'), ((16632, 16659), 'torch.ones_like', 'torch.ones_like', (['gate_Bx1x1'], {}), '(gate_Bx1x1)\n', (16647, 16659), False, 'import torch\n')] |
import numpy as np
# matplotlib don't use Xwindows backend (must be before pyplot import)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def analyse_ekf(estimator_status, ekf2_innovations, sensor_preflight, check_levels,
plot=False, output_plot_filename=None, late_start_early_ending=True):
if plot:
# create summary plots
# save the plots to PDF
pp = PdfPages(output_plot_filename)
# plot IMU consistency data
if ('accel_inconsistency_m_s_s' in sensor_preflight.keys()) and (
'gyro_inconsistency_rad_s' in sensor_preflight.keys()):
plt.figure(0, figsize=(20, 13))
plt.subplot(2, 1, 1)
plt.plot(sensor_preflight['accel_inconsistency_m_s_s'], 'b')
plt.title('IMU Consistency Check Levels')
plt.ylabel('acceleration (m/s/s)')
plt.xlabel('data index')
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(sensor_preflight['gyro_inconsistency_rad_s'], 'b')
plt.ylabel('angular rate (rad/s)')
plt.xlabel('data index')
pp.savefig()
plt.close(0)
# generate max, min and 1-std metadata
innov_time = 1e-6 * ekf2_innovations['timestamp']
status_time = 1e-6 * estimator_status['timestamp']
if plot:
# vertical velocity and position innovations
plt.figure(1, figsize=(20, 13))
# generate metadata for velocity innovations
innov_2_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[2]'])
innov_2_max_time = innov_time[innov_2_max_arg]
innov_2_max = np.amax(ekf2_innovations['vel_pos_innov[2]'])
innov_2_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[2]'])
innov_2_min_time = innov_time[innov_2_min_arg]
innov_2_min = np.amin(ekf2_innovations['vel_pos_innov[2]'])
s_innov_2_max = str(round(innov_2_max, 2))
s_innov_2_min = str(round(innov_2_min, 2))
# s_innov_2_std = str(round(np.std(ekf2_innovations['vel_pos_innov[2]']),2))
# generate metadata for position innovations
innov_5_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[5]'])
innov_5_max_time = innov_time[innov_5_max_arg]
innov_5_max = np.amax(ekf2_innovations['vel_pos_innov[5]'])
innov_5_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[5]'])
innov_5_min_time = innov_time[innov_5_min_arg]
innov_5_min = np.amin(ekf2_innovations['vel_pos_innov[5]'])
s_innov_5_max = str(round(innov_5_max, 2))
s_innov_5_min = str(round(innov_5_min, 2))
# s_innov_5_std = str(round(np.std(ekf2_innovations['vel_pos_innov[5]']),2))
# generate plot for vertical velocity innovations
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[2]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[2]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[2]']), 'r')
plt.title('Vertical Innovations')
plt.ylabel('Down Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_2_max_time, innov_2_max, 'max=' + s_innov_2_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_2_min_time, innov_2_min, 'min=' + s_innov_2_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_2_std],loc='upper left',frameon=False)
# generate plot for vertical position innovations
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[5]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[5]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[5]']), 'r')
plt.ylabel('Down Pos (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_5_max_time, innov_5_max, 'max=' + s_innov_5_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_5_min_time, innov_5_min, 'min=' + s_innov_5_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_5_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(1)
# horizontal velocity innovations
plt.figure(2, figsize=(20, 13))
# generate North axis metadata
innov_0_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[0]'])
innov_0_max_time = innov_time[innov_0_max_arg]
innov_0_max = np.amax(ekf2_innovations['vel_pos_innov[0]'])
innov_0_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[0]'])
innov_0_min_time = innov_time[innov_0_min_arg]
innov_0_min = np.amin(ekf2_innovations['vel_pos_innov[0]'])
s_innov_0_max = str(round(innov_0_max, 2))
s_innov_0_min = str(round(innov_0_min, 2))
# s_innov_0_std = str(round(np.std(ekf2_innovations['vel_pos_innov[0]']),2))
# Generate East axis metadata
innov_1_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[1]'])
innov_1_max_time = innov_time[innov_1_max_arg]
innov_1_max = np.amax(ekf2_innovations['vel_pos_innov[1]'])
innov_1_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[1]'])
innov_1_min_time = innov_time[innov_1_min_arg]
innov_1_min = np.amin(ekf2_innovations['vel_pos_innov[1]'])
s_innov_1_max = str(round(innov_1_max, 2))
s_innov_1_min = str(round(innov_1_min, 2))
# s_innov_1_std = str(round(np.std(ekf2_innovations['vel_pos_innov[1]']),2))
# draw plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[0]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[0]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[0]']), 'r')
plt.title('Horizontal Velocity Innovations')
plt.ylabel('North Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_0_std],loc='upper left',frameon=False)
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[1]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[1]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[1]']), 'r')
plt.ylabel('East Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_1_max_time, innov_1_max, 'max=' + s_innov_1_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_1_min_time, innov_1_min, 'min=' + s_innov_1_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_1_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(2)
# horizontal position innovations
plt.figure(3, figsize=(20, 13))
# generate North axis metadata
innov_3_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[3]'])
innov_3_max_time = innov_time[innov_3_max_arg]
innov_3_max = np.amax(ekf2_innovations['vel_pos_innov[3]'])
innov_3_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[3]'])
innov_3_min_time = innov_time[innov_3_min_arg]
innov_3_min = np.amin(ekf2_innovations['vel_pos_innov[3]'])
s_innov_3_max = str(round(innov_3_max, 2))
s_innov_3_min = str(round(innov_3_min, 2))
# s_innov_3_std = str(round(np.std(ekf2_innovations['vel_pos_innov[3]']),2))
# generate East axis metadata
innov_4_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[4]'])
innov_4_max_time = innov_time[innov_4_max_arg]
innov_4_max = np.amax(ekf2_innovations['vel_pos_innov[4]'])
innov_4_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[4]'])
innov_4_min_time = innov_time[innov_4_min_arg]
innov_4_min = np.amin(ekf2_innovations['vel_pos_innov[4]'])
s_innov_4_max = str(round(innov_4_max, 2))
s_innov_4_min = str(round(innov_4_min, 2))
# s_innov_4_std = str(round(np.std(ekf2_innovations['vel_pos_innov[4]']),2))
# generate plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[3]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[3]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[3]']), 'r')
plt.title('Horizontal Position Innovations')
plt.ylabel('North Pos (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_3_max_time, innov_3_max, 'max=' + s_innov_3_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_3_min_time, innov_3_min, 'min=' + s_innov_3_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_3_std],loc='upper left',frameon=False)
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[4]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[4]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[4]']), 'r')
plt.ylabel('East Pos (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_4_max_time, innov_4_max, 'max=' + s_innov_4_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_4_min_time, innov_4_min, 'min=' + s_innov_4_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_4_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(3)
# magnetometer innovations
plt.figure(4, figsize=(20, 13))
# generate X axis metadata
innov_0_max_arg = np.argmax(ekf2_innovations['mag_innov[0]'])
innov_0_max_time = innov_time[innov_0_max_arg]
innov_0_max = np.amax(ekf2_innovations['mag_innov[0]'])
innov_0_min_arg = np.argmin(ekf2_innovations['mag_innov[0]'])
innov_0_min_time = innov_time[innov_0_min_arg]
innov_0_min = np.amin(ekf2_innovations['mag_innov[0]'])
s_innov_0_max = str(round(innov_0_max, 3))
s_innov_0_min = str(round(innov_0_min, 3))
# s_innov_0_std = str(round(np.std(ekf2_innovations['mag_innov[0]']),3))
# generate Y axis metadata
innov_1_max_arg = np.argmax(ekf2_innovations['mag_innov[1]'])
innov_1_max_time = innov_time[innov_1_max_arg]
innov_1_max = np.amax(ekf2_innovations['mag_innov[1]'])
innov_1_min_arg = np.argmin(ekf2_innovations['mag_innov[1]'])
innov_1_min_time = innov_time[innov_1_min_arg]
innov_1_min = np.amin(ekf2_innovations['mag_innov[1]'])
s_innov_1_max = str(round(innov_1_max, 3))
s_innov_1_min = str(round(innov_1_min, 3))
# s_innov_1_std = str(round(np.std(ekf2_innovations['mag_innov[1]']),3))
# generate Z axis metadata
innov_2_max_arg = np.argmax(ekf2_innovations['mag_innov[2]'])
innov_2_max_time = innov_time[innov_2_max_arg]
innov_2_max = np.amax(ekf2_innovations['mag_innov[2]'])
innov_2_min_arg = np.argmin(ekf2_innovations['mag_innov[2]'])
innov_2_min_time = innov_time[innov_2_min_arg]
innov_2_min = np.amin(ekf2_innovations['mag_innov[2]'])
s_innov_2_max = str(round(innov_2_max, 3))
s_innov_2_min = str(round(innov_2_min, 3))
# s_innov_2_std = str(round(np.std(ekf2_innovations['mag_innov[0]']),3))
# draw plots
plt.subplot(3, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['mag_innov[0]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['mag_innov_var[0]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['mag_innov_var[0]']), 'r')
plt.title('Magnetometer Innovations')
plt.ylabel('X (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_0_std],loc='upper left',frameon=False)
plt.subplot(3, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['mag_innov[1]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['mag_innov_var[1]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['mag_innov_var[1]']), 'r')
plt.ylabel('Y (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_1_max_time, innov_1_max, 'max=' + s_innov_1_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_1_min_time, innov_1_min, 'min=' + s_innov_1_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_1_std],loc='upper left',frameon=False)
plt.subplot(3, 1, 3)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['mag_innov[2]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['mag_innov_var[2]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['mag_innov_var[2]']), 'r')
plt.ylabel('Z (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_2_max_time, innov_2_max, 'max=' + s_innov_2_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_2_min_time, innov_2_min, 'min=' + s_innov_2_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_2_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(4)
# magnetic heading innovations
plt.figure(5, figsize=(20, 13))
# generate metadata
innov_0_max_arg = np.argmax(ekf2_innovations['heading_innov'])
innov_0_max_time = innov_time[innov_0_max_arg]
innov_0_max = np.amax(ekf2_innovations['heading_innov'])
innov_0_min_arg = np.argmin(ekf2_innovations['heading_innov'])
innov_0_min_time = innov_time[innov_0_min_arg]
innov_0_min = np.amin(ekf2_innovations['heading_innov'])
s_innov_0_max = str(round(innov_0_max, 3))
s_innov_0_min = str(round(innov_0_min, 3))
# s_innov_0_std = str(round(np.std(ekf2_innovations['heading_innov']),3))
# draw plot
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['heading_innov'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['heading_innov_var']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['heading_innov_var']), 'r')
plt.title('Magnetic Heading Innovations')
plt.ylabel('Heaing (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_0_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(5)
# air data innovations
plt.figure(6, figsize=(20, 13))
# generate airspeed metadata
airspeed_innov_max_arg = np.argmax(ekf2_innovations['airspeed_innov'])
airspeed_innov_max_time = innov_time[airspeed_innov_max_arg]
airspeed_innov_max = np.amax(ekf2_innovations['airspeed_innov'])
airspeed_innov_min_arg = np.argmin(ekf2_innovations['airspeed_innov'])
airspeed_innov_min_time = innov_time[airspeed_innov_min_arg]
airspeed_innov_min = np.amin(ekf2_innovations['airspeed_innov'])
s_airspeed_innov_max = str(round(airspeed_innov_max, 3))
s_airspeed_innov_min = str(round(airspeed_innov_min, 3))
# generate sideslip metadata
beta_innov_max_arg = np.argmax(ekf2_innovations['beta_innov'])
beta_innov_max_time = innov_time[beta_innov_max_arg]
beta_innov_max = np.amax(ekf2_innovations['beta_innov'])
beta_innov_min_arg = np.argmin(ekf2_innovations['beta_innov'])
beta_innov_min_time = innov_time[beta_innov_min_arg]
beta_innov_min = np.amin(ekf2_innovations['beta_innov'])
s_beta_innov_max = str(round(beta_innov_max, 3))
s_beta_innov_min = str(round(beta_innov_min, 3))
# draw plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['airspeed_innov'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['airspeed_innov_var']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['airspeed_innov_var']), 'r')
plt.title('True Airspeed Innovations')
plt.ylabel('innovation (m/sec)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(airspeed_innov_max_time, airspeed_innov_max, 'max=' + s_airspeed_innov_max, fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.text(airspeed_innov_min_time, airspeed_innov_min, 'min=' + s_airspeed_innov_min, fontsize=12,
horizontalalignment='left', verticalalignment='top')
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['beta_innov'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['beta_innov_var']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['beta_innov_var']), 'r')
plt.title('Sythetic Sideslip Innovations')
plt.ylabel('innovation (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(beta_innov_max_time, beta_innov_max, 'max=' + s_beta_innov_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(beta_innov_min_time, beta_innov_min, 'min=' + s_beta_innov_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
pp.savefig()
plt.close(6)
# optical flow innovations
plt.figure(7, figsize=(20, 13))
# generate X axis metadata
flow_innov_x_max_arg = np.argmax(ekf2_innovations['flow_innov[0]'])
flow_innov_x_max_time = innov_time[flow_innov_x_max_arg]
flow_innov_x_max = np.amax(ekf2_innovations['flow_innov[0]'])
flow_innov_x_min_arg = np.argmin(ekf2_innovations['flow_innov[0]'])
flow_innov_x_min_time = innov_time[flow_innov_x_min_arg]
flow_innov_x_min = np.amin(ekf2_innovations['flow_innov[0]'])
s_flow_innov_x_max = str(round(flow_innov_x_max, 3))
s_flow_innov_x_min = str(round(flow_innov_x_min, 3))
# s_flow_innov_x_std = str(round(np.std(ekf2_innovations['flow_innov[0]']),3))
# generate Y axis metadata
flow_innov_y_max_arg = np.argmax(ekf2_innovations['flow_innov[1]'])
flow_innov_y_max_time = innov_time[flow_innov_y_max_arg]
flow_innov_y_max = np.amax(ekf2_innovations['flow_innov[1]'])
flow_innov_y_min_arg = np.argmin(ekf2_innovations['flow_innov[1]'])
flow_innov_y_min_time = innov_time[flow_innov_y_min_arg]
flow_innov_y_min = np.amin(ekf2_innovations['flow_innov[1]'])
s_flow_innov_y_max = str(round(flow_innov_y_max, 3))
s_flow_innov_y_min = str(round(flow_innov_y_min, 3))
# s_flow_innov_y_std = str(round(np.std(ekf2_innovations['flow_innov[1]']),3))
# draw plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['flow_innov[0]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['flow_innov_var[0]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['flow_innov_var[0]']), 'r')
plt.title('Optical Flow Innovations')
plt.ylabel('X (rad/sec)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(flow_innov_x_max_time, flow_innov_x_max, 'max=' + s_flow_innov_x_max, fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.text(flow_innov_x_min_time, flow_innov_x_min, 'min=' + s_flow_innov_x_min, fontsize=12,
horizontalalignment='left', verticalalignment='top')
# plt.legend(['std='+s_flow_innov_x_std],loc='upper left',frameon=False)
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['flow_innov[1]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['flow_innov_var[1]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['flow_innov_var[1]']), 'r')
plt.ylabel('Y (rad/sec)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(flow_innov_y_max_time, flow_innov_y_max, 'max=' + s_flow_innov_y_max, fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.text(flow_innov_y_min_time, flow_innov_y_min, 'min=' + s_flow_innov_y_min, fontsize=12,
horizontalalignment='left', verticalalignment='top')
# plt.legend(['std='+s_flow_innov_y_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(7)
# generate metadata for the normalised innovation consistency test levels
# a value > 1.0 means the measurement data for that test has been rejected by the EKF
# magnetometer data
mag_test_max_arg = np.argmax(estimator_status['mag_test_ratio'])
mag_test_max_time = status_time[mag_test_max_arg]
mag_test_max = np.amax(estimator_status['mag_test_ratio'])
mag_test_mean = np.mean(estimator_status['mag_test_ratio'])
# velocity data (GPS)
vel_test_max_arg = np.argmax(estimator_status['vel_test_ratio'])
vel_test_max_time = status_time[vel_test_max_arg]
vel_test_max = np.amax(estimator_status['vel_test_ratio'])
vel_test_mean = np.mean(estimator_status['vel_test_ratio'])
# horizontal position data (GPS or external vision)
pos_test_max_arg = np.argmax(estimator_status['pos_test_ratio'])
pos_test_max_time = status_time[pos_test_max_arg]
pos_test_max = np.amax(estimator_status['pos_test_ratio'])
pos_test_mean = np.mean(estimator_status['pos_test_ratio'])
# height data (Barometer, GPS or rangefinder)
hgt_test_max_arg = np.argmax(estimator_status['hgt_test_ratio'])
hgt_test_max_time = status_time[hgt_test_max_arg]
hgt_test_max = np.amax(estimator_status['hgt_test_ratio'])
hgt_test_mean = np.mean(estimator_status['hgt_test_ratio'])
# airspeed data
tas_test_max_arg = np.argmax(estimator_status['tas_test_ratio'])
tas_test_max_time = status_time[tas_test_max_arg]
tas_test_max = np.amax(estimator_status['tas_test_ratio'])
tas_test_mean = np.mean(estimator_status['tas_test_ratio'])
# height above ground data (rangefinder)
hagl_test_max_arg = np.argmax(estimator_status['hagl_test_ratio'])
hagl_test_max_time = status_time[hagl_test_max_arg]
hagl_test_max = np.amax(estimator_status['hagl_test_ratio'])
hagl_test_mean = np.mean(estimator_status['hagl_test_ratio'])
if plot:
# plot normalised innovation test levels
# (ratios are innovation magnitude relative to the check gate, so values < 1.0 pass)
plt.figure(8, figsize=(20, 13))
if tas_test_max == 0.0:
    # no airspeed test data recorded - drop the TAS subplot
    n_plots = 3
else:
    n_plots = 4
plt.subplot(n_plots, 1, 1)
plt.plot(status_time, estimator_status['mag_test_ratio'], 'b')
plt.title('Normalised Innovation Test Levels')
plt.ylabel('mag')
plt.xlabel('time (sec)')
plt.grid()
# annotate peak and mean of the magnetometer test ratio at the peak location
plt.text(mag_test_max_time, mag_test_max,
         'max=' + str(round(mag_test_max, 2)) + ' , mean=' + str(round(mag_test_mean, 2)), fontsize=12,
         horizontalalignment='left', verticalalignment='bottom', color='b')
plt.subplot(n_plots, 1, 2)
plt.plot(status_time, estimator_status['vel_test_ratio'], 'b')
plt.plot(status_time, estimator_status['pos_test_ratio'], 'r')
plt.ylabel('vel,pos')
plt.xlabel('time (sec)')
plt.grid()
plt.text(vel_test_max_time, vel_test_max,
         'vel max=' + str(round(vel_test_max, 2)) + ' , mean=' + str(round(vel_test_mean, 2)), fontsize=12,
         horizontalalignment='left', verticalalignment='bottom', color='b')
plt.text(pos_test_max_time, pos_test_max,
         'pos max=' + str(round(pos_test_max, 2)) + ' , mean=' + str(round(pos_test_mean, 2)), fontsize=12,
         horizontalalignment='left', verticalalignment='bottom', color='r')
plt.subplot(n_plots, 1, 3)
plt.plot(status_time, estimator_status['hgt_test_ratio'], 'b')
plt.ylabel('hgt')
plt.xlabel('time (sec)')
plt.grid()
plt.text(hgt_test_max_time, hgt_test_max,
         'hgt max=' + str(round(hgt_test_max, 2)) + ' , mean=' + str(round(hgt_test_mean, 2)), fontsize=12,
         horizontalalignment='left', verticalalignment='bottom', color='b')
# overlay the height-above-ground test data when it was active during the log
if hagl_test_max > 0.0:
    plt.plot(status_time, estimator_status['hagl_test_ratio'], 'r')
    plt.text(hagl_test_max_time, hagl_test_max,
             'hagl max=' + str(round(hagl_test_max, 2)) + ' , mean=' + str(round(hagl_test_mean, 2)), fontsize=12,
             horizontalalignment='left', verticalalignment='bottom', color='r')
    plt.ylabel('hgt,HAGL')
if n_plots == 4:
    # airspeed test data present - add the TAS subplot
    plt.subplot(n_plots, 1, 4)
    plt.plot(status_time, estimator_status['tas_test_ratio'], 'b')
    plt.ylabel('TAS')
    plt.xlabel('time (sec)')
    plt.grid()
    plt.text(tas_test_max_time, tas_test_max,
             'max=' + str(round(tas_test_max, 2)) + ' , mean=' + str(round(tas_test_mean, 2)), fontsize=12,
             horizontalalignment='left', verticalalignment='bottom', color='b')
pp.savefig()
plt.close(8)
# extract control mode metadata from estimator_status.control_mode_flags
# each bit of the bitmask is expanded into its own 0/1 integer time series
# 0 - true if the filter tilt alignment is complete
# 1 - true if the filter yaw alignment is complete
# 2 - true if GPS measurements are being fused
# 3 - true if optical flow measurements are being fused
# 4 - true if a simple magnetic yaw heading is being fused
# 5 - true if 3-axis magnetometer measurement are being fused
# 6 - true if synthetic magnetic declination measurements are being fused
# 7 - true when the vehicle is airborne
# 8 - true when wind velocity is being estimated
# 9 - true when baro height is being fused as a primary height reference
# 10 - true when range finder height is being fused as a primary height reference
# 11 - true when GPS height is being fused as a primary height reference
# 12 - true when local position data from external vision is being fused
# 13 - true when yaw data from external vision measurements is being fused
# 14 - true when height data from external vision measurements is being fused
tilt_aligned = ((2 ** 0 & estimator_status['control_mode_flags']) > 0) * 1
yaw_aligned = ((2 ** 1 & estimator_status['control_mode_flags']) > 0) * 1
using_gps = ((2 ** 2 & estimator_status['control_mode_flags']) > 0) * 1
using_optflow = ((2 ** 3 & estimator_status['control_mode_flags']) > 0) * 1
using_magyaw = ((2 ** 4 & estimator_status['control_mode_flags']) > 0) * 1
using_mag3d = ((2 ** 5 & estimator_status['control_mode_flags']) > 0) * 1
using_magdecl = ((2 ** 6 & estimator_status['control_mode_flags']) > 0) * 1
airborne = ((2 ** 7 & estimator_status['control_mode_flags']) > 0) * 1
estimating_wind = ((2 ** 8 & estimator_status['control_mode_flags']) > 0) * 1
using_barohgt = ((2 ** 9 & estimator_status['control_mode_flags']) > 0) * 1
using_rnghgt = ((2 ** 10 & estimator_status['control_mode_flags']) > 0) * 1
using_gpshgt = ((2 ** 11 & estimator_status['control_mode_flags']) > 0) * 1
using_evpos = ((2 ** 12 & estimator_status['control_mode_flags']) > 0) * 1
using_evyaw = ((2 ** 13 & estimator_status['control_mode_flags']) > 0) * 1
using_evhgt = ((2 ** 14 & estimator_status['control_mode_flags']) > 0) * 1
# define flags for starting and finishing in air
b_starts_in_air = False
b_finishes_in_air = False
# calculate in-air transition time
if (np.amin(airborne) < 0.5) and (np.amax(airborne) > 0.5):
    # the log contains both on-ground and in-air data: locate the 0 -> 1 step
    in_air_transition_time_arg = np.argmax(np.diff(airborne))
    in_air_transition_time = status_time[in_air_transition_time_arg]
elif (np.amax(airborne) > 0.5):
    # airborne for the whole log: use the log start time as the transition
    in_air_transition_time = np.amin(status_time)
    print('log starts while in-air at ' + str(round(in_air_transition_time, 1)) + ' sec')
    b_starts_in_air = True
else:
    # never airborne - no transition exists
    in_air_transition_time = float('NaN')
    print('always on ground')
# calculate on-ground transition time
if (np.amin(np.diff(airborne)) < 0.0):
    # a 1 -> 0 step exists: locate it
    on_ground_transition_time_arg = np.argmin(np.diff(airborne))
    on_ground_transition_time = status_time[on_ground_transition_time_arg]
elif (np.amax(airborne) > 0.5):
    # still airborne at the end of the log: use the log end time
    on_ground_transition_time = np.amax(status_time)
    print('log finishes while in-air at ' + str(round(on_ground_transition_time, 1)) + ' sec')
    b_finishes_in_air = True
else:
    on_ground_transition_time = float('NaN')
    print('always on ground')
# in-air duration is only defined when both transitions were found in the log
if (np.amax(np.diff(airborne)) > 0.5) and (np.amin(np.diff(airborne)) < -0.5):
    if ((on_ground_transition_time - in_air_transition_time) > 0.0):
        in_air_duration = on_ground_transition_time - in_air_transition_time
    else:
        in_air_duration = float('NaN')
else:
    in_air_duration = float('NaN')
# locate the first 0 -> 1 transition of each status flag and the time it occurred
def _rising_edge(flag_series):
    # argmax of the first difference gives the index of the first rising step
    idx = np.argmax(np.diff(flag_series))
    return idx, status_time[idx]

# alignment completion times
tilt_align_time_arg, tilt_align_time = _rising_edge(tilt_aligned)
yaw_align_time_arg, yaw_align_time = _rising_edge(yaw_aligned)
# position aiding start times
gps_aid_time_arg, gps_aid_time = _rising_edge(using_gps)
optflow_aid_time_arg, optflow_aid_time = _rising_edge(using_optflow)
evpos_aid_time_arg, evpos_aid_time = _rising_edge(using_evpos)
# height aiding start times
barohgt_aid_time_arg, barohgt_aid_time = _rising_edge(using_barohgt)
gpshgt_aid_time_arg, gpshgt_aid_time = _rising_edge(using_gpshgt)
rnghgt_aid_time_arg, rnghgt_aid_time = _rising_edge(using_rnghgt)
evhgt_aid_time_arg, evhgt_aid_time = _rising_edge(using_evhgt)
# magnetometer aiding start times
using_magyaw_time_arg, using_magyaw_time = _rising_edge(using_magyaw)
using_mag3d_time_arg, using_mag3d_time = _rising_edge(using_mag3d)
using_magdecl_time_arg, using_magdecl_time = _rising_edge(using_magdecl)
if plot:
    # control mode summary plot A
    plt.figure(9, figsize=(20, 13))
    # subplot for alignment completion
    plt.subplot(4, 1, 1)
    plt.title('EKF Control Status - Figure A')
    plt.plot(status_time, tilt_aligned, 'b')
    plt.plot(status_time, yaw_aligned, 'r')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('aligned')
    plt.grid()
    # a flag already set at the start of the log means alignment happened pre-log
    if np.amin(tilt_aligned) > 0:
        plt.text(tilt_align_time, 0.5, 'no pre-arm data - cannot calculate alignment completion times', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='black')
    else:
        plt.text(tilt_align_time, 0.33, 'tilt alignment at ' + str(round(tilt_align_time, 1)) + ' sec', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='b')
        # bug fix: the yaw annotation previously displayed tilt_align_time
        plt.text(yaw_align_time, 0.67, 'yaw alignment at ' + str(round(yaw_align_time, 1)) + ' sec', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='r')
    # subplot for position aiding
    plt.subplot(4, 1, 2)
    plt.plot(status_time, using_gps, 'b')
    plt.plot(status_time, using_optflow, 'r')
    plt.plot(status_time, using_evpos, 'g')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('pos aiding')
    plt.grid()
    # a flag already set at the start of the log means the aiding start time
    # pre-dates the log and cannot be determined from the data
    if np.amin(using_gps) > 0:
        plt.text(gps_aid_time, 0.25, 'no pre-arm data - cannot calculate GPS aiding start time', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='b')
    elif np.amax(using_gps) > 0:
        plt.text(gps_aid_time, 0.25, 'GPS aiding at ' + str(round(gps_aid_time, 1)) + ' sec', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='b')
    if np.amin(using_optflow) > 0:
        plt.text(optflow_aid_time, 0.50, 'no pre-arm data - cannot calculate optical flow aiding start time',
                 fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
    elif np.amax(using_optflow) > 0:
        plt.text(optflow_aid_time, 0.50, 'optical flow aiding at ' + str(round(optflow_aid_time, 1)) + ' sec',
                 fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
    if np.amin(using_evpos) > 0:
        plt.text(evpos_aid_time, 0.75, 'no pre-arm data - cannot calculate external vision aiding start time',
                 fontsize=12, horizontalalignment='left', verticalalignment='center', color='g')
    elif np.amax(using_evpos) > 0:
        plt.text(evpos_aid_time, 0.75, 'external vision aiding at ' + str(round(evpos_aid_time, 1)) + ' sec',
                 fontsize=12, horizontalalignment='left', verticalalignment='center', color='g')
# subplot for height aiding
plt.subplot(4, 1, 3)
plt.plot(status_time, using_barohgt, 'b')
plt.plot(status_time, using_gpshgt, 'r')
plt.plot(status_time, using_rnghgt, 'g')
plt.plot(status_time, using_evhgt, 'c')
plt.ylim(-0.1, 1.1)
plt.ylabel('hgt aiding')
plt.grid()
if np.amin(using_barohgt) > 0:
plt.text(barohgt_aid_time, 0.2, 'no pre-arm data - cannot calculate Baro aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
elif np.amax(using_barohgt) > 0:
plt.text(barohgt_aid_time, 0.2, 'Baro aiding at ' + str(round(gps_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
if np.amin(using_gpshgt) > 0:
plt.text(gpshgt_aid_time, 0.4, 'no pre-arm data - cannot calculate GPS aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='r')
elif np.amax(using_gpshgt) > 0:
plt.text(gpshgt_aid_time, 0.4, 'GPS aiding at ' + str(round(gpshgt_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='r')
if np.amin(using_rnghgt) > 0:
plt.text(rnghgt_aid_time, 0.6, 'no pre-arm data - cannot calculate rangfinder aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='g')
elif np.amax(using_rnghgt) > 0:
plt.text(rnghgt_aid_time, 0.6, 'rangefinder aiding at ' + str(round(rnghgt_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='g')
if np.amin(using_evhgt) > 0:
plt.text(evhgt_aid_time, 0.8, 'no pre-arm data - cannot calculate external vision aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='c')
elif np.amax(using_evhgt) > 0:
plt.text(evhgt_aid_time, 0.8, 'external vision aiding at ' + str(round(evhgt_aid_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='c')
# subplot for magnetometer aiding
plt.subplot(4, 1, 4)
plt.plot(status_time, using_magyaw, 'b')
plt.plot(status_time, using_mag3d, 'r')
plt.plot(status_time, using_magdecl, 'g')
plt.ylim(-0.1, 1.1)
plt.ylabel('mag aiding')
plt.xlabel('time (sec)')
plt.grid()
if np.amin(using_magyaw) > 0:
plt.text(using_magyaw_time, 0.25, 'no pre-arm data - cannot calculate magnetic yaw aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='b')
elif np.amax(using_magyaw) > 0:
plt.text(using_magyaw_time, 0.25, 'magnetic yaw aiding at ' + str(round(using_magyaw_time, 1)) + ' sec',
fontsize=12, horizontalalignment='right', verticalalignment='center', color='b')
if np.amin(using_mag3d) > 0:
plt.text(using_mag3d_time, 0.50, 'no pre-arm data - cannot calculate 3D magnetoemter aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
elif np.amax(using_mag3d) > 0:
plt.text(using_mag3d_time, 0.50, 'magnetometer 3D aiding at ' + str(round(using_mag3d_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
if np.amin(using_magdecl) > 0:
plt.text(using_magdecl_time, 0.75, 'no pre-arm data - cannot magnetic declination aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='g')
elif np.amax(using_magdecl) > 0:
plt.text(using_magdecl_time, 0.75,
'magnetic declination aiding at ' + str(round(using_magdecl_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='g')
pp.savefig()
plt.close(9)
    # control mode summary plot B
    plt.figure(10, figsize=(20, 13))
    # subplot for airborne status
    plt.subplot(2, 1, 1)
    plt.title('EKF Control Status - Figure B')
    plt.plot(status_time, airborne, 'b')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('airborne')
    plt.grid()
    # annotate the take-off and landing transitions where they exist in the log
    if np.amax(np.diff(airborne)) < 0.5:
        plt.text(in_air_transition_time, 0.67, 'ground to air transition not detected', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='b')
    else:
        plt.text(in_air_transition_time, 0.67, 'in-air at ' + str(round(in_air_transition_time, 1)) + ' sec',
                 fontsize=12, horizontalalignment='left', verticalalignment='center', color='b')
    if np.amin(np.diff(airborne)) > -0.5:
        plt.text(on_ground_transition_time, 0.33, 'air to ground transition not detected', fontsize=12,
                 horizontalalignment='left', verticalalignment='center', color='b')
    else:
        plt.text(on_ground_transition_time, 0.33, 'on-ground at ' + str(round(on_ground_transition_time, 1)) + ' sec',
                 fontsize=12, horizontalalignment='right', verticalalignment='center', color='b')
    # annotate the flight duration midway between the two transitions
    if in_air_duration > 0.0:
        plt.text((in_air_transition_time + on_ground_transition_time) / 2, 0.5,
                 'duration = ' + str(round(in_air_duration, 1)) + ' sec', fontsize=12, horizontalalignment='center',
                 verticalalignment='center', color='b')
    # subplot for wind estimation status
    plt.subplot(2, 1, 2)
    plt.plot(status_time, estimating_wind, 'b')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('estimating wind')
    plt.xlabel('time (sec)')
    plt.grid()
    pp.savefig()
    plt.close(10)
# innovation_check_flags summary
# each bit of the bitmask is expanded into its own 0/1 integer time series
# 0 - true if velocity observations have been rejected
# 1 - true if horizontal position observations have been rejected
# 2 - true if vertical position observations have been rejected
# 3 - true if the X magnetometer observation has been rejected
# 4 - true if the Y magnetometer observation has been rejected
# 5 - true if the Z magnetometer observation has been rejected
# 6 - true if the yaw observation has been rejected
# 7 - true if the airspeed observation has been rejected
# 8 - true if synthetic sideslip observation has been rejected
# 9 - true if the height above ground observation has been rejected
# 10 - true if the X optical flow observation has been rejected
# 11 - true if the Y optical flow observation has been rejected
vel_innov_fail = ((2 ** 0 & estimator_status['innovation_check_flags']) > 0) * 1
posh_innov_fail = ((2 ** 1 & estimator_status['innovation_check_flags']) > 0) * 1
posv_innov_fail = ((2 ** 2 & estimator_status['innovation_check_flags']) > 0) * 1
magx_innov_fail = ((2 ** 3 & estimator_status['innovation_check_flags']) > 0) * 1
magy_innov_fail = ((2 ** 4 & estimator_status['innovation_check_flags']) > 0) * 1
magz_innov_fail = ((2 ** 5 & estimator_status['innovation_check_flags']) > 0) * 1
yaw_innov_fail = ((2 ** 6 & estimator_status['innovation_check_flags']) > 0) * 1
tas_innov_fail = ((2 ** 7 & estimator_status['innovation_check_flags']) > 0) * 1
sli_innov_fail = ((2 ** 8 & estimator_status['innovation_check_flags']) > 0) * 1
hagl_innov_fail = ((2 ** 9 & estimator_status['innovation_check_flags']) > 0) * 1
ofx_innov_fail = ((2 ** 10 & estimator_status['innovation_check_flags']) > 0) * 1
ofy_innov_fail = ((2 ** 11 & estimator_status['innovation_check_flags']) > 0) * 1
if plot:
    # plot innovation_check_flags summary
    plt.figure(11, figsize=(20, 13))
    plt.subplot(6, 1, 1)
    plt.title('EKF Innovation Test Fails')
    plt.plot(status_time, vel_innov_fail, 'b', label='vel NED')
    plt.plot(status_time, posh_innov_fail, 'r', label='pos NE')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('failed')
    plt.legend(loc='upper left')
    plt.grid()
    plt.subplot(6, 1, 2)
    plt.plot(status_time, posv_innov_fail, 'b', label='hgt absolute')
    plt.plot(status_time, hagl_innov_fail, 'r', label='hgt above ground')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('failed')
    plt.legend(loc='upper left')
    plt.grid()
    plt.subplot(6, 1, 3)
    plt.plot(status_time, magx_innov_fail, 'b', label='mag_x')
    plt.plot(status_time, magy_innov_fail, 'r', label='mag_y')
    plt.plot(status_time, magz_innov_fail, 'g', label='mag_z')
    plt.plot(status_time, yaw_innov_fail, 'c', label='yaw')
    plt.legend(loc='upper left')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('failed')
    plt.grid()
    plt.subplot(6, 1, 4)
    plt.plot(status_time, tas_innov_fail, 'b', label='airspeed')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('failed')
    plt.legend(loc='upper left')
    plt.grid()
    plt.subplot(6, 1, 5)
    plt.plot(status_time, sli_innov_fail, 'b', label='sideslip')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('failed')
    plt.legend(loc='upper left')
    plt.grid()
    plt.subplot(6, 1, 6)
    plt.plot(status_time, ofx_innov_fail, 'b', label='flow X')
    plt.plot(status_time, ofy_innov_fail, 'r', label='flow Y')
    plt.ylim(-0.1, 1.1)
    plt.ylabel('failed')
    # bug fix: the axis label was missing its closing parenthesis
    plt.xlabel('time (sec)')
    plt.legend(loc='upper left')
    plt.grid()
    pp.savefig()
    plt.close(11)
# gps_check_fail_flags summary
plt.figure(12, figsize=(20, 13))
# 0 : insufficient fix type (no 3D solution)
# 1 : minimum required sat count fail
# 2 : minimum required GDoP fail
# 3 : maximum allowed horizontal position error fail
# 4 : maximum allowed vertical position error fail
# 5 : maximum allowed speed error fail
# 6 : maximum allowed horizontal position drift fail
# 7 : maximum allowed vertical position drift fail
# 8 : maximum allowed horizontal speed fail
# 9 : maximum allowed vertical velocity discrepancy fail
gfix_fail = ((2 ** 0 & estimator_status['gps_check_fail_flags']) > 0) * 1
nsat_fail = ((2 ** 1 & estimator_status['gps_check_fail_flags']) > 0) * 1
gdop_fail = ((2 ** 2 & estimator_status['gps_check_fail_flags']) > 0) * 1
herr_fail = ((2 ** 3 & estimator_status['gps_check_fail_flags']) > 0) * 1
verr_fail = ((2 ** 4 & estimator_status['gps_check_fail_flags']) > 0) * 1
serr_fail = ((2 ** 5 & estimator_status['gps_check_fail_flags']) > 0) * 1
hdrift_fail = ((2 ** 6 & estimator_status['gps_check_fail_flags']) > 0) * 1
vdrift_fail = ((2 ** 7 & estimator_status['gps_check_fail_flags']) > 0) * 1
hspd_fail = ((2 ** 8 & estimator_status['gps_check_fail_flags']) > 0) * 1
veld_diff_fail = ((2 ** 9 & estimator_status['gps_check_fail_flags']) > 0) * 1
plt.subplot(2, 1, 1)
plt.title('GPS Direct Output Check Failures')
plt.plot(status_time, gfix_fail, 'k', label='fix type')
plt.plot(status_time, nsat_fail, 'b', label='N sats')
plt.plot(status_time, gdop_fail, 'r', label='GDOP')
plt.plot(status_time, herr_fail, 'g', label='horiz pos error')
plt.plot(status_time, verr_fail, 'c', label='vert pos error')
plt.plot(status_time, serr_fail, 'm', label='speed error')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.legend(loc='upper right')
plt.grid()
plt.subplot(2, 1, 2)
plt.title('GPS Derived Output Check Failures')
plt.plot(status_time, hdrift_fail, 'b', label='horiz drift')
plt.plot(status_time, vdrift_fail, 'r', label='vert drift')
plt.plot(status_time, hspd_fail, 'g', label='horiz speed')
plt.plot(status_time, veld_diff_fail, 'c', label='vert vel inconsistent')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.xlabel('time (sec')
plt.legend(loc='upper right')
plt.grid()
pp.savefig()
plt.close(12)
# filter reported accuracy
plt.figure(13, figsize=(20, 13))
plt.title('Reported Accuracy')
plt.plot(status_time, estimator_status['pos_horiz_accuracy'], 'b', label='horizontal')
plt.plot(status_time, estimator_status['pos_vert_accuracy'], 'r', label='vertical')
plt.ylabel('accuracy (m)')
plt.xlabel('time (sec')
plt.legend(loc='upper right')
plt.grid()
pp.savefig()
plt.close(13)
# Plot the EKF IMU vibration metrics
plt.figure(14, figsize=(20, 13))
vibe_coning_max_arg = np.argmax(estimator_status['vibe[0]'])
vibe_coning_max_time = status_time[vibe_coning_max_arg]
vibe_coning_max = np.amax(estimator_status['vibe[0]'])
vibe_hf_dang_max_arg = np.argmax(estimator_status['vibe[1]'])
vibe_hf_dang_max_time = status_time[vibe_hf_dang_max_arg]
vibe_hf_dang_max = np.amax(estimator_status['vibe[1]'])
vibe_hf_dvel_max_arg = np.argmax(estimator_status['vibe[2]'])
vibe_hf_dvel_max_time = status_time[vibe_hf_dvel_max_arg]
vibe_hf_dvel_max = np.amax(estimator_status['vibe[2]'])
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], 1000.0 * estimator_status['vibe[0]'], 'b')
plt.title('IMU Vibration Metrics')
plt.ylabel('Del Ang Coning (mrad)')
plt.grid()
plt.text(vibe_coning_max_time, 1000.0 * vibe_coning_max, 'max=' + str(round(1000.0 * vibe_coning_max, 5)),
fontsize=12, horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], 1000.0 * estimator_status['vibe[1]'], 'b')
plt.ylabel('HF Del Ang (mrad)')
plt.grid()
plt.text(vibe_hf_dang_max_time, 1000.0 * vibe_hf_dang_max, 'max=' + str(round(1000.0 * vibe_hf_dang_max, 3)),
fontsize=12, horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['vibe[2]'], 'b')
plt.ylabel('HF Del Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(vibe_hf_dvel_max_time, vibe_hf_dvel_max, 'max=' + str(round(vibe_hf_dvel_max, 4)), fontsize=12,
horizontalalignment='left', verticalalignment='top')
pp.savefig()
plt.close(14)
# Plot the EKF output observer tracking errors
plt.figure(15, figsize=(20, 13))
ang_track_err_max_arg = np.argmax(ekf2_innovations['output_tracking_error[0]'])
ang_track_err_max_time = innov_time[ang_track_err_max_arg]
ang_track_err_max = np.amax(ekf2_innovations['output_tracking_error[0]'])
vel_track_err_max_arg = np.argmax(ekf2_innovations['output_tracking_error[1]'])
vel_track_err_max_time = innov_time[vel_track_err_max_arg]
vel_track_err_max = np.amax(ekf2_innovations['output_tracking_error[1]'])
pos_track_err_max_arg = np.argmax(ekf2_innovations['output_tracking_error[2]'])
pos_track_err_max_time = innov_time[pos_track_err_max_arg]
pos_track_err_max = np.amax(ekf2_innovations['output_tracking_error[2]'])
plt.subplot(3, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], 1e3 * ekf2_innovations['output_tracking_error[0]'], 'b')
plt.title('Output Observer Tracking Error Magnitudes')
plt.ylabel('angles (mrad)')
plt.grid()
plt.text(ang_track_err_max_time, 1e3 * ang_track_err_max, 'max=' + str(round(1e3 * ang_track_err_max, 2)),
fontsize=12, horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['output_tracking_error[1]'], 'b')
plt.ylabel('velocity (m/s)')
plt.grid()
plt.text(vel_track_err_max_time, vel_track_err_max, 'max=' + str(round(vel_track_err_max, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 3)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['output_tracking_error[2]'], 'b')
plt.ylabel('position (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(pos_track_err_max_time, pos_track_err_max, 'max=' + str(round(pos_track_err_max, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='top')
pp.savefig()
plt.close(15)
# Plot the delta angle bias estimates
plt.figure(16, figsize=(20, 13))
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[10]'], 'b')
plt.title('Delta Angle Bias Estimates')
plt.ylabel('X (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[11]'], 'b')
plt.ylabel('Y (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[12]'], 'b')
plt.ylabel('Z (rad)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(16)
# Plot the delta velocity bias estimates
plt.figure(17, figsize=(20, 13))
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[13]'], 'b')
plt.title('Delta Velocity Bias Estimates')
plt.ylabel('X (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[14]'], 'b')
plt.ylabel('Y (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[15]'], 'b')
plt.ylabel('Z (m/s)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(17)
# Plot the earth frame magnetic field estimates
plt.figure(18, figsize=(20, 13))
plt.subplot(3, 1, 3)
strength = (estimator_status['states[16]'] ** 2 + estimator_status['states[17]'] ** 2 + estimator_status[
'states[18]'] ** 2) ** 0.5
plt.plot(1e-6 * estimator_status['timestamp'], strength, 'b')
plt.ylabel('strength (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 1)
rad2deg = 57.2958
declination = rad2deg * np.arctan2(estimator_status['states[17]'], estimator_status['states[16]'])
plt.plot(1e-6 * estimator_status['timestamp'], declination, 'b')
plt.title('Earth Magnetic Field Estimates')
plt.ylabel('declination (deg)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
inclination = rad2deg * np.arcsin(estimator_status['states[18]'] / np.maximum(strength, np.finfo(np.float32).eps) )
plt.plot(1e-6 * estimator_status['timestamp'], inclination, 'b')
plt.ylabel('inclination (deg)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(18)
# Plot the body frame magnetic field estimates
plt.figure(19, figsize=(20, 13))
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[19]'], 'b')
plt.title('Magnetomer Bias Estimates')
plt.ylabel('X (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[20]'], 'b')
plt.ylabel('Y (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[21]'], 'b')
plt.ylabel('Z (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(19)
# Plot the EKF wind estimates
plt.figure(20, figsize=(20, 13))
plt.subplot(2, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[22]'], 'b')
plt.title('Wind Velocity Estimates')
plt.ylabel('North (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[23]'], 'b')
plt.ylabel('East (m/s)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(20)
# close the pdf file
pp.close()
# don't display to screen
# plt.show()
# clase all figures
plt.close("all")
# Do some automated analysis of the status data
# normal index range is defined by the flight duration
start_index = np.amin(np.where(status_time > in_air_transition_time))
end_index = np.amax(np.where(status_time <= on_ground_transition_time))
num_valid_values = (end_index - start_index + 1)
# find a late/early index range from 5 sec after in_air_transition_time to 5 sec before on-ground transition time for mag and optical flow checks to avoid false positives
# this can be used to prevent false positives for sensors adversely affected by close proximity to the ground
# don't do this if the log starts or finishes in air or if it is shut off by flag
late_start_index = np.amin(np.where(status_time > (in_air_transition_time + 5.0)))\
    if (late_start_early_ending and not b_starts_in_air) else start_index
early_end_index = np.amax(np.where(status_time <= (on_ground_transition_time - 5.0))) \
    if (late_start_early_ending and not b_finishes_in_air) else end_index
num_valid_values_trimmed = (early_end_index - late_start_index + 1)
# also find the start and finish indexes for the innovation data
innov_start_index = np.amin(np.where(innov_time > in_air_transition_time))
innov_end_index = np.amax(np.where(innov_time <= on_ground_transition_time))
innov_num_valid_values = (innov_end_index - innov_start_index + 1)
# trimmed innovation index range, subject to the same start/finish-in-air guards
innov_late_start_index = np.amin(np.where(innov_time > (in_air_transition_time + 5.0))) \
    if (late_start_early_ending and not b_starts_in_air) else innov_start_index
innov_early_end_index = np.amax(np.where(innov_time <= (on_ground_transition_time - 5.0))) \
    if (late_start_early_ending and not b_finishes_in_air) else innov_end_index
innov_num_valid_values_trimmed = (innov_early_end_index - innov_late_start_index + 1)
# define dictionary of test results and descriptions
test_results = {
'master_status': ['Pass',
'Master check status which can be either Pass Warning or Fail. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'mag_sensor_status': ['Pass',
'Magnetometer sensor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'yaw_sensor_status': ['Pass',
'Yaw sensor check summary. This sensor data can be sourced from the magnetometer or an external vision system. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'vel_sensor_status': ['Pass',
'Velocity sensor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'pos_sensor_status': ['Pass',
'Position sensor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'hgt_sensor_status': ['Pass',
'Height sensor check summary. This sensor data can be sourced from either Baro or GPS or range finder or external vision system. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'hagl_sensor_status': ['Pass',
'Height above ground sensor check summary. This sensor data is normally sourced from a rangefinder sensor. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'tas_sensor_status': ['Pass',
'Airspeed sensor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'imu_sensor_status': ['Pass',
'IMU sensor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'imu_vibration_check': ['Pass',
'IMU vibration check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'imu_bias_check': ['Pass',
'IMU bias check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'imu_output_predictor_check': ['Pass',
'IMU output predictor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'flow_sensor_status': ['Pass',
'Optical Flow sensor check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'filter_fault_status': ['Pass',
'Internal Filter check summary. A Fail result indicates a significant error that caused a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no amonalies were detected and no further investigation is required'],
'mag_percentage_red': [float('NaN'),
'The percentage of in-flight consolidated magnetic field sensor innovation consistency test values > 1.0.'],
'mag_percentage_amber': [float('NaN'),
'The percentage of in-flight consolidated magnetic field sensor innovation consistency test values > 0.5.'],
'magx_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the X-axis magnetic field sensor innovation consistency test.'],
'magy_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the Y-axis magnetic field sensor innovation consistency test.'],
'magz_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the Z-axis magnetic field sensor innovation consistency test.'],
'yaw_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the yaw sensor innovation consistency test.'],
'mag_test_max': [float('NaN'),
'The maximum in-flight value of the magnetic field sensor innovation consistency test ratio.'],
'mag_test_mean': [float('NaN'),
'The mean in-flight value of the magnetic field sensor innovation consistency test ratio.'],
'vel_percentage_red': [float('NaN'),
'The percentage of in-flight velocity sensor consolidated innovation consistency test values > 1.0.'],
'vel_percentage_amber': [float('NaN'),
'The percentage of in-flight velocity sensor consolidated innovation consistency test values > 0.5.'],
'vel_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the velocity sensor consolidated innovation consistency test.'],
'vel_test_max': [float('NaN'),
'The maximum in-flight value of the velocity sensor consolidated innovation consistency test ratio.'],
'vel_test_mean': [float('NaN'),
'The mean in-flight value of the velocity sensor consolidated innovation consistency test ratio.'],
'pos_percentage_red': [float('NaN'),
'The percentage of in-flight position sensor consolidated innovation consistency test values > 1.0.'],
'pos_percentage_amber': [float('NaN'),
'The percentage of in-flight position sensor consolidated innovation consistency test values > 0.5.'],
'pos_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the velocity sensor consolidated innovation consistency test.'],
'pos_test_max': [float('NaN'),
'The maximum in-flight value of the position sensor consolidated innovation consistency test ratio.'],
'pos_test_mean': [float('NaN'),
'The mean in-flight value of the position sensor consolidated innovation consistency test ratio.'],
'hgt_percentage_red': [float('NaN'),
'The percentage of in-flight height sensor innovation consistency test values > 1.0.'],
'hgt_percentage_amber': [float('NaN'),
'The percentage of in-flight height sensor innovation consistency test values > 0.5.'],
'hgt_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the height sensor innovation consistency test.'],
'hgt_test_max': [float('NaN'),
'The maximum in-flight value of the height sensor innovation consistency test ratio.'],
'hgt_test_mean': [float('NaN'),
'The mean in-flight value of the height sensor innovation consistency test ratio.'],
'tas_percentage_red': [float('NaN'),
'The percentage of in-flight airspeed sensor innovation consistency test values > 1.0.'],
'tas_percentage_amber': [float('NaN'),
'The percentage of in-flight airspeed sensor innovation consistency test values > 0.5.'],
'tas_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the airspeed sensor innovation consistency test.'],
'tas_test_max': [float('NaN'),
'The maximum in-flight value of the airspeed sensor innovation consistency test ratio.'],
'tas_test_mean': [float('NaN'),
'The mean in-flight value of the airspeed sensor innovation consistency test ratio.'],
'hagl_percentage_red': [float('NaN'),
'The percentage of in-flight height above ground sensor innovation consistency test values > 1.0.'],
'hagl_percentage_amber': [float('NaN'),
'The percentage of in-flight height above ground sensor innovation consistency test values > 0.5.'],
'hagl_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the height above ground sensor innovation consistency test.'],
'hagl_test_max': [float('NaN'),
'The maximum in-flight value of the height above ground sensor innovation consistency test ratio.'],
'hagl_test_mean': [float('NaN'),
'The mean in-flight value of the height above ground sensor innovation consistency test ratio.'],
'ofx_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the optical flow sensor X-axis innovation consistency test.'],
'ofy_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the optical flow sensor Y-axis innovation consistency test.'],
'filter_faults_max': [float('NaN'),
'Largest recorded value of the filter internal fault bitmask. Should always be zero.'],
'imu_coning_peak': [float('NaN'), 'Peak in-flight value of the IMU delta angle coning vibration metric (rad)'],
'imu_coning_mean': [float('NaN'), 'Mean in-flight value of the IMU delta angle coning vibration metric (rad)'],
'imu_hfdang_peak': [float('NaN'),
'Peak in-flight value of the IMU delta angle high frequency vibration metric (rad)'],
'imu_hfdang_mean': [float('NaN'),
'Mean in-flight value of the IMU delta angle high frequency vibration metric (rad)'],
'imu_hfdvel_peak': [float('NaN'),
'Peak in-flight value of the IMU delta velocity high frequency vibration metric (m/s)'],
'imu_hfdvel_mean': [float('NaN'),
'Mean in-flight value of the IMU delta velocity high frequency vibration metric (m/s)'],
'output_obs_ang_err_median': [float('NaN'),
'Median in-flight value of the output observer angular error (rad)'],
'output_obs_vel_err_median': [float('NaN'),
'Median in-flight value of the output observer velocity error (m/s)'],
'output_obs_pos_err_median': [float('NaN'), 'Median in-flight value of the output observer position error (m)'],
'imu_dang_bias_median': [float('NaN'), 'Median in-flight value of the delta angle bias vector length (rad)'],
'imu_dvel_bias_median': [float('NaN'), 'Median in-flight value of the delta velocity bias vector length (m/s)'],
'tilt_align_time': [float('NaN'),
'The time in seconds measured from startup that the EKF completed the tilt alignment. A nan value indicates that the alignment had completed before logging started or alignment did not complete.'],
'yaw_align_time': [float('NaN'),
'The time in seconds measured from startup that the EKF completed the yaw alignment.'],
'in_air_transition_time': [round(in_air_transition_time, 1),
'The time in seconds measured from startup that the EKF transtioned into in-air mode. Set to a nan if a transition event is not detected.'],
'on_ground_transition_time': [round(on_ground_transition_time, 1),
'The time in seconds measured from startup that the EKF transitioned out of in-air mode. Set to a nan if a transition event is not detected.'],
}
    # generate test metadata
    # reduction of innovation message data
    # Only compute innovation-derived metrics when the trimmed in-flight window
    # holds a meaningful number of samples (> 50 beyond the late start point);
    # otherwise the NaN defaults from the metadata dict above are kept.
    if (innov_early_end_index > (innov_late_start_index + 50)):
        # Output Observer Tracking Errors
        # Medians of the output-observer tracking error channels over the
        # trimmed window: [0] angular, [1] velocity, [2] position (units per
        # the metric descriptions in the metadata dict above).
        test_results['output_obs_ang_err_median'][0] = np.median(
            ekf2_innovations['output_tracking_error[0]'][innov_late_start_index:innov_early_end_index + 1])
        test_results['output_obs_vel_err_median'][0] = np.median(
            ekf2_innovations['output_tracking_error[1]'][innov_late_start_index:innov_early_end_index + 1])
        test_results['output_obs_pos_err_median'][0] = np.median(
            ekf2_innovations['output_tracking_error[2]'][innov_late_start_index:innov_early_end_index + 1])
    # reduction of status message data
    # Only compute status-derived metrics when the trimmed in-flight window
    # holds a meaningful number of samples (> 50 beyond the late start point).
    if (early_end_index > (late_start_index + 50)):
        # IMU vibration checks
        # Record peak and mean of each vibration metric only when a non-zero
        # peak was observed; otherwise the NaN defaults are kept.
        # NOTE(review): the peak slice stops at early_end_index while the mean
        # slice uses early_end_index + 1 (one extra sample) — confirm whether
        # this off-by-one difference is intentional.
        temp = np.amax(estimator_status['vibe[0]'][late_start_index:early_end_index])
        if (temp > 0.0):
            test_results['imu_coning_peak'][0] = temp
            test_results['imu_coning_mean'][0] = np.mean(estimator_status['vibe[0]'][late_start_index:early_end_index + 1])
        temp = np.amax(estimator_status['vibe[1]'][late_start_index:early_end_index])
        if (temp > 0.0):
            test_results['imu_hfdang_peak'][0] = temp
            test_results['imu_hfdang_mean'][0] = np.mean(estimator_status['vibe[1]'][late_start_index:early_end_index + 1])
        temp = np.amax(estimator_status['vibe[2]'][late_start_index:early_end_index])
        if (temp > 0.0):
            test_results['imu_hfdvel_peak'][0] = temp
            test_results['imu_hfdvel_mean'][0] = np.mean(estimator_status['vibe[2]'][late_start_index:early_end_index + 1])
        # Magnetometer Sensor Checks
        # Scored only if yaw alignment completed at some point in the log.
        if (np.amax(yaw_aligned) > 0.5):
            # red: consolidated test ratio exceeded 1.0; amber: exceeded 0.5 (red samples excluded)
            mag_num_red = (estimator_status['mag_test_ratio'][start_index:end_index + 1] > 1.0).sum()
            mag_num_amber = (estimator_status['mag_test_ratio'][start_index:end_index + 1] > 0.5).sum() - mag_num_red
            # NOTE(review): percentages divide by num_valid_values_trimmed —
            # confirm callers guarantee this count is non-zero.
            test_results['mag_percentage_red'][0] = 100.0 * mag_num_red / num_valid_values_trimmed
            test_results['mag_percentage_amber'][0] = 100.0 * mag_num_amber / num_valid_values_trimmed
            test_results['mag_test_max'][0] = np.amax(
                estimator_status['mag_test_ratio'][late_start_index:early_end_index + 1])
            # NOTE(review): the mean uses the untrimmed [start_index:end_index]
            # window (and excludes end_index), unlike the max above — confirm intentional.
            test_results['mag_test_mean'][0] = np.mean(estimator_status['mag_test_ratio'][start_index:end_index])
            # per-axis and yaw failure-event percentages over the trimmed window
            test_results['magx_fail_percentage'][0] = 100.0 * (
                    magx_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
            test_results['magy_fail_percentage'][0] = 100.0 * (
                    magy_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
            test_results['magz_fail_percentage'][0] = 100.0 * (
                    magz_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
            test_results['yaw_fail_percentage'][0] = 100.0 * (
                    yaw_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
        # Velocity Sensor Checks
        # Scored only if GPS was used at some point in the log.
        if (np.amax(using_gps) > 0.5):
            vel_num_red = (estimator_status['vel_test_ratio'][start_index:end_index + 1] > 1.0).sum()
            vel_num_amber = (estimator_status['vel_test_ratio'][start_index:end_index + 1] > 0.5).sum() - vel_num_red
            test_results['vel_percentage_red'][0] = 100.0 * vel_num_red / num_valid_values
            test_results['vel_percentage_amber'][0] = 100.0 * vel_num_amber / num_valid_values
            test_results['vel_test_max'][0] = np.amax(estimator_status['vel_test_ratio'][start_index:end_index + 1])
            test_results['vel_test_mean'][0] = np.mean(estimator_status['vel_test_ratio'][start_index:end_index + 1])
            test_results['vel_fail_percentage'][0] = 100.0 * (
                    vel_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
        # Position Sensor Checks
        # Scored if either GPS or external-vision position was used.
        if ((np.amax(using_gps) > 0.5) or (np.amax(using_evpos) > 0.5)):
            pos_num_red = (estimator_status['pos_test_ratio'][start_index:end_index + 1] > 1.0).sum()
            pos_num_amber = (estimator_status['pos_test_ratio'][start_index:end_index + 1] > 0.5).sum() - pos_num_red
            test_results['pos_percentage_red'][0] = 100.0 * pos_num_red / num_valid_values
            test_results['pos_percentage_amber'][0] = 100.0 * pos_num_amber / num_valid_values
            test_results['pos_test_max'][0] = np.amax(estimator_status['pos_test_ratio'][start_index:end_index + 1])
            test_results['pos_test_mean'][0] = np.mean(estimator_status['pos_test_ratio'][start_index:end_index + 1])
            test_results['pos_fail_percentage'][0] = 100.0 * (
                    posh_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
        # Height Sensor Checks
        # Always scored (no gating condition); uses the trimmed window.
        hgt_num_red = (estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1] > 1.0).sum()
        hgt_num_amber = (estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1] > 0.5).sum() - hgt_num_red
        test_results['hgt_percentage_red'][0] = 100.0 * hgt_num_red / num_valid_values_trimmed
        test_results['hgt_percentage_amber'][0] = 100.0 * hgt_num_amber / num_valid_values_trimmed
        test_results['hgt_test_max'][0] = np.amax(estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1])
        test_results['hgt_test_mean'][0] = np.mean(estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1])
        test_results['hgt_fail_percentage'][0] = 100.0 * (
                posv_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
        # Airspeed Sensor Checks
        # Scored only if the airspeed test ratio ever rose above zero.
        if (tas_test_max > 0.0):
            tas_num_red = (estimator_status['tas_test_ratio'][start_index:end_index + 1] > 1.0).sum()
            tas_num_amber = (estimator_status['tas_test_ratio'][start_index:end_index + 1] > 0.5).sum() - tas_num_red
            test_results['tas_percentage_red'][0] = 100.0 * tas_num_red / num_valid_values
            test_results['tas_percentage_amber'][0] = 100.0 * tas_num_amber / num_valid_values
            test_results['tas_test_max'][0] = np.amax(estimator_status['tas_test_ratio'][start_index:end_index + 1])
            test_results['tas_test_mean'][0] = np.mean(estimator_status['tas_test_ratio'][start_index:end_index + 1])
            test_results['tas_fail_percentage'][0] = 100.0 * (
                    tas_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
        # HAGL Sensor Checks
        # Scored only if the height-above-ground test ratio ever rose above zero.
        if (hagl_test_max > 0.0):
            hagl_num_red = (estimator_status['hagl_test_ratio'][start_index:end_index + 1] > 1.0).sum()
            hagl_num_amber = (estimator_status['hagl_test_ratio'][start_index:end_index + 1] > 0.5).sum() - hagl_num_red
            test_results['hagl_percentage_red'][0] = 100.0 * hagl_num_red / num_valid_values
            test_results['hagl_percentage_amber'][0] = 100.0 * hagl_num_amber / num_valid_values
            test_results['hagl_test_max'][0] = np.amax(estimator_status['hagl_test_ratio'][start_index:end_index + 1])
            test_results['hagl_test_mean'][0] = np.mean(estimator_status['hagl_test_ratio'][start_index:end_index + 1])
            test_results['hagl_fail_percentage'][0] = 100.0 * (
                    hagl_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
        # optical flow sensor checks
        # Scored only if optical flow was used at some point in the log.
        if (np.amax(using_optflow) > 0.5):
            test_results['ofx_fail_percentage'][0] = 100.0 * (
                    ofx_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
            test_results['ofy_fail_percentage'][0] = 100.0 * (
                    ofy_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
        # IMU bias checks
        # Euclidean norms of the per-axis medians of the delta-angle bias
        # states (states[10..12]) and delta-velocity bias states (states[13..15]).
        test_results['imu_dang_bias_median'][0] = (np.median(estimator_status['states[10]']) ** 2 + np.median(
            estimator_status['states[11]']) ** 2 + np.median(estimator_status['states[12]']) ** 2) ** 0.5
        test_results['imu_dvel_bias_median'][0] = (np.median(estimator_status['states[13]']) ** 2 + np.median(
            estimator_status['states[14]']) ** 2 + np.median(estimator_status['states[15]']) ** 2) ** 0.5
    # Check for internal filter numerical faults
    test_results['filter_faults_max'][0] = np.amax(estimator_status['filter_fault_flags'])
    # TODO - process the following bitmasks when they have been properly documented in the uORB topic
    # estimator_status['health_flags']
    # estimator_status['timeout_flags']
    # calculate a master status - Fail, Warning, Pass
    # check test results against levels to provide a master status
    # check for warnings
    # Any amber percentage above its warning threshold downgrades the master
    # status to 'Warning' and flags the corresponding per-sensor status.
    # NOTE: metrics left at their NaN defaults compare False here, so sensors
    # that produced no data never trigger a warning.
    if (test_results.get('mag_percentage_amber')[0] > check_levels.get('mag_amber_warn_pct')):
        test_results['master_status'][0] = 'Warning'
        test_results['mag_sensor_status'][0] = 'Warning'
    if (test_results.get('vel_percentage_amber')[0] > check_levels.get('vel_amber_warn_pct')):
        test_results['master_status'][0] = 'Warning'
        test_results['vel_sensor_status'][0] = 'Warning'
    if (test_results.get('pos_percentage_amber')[0] > check_levels.get('pos_amber_warn_pct')):
        test_results['master_status'][0] = 'Warning'
        test_results['pos_sensor_status'][0] = 'Warning'
    if (test_results.get('hgt_percentage_amber')[0] > check_levels.get('hgt_amber_warn_pct')):
        test_results['master_status'][0] = 'Warning'
        test_results['hgt_sensor_status'][0] = 'Warning'
    if (test_results.get('hagl_percentage_amber')[0] > check_levels.get('hagl_amber_warn_pct')):
        test_results['master_status'][0] = 'Warning'
        test_results['hagl_sensor_status'][0] = 'Warning'
    if (test_results.get('tas_percentage_amber')[0] > check_levels.get('tas_amber_warn_pct')):
        test_results['master_status'][0] = 'Warning'
        test_results['tas_sensor_status'][0] = 'Warning'
    # check for IMU sensor warnings
    # vibration: any coning / high-frequency delta-angle / delta-velocity
    # metric (peak or mean) above its warning level
    if ((test_results.get('imu_coning_peak')[0] > check_levels.get('imu_coning_peak_warn')) or
            (test_results.get('imu_coning_mean')[0] > check_levels.get('imu_coning_mean_warn')) or
            (test_results.get('imu_hfdang_peak')[0] > check_levels.get('imu_hfdang_peak_warn')) or
            (test_results.get('imu_hfdang_mean')[0] > check_levels.get('imu_hfdang_mean_warn')) or
            (test_results.get('imu_hfdvel_peak')[0] > check_levels.get('imu_hfdvel_peak_warn')) or
            (test_results.get('imu_hfdvel_mean')[0] > check_levels.get('imu_hfdvel_mean_warn'))):
        test_results['master_status'][0] = 'Warning'
        test_results['imu_sensor_status'][0] = 'Warning'
        test_results['imu_vibration_check'][0] = 'Warning'
        print('IMU vibration check warning.')
    # bias: median delta-angle or delta-velocity bias vector length too large
    if ((test_results.get('imu_dang_bias_median')[0] > check_levels.get('imu_dang_bias_median_warn')) or
            (test_results.get('imu_dvel_bias_median')[0] > check_levels.get('imu_dvel_bias_median_warn'))):
        test_results['master_status'][0] = 'Warning'
        test_results['imu_sensor_status'][0] = 'Warning'
        test_results['imu_bias_check'][0] = 'Warning'
        print('IMU bias check warning.')
    # output predictor: median output-observer tracking errors too large
    if ((test_results.get('output_obs_ang_err_median')[0] > check_levels.get('obs_ang_err_median_warn')) or
            (test_results.get('output_obs_vel_err_median')[0] > check_levels.get('obs_vel_err_median_warn')) or
            (test_results.get('output_obs_pos_err_median')[0] > check_levels.get('obs_pos_err_median_warn'))):
        test_results['master_status'][0] = 'Warning'
        test_results['imu_sensor_status'][0] = 'Warning'
        test_results['imu_output_predictor_check'][0] = 'Warning'
        print('IMU output predictor check warning.')
    # check for failures
    # Failure checks run after the warning checks, so any failing sensor
    # overwrites an earlier 'Warning' master status with 'Fail' and marks the
    # corresponding per-sensor status.
    if ((test_results.get('magx_fail_percentage')[0] > check_levels.get('mag_fail_pct')) or
            (test_results.get('magy_fail_percentage')[0] > check_levels.get('mag_fail_pct')) or
            (test_results.get('magz_fail_percentage')[0] > check_levels.get('mag_fail_pct')) or
            (test_results.get('mag_percentage_amber')[0] > check_levels.get('mag_amber_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['mag_sensor_status'][0] = 'Fail'
        print('Magnetometer sensor check failure.')
    if (test_results.get('yaw_fail_percentage')[0] > check_levels.get('yaw_fail_pct')):
        test_results['master_status'][0] = 'Fail'
        test_results['yaw_sensor_status'][0] = 'Fail'
        print('Yaw sensor check failure.')
    if ((test_results.get('vel_fail_percentage')[0] > check_levels.get('vel_fail_pct')) or
            (test_results.get('vel_percentage_amber')[0] > check_levels.get('vel_amber_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['vel_sensor_status'][0] = 'Fail'
        print('Velocity sensor check failure.')
    if ((test_results.get('pos_fail_percentage')[0] > check_levels.get('pos_fail_pct')) or
            (test_results.get('pos_percentage_amber')[0] > check_levels.get('pos_amber_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['pos_sensor_status'][0] = 'Fail'
        print('Position sensor check failure.')
    if ((test_results.get('hgt_fail_percentage')[0] > check_levels.get('hgt_fail_pct')) or
            (test_results.get('hgt_percentage_amber')[0] > check_levels.get('hgt_amber_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['hgt_sensor_status'][0] = 'Fail'
        print('Height sensor check failure.')
    if ((test_results.get('tas_fail_percentage')[0] > check_levels.get('tas_fail_pct')) or
            (test_results.get('tas_percentage_amber')[0] > check_levels.get('tas_amber_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['tas_sensor_status'][0] = 'Fail'
        print('Airspeed sensor check failure.')
    if ((test_results.get('hagl_fail_percentage')[0] > check_levels.get('hagl_fail_pct')) or
            (test_results.get('hagl_percentage_amber')[0] > check_levels.get('hagl_amber_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['hagl_sensor_status'][0] = 'Fail'
        print('Height above ground sensor check failure.')
    if ((test_results.get('ofx_fail_percentage')[0] > check_levels.get('flow_fail_pct')) or
            (test_results.get('ofy_fail_percentage')[0] > check_levels.get('flow_fail_pct'))):
        test_results['master_status'][0] = 'Fail'
        test_results['flow_sensor_status'][0] = 'Fail'
        print('Optical flow sensor check failure.')
    # any non-zero internal filter fault bitmask is an unconditional failure
    if (test_results.get('filter_faults_max')[0] > 0):
        test_results['master_status'][0] = 'Fail'
        test_results['filter_fault_status'][0] = 'Fail'
return test_results | [
"matplotlib.pyplot.title",
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.arctan2",
"numpy.amin",
"numpy.argmax",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.close",
"numpy.finfo",
"matplotlib.pyplot.ylim",
"numpy.median",
"matplotlib.pyplot.legend",
"m... | [((109, 130), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (123, 130), False, 'import matplotlib\n'), ((23155, 23200), 'numpy.argmax', 'np.argmax', (["estimator_status['mag_test_ratio']"], {}), "(estimator_status['mag_test_ratio'])\n", (23164, 23200), True, 'import numpy as np\n'), ((23274, 23317), 'numpy.amax', 'np.amax', (["estimator_status['mag_test_ratio']"], {}), "(estimator_status['mag_test_ratio'])\n", (23281, 23317), True, 'import numpy as np\n'), ((23338, 23381), 'numpy.mean', 'np.mean', (["estimator_status['mag_test_ratio']"], {}), "(estimator_status['mag_test_ratio'])\n", (23345, 23381), True, 'import numpy as np\n'), ((23431, 23476), 'numpy.argmax', 'np.argmax', (["estimator_status['vel_test_ratio']"], {}), "(estimator_status['vel_test_ratio'])\n", (23440, 23476), True, 'import numpy as np\n'), ((23550, 23593), 'numpy.amax', 'np.amax', (["estimator_status['vel_test_ratio']"], {}), "(estimator_status['vel_test_ratio'])\n", (23557, 23593), True, 'import numpy as np\n'), ((23614, 23657), 'numpy.mean', 'np.mean', (["estimator_status['vel_test_ratio']"], {}), "(estimator_status['vel_test_ratio'])\n", (23621, 23657), True, 'import numpy as np\n'), ((23737, 23782), 'numpy.argmax', 'np.argmax', (["estimator_status['pos_test_ratio']"], {}), "(estimator_status['pos_test_ratio'])\n", (23746, 23782), True, 'import numpy as np\n'), ((23856, 23899), 'numpy.amax', 'np.amax', (["estimator_status['pos_test_ratio']"], {}), "(estimator_status['pos_test_ratio'])\n", (23863, 23899), True, 'import numpy as np\n'), ((23920, 23963), 'numpy.mean', 'np.mean', (["estimator_status['pos_test_ratio']"], {}), "(estimator_status['pos_test_ratio'])\n", (23927, 23963), True, 'import numpy as np\n'), ((24037, 24082), 'numpy.argmax', 'np.argmax', (["estimator_status['hgt_test_ratio']"], {}), "(estimator_status['hgt_test_ratio'])\n", (24046, 24082), True, 'import numpy as np\n'), ((24156, 24199), 'numpy.amax', 'np.amax', 
(["estimator_status['hgt_test_ratio']"], {}), "(estimator_status['hgt_test_ratio'])\n", (24163, 24199), True, 'import numpy as np\n'), ((24220, 24263), 'numpy.mean', 'np.mean', (["estimator_status['hgt_test_ratio']"], {}), "(estimator_status['hgt_test_ratio'])\n", (24227, 24263), True, 'import numpy as np\n'), ((24307, 24352), 'numpy.argmax', 'np.argmax', (["estimator_status['tas_test_ratio']"], {}), "(estimator_status['tas_test_ratio'])\n", (24316, 24352), True, 'import numpy as np\n'), ((24426, 24469), 'numpy.amax', 'np.amax', (["estimator_status['tas_test_ratio']"], {}), "(estimator_status['tas_test_ratio'])\n", (24433, 24469), True, 'import numpy as np\n'), ((24490, 24533), 'numpy.mean', 'np.mean', (["estimator_status['tas_test_ratio']"], {}), "(estimator_status['tas_test_ratio'])\n", (24497, 24533), True, 'import numpy as np\n'), ((24603, 24649), 'numpy.argmax', 'np.argmax', (["estimator_status['hagl_test_ratio']"], {}), "(estimator_status['hagl_test_ratio'])\n", (24612, 24649), True, 'import numpy as np\n'), ((24726, 24770), 'numpy.amax', 'np.amax', (["estimator_status['hagl_test_ratio']"], {}), "(estimator_status['hagl_test_ratio'])\n", (24733, 24770), True, 'import numpy as np\n'), ((24792, 24836), 'numpy.mean', 'np.mean', (["estimator_status['hagl_test_ratio']"], {}), "(estimator_status['hagl_test_ratio'])\n", (24799, 24836), True, 'import numpy as np\n'), ((83434, 83481), 'numpy.amax', 'np.amax', (["estimator_status['filter_fault_flags']"], {}), "(estimator_status['filter_fault_flags'])\n", (83441, 83481), True, 'import numpy as np\n'), ((478, 508), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['output_plot_filename'], {}), '(output_plot_filename)\n', (486, 508), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((1469, 1500), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(20, 13)'}), '(1, figsize=(20, 13))\n', (1479, 1500), True, 'import matplotlib.pyplot as plt\n'), ((1580, 1627), 'numpy.argmax', 
'np.argmax', (["ekf2_innovations['vel_pos_innov[2]']"], {}), "(ekf2_innovations['vel_pos_innov[2]'])\n", (1589, 1627), True, 'import numpy as np\n'), ((1705, 1750), 'numpy.amax', 'np.amax', (["ekf2_innovations['vel_pos_innov[2]']"], {}), "(ekf2_innovations['vel_pos_innov[2]'])\n", (1712, 1750), True, 'import numpy as np\n'), ((1777, 1824), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['vel_pos_innov[2]']"], {}), "(ekf2_innovations['vel_pos_innov[2]'])\n", (1786, 1824), True, 'import numpy as np\n'), ((1902, 1947), 'numpy.amin', 'np.amin', (["ekf2_innovations['vel_pos_innov[2]']"], {}), "(ekf2_innovations['vel_pos_innov[2]'])\n", (1909, 1947), True, 'import numpy as np\n'), ((2214, 2261), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['vel_pos_innov[5]']"], {}), "(ekf2_innovations['vel_pos_innov[5]'])\n", (2223, 2261), True, 'import numpy as np\n'), ((2339, 2384), 'numpy.amax', 'np.amax', (["ekf2_innovations['vel_pos_innov[5]']"], {}), "(ekf2_innovations['vel_pos_innov[5]'])\n", (2346, 2384), True, 'import numpy as np\n'), ((2411, 2458), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['vel_pos_innov[5]']"], {}), "(ekf2_innovations['vel_pos_innov[5]'])\n", (2420, 2458), True, 'import numpy as np\n'), ((2536, 2581), 'numpy.amin', 'np.amin', (["ekf2_innovations['vel_pos_innov[5]']"], {}), "(ekf2_innovations['vel_pos_innov[5]'])\n", (2543, 2581), True, 'import numpy as np\n'), ((2835, 2855), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2846, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2864, 2959), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['vel_pos_innov[2]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'vel_pos_innov[2]'], 'b')\n", (2872, 2959), True, 'import matplotlib.pyplot as plt\n'), ((3185, 3218), 'matplotlib.pyplot.title', 'plt.title', (['"""Vertical Innovations"""'], {}), "('Vertical Innovations')\n", (3194, 
3218), True, 'import matplotlib.pyplot as plt\n'), ((3227, 3255), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Down Vel (m/s)"""'], {}), "('Down Vel (m/s)')\n", (3237, 3255), True, 'import matplotlib.pyplot as plt\n'), ((3264, 3288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (3274, 3288), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3307), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3305, 3307), True, 'import matplotlib.pyplot as plt\n'), ((3316, 3452), 'matplotlib.pyplot.text', 'plt.text', (['innov_2_max_time', 'innov_2_max', "('max=' + s_innov_2_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_2_max_time, innov_2_max, 'max=' + s_innov_2_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (3324, 3452), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3607), 'matplotlib.pyplot.text', 'plt.text', (['innov_2_min_time', 'innov_2_min', "('min=' + s_innov_2_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_2_min_time, innov_2_min, 'min=' + s_innov_2_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (3482, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3783), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (3774, 3783), True, 'import matplotlib.pyplot as plt\n'), ((3792, 3887), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['vel_pos_innov[5]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'vel_pos_innov[5]'], 'b')\n", (3800, 3887), True, 'import matplotlib.pyplot as plt\n'), ((4113, 4139), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Down Pos (m)"""'], {}), "('Down Pos (m)')\n", (4123, 4139), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4172), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (4158, 4172), True, 'import matplotlib.pyplot as plt\n'), ((4181, 4191), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4189, 4191), True, 'import matplotlib.pyplot as plt\n'), ((4200, 4336), 'matplotlib.pyplot.text', 'plt.text', (['innov_5_max_time', 'innov_5_max', "('max=' + s_innov_5_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_5_max_time, innov_5_max, 'max=' + s_innov_5_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (4208, 4336), True, 'import matplotlib.pyplot as plt\n'), ((4358, 4491), 'matplotlib.pyplot.text', 'plt.text', (['innov_5_min_time', 'innov_5_min', "('min=' + s_innov_5_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_5_min_time, innov_5_min, 'min=' + s_innov_5_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (4366, 4491), True, 'import matplotlib.pyplot as plt\n'), ((4610, 4622), 'matplotlib.pyplot.close', 'plt.close', (['(1)'], {}), '(1)\n', (4619, 4622), True, 'import matplotlib.pyplot as plt\n'), ((4673, 4704), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(20, 13)'}), '(2, figsize=(20, 13))\n', (4683, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4770, 4817), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['vel_pos_innov[0]']"], {}), "(ekf2_innovations['vel_pos_innov[0]'])\n", (4779, 4817), True, 'import numpy as np\n'), ((4895, 4940), 'numpy.amax', 'np.amax', (["ekf2_innovations['vel_pos_innov[0]']"], {}), "(ekf2_innovations['vel_pos_innov[0]'])\n", (4902, 4940), True, 'import numpy as np\n'), ((4967, 5014), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['vel_pos_innov[0]']"], {}), "(ekf2_innovations['vel_pos_innov[0]'])\n", (4976, 5014), True, 'import numpy as np\n'), ((5092, 5137), 'numpy.amin', 'np.amin', (["ekf2_innovations['vel_pos_innov[0]']"], {}), 
"(ekf2_innovations['vel_pos_innov[0]'])\n", (5099, 5137), True, 'import numpy as np\n'), ((5389, 5436), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['vel_pos_innov[1]']"], {}), "(ekf2_innovations['vel_pos_innov[1]'])\n", (5398, 5436), True, 'import numpy as np\n'), ((5514, 5559), 'numpy.amax', 'np.amax', (["ekf2_innovations['vel_pos_innov[1]']"], {}), "(ekf2_innovations['vel_pos_innov[1]'])\n", (5521, 5559), True, 'import numpy as np\n'), ((5586, 5633), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['vel_pos_innov[1]']"], {}), "(ekf2_innovations['vel_pos_innov[1]'])\n", (5595, 5633), True, 'import numpy as np\n'), ((5711, 5756), 'numpy.amin', 'np.amin', (["ekf2_innovations['vel_pos_innov[1]']"], {}), "(ekf2_innovations['vel_pos_innov[1]'])\n", (5718, 5756), True, 'import numpy as np\n'), ((5973, 5993), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (5984, 5993), True, 'import matplotlib.pyplot as plt\n'), ((6002, 6097), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['vel_pos_innov[0]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'vel_pos_innov[0]'], 'b')\n", (6010, 6097), True, 'import matplotlib.pyplot as plt\n'), ((6323, 6368), 'matplotlib.pyplot.title', 'plt.title', (['"""Horizontal Velocity Innovations"""'], {}), "('Horizontal Velocity Innovations')\n", (6332, 6368), True, 'import matplotlib.pyplot as plt\n'), ((6377, 6406), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""North Vel (m/s)"""'], {}), "('North Vel (m/s)')\n", (6387, 6406), True, 'import matplotlib.pyplot as plt\n'), ((6415, 6439), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (6425, 6439), True, 'import matplotlib.pyplot as plt\n'), ((6448, 6458), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6456, 6458), True, 'import matplotlib.pyplot as plt\n'), ((6467, 6603), 'matplotlib.pyplot.text', 'plt.text', 
(['innov_0_max_time', 'innov_0_max', "('max=' + s_innov_0_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (6475, 6603), True, 'import matplotlib.pyplot as plt\n'), ((6625, 6758), 'matplotlib.pyplot.text', 'plt.text', (['innov_0_min_time', 'innov_0_min', "('min=' + s_innov_0_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (6633, 6758), True, 'import matplotlib.pyplot as plt\n'), ((6856, 6876), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (6867, 6876), True, 'import matplotlib.pyplot as plt\n'), ((6885, 6980), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['vel_pos_innov[1]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'vel_pos_innov[1]'], 'b')\n", (6893, 6980), True, 'import matplotlib.pyplot as plt\n'), ((7206, 7234), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""East Vel (m/s)"""'], {}), "('East Vel (m/s)')\n", (7216, 7234), True, 'import matplotlib.pyplot as plt\n'), ((7243, 7267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (7253, 7267), True, 'import matplotlib.pyplot as plt\n'), ((7276, 7286), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7284, 7286), True, 'import matplotlib.pyplot as plt\n'), ((7295, 7431), 'matplotlib.pyplot.text', 'plt.text', (['innov_1_max_time', 'innov_1_max', "('max=' + s_innov_1_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_1_max_time, innov_1_max, 'max=' + s_innov_1_max, fontsize=12,\n horizontalalignment='left', 
verticalalignment='bottom')\n", (7303, 7431), True, 'import matplotlib.pyplot as plt\n'), ((7453, 7586), 'matplotlib.pyplot.text', 'plt.text', (['innov_1_min_time', 'innov_1_min', "('min=' + s_innov_1_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_1_min_time, innov_1_min, 'min=' + s_innov_1_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (7461, 7586), True, 'import matplotlib.pyplot as plt\n'), ((7705, 7717), 'matplotlib.pyplot.close', 'plt.close', (['(2)'], {}), '(2)\n', (7714, 7717), True, 'import matplotlib.pyplot as plt\n'), ((7768, 7799), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {'figsize': '(20, 13)'}), '(3, figsize=(20, 13))\n', (7778, 7799), True, 'import matplotlib.pyplot as plt\n'), ((7865, 7912), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['vel_pos_innov[3]']"], {}), "(ekf2_innovations['vel_pos_innov[3]'])\n", (7874, 7912), True, 'import numpy as np\n'), ((7990, 8035), 'numpy.amax', 'np.amax', (["ekf2_innovations['vel_pos_innov[3]']"], {}), "(ekf2_innovations['vel_pos_innov[3]'])\n", (7997, 8035), True, 'import numpy as np\n'), ((8062, 8109), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['vel_pos_innov[3]']"], {}), "(ekf2_innovations['vel_pos_innov[3]'])\n", (8071, 8109), True, 'import numpy as np\n'), ((8187, 8232), 'numpy.amin', 'np.amin', (["ekf2_innovations['vel_pos_innov[3]']"], {}), "(ekf2_innovations['vel_pos_innov[3]'])\n", (8194, 8232), True, 'import numpy as np\n'), ((8484, 8531), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['vel_pos_innov[4]']"], {}), "(ekf2_innovations['vel_pos_innov[4]'])\n", (8493, 8531), True, 'import numpy as np\n'), ((8609, 8654), 'numpy.amax', 'np.amax', (["ekf2_innovations['vel_pos_innov[4]']"], {}), "(ekf2_innovations['vel_pos_innov[4]'])\n", (8616, 8654), True, 'import numpy as np\n'), ((8681, 8728), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['vel_pos_innov[4]']"], {}), 
"(ekf2_innovations['vel_pos_innov[4]'])\n", (8690, 8728), True, 'import numpy as np\n'), ((8806, 8851), 'numpy.amin', 'np.amin', (["ekf2_innovations['vel_pos_innov[4]']"], {}), "(ekf2_innovations['vel_pos_innov[4]'])\n", (8813, 8851), True, 'import numpy as np\n'), ((9072, 9092), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (9083, 9092), True, 'import matplotlib.pyplot as plt\n'), ((9101, 9196), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['vel_pos_innov[3]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'vel_pos_innov[3]'], 'b')\n", (9109, 9196), True, 'import matplotlib.pyplot as plt\n'), ((9422, 9466), 'matplotlib.pyplot.title', 'plt.title', (['"""Horizontal Position Innovations"""'], {}), "('Horizontal Position Innovations')\n", (9431, 9466), True, 'import matplotlib.pyplot as plt\n'), ((9475, 9502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""North Pos (m)"""'], {}), "('North Pos (m)')\n", (9485, 9502), True, 'import matplotlib.pyplot as plt\n'), ((9511, 9535), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (9521, 9535), True, 'import matplotlib.pyplot as plt\n'), ((9544, 9554), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9552, 9554), True, 'import matplotlib.pyplot as plt\n'), ((9563, 9699), 'matplotlib.pyplot.text', 'plt.text', (['innov_3_max_time', 'innov_3_max', "('max=' + s_innov_3_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_3_max_time, innov_3_max, 'max=' + s_innov_3_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (9571, 9699), True, 'import matplotlib.pyplot as plt\n'), ((9721, 9854), 'matplotlib.pyplot.text', 'plt.text', (['innov_3_min_time', 'innov_3_min', "('min=' + s_innov_3_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': 
'"""top"""'}), "(innov_3_min_time, innov_3_min, 'min=' + s_innov_3_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (9729, 9854), True, 'import matplotlib.pyplot as plt\n'), ((9952, 9972), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (9963, 9972), True, 'import matplotlib.pyplot as plt\n'), ((9981, 10076), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['vel_pos_innov[4]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'vel_pos_innov[4]'], 'b')\n", (9989, 10076), True, 'import matplotlib.pyplot as plt\n'), ((10302, 10328), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""East Pos (m)"""'], {}), "('East Pos (m)')\n", (10312, 10328), True, 'import matplotlib.pyplot as plt\n'), ((10337, 10361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (10347, 10361), True, 'import matplotlib.pyplot as plt\n'), ((10370, 10380), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10378, 10380), True, 'import matplotlib.pyplot as plt\n'), ((10389, 10525), 'matplotlib.pyplot.text', 'plt.text', (['innov_4_max_time', 'innov_4_max', "('max=' + s_innov_4_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_4_max_time, innov_4_max, 'max=' + s_innov_4_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (10397, 10525), True, 'import matplotlib.pyplot as plt\n'), ((10547, 10680), 'matplotlib.pyplot.text', 'plt.text', (['innov_4_min_time', 'innov_4_min', "('min=' + s_innov_4_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_4_min_time, innov_4_min, 'min=' + s_innov_4_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (10555, 10680), True, 'import matplotlib.pyplot as plt\n'), ((10799, 10811), 'matplotlib.pyplot.close', 
'plt.close', (['(3)'], {}), '(3)\n', (10808, 10811), True, 'import matplotlib.pyplot as plt\n'), ((10854, 10885), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {'figsize': '(20, 13)'}), '(4, figsize=(20, 13))\n', (10864, 10885), True, 'import matplotlib.pyplot as plt\n'), ((10947, 10990), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['mag_innov[0]']"], {}), "(ekf2_innovations['mag_innov[0]'])\n", (10956, 10990), True, 'import numpy as np\n'), ((11068, 11109), 'numpy.amax', 'np.amax', (["ekf2_innovations['mag_innov[0]']"], {}), "(ekf2_innovations['mag_innov[0]'])\n", (11075, 11109), True, 'import numpy as np\n'), ((11136, 11179), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['mag_innov[0]']"], {}), "(ekf2_innovations['mag_innov[0]'])\n", (11145, 11179), True, 'import numpy as np\n'), ((11257, 11298), 'numpy.amin', 'np.amin', (["ekf2_innovations['mag_innov[0]']"], {}), "(ekf2_innovations['mag_innov[0]'])\n", (11264, 11298), True, 'import numpy as np\n'), ((11543, 11586), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['mag_innov[1]']"], {}), "(ekf2_innovations['mag_innov[1]'])\n", (11552, 11586), True, 'import numpy as np\n'), ((11664, 11705), 'numpy.amax', 'np.amax', (["ekf2_innovations['mag_innov[1]']"], {}), "(ekf2_innovations['mag_innov[1]'])\n", (11671, 11705), True, 'import numpy as np\n'), ((11732, 11775), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['mag_innov[1]']"], {}), "(ekf2_innovations['mag_innov[1]'])\n", (11741, 11775), True, 'import numpy as np\n'), ((11853, 11894), 'numpy.amin', 'np.amin', (["ekf2_innovations['mag_innov[1]']"], {}), "(ekf2_innovations['mag_innov[1]'])\n", (11860, 11894), True, 'import numpy as np\n'), ((12139, 12182), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['mag_innov[2]']"], {}), "(ekf2_innovations['mag_innov[2]'])\n", (12148, 12182), True, 'import numpy as np\n'), ((12260, 12301), 'numpy.amax', 'np.amax', (["ekf2_innovations['mag_innov[2]']"], {}), "(ekf2_innovations['mag_innov[2]'])\n", (12267, 12301), 
True, 'import numpy as np\n'), ((12328, 12371), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['mag_innov[2]']"], {}), "(ekf2_innovations['mag_innov[2]'])\n", (12337, 12371), True, 'import numpy as np\n'), ((12449, 12490), 'numpy.amin', 'np.amin', (["ekf2_innovations['mag_innov[2]']"], {}), "(ekf2_innovations['mag_innov[2]'])\n", (12456, 12490), True, 'import numpy as np\n'), ((12703, 12723), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (12714, 12723), True, 'import matplotlib.pyplot as plt\n'), ((12732, 12823), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['mag_innov[0]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'mag_innov[0]'], 'b')\n", (12740, 12823), True, 'import matplotlib.pyplot as plt\n'), ((13041, 13078), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetometer Innovations"""'], {}), "('Magnetometer Innovations')\n", (13050, 13078), True, 'import matplotlib.pyplot as plt\n'), ((13087, 13110), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X (Gauss)"""'], {}), "('X (Gauss)')\n", (13097, 13110), True, 'import matplotlib.pyplot as plt\n'), ((13119, 13143), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (13129, 13143), True, 'import matplotlib.pyplot as plt\n'), ((13152, 13162), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (13160, 13162), True, 'import matplotlib.pyplot as plt\n'), ((13171, 13307), 'matplotlib.pyplot.text', 'plt.text', (['innov_0_max_time', 'innov_0_max', "('max=' + s_innov_0_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (13179, 13307), True, 'import matplotlib.pyplot as plt\n'), ((13329, 13462), 'matplotlib.pyplot.text', 'plt.text', (['innov_0_min_time', 
'innov_0_min', "('min=' + s_innov_0_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (13337, 13462), True, 'import matplotlib.pyplot as plt\n'), ((13560, 13580), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (13571, 13580), True, 'import matplotlib.pyplot as plt\n'), ((13589, 13680), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['mag_innov[1]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'mag_innov[1]'], 'b')\n", (13597, 13680), True, 'import matplotlib.pyplot as plt\n'), ((13898, 13921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y (Gauss)"""'], {}), "('Y (Gauss)')\n", (13908, 13921), True, 'import matplotlib.pyplot as plt\n'), ((13930, 13954), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (13940, 13954), True, 'import matplotlib.pyplot as plt\n'), ((13963, 13973), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (13971, 13973), True, 'import matplotlib.pyplot as plt\n'), ((13982, 14118), 'matplotlib.pyplot.text', 'plt.text', (['innov_1_max_time', 'innov_1_max', "('max=' + s_innov_1_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_1_max_time, innov_1_max, 'max=' + s_innov_1_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (13990, 14118), True, 'import matplotlib.pyplot as plt\n'), ((14140, 14273), 'matplotlib.pyplot.text', 'plt.text', (['innov_1_min_time', 'innov_1_min', "('min=' + s_innov_1_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_1_min_time, innov_1_min, 'min=' + s_innov_1_min, fontsize=12,\n horizontalalignment='left', 
verticalalignment='top')\n", (14148, 14273), True, 'import matplotlib.pyplot as plt\n'), ((14371, 14391), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (14382, 14391), True, 'import matplotlib.pyplot as plt\n'), ((14400, 14491), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['mag_innov[2]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'mag_innov[2]'], 'b')\n", (14408, 14491), True, 'import matplotlib.pyplot as plt\n'), ((14709, 14732), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z (Gauss)"""'], {}), "('Z (Gauss)')\n", (14719, 14732), True, 'import matplotlib.pyplot as plt\n'), ((14741, 14765), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (14751, 14765), True, 'import matplotlib.pyplot as plt\n'), ((14774, 14784), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (14782, 14784), True, 'import matplotlib.pyplot as plt\n'), ((14793, 14929), 'matplotlib.pyplot.text', 'plt.text', (['innov_2_max_time', 'innov_2_max', "('max=' + s_innov_2_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_2_max_time, innov_2_max, 'max=' + s_innov_2_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (14801, 14929), True, 'import matplotlib.pyplot as plt\n'), ((14951, 15084), 'matplotlib.pyplot.text', 'plt.text', (['innov_2_min_time', 'innov_2_min', "('min=' + s_innov_2_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_2_min_time, innov_2_min, 'min=' + s_innov_2_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (14959, 15084), True, 'import matplotlib.pyplot as plt\n'), ((15203, 15215), 'matplotlib.pyplot.close', 'plt.close', (['(4)'], {}), '(4)\n', (15212, 15215), True, 'import matplotlib.pyplot as plt\n'), ((15263, 15294), 
'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {'figsize': '(20, 13)'}), '(5, figsize=(20, 13))\n', (15273, 15294), True, 'import matplotlib.pyplot as plt\n'), ((15349, 15393), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['heading_innov']"], {}), "(ekf2_innovations['heading_innov'])\n", (15358, 15393), True, 'import numpy as np\n'), ((15471, 15513), 'numpy.amax', 'np.amax', (["ekf2_innovations['heading_innov']"], {}), "(ekf2_innovations['heading_innov'])\n", (15478, 15513), True, 'import numpy as np\n'), ((15540, 15584), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['heading_innov']"], {}), "(ekf2_innovations['heading_innov'])\n", (15549, 15584), True, 'import numpy as np\n'), ((15662, 15704), 'numpy.amin', 'np.amin', (["ekf2_innovations['heading_innov']"], {}), "(ekf2_innovations['heading_innov'])\n", (15669, 15704), True, 'import numpy as np\n'), ((15917, 16009), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['heading_innov']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'heading_innov'], 'b')\n", (15925, 16009), True, 'import matplotlib.pyplot as plt\n'), ((16229, 16270), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetic Heading Innovations"""'], {}), "('Magnetic Heading Innovations')\n", (16238, 16270), True, 'import matplotlib.pyplot as plt\n'), ((16279, 16305), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Heaing (rad)"""'], {}), "('Heaing (rad)')\n", (16289, 16305), True, 'import matplotlib.pyplot as plt\n'), ((16314, 16338), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (16324, 16338), True, 'import matplotlib.pyplot as plt\n'), ((16347, 16357), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (16355, 16357), True, 'import matplotlib.pyplot as plt\n'), ((16366, 16502), 'matplotlib.pyplot.text', 'plt.text', (['innov_0_max_time', 'innov_0_max', "('max=' + s_innov_0_max)"], {'fontsize': '(12)', 
'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12,\n horizontalalignment='left', verticalalignment='bottom')\n", (16374, 16502), True, 'import matplotlib.pyplot as plt\n'), ((16524, 16657), 'matplotlib.pyplot.text', 'plt.text', (['innov_0_min_time', 'innov_0_min', "('min=' + s_innov_0_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12,\n horizontalalignment='left', verticalalignment='top')\n", (16532, 16657), True, 'import matplotlib.pyplot as plt\n'), ((16776, 16788), 'matplotlib.pyplot.close', 'plt.close', (['(5)'], {}), '(5)\n', (16785, 16788), True, 'import matplotlib.pyplot as plt\n'), ((16828, 16859), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {'figsize': '(20, 13)'}), '(6, figsize=(20, 13))\n', (16838, 16859), True, 'import matplotlib.pyplot as plt\n'), ((16930, 16975), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['airspeed_innov']"], {}), "(ekf2_innovations['airspeed_innov'])\n", (16939, 16975), True, 'import numpy as np\n'), ((17074, 17117), 'numpy.amax', 'np.amax', (["ekf2_innovations['airspeed_innov']"], {}), "(ekf2_innovations['airspeed_innov'])\n", (17081, 17117), True, 'import numpy as np\n'), ((17151, 17196), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['airspeed_innov']"], {}), "(ekf2_innovations['airspeed_innov'])\n", (17160, 17196), True, 'import numpy as np\n'), ((17295, 17338), 'numpy.amin', 'np.amin', (["ekf2_innovations['airspeed_innov']"], {}), "(ekf2_innovations['airspeed_innov'])\n", (17302, 17338), True, 'import numpy as np\n'), ((17535, 17576), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['beta_innov']"], {}), "(ekf2_innovations['beta_innov'])\n", (17544, 17576), True, 'import numpy as np\n'), ((17663, 17702), 'numpy.amax', 'np.amax', (["ekf2_innovations['beta_innov']"], {}), 
"(ekf2_innovations['beta_innov'])\n", (17670, 17702), True, 'import numpy as np\n'), ((17732, 17773), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['beta_innov']"], {}), "(ekf2_innovations['beta_innov'])\n", (17741, 17773), True, 'import numpy as np\n'), ((17860, 17899), 'numpy.amin', 'np.amin', (["ekf2_innovations['beta_innov']"], {}), "(ekf2_innovations['beta_innov'])\n", (17867, 17899), True, 'import numpy as np\n'), ((18043, 18063), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (18054, 18063), True, 'import matplotlib.pyplot as plt\n'), ((18072, 18165), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['airspeed_innov']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'airspeed_innov'], 'b')\n", (18080, 18165), True, 'import matplotlib.pyplot as plt\n'), ((18387, 18425), 'matplotlib.pyplot.title', 'plt.title', (['"""True Airspeed Innovations"""'], {}), "('True Airspeed Innovations')\n", (18396, 18425), True, 'import matplotlib.pyplot as plt\n'), ((18434, 18466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""innovation (m/sec)"""'], {}), "('innovation (m/sec)')\n", (18444, 18466), True, 'import matplotlib.pyplot as plt\n'), ((18475, 18499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (18485, 18499), True, 'import matplotlib.pyplot as plt\n'), ((18508, 18518), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (18516, 18518), True, 'import matplotlib.pyplot as plt\n'), ((18527, 18688), 'matplotlib.pyplot.text', 'plt.text', (['airspeed_innov_max_time', 'airspeed_innov_max', "('max=' + s_airspeed_innov_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(airspeed_innov_max_time, airspeed_innov_max, 'max=' +\n s_airspeed_innov_max, fontsize=12, horizontalalignment='left',\n verticalalignment='bottom')\n", (18535, 18688), True, 'import 
matplotlib.pyplot as plt\n'), ((18706, 18864), 'matplotlib.pyplot.text', 'plt.text', (['airspeed_innov_min_time', 'airspeed_innov_min', "('min=' + s_airspeed_innov_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(airspeed_innov_min_time, airspeed_innov_min, 'min=' +\n s_airspeed_innov_min, fontsize=12, horizontalalignment='left',\n verticalalignment='top')\n", (18714, 18864), True, 'import matplotlib.pyplot as plt\n'), ((18882, 18902), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (18893, 18902), True, 'import matplotlib.pyplot as plt\n'), ((18911, 19000), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['beta_innov']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'beta_innov'], 'b')\n", (18919, 19000), True, 'import matplotlib.pyplot as plt\n'), ((19214, 19256), 'matplotlib.pyplot.title', 'plt.title', (['"""Sythetic Sideslip Innovations"""'], {}), "('Sythetic Sideslip Innovations')\n", (19223, 19256), True, 'import matplotlib.pyplot as plt\n'), ((19265, 19295), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""innovation (rad)"""'], {}), "('innovation (rad)')\n", (19275, 19295), True, 'import matplotlib.pyplot as plt\n'), ((19304, 19328), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (19314, 19328), True, 'import matplotlib.pyplot as plt\n'), ((19337, 19347), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (19345, 19347), True, 'import matplotlib.pyplot as plt\n'), ((19356, 19501), 'matplotlib.pyplot.text', 'plt.text', (['beta_innov_max_time', 'beta_innov_max', "('max=' + s_beta_innov_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(beta_innov_max_time, beta_innov_max, 'max=' + s_beta_innov_max,\n fontsize=12, horizontalalignment='left', verticalalignment='bottom')\n", (19364, 
19501), True, 'import matplotlib.pyplot as plt\n'), ((19523, 19665), 'matplotlib.pyplot.text', 'plt.text', (['beta_innov_min_time', 'beta_innov_min', "('min=' + s_beta_innov_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(beta_innov_min_time, beta_innov_min, 'min=' + s_beta_innov_min,\n fontsize=12, horizontalalignment='left', verticalalignment='top')\n", (19531, 19665), True, 'import matplotlib.pyplot as plt\n'), ((19708, 19720), 'matplotlib.pyplot.close', 'plt.close', (['(6)'], {}), '(6)\n', (19717, 19720), True, 'import matplotlib.pyplot as plt\n'), ((19764, 19795), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {'figsize': '(20, 13)'}), '(7, figsize=(20, 13))\n', (19774, 19795), True, 'import matplotlib.pyplot as plt\n'), ((19862, 19906), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['flow_innov[0]']"], {}), "(ekf2_innovations['flow_innov[0]'])\n", (19871, 19906), True, 'import numpy as np\n'), ((19999, 20041), 'numpy.amax', 'np.amax', (["ekf2_innovations['flow_innov[0]']"], {}), "(ekf2_innovations['flow_innov[0]'])\n", (20006, 20041), True, 'import numpy as np\n'), ((20073, 20117), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['flow_innov[0]']"], {}), "(ekf2_innovations['flow_innov[0]'])\n", (20082, 20117), True, 'import numpy as np\n'), ((20210, 20252), 'numpy.amin', 'np.amin', (["ekf2_innovations['flow_innov[0]']"], {}), "(ekf2_innovations['flow_innov[0]'])\n", (20217, 20252), True, 'import numpy as np\n'), ((20528, 20572), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['flow_innov[1]']"], {}), "(ekf2_innovations['flow_innov[1]'])\n", (20537, 20572), True, 'import numpy as np\n'), ((20665, 20707), 'numpy.amax', 'np.amax', (["ekf2_innovations['flow_innov[1]']"], {}), "(ekf2_innovations['flow_innov[1]'])\n", (20672, 20707), True, 'import numpy as np\n'), ((20739, 20783), 'numpy.argmin', 'np.argmin', (["ekf2_innovations['flow_innov[1]']"], {}), "(ekf2_innovations['flow_innov[1]'])\n", (20748, 
20783), True, 'import numpy as np\n'), ((20876, 20918), 'numpy.amin', 'np.amin', (["ekf2_innovations['flow_innov[1]']"], {}), "(ekf2_innovations['flow_innov[1]'])\n", (20883, 20918), True, 'import numpy as np\n'), ((21157, 21177), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (21168, 21177), True, 'import matplotlib.pyplot as plt\n'), ((21186, 21278), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['flow_innov[0]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'flow_innov[0]'], 'b')\n", (21194, 21278), True, 'import matplotlib.pyplot as plt\n'), ((21498, 21535), 'matplotlib.pyplot.title', 'plt.title', (['"""Optical Flow Innovations"""'], {}), "('Optical Flow Innovations')\n", (21507, 21535), True, 'import matplotlib.pyplot as plt\n'), ((21544, 21569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X (rad/sec)"""'], {}), "('X (rad/sec)')\n", (21554, 21569), True, 'import matplotlib.pyplot as plt\n'), ((21578, 21602), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (21588, 21602), True, 'import matplotlib.pyplot as plt\n'), ((21611, 21621), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (21619, 21621), True, 'import matplotlib.pyplot as plt\n'), ((21630, 21785), 'matplotlib.pyplot.text', 'plt.text', (['flow_innov_x_max_time', 'flow_innov_x_max', "('max=' + s_flow_innov_x_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(flow_innov_x_max_time, flow_innov_x_max, 'max=' +\n s_flow_innov_x_max, fontsize=12, horizontalalignment='left',\n verticalalignment='bottom')\n", (21638, 21785), True, 'import matplotlib.pyplot as plt\n'), ((21803, 21955), 'matplotlib.pyplot.text', 'plt.text', (['flow_innov_x_min_time', 'flow_innov_x_min', "('min=' + s_flow_innov_x_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': 
'"""top"""'}), "(flow_innov_x_min_time, flow_innov_x_min, 'min=' +\n s_flow_innov_x_min, fontsize=12, horizontalalignment='left',\n verticalalignment='top')\n", (21811, 21955), True, 'import matplotlib.pyplot as plt\n'), ((22054, 22074), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (22065, 22074), True, 'import matplotlib.pyplot as plt\n'), ((22083, 22175), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['flow_innov[1]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'flow_innov[1]'], 'b')\n", (22091, 22175), True, 'import matplotlib.pyplot as plt\n'), ((22395, 22420), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y (rad/sec)"""'], {}), "('Y (rad/sec)')\n", (22405, 22420), True, 'import matplotlib.pyplot as plt\n'), ((22429, 22453), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (22439, 22453), True, 'import matplotlib.pyplot as plt\n'), ((22462, 22472), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (22470, 22472), True, 'import matplotlib.pyplot as plt\n'), ((22481, 22636), 'matplotlib.pyplot.text', 'plt.text', (['flow_innov_y_max_time', 'flow_innov_y_max', "('max=' + s_flow_innov_y_max)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""bottom"""'}), "(flow_innov_y_max_time, flow_innov_y_max, 'max=' +\n s_flow_innov_y_max, fontsize=12, horizontalalignment='left',\n verticalalignment='bottom')\n", (22489, 22636), True, 'import matplotlib.pyplot as plt\n'), ((22654, 22806), 'matplotlib.pyplot.text', 'plt.text', (['flow_innov_y_min_time', 'flow_innov_y_min', "('min=' + s_flow_innov_y_min)"], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""top"""'}), "(flow_innov_y_min_time, flow_innov_y_min, 'min=' +\n s_flow_innov_y_min, fontsize=12, horizontalalignment='left',\n verticalalignment='top')\n", (22662, 22806), True, 
'import matplotlib.pyplot as plt\n'), ((22926, 22938), 'matplotlib.pyplot.close', 'plt.close', (['(7)'], {}), '(7)\n', (22935, 22938), True, 'import matplotlib.pyplot as plt\n'), ((24908, 24939), 'matplotlib.pyplot.figure', 'plt.figure', (['(8)'], {'figsize': '(20, 13)'}), '(8, figsize=(20, 13))\n', (24918, 24939), True, 'import matplotlib.pyplot as plt\n'), ((25042, 25068), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_plots', '(1)', '(1)'], {}), '(n_plots, 1, 1)\n', (25053, 25068), True, 'import matplotlib.pyplot as plt\n'), ((25077, 25139), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['mag_test_ratio']", '"""b"""'], {}), "(status_time, estimator_status['mag_test_ratio'], 'b')\n", (25085, 25139), True, 'import matplotlib.pyplot as plt\n'), ((25148, 25194), 'matplotlib.pyplot.title', 'plt.title', (['"""Normalised Innovation Test Levels"""'], {}), "('Normalised Innovation Test Levels')\n", (25157, 25194), True, 'import matplotlib.pyplot as plt\n'), ((25203, 25220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mag"""'], {}), "('mag')\n", (25213, 25220), True, 'import matplotlib.pyplot as plt\n'), ((25229, 25253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (25239, 25253), True, 'import matplotlib.pyplot as plt\n'), ((25262, 25272), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (25270, 25272), True, 'import matplotlib.pyplot as plt\n'), ((25527, 25553), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_plots', '(1)', '(2)'], {}), '(n_plots, 1, 2)\n', (25538, 25553), True, 'import matplotlib.pyplot as plt\n'), ((25562, 25624), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['vel_test_ratio']", '"""b"""'], {}), "(status_time, estimator_status['vel_test_ratio'], 'b')\n", (25570, 25624), True, 'import matplotlib.pyplot as plt\n'), ((25633, 25695), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['pos_test_ratio']", '"""r"""'], {}), 
"(status_time, estimator_status['pos_test_ratio'], 'r')\n", (25641, 25695), True, 'import matplotlib.pyplot as plt\n'), ((25704, 25725), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""vel,pos"""'], {}), "('vel,pos')\n", (25714, 25725), True, 'import matplotlib.pyplot as plt\n'), ((25734, 25758), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (25744, 25758), True, 'import matplotlib.pyplot as plt\n'), ((25767, 25777), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (25775, 25777), True, 'import matplotlib.pyplot as plt\n'), ((26286, 26312), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_plots', '(1)', '(3)'], {}), '(n_plots, 1, 3)\n', (26297, 26312), True, 'import matplotlib.pyplot as plt\n'), ((26321, 26383), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['hgt_test_ratio']", '"""b"""'], {}), "(status_time, estimator_status['hgt_test_ratio'], 'b')\n", (26329, 26383), True, 'import matplotlib.pyplot as plt\n'), ((26392, 26409), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""hgt"""'], {}), "('hgt')\n", (26402, 26409), True, 'import matplotlib.pyplot as plt\n'), ((26418, 26442), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (26428, 26442), True, 'import matplotlib.pyplot as plt\n'), ((26451, 26461), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (26459, 26461), True, 'import matplotlib.pyplot as plt\n'), ((27638, 27650), 'matplotlib.pyplot.close', 'plt.close', (['(8)'], {}), '(8)\n', (27647, 27650), True, 'import matplotlib.pyplot as plt\n'), ((31558, 31579), 'numpy.diff', 'np.diff', (['tilt_aligned'], {}), '(tilt_aligned)\n', (31565, 31579), True, 'import numpy as np\n'), ((31671, 31691), 'numpy.diff', 'np.diff', (['yaw_aligned'], {}), '(yaw_aligned)\n', (31678, 31691), True, 'import numpy as np\n'), ((31823, 31841), 'numpy.diff', 'np.diff', (['using_gps'], {}), '(using_gps)\n', (31830, 31841), True, 'import numpy as np\n'), ((31929, 
31951), 'numpy.diff', 'np.diff', (['using_optflow'], {}), '(using_optflow)\n', (31936, 31951), True, 'import numpy as np\n'), ((32045, 32065), 'numpy.diff', 'np.diff', (['using_evpos'], {}), '(using_evpos)\n', (32052, 32065), True, 'import numpy as np\n'), ((32199, 32221), 'numpy.diff', 'np.diff', (['using_barohgt'], {}), '(using_barohgt)\n', (32206, 32221), True, 'import numpy as np\n'), ((32316, 32337), 'numpy.diff', 'np.diff', (['using_gpshgt'], {}), '(using_gpshgt)\n', (32323, 32337), True, 'import numpy as np\n'), ((32430, 32451), 'numpy.diff', 'np.diff', (['using_rnghgt'], {}), '(using_rnghgt)\n', (32437, 32451), True, 'import numpy as np\n'), ((32543, 32563), 'numpy.diff', 'np.diff', (['using_evhgt'], {}), '(using_evhgt)\n', (32550, 32563), True, 'import numpy as np\n'), ((32704, 32725), 'numpy.diff', 'np.diff', (['using_magyaw'], {}), '(using_magyaw)\n', (32711, 32725), True, 'import numpy as np\n'), ((32823, 32843), 'numpy.diff', 'np.diff', (['using_mag3d'], {}), '(using_mag3d)\n', (32830, 32843), True, 'import numpy as np\n'), ((32941, 32963), 'numpy.diff', 'np.diff', (['using_magdecl'], {}), '(using_magdecl)\n', (32948, 32963), True, 'import numpy as np\n'), ((33086, 33117), 'matplotlib.pyplot.figure', 'plt.figure', (['(9)'], {'figsize': '(20, 13)'}), '(9, figsize=(20, 13))\n', (33096, 33117), True, 'import matplotlib.pyplot as plt\n'), ((33169, 33189), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (33180, 33189), True, 'import matplotlib.pyplot as plt\n'), ((33198, 33240), 'matplotlib.pyplot.title', 'plt.title', (['"""EKF Control Status - Figure A"""'], {}), "('EKF Control Status - Figure A')\n", (33207, 33240), True, 'import matplotlib.pyplot as plt\n'), ((33249, 33289), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'tilt_aligned', '"""b"""'], {}), "(status_time, tilt_aligned, 'b')\n", (33257, 33289), True, 'import matplotlib.pyplot as plt\n'), ((33298, 33337), 'matplotlib.pyplot.plot', 'plt.plot', 
(['status_time', 'yaw_aligned', '"""r"""'], {}), "(status_time, yaw_aligned, 'r')\n", (33306, 33337), True, 'import matplotlib.pyplot as plt\n'), ((33346, 33365), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (33354, 33365), True, 'import matplotlib.pyplot as plt\n'), ((33374, 33395), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""aligned"""'], {}), "('aligned')\n", (33384, 33395), True, 'import matplotlib.pyplot as plt\n'), ((33404, 33414), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (33412, 33414), True, 'import matplotlib.pyplot as plt\n'), ((34142, 34162), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (34153, 34162), True, 'import matplotlib.pyplot as plt\n'), ((34171, 34208), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_gps', '"""b"""'], {}), "(status_time, using_gps, 'b')\n", (34179, 34208), True, 'import matplotlib.pyplot as plt\n'), ((34217, 34258), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_optflow', '"""r"""'], {}), "(status_time, using_optflow, 'r')\n", (34225, 34258), True, 'import matplotlib.pyplot as plt\n'), ((34267, 34306), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_evpos', '"""g"""'], {}), "(status_time, using_evpos, 'g')\n", (34275, 34306), True, 'import matplotlib.pyplot as plt\n'), ((34315, 34334), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (34323, 34334), True, 'import matplotlib.pyplot as plt\n'), ((34343, 34367), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pos aiding"""'], {}), "('pos aiding')\n", (34353, 34367), True, 'import matplotlib.pyplot as plt\n'), ((34376, 34386), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (34384, 34386), True, 'import matplotlib.pyplot as plt\n'), ((35922, 35942), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (35933, 35942), True, 'import matplotlib.pyplot as plt\n'), ((35951, 
35992), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_barohgt', '"""b"""'], {}), "(status_time, using_barohgt, 'b')\n", (35959, 35992), True, 'import matplotlib.pyplot as plt\n'), ((36001, 36041), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_gpshgt', '"""r"""'], {}), "(status_time, using_gpshgt, 'r')\n", (36009, 36041), True, 'import matplotlib.pyplot as plt\n'), ((36050, 36090), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_rnghgt', '"""g"""'], {}), "(status_time, using_rnghgt, 'g')\n", (36058, 36090), True, 'import matplotlib.pyplot as plt\n'), ((36099, 36138), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_evhgt', '"""c"""'], {}), "(status_time, using_evhgt, 'c')\n", (36107, 36138), True, 'import matplotlib.pyplot as plt\n'), ((36147, 36166), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (36155, 36166), True, 'import matplotlib.pyplot as plt\n'), ((36175, 36199), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""hgt aiding"""'], {}), "('hgt aiding')\n", (36185, 36199), True, 'import matplotlib.pyplot as plt\n'), ((36208, 36218), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (36216, 36218), True, 'import matplotlib.pyplot as plt\n'), ((38250, 38270), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (38261, 38270), True, 'import matplotlib.pyplot as plt\n'), ((38279, 38319), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_magyaw', '"""b"""'], {}), "(status_time, using_magyaw, 'b')\n", (38287, 38319), True, 'import matplotlib.pyplot as plt\n'), ((38328, 38367), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_mag3d', '"""r"""'], {}), "(status_time, using_mag3d, 'r')\n", (38336, 38367), True, 'import matplotlib.pyplot as plt\n'), ((38376, 38417), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'using_magdecl', '"""g"""'], {}), "(status_time, using_magdecl, 'g')\n", (38384, 38417), True, 
'import matplotlib.pyplot as plt\n'), ((38426, 38445), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (38434, 38445), True, 'import matplotlib.pyplot as plt\n'), ((38454, 38478), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mag aiding"""'], {}), "('mag aiding')\n", (38464, 38478), True, 'import matplotlib.pyplot as plt\n'), ((38487, 38511), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (38497, 38511), True, 'import matplotlib.pyplot as plt\n'), ((38520, 38530), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (38528, 38530), True, 'import matplotlib.pyplot as plt\n'), ((40130, 40142), 'matplotlib.pyplot.close', 'plt.close', (['(9)'], {}), '(9)\n', (40139, 40142), True, 'import matplotlib.pyplot as plt\n'), ((40189, 40221), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {'figsize': '(20, 13)'}), '(10, figsize=(20, 13))\n', (40199, 40221), True, 'import matplotlib.pyplot as plt\n'), ((40268, 40288), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (40279, 40288), True, 'import matplotlib.pyplot as plt\n'), ((40297, 40339), 'matplotlib.pyplot.title', 'plt.title', (['"""EKF Control Status - Figure B"""'], {}), "('EKF Control Status - Figure B')\n", (40306, 40339), True, 'import matplotlib.pyplot as plt\n'), ((40348, 40384), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'airborne', '"""b"""'], {}), "(status_time, airborne, 'b')\n", (40356, 40384), True, 'import matplotlib.pyplot as plt\n'), ((40393, 40412), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (40401, 40412), True, 'import matplotlib.pyplot as plt\n'), ((40421, 40443), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""airborne"""'], {}), "('airborne')\n", (40431, 40443), True, 'import matplotlib.pyplot as plt\n'), ((40452, 40462), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (40460, 40462), True, 'import matplotlib.pyplot as 
plt\n'), ((41763, 41783), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (41774, 41783), True, 'import matplotlib.pyplot as plt\n'), ((41792, 41835), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'estimating_wind', '"""b"""'], {}), "(status_time, estimating_wind, 'b')\n", (41800, 41835), True, 'import matplotlib.pyplot as plt\n'), ((41844, 41863), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (41852, 41863), True, 'import matplotlib.pyplot as plt\n'), ((41872, 41901), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""estimating wind"""'], {}), "('estimating wind')\n", (41882, 41901), True, 'import matplotlib.pyplot as plt\n'), ((41910, 41934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (41920, 41934), True, 'import matplotlib.pyplot as plt\n'), ((41943, 41953), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (41951, 41953), True, 'import matplotlib.pyplot as plt\n'), ((41983, 41996), 'matplotlib.pyplot.close', 'plt.close', (['(10)'], {}), '(10)\n', (41992, 41996), True, 'import matplotlib.pyplot as plt\n'), ((43929, 43961), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {'figsize': '(20, 13)'}), '(11, figsize=(20, 13))\n', (43939, 43961), True, 'import matplotlib.pyplot as plt\n'), ((43970, 43990), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(1)', '(1)'], {}), '(6, 1, 1)\n', (43981, 43990), True, 'import matplotlib.pyplot as plt\n'), ((43999, 44037), 'matplotlib.pyplot.title', 'plt.title', (['"""EKF Innovation Test Fails"""'], {}), "('EKF Innovation Test Fails')\n", (44008, 44037), True, 'import matplotlib.pyplot as plt\n'), ((44046, 44105), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'vel_innov_fail', '"""b"""'], {'label': '"""vel NED"""'}), "(status_time, vel_innov_fail, 'b', label='vel NED')\n", (44054, 44105), True, 'import matplotlib.pyplot as plt\n'), ((44114, 44173), 'matplotlib.pyplot.plot', 
'plt.plot', (['status_time', 'posh_innov_fail', '"""r"""'], {'label': '"""pos NE"""'}), "(status_time, posh_innov_fail, 'r', label='pos NE')\n", (44122, 44173), True, 'import matplotlib.pyplot as plt\n'), ((44182, 44201), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (44190, 44201), True, 'import matplotlib.pyplot as plt\n'), ((44210, 44230), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (44220, 44230), True, 'import matplotlib.pyplot as plt\n'), ((44239, 44267), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (44249, 44267), True, 'import matplotlib.pyplot as plt\n'), ((44276, 44286), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (44284, 44286), True, 'import matplotlib.pyplot as plt\n'), ((44295, 44315), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(1)', '(2)'], {}), '(6, 1, 2)\n', (44306, 44315), True, 'import matplotlib.pyplot as plt\n'), ((44324, 44389), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'posv_innov_fail', '"""b"""'], {'label': '"""hgt absolute"""'}), "(status_time, posv_innov_fail, 'b', label='hgt absolute')\n", (44332, 44389), True, 'import matplotlib.pyplot as plt\n'), ((44398, 44467), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'hagl_innov_fail', '"""r"""'], {'label': '"""hgt above ground"""'}), "(status_time, hagl_innov_fail, 'r', label='hgt above ground')\n", (44406, 44467), True, 'import matplotlib.pyplot as plt\n'), ((44476, 44495), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (44484, 44495), True, 'import matplotlib.pyplot as plt\n'), ((44504, 44524), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (44514, 44524), True, 'import matplotlib.pyplot as plt\n'), ((44533, 44561), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (44543, 44561), True, 'import 
matplotlib.pyplot as plt\n'), ((44570, 44580), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (44578, 44580), True, 'import matplotlib.pyplot as plt\n'), ((44589, 44609), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(1)', '(3)'], {}), '(6, 1, 3)\n', (44600, 44609), True, 'import matplotlib.pyplot as plt\n'), ((44618, 44676), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'magx_innov_fail', '"""b"""'], {'label': '"""mag_x"""'}), "(status_time, magx_innov_fail, 'b', label='mag_x')\n", (44626, 44676), True, 'import matplotlib.pyplot as plt\n'), ((44685, 44743), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'magy_innov_fail', '"""r"""'], {'label': '"""mag_y"""'}), "(status_time, magy_innov_fail, 'r', label='mag_y')\n", (44693, 44743), True, 'import matplotlib.pyplot as plt\n'), ((44752, 44810), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'magz_innov_fail', '"""g"""'], {'label': '"""mag_z"""'}), "(status_time, magz_innov_fail, 'g', label='mag_z')\n", (44760, 44810), True, 'import matplotlib.pyplot as plt\n'), ((44819, 44874), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'yaw_innov_fail', '"""c"""'], {'label': '"""yaw"""'}), "(status_time, yaw_innov_fail, 'c', label='yaw')\n", (44827, 44874), True, 'import matplotlib.pyplot as plt\n'), ((44883, 44911), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (44893, 44911), True, 'import matplotlib.pyplot as plt\n'), ((44920, 44939), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (44928, 44939), True, 'import matplotlib.pyplot as plt\n'), ((44948, 44968), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (44958, 44968), True, 'import matplotlib.pyplot as plt\n'), ((44977, 44987), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (44985, 44987), True, 'import matplotlib.pyplot as plt\n'), ((44996, 45016), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['(6)', '(1)', '(4)'], {}), '(6, 1, 4)\n', (45007, 45016), True, 'import matplotlib.pyplot as plt\n'), ((45025, 45085), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'tas_innov_fail', '"""b"""'], {'label': '"""airspeed"""'}), "(status_time, tas_innov_fail, 'b', label='airspeed')\n", (45033, 45085), True, 'import matplotlib.pyplot as plt\n'), ((45094, 45113), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (45102, 45113), True, 'import matplotlib.pyplot as plt\n'), ((45122, 45142), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (45132, 45142), True, 'import matplotlib.pyplot as plt\n'), ((45151, 45179), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (45161, 45179), True, 'import matplotlib.pyplot as plt\n'), ((45188, 45198), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (45196, 45198), True, 'import matplotlib.pyplot as plt\n'), ((45207, 45227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(1)', '(5)'], {}), '(6, 1, 5)\n', (45218, 45227), True, 'import matplotlib.pyplot as plt\n'), ((45236, 45296), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'sli_innov_fail', '"""b"""'], {'label': '"""sideslip"""'}), "(status_time, sli_innov_fail, 'b', label='sideslip')\n", (45244, 45296), True, 'import matplotlib.pyplot as plt\n'), ((45305, 45324), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (45313, 45324), True, 'import matplotlib.pyplot as plt\n'), ((45333, 45353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (45343, 45353), True, 'import matplotlib.pyplot as plt\n'), ((45362, 45390), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (45372, 45390), True, 'import matplotlib.pyplot as plt\n'), ((45399, 45409), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (45407, 45409), True, 'import 
matplotlib.pyplot as plt\n'), ((45418, 45438), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(6)', '(1)', '(6)'], {}), '(6, 1, 6)\n', (45429, 45438), True, 'import matplotlib.pyplot as plt\n'), ((45447, 45505), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'ofx_innov_fail', '"""b"""'], {'label': '"""flow X"""'}), "(status_time, ofx_innov_fail, 'b', label='flow X')\n", (45455, 45505), True, 'import matplotlib.pyplot as plt\n'), ((45514, 45572), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'ofy_innov_fail', '"""r"""'], {'label': '"""flow Y"""'}), "(status_time, ofy_innov_fail, 'r', label='flow Y')\n", (45522, 45572), True, 'import matplotlib.pyplot as plt\n'), ((45581, 45600), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (45589, 45600), True, 'import matplotlib.pyplot as plt\n'), ((45609, 45629), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (45619, 45629), True, 'import matplotlib.pyplot as plt\n'), ((45638, 45661), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec"""'], {}), "('time (sec')\n", (45648, 45661), True, 'import matplotlib.pyplot as plt\n'), ((45670, 45698), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (45680, 45698), True, 'import matplotlib.pyplot as plt\n'), ((45707, 45717), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (45715, 45717), True, 'import matplotlib.pyplot as plt\n'), ((45747, 45760), 'matplotlib.pyplot.close', 'plt.close', (['(11)'], {}), '(11)\n', (45756, 45760), True, 'import matplotlib.pyplot as plt\n'), ((45808, 45840), 'matplotlib.pyplot.figure', 'plt.figure', (['(12)'], {'figsize': '(20, 13)'}), '(12, figsize=(20, 13))\n', (45818, 45840), True, 'import matplotlib.pyplot as plt\n'), ((47222, 47242), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (47233, 47242), True, 'import matplotlib.pyplot as plt\n'), ((47251, 47296), 
'matplotlib.pyplot.title', 'plt.title', (['"""GPS Direct Output Check Failures"""'], {}), "('GPS Direct Output Check Failures')\n", (47260, 47296), True, 'import matplotlib.pyplot as plt\n'), ((47305, 47360), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'gfix_fail', '"""k"""'], {'label': '"""fix type"""'}), "(status_time, gfix_fail, 'k', label='fix type')\n", (47313, 47360), True, 'import matplotlib.pyplot as plt\n'), ((47369, 47422), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'nsat_fail', '"""b"""'], {'label': '"""N sats"""'}), "(status_time, nsat_fail, 'b', label='N sats')\n", (47377, 47422), True, 'import matplotlib.pyplot as plt\n'), ((47431, 47482), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'gdop_fail', '"""r"""'], {'label': '"""GDOP"""'}), "(status_time, gdop_fail, 'r', label='GDOP')\n", (47439, 47482), True, 'import matplotlib.pyplot as plt\n'), ((47491, 47553), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'herr_fail', '"""g"""'], {'label': '"""horiz pos error"""'}), "(status_time, herr_fail, 'g', label='horiz pos error')\n", (47499, 47553), True, 'import matplotlib.pyplot as plt\n'), ((47562, 47623), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'verr_fail', '"""c"""'], {'label': '"""vert pos error"""'}), "(status_time, verr_fail, 'c', label='vert pos error')\n", (47570, 47623), True, 'import matplotlib.pyplot as plt\n'), ((47632, 47690), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'serr_fail', '"""m"""'], {'label': '"""speed error"""'}), "(status_time, serr_fail, 'm', label='speed error')\n", (47640, 47690), True, 'import matplotlib.pyplot as plt\n'), ((47699, 47718), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (47707, 47718), True, 'import matplotlib.pyplot as plt\n'), ((47727, 47747), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (47737, 47747), True, 'import matplotlib.pyplot as plt\n'), ((47756, 47785), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (47766, 47785), True, 'import matplotlib.pyplot as plt\n'), ((47794, 47804), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (47802, 47804), True, 'import matplotlib.pyplot as plt\n'), ((47813, 47833), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (47824, 47833), True, 'import matplotlib.pyplot as plt\n'), ((47842, 47888), 'matplotlib.pyplot.title', 'plt.title', (['"""GPS Derived Output Check Failures"""'], {}), "('GPS Derived Output Check Failures')\n", (47851, 47888), True, 'import matplotlib.pyplot as plt\n'), ((47897, 47957), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'hdrift_fail', '"""b"""'], {'label': '"""horiz drift"""'}), "(status_time, hdrift_fail, 'b', label='horiz drift')\n", (47905, 47957), True, 'import matplotlib.pyplot as plt\n'), ((47966, 48025), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'vdrift_fail', '"""r"""'], {'label': '"""vert drift"""'}), "(status_time, vdrift_fail, 'r', label='vert drift')\n", (47974, 48025), True, 'import matplotlib.pyplot as plt\n'), ((48034, 48092), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'hspd_fail', '"""g"""'], {'label': '"""horiz speed"""'}), "(status_time, hspd_fail, 'g', label='horiz speed')\n", (48042, 48092), True, 'import matplotlib.pyplot as plt\n'), ((48101, 48174), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', 'veld_diff_fail', '"""c"""'], {'label': '"""vert vel inconsistent"""'}), "(status_time, veld_diff_fail, 'c', label='vert vel inconsistent')\n", (48109, 48174), True, 'import matplotlib.pyplot as plt\n'), ((48183, 48202), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (48191, 48202), True, 'import matplotlib.pyplot as plt\n'), ((48211, 48231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""failed"""'], {}), "('failed')\n", (48221, 48231), True, 'import matplotlib.pyplot 
as plt\n'), ((48240, 48263), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec"""'], {}), "('time (sec')\n", (48250, 48263), True, 'import matplotlib.pyplot as plt\n'), ((48272, 48301), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (48282, 48301), True, 'import matplotlib.pyplot as plt\n'), ((48310, 48320), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (48318, 48320), True, 'import matplotlib.pyplot as plt\n'), ((48350, 48363), 'matplotlib.pyplot.close', 'plt.close', (['(12)'], {}), '(12)\n', (48359, 48363), True, 'import matplotlib.pyplot as plt\n'), ((48407, 48439), 'matplotlib.pyplot.figure', 'plt.figure', (['(13)'], {'figsize': '(20, 13)'}), '(13, figsize=(20, 13))\n', (48417, 48439), True, 'import matplotlib.pyplot as plt\n'), ((48448, 48478), 'matplotlib.pyplot.title', 'plt.title', (['"""Reported Accuracy"""'], {}), "('Reported Accuracy')\n", (48457, 48478), True, 'import matplotlib.pyplot as plt\n'), ((48487, 48578), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['pos_horiz_accuracy']", '"""b"""'], {'label': '"""horizontal"""'}), "(status_time, estimator_status['pos_horiz_accuracy'], 'b', label=\n 'horizontal')\n", (48495, 48578), True, 'import matplotlib.pyplot as plt\n'), ((48582, 48670), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['pos_vert_accuracy']", '"""r"""'], {'label': '"""vertical"""'}), "(status_time, estimator_status['pos_vert_accuracy'], 'r', label=\n 'vertical')\n", (48590, 48670), True, 'import matplotlib.pyplot as plt\n'), ((48674, 48700), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy (m)"""'], {}), "('accuracy (m)')\n", (48684, 48700), True, 'import matplotlib.pyplot as plt\n'), ((48709, 48732), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec"""'], {}), "('time (sec')\n", (48719, 48732), True, 'import matplotlib.pyplot as plt\n'), ((48741, 48770), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'loc': '"""upper right"""'}), "(loc='upper right')\n", (48751, 48770), True, 'import matplotlib.pyplot as plt\n'), ((48779, 48789), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (48787, 48789), True, 'import matplotlib.pyplot as plt\n'), ((48819, 48832), 'matplotlib.pyplot.close', 'plt.close', (['(13)'], {}), '(13)\n', (48828, 48832), True, 'import matplotlib.pyplot as plt\n'), ((48886, 48918), 'matplotlib.pyplot.figure', 'plt.figure', (['(14)'], {'figsize': '(20, 13)'}), '(14, figsize=(20, 13))\n', (48896, 48918), True, 'import matplotlib.pyplot as plt\n'), ((48949, 48987), 'numpy.argmax', 'np.argmax', (["estimator_status['vibe[0]']"], {}), "(estimator_status['vibe[0]'])\n", (48958, 48987), True, 'import numpy as np\n'), ((49078, 49114), 'numpy.amax', 'np.amax', (["estimator_status['vibe[0]']"], {}), "(estimator_status['vibe[0]'])\n", (49085, 49114), True, 'import numpy as np\n'), ((49146, 49184), 'numpy.argmax', 'np.argmax', (["estimator_status['vibe[1]']"], {}), "(estimator_status['vibe[1]'])\n", (49155, 49184), True, 'import numpy as np\n'), ((49278, 49314), 'numpy.amax', 'np.amax', (["estimator_status['vibe[1]']"], {}), "(estimator_status['vibe[1]'])\n", (49285, 49314), True, 'import numpy as np\n'), ((49346, 49384), 'numpy.argmax', 'np.argmax', (["estimator_status['vibe[2]']"], {}), "(estimator_status['vibe[2]'])\n", (49355, 49384), True, 'import numpy as np\n'), ((49478, 49514), 'numpy.amax', 'np.amax', (["estimator_status['vibe[2]']"], {}), "(estimator_status['vibe[2]'])\n", (49485, 49514), True, 'import numpy as np\n'), ((49523, 49543), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (49534, 49543), True, 'import matplotlib.pyplot as plt\n'), ((49552, 49647), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "(1000.0 * estimator_status['vibe[0]'])", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], 1000.0 * estimator_status[\n 'vibe[0]'], 'b')\n", (49560, 49647), 
True, 'import matplotlib.pyplot as plt\n'), ((49650, 49684), 'matplotlib.pyplot.title', 'plt.title', (['"""IMU Vibration Metrics"""'], {}), "('IMU Vibration Metrics')\n", (49659, 49684), True, 'import matplotlib.pyplot as plt\n'), ((49693, 49728), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Del Ang Coning (mrad)"""'], {}), "('Del Ang Coning (mrad)')\n", (49703, 49728), True, 'import matplotlib.pyplot as plt\n'), ((49737, 49747), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (49745, 49747), True, 'import matplotlib.pyplot as plt\n'), ((49954, 49974), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (49965, 49974), True, 'import matplotlib.pyplot as plt\n'), ((49983, 50078), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "(1000.0 * estimator_status['vibe[1]'])", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], 1000.0 * estimator_status[\n 'vibe[1]'], 'b')\n", (49991, 50078), True, 'import matplotlib.pyplot as plt\n'), ((50081, 50112), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""HF Del Ang (mrad)"""'], {}), "('HF Del Ang (mrad)')\n", (50091, 50112), True, 'import matplotlib.pyplot as plt\n'), ((50121, 50131), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (50129, 50131), True, 'import matplotlib.pyplot as plt\n'), ((50341, 50361), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (50352, 50361), True, 'import matplotlib.pyplot as plt\n'), ((50370, 50455), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['vibe[2]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status['vibe[2]'],\n 'b')\n", (50378, 50455), True, 'import matplotlib.pyplot as plt\n'), ((50459, 50489), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""HF Del Vel (m/s)"""'], {}), "('HF Del Vel (m/s)')\n", (50469, 50489), True, 'import matplotlib.pyplot as plt\n'), ((50498, 50522), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (50508, 50522), True, 'import matplotlib.pyplot as plt\n'), ((50531, 50541), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (50539, 50541), True, 'import matplotlib.pyplot as plt\n'), ((50754, 50767), 'matplotlib.pyplot.close', 'plt.close', (['(14)'], {}), '(14)\n', (50763, 50767), True, 'import matplotlib.pyplot as plt\n'), ((50831, 50863), 'matplotlib.pyplot.figure', 'plt.figure', (['(15)'], {'figsize': '(20, 13)'}), '(15, figsize=(20, 13))\n', (50841, 50863), True, 'import matplotlib.pyplot as plt\n'), ((50896, 50951), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['output_tracking_error[0]']"], {}), "(ekf2_innovations['output_tracking_error[0]'])\n", (50905, 50951), True, 'import numpy as np\n'), ((51047, 51100), 'numpy.amax', 'np.amax', (["ekf2_innovations['output_tracking_error[0]']"], {}), "(ekf2_innovations['output_tracking_error[0]'])\n", (51054, 51100), True, 'import numpy as np\n'), ((51133, 51188), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['output_tracking_error[1]']"], {}), "(ekf2_innovations['output_tracking_error[1]'])\n", (51142, 51188), True, 'import numpy as np\n'), ((51284, 51337), 'numpy.amax', 'np.amax', (["ekf2_innovations['output_tracking_error[1]']"], {}), "(ekf2_innovations['output_tracking_error[1]'])\n", (51291, 51337), True, 'import numpy as np\n'), ((51370, 51425), 'numpy.argmax', 'np.argmax', (["ekf2_innovations['output_tracking_error[2]']"], {}), "(ekf2_innovations['output_tracking_error[2]'])\n", (51379, 51425), True, 'import numpy as np\n'), ((51521, 51574), 'numpy.amax', 'np.amax', (["ekf2_innovations['output_tracking_error[2]']"], {}), "(ekf2_innovations['output_tracking_error[2]'])\n", (51528, 51574), True, 'import numpy as np\n'), ((51583, 51603), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (51594, 51603), True, 'import matplotlib.pyplot as plt\n'), ((51612, 51724), 
'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "(1000.0 * ekf2_innovations['output_tracking_error[0]'])", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], 1000.0 * ekf2_innovations[\n 'output_tracking_error[0]'], 'b')\n", (51620, 51724), True, 'import matplotlib.pyplot as plt\n'), ((51724, 51778), 'matplotlib.pyplot.title', 'plt.title', (['"""Output Observer Tracking Error Magnitudes"""'], {}), "('Output Observer Tracking Error Magnitudes')\n", (51733, 51778), True, 'import matplotlib.pyplot as plt\n'), ((51787, 51814), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""angles (mrad)"""'], {}), "('angles (mrad)')\n", (51797, 51814), True, 'import matplotlib.pyplot as plt\n'), ((51823, 51833), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (51831, 51833), True, 'import matplotlib.pyplot as plt\n'), ((52040, 52060), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (52051, 52060), True, 'import matplotlib.pyplot as plt\n'), ((52069, 52172), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['output_tracking_error[1]']", '"""b"""'], {}), "(1e-06 * ekf2_innovations['timestamp'], ekf2_innovations[\n 'output_tracking_error[1]'], 'b')\n", (52077, 52172), True, 'import matplotlib.pyplot as plt\n'), ((52175, 52203), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity (m/s)"""'], {}), "('velocity (m/s)')\n", (52185, 52203), True, 'import matplotlib.pyplot as plt\n'), ((52212, 52222), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (52220, 52222), True, 'import matplotlib.pyplot as plt\n'), ((52417, 52437), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (52428, 52437), True, 'import matplotlib.pyplot as plt\n'), ((52446, 52549), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * ekf2_innovations['timestamp'])", "ekf2_innovations['output_tracking_error[2]']", '"""b"""'], {}), "(1e-06 * 
ekf2_innovations['timestamp'], ekf2_innovations[\n 'output_tracking_error[2]'], 'b')\n", (52454, 52549), True, 'import matplotlib.pyplot as plt\n'), ((52552, 52578), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""position (m)"""'], {}), "('position (m)')\n", (52562, 52578), True, 'import matplotlib.pyplot as plt\n'), ((52587, 52611), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (52597, 52611), True, 'import matplotlib.pyplot as plt\n'), ((52620, 52630), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (52628, 52630), True, 'import matplotlib.pyplot as plt\n'), ((52846, 52859), 'matplotlib.pyplot.close', 'plt.close', (['(15)'], {}), '(15)\n', (52855, 52859), True, 'import matplotlib.pyplot as plt\n'), ((52914, 52946), 'matplotlib.pyplot.figure', 'plt.figure', (['(16)'], {'figsize': '(20, 13)'}), '(16, figsize=(20, 13))\n', (52924, 52946), True, 'import matplotlib.pyplot as plt\n'), ((52955, 52975), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (52966, 52975), True, 'import matplotlib.pyplot as plt\n'), ((52984, 53073), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[10]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[10]'], 'b')\n", (52992, 53073), True, 'import matplotlib.pyplot as plt\n'), ((53076, 53115), 'matplotlib.pyplot.title', 'plt.title', (['"""Delta Angle Bias Estimates"""'], {}), "('Delta Angle Bias Estimates')\n", (53085, 53115), True, 'import matplotlib.pyplot as plt\n'), ((53124, 53145), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X (rad)"""'], {}), "('X (rad)')\n", (53134, 53145), True, 'import matplotlib.pyplot as plt\n'), ((53154, 53178), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (53164, 53178), True, 'import matplotlib.pyplot as plt\n'), ((53187, 53197), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), 
'()\n', (53195, 53197), True, 'import matplotlib.pyplot as plt\n'), ((53206, 53226), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (53217, 53226), True, 'import matplotlib.pyplot as plt\n'), ((53235, 53324), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[11]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[11]'], 'b')\n", (53243, 53324), True, 'import matplotlib.pyplot as plt\n'), ((53327, 53348), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y (rad)"""'], {}), "('Y (rad)')\n", (53337, 53348), True, 'import matplotlib.pyplot as plt\n'), ((53357, 53381), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (53367, 53381), True, 'import matplotlib.pyplot as plt\n'), ((53390, 53400), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (53398, 53400), True, 'import matplotlib.pyplot as plt\n'), ((53409, 53429), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (53420, 53429), True, 'import matplotlib.pyplot as plt\n'), ((53438, 53527), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[12]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[12]'], 'b')\n", (53446, 53527), True, 'import matplotlib.pyplot as plt\n'), ((53530, 53551), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z (rad)"""'], {}), "('Z (rad)')\n", (53540, 53551), True, 'import matplotlib.pyplot as plt\n'), ((53560, 53584), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (53570, 53584), True, 'import matplotlib.pyplot as plt\n'), ((53593, 53603), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (53601, 53603), True, 'import matplotlib.pyplot as plt\n'), ((53633, 53646), 'matplotlib.pyplot.close', 'plt.close', (['(16)'], {}), '(16)\n', 
(53642, 53646), True, 'import matplotlib.pyplot as plt\n'), ((53704, 53736), 'matplotlib.pyplot.figure', 'plt.figure', (['(17)'], {'figsize': '(20, 13)'}), '(17, figsize=(20, 13))\n', (53714, 53736), True, 'import matplotlib.pyplot as plt\n'), ((53745, 53765), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (53756, 53765), True, 'import matplotlib.pyplot as plt\n'), ((53774, 53863), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[13]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[13]'], 'b')\n", (53782, 53863), True, 'import matplotlib.pyplot as plt\n'), ((53866, 53908), 'matplotlib.pyplot.title', 'plt.title', (['"""Delta Velocity Bias Estimates"""'], {}), "('Delta Velocity Bias Estimates')\n", (53875, 53908), True, 'import matplotlib.pyplot as plt\n'), ((53917, 53938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X (m/s)"""'], {}), "('X (m/s)')\n", (53927, 53938), True, 'import matplotlib.pyplot as plt\n'), ((53947, 53971), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (53957, 53971), True, 'import matplotlib.pyplot as plt\n'), ((53980, 53990), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (53988, 53990), True, 'import matplotlib.pyplot as plt\n'), ((53999, 54019), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (54010, 54019), True, 'import matplotlib.pyplot as plt\n'), ((54028, 54117), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[14]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[14]'], 'b')\n", (54036, 54117), True, 'import matplotlib.pyplot as plt\n'), ((54120, 54141), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y (m/s)"""'], {}), "('Y (m/s)')\n", (54130, 54141), True, 'import matplotlib.pyplot as plt\n'), ((54150, 
54174), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (54160, 54174), True, 'import matplotlib.pyplot as plt\n'), ((54183, 54193), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (54191, 54193), True, 'import matplotlib.pyplot as plt\n'), ((54202, 54222), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (54213, 54222), True, 'import matplotlib.pyplot as plt\n'), ((54231, 54320), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[15]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[15]'], 'b')\n", (54239, 54320), True, 'import matplotlib.pyplot as plt\n'), ((54323, 54344), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z (m/s)"""'], {}), "('Z (m/s)')\n", (54333, 54344), True, 'import matplotlib.pyplot as plt\n'), ((54353, 54377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (54363, 54377), True, 'import matplotlib.pyplot as plt\n'), ((54386, 54396), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (54394, 54396), True, 'import matplotlib.pyplot as plt\n'), ((54426, 54439), 'matplotlib.pyplot.close', 'plt.close', (['(17)'], {}), '(17)\n', (54435, 54439), True, 'import matplotlib.pyplot as plt\n'), ((54504, 54536), 'matplotlib.pyplot.figure', 'plt.figure', (['(18)'], {'figsize': '(20, 13)'}), '(18, figsize=(20, 13))\n', (54514, 54536), True, 'import matplotlib.pyplot as plt\n'), ((54545, 54565), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (54556, 54565), True, 'import matplotlib.pyplot as plt\n'), ((54727, 54789), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", 'strength', '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], strength, 'b')\n", (54735, 54789), True, 'import matplotlib.pyplot as plt\n'), ((54797, 54827), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""strength (Gauss)"""'], {}), "('strength (Gauss)')\n", (54807, 54827), True, 'import matplotlib.pyplot as plt\n'), ((54836, 54860), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (54846, 54860), True, 'import matplotlib.pyplot as plt\n'), ((54869, 54879), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (54877, 54879), True, 'import matplotlib.pyplot as plt\n'), ((54888, 54908), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (54899, 54908), True, 'import matplotlib.pyplot as plt\n'), ((55050, 55115), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", 'declination', '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], declination, 'b')\n", (55058, 55115), True, 'import matplotlib.pyplot as plt\n'), ((55123, 55166), 'matplotlib.pyplot.title', 'plt.title', (['"""Earth Magnetic Field Estimates"""'], {}), "('Earth Magnetic Field Estimates')\n", (55132, 55166), True, 'import matplotlib.pyplot as plt\n'), ((55175, 55206), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""declination (deg)"""'], {}), "('declination (deg)')\n", (55185, 55206), True, 'import matplotlib.pyplot as plt\n'), ((55215, 55239), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (55225, 55239), True, 'import matplotlib.pyplot as plt\n'), ((55248, 55258), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (55256, 55258), True, 'import matplotlib.pyplot as plt\n'), ((55267, 55287), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (55278, 55287), True, 'import matplotlib.pyplot as plt\n'), ((55420, 55485), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", 'inclination', '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], inclination, 'b')\n", (55428, 55485), True, 'import matplotlib.pyplot as plt\n'), ((55493, 55524), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""inclination (deg)"""'], {}), "('inclination (deg)')\n", (55503, 55524), True, 'import matplotlib.pyplot as plt\n'), ((55533, 55557), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (55543, 55557), True, 'import matplotlib.pyplot as plt\n'), ((55566, 55576), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (55574, 55576), True, 'import matplotlib.pyplot as plt\n'), ((55606, 55619), 'matplotlib.pyplot.close', 'plt.close', (['(18)'], {}), '(18)\n', (55615, 55619), True, 'import matplotlib.pyplot as plt\n'), ((55683, 55715), 'matplotlib.pyplot.figure', 'plt.figure', (['(19)'], {'figsize': '(20, 13)'}), '(19, figsize=(20, 13))\n', (55693, 55715), True, 'import matplotlib.pyplot as plt\n'), ((55724, 55744), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (55735, 55744), True, 'import matplotlib.pyplot as plt\n'), ((55753, 55842), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[19]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[19]'], 'b')\n", (55761, 55842), True, 'import matplotlib.pyplot as plt\n'), ((55845, 55883), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetomer Bias Estimates"""'], {}), "('Magnetomer Bias Estimates')\n", (55854, 55883), True, 'import matplotlib.pyplot as plt\n'), ((55892, 55915), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X (Gauss)"""'], {}), "('X (Gauss)')\n", (55902, 55915), True, 'import matplotlib.pyplot as plt\n'), ((55924, 55948), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (55934, 55948), True, 'import matplotlib.pyplot as plt\n'), ((55957, 55967), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (55965, 55967), True, 'import matplotlib.pyplot as plt\n'), ((55976, 55996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], 
{}), '(3, 1, 2)\n', (55987, 55996), True, 'import matplotlib.pyplot as plt\n'), ((56005, 56094), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[20]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[20]'], 'b')\n", (56013, 56094), True, 'import matplotlib.pyplot as plt\n'), ((56097, 56120), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y (Gauss)"""'], {}), "('Y (Gauss)')\n", (56107, 56120), True, 'import matplotlib.pyplot as plt\n'), ((56129, 56153), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (56139, 56153), True, 'import matplotlib.pyplot as plt\n'), ((56162, 56172), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (56170, 56172), True, 'import matplotlib.pyplot as plt\n'), ((56181, 56201), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (56192, 56201), True, 'import matplotlib.pyplot as plt\n'), ((56210, 56299), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[21]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[21]'], 'b')\n", (56218, 56299), True, 'import matplotlib.pyplot as plt\n'), ((56302, 56325), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z (Gauss)"""'], {}), "('Z (Gauss)')\n", (56312, 56325), True, 'import matplotlib.pyplot as plt\n'), ((56334, 56358), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (56344, 56358), True, 'import matplotlib.pyplot as plt\n'), ((56367, 56377), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (56375, 56377), True, 'import matplotlib.pyplot as plt\n'), ((56407, 56420), 'matplotlib.pyplot.close', 'plt.close', (['(19)'], {}), '(19)\n', (56416, 56420), True, 'import matplotlib.pyplot as plt\n'), ((56467, 56499), 'matplotlib.pyplot.figure', 'plt.figure', (['(20)'], {'figsize': '(20, 
13)'}), '(20, figsize=(20, 13))\n', (56477, 56499), True, 'import matplotlib.pyplot as plt\n'), ((56508, 56528), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (56519, 56528), True, 'import matplotlib.pyplot as plt\n'), ((56537, 56626), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[22]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[22]'], 'b')\n", (56545, 56626), True, 'import matplotlib.pyplot as plt\n'), ((56629, 56665), 'matplotlib.pyplot.title', 'plt.title', (['"""Wind Velocity Estimates"""'], {}), "('Wind Velocity Estimates')\n", (56638, 56665), True, 'import matplotlib.pyplot as plt\n'), ((56674, 56699), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""North (m/s)"""'], {}), "('North (m/s)')\n", (56684, 56699), True, 'import matplotlib.pyplot as plt\n'), ((56708, 56732), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (56718, 56732), True, 'import matplotlib.pyplot as plt\n'), ((56741, 56751), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (56749, 56751), True, 'import matplotlib.pyplot as plt\n'), ((56760, 56780), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (56771, 56780), True, 'import matplotlib.pyplot as plt\n'), ((56789, 56878), 'matplotlib.pyplot.plot', 'plt.plot', (["(1e-06 * estimator_status['timestamp'])", "estimator_status['states[23]']", '"""b"""'], {}), "(1e-06 * estimator_status['timestamp'], estimator_status[\n 'states[23]'], 'b')\n", (56797, 56878), True, 'import matplotlib.pyplot as plt\n'), ((56881, 56905), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""East (m/s)"""'], {}), "('East (m/s)')\n", (56891, 56905), True, 'import matplotlib.pyplot as plt\n'), ((56914, 56938), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (56924, 56938), True, 'import matplotlib.pyplot 
as plt\n'), ((56947, 56957), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (56955, 56957), True, 'import matplotlib.pyplot as plt\n'), ((56987, 57000), 'matplotlib.pyplot.close', 'plt.close', (['(20)'], {}), '(20)\n', (56996, 57000), True, 'import matplotlib.pyplot as plt\n'), ((57140, 57156), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (57149, 57156), True, 'import matplotlib.pyplot as plt\n'), ((57295, 57341), 'numpy.where', 'np.where', (['(status_time > in_air_transition_time)'], {}), '(status_time > in_air_transition_time)\n', (57303, 57341), True, 'import numpy as np\n'), ((57367, 57417), 'numpy.where', 'np.where', (['(status_time <= on_ground_transition_time)'], {}), '(status_time <= on_ground_transition_time)\n', (57375, 57417), True, 'import numpy as np\n'), ((58355, 58400), 'numpy.where', 'np.where', (['(innov_time > in_air_transition_time)'], {}), '(innov_time > in_air_transition_time)\n', (58363, 58400), True, 'import numpy as np\n'), ((58432, 58481), 'numpy.where', 'np.where', (['(innov_time <= on_ground_transition_time)'], {}), '(innov_time <= on_ground_transition_time)\n', (58440, 58481), True, 'import numpy as np\n'), ((75124, 75234), 'numpy.median', 'np.median', (["ekf2_innovations['output_tracking_error[0]'][innov_late_start_index:\n innov_early_end_index + 1]"], {}), "(ekf2_innovations['output_tracking_error[0]'][\n innov_late_start_index:innov_early_end_index + 1])\n", (75133, 75234), True, 'import numpy as np\n'), ((75298, 75408), 'numpy.median', 'np.median', (["ekf2_innovations['output_tracking_error[1]'][innov_late_start_index:\n innov_early_end_index + 1]"], {}), "(ekf2_innovations['output_tracking_error[1]'][\n innov_late_start_index:innov_early_end_index + 1])\n", (75307, 75408), True, 'import numpy as np\n'), ((75472, 75582), 'numpy.median', 'np.median', (["ekf2_innovations['output_tracking_error[2]'][innov_late_start_index:\n innov_early_end_index + 1]"], {}), 
"(ekf2_innovations['output_tracking_error[2]'][\n innov_late_start_index:innov_early_end_index + 1])\n", (75481, 75582), True, 'import numpy as np\n'), ((75728, 75798), 'numpy.amax', 'np.amax', (["estimator_status['vibe[0]'][late_start_index:early_end_index]"], {}), "(estimator_status['vibe[0]'][late_start_index:early_end_index])\n", (75735, 75798), True, 'import numpy as np\n'), ((76017, 76087), 'numpy.amax', 'np.amax', (["estimator_status['vibe[1]'][late_start_index:early_end_index]"], {}), "(estimator_status['vibe[1]'][late_start_index:early_end_index])\n", (76024, 76087), True, 'import numpy as np\n'), ((76306, 76376), 'numpy.amax', 'np.amax', (["estimator_status['vibe[2]'][late_start_index:early_end_index]"], {}), "(estimator_status['vibe[2]'][late_start_index:early_end_index])\n", (76313, 76376), True, 'import numpy as np\n'), ((80329, 80414), 'numpy.amax', 'np.amax', (["estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1]"], {}), "(estimator_status['hgt_test_ratio'][late_start_index:early_end_index +\n 1])\n", (80336, 80414), True, 'import numpy as np\n'), ((80454, 80539), 'numpy.mean', 'np.mean', (["estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1]"], {}), "(estimator_status['hgt_test_ratio'][late_start_index:early_end_index +\n 1])\n", (80461, 80539), True, 'import numpy as np\n'), ((703, 734), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(20, 13)'}), '(0, figsize=(20, 13))\n', (713, 734), True, 'import matplotlib.pyplot as plt\n'), ((747, 767), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (758, 767), True, 'import matplotlib.pyplot as plt\n'), ((780, 840), 'matplotlib.pyplot.plot', 'plt.plot', (["sensor_preflight['accel_inconsistency_m_s_s']", '"""b"""'], {}), "(sensor_preflight['accel_inconsistency_m_s_s'], 'b')\n", (788, 840), True, 'import matplotlib.pyplot as plt\n'), ((853, 894), 'matplotlib.pyplot.title', 'plt.title', (['"""IMU Consistency Check 
Levels"""'], {}), "('IMU Consistency Check Levels')\n", (862, 894), True, 'import matplotlib.pyplot as plt\n'), ((907, 941), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""acceleration (m/s/s)"""'], {}), "('acceleration (m/s/s)')\n", (917, 941), True, 'import matplotlib.pyplot as plt\n'), ((954, 978), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data index"""'], {}), "('data index')\n", (964, 978), True, 'import matplotlib.pyplot as plt\n'), ((991, 1001), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (999, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1014, 1034), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1025, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1106), 'matplotlib.pyplot.plot', 'plt.plot', (["sensor_preflight['gyro_inconsistency_rad_s']", '"""b"""'], {}), "(sensor_preflight['gyro_inconsistency_rad_s'], 'b')\n", (1055, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1119, 1153), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""angular rate (rad/s)"""'], {}), "('angular rate (rad/s)')\n", (1129, 1153), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1190), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data index"""'], {}), "('data index')\n", (1176, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1228, 1240), 'matplotlib.pyplot.close', 'plt.close', (['(0)'], {}), '(0)\n', (1237, 1240), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3058), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[2]']"], {}), "(ekf2_innovations['vel_pos_innov_var[2]'])\n", (3016, 3058), True, 'import numpy as np\n'), ((3937, 3986), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[5]']"], {}), "(ekf2_innovations['vel_pos_innov_var[5]'])\n", (3944, 3986), True, 'import numpy as np\n'), ((6147, 6196), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[0]']"], {}), "(ekf2_innovations['vel_pos_innov_var[0]'])\n", (6154, 6196), True, 'import numpy 
as np\n'), ((7030, 7079), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[1]']"], {}), "(ekf2_innovations['vel_pos_innov_var[1]'])\n", (7037, 7079), True, 'import numpy as np\n'), ((9246, 9295), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[3]']"], {}), "(ekf2_innovations['vel_pos_innov_var[3]'])\n", (9253, 9295), True, 'import numpy as np\n'), ((10126, 10175), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[4]']"], {}), "(ekf2_innovations['vel_pos_innov_var[4]'])\n", (10133, 10175), True, 'import numpy as np\n'), ((12873, 12918), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['mag_innov_var[0]']"], {}), "(ekf2_innovations['mag_innov_var[0]'])\n", (12880, 12918), True, 'import numpy as np\n'), ((13730, 13775), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['mag_innov_var[1]']"], {}), "(ekf2_innovations['mag_innov_var[1]'])\n", (13737, 13775), True, 'import numpy as np\n'), ((14541, 14586), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['mag_innov_var[2]']"], {}), "(ekf2_innovations['mag_innov_var[2]'])\n", (14548, 14586), True, 'import numpy as np\n'), ((16059, 16105), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['heading_innov_var']"], {}), "(ekf2_innovations['heading_innov_var'])\n", (16066, 16105), True, 'import numpy as np\n'), ((18215, 18262), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['airspeed_innov_var']"], {}), "(ekf2_innovations['airspeed_innov_var'])\n", (18222, 18262), True, 'import numpy as np\n'), ((19050, 19093), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['beta_innov_var']"], {}), "(ekf2_innovations['beta_innov_var'])\n", (19057, 19093), True, 'import numpy as np\n'), ((21328, 21374), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['flow_innov_var[0]']"], {}), "(ekf2_innovations['flow_innov_var[0]'])\n", (21335, 21374), True, 'import numpy as np\n'), ((22225, 22271), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['flow_innov_var[1]']"], {}), "(ekf2_innovations['flow_innov_var[1]'])\n", (22232, 22271), True, 
'import numpy as np\n'), ((26756, 26819), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['hagl_test_ratio']", '"""r"""'], {}), "(status_time, estimator_status['hagl_test_ratio'], 'r')\n", (26764, 26819), True, 'import matplotlib.pyplot as plt\n'), ((27099, 27121), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""hgt,HAGL"""'], {}), "('hgt,HAGL')\n", (27109, 27121), True, 'import matplotlib.pyplot as plt\n'), ((27159, 27185), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_plots', '(1)', '(4)'], {}), '(n_plots, 1, 4)\n', (27170, 27185), True, 'import matplotlib.pyplot as plt\n'), ((27198, 27260), 'matplotlib.pyplot.plot', 'plt.plot', (['status_time', "estimator_status['tas_test_ratio']", '"""b"""'], {}), "(status_time, estimator_status['tas_test_ratio'], 'b')\n", (27206, 27260), True, 'import matplotlib.pyplot as plt\n'), ((27273, 27290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""TAS"""'], {}), "('TAS')\n", (27283, 27290), True, 'import matplotlib.pyplot as plt\n'), ((27303, 27327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (sec)"""'], {}), "('time (sec)')\n", (27313, 27327), True, 'import matplotlib.pyplot as plt\n'), ((27340, 27350), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (27348, 27350), True, 'import matplotlib.pyplot as plt\n'), ((30086, 30103), 'numpy.amin', 'np.amin', (['airborne'], {}), '(airborne)\n', (30093, 30103), True, 'import numpy as np\n'), ((30116, 30133), 'numpy.amax', 'np.amax', (['airborne'], {}), '(airborne)\n', (30123, 30133), True, 'import numpy as np\n'), ((30188, 30205), 'numpy.diff', 'np.diff', (['airborne'], {}), '(airborne)\n', (30195, 30205), True, 'import numpy as np\n'), ((30289, 30306), 'numpy.amax', 'np.amax', (['airborne'], {}), '(airborne)\n', (30296, 30306), True, 'import numpy as np\n'), ((30348, 30368), 'numpy.amin', 'np.amin', (['status_time'], {}), '(status_time)\n', (30355, 30368), True, 'import numpy as np\n'), ((30642, 30659), 'numpy.diff', 'np.diff', (['airborne'], 
{}), '(airborne)\n', (30649, 30659), True, 'import numpy as np\n'), ((30719, 30736), 'numpy.diff', 'np.diff', (['airborne'], {}), '(airborne)\n', (30726, 30736), True, 'import numpy as np\n'), ((30827, 30844), 'numpy.amax', 'np.amax', (['airborne'], {}), '(airborne)\n', (30834, 30844), True, 'import numpy as np\n'), ((30889, 30909), 'numpy.amax', 'np.amax', (['status_time'], {}), '(status_time)\n', (30896, 30909), True, 'import numpy as np\n'), ((33426, 33447), 'numpy.amin', 'np.amin', (['tilt_aligned'], {}), '(tilt_aligned)\n', (33433, 33447), True, 'import numpy as np\n'), ((33465, 33656), 'matplotlib.pyplot.text', 'plt.text', (['tilt_align_time', '(0.5)', '"""no pre-arm data - cannot calculate alignment completion times"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""black"""'}), "(tilt_align_time, 0.5,\n 'no pre-arm data - cannot calculate alignment completion times',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='black')\n", (33473, 33656), True, 'import matplotlib.pyplot as plt\n'), ((34398, 34416), 'numpy.amin', 'np.amin', (['using_gps'], {}), '(using_gps)\n', (34405, 34416), True, 'import numpy as np\n'), ((34434, 34610), 'matplotlib.pyplot.text', 'plt.text', (['gps_aid_time', '(0.25)', '"""no pre-arm data - cannot calculate GPS aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""b"""'}), "(gps_aid_time, 0.25,\n 'no pre-arm data - cannot calculate GPS aiding start time', fontsize=12,\n horizontalalignment='left', verticalalignment='center', color='b')\n", (34442, 34610), True, 'import matplotlib.pyplot as plt\n'), ((34871, 34893), 'numpy.amin', 'np.amin', (['using_optflow'], {}), '(using_optflow)\n', (34878, 34893), True, 'import numpy as np\n'), ((34911, 35103), 'matplotlib.pyplot.text', 'plt.text', (['optflow_aid_time', '(0.5)', '"""no pre-arm data - cannot calculate optical flow 
aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""r"""'}), "(optflow_aid_time, 0.5,\n 'no pre-arm data - cannot calculate optical flow aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='r')\n", (34919, 35103), True, 'import matplotlib.pyplot as plt\n'), ((35382, 35402), 'numpy.amin', 'np.amin', (['using_evpos'], {}), '(using_evpos)\n', (35389, 35402), True, 'import numpy as np\n'), ((35420, 35614), 'matplotlib.pyplot.text', 'plt.text', (['evpos_aid_time', '(0.75)', '"""no pre-arm data - cannot calculate external vision aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""g"""'}), "(evpos_aid_time, 0.75,\n 'no pre-arm data - cannot calculate external vision aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='g')\n", (35428, 35614), True, 'import matplotlib.pyplot as plt\n'), ((36230, 36252), 'numpy.amin', 'np.amin', (['using_barohgt'], {}), '(using_barohgt)\n', (36237, 36252), True, 'import numpy as np\n'), ((36270, 36451), 'matplotlib.pyplot.text', 'plt.text', (['barohgt_aid_time', '(0.2)', '"""no pre-arm data - cannot calculate Baro aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""b"""'}), "(barohgt_aid_time, 0.2,\n 'no pre-arm data - cannot calculate Baro aiding start time', fontsize=\n 12, horizontalalignment='left', verticalalignment='center', color='b')\n", (36278, 36451), True, 'import matplotlib.pyplot as plt\n'), ((36719, 36740), 'numpy.amin', 'np.amin', (['using_gpshgt'], {}), '(using_gpshgt)\n', (36726, 36740), True, 'import numpy as np\n'), ((36758, 36936), 'matplotlib.pyplot.text', 'plt.text', (['gpshgt_aid_time', '(0.4)', '"""no pre-arm data - cannot calculate GPS aiding start time"""'], {'fontsize': '(12)', 
'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""r"""'}), "(gpshgt_aid_time, 0.4,\n 'no pre-arm data - cannot calculate GPS aiding start time', fontsize=12,\n horizontalalignment='left', verticalalignment='center', color='r')\n", (36766, 36936), True, 'import matplotlib.pyplot as plt\n'), ((37205, 37226), 'numpy.amin', 'np.amin', (['using_rnghgt'], {}), '(using_rnghgt)\n', (37212, 37226), True, 'import numpy as np\n'), ((37244, 37433), 'matplotlib.pyplot.text', 'plt.text', (['rnghgt_aid_time', '(0.6)', '"""no pre-arm data - cannot calculate rangfinder aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""g"""'}), "(rnghgt_aid_time, 0.6,\n 'no pre-arm data - cannot calculate rangfinder aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='g')\n", (37252, 37433), True, 'import matplotlib.pyplot as plt\n'), ((37706, 37726), 'numpy.amin', 'np.amin', (['using_evhgt'], {}), '(using_evhgt)\n', (37713, 37726), True, 'import numpy as np\n'), ((37744, 37937), 'matplotlib.pyplot.text', 'plt.text', (['evhgt_aid_time', '(0.8)', '"""no pre-arm data - cannot calculate external vision aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""c"""'}), "(evhgt_aid_time, 0.8,\n 'no pre-arm data - cannot calculate external vision aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='c')\n", (37752, 37937), True, 'import matplotlib.pyplot as plt\n'), ((38542, 38563), 'numpy.amin', 'np.amin', (['using_magyaw'], {}), '(using_magyaw)\n', (38549, 38563), True, 'import numpy as np\n'), ((38581, 38775), 'matplotlib.pyplot.text', 'plt.text', (['using_magyaw_time', '(0.25)', '"""no pre-arm data - cannot calculate magnetic yaw aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 
'verticalalignment': '"""center"""', 'color': '"""b"""'}), "(using_magyaw_time, 0.25,\n 'no pre-arm data - cannot calculate magnetic yaw aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='b')\n", (38589, 38775), True, 'import matplotlib.pyplot as plt\n'), ((39055, 39075), 'numpy.amin', 'np.amin', (['using_mag3d'], {}), '(using_mag3d)\n', (39062, 39075), True, 'import numpy as np\n'), ((39093, 39288), 'matplotlib.pyplot.text', 'plt.text', (['using_mag3d_time', '(0.5)', '"""no pre-arm data - cannot calculate 3D magnetoemter aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""r"""'}), "(using_mag3d_time, 0.5,\n 'no pre-arm data - cannot calculate 3D magnetoemter aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='r')\n", (39101, 39288), True, 'import matplotlib.pyplot as plt\n'), ((39568, 39590), 'numpy.amin', 'np.amin', (['using_magdecl'], {}), '(using_magdecl)\n', (39575, 39590), True, 'import numpy as np\n'), ((39608, 39801), 'matplotlib.pyplot.text', 'plt.text', (['using_magdecl_time', '(0.75)', '"""no pre-arm data - cannot magnetic declination aiding start time"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""g"""'}), "(using_magdecl_time, 0.75,\n 'no pre-arm data - cannot magnetic declination aiding start time',\n fontsize=12, horizontalalignment='left', verticalalignment='center',\n color='g')\n", (39616, 39801), True, 'import matplotlib.pyplot as plt\n'), ((40520, 40687), 'matplotlib.pyplot.text', 'plt.text', (['in_air_transition_time', '(0.67)', '"""ground to air transition not detected"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""b"""'}), "(in_air_transition_time, 0.67,\n 'ground to air transition not detected', fontsize=12,\n 
horizontalalignment='left', verticalalignment='center', color='b')\n", (40528, 40687), True, 'import matplotlib.pyplot as plt\n'), ((40988, 41158), 'matplotlib.pyplot.text', 'plt.text', (['on_ground_transition_time', '(0.33)', '"""air to ground transition not detected"""'], {'fontsize': '(12)', 'horizontalalignment': '"""left"""', 'verticalalignment': '"""center"""', 'color': '"""b"""'}), "(on_ground_transition_time, 0.33,\n 'air to ground transition not detected', fontsize=12,\n horizontalalignment='left', verticalalignment='center', color='b')\n", (40996, 41158), True, 'import matplotlib.pyplot as plt\n'), ((54967, 55041), 'numpy.arctan2', 'np.arctan2', (["estimator_status['states[17]']", "estimator_status['states[16]']"], {}), "(estimator_status['states[17]'], estimator_status['states[16]'])\n", (54977, 55041), True, 'import numpy as np\n'), ((57877, 57929), 'numpy.where', 'np.where', (['(status_time > in_air_transition_time + 5.0)'], {}), '(status_time > in_air_transition_time + 5.0)\n', (57885, 57929), True, 'import numpy as np\n'), ((58042, 58098), 'numpy.where', 'np.where', (['(status_time <= on_ground_transition_time - 5.0)'], {}), '(status_time <= on_ground_transition_time - 5.0)\n', (58050, 58098), True, 'import numpy as np\n'), ((58591, 58642), 'numpy.where', 'np.where', (['(innov_time > in_air_transition_time + 5.0)'], {}), '(innov_time > in_air_transition_time + 5.0)\n', (58599, 58642), True, 'import numpy as np\n'), ((58768, 58823), 'numpy.where', 'np.where', (['(innov_time <= on_ground_transition_time - 5.0)'], {}), '(innov_time <= on_ground_transition_time - 5.0)\n', (58776, 58823), True, 'import numpy as np\n'), ((75927, 76001), 'numpy.mean', 'np.mean', (["estimator_status['vibe[0]'][late_start_index:early_end_index + 1]"], {}), "(estimator_status['vibe[0]'][late_start_index:early_end_index + 1])\n", (75934, 76001), True, 'import numpy as np\n'), ((76216, 76290), 'numpy.mean', 'np.mean', 
(["estimator_status['vibe[1]'][late_start_index:early_end_index + 1]"], {}), "(estimator_status['vibe[1]'][late_start_index:early_end_index + 1])\n", (76223, 76290), True, 'import numpy as np\n'), ((76505, 76579), 'numpy.mean', 'np.mean', (["estimator_status['vibe[2]'][late_start_index:early_end_index + 1]"], {}), "(estimator_status['vibe[2]'][late_start_index:early_end_index + 1])\n", (76512, 76579), True, 'import numpy as np\n'), ((76630, 76650), 'numpy.amax', 'np.amax', (['yaw_aligned'], {}), '(yaw_aligned)\n', (76637, 76650), True, 'import numpy as np\n'), ((77127, 77212), 'numpy.amax', 'np.amax', (["estimator_status['mag_test_ratio'][late_start_index:early_end_index + 1]"], {}), "(estimator_status['mag_test_ratio'][late_start_index:early_end_index +\n 1])\n", (77134, 77212), True, 'import numpy as np\n'), ((77273, 77339), 'numpy.mean', 'np.mean', (["estimator_status['mag_test_ratio'][start_index:end_index]"], {}), "(estimator_status['mag_test_ratio'][start_index:end_index])\n", (77280, 77339), True, 'import numpy as np\n'), ((78096, 78114), 'numpy.amax', 'np.amax', (['using_gps'], {}), '(using_gps)\n', (78103, 78114), True, 'import numpy as np\n'), ((78575, 78645), 'numpy.amax', 'np.amax', (["estimator_status['vel_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['vel_test_ratio'][start_index:end_index + 1])\n", (78582, 78645), True, 'import numpy as np\n'), ((78693, 78763), 'numpy.mean', 'np.mean', (["estimator_status['vel_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['vel_test_ratio'][start_index:end_index + 1])\n", (78700, 78763), True, 'import numpy as np\n'), ((79480, 79550), 'numpy.amax', 'np.amax', (["estimator_status['pos_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['pos_test_ratio'][start_index:end_index + 1])\n", (79487, 79550), True, 'import numpy as np\n'), ((79598, 79668), 'numpy.mean', 'np.mean', (["estimator_status['pos_test_ratio'][start_index:end_index + 1]"], {}), 
"(estimator_status['pos_test_ratio'][start_index:end_index + 1])\n", (79605, 79668), True, 'import numpy as np\n'), ((81224, 81294), 'numpy.amax', 'np.amax', (["estimator_status['tas_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['tas_test_ratio'][start_index:end_index + 1])\n", (81231, 81294), True, 'import numpy as np\n'), ((81342, 81412), 'numpy.mean', 'np.mean', (["estimator_status['tas_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['tas_test_ratio'][start_index:end_index + 1])\n", (81349, 81412), True, 'import numpy as np\n'), ((82096, 82167), 'numpy.amax', 'np.amax', (["estimator_status['hagl_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['hagl_test_ratio'][start_index:end_index + 1])\n", (82103, 82167), True, 'import numpy as np\n'), ((82216, 82287), 'numpy.mean', 'np.mean', (["estimator_status['hagl_test_ratio'][start_index:end_index + 1]"], {}), "(estimator_status['hagl_test_ratio'][start_index:end_index + 1])\n", (82223, 82287), True, 'import numpy as np\n'), ((82497, 82519), 'numpy.amax', 'np.amax', (['using_optflow'], {}), '(using_optflow)\n', (82504, 82519), True, 'import numpy as np\n'), ((3121, 3170), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[2]']"], {}), "(ekf2_innovations['vel_pos_innov_var[2]'])\n", (3128, 3170), True, 'import numpy as np\n'), ((4049, 4098), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[5]']"], {}), "(ekf2_innovations['vel_pos_innov_var[5]'])\n", (4056, 4098), True, 'import numpy as np\n'), ((6259, 6308), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[0]']"], {}), "(ekf2_innovations['vel_pos_innov_var[0]'])\n", (6266, 6308), True, 'import numpy as np\n'), ((7142, 7191), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[1]']"], {}), "(ekf2_innovations['vel_pos_innov_var[1]'])\n", (7149, 7191), True, 'import numpy as np\n'), ((9358, 9407), 'numpy.sqrt', 'np.sqrt', 
(["ekf2_innovations['vel_pos_innov_var[3]']"], {}), "(ekf2_innovations['vel_pos_innov_var[3]'])\n", (9365, 9407), True, 'import numpy as np\n'), ((10238, 10287), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['vel_pos_innov_var[4]']"], {}), "(ekf2_innovations['vel_pos_innov_var[4]'])\n", (10245, 10287), True, 'import numpy as np\n'), ((12981, 13026), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['mag_innov_var[0]']"], {}), "(ekf2_innovations['mag_innov_var[0]'])\n", (12988, 13026), True, 'import numpy as np\n'), ((13838, 13883), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['mag_innov_var[1]']"], {}), "(ekf2_innovations['mag_innov_var[1]'])\n", (13845, 13883), True, 'import numpy as np\n'), ((14649, 14694), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['mag_innov_var[2]']"], {}), "(ekf2_innovations['mag_innov_var[2]'])\n", (14656, 14694), True, 'import numpy as np\n'), ((16168, 16214), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['heading_innov_var']"], {}), "(ekf2_innovations['heading_innov_var'])\n", (16175, 16214), True, 'import numpy as np\n'), ((18325, 18372), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['airspeed_innov_var']"], {}), "(ekf2_innovations['airspeed_innov_var'])\n", (18332, 18372), True, 'import numpy as np\n'), ((19156, 19199), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['beta_innov_var']"], {}), "(ekf2_innovations['beta_innov_var'])\n", (19163, 19199), True, 'import numpy as np\n'), ((21437, 21483), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['flow_innov_var[0]']"], {}), "(ekf2_innovations['flow_innov_var[0]'])\n", (21444, 21483), True, 'import numpy as np\n'), ((22334, 22380), 'numpy.sqrt', 'np.sqrt', (["ekf2_innovations['flow_innov_var[1]']"], {}), "(ekf2_innovations['flow_innov_var[1]'])\n", (22341, 22380), True, 'import numpy as np\n'), ((31151, 31168), 'numpy.diff', 'np.diff', (['airborne'], {}), '(airborne)\n', (31158, 31168), True, 'import numpy as np\n'), ((31190, 31207), 'numpy.diff', 'np.diff', (['airborne'], {}), '(airborne)\n', (31197, 
31207), True, 'import numpy as np\n'), ((34637, 34655), 'numpy.amax', 'np.amax', (['using_gps'], {}), '(using_gps)\n', (34644, 34655), True, 'import numpy as np\n'), ((35127, 35149), 'numpy.amax', 'np.amax', (['using_optflow'], {}), '(using_optflow)\n', (35134, 35149), True, 'import numpy as np\n'), ((35637, 35657), 'numpy.amax', 'np.amax', (['using_evpos'], {}), '(using_evpos)\n', (35644, 35657), True, 'import numpy as np\n'), ((36477, 36499), 'numpy.amax', 'np.amax', (['using_barohgt'], {}), '(using_barohgt)\n', (36484, 36499), True, 'import numpy as np\n'), ((36963, 36984), 'numpy.amax', 'np.amax', (['using_gpshgt'], {}), '(using_gpshgt)\n', (36970, 36984), True, 'import numpy as np\n'), ((37456, 37477), 'numpy.amax', 'np.amax', (['using_rnghgt'], {}), '(using_rnghgt)\n', (37463, 37477), True, 'import numpy as np\n'), ((37960, 37980), 'numpy.amax', 'np.amax', (['using_evhgt'], {}), '(using_evhgt)\n', (37967, 37980), True, 'import numpy as np\n'), ((38798, 38819), 'numpy.amax', 'np.amax', (['using_magyaw'], {}), '(using_magyaw)\n', (38805, 38819), True, 'import numpy as np\n'), ((39312, 39332), 'numpy.amax', 'np.amax', (['using_mag3d'], {}), '(using_mag3d)\n', (39319, 39332), True, 'import numpy as np\n'), ((39824, 39846), 'numpy.amax', 'np.amax', (['using_magdecl'], {}), '(using_magdecl)\n', (39831, 39846), True, 'import numpy as np\n'), ((40482, 40499), 'numpy.diff', 'np.diff', (['airborne'], {}), '(airborne)\n', (40489, 40499), True, 'import numpy as np\n'), ((40949, 40966), 'numpy.diff', 'np.diff', (['airborne'], {}), '(airborne)\n', (40956, 40966), True, 'import numpy as np\n'), ((78968, 78986), 'numpy.amax', 'np.amax', (['using_gps'], {}), '(using_gps)\n', (78975, 78986), True, 'import numpy as np\n'), ((78998, 79018), 'numpy.amax', 'np.amax', (['using_evpos'], {}), '(using_evpos)\n', (79005, 79018), True, 'import numpy as np\n'), ((83069, 83110), 'numpy.median', 'np.median', (["estimator_status['states[12]']"], {}), "(estimator_status['states[12]'])\n", 
(83078, 83110), True, 'import numpy as np\n'), ((83286, 83327), 'numpy.median', 'np.median', (["estimator_status['states[15]']"], {}), "(estimator_status['states[15]'])\n", (83295, 83327), True, 'import numpy as np\n'), ((82958, 82999), 'numpy.median', 'np.median', (["estimator_status['states[10]']"], {}), "(estimator_status['states[10]'])\n", (82967, 82999), True, 'import numpy as np\n'), ((83007, 83048), 'numpy.median', 'np.median', (["estimator_status['states[11]']"], {}), "(estimator_status['states[11]'])\n", (83016, 83048), True, 'import numpy as np\n'), ((83175, 83216), 'numpy.median', 'np.median', (["estimator_status['states[13]']"], {}), "(estimator_status['states[13]'])\n", (83184, 83216), True, 'import numpy as np\n'), ((83224, 83265), 'numpy.median', 'np.median', (["estimator_status['states[14]']"], {}), "(estimator_status['states[14]'])\n", (83233, 83265), True, 'import numpy as np\n'), ((55384, 55404), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (55392, 55404), True, 'import numpy as np\n')] |
## set up logging
import logging, os
# LOGLEVEL environment variable overrides the default level of INFO
logging.basicConfig(level=os.environ.get("LOGLEVEL","INFO"))
# module-level logger, named after this module
log = logging.getLogger(__name__)
## import modules
import octvi.exceptions, octvi.array, gdal
from gdalnumeric import *
import numpy as np
def getDatasetNames(stack_path:str) -> list:
    """
    Return the name of every subdataset in a hierarchical file,
    formatted so it can be passed to other functions' 'dataset_name'
    argument.

    Raises octvi.exceptions.FileTypeError if the file is not .hdf/.h5.
    """
    ## pick the path separator used by the container format
    extension = os.path.splitext(stack_path)[1]
    if extension == ".hdf":
        separator = ":"
    elif extension == ".h5":
        separator = "/"
    else:
        raise octvi.exceptions.FileTypeError("File must be of format .hdf or .h5")
    ## open the stack and collect the bare subdataset names
    stack_ds = gdal.Open(stack_path, 0)
    return [
        subdataset[0].split(separator)[-1].strip("\"")  # last path piece, quotes removed
        for subdataset in stack_ds.GetSubDatasets()
    ]
def datasetToPath(stack_path,dataset_name) -> str:
    """
    Return the full gdal subdataset path whose name matches
    `dataset_name` inside the hierarchical file at `stack_path`.

    Raises octvi.exceptions.FileTypeError for unsupported extensions and
    octvi.exceptions.DatasetNotFoundError if no subdataset matches.
    """
    ## pick the path separator used by the container format
    extension = os.path.splitext(stack_path)[1]
    if extension == ".hdf":
        separator = ":"
    elif extension == ".h5":
        separator = "/"
    else:
        raise octvi.exceptions.FileTypeError("File must be of format .hdf or .h5")
    ## scan the hierarchy; the last matching subdataset wins
    match = None
    for subdataset in gdal.Open(stack_path, 0).GetSubDatasets():
        name = subdataset[0].split(separator)[-1].strip("\"")
        if name == dataset_name:
            match = subdataset[0]
    if match is None:
        raise octvi.exceptions.DatasetNotFoundError(f"Dataset '{dataset_name}' not found in '{os.path.basename(stack_path)}'")
    return match
def datasetToArray(stack_path,dataset_name) -> "numpy array":
    """
    Read a named subdataset out of a hierarchical file (such as HDF or
    NetCDF) and return its contents as a numpy array.
    ...
    Parameters
    ----------
    stack_path: str
        Full path to heirarchical file containing the desired subdataset
    dataset_name: str
        Name of desired subdataset, as it appears in the heirarchical file
    """
    subdataset_path = datasetToPath(stack_path, dataset_name)
    ## open the subdataset read-only and pull band 1 into memory
    band = gdal.Open(subdataset_path, 0).GetRasterBand(1)
    return BandReadAsArray(band)
def datasetToRaster(stack_path,dataset_name, out_path,dtype = None) -> None:
    """
    Wrapper around datasetToArray and octvi.array.toRaster: extracts a
    subdataset from an hdf or h5 file and writes it to a new raster.
    ...
    Arguments
    ---------
    stack_path: str
    dataset_name: str
    out_path: str
    dtype: optional gdal data type passed through to toRaster
    """
    ## the source subdataset doubles as the georeference model file
    model_path = datasetToPath(stack_path, dataset_name)
    pixels = datasetToArray(stack_path, dataset_name)
    return octvi.array.toRaster(pixels, out_path, model_file=model_path, dtype=dtype)
def gcviToArray(in_stack) -> "numpy array":
    """
    Locate the Green and NIR bands in a hierarchical file, calculate a
    GCVI array, and return it as a numpy array.
    Valid input formats are MODIS HDF or VIIRS HDF5 (h5).
    ...
    Parameters
    ----------
    in_stack: str
        Full path to input hierarchical file

    Raises
    ------
    octvi.exceptions.FileTypeError
        If the product is an NDVI composite (GCVI cannot be derived
        from it) or the extension is not .hdf/.h5.
    """
    suffix = os.path.basename(in_stack).split(".")[0][3:7]
    if suffix in ("09Q4", "13Q4", "13Q1"):
        # BUG FIX: these composite products only ship an NDVI subdataset.
        # The original code read it into `arr_ndvi` (copy-paste from
        # ndviToArray) and then returned the undefined name `arr_gcvi`,
        # which always raised NameError. Fail with a clear error instead,
        # since GCVI cannot be computed from an NDVI composite.
        raise octvi.exceptions.FileTypeError(
            f"Product suffix '{suffix}' is an NDVI composite; GCVI cannot be calculated from it")
    elif suffix == "09CM":
        sdName_grn = "Coarse Resolution Surface Reflectance Band 4"
        sdName_nir = "Coarse Resolution Surface Reflectance Band 2"
    else:
        ## determine correct band subdataset names from the extension
        ext = os.path.splitext(in_stack)[1]
        if ext == ".hdf":
            sdName_grn = "sur_refl_b04"
            sdName_nir = "sur_refl_b02"
        elif ext == ".h5":
            sdName_grn = "SurfReflect_I4"
            sdName_nir = "SurfReflect_I2"
        else:
            raise octvi.exceptions.FileTypeError("File must be of type .hdf or .h5")
    ## extract green and nir bands from stack, then calculate GCVI
    arr_grn = datasetToArray(in_stack, sdName_grn)
    arr_nir = datasetToArray(in_stack, sdName_nir)
    return octvi.array.calcGcvi(arr_grn, arr_nir)
def ndviToArray(in_stack) -> "numpy array":
    """
    Locate the Red and NIR bands in a hierarchical file, calculate an
    NDVI array, and return it as a numpy array. Composite products that
    already ship an NDVI subdataset have that subdataset returned
    directly.
    Valid input formats are MODIS HDF or VIIRS HDF5 (h5).
    ...
    Parameters
    ----------
    in_stack: str
        Full path to input hierarchical file
    """
    suffix = os.path.basename(in_stack).split(".")[0][3:7]
    ## ndvi products: return the prebuilt subdataset directly
    if suffix in ("09Q4", "13Q4"):
        return datasetToArray(in_stack, "250m 8 days NDVI")
    if suffix == "13Q1":
        return datasetToArray(in_stack, "250m 16 days NDVI")
    ## otherwise resolve the red/nir band names for this product
    if suffix == "09CM":
        sdName_red = "Coarse Resolution Surface Reflectance Band 1"
        sdName_nir = "Coarse Resolution Surface Reflectance Band 2"
    else:
        ext = os.path.splitext(in_stack)[1]
        if ext == ".hdf":
            sdName_red = "sur_refl_b01"
            sdName_nir = "sur_refl_b02"
        elif ext == ".h5":
            sdName_red = "SurfReflect_I1"
            sdName_nir = "SurfReflect_I2"
        else:
            raise octvi.exceptions.FileTypeError("File must be of type .hdf or .h5")
    ## extract red and nir bands from stack, then calculate NDVI
    arr_red = datasetToArray(in_stack, sdName_red)
    arr_nir = datasetToArray(in_stack, sdName_nir)
    return octvi.array.calcNdvi(arr_red, arr_nir)
def gcviToRaster(in_stack,out_path) -> str:
    """
    Directly convert a hierarchical data file into a GCVI raster.

    Returns the string path to the output file.
    """
    # create gcvi array
    gcviArray = gcviToArray(in_stack)
    # apply cloud, shadow, and water masks
    gcviArray = octvi.array.mask(gcviArray, in_stack)
    # use the first subdataset as the georeference model
    sample_sd = getDatasetNames(in_stack)[0]
    # BUG FIX: the original passed `ndviArray` (undefined in this
    # function, copy-paste from ndviToRaster) to toRaster, which always
    # raised NameError; write the GCVI array that was just computed.
    octvi.array.toRaster(gcviArray, out_path, datasetToPath(in_stack, sample_sd))
    return out_path
def ndviToRaster(in_stack,out_path) -> str:
    """
    Directly convert a hierarchical data file into an NDVI raster.

    Returns the string path to the output file.
    """
    ## compute NDVI, then apply cloud, shadow, and water masks
    masked_ndvi = octvi.array.mask(ndviToArray(in_stack), in_stack)
    ## the first subdataset serves as the georeference model
    model_sd = getDatasetNames(in_stack)[0]
    octvi.array.toRaster(masked_ndvi, out_path, datasetToPath(in_stack, model_sd))
    return out_path
def cmgToViewAngArray(source_stack) -> "numpy array":
    """
    This function takes the path to a M*D CMG file, and returns
    the view angle of each pixel. Ephemeral water pixels are
    set to 9999, to be used as a last resort in compositing.
    Returns a numpy array of the same dimensions as the input raster.
    ***
    Parameters
    ----------
    source_stack:str
        Path to the M*D CMG .hdf file on disk
    """
    vang_arr = datasetToArray(source_stack,"Coarse Resolution View Zenith Angle")
    state_arr = datasetToArray(source_stack,"Coarse Resolution State QA")
    # isolate state QA bits 3-5 (land/water flag)
    water = ((state_arr & 0b111000)) # check bits
    # NOTE(review): masked value 32 (bit 5 alone) is treated as ephemeral
    # water and its view angle forced to 9999 so it sorts last in
    # compositing -- confirm 32 is the ephemeral-water code in the MOD09
    # State QA table.
    vang_arr[water==32]=9999 # ephemeral water???
    return vang_arr
def cmgListToWaterArray(stacks:list) -> "numpy array":
    """
    This function takes a list of CMG .hdf files, and returns
    a binary array, with "0" for non-water pixels and "1" for
    water pixels. If any file flags water in a pixel, its value
    is stored as "1"
    ***
    Parameters
    ----------
    stacks:list
        List of hdf filepaths (M*D**CMG)
    """
    water_list = []
    for source_stack in stacks:
        state_arr = datasetToArray(source_stack,"Coarse Resolution State QA")
        # isolate state QA bits 3-5 (land/water flag)
        water = ((state_arr & 0b111000)) # check bits
        # collapse every water class to 1; masked values are the
        # land/water codes from the MOD09 State QA table
        water[water==56]=1 # deep ocean
        water[water==48]=1 # continental/moderate ocean
        water[water==24]=1 # shallow inland water
        water[water==40]=1 # deep inland water
        water[water==0]=1 # shallow ocean
        # NOTE(review): an all-zero state word also masks to 0 (shallow
        # ocean) above; this line treats a fully-zero State QA value as
        # non-water -- presumably because 0 is also the fill value.
        water[state_arr==0]=0
        water[water!=1]=0 # set non-water to zero
        water_list.append(water)
    # a pixel is water if ANY input file flagged it as water
    water_final = np.maximum.reduce(water_list)
    return water_final
def cmgToRankArray(source_stack) -> "numpy array":
    """
    This function takes the path to a MOD**CMG file, and returns
    the rank of each pixel, as defined on page 7 of the MOD09 user
    guide (http://modis-sr.ltdri.org/guide/MOD09_UserGuide_v1.4.pdf)
    Returns a numpy array of the same dimensions as the input raster

    Higher rank means a better pixel; water is rank 0 and the default
    (no flag tripped) is rank 10.
    ***
    Parameters
    ----------
    source_stack:str
        Path to the MOD**CMG .hdf file on disk
    """
    qa_arr = datasetToArray(source_stack,"Coarse Resolution QA")
    state_arr = datasetToArray(source_stack,"Coarse Resolution State QA")
    vang_arr = datasetToArray(source_stack,"Coarse Resolution View Zenith Angle")
    # non-positive view angles are invalid; push them out of range
    vang_arr[vang_arr<=0]=9999
    sang_arr = datasetToArray(source_stack,"Coarse Resolution Solar Zenith Angle")
    # start every pixel at the best rank (10) and demote it below
    rank_arr = np.full(qa_arr.shape,10) # empty rank array
    ## perform the ranking!
    logging.debug("--rank 9: SNOW")
    SNOW = ((state_arr & 0b1000000000000) | (state_arr & 0b1000000000000000)) # state bit 12 OR 15
    rank_arr[SNOW>0]=9 # snow
    del SNOW
    logging.debug("--rank 8: HIGHAEROSOL")
    HIGHAEROSOL=(state_arr & 0b11000000) # state bits 6 AND 7
    rank_arr[HIGHAEROSOL==192]=8
    del HIGHAEROSOL
    logging.debug("--rank 7: CLIMAEROSOL")
    CLIMAEROSOL=(state_arr & 0b11000000) # state bits 6 & 7
    #CLIMAEROSOL=(cloudMask & 0b100000000000000) # cloudMask bit 14
    rank_arr[CLIMAEROSOL==0]=7 # default aerosol level
    del CLIMAEROSOL
    logging.debug("--rank 6: UNCORRECTED")
    UNCORRECTED = (qa_arr & 0b11) # qa bits 0 AND 1
    rank_arr[UNCORRECTED==3]=6 # flagged uncorrected
    del UNCORRECTED
    logging.debug("--rank 5: SHADOW")
    SHADOW = (state_arr & 0b100) # state bit 2
    rank_arr[SHADOW==4]=5 # cloud shadow
    del SHADOW
    logging.debug("--rank 4: CLOUDY")
    # set adj to 11 and internal to 12 to verify in qa output
    # NOTE: only the internal-cloud flag (CLOUDINT) actually demotes
    # pixels; the CLOUDY and CLOUDADJ assignments are computed but their
    # rank assignments are commented out.
    CLOUDY = ((state_arr & 0b11)) # state bit 0 OR bit 1 OR bit 10 OR bit 13
    #rank_arr[CLOUDY!=0]=4 # cloud pixel
    del CLOUDY
    CLOUDADJ = (state_arr & 0b10000000000000)
    #rank_arr[CLOUDADJ>0]=4 # adjacent to cloud
    del CLOUDADJ
    CLOUDINT = (state_arr & 0b10000000000) # state bit 10: internal cloud
    rank_arr[CLOUDINT>0]=4
    del CLOUDINT
    logging.debug("--rank 3: HIGHVIEW")
    # NOTE(review): this line tests the *solar* zenith array while the
    # LOWSUN line below tests the *view* zenith array; the labels and
    # arrays appear swapped relative to each other (angles are stored in
    # 0.01-degree units, hence the /0.01 scaling) -- confirm intent.
    rank_arr[sang_arr>(85/0.01)]=3 # HIGHVIEW
    logging.debug("--rank 2: LOWSUN")
    rank_arr[vang_arr>(60/0.01)]=2 # LOWSUN
    # BAD pixels
    logging.debug("--rank 1: BAD pixels") # qa bits (2-5 OR 6-9 == 1110)
    BAD = ((qa_arr & 0b111100) | (qa_arr & 0b1110000000))
    rank_arr[BAD==112]=1
    rank_arr[BAD==896]=1
    rank_arr[BAD==952]=1
    del BAD
    logging.debug("-building water mask")
    # state QA bits 3-5 (land/water flag); all water classes get rank 0
    water = ((state_arr & 0b111000)) # check bits
    water[water==56]=1 # deep ocean
    water[water==48]=1 # continental/moderate ocean
    water[water==24]=1 # shallow inland water
    water[water==40]=1 # deep inland water
    water[water==0]=1 # shallow ocean
    rank_arr[water==1]=0
    vang_arr[water==32]=9999 # ephemeral water???
    water[state_arr==0]=0
    water[water!=1]=0 # set non-water to zero
    # return the results
    return rank_arr
def cmgBestNdviPixels(input_stacks:list) -> "numpy array":
    """
    Given a list of MOD**CMG hdf paths, return the 'best' NDVI value at
    each pixel location, chosen by the ranking method (see
    cmgToRankArray() for details) with view angle as the tie-breaker.
    ***
    Parameters
    ----------
    input_stacks:list
        A list of strings, each pointing to a MOD**CMG hdf file
        on disk
    """
    ranks = [cmgToRankArray(stack) for stack in input_stacks]
    vangs = [cmgToViewAngArray(stack) for stack in input_stacks]
    ndvis = [ndviToArray(stack) for stack in input_stacks]
    ## pixels with nodata NDVI are excluded from ranking
    for rank, ndvi in zip(ranks, ndvis):
        rank[ndvi == -3000] = 0
    best_rank = np.maximum.reduce(ranks)
    ## among best-ranked pixels, prefer the smallest view angle;
    ## sentinel values push non-ideal / zero angles out of contention
    for rank, vang in zip(ranks, vangs):
        vang[rank != best_rank] = 9998
        vang[vang == 0] = 9997
    best_vang = np.minimum.reduce(vangs)
    ## composite: take NDVI from whichever file won the tie-break
    composite = np.full(ndvis[0].shape, -3000)
    for vang, ndvi in zip(vangs, ndvis):
        winner = vang == best_vang
        composite[winner] = ndvi[winner]
    ## reject pixels whose best rank is still too low
    composite[best_rank <= 7] = -3000
    ## mask water
    water = cmgListToWaterArray(input_stacks)
    composite[water == 1] = -3000
    return composite
def cmgBestGcviPixels(input_stacks:list) -> "numpy array":
    """
    Given a list of MOD**CMG hdf paths, return the 'best' GCVI value at
    each pixel location, chosen by the ranking method (see
    cmgToRankArray() for details) with view angle as the tie-breaker.
    ***
    Parameters
    ----------
    input_stacks:list
        A list of strings, each pointing to a MOD**CMG hdf file
        on disk
    """
    ranks = [cmgToRankArray(stack) for stack in input_stacks]
    vangs = [cmgToViewAngArray(stack) for stack in input_stacks]
    gcvis = [gcviToArray(stack) for stack in input_stacks]
    ## pixels with nodata GCVI are excluded from ranking
    for rank, gcvi in zip(ranks, gcvis):
        rank[gcvi == -3000] = 0
    best_rank = np.maximum.reduce(ranks)
    ## among best-ranked pixels, prefer the smallest view angle;
    ## sentinel values push non-ideal / zero angles out of contention
    for rank, vang in zip(ranks, vangs):
        vang[rank != best_rank] = 9998
        vang[vang == 0] = 9997
    best_vang = np.minimum.reduce(vangs)
    ## composite: take GCVI from whichever file won the tie-break
    composite = np.full(gcvis[0].shape, -3000)
    for vang, gcvi in zip(vangs, gcvis):
        winner = vang == best_vang
        composite[winner] = gcvi[winner]
    ## reject pixels whose best rank is still too low
    composite[best_rank <= 7] = -3000
    ## mask water
    water = cmgListToWaterArray(input_stacks)
    composite[water == 1] = -3000
    return composite
"numpy.full",
"logging.debug",
"os.path.basename",
"gdal.Open",
"os.environ.get",
"os.path.splitext",
"numpy.maximum.reduce",
"numpy.minimum.reduce",
"logging.getLogger"
] | [((107, 134), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'import logging, os\n'), ((711, 735), 'gdal.Open', 'gdal.Open', (['stack_path', '(0)'], {}), '(stack_path, 0)\n', (720, 735), False, 'import octvi.exceptions, octvi.array, gdal\n'), ((1288, 1312), 'gdal.Open', 'gdal.Open', (['stack_path', '(0)'], {}), '(stack_path, 0)\n', (1297, 1312), False, 'import octvi.exceptions, octvi.array, gdal\n'), ((2155, 2171), 'gdal.Open', 'gdal.Open', (['sd', '(0)'], {}), '(sd, 0)\n', (2164, 2171), False, 'import octvi.exceptions, octvi.array, gdal\n'), ((8965, 8994), 'numpy.maximum.reduce', 'np.maximum.reduce', (['water_list'], {}), '(water_list)\n', (8982, 8994), True, 'import numpy as np\n'), ((9782, 9807), 'numpy.full', 'np.full', (['qa_arr.shape', '(10)'], {}), '(qa_arr.shape, 10)\n', (9789, 9807), True, 'import numpy as np\n'), ((9856, 9887), 'logging.debug', 'logging.debug', (['"""--rank 9: SNOW"""'], {}), "('--rank 9: SNOW')\n", (9869, 9887), False, 'import logging, os\n'), ((10026, 10064), 'logging.debug', 'logging.debug', (['"""--rank 8: HIGHAEROSOL"""'], {}), "('--rank 8: HIGHAEROSOL')\n", (10039, 10064), False, 'import logging, os\n'), ((10176, 10214), 'logging.debug', 'logging.debug', (['"""--rank 7: CLIMAEROSOL"""'], {}), "('--rank 7: CLIMAEROSOL')\n", (10189, 10214), False, 'import logging, os\n'), ((10412, 10450), 'logging.debug', 'logging.debug', (['"""--rank 6: UNCORRECTED"""'], {}), "('--rank 6: UNCORRECTED')\n", (10425, 10450), False, 'import logging, os\n'), ((10572, 10605), 'logging.debug', 'logging.debug', (['"""--rank 5: SHADOW"""'], {}), "('--rank 5: SHADOW')\n", (10585, 10605), False, 'import logging, os\n'), ((10705, 10738), 'logging.debug', 'logging.debug', (['"""--rank 4: CLOUDY"""'], {}), "('--rank 4: CLOUDY')\n", (10718, 10738), False, 'import logging, os\n'), ((11114, 11149), 'logging.debug', 'logging.debug', (['"""--rank 3: HIGHVIEW"""'], {}), "('--rank 3: HIGHVIEW')\n", (11127, 11149), 
False, 'import logging, os\n'), ((11196, 11229), 'logging.debug', 'logging.debug', (['"""--rank 2: LOWSUN"""'], {}), "('--rank 2: LOWSUN')\n", (11209, 11229), False, 'import logging, os\n'), ((11289, 11326), 'logging.debug', 'logging.debug', (['"""--rank 1: BAD pixels"""'], {}), "('--rank 1: BAD pixels')\n", (11302, 11326), False, 'import logging, os\n'), ((11497, 11534), 'logging.debug', 'logging.debug', (['"""-building water mask"""'], {}), "('-building water mask')\n", (11510, 11534), False, 'import logging, os\n'), ((12744, 12773), 'numpy.maximum.reduce', 'np.maximum.reduce', (['rankArrays'], {}), '(rankArrays)\n', (12761, 12773), True, 'import numpy as np\n'), ((12954, 12983), 'numpy.minimum.reduce', 'np.minimum.reduce', (['vangArrays'], {}), '(vangArrays)\n', (12971, 12983), True, 'import numpy as np\n'), ((13209, 13244), 'numpy.full', 'np.full', (['ndviArrays[0].shape', '(-3000)'], {}), '(ndviArrays[0].shape, -3000)\n', (13216, 13244), True, 'import numpy as np\n'), ((14402, 14431), 'numpy.maximum.reduce', 'np.maximum.reduce', (['rankArrays'], {}), '(rankArrays)\n', (14419, 14431), True, 'import numpy as np\n'), ((14612, 14641), 'numpy.minimum.reduce', 'np.minimum.reduce', (['vangArrays'], {}), '(vangArrays)\n', (14629, 14641), True, 'import numpy as np\n'), ((14867, 14902), 'numpy.full', 'np.full', (['gcviArrays[0].shape', '(-3000)'], {}), '(gcviArrays[0].shape, -3000)\n', (14874, 14902), True, 'import numpy as np\n'), ((65, 99), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (79, 99), False, 'import logging, os\n'), ((462, 490), 'os.path.splitext', 'os.path.splitext', (['stack_path'], {}), '(stack_path)\n', (478, 490), False, 'import logging, os\n'), ((1020, 1048), 'os.path.splitext', 'os.path.splitext', (['stack_path'], {}), '(stack_path)\n', (1036, 1048), False, 'import logging, os\n'), ((1583, 1611), 'os.path.basename', 'os.path.basename', (['stack_path'], {}), '(stack_path)\n', (1599, 1611), False, 
'import logging, os\n'), ((3104, 3130), 'os.path.basename', 'os.path.basename', (['in_stack'], {}), '(in_stack)\n', (3120, 3130), False, 'import logging, os\n'), ((3821, 3847), 'os.path.splitext', 'os.path.splitext', (['in_stack'], {}), '(in_stack)\n', (3837, 3847), False, 'import logging, os\n'), ((4735, 4761), 'os.path.basename', 'os.path.basename', (['in_stack'], {}), '(in_stack)\n', (4751, 4761), False, 'import logging, os\n'), ((5452, 5478), 'os.path.splitext', 'os.path.splitext', (['in_stack'], {}), '(in_stack)\n', (5468, 5478), False, 'import logging, os\n')] |
import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess
from tqdm import tqdm
from urllib.request import urlopen
from urllib.parse import urlparse
import cv2
from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes
from scipy.spatial import ConvexHull
import numpy as np
import colorsys
import random
import torch
from . import metrics
def rgb_to_hsv(arr):
    """Convert an (..., 3) RGB array to HSV, element-wise via colorsys."""
    convert = np.vectorize(colorsys.rgb_to_hsv)
    red, green, blue = np.rollaxis(arr, axis=-1)
    hue, sat, val = convert(red, green, blue)
    return np.stack((hue, sat, val), axis=-1)
def hsv_to_rgb(arr):
    """Convert an (..., 3) HSV array to RGB, element-wise via colorsys."""
    convert = np.vectorize(colorsys.hsv_to_rgb)
    hue, sat, val = np.rollaxis(arr, axis=-1)
    red, green, blue = convert(hue, sat, val)
    return np.stack((red, green, blue), axis=-1)
def download_url_to_file(url, dst, progress=True):
    r"""Download object at the given URL to a local path.
    Thanks to torch, slightly modified
    Args:
        url (string): URL of the object to download
        dst (string): Full path where object will be saved, e.g. `/tmp/temporary_file`
        progress (bool, optional): whether or not to display a progress bar to stderr
            Default: True
    """
    file_size = None
    u = urlopen(url)
    meta = u.info()
    # getheaders() is the legacy (py2-era) API; get_all() is the modern one
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])
    # We deliberately save it in a temp file and move it after
    # download completes, so an interrupted download never leaves a
    # partial file at dst. The temp file lives in dst's directory so the
    # final move is atomic (same filesystem).
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        # stream the body in 8 KiB chunks, updating the progress bar
        with tqdm(total=file_size, disable=not progress,
                 unit='B', unit_scale=True, unit_divisor=1024) as pbar:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
        # close before moving: Windows cannot move an open file
        f.close()
        shutil.move(f.name, dst)
    finally:
        # on success f.name no longer exists (it was moved); on failure
        # this removes the leftover partial temp file
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
def distance_to_boundary(masks):
    """ get distance to boundary of mask pixels
    Parameters
    ----------------
    masks: int, 2D or 3D array
        size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
    Returns
    ----------------
    dist_to_bound: 2D or 3D array
        size [Ly x Lx] or [Lz x Ly x Lx]

    NOTE(review): the stored value is the minimum *squared* euclidean
    distance to the outer contour (no sqrt is taken) -- confirm callers
    expect squared distance; masks_to_edges' default threshold of 1.0 is
    unaffected either way.
    """
    if masks.ndim > 3 or masks.ndim < 2:
        raise ValueError('distance_to_boundary takes 2D or 3D array, not %dD array'%masks.ndim)
    dist_to_bound = np.zeros(masks.shape, np.float64)
    if masks.ndim==3:
        # 3D: process each Z-plane independently
        for i in range(masks.shape[0]):
            dist_to_bound[i] = distance_to_boundary(masks[i])
        return dist_to_bound
    else:
        # bounding-box slice per label; index i corresponds to label i+1
        slices = find_objects(masks.astype(np.int16))
        for i,si in enumerate(slices):
            if si is not None:
                sr,sc = si
                # binary mask of this label within its bounding box
                mask = (masks[sr, sc] == (i+1)).astype(np.uint8)
                contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                # contours[-2] is the contour list across OpenCV 3/4
                # return conventions; concatenate all outer contours
                pvc, pvr = np.concatenate(contours[-2], axis=0).squeeze().T
                ypix, xpix = np.nonzero(mask)
                min_dist = ((ypix[:,np.newaxis] - pvr)**2 +
                            (xpix[:,np.newaxis] - pvc)**2).min(axis=1)
                # write back at full-image coordinates
                dist_to_bound[ypix + sr.start, xpix + sc.start] = min_dist
    return dist_to_bound
def masks_to_edges(masks, threshold=1.0):
    """ get edges of masks as a 0-1 array
    Parameters
    ----------------
    masks: int, 2D or 3D array
        size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
    threshold: float
        pixels closer to the boundary than this count as edge pixels
    Returns
    ----------------
    edges: 2D or 3D array
        size [Ly x Lx] or [Lz x Ly x Lx], True pixels are edge pixels
    """
    near_boundary = distance_to_boundary(masks) < threshold
    inside_mask = masks > 0
    return near_boundary * inside_mask
def masks_to_outlines(masks):
    """ get outlines of masks as a 0-1 array
    Parameters
    ----------------
    masks: int, 2D or 3D array
        size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
    Returns
    ----------------
    outlines: 2D or 3D array
        size [Ly x Lx] or [Lz x Ly x Lx], True pixels are outlines
    """
    if masks.ndim > 3 or masks.ndim < 2:
        raise ValueError('masks_to_outlines takes 2D or 3D array, not %dD array'%masks.ndim)
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement and is what np.bool
    # aliased.
    outlines = np.zeros(masks.shape, bool)
    if masks.ndim==3:
        # 3D: process each Z-plane independently
        for i in range(masks.shape[0]):
            outlines[i] = masks_to_outlines(masks[i])
        return outlines
    else:
        # bounding-box slice per label; index i corresponds to label i+1
        slices = find_objects(masks.astype(int))
        for i,si in enumerate(slices):
            if si is not None:
                sr,sc = si
                mask = (masks[sr, sc] == (i+1)).astype(np.uint8)
                # contours[-2] is the contour list across OpenCV 3/4
                # return conventions
                contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                pvc, pvr = np.concatenate(contours[-2], axis=0).squeeze().T
                # shift contour pixels back to full-image coordinates
                vr, vc = pvr + sr.start, pvc + sc.start
                outlines[vr, vc] = 1
    return outlines
def outlines_list(masks):
    """Return a list of outline pixel coordinates, one entry per mask, for plotting."""
    outlines = []
    for lbl in np.unique(masks)[1:]:          # skip background (0)
        region = masks == lbl
        if region.sum() > 0:
            found = cv2.findContours(region.astype(np.uint8), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
            found = found[-2]                # contour list for OpenCV 3 and 4
            largest = np.argmax([c.shape[0] for c in found])
            coords = found[largest].astype(int).squeeze()
            # degenerate (tiny) outlines are replaced with an empty array
            outlines.append(coords if len(coords) > 4 else np.zeros((0, 2)))
    return outlines
def get_perimeter(points):
    """Perimeter of a closed polygon given as an (npoints x ndim) array.

    Returns 0 for 4 or fewer points (degenerate outline).
    """
    if points.shape[0] <= 4:
        return 0
    closed = np.append(points, points[:1], axis=0)       # close the loop
    seg_lengths = ((np.diff(closed, axis=0) ** 2).sum(axis=1)) ** 0.5
    return seg_lengths.sum()
def get_mask_compactness(masks):
    """Compactness (4*pi*area / perimeter^2) of every mask, clipped to [0, 1]."""
    perimeters = get_mask_perimeters(masks)
    # pixel count per label serves as the area
    areas = np.unique(masks, return_counts=True)[1][1:]
    compactness = 4 * np.pi * areas / perimeters**2
    compactness[perimeters == 0] = 0        # masks with no outline get 0
    compactness[compactness > 1.0] = 1.0    # clamp numerical overshoot
    return compactness
def get_mask_perimeters(masks):
    """ get perimeters of masks """
    nmasks = masks.max()
    perimeters = np.zeros(nmasks)
    for idx in range(nmasks):
        region = masks == (idx + 1)
        if region.sum() > 0:
            contours = cv2.findContours(region.astype(np.uint8), mode=cv2.RETR_EXTERNAL,
                                        method=cv2.CHAIN_APPROX_NONE)[-2]
            # a label can produce several contours; sum all their perimeters
            perimeters[idx] = sum(get_perimeter(c.astype(int).squeeze()) for c in contours)
    return perimeters
def circleMask(d0):
    """Radius of every point in a rectangular patch around the origin.

    inputs:
        d0: half-extents (dy, dx) of the patch; grid spans (-d0, d0+1)
    outputs:
        rs: array (2*d0[0]+1, 2*d0[1]+1) of radii
        dx, dy: x- and y-offset grids of the patch
    """
    xs = np.arange(-d0[1], d0[1] + 1)
    ys = np.arange(-d0[0], d0[0] + 1)
    dx = np.tile(xs, (2 * d0[0] + 1, 1))
    dy = np.tile(ys, (2 * d0[1] + 1, 1)).transpose()
    # Euclidean distance of every grid point from the patch centre
    rs = (dy ** 2 + dx ** 2) ** 0.5
    return rs, dx, dy
def get_mask_stats(masks_true):
    """Compute convexity, solidity and compactness for every mask.

    Parameters
    ----------------
    masks_true: int, 2D array
        labelled masks, 0=NO masks; 1,2,...=mask labels
    Returns
    ----------------
    convexity, solidity, compactness: 1D arrays of length masks_true.max(),
        each clipped to [0, 1]; masks with <=15 points or zero perimeter
        keep the default value 0
    """
    mask_perimeters = get_mask_perimeters(masks_true)
    # reference disk radii used to normalize compactness
    rs,dy,dx = circleMask(np.array([100, 100]))
    rsort = np.sort(rs.flatten())
    # area estimate for solidity (pixel count corrected by half the perimeter)
    npoints = np.unique(masks_true, return_counts=True)[1][1:]
    areas = npoints - mask_perimeters / 2 - 1
    compactness = np.zeros(masks_true.max())
    convexity = np.zeros(masks_true.max())
    solidity = np.zeros(masks_true.max())
    convex_perimeters = np.zeros(masks_true.max())
    convex_areas = np.zeros(masks_true.max())
    for ic in range(masks_true.max()):
        points = np.array(np.nonzero(masks_true==(ic+1))).T
        if len(points)>15 and mask_perimeters[ic] > 0:
            med = np.median(points, axis=0)
            # compute compactness of ROI: ideal-disk radii vs actual radii
            r2 = ((points - med)**2).sum(axis=1)**0.5
            compactness[ic] = (rsort[:r2.size].mean() + 1e-10) / r2.mean()
            # ConvexHull can fail on degenerate point sets; treat those masks
            # as having zero convex perimeter.
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                hull = ConvexHull(points)
                convex_perimeters[ic] = hull.area
                convex_areas[ic] = hull.volume
            except Exception:
                convex_perimeters[ic] = 0
    convexity[mask_perimeters>0.0] = (convex_perimeters[mask_perimeters>0.0] /
                                      mask_perimeters[mask_perimeters>0.0])
    solidity[convex_areas>0.0] = (areas[convex_areas>0.0] /
                                  convex_areas[convex_areas>0.0])
    convexity = np.clip(convexity, 0.0, 1.0)
    solidity = np.clip(solidity, 0.0, 1.0)
    compactness = np.clip(compactness, 0.0, 1.0)
    return convexity, solidity, compactness
def get_masks_unet(output, cell_threshold=0, boundary_threshold=0):
    """ create masks using cell probability and cell boundary """
    # cell interior = P(cell) - P(background) above threshold
    cells = (output[..., 1] - output[..., 0]) > cell_threshold
    selem = generate_binary_structure(cells.ndim, connectivity=1)
    labels, nlabels = label(cells, selem)

    if output.shape[-1] > 2:
        # a boundary channel is present: grow labels into boundary pixels,
        # assigning each boundary pixel to the label whose blurred mask it
        # is closest to
        slices = find_objects(labels)
        dists = 10000 * np.ones(labels.shape, np.float32)
        mins = np.zeros(labels.shape, np.int32)
        borders = np.logical_and(~(labels > 0), output[..., 2] > boundary_threshold)
        pad = 10
        for i, slc in enumerate(slices):
            if slc is None:
                continue
            # pad the bounding box so the blur can reach nearby borders
            slc_pad = tuple(slice(max(0, sli.start - pad),
                                  min(labels.shape[j], sli.stop + pad))
                            for j, sli in enumerate(slc))
            msk = 1 - gaussian_filter((labels[slc_pad] == (i + 1)).astype(np.float32), 5)
            dists[slc_pad] = np.minimum(dists[slc_pad], msk)
            mins[slc_pad][dists[slc_pad] == msk] = (i + 1)
        labels[labels == 0] = borders[labels == 0] * mins[labels == 0]

    masks = labels
    shape0 = masks.shape
    # relabel so mask ids are consecutive starting from 1
    _, masks = np.unique(masks, return_inverse=True)
    masks = np.reshape(masks, shape0)
    return masks
def stitch3D(masks, stitch_threshold=0.25):
    """ stitch 2D masks into 3D volume with stitch_threshold on IOU

    Walks through consecutive planes, relabelling each mask in plane i+1
    with the label of its best-overlapping mask in plane i (if the IoU
    exceeds stitch_threshold) or with a brand-new label otherwise.
    Note: modifies `masks` in place and also returns it.
    """
    mmax = masks[0].max()
    for i in range(len(masks)-1):
        # IoU between every mask of plane i+1 (rows) and plane i (cols),
        # dropping the background row/column
        iou = metrics._intersection_over_union(masks[i+1], masks[i])[1:,1:]
        # discard overlaps weaker than the stitch threshold
        iou[iou < stitch_threshold] = 0.0
        # for each column, keep only the best-matching row
        iou[iou < iou.max(axis=0)] = 0.0
        # each mask in plane i+1 inherits the label of its best match in plane i
        istitch = iou.argmax(axis=1) + 1
        # masks with no surviving match get brand-new labels
        ino = np.nonzero(iou.max(axis=1)==0.0)[0]
        istitch[ino] = np.arange(mmax+1, mmax+len(ino)+1, 1, int)
        mmax += len(ino)
        # prepend 0 so background (label 0) maps to itself
        istitch = np.append(np.array(0), istitch)
        masks[i+1] = istitch[masks[i+1]]
    return masks
def diameters(masks):
    """Median effective diameter of the masks, plus sqrt(area) per mask."""
    counts = np.unique(np.int32(masks), return_counts=True)[1][1:]   # skip background
    radii = counts ** 0.5
    md = np.median(radii)
    if np.isnan(md):
        md = 0                      # no masks present
    md /= (np.pi ** 0.5) / 2        # convert sqrt(area) to an effective diameter
    return md, radii
def radius_distribution(masks, bins):
    """Normalized histogram of mask radii (sqrt(area)/2) plus their median."""
    uniq, counts = np.unique(masks, return_counts=True)
    counts = counts[uniq != 0]          # drop the background label
    radii = counts ** 0.5 * 0.5
    nb, _ = np.histogram(radii, bins)
    nb = nb.astype(np.float32)
    total = nb.sum()
    if total > 0:
        nb = nb / total                 # normalize to a distribution
    md = np.median(counts ** 0.5) * 0.5
    if md != md:                        # NaN check: no masks present
        md = 0
    md /= (np.pi ** 0.5) / 2            # convert to an effective diameter scale
    return nb, md, radii
def size_distribution(masks):
    """Ratio of the 25th to 75th percentile of mask pixel counts (1 = uniform sizes)."""
    counts = np.unique(masks, return_counts=True)[1][1:]
    q25, q75 = np.percentile(counts, 25), np.percentile(counts, 75)
    return q25 / q75
def normalize99(img):
    """Rescale so the 1st percentile of img maps to 0 and the 99th maps to 1."""
    arr = img.copy()
    lo, hi = np.percentile(arr, 1), np.percentile(arr, 99)
    return (arr - lo) / (hi - lo)
def process_cells(M0, npix=20):
    """Zero out (in place) every label occupying fewer than npix pixels."""
    labels_found, sizes = np.unique(M0, return_counts=True)
    for lbl, size in zip(labels_found, sizes):
        if size < npix:
            M0[M0 == lbl] = 0
    return M0
def fill_holes_and_remove_small_masks(masks, min_size=15):
    """ fill holes in masks (2D/3D) and discard masks smaller than min_size (2D)

    fill holes in each mask using scipy.ndimage.morphology.binary_fill_holes

    Parameters
    ----------------
    masks: int, 2D or 3D array
        labelled masks, 0=NO masks; 1,2,...=mask labels,
        size [Ly x Lx] or [Lz x Ly x Lx]
    min_size: int (optional, default 15)
        minimum number of pixels per mask, can turn off with -1
    Returns
    ---------------
    masks: int, 2D or 3D array
        masks with holes filled and masks smaller than min_size removed,
        0=NO masks; 1,2,...=mask labels,
        size [Ly x Lx] or [Lz x Ly x Lx]

    Raises
    ---------------
    ValueError
        if masks is not a 2D or 3D array
    """
    if masks.ndim > 3 or masks.ndim < 2:
        # FIX: message previously named masks_to_outlines (copy-paste error)
        raise ValueError('fill_holes_and_remove_small_masks takes 2D or 3D array, not %dD array'%masks.ndim)
    slices = find_objects(masks)
    j = 0  # counter for new, consecutive labels
    for i,slc in enumerate(slices):
        if slc is not None:
            msk = masks[slc] == (i+1)
            npix = msk.sum()
            if min_size > 0 and npix < min_size:
                masks[slc][msk] = 0          # too small: delete the mask
            else:
                if msk.ndim==3:
                    # fill holes plane by plane for 3D masks
                    for k in range(msk.shape[0]):
                        msk[k] = binary_fill_holes(msk[k])
                else:
                    msk = binary_fill_holes(msk)
                masks[slc][msk] = (j+1)      # relabel consecutively
                j+=1
    return masks
def set_manual_seed(seed):
    """
    (https://github.com/vqdang/hover_net)
    Seed python, numpy and torch RNGs for reproducibility.
    If manual seed is not specified, choose a random one and communicate it
    to the user.

    Args:
        seed: seed to use, or None to pick a random one.
              FIX: previously `seed or random.randint(...)` silently replaced
              the valid seed 0 (falsy) with a random seed.
    """
    if seed is None:
        seed = random.randint(1, 10000)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # make cuDNN deterministic (slower, but reproducible)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # ia.random.seed(seed)
    print(">>>> using manual seed: {seed}".format(seed=seed))
    return
def crop_images_and_masks(X, M, crop_side_fraction=2):
    """Split each image/mask pair into a crop_side_fraction**2 grid of crops.

    X: list of 2D or 3D images; M: list of matching 2D masks.
    Returns (cropped_images, cropped_masks) as flat lists, row-major order.
    """
    cropped_imgs, cropped_masks = [], []
    for i in range(len(X)):
        img, msk = X[i], M[i]
        Ly0, Lx0 = img.shape[:2]
        tile_h = int(Ly0 // crop_side_fraction)
        tile_w = int(Lx0 // crop_side_fraction)
        for row in range(crop_side_fraction):
            for col in range(crop_side_fraction):
                y0 = row * tile_h
                y1 = min(y0 + tile_h, Ly0)       # clamp to image bounds
                x0 = col * tile_w
                x1 = min(x0 + tile_w, Lx0)
                if img.ndim == 3:
                    cropped_imgs.append(img[y0:y1, x0:x1, :])
                else:
                    cropped_imgs.append(img[y0:y1, x0:x1])
                cropped_masks.append(msk[y0:y1, x0:x1])
    return cropped_imgs, cropped_masks
def get_centroids(type_inst, class_inst):
    """Centroid (x, y) and class label for every instance in type_inst."""
    centroids = []
    types = []
    for inst_id in np.unique(type_inst)[1:]:      # exclude background
        inst_map = type_inst == inst_id
        inst_type = np.unique(class_inst[type_inst == inst_id])[0]
        # TODO: chane format of bbox output
        rmin, rmax, cmin, cmax = get_bounding_box(inst_map)
        bbox = np.array([[rmin, cmin], [rmax, cmax]])
        crop = inst_map[bbox[0][0]:bbox[1][0], bbox[0][1]:bbox[1][1]].astype(np.uint8)
        moments = cv2.moments(crop)
        # centroid from image moments, shifted back into full-image coordinates
        centroid = np.array([moments["m10"] / moments["m00"],
                             moments["m01"] / moments["m00"]])
        centroid[0] += bbox[0][1]    # X
        centroid[1] += bbox[0][0]    # Y
        centroids.append(centroid)
        types.append(inst_type)
    return np.array(centroids).astype("float32"), np.array(types)
def get_bounding_box(img):
    """Get bounding box coordinate information."""
    row_hits = np.where(np.any(img, axis=1))[0]
    col_hits = np.where(np.any(img, axis=0))[0]
    rmin, rmax = row_hits[0], row_hits[-1]
    cmin, cmax = col_hits[0], col_hits[-1]
    # half-open bounds: +1 so slicing with [rmin:rmax] covers the object
    return [rmin, rmax + 1, cmin, cmax + 1]
def make_folder(folder_name):
    """Create folder_name if it does not already exist (no-op otherwise)."""
    if os.path.isdir(folder_name):
        return
    os.mkdir(folder_name)
def process_different_model(model_name):
    """Map a model name to its task/postprocessing configuration.

    Parameters
    ----------------
    model_name: str
        name containing one of 'scellseg', 'cellpose', 'hover', 'unet3',
        'unet2' (checked in that order)
    Returns
    ----------------
    task_mode, postproc_mode, attn_on, dense_on, style_scale_on

    Raises
    ----------------
    ValueError
        if model_name matches none of the known model families
        (FIX: previously this fell through to a NameError on task_mode)
    """
    style_scale_on = False
    attn_on, dense_on = False, False
    if 'scellseg' in model_name:
        task_mode = 'cellpose'
        postproc_mode = 'cellpose'
        attn_on, dense_on = True, True
        style_scale_on = True
    elif 'cellpose' in model_name:
        task_mode = 'cellpose'
        postproc_mode = 'cellpose'
    elif 'hover' in model_name:
        task_mode = 'hover'
        postproc_mode = 'watershed'
    elif 'unet3' in model_name:
        task_mode = 'unet3'
        postproc_mode = None
    elif 'unet2' in model_name:
        task_mode = 'unet2'
        postproc_mode = None
    else:
        raise ValueError('unknown model name: %s' % model_name)
    return task_mode, postproc_mode, attn_on, dense_on, style_scale_on
def process_model_type(model_name):
    """Extract the base model type from a model name.

    Checks 'scellseg', 'cellpose', 'hover', 'unet3', 'unet2' in that order
    (same precedence as process_different_model).

    Raises
    ----------------
    ValueError
        if model_name matches none of the known model families
        (FIX: previously this fell through to a NameError on model_type)
    """
    for model_type in ('scellseg', 'cellpose', 'hover', 'unet3', 'unet2'):
        if model_type in model_name:
            return model_type
    raise ValueError('unknown model name: %s' % model_name)
"scipy.ndimage.generate_binary_structure",
"os.mkdir",
"os.remove",
"numpy.random.seed",
"numpy.argmax",
"scipy.ndimage.find_objects",
"scipy.ndimage.binary_fill_holes",
"numpy.ones",
"numpy.clip",
"numpy.isnan",
"numpy.histogram",
"numpy.arange",
"numpy.unique",
"random.randint",
"scipy... | [((474, 507), 'numpy.vectorize', 'np.vectorize', (['colorsys.rgb_to_hsv'], {}), '(colorsys.rgb_to_hsv)\n', (486, 507), True, 'import numpy as np\n'), ((522, 547), 'numpy.rollaxis', 'np.rollaxis', (['arr'], {'axis': '(-1)'}), '(arr, axis=-1)\n', (533, 547), True, 'import numpy as np\n'), ((601, 629), 'numpy.stack', 'np.stack', (['(h, s, v)'], {'axis': '(-1)'}), '((h, s, v), axis=-1)\n', (609, 629), True, 'import numpy as np\n'), ((691, 724), 'numpy.vectorize', 'np.vectorize', (['colorsys.hsv_to_rgb'], {}), '(colorsys.hsv_to_rgb)\n', (703, 724), True, 'import numpy as np\n'), ((739, 764), 'numpy.rollaxis', 'np.rollaxis', (['arr'], {'axis': '(-1)'}), '(arr, axis=-1)\n', (750, 764), True, 'import numpy as np\n'), ((818, 846), 'numpy.stack', 'np.stack', (['(r, g, b)'], {'axis': '(-1)'}), '((r, g, b), axis=-1)\n', (826, 846), True, 'import numpy as np\n'), ((1315, 1327), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1322, 1327), False, 'from urllib.request import urlopen\n'), ((1688, 1711), 'os.path.expanduser', 'os.path.expanduser', (['dst'], {}), '(dst)\n', (1706, 1711), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((1726, 1746), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (1741, 1746), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((1755, 1809), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'dir': 'dst_dir'}), '(delete=False, dir=dst_dir)\n', (1782, 1809), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((2801, 2834), 'numpy.zeros', 'np.zeros', (['masks.shape', 'np.float64'], {}), '(masks.shape, np.float64)\n', (2809, 2834), True, 'import numpy as np\n'), ((4680, 4710), 'numpy.zeros', 'np.zeros', (['masks.shape', 'np.bool'], {}), '(masks.shape, np.bool)\n', (4688, 4710), True, 'import numpy as np\n'), ((9223, 9251), 'numpy.clip', 'np.clip', 
(['convexity', '(0.0)', '(1.0)'], {}), '(convexity, 0.0, 1.0)\n', (9230, 9251), True, 'import numpy as np\n'), ((9267, 9294), 'numpy.clip', 'np.clip', (['solidity', '(0.0)', '(1.0)'], {}), '(solidity, 0.0, 1.0)\n', (9274, 9294), True, 'import numpy as np\n'), ((9313, 9343), 'numpy.clip', 'np.clip', (['compactness', '(0.0)', '(1.0)'], {}), '(compactness, 0.0, 1.0)\n', (9320, 9343), True, 'import numpy as np\n'), ((9594, 9647), 'scipy.ndimage.generate_binary_structure', 'generate_binary_structure', (['cells.ndim'], {'connectivity': '(1)'}), '(cells.ndim, connectivity=1)\n', (9619, 9647), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((9670, 9689), 'scipy.ndimage.label', 'label', (['cells', 'selem'], {}), '(cells, selem)\n', (9675, 9689), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((10566, 10603), 'numpy.unique', 'np.unique', (['masks'], {'return_inverse': '(True)'}), '(masks, return_inverse=True)\n', (10575, 10603), True, 'import numpy as np\n'), ((10616, 10641), 'numpy.reshape', 'np.reshape', (['masks', 'shape0'], {}), '(masks, shape0)\n', (10626, 10641), True, 'import numpy as np\n'), ((11447, 11471), 'numpy.median', 'np.median', (['(counts ** 0.5)'], {}), '(counts ** 0.5)\n', (11456, 11471), True, 'import numpy as np\n'), ((11477, 11489), 'numpy.isnan', 'np.isnan', (['md'], {}), '(md)\n', (11485, 11489), True, 'import numpy as np\n'), ((11618, 11654), 'numpy.unique', 'np.unique', (['masks'], {'return_counts': '(True)'}), '(masks, return_counts=True)\n', (11627, 11654), True, 'import numpy as np\n'), ((11698, 11737), 'numpy.histogram', 'np.histogram', (['(counts ** 0.5 * 0.5)', 'bins'], {}), '(counts ** 0.5 * 0.5, bins)\n', (11710, 11737), True, 'import numpy as np\n'), ((11858, 11870), 'numpy.isnan', 'np.isnan', (['md'], {}), '(md)\n', (11866, 11870), True, 'import numpy as 
np\n'), ((12283, 12316), 'numpy.unique', 'np.unique', (['M0'], {'return_counts': '(True)'}), '(M0, return_counts=True)\n', (12292, 12316), True, 'import numpy as np\n'), ((13302, 13321), 'scipy.ndimage.find_objects', 'find_objects', (['masks'], {}), '(masks)\n', (13314, 13321), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((14157, 14174), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (14168, 14174), False, 'import random\n'), ((14179, 14199), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14193, 14199), True, 'import numpy as np\n'), ((14204, 14227), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (14221, 14227), False, 'import torch\n'), ((14232, 14260), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (14254, 14260), False, 'import torch\n'), ((16568, 16587), 'numpy.any', 'np.any', (['img'], {'axis': '(1)'}), '(img, axis=1)\n', (16574, 16587), True, 'import numpy as np\n'), ((16599, 16618), 'numpy.any', 'np.any', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (16605, 16618), True, 'import numpy as np\n'), ((2173, 2197), 'shutil.move', 'shutil.move', (['f.name', 'dst'], {}), '(f.name, dst)\n', (2184, 2197), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((2240, 2262), 'os.path.exists', 'os.path.exists', (['f.name'], {}), '(f.name)\n', (2254, 2262), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((5499, 5515), 'numpy.unique', 'np.unique', (['masks'], {}), '(masks)\n', (5508, 5515), True, 'import numpy as np\n'), ((6100, 6137), 'numpy.append', 'np.append', (['points', 'points[:1]'], {'axis': '(0)'}), '(points, points[:1], axis=0)\n', (6109, 6137), True, 'import numpy as np\n'), ((7572, 7600), 'numpy.arange', 'np.arange', (['(-d0[1])', '(d0[1] + 1)'], {}), '(-d0[1], d0[1] + 1)\n', (7581, 7600), True, 'import 
numpy as np\n'), ((7632, 7660), 'numpy.arange', 'np.arange', (['(-d0[0])', '(d0[0] + 1)'], {}), '(-d0[0], d0[0] + 1)\n', (7641, 7660), True, 'import numpy as np\n'), ((7896, 7916), 'numpy.array', 'np.array', (['[100, 100]'], {}), '([100, 100])\n', (7904, 7916), True, 'import numpy as np\n'), ((9735, 9755), 'scipy.ndimage.find_objects', 'find_objects', (['labels'], {}), '(labels)\n', (9747, 9755), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((9827, 9859), 'numpy.zeros', 'np.zeros', (['labels.shape', 'np.int32'], {}), '(labels.shape, np.int32)\n', (9835, 9859), True, 'import numpy as np\n'), ((9878, 9944), 'numpy.logical_and', 'np.logical_and', (['(~(labels > 0))', '(output[..., 2] > boundary_threshold)'], {}), '(~(labels > 0), output[..., 2] > boundary_threshold)\n', (9892, 9944), True, 'import numpy as np\n'), ((11377, 11392), 'numpy.int32', 'np.int32', (['masks'], {}), '(masks)\n', (11385, 11392), True, 'import numpy as np\n'), ((11824, 11848), 'numpy.median', 'np.median', (['(counts ** 0.5)'], {}), '(counts ** 0.5)\n', (11833, 11848), True, 'import numpy as np\n'), ((12046, 12071), 'numpy.percentile', 'np.percentile', (['counts', '(25)'], {}), '(counts, 25)\n', (12059, 12071), True, 'import numpy as np\n'), ((12074, 12099), 'numpy.percentile', 'np.percentile', (['counts', '(75)'], {}), '(counts, 75)\n', (12087, 12099), True, 'import numpy as np\n'), ((14128, 14152), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (14142, 14152), False, 'import random\n'), ((15479, 15499), 'numpy.unique', 'np.unique', (['type_inst'], {}), '(type_inst)\n', (15488, 15499), True, 'import numpy as np\n'), ((15835, 15873), 'numpy.array', 'np.array', (['[[rmin, cmin], [rmax, cmax]]'], {}), '([[rmin, cmin], [rmax, cmax]])\n', (15843, 15873), True, 'import numpy as np\n'), ((16037, 16058), 'cv2.moments', 'cv2.moments', (['inst_map'], {}), '(inst_map)\n', (16048, 
16058), False, 'import cv2\n'), ((16197, 16220), 'numpy.array', 'np.array', (['inst_centroid'], {}), '(inst_centroid)\n', (16205, 16220), True, 'import numpy as np\n'), ((16457, 16477), 'numpy.array', 'np.array', (['inst_types'], {}), '(inst_types)\n', (16465, 16477), True, 'import numpy as np\n'), ((16917, 16943), 'os.path.isdir', 'os.path.isdir', (['folder_name'], {}), '(folder_name)\n', (16930, 16943), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((16953, 16974), 'os.mkdir', 'os.mkdir', (['folder_name'], {}), '(folder_name)\n', (16961, 16974), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((1832, 1925), 'tqdm.tqdm', 'tqdm', ([], {'total': 'file_size', 'disable': '(not progress)', 'unit': '"""B"""', 'unit_scale': '(True)', 'unit_divisor': '(1024)'}), "(total=file_size, disable=not progress, unit='B', unit_scale=True,\n unit_divisor=1024)\n", (1836, 1925), False, 'from tqdm import tqdm\n'), ((2276, 2293), 'os.remove', 'os.remove', (['f.name'], {}), '(f.name)\n', (2285, 2293), False, 'import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess\n'), ((5738, 5779), 'numpy.argmax', 'np.argmax', (['[c.shape[0] for c in contours]'], {}), '([c.shape[0] for c in contours])\n', (5747, 5779), True, 'import numpy as np\n'), ((6438, 6474), 'numpy.unique', 'np.unique', (['masks'], {'return_counts': '(True)'}), '(masks, return_counts=True)\n', (6447, 6474), True, 'import numpy as np\n'), ((7991, 8032), 'numpy.unique', 'np.unique', (['masks_true'], {'return_counts': '(True)'}), '(masks_true, return_counts=True)\n', (8000, 8032), True, 'import numpy as np\n'), ((8490, 8515), 'numpy.median', 'np.median', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (8499, 8515), True, 'import numpy as np\n'), ((9778, 9811), 'numpy.ones', 'np.ones', (['labels.shape', 'np.float32'], {}), '(labels.shape, np.float32)\n', (9785, 9811), True, 'import numpy as np\n'), ((11205, 11216), 
'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (11213, 11216), True, 'import numpy as np\n'), ((11991, 12027), 'numpy.unique', 'np.unique', (['masks'], {'return_counts': '(True)'}), '(masks, return_counts=True)\n', (12000, 12027), True, 'import numpy as np\n'), ((12155, 12174), 'numpy.percentile', 'np.percentile', (['X', '(1)'], {}), '(X, 1)\n', (12168, 12174), True, 'import numpy as np\n'), ((12179, 12199), 'numpy.percentile', 'np.percentile', (['X', '(99)'], {}), '(X, 99)\n', (12192, 12199), True, 'import numpy as np\n'), ((12202, 12221), 'numpy.percentile', 'np.percentile', (['X', '(1)'], {}), '(X, 1)\n', (12215, 12221), True, 'import numpy as np\n'), ((15663, 15706), 'numpy.unique', 'np.unique', (['class_inst[type_inst == inst_id]'], {}), '(class_inst[type_inst == inst_id])\n', (15672, 15706), True, 'import numpy as np\n'), ((16636, 16650), 'numpy.where', 'np.where', (['rows'], {}), '(rows)\n', (16644, 16650), True, 'import numpy as np\n'), ((16680, 16694), 'numpy.where', 'np.where', (['cols'], {}), '(cols)\n', (16688, 16694), True, 'import numpy as np\n'), ((3246, 3310), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (3262, 3310), False, 'import cv2\n'), ((3418, 3434), 'numpy.nonzero', 'np.nonzero', (['mask'], {}), '(mask)\n', (3428, 3434), True, 'import numpy as np\n'), ((5104, 5168), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (5120, 5168), False, 'import cv2\n'), ((8383, 8415), 'numpy.nonzero', 'np.nonzero', (['(masks_true == ic + 1)'], {}), '(masks_true == ic + 1)\n', (8393, 8415), True, 'import numpy as np\n'), ((8726, 8744), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (8736, 8744), False, 'from scipy.spatial import ConvexHull\n'), ((10343, 10374), 'numpy.minimum', 'np.minimum', (['dists[slc_pad]', 
'msk'], {}), '(dists[slc_pad], msk)\n', (10353, 10374), True, 'import numpy as np\n'), ((16413, 16437), 'numpy.array', 'np.array', (['inst_centroids'], {}), '(inst_centroids)\n', (16421, 16437), True, 'import numpy as np\n'), ((5945, 5961), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (5953, 5961), True, 'import numpy as np\n'), ((10286, 10309), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['msk', '(5)'], {}), '(msk, 5)\n', (10301, 10309), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((13793, 13815), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', (['msk'], {}), '(msk)\n', (13810, 13815), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((13719, 13744), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', (['msk[k]'], {}), '(msk[k])\n', (13736, 13744), False, 'from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes\n'), ((3338, 3374), 'numpy.concatenate', 'np.concatenate', (['contours[-2]'], {'axis': '(0)'}), '(contours[-2], axis=0)\n', (3352, 3374), True, 'import numpy as np\n'), ((5196, 5232), 'numpy.concatenate', 'np.concatenate', (['contours[-2]'], {'axis': '(0)'}), '(contours[-2], axis=0)\n', (5210, 5232), True, 'import numpy as np\n'), ((6155, 6178), 'numpy.diff', 'np.diff', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (6162, 6178), True, 'import numpy as np\n')] |
# pyqtgraph demo: a scrolling random-walk plot with a linked zoom view.
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
win = pg.GraphicsWindow()
win.setWindowTitle('Scroll and Zoomed Plot')
plotScroll = win.addPlot()
# downsample to peaks and clip to the visible range so redraws stay fast
plotScroll.setDownsampling(mode='peak')
plotScroll.setClipToView(True)
curveScroll = plotScroll.plot()
dataRnd = np.empty(100)      # growable sample buffer
ptrDataRnd = 0               # number of valid samples in dataRnd
def updateScroll():
    # Append one random sample, doubling the buffer when it fills up.
    global dataRnd, ptrDataRnd
    dataRnd[ptrDataRnd] = np.random.normal()
    ptrDataRnd += 1
    if ptrDataRnd >= dataRnd.shape[0]:
        # buffer full: double its capacity, keeping existing samples
        tmp = dataRnd
        dataRnd = np.empty(dataRnd.shape[0] * 2)
        dataRnd[:tmp.shape[0]] = tmp
    curveScroll.setData(dataRnd[:ptrDataRnd])
# draggable region on the scroll plot that selects the zoomed x-range
LinRegionItem = pg.LinearRegionItem([0, 100])
LinRegionItem.setZValue(-10)     # draw the region behind the curve
plotScroll.addItem(LinRegionItem)
win.nextRow()
plotZoom = win.addPlot(title="Zoomed graph for Random plot ")
plotZoom.plot(dataRnd, pen=(255, 255, 255, 200))
def updatePlot():
    # Sync the zoom plot's x-range to the selected region.
    plotZoom.setXRange(*LinRegionItem.getRegion(), padding=0)
def updateRegion():
    # Sync the region to the zoom plot's x-range (inverse direction).
    LinRegionItem.setRegion(plotZoom.getViewBox().viewRange()[0])
# two-way linkage between the region selector and the zoom plot
LinRegionItem.sigRegionChanged.connect(updatePlot)
plotZoom.sigXRangeChanged.connect(updateRegion)
updatePlot()
# update all plots
def update():
    updateScroll()
timer = pg.QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)      # refresh every 50 ms
# Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| [
"pyqtgraph.GraphicsWindow",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"pyqtgraph.QtCore.QTimer",
"numpy.empty",
"numpy.random.normal",
"pyqtgraph.LinearRegionItem"
] | [((92, 111), 'pyqtgraph.GraphicsWindow', 'pg.GraphicsWindow', ([], {}), '()\n', (109, 111), True, 'import pyqtgraph as pg\n'), ((307, 320), 'numpy.empty', 'np.empty', (['(100)'], {}), '(100)\n', (315, 320), True, 'import numpy as np\n'), ((680, 709), 'pyqtgraph.LinearRegionItem', 'pg.LinearRegionItem', (['[0, 100]'], {}), '([0, 100])\n', (699, 709), True, 'import pyqtgraph as pg\n'), ((1278, 1296), 'pyqtgraph.QtCore.QTimer', 'pg.QtCore.QTimer', ([], {}), '()\n', (1294, 1296), True, 'import pyqtgraph as pg\n'), ((421, 439), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (437, 439), True, 'import numpy as np\n'), ((543, 573), 'numpy.empty', 'np.empty', (['(dataRnd.shape[0] * 2)'], {}), '(dataRnd.shape[0] * 2)\n', (551, 573), True, 'import numpy as np\n'), ((1552, 1581), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (1579, 1581), False, 'from pyqtgraph.Qt import QtCore, QtGui\n')] |
'''
Collated by <NAME> 鄒慶士 博士 (Ph.D.) Distinguished Prof. at the Department of Mechanical Engineering/Director at the Center of Artificial Intelligence & Data Science (機械工程系特聘教授兼人工智慧暨資料科學研究中心主任), MCUT (明志科技大學); Prof. at the Institute of Information & Decision Sciences (資訊與決策科學研究所教授), NTUB (國立臺北商業大學); the Chinese Academy of R Software (CARS) (中華R軟體學會創會理事長); the Data Science and Business Applications Association of Taiwan (DSBA) (臺灣資料科學與商業應用協會創會理事長); the Chinese Association for Quality Assessment and Evaluation (CAQAE) (中華品質評鑑協會常務監事); the Chinese Society of Quality (CSQ) (中華民國品質學會大數據品質應用委員會主任委員
Notes: This code is provided without warranty.
'''
# Take an arbitrary cosine function of the form x(t) = Acos(2π fct +φ) and proceed step by step as follows
# • Represent the signal x(t) in computer memory (discrete-time x[n]) and plot the signal in time domain
# • Represent the signal in frequency domain using FFT (X[k])
# • Extract magnitude and phase information from the FFT result
# • Reconstruct the time domain signal from the frequency domain samples
from scipy.fftpack import fft, ifft, fftshift, ifftshift
import numpy as np
import matplotlib.pyplot as plt
A = 0.5 # amplitude of the cosine wave
fc = 10 # frequency of the cosine wave in Hz
phase = 30 # desired phase shift of the cosine in degrees
fs = 32*fc # sampling frequency with oversampling factor 32
t = np.arange(start = 0,stop = 2,step = 1/fs) # 2 seconds duration
phi = phase*np.pi/180; # convert phase shift from degrees to radians
x = A*np.cos(2*np.pi*fc*t+phi) # time domain signal with phase shift
N = 256 # FFT size
# fftshift reorders the DFT output so frequencies run from -fs/2 to +fs/2;
# the 1/N factor normalizes the amplitudes
X = 1/N*fftshift(fft(x, N)) # N-point complex DFT
df = fs/N # frequency resolution
sampleIndex = np.arange(start = -N//2, stop = N//2) # // for integer division (-128, 127)
f = sampleIndex*df # x-axis index converted to ordered frequencies
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, ncols=1)
ax1.plot(t,x) # plot time domain representation
ax1.set_title(r'$x(t) = 0.5 cos (2 \pi 10 t + \pi/6)$')
ax1.set_xlabel('time (t seconds)');ax1.set_ylabel('x(t)')
ax2.stem(f, abs(X), use_line_collection=True) # magnitudes vs frequencies
ax2.set_xlim(-30, 30)
ax2.set_title('Amplitude spectrum')
ax2.set_xlabel('f (Hz)');ax2.set_ylabel(r'$ \left| X(k) \right|$')
phase=np.arctan2(np.imag(X),np.real(X))*180/np.pi # phase information
ax3.plot(f,phase) # phase vs frequencies (noisy: tiny FFT values dominate)
X2 = X #store the FFT results in another array
# NOTE(review): X2 = X is an alias, not a copy — the thresholding below also
# zeroes small entries of X itself; use X.copy() if the raw spectrum must be
# preserved for later use.
# detect noise (very small numbers (eps)) and ignore them
threshold = max(abs(X))/10000; # tolerance threshold
X2[abs(X)<threshold]=0 # maskout values below the threshold
phase=np.arctan2(np.imag(X2),np.real(X2))*180/np.pi # phase information
ax4.stem(f,phase, use_line_collection=True) # phase vs frequencies
ax4.set_xlim(-30, 30);ax4.set_title('Phase spectrum')
ax4.set_ylabel(r"$\angle$ X[k]");ax4.set_xlabel('f(Hz)')
fig.show()
# interactive-session leftovers: inspect the first few DFT bins and phases
X[0:5]
np.arctan2(np.imag(X[0:5]),np.real(X[0:5]))
#### Reconstructing the time domain signal from the frequency domain samples
# ifftshift undoes the earlier fftshift; the factor N undoes the 1/N scaling
x_recon = N*ifft(ifftshift(X),N) # reconstructed signal
t = np.arange(start = 0,stop = len(x_recon))/fs # recompute time index
fig2, ax5 = plt.subplots()
ax5.plot(t,np.real(x_recon)) # reconstructed signal
ax5.set_title('reconstructed signal')
ax5.set_xlabel('time (t seconds)');ax5.set_ylabel('x(t)');
fig2.show()
#### Reference:
# Viswanathan, Mathuranathan, Digital Modulations using Python, December 2019.
"scipy.fftpack.fft",
"numpy.imag",
"numpy.arange",
"numpy.real",
"numpy.cos",
"matplotlib.pyplot.subplots",
"scipy.fftpack.ifftshift"
] | [((1378, 1417), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(2)', 'step': '(1 / fs)'}), '(start=0, stop=2, step=1 / fs)\n', (1387, 1417), True, 'import numpy as np\n'), ((1704, 1741), 'numpy.arange', 'np.arange', ([], {'start': '(-N // 2)', 'stop': '(N // 2)'}), '(start=-N // 2, stop=N // 2)\n', (1713, 1741), True, 'import numpy as np\n'), ((1876, 1906), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'ncols': '(1)'}), '(nrows=4, ncols=1)\n', (1888, 1906), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3149), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3147, 3149), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1555), 'numpy.cos', 'np.cos', (['(2 * np.pi * fc * t + phi)'], {}), '(2 * np.pi * fc * t + phi)\n', (1529, 1555), True, 'import numpy as np\n'), ((2884, 2899), 'numpy.imag', 'np.imag', (['X[0:5]'], {}), '(X[0:5])\n', (2891, 2899), True, 'import numpy as np\n'), ((2900, 2915), 'numpy.real', 'np.real', (['X[0:5]'], {}), '(X[0:5])\n', (2907, 2915), True, 'import numpy as np\n'), ((3161, 3177), 'numpy.real', 'np.real', (['x_recon'], {}), '(x_recon)\n', (3168, 3177), True, 'import numpy as np\n'), ((1623, 1632), 'scipy.fftpack.fft', 'fft', (['x', 'N'], {}), '(x, N)\n', (1626, 1632), False, 'from scipy.fftpack import fft, ifft, fftshift, ifftshift\n'), ((3012, 3024), 'scipy.fftpack.ifftshift', 'ifftshift', (['X'], {}), '(X)\n', (3021, 3024), False, 'from scipy.fftpack import fft, ifft, fftshift, ifftshift\n'), ((2289, 2299), 'numpy.imag', 'np.imag', (['X'], {}), '(X)\n', (2296, 2299), True, 'import numpy as np\n'), ((2300, 2310), 'numpy.real', 'np.real', (['X'], {}), '(X)\n', (2307, 2310), True, 'import numpy as np\n'), ((2620, 2631), 'numpy.imag', 'np.imag', (['X2'], {}), '(X2)\n', (2627, 2631), True, 'import numpy as np\n'), ((2632, 2643), 'numpy.real', 'np.real', (['X2'], {}), '(X2)\n', (2639, 2643), True, 'import numpy as np\n')] |
import numpy as np
import kplr
client = kplr.API()
import pandas as pd
import os
from kepler_data import load_kepler_data
import matplotlib.pyplot as plt
def assemble_data(N, lc_dir, ftrain=.8, fdouble=.5, ndays=70, path=".",
saveplot=False):
"""
Code for downloading a training set for Kepler binary project.
params:
-------
N: (int)
The total number of stars light curves to download. This will be
adjusted up to give integer fractions of star numbers.
lc_dir: (str)
The path to the place where .kplr downloads light curves.
On my machine this is "~/.kplr/data/lightcurves"
ftrain: (float)
The fraction of stars to train on.
(1-ftrain) is the fraction of test stars. default is 0.8.
Must be between 0 and 1.
fdouble: (float)
The fraction of "double" stars you want.
(Warning: some of these might be triples or quadruples!)
Must be between 0 and 1. Default is 0.5 and must be 0.5 for now.
fsingle = 1-fdouble.
ndays: (int)
The number of days of light curves to download. Default is 70.
path: str
The path to the directory you'd like the train and test light curves
and plots to be saved in. Default is current directory.
Two subdirectories will be created containing test and train light
curves.
saveplot: (boolean)
If true, plots of the light curves will be saved to the train and test
directories.
"""
while (N * ftrain) % 3 and (N * (1 - ftrain)) % 3:
N += 1
print("actually using {} stars".format(N))
df = pd.read_csv("../data/KICs.csv")
kids = df.KIC.values
np.random.shuffle(kids)
kids = kids[:N]
ntrain = int(N*ftrain)
print(N, "stars in total")
print(ntrain, "for training, ", N - ntrain, "for testing")
print(ntrain/3, "training singles, ", (ntrain/3)*2, "training doubles, ",
(N - ntrain)/3, "testing singles, ", 2*((N - ntrain)/3),
"testing doubles")
# Create the directories for saving data
dirname = "{0}/train/".format(path)
if not os.path.exists(dirname):
os.mkdir(dirname)
print("Directory " , dirname , " Created ")
dirname = "{0}/test/".format(path)
if not os.path.exists(dirname):
os.mkdir(dirname)
print("Directory " , dirname , " Created ")
# Allocate indices for splitting train and test.
inds = np.arange(N)
np.random.seed(42)
np.random.shuffle(inds)
training_inds = inds[:ntrain]
testing_inds = inds[ntrain:]
training_ids = df.KIC.iloc[training_inds]
make_singles_and_doubles(training_ids.values, "train", ndays, path=path,
lc_dir=lc_dir, saveplot=saveplot)
testing_ids = df.KIC.iloc[testing_inds]
make_singles_and_doubles(testing_ids.values, "test", ndays, path=path,
lc_dir=lc_dir, saveplot=saveplot)
def make_singles_and_doubles(ids, train_or_test, ndays, lc_dir, fdouble=.5,
path=".", saveplot=False):
"""
params:
-------
ids: list or array
The KIC ids to create light curves from.
train_or_test: str
The directory in which to save the resulting light curves.
"""
ntrain = len(ids)
ndouble = int((ntrain/3) * 2)
double_ids = ids[:ndouble]
single_ids = ids[ndouble:]
print(train_or_test, "set: ", ndouble/2, "doubles, made with {} light"
"curves and ".format(ndouble), len(single_ids), "singles \n")
for j, i in enumerate(np.arange(0, len(double_ids), 2)):
id0, id1, id2 = str(double_ids[i]).zfill(9), \
str(double_ids[i + 1]).zfill(9), str(single_ids[j]).zfill(9)
print("double 1 id = ", id0, "double 2 id = ", id1, "single id = ",
id2)
# Download the light curves to add together
fname = "{0}/{1}".format(lc_dir, id0)
if not os.path.exists(fname):
download_light_curve(double_ids[i])
fname = "{0}/{1}".format(lc_dir, id1)
if not os.path.exists(fname):
download_light_curve(double_ids[i + 1])
# Download the single star light curve.
fname = "{0}/{1}".format(lc_dir, id2)
if not os.path.exists(fname):
download_light_curve(single_ids[j])
# Load the light curves into memory
p0 = "{0}/{1}".format(lc_dir, id0)
x0, y0, yerr0, cadence0 = load_kepler_data(p0)
p1 = "{0}/{1}".format(lc_dir, id1)
x1, y1, yerr1, cadence1 = load_kepler_data(p1)
p2 = "{0}/{1}".format(lc_dir, id2)
x2, y2, yerr2, cadence2 = load_kepler_data(p2)
# Add the light curves together according to their cadences.
x, y0, yerrdouble, x1, y1, yerr1, ydouble, \
xsingle, ysingle, yerrsingle, cadence = \
add_lcs_together(x0, y0, yerr0, cadence0,
x1, y1, yerr1, cadence1,
x2, y2, yerr2, cadence2)
# Choose a random segment of the light curve that is ndays long and
# make sure it's not empty!
tmin, tmax = 0, max(x) - ndays
for k in range(100):
t = np.random.uniform(tmin, tmax)
m = (t < x) * (x < t + ndays)
if len(x[m]):
break
x, y0, yerrdouble, x1, y1, yerr1, ydouble, xsingle, ysingle, \
yerrsingle, cadence = x[m], y0[m], yerrdouble[m], x1[m], y1[m], \
yerr1[m], ydouble[m], xsingle[m], ysingle[m], yerrsingle[m], \
cadence[m]
if saveplot:
plt.figure(figsize=(16, 9))
plt.plot(x, y0, ".", label="star 1")
plt.plot(x1, y1 + .01, ".", label="star 2")
plt.plot(x, ydouble + .02, ".", label="double star")
plt.plot(xsingle, ysingle + .03, "k.", label="single star")
plt.legend()
figname = "{0}/{1}/{2}_{3}_{4}_plot".format(path, train_or_test,
id0, id1, id2)
print("saving figure as ", figname)
plt.savefig(figname)
# Save the light curves.
double_lc = pd.DataFrame(dict({"time": x, "flux": ydouble,
"flux_err": yerrdouble,
"cadence": cadence}))
fname = "{0}/{1}/{2}_{3}_lc.csv".format(path, train_or_test, id0, id1)
print("saving double lc to ", fname)
double_lc.to_csv(fname)
single_lc = pd.DataFrame(dict({"time": x, "flux": ysingle,
"flux_err": yerrsingle,
"cadence": cadence}))
fname = "{0}/{1}/{2}_lc.csv".format(path, train_or_test, id2)
print("saving single lc to ", fname, "\n")
single_lc.to_csv(fname)
def add_lcs_together(x0, y0, yerr0, cadence0, x1, y1, yerr1, cadence1, x2, y2,
yerr2, cadence2):
df0 = pd.DataFrame(dict({"x0": x0, "y0": y0, "yerr0": yerr0,
"cadence": cadence0}))
df1 = pd.DataFrame(dict({"x1": x1, "y1": y1, "yerr1": yerr1,
"cadence": cadence1}))
df2 = pd.DataFrame(dict({"x2": x2, "y2": y2, "yerr2": yerr2,
"cadence": cadence2}))
_df = pd.merge(df0, df1, on="cadence", how="inner")
df = pd.merge(_df, df2, on="cadence", how="inner")
return df.x0.values, df.y0.values, df.yerr0.values, \
df.x1.values, df.y1.values, df.yerr1.values, \
df.y0.values+df.y1.values, \
df.x2.values, df.y2.values, df.yerr2.values, df.cadence.values
def download_light_curve(id):
"""
Download the Kepler light curves of star (id). Default location is
~/.kplr/data/lightcurves/
"""
star = client.star(id)
print("downloading light curves for star ", id, "...")
star.get_light_curves(fetch=True, short_cadence=False)
if __name__ == "__main__":
lc_dir = "/Users/ruthangus/.kplr/data/lightcurves"
assemble_data(10, lc_dir, ftrain=.8, fdouble=.5, ndays=70, path=".",
saveplot=True)
| [
"os.mkdir",
"numpy.random.uniform",
"matplotlib.pyplot.savefig",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"pandas.merge",
"matplotlib.pyplot.legend",
"os.path.exists",
"matplotlib.pyplot.figure",
"numpy.arange",
"kplr.API",
"kepler_data.load_kepler_data",
"numpy.ra... | [((40, 50), 'kplr.API', 'kplr.API', ([], {}), '()\n', (48, 50), False, 'import kplr\n'), ((1644, 1675), 'pandas.read_csv', 'pd.read_csv', (['"""../data/KICs.csv"""'], {}), "('../data/KICs.csv')\n", (1655, 1675), True, 'import pandas as pd\n'), ((1705, 1728), 'numpy.random.shuffle', 'np.random.shuffle', (['kids'], {}), '(kids)\n', (1722, 1728), True, 'import numpy as np\n'), ((2466, 2478), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2475, 2478), True, 'import numpy as np\n'), ((2483, 2501), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2497, 2501), True, 'import numpy as np\n'), ((2506, 2529), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (2523, 2529), True, 'import numpy as np\n'), ((7373, 7418), 'pandas.merge', 'pd.merge', (['df0', 'df1'], {'on': '"""cadence"""', 'how': '"""inner"""'}), "(df0, df1, on='cadence', how='inner')\n", (7381, 7418), True, 'import pandas as pd\n'), ((7428, 7473), 'pandas.merge', 'pd.merge', (['_df', 'df2'], {'on': '"""cadence"""', 'how': '"""inner"""'}), "(_df, df2, on='cadence', how='inner')\n", (7436, 7473), True, 'import pandas as pd\n'), ((2142, 2165), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (2156, 2165), False, 'import os\n'), ((2175, 2192), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (2183, 2192), False, 'import os\n'), ((2297, 2320), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (2311, 2320), False, 'import os\n'), ((2330, 2347), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (2338, 2347), False, 'import os\n'), ((4478, 4498), 'kepler_data.load_kepler_data', 'load_kepler_data', (['p0'], {}), '(p0)\n', (4494, 4498), False, 'from kepler_data import load_kepler_data\n'), ((4576, 4596), 'kepler_data.load_kepler_data', 'load_kepler_data', (['p1'], {}), '(p1)\n', (4592, 4596), False, 'from kepler_data import load_kepler_data\n'), ((4674, 4694), 'kepler_data.load_kepler_data', 
'load_kepler_data', (['p2'], {}), '(p2)\n', (4690, 4694), False, 'from kepler_data import load_kepler_data\n'), ((3968, 3989), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (3982, 3989), False, 'import os\n'), ((4100, 4121), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (4114, 4121), False, 'import os\n'), ((4285, 4306), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (4299, 4306), False, 'import os\n'), ((5241, 5270), 'numpy.random.uniform', 'np.random.uniform', (['tmin', 'tmax'], {}), '(tmin, tmax)\n', (5258, 5270), True, 'import numpy as np\n'), ((5642, 5669), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (5652, 5669), True, 'import matplotlib.pyplot as plt\n'), ((5682, 5718), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y0', '"""."""'], {'label': '"""star 1"""'}), "(x, y0, '.', label='star 1')\n", (5690, 5718), True, 'import matplotlib.pyplot as plt\n'), ((5731, 5775), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', '(y1 + 0.01)', '"""."""'], {'label': '"""star 2"""'}), "(x1, y1 + 0.01, '.', label='star 2')\n", (5739, 5775), True, 'import matplotlib.pyplot as plt\n'), ((5787, 5840), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(ydouble + 0.02)', '"""."""'], {'label': '"""double star"""'}), "(x, ydouble + 0.02, '.', label='double star')\n", (5795, 5840), True, 'import matplotlib.pyplot as plt\n'), ((5852, 5912), 'matplotlib.pyplot.plot', 'plt.plot', (['xsingle', '(ysingle + 0.03)', '"""k."""'], {'label': '"""single star"""'}), "(xsingle, ysingle + 0.03, 'k.', label='single star')\n", (5860, 5912), True, 'import matplotlib.pyplot as plt\n'), ((5924, 5936), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5934, 5936), True, 'import matplotlib.pyplot as plt\n'), ((6145, 6165), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (6156, 6165), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 20:31:03 2020
@author: maria
LSTM Neural Network Architecture
using the chats only as input
"""
from keras.models import Sequential
from keras.layers import Embedding,Dense,LSTM
from keras.optimizers import Adam
from keras.initializers import RandomNormal, Zeros
import matplotlib.pyplot as plt
from keras.utils.vis_utils import plot_model
from metrics import recall_m, precision_m, f1_m
from keras.callbacks import ModelCheckpoint, LambdaCallback, EarlyStopping
from keras import backend as K
import numpy as np
import pickle
import csv
from pathlib import Path
# load data
#X_train,X_test,y_train,y_test,embedding_matrix, vocab_size,
# max_doc_len):
x_train = np.load('./traindata/chats.npy')
y_train = np.load('./traindata/labels.npy')
x_test = np.load('./testdata/chats.npy')
y_test = np.load('./testdata/labels.npy')
# load embeddings and their params
embedding_matrix = np.load('./embedding_matrix.npy')
vocab_size, max_doc_len = pickle.load(open('embeddingparams.pkl','rb'))
# make directories to save the results
models_dir = Path.cwd() / "models/chatsLSTM"
models_dir.mkdir(parents=True, exist_ok=True)
pics_dir = Path.cwd() / "pics/chatsLSTM"
pics_dir.mkdir(parents=True, exist_ok=True)
# define keras model
model = Sequential()
# input length = longest doc length
e = Embedding(vocab_size,100,weights=[embedding_matrix],
input_length=max_doc_len,trainable=False, mask_zero = True)
# add embedding layer
model.add(e)
# lstm
# inpute shape = (timesteps, features)
model.add(LSTM(32))
# nn
model.add(Dense(10, activation='sigmoid'))
# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy',
metrics=['accuracy',recall_m,precision_m,f1_m])
# add checkpoints
filepath="./models/chatsLSTM/weights-improvement-{epoch:02d}-{val_accuracy:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
# SOS REMEMBER TO CLOSE f
# log the weights to check if they get updated during training
f = open('./models/chatsLSTM/weights.csv','w')
writer = csv.writer(f, delimiter='|')
print_weights = LambdaCallback(on_epoch_end=lambda batch,
logs: writer.writerow(model.get_weights()))
# This callback will stop the training when there is no improvement in
# the validation loss for 5 consecutive epochs.
early_stop = EarlyStopping(monitor='val_loss',min_delta =0.01,
patience=5, mode='min')
callbacks_list = [checkpoint,print_weights,early_stop]
# train
history = model.fit(x_train,y_train, validation_data=(x_test,y_test),
callbacks=callbacks_list,epochs=50,batch_size=100,
verbose=0)
f.close()
# plot performance-learning curves
# summarize history for accuracy
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#plt.show()
plt.savefig('./pics/chatsLSTM/accuracy.png',bbox_inches='tight',dpi=300)
# summarize history for loss
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#plt.show()
plt.savefig('./pics/chatsLSTM/loss.png',bbox_inches='tight',dpi=300)
# summarize history for precision
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_precision_m'])
plt.title('model accuracy')
plt.ylabel('precision')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#plt.show()
plt.savefig('./pics/chatsLSTM/precision.png',bbox_inches='tight',dpi=300)
# summarize history for recall
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_recall_m'])
plt.title('model accuracy')
plt.ylabel('recall')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#plt.show() ]
plt.savefig('./pics/chatsLSTM/recall.png',bbox_inches='tight',dpi=300)
# summarize history for f1
plt.figure()
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_f1_m'])
plt.title('model accuracy')
plt.ylabel('f1 score')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#plt.show()
plt.savefig('./pics/chatsLSTM/f1score.png',bbox_inches='tight',dpi=300)
#print(model.summary())
plot_model(model, to_file='./pics/chatsLSTM/model_summaryplot.png',
show_shapes=True, show_layer_names=False)
# save trained model
#model.save('multilabel_class.h5')
# release memory after done
K.clear_session()
# allso possible options to try: del model, gc.collect()
| [
"matplotlib.pyplot.title",
"numpy.load",
"keras.backend.clear_session",
"csv.writer",
"matplotlib.pyplot.plot",
"keras.callbacks.ModelCheckpoint",
"keras.layers.LSTM",
"matplotlib.pyplot.legend",
"keras.utils.vis_utils.plot_model",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.figure",
"keras... | [((753, 785), 'numpy.load', 'np.load', (['"""./traindata/chats.npy"""'], {}), "('./traindata/chats.npy')\n", (760, 785), True, 'import numpy as np\n'), ((796, 829), 'numpy.load', 'np.load', (['"""./traindata/labels.npy"""'], {}), "('./traindata/labels.npy')\n", (803, 829), True, 'import numpy as np\n'), ((839, 870), 'numpy.load', 'np.load', (['"""./testdata/chats.npy"""'], {}), "('./testdata/chats.npy')\n", (846, 870), True, 'import numpy as np\n'), ((880, 912), 'numpy.load', 'np.load', (['"""./testdata/labels.npy"""'], {}), "('./testdata/labels.npy')\n", (887, 912), True, 'import numpy as np\n'), ((971, 1004), 'numpy.load', 'np.load', (['"""./embedding_matrix.npy"""'], {}), "('./embedding_matrix.npy')\n", (978, 1004), True, 'import numpy as np\n'), ((1335, 1347), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1345, 1347), False, 'from keras.models import Sequential\n'), ((1389, 1507), 'keras.layers.Embedding', 'Embedding', (['vocab_size', '(100)'], {'weights': '[embedding_matrix]', 'input_length': 'max_doc_len', 'trainable': '(False)', 'mask_zero': '(True)'}), '(vocab_size, 100, weights=[embedding_matrix], input_length=\n max_doc_len, trainable=False, mask_zero=True)\n', (1398, 1507), False, 'from keras.layers import Embedding, Dense, LSTM\n'), ((1934, 2032), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_accuracy', verbose=1, save_best_only\n =True, mode='max')\n", (1949, 2032), False, 'from keras.callbacks import ModelCheckpoint, LambdaCallback, EarlyStopping\n'), ((2174, 2202), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '"""|"""'}), "(f, delimiter='|')\n", (2184, 2202), False, 'import csv\n'), ((2474, 2547), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.01)', 'patience': '(5)', 'mode': '"""min"""'}), "(monitor='val_loss', 
min_delta=0.01, patience=5, mode='min')\n", (2487, 2547), False, 'from keras.callbacks import ModelCheckpoint, LambdaCallback, EarlyStopping\n'), ((2892, 2904), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2902, 2904), True, 'import matplotlib.pyplot as plt\n'), ((2905, 2942), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (2913, 2942), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2984), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {}), "(history.history['val_accuracy'])\n", (2951, 2984), True, 'import matplotlib.pyplot as plt\n'), ((2985, 3012), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (2994, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3035), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (3023, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3036, 3055), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3046, 3055), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3103), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (3066, 3103), True, 'import matplotlib.pyplot as plt\n'), ((3116, 3190), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./pics/chatsLSTM/accuracy.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./pics/chatsLSTM/accuracy.png', bbox_inches='tight', dpi=300)\n", (3127, 3190), True, 'import matplotlib.pyplot as plt\n'), ((3221, 3233), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3231, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3267), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (3242, 3267), True, 'import matplotlib.pyplot as plt\n'), ((3268, 3305), 'matplotlib.pyplot.plot', 'plt.plot', 
(["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (3276, 3305), True, 'import matplotlib.pyplot as plt\n'), ((3306, 3329), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (3315, 3329), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3348), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (3340, 3348), True, 'import matplotlib.pyplot as plt\n'), ((3349, 3368), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (3359, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3369, 3416), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (3379, 3416), True, 'import matplotlib.pyplot as plt\n'), ((3429, 3499), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./pics/chatsLSTM/loss.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./pics/chatsLSTM/loss.png', bbox_inches='tight', dpi=300)\n", (3440, 3499), True, 'import matplotlib.pyplot as plt\n'), ((3534, 3546), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3544, 3546), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3584), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (3555, 3584), True, 'import matplotlib.pyplot as plt\n'), ((3585, 3629), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_precision_m']"], {}), "(history.history['val_precision_m'])\n", (3593, 3629), True, 'import matplotlib.pyplot as plt\n'), ((3630, 3657), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (3639, 3657), True, 'import matplotlib.pyplot as plt\n'), ((3658, 3681), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (3668, 3681), True, 'import matplotlib.pyplot as plt\n'), ((3682, 3701), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], 
{}), "('epoch')\n", (3692, 3701), True, 'import matplotlib.pyplot as plt\n'), ((3702, 3749), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (3712, 3749), True, 'import matplotlib.pyplot as plt\n'), ((3766, 3841), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./pics/chatsLSTM/precision.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./pics/chatsLSTM/precision.png', bbox_inches='tight', dpi=300)\n", (3777, 3841), True, 'import matplotlib.pyplot as plt\n'), ((3873, 3885), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3883, 3885), True, 'import matplotlib.pyplot as plt\n'), ((3886, 3923), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (3894, 3923), True, 'import matplotlib.pyplot as plt\n'), ((3924, 3965), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_recall_m']"], {}), "(history.history['val_recall_m'])\n", (3932, 3965), True, 'import matplotlib.pyplot as plt\n'), ((3966, 3993), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (3975, 3993), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4014), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recall"""'], {}), "('recall')\n", (4004, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4015, 4034), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4025, 4034), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4082), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (4045, 4082), True, 'import matplotlib.pyplot as plt\n'), ((4100, 4172), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./pics/chatsLSTM/recall.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./pics/chatsLSTM/recall.png', bbox_inches='tight', dpi=300)\n", (4111, 4172), 
True, 'import matplotlib.pyplot as plt\n'), ((4200, 4212), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4210, 4212), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4250), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {}), "(history.history['accuracy'])\n", (4221, 4250), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4288), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_f1_m']"], {}), "(history.history['val_f1_m'])\n", (4259, 4288), True, 'import matplotlib.pyplot as plt\n'), ((4289, 4316), 'matplotlib.pyplot.title', 'plt.title', (['"""model accuracy"""'], {}), "('model accuracy')\n", (4298, 4316), True, 'import matplotlib.pyplot as plt\n'), ((4317, 4339), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f1 score"""'], {}), "('f1 score')\n", (4327, 4339), True, 'import matplotlib.pyplot as plt\n'), ((4340, 4359), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (4350, 4359), True, 'import matplotlib.pyplot as plt\n'), ((4360, 4407), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (4370, 4407), True, 'import matplotlib.pyplot as plt\n'), ((4424, 4497), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./pics/chatsLSTM/f1score.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "('./pics/chatsLSTM/f1score.png', bbox_inches='tight', dpi=300)\n", (4435, 4497), True, 'import matplotlib.pyplot as plt\n'), ((4521, 4634), 'keras.utils.vis_utils.plot_model', 'plot_model', (['model'], {'to_file': '"""./pics/chatsLSTM/model_summaryplot.png"""', 'show_shapes': '(True)', 'show_layer_names': '(False)'}), "(model, to_file='./pics/chatsLSTM/model_summaryplot.png',\n show_shapes=True, show_layer_names=False)\n", (4531, 4634), False, 'from keras.utils.vis_utils import plot_model\n'), ((4728, 4745), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (4743, 4745), True, 'from 
keras import backend as K\n'), ((1130, 1140), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1138, 1140), False, 'from pathlib import Path\n'), ((1221, 1231), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1229, 1231), False, 'from pathlib import Path\n'), ((1612, 1620), 'keras.layers.LSTM', 'LSTM', (['(32)'], {}), '(32)\n', (1616, 1620), False, 'from keras.layers import Embedding, Dense, LSTM\n'), ((1638, 1669), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""sigmoid"""'}), "(10, activation='sigmoid')\n", (1643, 1669), False, 'from keras.layers import Embedding, Dense, LSTM\n')] |
import numpy as np
import plotly.graph_objs as go
def volcovered(robobj, trajectorymat):
l = len(trajectorymat)
arr = np.zeros((3, (robobj.jointno - 1)*l + 1))
arr[0][0] = trajectorymat[0][1][0]
arr[1][0] = trajectorymat[0][1][1]
arr[2][0] = trajectorymat[0][1][2]
i = 0
m = 1
while i < l:
k = 2
while k < robobj.jointno + 1:
arr[0][m] = trajectorymat[i][k][0]
arr[1][m] = trajectorymat[i][k][1]
arr[2][m] = trajectorymat[i][k][2]
k = k + 1
m = m + 1
i = i + 1
vol = go.Mesh3d(x=arr[0],
y=arr[1],
z=arr[2],
opacity=0.8,
color='rgba(244,22,100,0.6)'
)
data = [vol]
return data
#needed fixing future project
def obstacle(l, b, h, pos):
x_neg = pos[0] - l/2
x_pos = pos[0] + l/2
y_neg = pos[1] - b/2
y_pos = pos[1] + b/2
z_neg = pos[2] - h/2
z_pos = pos[2] + h/2
offset = 0.01
x_arr1 = np.zeros(4)
y_arr1 = np.zeros(4)
z_arr1 = np.zeros(4)
[x_arr1[0], y_arr1[0], z_arr1[0]] = [x_pos + offset, y_pos, z_pos]
[x_arr1[1], y_arr1[1], z_arr1[1]] = [x_pos, y_pos, z_neg]
[x_arr1[2], y_arr1[2], z_arr1[2]] = [x_pos, y_neg, z_neg]
[x_arr1[3], y_arr1[3], z_arr1[3]] = [x_pos + offset, y_neg, z_pos]
x_arr2 = np.zeros(4)
y_arr2 = np.zeros(4)
z_arr2 = np.zeros(4)
[x_arr2[0], y_arr2[0], z_arr2[0]] = [x_pos, y_pos + offset, z_pos]
[x_arr2[1], y_arr2[1], z_arr2[1]] = [x_pos, y_pos, z_neg]
[x_arr2[2], y_arr2[2], z_arr2[2]] = [x_neg, y_pos, z_neg]
[x_arr2[3], y_arr2[3], z_arr2[3]] = [x_neg, y_pos + offset, z_pos]
x_arr3 = np.zeros(4)
y_arr3 = np.zeros(4)
z_arr3 = np.zeros(4)
[x_arr3[0], y_arr3[0], z_arr3[0]] = [x_neg - offset, y_pos, z_pos]
[x_arr3[1], y_arr3[1], z_arr3[1]] = [x_neg - offset, y_neg, z_pos]
[x_arr3[2], y_arr3[2], z_arr3[2]] = [x_neg, y_neg, z_neg]
[x_arr3[3], y_arr3[3], z_arr3[3]] = [x_neg, y_pos, z_neg]
x_arr4 = np.zeros(4)
y_arr4 = np.zeros(4)
z_arr4 = np.zeros(4)
[x_arr4[0], y_arr4[0], z_arr4[0]] = [x_pos, y_neg - offset, z_pos]
[x_arr4[1], y_arr4[1], z_arr4[1]] = [x_pos, y_neg, z_neg]
[x_arr4[2], y_arr4[2], z_arr4[2]] = [x_neg, y_neg, z_neg]
[x_arr4[3], y_arr4[3], z_arr4[3]] = [x_neg, y_neg - offset, z_pos]
x_arr5 = np.zeros(4)
y_arr5 = np.zeros(4)
z_arr5 = np.zeros(4)
[x_arr5[0], y_arr5[0], z_arr5[0]] = [x_pos, y_pos, z_pos]
[x_arr5[1], y_arr5[1], z_arr5[1]] = [x_pos, y_neg, z_pos]
[x_arr5[2], y_arr5[2], z_arr5[2]] = [x_neg, y_pos, z_pos]
[x_arr5[3], y_arr5[3], z_arr5[3]] = [x_neg, y_neg, z_pos]
x_arr6 = np.zeros(4)
y_arr6 = np.zeros(4)
z_arr6 = np.zeros(4)
[x_arr6[0], y_arr6[0], z_arr6[0]] = [x_pos, y_pos, z_neg]
[x_arr6[1], y_arr6[1], z_arr6[1]] = [x_pos, y_neg, z_neg]
[x_arr6[2], y_arr6[2], z_arr6[2]] = [x_neg, y_pos, z_neg]
[x_arr6[3], y_arr6[3], z_arr6[3]] = [x_neg, y_neg, z_neg]
data1 = go.Mesh3d(x= x_arr1, y = y_arr1, z= z_arr1,
color ='red',
opacity = 0.5)
data2 = go.Mesh3d(x= x_arr2, y = y_arr2, z= z_arr2,
color ='red',
opacity = 0.5)
data3 = go.Mesh3d(x= x_arr3, y = y_arr3, z= z_arr3,
color ='red',
opacity = 0.5)
data4 = go.Mesh3d(x= x_arr4, y = y_arr4, z= z_arr4,
color ='red',
opacity = 0.5)
data5 = go.Mesh3d(x= x_arr5, y = y_arr5, z= z_arr5,
color ='red',
opacity = 0.5)
data6 = go.Mesh3d(x= x_arr6, y = y_arr6, z= z_arr6,
color ='red',
opacity = 0.5)
data = [data1, data2, data3, data4, data5, data6]
return data
| [
"plotly.graph_objs.Mesh3d",
"numpy.zeros"
] | [((130, 173), 'numpy.zeros', 'np.zeros', (['(3, (robobj.jointno - 1) * l + 1)'], {}), '((3, (robobj.jointno - 1) * l + 1))\n', (138, 173), True, 'import numpy as np\n'), ((595, 682), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'arr[0]', 'y': 'arr[1]', 'z': 'arr[2]', 'opacity': '(0.8)', 'color': '"""rgba(244,22,100,0.6)"""'}), "(x=arr[0], y=arr[1], z=arr[2], opacity=0.8, color=\n 'rgba(244,22,100,0.6)')\n", (604, 682), True, 'import plotly.graph_objs as go\n'), ((1054, 1065), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1062, 1065), True, 'import numpy as np\n'), ((1079, 1090), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1087, 1090), True, 'import numpy as np\n'), ((1104, 1115), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1112, 1115), True, 'import numpy as np\n'), ((1397, 1408), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1405, 1408), True, 'import numpy as np\n'), ((1422, 1433), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1430, 1433), True, 'import numpy as np\n'), ((1447, 1458), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1455, 1458), True, 'import numpy as np\n'), ((1740, 1751), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1748, 1751), True, 'import numpy as np\n'), ((1765, 1776), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1773, 1776), True, 'import numpy as np\n'), ((1790, 1801), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1798, 1801), True, 'import numpy as np\n'), ((2083, 2094), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2091, 2094), True, 'import numpy as np\n'), ((2108, 2119), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2116, 2119), True, 'import numpy as np\n'), ((2133, 2144), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2141, 2144), True, 'import numpy as np\n'), ((2426, 2437), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2434, 2437), True, 'import numpy as np\n'), ((2451, 2462), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2459, 2462), 
True, 'import numpy as np\n'), ((2476, 2487), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2484, 2487), True, 'import numpy as np\n'), ((2751, 2762), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2759, 2762), True, 'import numpy as np\n'), ((2776, 2787), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2784, 2787), True, 'import numpy as np\n'), ((2801, 2812), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2809, 2812), True, 'import numpy as np\n'), ((3076, 3141), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'x_arr1', 'y': 'y_arr1', 'z': 'z_arr1', 'color': '"""red"""', 'opacity': '(0.5)'}), "(x=x_arr1, y=y_arr1, z=z_arr1, color='red', opacity=0.5)\n", (3085, 3141), True, 'import plotly.graph_objs as go\n'), ((3205, 3270), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'x_arr2', 'y': 'y_arr2', 'z': 'z_arr2', 'color': '"""red"""', 'opacity': '(0.5)'}), "(x=x_arr2, y=y_arr2, z=z_arr2, color='red', opacity=0.5)\n", (3214, 3270), True, 'import plotly.graph_objs as go\n'), ((3334, 3399), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'x_arr3', 'y': 'y_arr3', 'z': 'z_arr3', 'color': '"""red"""', 'opacity': '(0.5)'}), "(x=x_arr3, y=y_arr3, z=z_arr3, color='red', opacity=0.5)\n", (3343, 3399), True, 'import plotly.graph_objs as go\n'), ((3463, 3528), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'x_arr4', 'y': 'y_arr4', 'z': 'z_arr4', 'color': '"""red"""', 'opacity': '(0.5)'}), "(x=x_arr4, y=y_arr4, z=z_arr4, color='red', opacity=0.5)\n", (3472, 3528), True, 'import plotly.graph_objs as go\n'), ((3592, 3657), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'x_arr5', 'y': 'y_arr5', 'z': 'z_arr5', 'color': '"""red"""', 'opacity': '(0.5)'}), "(x=x_arr5, y=y_arr5, z=z_arr5, color='red', opacity=0.5)\n", (3601, 3657), True, 'import plotly.graph_objs as go\n'), ((3721, 3786), 'plotly.graph_objs.Mesh3d', 'go.Mesh3d', ([], {'x': 'x_arr6', 'y': 'y_arr6', 'z': 'z_arr6', 'color': '"""red"""', 'opacity': '(0.5)'}), "(x=x_arr6, y=y_arr6, 
z=z_arr6, color='red', opacity=0.5)\n", (3730, 3786), True, 'import plotly.graph_objs as go\n')] |
"""
COGCC_2019_productiondata.py loads well location and production data that was downloaded from the COGCC.
The data are used to calculate annual production per site, and the resulting array is saved.
The COGCC provides production and location data by well. In order to group wells into sites, any wells within
50 meters of each other are assumed to be part of the same site.
"""
import pandas as pd
import numpy as np
import pysal as ps
import geopy.distance
from os.path import dirname, abspath
import pickle
from ..input_data_classes import DataFile
import os
# Resolve resource paths relative to this package's directory layout.
rsc_path, _ = os.path.split(dirname(abspath(__file__)))
prod_path = os.path.join(rsc_path, 'RawData', 'COGCC-2019-Production.xlsx')
loc_path = os.path.join(rsc_path, 'RawData', 'COGCC-2019-well-locations.dbf')
file_out = os.path.join(rsc_path, 'DataObjectInstances', 'COGCC_site_prod_2019.p')
# Load location data
# Read the .dbf attribute table into a DataFrame, upper-casing column names.
db = ps.lib.io.open(loc_path)
d = {str.upper(col): db.by_col(col) for col in db.header}
wells = pd.DataFrame(d)
db.close()
# load production data
dat = pd.read_excel(prod_path)
# extract relevant data from the production data frame
sqnum = np.array(dat['api_seq_num'], dtype=str)
ccode = np.array(dat['api_county_code'], dtype=str)
# NOTE(review): sdtrack, formcode, and opnum are extracted but never used below.
sdtrack = np.array(dat['sidetrack_num'], dtype=str)
formcode = np.array(dat['formation_code'], dtype=str)
opnum = np.array(dat['operator_num'], dtype=str)
# Store county and sequence codes to a list
# Zero-pad the county code to 3 digits and the sequence number to 5 digits so
# that their concatenation matches the API codes in the location data.
cscode = []
for ind in range(len(sqnum)):
    c, s = ccode[ind], sqnum[ind]
    while len(c) < 3:
        c = '0' + c
    while len(s) < 5:
        s = '0' + s
    cscode.append(c + s)
# extract relevant data from the location data object
# Keep only wells whose facility status is 'PR' (producing).
cond = wells.FACIL_STAT == 'PR'
llsqnum = np.array(wells.API_SEQ[cond], dtype='str')
llcounty = np.array(wells.API_COUNTY[cond], dtype='str')
lats, longs, APIs = list(wells.LATITUDE[cond]), list(wells.LONGITUDE[cond]), list(wells.API[cond])
# Sort locations by latitude; binary_find_min below requires sorted latitudes.
sortargs = np.argsort(lats)
lats = [lats[l] for l in sortargs]
longs = [longs[l] for l in sortargs]
APIs = [APIs[l] for l in sortargs]
# Determine which wells in the production data set are not in the location data set
missing_codes = []
for ap in APIs:
    if ap not in cscode:
        missing_codes.append(ap)
cond = np.ones(len(APIs), dtype=bool)
for ind in range(len(APIs)):
    if APIs[ind] in missing_codes:
        cond[ind] = 0
# Sanity check against the count observed when this script was written.
print("The number of excluded indexes is {:0.0f} and should be 144.".
    format(len(cond) - np.sum(cond)))
# Drop wells without production records, then re-sort by latitude.
api = np.array(APIs)[cond]
lat = np.array(lats)[cond]
long = np.array(longs)[cond]
sortargs = np.argsort(lat)
lat = list(lat[sortargs])
long = list(long[sortargs])
api = list(api[sortargs])
def distance(places, point):
    """Geodesic distances from each entry of ``places`` to ``point``.

    Parameters
    ----------
    places: array or list of reals
        Sequence of (latitude, longitude) pairs in degrees.
    point: array of reals
        A single (latitude, longitude) pair in degrees.

    Returns
    -------
    dist: ndarray
        Geodesic distance (km) from each element of ``places`` to ``point``.
    """
    ref = point[:2]
    return np.array([geopy.distance.distance(pl[:2], ref).km for pl in places])
def crude_lat_dist(l1, l2):
    """Fast approximate north-south distance between two latitudes.

    Intended only as a cheap rejection test for points that cannot satisfy
    a maximum-distance criterion; it ignores longitude entirely.

    Parameters
    ----------
    l1, l2: real or array of reals
        Latitudes in degrees.

    Returns
    -------
    The north-south distance between the latitudes in km.
    """
    earth_radius = 6373  # km
    # arc length = R * |delta latitude in radians|
    return earth_radius * np.pi / 180 * np.abs(l1 - l2)
def binary_find_min(a, ls):
    """Binary-search ``ls`` for the element nearest to ``a``.

    Parameters
    ----------
    a: scalar
        The value to locate.
    ls: list of scalars
        Must be sorted in ascending order.

    Returns
    -------
    int
        Index of the entry of ``ls`` closest to ``a``; exact matches return
        immediately, and distance ties favour the lower index.
    """
    lo, hi = 0, len(ls) - 1
    # Narrow [lo, hi] until the two ends bracket a (or a is hit exactly).
    while hi - lo > 1:
        mid = (hi - lo) // 2 + lo
        if ls[mid] > a:
            hi = mid
        elif ls[mid] < a:
            lo = mid
        else:
            return mid
    # Pick whichever bracket end is closer; ties go to the lower index.
    return hi if np.abs(ls[hi] - a) < np.abs(ls[lo] - a) else lo
def finder(ll, lats, longs, APIs, pad, max_dist=0.05):
    """Recursively collect all wells within ``max_dist`` of ``ll`` into ``pad``.

    Appends ``ll`` to ``pad``, removes every remaining well within
    ``max_dist`` of it from ``lats``/``longs``/``APIs`` (mutated in place),
    and recurses on each removed well so that chains of nearby wells end up
    in the same pad.

    Parameters
    ----------
    ll: list
        [latitude, longitude, API] of the well under consideration.
    lats: list
        Sorted (ascending) latitudes of the wells not yet assigned to a pad.
    longs: list
        Longitudes associated with ``lats``.
    APIs: list
        API codes associated with ``lats``.
    pad: list
        Accumulator for the wells grouped into the current pad.
    max_dist: float, default=0.05
        Maximum distance (km) to the nearest well to join the pad.

    Returns
    -------
    None
    """
    pad.append(ll)
    if len(lats) < 1:
        return
    minind = binary_find_min(ll[0], lats)
    # preliminary fast culling of distant wells
    if crude_lat_dist(ll[0], lats[minind]) > max_dist * 10:
        return
    # Sweep outward from the nearest latitude in both directions, keeping
    # every index that passes the crude north-south distance cut.
    cond = []
    temp = minind
    while temp >= 0 and \
            crude_lat_dist(ll[0], lats[temp]) <= max_dist * 10:
        cond.append(temp)
        temp -= 1
    temp = minind + 1
    while temp < len(lats) and \
            crude_lat_dist(ll[0], lats[temp]) <= max_dist * 10:
        cond.append(temp)
        temp += 1
    # Descending order so the deletions below do not shift later indices.
    cond.sort(reverse=True)
    # final selection of nearby wells to be included in the pad
    la = [lats[l] for l in cond]
    lo = [longs[l] for l in cond]
    distances = distance(np.array(list(zip(la, lo))), ll)
    cond2 = np.where(distances < max_dist)[0]
    # BUGFIX: was `len(cond) == 0`, which is never true here (cond always
    # contains minind after the crude cut); the intent is to stop when no
    # well passes the exact distance test.
    if len(cond2) == 0:
        return
    winners = []
    for c2 in cond2:
        winners.append([lats[cond[c2]], longs[cond[c2]],
                        APIs[cond[c2]]])
        del (lats[cond[c2]])
        del (longs[cond[c2]])
        del (APIs[cond[c2]])
    # Recurse on each absorbed well to pull in its own neighbours.
    for w in winners:
        finder(w, lats, longs, APIs, pad)
# Group wells into pads: repeatedly seed a new pad with the lowest-latitude
# remaining well and let finder() recursively absorb all wells within
# max_dist of any member. finder() mutates lat/long/api in place.
pad = []
while len(lat) > 0:
    if len(lat) % 1000 == 0:
        print(len(lat))  # progress indicator
    ll = [lat[0], long[0], api[0]]
    del (lat[0], long[0], api[0])
    pad.append([])
    finder(ll, lat, long, api, pad[-1])
# Aggregate per site: mean location plus summed gas and oil production.
site_prod = []
cscode = np.array(cscode)
for site in pad:
    lats = []
    longs = []
    site_prod.append([0, 0, 0, 0])
    for well in site:
        # Match this well's 8-character API code against production records.
        cond = np.where(cscode == well[2])[0]
        site_prod[-1][2] += np.sum(dat['gas_prod'][cond])
        site_prod[-1][3] += np.sum(dat['oil_prod'][cond])
        lats.append(well[0])
        longs.append(well[1])
    site_prod[-1][0] = np.mean(lats)
    site_prod[-1][1] = np.mean(longs)
site_prod = np.array(site_prod)
raw_data_file = ['COGCC-2019-Production.xlsx', 'COGCC-2019-well-locations.dbf']
notes = """"
This object stores well site location and production data developed using data from the COGCC.
Well location and production data that were downloaded from the COGCC.
Production and location data by well for 2019 were downloaded from https://cogcc.state.co.us/data2.html#/downloads.
The downloads were completed on Jan. 13, 2020. In order to group wells into sites, any wells within
50 meters of each other were assumed to be part of the same site.
site_prod.site_prod contains 4 columns: latitude (degrees), longitude (degrees), gas production (mcf/year), oil
production (bbl/year)
"""
# Wrap the array in a DataFile instance (rebinds site_prod) and pickle it.
site_prod = DataFile(notes=notes, raw_file_name=raw_data_file, data_prep_file='COGCC_2019_productiondata.py',
                     site_prod=site_prod)
pickle.dump(site_prod, open(file_out, 'wb'))
| [
"pandas.DataFrame",
"os.path.abspath",
"numpy.abs",
"numpy.sum",
"pysal.lib.io.open",
"pandas.read_excel",
"numpy.argsort",
"numpy.mean",
"numpy.array",
"numpy.where",
"os.path.join"
] | [((634, 697), 'os.path.join', 'os.path.join', (['rsc_path', '"""RawData"""', '"""COGCC-2019-Production.xlsx"""'], {}), "(rsc_path, 'RawData', 'COGCC-2019-Production.xlsx')\n", (646, 697), False, 'import os\n'), ((709, 775), 'os.path.join', 'os.path.join', (['rsc_path', '"""RawData"""', '"""COGCC-2019-well-locations.dbf"""'], {}), "(rsc_path, 'RawData', 'COGCC-2019-well-locations.dbf')\n", (721, 775), False, 'import os\n'), ((787, 858), 'os.path.join', 'os.path.join', (['rsc_path', '"""DataObjectInstances"""', '"""COGCC_site_prod_2019.p"""'], {}), "(rsc_path, 'DataObjectInstances', 'COGCC_site_prod_2019.p')\n", (799, 858), False, 'import os\n'), ((887, 911), 'pysal.lib.io.open', 'ps.lib.io.open', (['loc_path'], {}), '(loc_path)\n', (901, 911), True, 'import pysal as ps\n'), ((978, 993), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (990, 993), True, 'import pandas as pd\n'), ((1035, 1059), 'pandas.read_excel', 'pd.read_excel', (['prod_path'], {}), '(prod_path)\n', (1048, 1059), True, 'import pandas as pd\n'), ((1124, 1163), 'numpy.array', 'np.array', (["dat['api_seq_num']"], {'dtype': 'str'}), "(dat['api_seq_num'], dtype=str)\n", (1132, 1163), True, 'import numpy as np\n'), ((1172, 1215), 'numpy.array', 'np.array', (["dat['api_county_code']"], {'dtype': 'str'}), "(dat['api_county_code'], dtype=str)\n", (1180, 1215), True, 'import numpy as np\n'), ((1226, 1267), 'numpy.array', 'np.array', (["dat['sidetrack_num']"], {'dtype': 'str'}), "(dat['sidetrack_num'], dtype=str)\n", (1234, 1267), True, 'import numpy as np\n'), ((1279, 1321), 'numpy.array', 'np.array', (["dat['formation_code']"], {'dtype': 'str'}), "(dat['formation_code'], dtype=str)\n", (1287, 1321), True, 'import numpy as np\n'), ((1330, 1370), 'numpy.array', 'np.array', (["dat['operator_num']"], {'dtype': 'str'}), "(dat['operator_num'], dtype=str)\n", (1338, 1370), True, 'import numpy as np\n'), ((1698, 1740), 'numpy.array', 'np.array', (['wells.API_SEQ[cond]'], {'dtype': '"""str"""'}), 
"(wells.API_SEQ[cond], dtype='str')\n", (1706, 1740), True, 'import numpy as np\n'), ((1752, 1797), 'numpy.array', 'np.array', (['wells.API_COUNTY[cond]'], {'dtype': '"""str"""'}), "(wells.API_COUNTY[cond], dtype='str')\n", (1760, 1797), True, 'import numpy as np\n'), ((1908, 1924), 'numpy.argsort', 'np.argsort', (['lats'], {}), '(lats)\n', (1918, 1924), True, 'import numpy as np\n'), ((2540, 2555), 'numpy.argsort', 'np.argsort', (['lat'], {}), '(lat)\n', (2550, 2555), True, 'import numpy as np\n'), ((6387, 6403), 'numpy.array', 'np.array', (['cscode'], {}), '(cscode)\n', (6395, 6403), True, 'import numpy as np\n'), ((6815, 6834), 'numpy.array', 'np.array', (['site_prod'], {}), '(site_prod)\n', (6823, 6834), True, 'import numpy as np\n'), ((2452, 2466), 'numpy.array', 'np.array', (['APIs'], {}), '(APIs)\n', (2460, 2466), True, 'import numpy as np\n'), ((2479, 2493), 'numpy.array', 'np.array', (['lats'], {}), '(lats)\n', (2487, 2493), True, 'import numpy as np\n'), ((2507, 2522), 'numpy.array', 'np.array', (['longs'], {}), '(longs)\n', (2515, 2522), True, 'import numpy as np\n'), ((6751, 6764), 'numpy.mean', 'np.mean', (['lats'], {}), '(lats)\n', (6758, 6764), True, 'import numpy as np\n'), ((6788, 6802), 'numpy.mean', 'np.mean', (['longs'], {}), '(longs)\n', (6795, 6802), True, 'import numpy as np\n'), ((602, 619), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (609, 619), False, 'from os.path import dirname, abspath\n'), ((3609, 3624), 'numpy.abs', 'np.abs', (['(l1 - l2)'], {}), '(l1 - l2)\n', (3615, 3624), True, 'import numpy as np\n'), ((4165, 4184), 'numpy.abs', 'np.abs', (['(ls[top] - a)'], {}), '(ls[top] - a)\n', (4171, 4184), True, 'import numpy as np\n'), ((4187, 4209), 'numpy.abs', 'np.abs', (['(ls[bottom] - a)'], {}), '(ls[bottom] - a)\n', (4193, 4209), True, 'import numpy as np\n'), ((6581, 6610), 'numpy.sum', 'np.sum', (["dat['gas_prod'][cond]"], {}), "(dat['gas_prod'][cond])\n", (6587, 6610), True, 'import numpy as np\n'), ((6639, 
6668), 'numpy.sum', 'np.sum', (["dat['oil_prod'][cond]"], {}), "(dat['oil_prod'][cond])\n", (6645, 6668), True, 'import numpy as np\n'), ((2430, 2442), 'numpy.sum', 'np.sum', (['cond'], {}), '(cond)\n', (2436, 2442), True, 'import numpy as np\n'), ((5746, 5776), 'numpy.where', 'np.where', (['(distances < max_dist)'], {}), '(distances < max_dist)\n', (5754, 5776), True, 'import numpy as np\n'), ((6522, 6549), 'numpy.where', 'np.where', (['(cscode == well[2])'], {}), '(cscode == well[2])\n', (6530, 6549), True, 'import numpy as np\n')] |
import numpy as np
from pixell import enmap, utils
from cmbpix.utils import *
from cmbpix.lensing.estimator import LensingEstimator
from cmbpix.lensing import *
from scipy.optimize import curve_fit, fsolve
from scipy.special import gamma
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
from mpi4py import MPI
try:
import cmasher as cmr
except(ModuleNotFoundError):
pass
def _lin(x, *p):
"""A generic line function.
Return y = mx + b using p[0] = b, p[1] = m.
Parameters
----------
x: real, array of reals
Point(s) at which to evaluate function.
p: array of size 2
The linear coefficients of the function: p = [b, m].
Returns
-------
y: real, array of reals
The linear function output(s) evaluated at x.
"""
return p[0] + p[1]*x
def _Pearson3(s2, N=1., sig2=1., *p):
"""Return the PDF of the Pearson Type III distribution evaluated at s2.
Evaluate the PDF of the Pearson Type III distribution at s2.
Parameters
----------
s2: value(s)
The value(s) at which to evaluate the Pearson III distribution.
N: value(s), default=1.
The number of independent samples used to estimate s2.
sig2: values(s), default=1.
The width of the distribution from which the samples of s2 were drawn.
"""
if p:
N = p[0]
sig2 = p[1]
ans = (N / (2 * sig2)) ** ((N-1)/2.)
ans *= s2 ** ((N-3)/2.)
ans *= np.exp(-N * s2 / (2 * sig2))
return ans / gamma((N-1) / 2.)
def _logPearson3(s2, N=1., sig2=1., *p):
"""Return the PDF of the Pearson Type III distribution evaluated at s2.
Evaluate the PDF of the Pearson Type III distribution at s2.
Parameters
----------
s2: value(s)
The value(s) at which to evaluate the Pearson III distribution.
N: value(s), default=1.
The number of independent samples used to estimate s2.
sig2: values(s), default=1.
The width of the distribution from which the samples of s2 were drawn.
"""
if p:
N = p[0]
sig2 = p[1]
ans = np.log((N / (2 * sig2))) * ((N-1)/2.)
ans += np.log(s2) * ((N-3)/2.)
ans += (-N * s2 / (2 * sig2))
return ans - np.log(gamma((N-1) / 2.))
def _PearsonWidth(vol, xs, N, sig2, *p):
    """Return the band of s2 where the integral of Pearson III is vol.

    Return [a, b] such that the integral of the Pearson III distribution
    over [a, b] is vol, with equal density at both ends, p(a) = p(b).

    Parameters
    ----------
    vol: value between [0,1]
        The confidence level to find for the Pearson III distribution
    xs: 1d-array
        The range of x values to consider (should bracket the mode)
    N: value
        Pearson III parameter corresponding to the number of indep samples
    sig2: value
        Pearson III parameter corresponding to the spread of sample dist
    p: optional extra positional pair
        If supplied, overrides (N, sig2).

    Returns
    -------
    [a, b]: list of size 2
        The range of x such that the confidence interval of Pearson III is vol
    """
    if p:
        N = p[0]
        sig2 = p[1]
    Px = _Pearson3(xs, N, sig2)
    imode = np.argmax(Px)
    xmode = xs[imode]
    a0 = (xmode - xs.min())/2  # initial guess for the lower edge
    def findb(a):
        # Given lower edge a, solve for b above the mode with p(b) = p(a).
        # BUGFIX: was `Pearson3(...)`, a NameError — the module defines
        # `_Pearson3`.
        Pa = _Pearson3(a, N, sig2)
        bguess = xs[imode:][np.argmin(np.abs(Px[imode:] - Pa))]
        minb = lambda b : _Pearson3(b, N, sig2) - Pa
        b = fsolve(minb, bguess)[0]
        return b
    def optP(a):
        # Residual between the probability enclosed by [a, findb(a)] and vol,
        # with the integral taken as a simple Riemann sum.
        b = findb(a)
        ab = np.linspace(a, b, 1001)
        intP = np.sum(_Pearson3(ab, N, sig2) * (ab[1]-ab[0]))
        return intP - vol
    a = fsolve(optP, a0)[0]
    b = findb(a)
    return [a, b]
def _bin_mids(b):
return (b[1:] + b[:-1]) / 2
class MPIFlatSkyLens(LensingEstimator):
"""Estimator for small scale lensing in a flat sky CMB map.
An object for reducing a pixell CMB map of the cosmic microwave
background in (mostly) pixel-space to estimate the effects
of small angular scale weak lensing due to structure formation.
Attributes
----------
cmbmap: ndmap
The input CMB map in pixell format.
ldT: value, default=2000
The maximum ell to consider for the large scale background
temperature gradient.
lmin: value, default=3000
The minimum ell to consider for the small scale temperature
fluctuations.
lmax: value, default=None
The maximum ell to consider for the small scale temperature
fluctuations. If None, there is no cutoff.
"""
def __init__(self, cmbmap, ldT=2000, lmin=3000, lmax=np.inf,
patchsize=40, Ns=[10,30], theory=None, thcl=None,
fiducialCls=None, applyWiener=True,
comm=None, filt="cos", border=5, savesteps=False,
N1=None, N2=None):
"""Initiate the estimator.
Parameters
----------
cmbmap: ndmap
A CMB map.
ldT: value, default=2000
The maximum ell to consider for the large scale background
temperature gradient.
lmin: value, default=3000
The minimum ell to consider for the small scale temperature
fluctuations.
lmax: value, default=inf
The maximum ell to consider for the small scale temperature
fluctuations. If none given, there is no cutoff.
patchsize: int, default=40
The side length of patches (in number of pixels) for the map to be
broken up into to gather local lensing statistics.
"""
if comm == None:
raise ValueError("This class requires an MPI communicator")
self.comm = comm
self.commrank = self.comm.Get_rank()
self.commsize = self.comm.Get_size()
self._Ns = Ns
if self.commrank == 0:
if N1 is not None and N2 is not None:
self.map1 = cmbmap + N1
self.map2 = cmbmap + N2
# Derived attributes
self._ly, self._lx = self.map1.lmap()
self._lmod = self.map2.modlmap()
else:
self.map_in = cmbmap
# Derived attributes
self._ly, self._lx = self.map_in.lmap()
self._lmod = self.map_in.modlmap()
self.ldT = ldT
self.lmin = lmin
self.lmax = lmax
self.fid = fiducialCls
self._p = patchsize
self._thcl = thcl
self._aW = applyWiener
self.filter= filt
self.savesteps = savesteps
self._b = border
if theory:
self._th = theory
elif self._thcl:
self._th = self.theory_integral()
def theory_integral(self):
if self.filter == 'cos':
ell = np.arange(self._thcl[0].size)
inter = np.sum((ell*self._thcl[0])[self.lmin:self.lmax]) \
/ (4*np.pi)
ell = np.arange(self._thcl[1].size)
slope = 3*np.sum((ell**3*self._thcl[1])[self.lmin:self.lmax]) \
/ (16*np.pi)
elif self.filter == 'cos2':
ell = np.arange(self._thcl[0].size)
inter = 3*np.sum((ell*self._thcl[0])[self.lmin:self.lmax]) \
/ (16*np.pi)
ell = np.arange(self._thcl[1].size)
slope = 5*np.sum((ell**3*self._thcl[1])[self.lmin:self.lmax]) \
/ (32*np.pi)
else:
ell = np.arange(self._thcl[0].size)
inter = np.sum((ell*self._thcl[0])[self.lmin:self.lmax]) \
/ (2*np.pi)
ell = np.arange(self._thcl[1].size)
slope = np.sum((ell**3*self._thcl[1])[self.lmin:self.lmax]) \
/ (4*np.pi)
return [inter, slope]
    def gather_patches_plain(self):
        """Assemble patch statistics relevant to lensing at small scales.

        Compute the small scale (lmin < ell < lmax) temperature variance in
        each patch across the sky as well as the average amplitude of the
        background temperature gradient (ell < ldT). No directional Fourier
        filter is applied, so no edge pixels are discarded.

        Sets self._T2patch (per-patch variance), self._dT2patch (per-patch
        squared mean gradient), and unless self.savesteps, deletes the
        intermediate maps afterwards.
        """
        self._edge = 0 # Not throwing away edge pixels
        m_fft = enmap.fft(self.map_in)
        # Top-hat band-pass keeping lmin < ell < lmax.
        hp = np.zeros(self.map_in.shape)
        hp[np.where((self._lmod > self.lmin) & (self._lmod < self.lmax))] = 1.
        self._Tss = enmap.ifft(m_fft * hp)
        self._dTy, self._dTx = gradient_flat(self.map_in, self.ldT)
        # Scale geometry for lower res map of patches
        pshp, pwcs = enmap.scale_geometry(self.map_in.shape,
                                          self.map_in.wcs, 1./self._p)
        if not self.savesteps:
            del self.map_in, m_fft
        self._T2patch = enmap.zeros(pshp, pwcs)
        self._dTxpatch = enmap.zeros(pshp, pwcs)
        self._dTypatch = enmap.zeros(pshp, pwcs)
        # Reshape each map into (npatch_y, p, npatch_x, p) so per-patch
        # statistics reduce over axes (1, 3); trailing pixels that do not
        # fill a whole patch are dropped by the slicing.
        Trs = self._Tss[:pshp[-2]*self._p,
                        :pshp[-1]*self._p].reshape([pshp[-2], self._p,
                                                    pshp[-1], self._p])
        dTxrs = self._dTx[:pshp[-2]*self._p,
                          :pshp[-1]*self._p].reshape([pshp[-2], self._p,
                                                      pshp[-1], self._p])
        dTyrs = self._dTy[:pshp[-2]*self._p,
                          :pshp[-1]*self._p].reshape([pshp[-2], self._p,
                                                      pshp[-1], self._p])
        self._T2patch[:,:] = np.var(Trs, axis=(1,3))
        self._dTypatch[:,:] = np.mean(dTyrs, axis=(1,3))
        self._dTxpatch[:,:] = np.mean(dTxrs, axis=(1,3))
        self._dT2patch = self._dTxpatch**2 + self._dTypatch**2
        if not self.savesteps:
            del self._dTypatch, self._dTxpatch, self._dTy, self._dTx, self._Tss
    def gather_patches_cos(self):
        """Assemble patch statistics for small scale lensing with cos filter.

        Compute the small scale (lmin < ell < lmax) temperature power in
        patches across the sky as well as the average amplitude of the
        background temperature gradient (ell < ldT). For the small scale
        statistics, also apply a filter in Fourier space such that:

        .. math::

            f_\\ell = \\cos(\\hat{\\ell}\\cdot\\hat{\\nabla T})
        """
        self._edge = 5 # Edge pixels to throw away
        p = self._p
        m_fft = enmap.fft(self.map_in)
        # Top-hat band-pass keeping lmin < ell < lmax.
        hp = np.zeros(self.map_in.shape)
        hp[np.where((self._lmod > self.lmin) & (self._lmod < self.lmax))] = 1.
        # Apply pre-whitening or Wiener/inverse variance filters, then top hat
        if self._aW and self.fid is not None:
            cs = CubicSpline(self.fid[0], self.fid[1]) # (ell, Cl)
            m_fft = m_fft / cs(self._lmod)
        self._Tss = enmap.ifft(m_fft * hp)
        self._dTy, self._dTx = gradient_flat(self.map_in, self.ldT)
        # Scale geometry for lower res map of patches
        pshp, pwcs = enmap.scale_geometry(self.map_in.shape,
                                          self.map_in.wcs, 1./self._p)
        if not self.savesteps:
            del self.map_in, m_fft
        self._T2patch = enmap.zeros(pshp, pwcs)
        self._dTxpatch = enmap.zeros(pshp, pwcs)
        self._dTypatch = enmap.zeros(pshp, pwcs)
        self._T_sub = np.zeros((pshp[-2], pshp[-1], p, p))
        for i in range(self._T2patch.shape[-2]):
            for j in range(self._T2patch.shape[-1]):
                # Mean background gradient over this p x p patch.
                self._dTypatch[i,j] = np.mean(self._dTy[i*p:(i+1)*p,
                                                        j*p:(j+1)*p])
                self._dTxpatch[i,j] = np.mean(self._dTx[i*p:(i+1)*p,
                                                        j*p:(j+1)*p])
                Tp = self._Tss[i*p:(i+1)*p,j*p:(j+1)*p]
                lsuby, lsubx = Tp.lmap()
                lsubmod = Tp.modlmap()
                lsubmod[0,0] = 1.  # avoid divide-by-zero; fl[0,0] reset below
                # f_ell = i * cos(angle between ell and the patch gradient);
                # NOTE(review): the 1j factor makes fl odd under ell -> -ell,
                # presumably so the inverse FFT stays real — confirm.
                fl = 1.j*(lsubx*self._dTxpatch[i,j] + \
                          lsuby*self._dTypatch[i,j]) / \
                          (lsubmod * np.sqrt(self._dTxpatch[i,j]**2 + \
                                             self._dTypatch[i,j]**2))
                fl[0,0] = 0.
                self._T_sub[i,j,:,:] = enmap.ifft(enmap.fft(Tp)*fl).real
                # Throw away pixels with edge effects
                # (hard-coded 5 matches self._edge set above)
                self._T2patch[i,j] = np.var(self._T_sub[i,j,5:-5,5:-5])
        self._dT2patch = self._dTxpatch**2 + self._dTypatch**2
        if not self.savesteps:
            del self._dTypatch, self._dTxpatch, self._dTy, self._dTx, self._Tss
            del self._T_sub
    def gather_covpatches(self):
        """Assemble patch statistics from the covariance of two noisy maps.

        Like the single-map gatherers, but operates on the two noisy map
        copies (self.map1, self.map2) set up in __init__ and records, per
        patch, the cross-covariance of the two filtered small-scale maps
        instead of a single-map variance — so uncorrelated noise drops out
        of the statistic. The Fourier filter applied to the small scales is
        selected by self.filter ("cos", "cos2", or none), and a border of
        self._b pixels is excluded from all patch statistics.
        """
        self._edge = 5 # Edge pixels to throw away
        p = self._p
        m_fft1 = enmap.fft(self.map1)
        m_fft2 = enmap.fft(self.map2)
        # Top-hat band-pass keeping lmin < ell < lmax.
        hp = np.zeros(self.map1.shape)
        hp[np.where((self._lmod > self.lmin) & (self._lmod < self.lmax))] = 1.
        # Apply pre-whitening or Wiener/inverse variance filters, then top hat
        if self._aW and self.fid is not None:
            cs = CubicSpline(self.fid[0], self.fid[1]) # (ell, Cl)
            m_fft1 = m_fft1 / cs(self._lmod)
            m_fft2 = m_fft2 / cs(self._lmod)
        self._Tss1 = enmap.ifft(m_fft1 * hp)
        self._Tss2 = enmap.ifft(m_fft2 * hp)
        # Background gradient is taken from map1 only.
        self._dTy, self._dTx = gradient_flat(self.map1, self.ldT)
        # Scale geometry for lower res map of patches
        pshp, pwcs = enmap.scale_geometry(self.map1.shape,
                                          self.map1.wcs, 1./self._p)
        if not self.savesteps:
            del self.map1, m_fft1, self.map2, m_fft2
        self._T2patch = enmap.zeros(pshp, pwcs)
        self._dTxpatch = enmap.zeros(pshp, pwcs)
        self._dTypatch = enmap.zeros(pshp, pwcs)
        self._dTxspread = enmap.zeros(pshp, pwcs)
        self._dTyspread = enmap.zeros(pshp, pwcs)
        self._T_sub1 = np.zeros((pshp[-2], pshp[-1], p, p))
        self._T_sub2 = np.zeros((pshp[-2], pshp[-1], p, p))
        b = self._b
        for i in range(self._T2patch.shape[-2]):
            for j in range(self._T2patch.shape[-1]):
                # Mean and scatter of the background gradient over the
                # interior (border-excluded) pixels of this patch.
                self._dTypatch[i,j] = np.mean(self._dTy[i*p+b:(i+1)*p-b,
                                                        j*p+b:(j+1)*p-b])
                self._dTxpatch[i,j] = np.mean(self._dTx[i*p+b:(i+1)*p-b,
                                                        j*p+b:(j+1)*p-b])
                self._dTyspread[i,j] = np.std(self._dTy[i*p+b:(i+1)*p-b,
                                                        j*p+b:(j+1)*p-b])
                self._dTxspread[i,j] = np.std(self._dTx[i*p+b:(i+1)*p-b,
                                                        j*p+b:(j+1)*p-b])
                Tp1 = self._Tss1[i*p:(i+1)*p,j*p:(j+1)*p]
                Tp2 = self._Tss2[i*p:(i+1)*p,j*p:(j+1)*p]
                lsuby, lsubx = Tp1.lmap()
                lsubmod = Tp1.modlmap()
                lsubmod[0,0] = 1.  # avoid divide-by-zero; fl[0,0] reset below
                # Directional filter aligned with the patch gradient.
                if self.filter == 'cos':
                    fl = 1.j*(lsubx*self._dTxpatch[i,j] + \
                              lsuby*self._dTypatch[i,j]) / \
                              (lsubmod * np.sqrt(self._dTxpatch[i,j]**2 + \
                                                 self._dTypatch[i,j]**2))
                elif self.filter=='cos2':
                    fl = (lsubx*self._dTxpatch[i,j] + \
                          lsuby*self._dTypatch[i,j])**2 / \
                          (lsubmod * np.sqrt(self._dTxpatch[i,j]**2 + \
                                             self._dTypatch[i,j]**2))**2
                else:
                    fl = np.ones(lsubx.shape)
                fl[0,0] = 0.
                self._T_sub1[i,j,:,:] = enmap.ifft(enmap.fft(Tp1)*fl).real
                self._T_sub2[i,j,:,:] = enmap.ifft(enmap.fft(Tp2)*fl).real
                # Throw away pixels with edge effects
                if b == 0:
                    T1 = self._T_sub1[i,j].flatten()
                    T2 = self._T_sub2[i,j].flatten()
                else:
                    T1 = self._T_sub1[i,j,b:-b,b:-b].flatten()
                    T2 = self._T_sub2[i,j,b:-b,b:-b].flatten()
                # Off-diagonal element of the 2x2 covariance = cross-cov.
                self._T2patch[i,j] = np.cov(T1, T2)[0,1]
        self._dT2patch = self._dTxpatch**2 + self._dTypatch**2
        # self._T2patch = np.abs(self._T2patch)
        if not self.savesteps:
            del self._dTypatch, self._dTxpatch, self._dTy, self._dTx
            del self._dTyspread, self._dTxspread
            del self._T_sub1, self._T_sub2, self._Tss1, self._Tss2
    def gather_patches_cos2(self):
        """Assemble patch statistics for small scale lensing with cos^2 filter.

        Compute the small scale (lmin < ell < lmax) temperature power in
        patches across the sky as well as the average amplitude of the
        background temperature gradient (ell < ldT). For the small scale
        statistics, also apply a filter in Fourier space such that:

        .. math::

            f_\\ell = \\cos^2(\\hat{\\ell}\\cdot\\hat{\\nabla T})
        """
        self._edge = 3 # Edge pixels to throw away
        p = self._p
        m_fft = enmap.fft(self.map_in)
        # Top-hat band-pass keeping lmin < ell < lmax.
        hp = np.zeros(self.map_in.shape)
        hp[np.where((self._lmod > self.lmin) & (self._lmod < self.lmax))] = 1.
        self._Tss = enmap.ifft(m_fft * hp)
        self._dTy, self._dTx = gradient_flat(self.map_in, self.ldT)
        # Scale geometry for lower res map of patches
        pshp, pwcs = enmap.scale_geometry(self.map_in.shape,
                                          self.map_in.wcs, 1./self._p)
        if not self.savesteps:
            del self.map_in, m_fft
        self._T2patch = enmap.zeros(pshp, pwcs)
        self._dTxpatch = enmap.zeros(pshp, pwcs)
        self._dTypatch = enmap.zeros(pshp, pwcs)
        self._T_sub = np.zeros((pshp[-2], pshp[-1], p, p))
        for i in range(self._T2patch.shape[-2]):
            for j in range(self._T2patch.shape[-1]):
                # Mean background gradient over this p x p patch.
                self._dTypatch[i,j] = np.mean(self._dTy[i*p:(i+1)*p,
                                                        j*p:(j+1)*p])
                self._dTxpatch[i,j] = np.mean(self._dTx[i*p:(i+1)*p,
                                                        j*p:(j+1)*p])
                Tp = self._Tss[i*p:(i+1)*p,j*p:(j+1)*p]
                lsuby, lsubx = Tp.lmap()
                lsubmod = Tp.modlmap()
                lsubmod[0,0] = 1. # Avoid divide by 0; set fl here to 0 later
                # f_ell = cos^2(angle between ell and the patch gradient).
                fl = (lsubx*self._dTxpatch[i,j] + \
                      lsuby*self._dTypatch[i,j])**2 / \
                      (lsubmod * np.sqrt(self._dTxpatch[i,j]**2 + \
                                         self._dTypatch[i,j]**2))**2
                fl[0,0] = 0.
                self._T_sub[i,j,:,:] = enmap.ifft(enmap.fft(Tp)*fl).real
                # Throw away pixels with edge effects
                # (hard-coded 3 matches self._edge set above)
                self._T2patch[i,j] = np.var(self._T_sub[i,j,3:-3,3:-3])
        self._dT2patch = self._dTxpatch**2 + self._dTypatch**2
        if not self.savesteps:
            del self._dTypatch, self._dTxpatch, self._dTy, self._dTx, self._Tss
            del self._T_sub
def gather_patches(self):
"""Assemble patch statistics
"""
if self.filter == "cos":
self.gather_patches_cos()
elif self.filter == "cos2":
self.gather_patches_cos2()
else:
self.gather_patches_plain()
    def fit_binerr(self, bins=50, edges=None, errs=None, plot=False,
                   showerr=True, showline=True, filename=None, scale='log'):
        """Fit a line to patch variance versus squared gradient.

        Sort the patch statistics by squared background gradient, estimate
        a per-point error bar from the scatter within each bin, then fit a
        line by weighted least squares. Stores the best-fit parameters in
        self.line and their covariance in self.dline.

        Parameters
        ----------
        bins: int, default=50
            Number of roughly equal-occupancy bins used to estimate errors
            when edges/errs are not supplied.
        edges: array, default=None
            Explicit bin edges in squared gradient; used together with errs.
            NOTE(review): edges[0] is overwritten with 0 in place, mutating
            the caller's array.
        errs: array, default=None
            Per-bin error bars matching edges.
        plot: bool, default=False
            If True, plot the data and (optionally) the fitted line.
        showerr: bool, default=True
            If plotting, draw error bars on the points.
        showline: bool, default=True
            If plotting, overlay the best-fit line.
        filename: str, default=None
            If given, save the plot at this location.
        scale: str, default='log'
            Matplotlib x-axis scale.
        """
        # Order all patches by their squared gradient.
        g_ord = np.argsort(self._dT2patch.flatten())
        dT_ord = self._dT2patch.flatten()[g_ord]
        T_ord = self._T2patch.flatten()[g_ord]
        T_err = np.zeros(T_ord.size)
        if errs is None and edges is None:
            N = T_ord.size // bins # Roughly equal number of points in bins
            self.bin_edges = dT_ord[np.arange(bins, dtype=int)*N]
            self.bin_edges[0] = 0 # Ensure we get everything
            self.errs = np.zeros(bins)
            # Error bar for each bin = std of the variances within it.
            for b in range(bins):
                self.errs[b] = T_ord[N*b:N*(b+1)].std()
                T_err[N*b:] = T_ord[N*b:N*(b+1)].std()
        else:
            self.bin_edges = edges
            self.bin_edges[0] = 0 # Ensure we get everything
            self.errs = errs
            # Assign each point the error of the last bin it falls above.
            for i, b in enumerate(self.bin_edges):
                T_err[np.where(dT_ord > b)] = self.errs[i]
        self._T_err = T_err
        self._T_ord = T_ord
        self._dT_ord = dT_ord
        # Weighted least-squares line fit with the binned error bars.
        popt, pcov = curve_fit(_lin, dT_ord, T_ord,
                               [np.mean(T_ord), np.mean(T_ord/dT_ord)],
                               sigma=T_err,
                               absolute_sigma=True)
        self.line = popt
        self.dline = pcov
        if plot:
            plt.figure(figsize=(12,8))
            if showerr:
                plt.errorbar(dT_ord.flatten(), T_ord.flatten(),
                             yerr=T_err, fmt='.')
            else:
                plt.plot(dT_ord.flatten(), T_ord.flatten(), '.')
            gs = np.linspace(dT_ord.min(), dT_ord.max(), 200)
            if showline:
                plt.plot(gs, _lin(gs, *popt), c='C1')
            plt.xscale(scale)
            plt.xlabel(r"$|\nabla T|^2~[\mu{\rm K}^2/rad^2$]")
            plt.ylabel(r"$\sigma_T^2~[\mu{\rm K}^2]$")
            plt.tight_layout()
            if filename is not None:
                plt.savefig(str(filename))
            plt.show()
            plt.close()
def chi2line(self, p=None):
"""Determine the reduced chi2 statistic for the given line.
Determine the reduced chi2 statistic for the given line. If no line
is given, then the fitted line is used.
Parameters
----------
p: array of size 2, default=None
The parameters of the line: [Intercept, Slope]
Returns
-------
red_chi2:
The reduced chi2 statistic for the line
"""
diff = self._T_ord - _lin(self._dT_ord, *p)
chi2 = np.sum(diff**2 / self._T_err**2)
return chi2 / (self._T_ord.size - 2)
    def chi2grid(self, plot=False, filename=None):
        """Compute the grid of reduced chi2 values around the fitted space.

        Compute the grid of reduced chi2 values for the space +/- 5 sigma
        around the parameters fitted for by one of the curve_fit methods.
        Also saves the parameters/errors inferred from the chi2 statistics
        (self.pc2, self.dpc2) and the normalized likelihood grid pieces
        used later for contour plotting (self.npgrid, self.nPgrid,
        self.nnorm, self.nsigs).

        Parameters
        ----------
        plot: bool, default=False
            If True, generate a 2D plot of the likelihood grid.
        filename: str, default=None
            If given, save the plot at this location.
        """
        # Grid +/- 5 sigma around the fitted intercept (b) and slope (m).
        bgrid = np.linspace(self.line[0] - 5*np.sqrt(self.dline[0][0]),
                            self.line[0] + 5*np.sqrt(self.dline[0][0]),
                            100
                            )
        mgrid = np.linspace(self.line[1] - 5*np.sqrt(self.dline[1][1]),
                            self.line[1] + 5*np.sqrt(self.dline[1][1]),
                            100
                            )
        db = bgrid[1] - bgrid[0]
        dm = mgrid[1] - mgrid[0]
        pgrid = np.meshgrid(bgrid, mgrid, indexing='ij')
        self.npgrid = pgrid
        self.cgrid = np.zeros((100,100))
        for i, b in enumerate(bgrid):
            for j, m in enumerate(mgrid):
                self.cgrid[i,j] = self.chi2line([b, m])
        npatch = self._dT2patch.flatten().size-2
        # Undo the reduced-chi2 normalization (chi2line divides by dof),
        # recovering total chi2 on the grid.
        self.cgrid *= npatch
        # Subtract the median chi2 before exponentiating for numerical
        # stability; the constant cancels in the normalization below.
        offset = np.median(self.cgrid)
        Pgrid = np.exp(-(self.cgrid-offset)/2)
        self.nPgrid = Pgrid
        norm = np.sum(Pgrid * db * dm)
        # Posterior means and variances of intercept and slope.
        mI = np.sum(Pgrid * pgrid[0] * db * dm) / norm
        dI = np.sum(Pgrid * (pgrid[0] - mI)**2 * db * dm) / norm
        mS = np.sum(Pgrid * pgrid[1] * db * dm) / norm
        dS = np.sum(Pgrid * (pgrid[1] - mS)**2 * db * dm) / norm
        self.pc2 = np.array([mI, mS])
        self.dpc2 = np.array([dI, dS])
        # Likelihood levels at 3/2/1 sigma offsets in slope, used as
        # contour levels when plotting.
        nsigs = []
        for i in [3,2,1]:
            nsigs.append(np.exp(-(npatch*
                self.chi2line([mI, mS+np.sqrt(dS)*i])-offset)/2)/norm)
        self.nnorm = norm
        self.nsigs = nsigs
        if plot:
            plt.figure(figsize=(12,8))
            # cmr (cmasher) is an optional import; fall back to the default
            # colormap if it is unavailable.
            try:
                plt.pcolormesh(pgrid[0], pgrid[1], Pgrid/norm,
                               cmap=cmr.ocean_r, shading='auto')
            except(NameError):
                plt.pcolormesh(pgrid[0], pgrid[1], Pgrid/norm, shading='auto')
            plt.colorbar(label=r"Likelihood")
            ncontours = plt.contour(pgrid[0], pgrid[1], Pgrid/norm, nsigs,
                                    colors='red')
            fmt = {}
            strs = [r"$3\sigma$", r"$2\sigma$", r"$1\sigma$"]
            for l, s in zip(ncontours.levels, strs):
                fmt[l] = s
            plt.clabel(ncontours, fmt=fmt, inline=True, fontsize=20)
            # Mark the theory expectation.
            plt.axvline(self._th[0], c='k')
            plt.axhline(self._th[1], c='k')
            plt.xlabel(r"Intercept [$\mu$K$^2$]")
            plt.ylabel(r"Slope [rad$^2$]")
            if filename is not None:
                plt.savefig(filename)
            plt.show()
            plt.close()
    def PearsonLikelihood(self, plot=None, plotname=None):
        """Compute the model Pearson Type III model likelihood with patches.

        Compute the model likelihood for the map's patch statistics assuming
        that the small scale temperature variance follows a Pearson Type III
        distribution dictated by the background temperature gradient.
        Estimator outputs are stored in the pP3, and errors in the dpP3
        attributes.

        Parameters
        ----------
        plot: str, default=None
            If plot is any of ['s', 'm'], then this method calls the
            Sliced or Marginalized plotting methods associated with the
            Pearson likelihood, respectively. Also works for
            ['sliced', 'marginalized'], or ['slice', 'margin']. No plot
            if None is given.
        plotname: str, default=None
            If given, then save the likelihood corner plot at this location
        """
        # Grid -5/+10 sigma around the fitted intercept (b) and slope (m),
        # and the configured range of the sample-number parameter N.
        bgrid = np.linspace(self.line[0] - 5*np.sqrt(self.dline[0][0]),
                            self.line[0] + 10*np.sqrt(self.dline[0][0]),
                            200
                            )
        mgrid = np.linspace(self.line[1] - 5*np.sqrt(self.dline[1][1]),
                            self.line[1] + 10*np.sqrt(self.dline[1][1]),
                            200
                            )
        Ngrid = np.linspace(self._Ns[0], self._Ns[1], 100)
        pgrid = np.meshgrid(bgrid, mgrid, Ngrid, indexing='ij')
        self.pgrid = pgrid
        Pgrid = np.zeros((200,200,100))
        db = bgrid[1] - bgrid[0]
        dm = mgrid[1] - mgrid[0]
        dN = Ngrid[1] - Ngrid[0]
        dT2p = self._dT2patch.flatten()
        T2p = np.abs(self._T2patch.flatten())
        # Log-likelihood summed over patches at each (b, m, N) grid point.
        for k, N in enumerate(Ngrid):
            Pgrid[:,:,k] = np.sum(_logPearson3(T2p[None,None,...], N,
                                    np.abs(_lin(dT2p,
                                        *[pgrid[0][:,:,k][...,None],
                                        pgrid[1][:,:,k][...,None]]
                                        ))), axis=2)
            # Pgrid[:,:,k] = np.sum(np.log(_Pearson3(T2p[None,None,...], N,
            #                         _lin(dT2p,
            #                             *[pgrid[0][:,:,k][...,None],
            #                             pgrid[1][:,:,k][...,None]]
            #                             ))), axis=2)
        # Subtract the mean log-likelihood before exponentiating for
        # numerical stability; the constant cancels in the normalization.
        Pmean = np.mean(Pgrid)
        Pgrid = np.exp(Pgrid-Pmean)
        norm = np.sum(Pgrid * db * dm * dN)
        Pgrid /= norm
        self.Pgrid = Pgrid
        # Posterior means and variances of intercept, slope, and N.
        mI = np.sum(Pgrid * pgrid[0] * db * dm * dN)
        dI = np.sum(Pgrid * (pgrid[0] - mI)**2 * db * dm * dN)
        mS = np.sum(Pgrid * pgrid[1] * db * dm * dN)
        dS = np.sum(Pgrid * (pgrid[1] - mS)**2 * db * dm * dN)
        mN = np.sum(Pgrid * pgrid[2] * db * dm * dN)
        # NOTE(review): dN is re-bound here from grid spacing to the N
        # variance; the right-hand side still uses the old spacing value.
        dN = np.sum(Pgrid * (pgrid[2] - mN)**2 * db * dm * dN)
        self.pP3 = np.array([mI, mS, mN])
        self.dpP3 = np.array([dI, dS, dN])
        # Likelihood levels at 3/2/1 sigma offsets in intercept, used as
        # contour levels by the plotting methods.
        sigs = []
        for i in [3,2,1]:
            cline = [mI+np.sqrt(dI)*i, mS]
            P = np.sum(np.log(_Pearson3(T2p, mN, _lin(dT2p, *cline))))
            sigs.append(np.exp(P - Pmean)/norm)
        self.sigs = sigs
        # NOTE(review): sls is unused in this method; the plotting methods
        # reference a name `sls` for linestyles — verify intended scope.
        sls = [":", "--", "-"]
        if plot == "m" or plot == "marginalized" or plot == "margin":
            self.PearsonPlotMarginalized(plotname)
        if plot == "s" or plot == "sliced" or plot == "slice":
            self.PearsonPlotSliced(plotname)
def PearsonPlotMarginalized(self, plotname=None):
        """Plot the corner plot for the computed Pearson Type III likelihood.

        The 2D likelihood panels are marginalized over the remaining
        parameter. Requires the Pearson likelihood to have been computed
        first, i.e. self.pgrid, self.Pgrid, self.sigs, self.pP3 and
        self.dpP3 must be populated.

        Parameters
        ----------
        plotname: str, default=None
            If given, then save the likelihood corner plot at this location
        """
        pgrid = self.pgrid
        Pgrid = self.Pgrid
        sigs = self.sigs
        # Line styles for the 3/2/1 sigma contours. Fix: sls was previously
        # undefined in this method (it was a local of a sibling method),
        # so the contour calls below raised a NameError.
        sls = [":", "--", "-"]
        f, axs = plt.subplots(3, 3, figsize=(10,10),
                              gridspec_kw={'hspace':0.125,
                                           'wspace':0.125})
        # 2D plots
        ## b, m: marginalize over N
        axs[2,0].pcolormesh(pgrid[0][:,:,0], pgrid[1][:,:,0],
                            np.sum(Pgrid, axis=2), cmap=cmr.ocean_r,
                            shading='auto')
        # Contours are drawn on the slice nearest the N estimate
        iN = np.argmin(np.abs(pgrid[2][0,0,:] - self.pP3[2]))
        axs[2,0].contour(pgrid[0][:,:,iN], pgrid[1][:,:,iN],
                         Pgrid[:,:,iN], sigs, linestyles=sls, colors='C1')
        # Overlay the Gaussian (chi^2) likelihood contours for comparison
        axs[2,0].contour(self.npgrid[0], self.npgrid[1],
                         self.nPgrid/self.nnorm, self.nsigs,
                         linestyles=sls, colors='red')
        axs[2,0].set(xlabel=r"$b$ [$\mu$K$^2$]", ylabel=r"$m$ [rad$^2$]")
        ## N, m: marginalize over b
        axs[2,1].pcolormesh(pgrid[2][0,:,:], pgrid[1][0,:,:],
                            np.sum(Pgrid, axis=0), cmap=cmr.ocean_r,
                            shading='auto')
        iI = np.argmin(np.abs(pgrid[0][:,0,0] - self.pP3[0]))
        axs[2,1].contour(pgrid[2][iI,:,:], pgrid[1][iI,:,:],
                         Pgrid[iI,:,:], sigs, linestyles=sls, colors='C1')
        axs[2,1].set(yticklabels=[], xlabel=r"$N$")
        ## b, N: marginalize over m
        axs[1,0].pcolormesh(pgrid[0][:,0,:], pgrid[2][:,0,:],
                            np.sum(Pgrid, axis=1), cmap=cmr.ocean_r,
                            shading='auto')
        iS = np.argmin(np.abs(self.pgrid[1][0,:,0] - self.pP3[1]))
        axs[1,0].contour(pgrid[0][:,iS,:], pgrid[2][:,iS,:],
                         Pgrid[:,iS,:], sigs, linestyles=sls, colors='C1')
        axs[1,0].set(xticklabels=[], ylabel=r"$N$")
        # 1D marginalized histograms with estimate +/- error in the title
        ## b
        axs[0,0].plot(pgrid[0][:,0,0], np.sum(Pgrid, axis=(1,2)))
        axs[0,0].set(xticklabels=[], yticks=[],
                     title=r"$b = {:.4f} \pm {:.4f}$".format(self.pP3[0],
                                                np.sqrt(self.dpP3[0])))
        ## N
        axs[1,1].plot(pgrid[2][0,0,:], np.sum(Pgrid, axis=(0,1)))
        axs[1,1].set(xticklabels=[], yticks=[],
                     title=r"$N = {:.2f} \pm {:.2f}$".format(self.pP3[2],
                                                np.sqrt(self.dpP3[2])))
        ## m
        axs[2,2].plot(self.pgrid[1][0,:,0], np.sum(Pgrid, axis=(0,2)))
        axs[2,2].set(yticks=[], xlabel=r"$m$ [rad$^2$]",
                     title=r"$m = {:.2e} \pm {:.2e}$".format(self.pP3[1],
                                                np.sqrt(self.dpP3[1])))
        # Hide unused axes of the corner grid
        axs[0,1].axis('off')
        axs[0,2].axis('off')
        axs[1,2].axis('off')
        # Only save when a path was supplied; plt.savefig(None) raises
        if plotname is not None:
            plt.savefig(plotname)
        plt.show()
def PearsonPlotSliced(self, plotname=None):
        """Plot the corner plot for the computed Pearson Type III likelihood.

        The 2D likelihood panels are slices of the 3D likelihood taken at
        the estimated value of the remaining parameter. Requires the
        Pearson likelihood to have been computed first, i.e. self.pgrid,
        self.Pgrid, self.sigs, self.pP3 and self.dpP3 must be populated.

        Parameters
        ----------
        plotname: str, default=None
            If given, then save the likelihood corner plot at this location
        """
        pgrid = self.pgrid
        Pgrid = self.Pgrid
        sigs = self.sigs
        sls = [":", "--", "-"]  # line styles for the 3/2/1 sigma contours
        f, axs = plt.subplots(3, 3, figsize=(10,10),
                              gridspec_kw={'hspace':0.125,
                                           'wspace':0.125})
        # 2D plots
        ## b, m plane, sliced at the grid point nearest the N estimate
        iN = np.argmin(np.abs(pgrid[2][0,0,:] - self.pP3[2]))
        axs[2,0].pcolormesh(pgrid[0][:,:,0], pgrid[1][:,:,0],
                            Pgrid[:,:,iN], cmap=cmr.ocean_r, shading='auto')
        axs[2,0].contour(pgrid[0][:,:,iN], pgrid[1][:,:,iN],
                         Pgrid[:,:,iN], sigs, linestyles=sls, colors='C1')
        # Overlay the Gaussian (chi^2) likelihood contours for comparison
        axs[2,0].contour(self.npgrid[0], self.npgrid[1],
                         self.nPgrid/self.nnorm, self.nsigs,
                         linestyles=sls, colors='red')
        if self._th is not None:
            # Mark the reference (theory/input) values, if provided
            axs[2,0].axvline(self._th[0], c='k')
            axs[2,0].axhline(self._th[1], c='k')
        axs[2,0].set(xlabel=r"$b$ [$\mu$K$^2$]", ylabel=r"$m$ [rad$^2$]")
        ## N, m plane, sliced at the estimated b
        iI = np.argmin(np.abs(pgrid[0][:,0,0] - self.pP3[0]))
        axs[2,1].pcolormesh(pgrid[2][0,:,:], pgrid[1][0,:,:],
                            Pgrid[iI,:,:], cmap=cmr.ocean_r, shading='auto')
        axs[2,1].contour(pgrid[2][iI,:,:], pgrid[1][iI,:,:],
                         Pgrid[iI,:,:], sigs, linestyles=sls, colors='C1')
        if self._th is not None:
            axs[2,1].axhline(self._th[1], c='k')
        axs[2,1].set(yticklabels=[], xlabel=r"$N$")
        ## b, N plane, sliced at the estimated m
        iS = np.argmin(np.abs(self.pgrid[1][0,:,0] - self.pP3[1]))
        axs[1,0].pcolormesh(pgrid[0][:,0,:], pgrid[2][:,0,:],
                            Pgrid[:,iS,:], cmap=cmr.ocean_r, shading='auto')
        axs[1,0].contour(pgrid[0][:,iS,:], pgrid[2][:,iS,:],
                         Pgrid[:,iS,:], sigs, linestyles=sls, colors='C1')
        if self._th is not None:
            axs[1,0].axvline(self._th[0], c='k')
        axs[1,0].set(xticklabels=[], ylabel=r"$N$")
        # 1D marginalized histograms with estimate +/- error in the title
        ## b
        axs[0,0].plot(pgrid[0][:,0,0], np.sum(Pgrid, axis=(1,2)))
        if self._th is not None:
            axs[0,0].axvline(self._th[0], c='k')
        axs[0,0].set(xticklabels=[], yticks=[],
                     title=r"$b = {:.2e} \pm {:.2e}$".format(self.pP3[0],
                                                np.sqrt(self.dpP3[0])))
        ## N
        axs[1,1].plot(pgrid[2][0,0,:], np.sum(Pgrid, axis=(0,1)))
        axs[1,1].set(xticklabels=[], yticks=[],
                     title=r"$N = {:.2f} \pm {:.2f}$".format(self.pP3[2],
                                                np.sqrt(self.dpP3[2])))
        ## m
        axs[2,2].plot(self.pgrid[1][0,:,0], np.sum(Pgrid, axis=(0,2)))
        if self._th is not None:
            axs[2,2].axvline(self._th[1], c='k')
        axs[2,2].set(yticks=[], xlabel=r"$m$ [rad$^2$]",
                     title=r"$m = {:.2e} \pm {:.2e}$".format(self.pP3[1],
                                                np.sqrt(self.dpP3[1])))
        # Hide unused axes of the corner grid
        axs[0,1].axis('off')
        axs[0,2].axis('off')
        axs[1,2].axis('off')
        # Only save when a path was supplied; plt.savefig(None) raises
        if plotname is not None:
            plt.savefig(plotname)
        plt.show()
def PearsonMPI(self):
        """Compute the Pearson Type III Likelihood (MPI implementation).

        Compute the model likelihood for the patch statistics of the map
        assuming the small scale temperature variance follows a Pearson Type
        III distribution dictated by the background temperature gradient.
        This is the MPI implementation, which splits up the computation of
        the likelihood along the N parameter equally amongst the processes.
        Estimator outputs are stored in the pP3, and errors in the dpP3
        attributes.

        Currently, only supporting 80 steps in N, so the number of processes
        should divide 80 evenly.
        """
        # Broadcast the fitted line (intercept b, slope m) and its 1-sigma
        # errors from rank 0 so every rank builds identical grids.
        line = np.empty(2)
        dline = np.empty(2)
        if self.commrank == 0:
            line = self.line
            dline = np.sqrt(np.diag(self.dline))
        self.comm.Bcast(line, root=0)
        self.comm.Bcast(dline, root=0)
        # Parameter grids: 200 steps in b and m (+/- 10 sigma around the
        # fit), 80 steps in N (see docstring for the divisibility rule).
        bgrid = np.linspace(line[0] - 10*dline[0],
                            line[0] + 10*dline[0],
                            200
                            )
        mgrid = np.linspace(line[1] - 10*dline[1],
                            line[1] + 10*dline[1],
                            200
                            )
        Ngrid = np.linspace(self._Ns[0], self._Ns[1], 80)
        pgrid = np.meshgrid(bgrid, mgrid, Ngrid, indexing='ij')
        if self.commrank == 0:
            self.pgrid = pgrid
        # Grid cell widths, used for the Riemann-sum normalization below.
        db = bgrid[1] - bgrid[0]
        dm = mgrid[1] - mgrid[0]
        dN = Ngrid[1] - Ngrid[0]
        # Share the flattened patch statistics with all ranks; the size is
        # broadcast first so non-root ranks can allocate receive buffers.
        npatch = np.empty(1, dtype=int)
        if self.commrank == 0:
            npatch[0] = self._dT2patch.size
        self.comm.Bcast(npatch, root=0)
        if self.commrank == 0:
            dT2p = self._dT2patch.flatten()
            T2p = np.abs(self._T2patch.flatten())
        else:
            dT2p = np.empty(npatch)
            T2p = np.empty(npatch)
        self.comm.Bcast(dT2p, root=0)
        self.comm.Bcast(T2p, root=0)
        # Each rank evaluates an equal, contiguous slice of the N grid.
        subsize = 80 // self.commsize
        subN = Ngrid[self.commrank*subsize:(self.commrank+1)*subsize]
        subP = np.zeros((subsize,200,200))
        Pgrid = np.zeros((self.commsize,subsize,200,200))
        for k, N in enumerate(subN):
            # Sum the per-patch log-likelihoods (axis=2). Note pgrid[0] and
            # pgrid[1] are constant along the N axis ('ij' meshgrid), so
            # slicing them with the rank-local k still gives the full
            # (b, m) planes.
            subP[k,:,:] = np.sum(_logPearson3(T2p[None,None,...], N,
                                  np.abs(_lin(np.abs(dT2p),
                                         *[pgrid[0][:,:,k][...,None],
                                           pgrid[1][:,:,k][...,None]]
                                         ))), axis=2)
        # Gather per-rank slabs on rank 0; rank order matches N order since
        # the subN slices are contiguous in rank.
        self.comm.Gather(subP, Pgrid, root=0)
        if self.commrank == 0:
            # Reassemble the slabs into shape (200, 200, 80) = (b, m, N).
            Pgrid = np.concatenate(Pgrid, axis=0)
            Pgrid = Pgrid.swapaxes(0, 1).swapaxes(1, 2)
            # Exponentiate about the mean log-likelihood for numerical
            # stability, then normalize to a probability density.
            Pmean = np.mean(Pgrid)
            Pgrid = np.exp(Pgrid-Pmean)
            norm = np.sum(Pgrid * db * dm * dN)
            Pgrid /= norm
            self.Pgrid = Pgrid
            # Posterior means and variances of (b, m, N) by direct
            # integration over the grid.
            mI = np.sum(Pgrid * pgrid[0] * db * dm * dN)
            dI = np.sum(Pgrid * (pgrid[0] - mI)**2 * db * dm * dN)
            mS = np.sum(Pgrid * pgrid[1] * db * dm * dN)
            dS = np.sum(Pgrid * (pgrid[1] - mS)**2 * db * dm * dN)
            mN = np.sum(Pgrid * pgrid[2] * db * dm * dN)
            # NOTE: dN is rebound here -- the RHS uses the N cell width,
            # the result is the posterior variance of N.
            dN = np.sum(Pgrid * (pgrid[2] - mN)**2 * db * dm * dN)
            self.pP3 = np.array([mI, mS, mN])
            self.dpP3 = np.array([dI, dS, dN])
            # Likelihood levels at 3/2/1 sigma offsets along b, used as
            # contour levels by the plotting methods.
            sigs = []
            for i in [3,2,1]:
                cline = [mI+np.sqrt(dI)*i, mS]
                P = np.sum(np.log(_Pearson3(T2p, mN, _lin(dT2p, *cline))))
                sigs.append(np.exp(P - Pmean)/norm)
            self.sigs = sigs
def gather_covMPI(self):
        """Assemble patch statistics for small scale lensing with cos filter.

        Compute the small scale (ell > 3000) temperature power at different
        patches across the sky as well as the average amplitude of the
        background temperature gradient (ell < 2000). For the small scale
        statistics, also apply a filter in Fourier space such that:

        .. math::
            f_\\ell = \\cos(\\hat{\\ell}\\cdot\\hat{\\nabla T})

        Results are stored in self._T2patch (cross-covariance of the two
        filtered maps per patch) and self._dT2patch (squared gradient
        amplitude per patch).
        """
        self._edge = 5  # Edge pixels to throw away
        e = self._edge  # use the named attribute below instead of a magic 5
        p = self._p
        m_fft1 = enmap.fft(self.map1)
        m_fft2 = enmap.fft(self.map2)
        # Top-hat band-pass selecting multipoles in (lmin, lmax)
        hp = np.zeros(self.map1.shape)
        hp[np.where((self._lmod > self.lmin) & (self._lmod < self.lmax))] = 1.
        # Apply pre-whitening or Wiener/inverse variance filters, then top hat
        if self._aW and self.fid is not None:
            cs = CubicSpline(self.fid[0], self.fid[1])  # (ell, Cl)
            m_fft1 = m_fft1 / cs(self._lmod)
            m_fft2 = m_fft2 / cs(self._lmod)
        self._Tss1 = enmap.ifft(m_fft1 * hp)
        self._Tss2 = enmap.ifft(m_fft2 * hp)
        self._dTy, self._dTx = gradient_flat(self.map1, self.ldT)
        # Scale geometry for lower res map of patches
        pshp, pwcs = enmap.scale_geometry(self.map1.shape,
                                           self.map1.wcs, 1./self._p)
        if not self.savesteps:
            del self.map1, m_fft1, self.map2, m_fft2
        self._T2patch = enmap.zeros(pshp, pwcs)
        self._dTxpatch = enmap.zeros(pshp, pwcs)
        self._dTypatch = enmap.zeros(pshp, pwcs)
        self._T_sub1 = np.zeros((pshp[-2], pshp[-1], p, p))
        self._T_sub2 = np.zeros((pshp[-2], pshp[-1], p, p))
        for i in range(self._T2patch.shape[-2]):
            for j in range(self._T2patch.shape[-1]):
                # Mean background gradient over the p x p patch
                self._dTypatch[i,j] = np.mean(self._dTy[i*p:(i+1)*p,
                                                          j*p:(j+1)*p])
                self._dTxpatch[i,j] = np.mean(self._dTx[i*p:(i+1)*p,
                                                          j*p:(j+1)*p])
                Tp1 = self._Tss1[i*p:(i+1)*p,j*p:(j+1)*p]
                Tp2 = self._Tss2[i*p:(i+1)*p,j*p:(j+1)*p]
                lsuby, lsubx = Tp1.lmap()
                lsubmod = Tp1.modlmap()
                lsubmod[0,0] = 1.  # avoid dividing by zero at the DC mode
                if self.filter == 'cos':
                    fl = 1.j*(lsubx*self._dTxpatch[i,j] + \
                              lsuby*self._dTypatch[i,j]) / \
                         (lsubmod * np.sqrt(self._dTxpatch[i,j]**2 + \
                                            self._dTypatch[i,j]**2))
                elif self.filter=='cos2':
                    fl = (lsubx*self._dTxpatch[i,j] + \
                          lsuby*self._dTypatch[i,j])**2 / \
                         (lsubmod * np.sqrt(self._dTxpatch[i,j]**2 + \
                                            self._dTypatch[i,j]**2))**2
                else:
                    fl = np.ones(lsubx.shape)
                fl[0,0] = 0.  # zero the mean of each filtered patch
                self._T_sub1[i,j,:,:] = enmap.ifft(enmap.fft(Tp1)*fl).real
                self._T_sub2[i,j,:,:] = enmap.ifft(enmap.fft(Tp2)*fl).real
                # Throw away pixels with edge effects (self._edge on each
                # side; was a hard-coded 5 that could drift from the
                # attribute)
                self._T2patch[i,j] = np.cov(
                    self._T_sub1[i,j,e:-e,e:-e].flatten(),
                    self._T_sub2[i,j,e:-e,e:-e].flatten())[0,1]
        self._dT2patch = self._dTxpatch**2 + self._dTypatch**2
        if not self.savesteps:
            del self._dTypatch, self._dTxpatch, self._dTy, self._dTx
            del self._T_sub1, self._T_sub2, self._Tss1, self._Tss2
"pixell.enmap.zeros",
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"numpy.empty",
"scipy.interpolate.CubicSpline",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"matplotlib.pyplot.contour",
"numpy.diag",
"matplotlib.pyplot.tight_layout",
"matplotlib.py... | [((1533, 1561), 'numpy.exp', 'np.exp', (['(-N * s2 / (2 * sig2))'], {}), '(-N * s2 / (2 * sig2))\n', (1539, 1561), True, 'import numpy as np\n'), ((3199, 3212), 'numpy.argmax', 'np.argmax', (['Px'], {}), '(Px)\n', (3208, 3212), True, 'import numpy as np\n'), ((1580, 1600), 'scipy.special.gamma', 'gamma', (['((N - 1) / 2.0)'], {}), '((N - 1) / 2.0)\n', (1585, 1600), False, 'from scipy.special import gamma\n'), ((2196, 2218), 'numpy.log', 'np.log', (['(N / (2 * sig2))'], {}), '(N / (2 * sig2))\n', (2202, 2218), True, 'import numpy as np\n'), ((2246, 2256), 'numpy.log', 'np.log', (['s2'], {}), '(s2)\n', (2252, 2256), True, 'import numpy as np\n'), ((3549, 3572), 'numpy.linspace', 'np.linspace', (['a', 'b', '(1001)'], {}), '(a, b, 1001)\n', (3560, 3572), True, 'import numpy as np\n'), ((3672, 3688), 'scipy.optimize.fsolve', 'fsolve', (['optP', 'a0'], {}), '(optP, a0)\n', (3678, 3688), False, 'from scipy.optimize import curve_fit, fsolve\n'), ((8416, 8438), 'pixell.enmap.fft', 'enmap.fft', (['self.map_in'], {}), '(self.map_in)\n', (8425, 8438), False, 'from pixell import enmap, utils\n'), ((8453, 8480), 'numpy.zeros', 'np.zeros', (['self.map_in.shape'], {}), '(self.map_in.shape)\n', (8461, 8480), True, 'import numpy as np\n'), ((8582, 8604), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft * hp)'], {}), '(m_fft * hp)\n', (8592, 8604), False, 'from pixell import enmap, utils\n'), ((8753, 8824), 'pixell.enmap.scale_geometry', 'enmap.scale_geometry', (['self.map_in.shape', 'self.map_in.wcs', '(1.0 / self._p)'], {}), '(self.map_in.shape, self.map_in.wcs, 1.0 / self._p)\n', (8773, 8824), False, 'from pixell import enmap, utils\n'), ((8961, 8984), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (8972, 8984), False, 'from pixell import enmap, utils\n'), ((9011, 9034), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (9022, 9034), False, 'from pixell import enmap, utils\n'), ((9061, 9084), 
'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (9072, 9084), False, 'from pixell import enmap, utils\n'), ((9705, 9729), 'numpy.var', 'np.var', (['Trs'], {'axis': '(1, 3)'}), '(Trs, axis=(1, 3))\n', (9711, 9729), True, 'import numpy as np\n'), ((9760, 9787), 'numpy.mean', 'np.mean', (['dTyrs'], {'axis': '(1, 3)'}), '(dTyrs, axis=(1, 3))\n', (9767, 9787), True, 'import numpy as np\n'), ((9818, 9845), 'numpy.mean', 'np.mean', (['dTxrs'], {'axis': '(1, 3)'}), '(dTxrs, axis=(1, 3))\n', (9825, 9845), True, 'import numpy as np\n'), ((10637, 10659), 'pixell.enmap.fft', 'enmap.fft', (['self.map_in'], {}), '(self.map_in)\n', (10646, 10659), False, 'from pixell import enmap, utils\n'), ((10674, 10701), 'numpy.zeros', 'np.zeros', (['self.map_in.shape'], {}), '(self.map_in.shape)\n', (10682, 10701), True, 'import numpy as np\n'), ((11042, 11064), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft * hp)'], {}), '(m_fft * hp)\n', (11052, 11064), False, 'from pixell import enmap, utils\n'), ((11211, 11282), 'pixell.enmap.scale_geometry', 'enmap.scale_geometry', (['self.map_in.shape', 'self.map_in.wcs', '(1.0 / self._p)'], {}), '(self.map_in.shape, self.map_in.wcs, 1.0 / self._p)\n', (11231, 11282), False, 'from pixell import enmap, utils\n'), ((11419, 11442), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (11430, 11442), False, 'from pixell import enmap, utils\n'), ((11469, 11492), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (11480, 11492), False, 'from pixell import enmap, utils\n'), ((11519, 11542), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (11530, 11542), False, 'from pixell import enmap, utils\n'), ((11566, 11602), 'numpy.zeros', 'np.zeros', (['(pshp[-2], pshp[-1], p, p)'], {}), '((pshp[-2], pshp[-1], p, p))\n', (11574, 11602), True, 'import numpy as np\n'), ((13474, 13494), 'pixell.enmap.fft', 'enmap.fft', (['self.map1'], {}), '(self.map1)\n', 
(13483, 13494), False, 'from pixell import enmap, utils\n'), ((13513, 13533), 'pixell.enmap.fft', 'enmap.fft', (['self.map2'], {}), '(self.map2)\n', (13522, 13533), False, 'from pixell import enmap, utils\n'), ((13548, 13573), 'numpy.zeros', 'np.zeros', (['self.map1.shape'], {}), '(self.map1.shape)\n', (13556, 13573), True, 'import numpy as np\n'), ((13963, 13986), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft1 * hp)'], {}), '(m_fft1 * hp)\n', (13973, 13986), False, 'from pixell import enmap, utils\n'), ((14009, 14032), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft2 * hp)'], {}), '(m_fft2 * hp)\n', (14019, 14032), False, 'from pixell import enmap, utils\n'), ((14177, 14244), 'pixell.enmap.scale_geometry', 'enmap.scale_geometry', (['self.map1.shape', 'self.map1.wcs', '(1.0 / self._p)'], {}), '(self.map1.shape, self.map1.wcs, 1.0 / self._p)\n', (14197, 14244), False, 'from pixell import enmap, utils\n'), ((14399, 14422), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (14410, 14422), False, 'from pixell import enmap, utils\n'), ((14449, 14472), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (14460, 14472), False, 'from pixell import enmap, utils\n'), ((14499, 14522), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (14510, 14522), False, 'from pixell import enmap, utils\n'), ((14550, 14573), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (14561, 14573), False, 'from pixell import enmap, utils\n'), ((14601, 14624), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (14612, 14624), False, 'from pixell import enmap, utils\n'), ((14649, 14685), 'numpy.zeros', 'np.zeros', (['(pshp[-2], pshp[-1], p, p)'], {}), '((pshp[-2], pshp[-1], p, p))\n', (14657, 14685), True, 'import numpy as np\n'), ((14710, 14746), 'numpy.zeros', 'np.zeros', (['(pshp[-2], pshp[-1], p, p)'], {}), '((pshp[-2], pshp[-1], p, p))\n', (14718, 14746), True, 
'import numpy as np\n'), ((17927, 17949), 'pixell.enmap.fft', 'enmap.fft', (['self.map_in'], {}), '(self.map_in)\n', (17936, 17949), False, 'from pixell import enmap, utils\n'), ((17964, 17991), 'numpy.zeros', 'np.zeros', (['self.map_in.shape'], {}), '(self.map_in.shape)\n', (17972, 17991), True, 'import numpy as np\n'), ((18093, 18115), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft * hp)'], {}), '(m_fft * hp)\n', (18103, 18115), False, 'from pixell import enmap, utils\n'), ((18262, 18333), 'pixell.enmap.scale_geometry', 'enmap.scale_geometry', (['self.map_in.shape', 'self.map_in.wcs', '(1.0 / self._p)'], {}), '(self.map_in.shape, self.map_in.wcs, 1.0 / self._p)\n', (18282, 18333), False, 'from pixell import enmap, utils\n'), ((18470, 18493), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (18481, 18493), False, 'from pixell import enmap, utils\n'), ((18520, 18543), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (18531, 18543), False, 'from pixell import enmap, utils\n'), ((18570, 18593), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (18581, 18593), False, 'from pixell import enmap, utils\n'), ((18617, 18653), 'numpy.zeros', 'np.zeros', (['(pshp[-2], pshp[-1], p, p)'], {}), '((pshp[-2], pshp[-1], p, p))\n', (18625, 18653), True, 'import numpy as np\n'), ((20593, 20613), 'numpy.zeros', 'np.zeros', (['T_ord.size'], {}), '(T_ord.size)\n', (20601, 20613), True, 'import numpy as np\n'), ((23002, 23038), 'numpy.sum', 'np.sum', (['(diff ** 2 / self._T_err ** 2)'], {}), '(diff ** 2 / self._T_err ** 2)\n', (23008, 23038), True, 'import numpy as np\n'), ((24211, 24251), 'numpy.meshgrid', 'np.meshgrid', (['bgrid', 'mgrid'], {'indexing': '"""ij"""'}), "(bgrid, mgrid, indexing='ij')\n", (24222, 24251), True, 'import numpy as np\n'), ((24303, 24323), 'numpy.zeros', 'np.zeros', (['(100, 100)'], {}), '((100, 100))\n', (24311, 24323), True, 'import numpy as np\n'), ((24560, 24581), 
'numpy.median', 'np.median', (['self.cgrid'], {}), '(self.cgrid)\n', (24569, 24581), True, 'import numpy as np\n'), ((24599, 24633), 'numpy.exp', 'np.exp', (['(-(self.cgrid - offset) / 2)'], {}), '(-(self.cgrid - offset) / 2)\n', (24605, 24633), True, 'import numpy as np\n'), ((24675, 24698), 'numpy.sum', 'np.sum', (['(Pgrid * db * dm)'], {}), '(Pgrid * db * dm)\n', (24681, 24698), True, 'import numpy as np\n'), ((24963, 24981), 'numpy.array', 'np.array', (['[mI, mS]'], {}), '([mI, mS])\n', (24971, 24981), True, 'import numpy as np\n'), ((25003, 25021), 'numpy.array', 'np.array', (['[dI, dS]'], {}), '([dI, dS])\n', (25011, 25021), True, 'import numpy as np\n'), ((27720, 27762), 'numpy.linspace', 'np.linspace', (['self._Ns[0]', 'self._Ns[1]', '(100)'], {}), '(self._Ns[0], self._Ns[1], 100)\n', (27731, 27762), True, 'import numpy as np\n'), ((27780, 27827), 'numpy.meshgrid', 'np.meshgrid', (['bgrid', 'mgrid', 'Ngrid'], {'indexing': '"""ij"""'}), "(bgrid, mgrid, Ngrid, indexing='ij')\n", (27791, 27827), True, 'import numpy as np\n'), ((27873, 27898), 'numpy.zeros', 'np.zeros', (['(200, 200, 100)'], {}), '((200, 200, 100))\n', (27881, 27898), True, 'import numpy as np\n'), ((28882, 28896), 'numpy.mean', 'np.mean', (['Pgrid'], {}), '(Pgrid)\n', (28889, 28896), True, 'import numpy as np\n'), ((28914, 28935), 'numpy.exp', 'np.exp', (['(Pgrid - Pmean)'], {}), '(Pgrid - Pmean)\n', (28920, 28935), True, 'import numpy as np\n'), ((28950, 28978), 'numpy.sum', 'np.sum', (['(Pgrid * db * dm * dN)'], {}), '(Pgrid * db * dm * dN)\n', (28956, 28978), True, 'import numpy as np\n'), ((29044, 29083), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[0] * db * dm * dN)'], {}), '(Pgrid * pgrid[0] * db * dm * dN)\n', (29050, 29083), True, 'import numpy as np\n'), ((29098, 29149), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[0] - mI) ** 2 * db * dm * dN)'], {}), '(Pgrid * (pgrid[0] - mI) ** 2 * db * dm * dN)\n', (29104, 29149), True, 'import numpy as np\n'), ((29162, 29201), 'numpy.sum', 'np.sum', 
(['(Pgrid * pgrid[1] * db * dm * dN)'], {}), '(Pgrid * pgrid[1] * db * dm * dN)\n', (29168, 29201), True, 'import numpy as np\n'), ((29216, 29267), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[1] - mS) ** 2 * db * dm * dN)'], {}), '(Pgrid * (pgrid[1] - mS) ** 2 * db * dm * dN)\n', (29222, 29267), True, 'import numpy as np\n'), ((29280, 29319), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[2] * db * dm * dN)'], {}), '(Pgrid * pgrid[2] * db * dm * dN)\n', (29286, 29319), True, 'import numpy as np\n'), ((29334, 29385), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[2] - mN) ** 2 * db * dm * dN)'], {}), '(Pgrid * (pgrid[2] - mN) ** 2 * db * dm * dN)\n', (29340, 29385), True, 'import numpy as np\n'), ((29404, 29426), 'numpy.array', 'np.array', (['[mI, mS, mN]'], {}), '([mI, mS, mN])\n', (29412, 29426), True, 'import numpy as np\n'), ((29448, 29470), 'numpy.array', 'np.array', (['[dI, dS, dN]'], {}), '([dI, dS, dN])\n', (29456, 29470), True, 'import numpy as np\n'), ((30554, 30642), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(10, 10)', 'gridspec_kw': "{'hspace': 0.125, 'wspace': 0.125}"}), "(3, 3, figsize=(10, 10), gridspec_kw={'hspace': 0.125, 'wspace':\n 0.125})\n", (30566, 30642), True, 'import matplotlib.pyplot as plt\n'), ((33285, 33306), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plotname'], {}), '(plotname)\n', (33296, 33306), True, 'import matplotlib.pyplot as plt\n'), ((33316, 33326), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33324, 33326), True, 'import matplotlib.pyplot as plt\n'), ((33949, 34037), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(10, 10)', 'gridspec_kw': "{'hspace': 0.125, 'wspace': 0.125}"}), "(3, 3, figsize=(10, 10), gridspec_kw={'hspace': 0.125, 'wspace':\n 0.125})\n", (33961, 34037), True, 'import matplotlib.pyplot as plt\n'), ((37036, 37057), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plotname'], {}), '(plotname)\n', (37047, 37057), True, 'import matplotlib.pyplot as 
plt\n'), ((37067, 37077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37075, 37077), True, 'import matplotlib.pyplot as plt\n'), ((37807, 37818), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (37815, 37818), True, 'import numpy as np\n'), ((37836, 37847), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (37844, 37847), True, 'import numpy as np\n'), ((38056, 38122), 'numpy.linspace', 'np.linspace', (['(line[0] - 10 * dline[0])', '(line[0] + 10 * dline[0])', '(200)'], {}), '(line[0] - 10 * dline[0], line[0] + 10 * dline[0], 200)\n', (38067, 38122), True, 'import numpy as np\n'), ((38226, 38292), 'numpy.linspace', 'np.linspace', (['(line[1] - 10 * dline[1])', '(line[1] + 10 * dline[1])', '(200)'], {}), '(line[1] - 10 * dline[1], line[1] + 10 * dline[1], 200)\n', (38237, 38292), True, 'import numpy as np\n'), ((38396, 38437), 'numpy.linspace', 'np.linspace', (['self._Ns[0]', 'self._Ns[1]', '(80)'], {}), '(self._Ns[0], self._Ns[1], 80)\n', (38407, 38437), True, 'import numpy as np\n'), ((38455, 38502), 'numpy.meshgrid', 'np.meshgrid', (['bgrid', 'mgrid', 'Ngrid'], {'indexing': '"""ij"""'}), "(bgrid, mgrid, Ngrid, indexing='ij')\n", (38466, 38502), True, 'import numpy as np\n'), ((38687, 38709), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': 'int'}), '(1, dtype=int)\n', (38695, 38709), True, 'import numpy as np\n'), ((39249, 39278), 'numpy.zeros', 'np.zeros', (['(subsize, 200, 200)'], {}), '((subsize, 200, 200))\n', (39257, 39278), True, 'import numpy as np\n'), ((39294, 39338), 'numpy.zeros', 'np.zeros', (['(self.commsize, subsize, 200, 200)'], {}), '((self.commsize, subsize, 200, 200))\n', (39302, 39338), True, 'import numpy as np\n'), ((41454, 41474), 'pixell.enmap.fft', 'enmap.fft', (['self.map1'], {}), '(self.map1)\n', (41463, 41474), False, 'from pixell import enmap, utils\n'), ((41493, 41513), 'pixell.enmap.fft', 'enmap.fft', (['self.map2'], {}), '(self.map2)\n', (41502, 41513), False, 'from pixell import enmap, utils\n'), ((41528, 41553), 
'numpy.zeros', 'np.zeros', (['self.map1.shape'], {}), '(self.map1.shape)\n', (41536, 41553), True, 'import numpy as np\n'), ((41943, 41966), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft1 * hp)'], {}), '(m_fft1 * hp)\n', (41953, 41966), False, 'from pixell import enmap, utils\n'), ((41989, 42012), 'pixell.enmap.ifft', 'enmap.ifft', (['(m_fft2 * hp)'], {}), '(m_fft2 * hp)\n', (41999, 42012), False, 'from pixell import enmap, utils\n'), ((42157, 42224), 'pixell.enmap.scale_geometry', 'enmap.scale_geometry', (['self.map1.shape', 'self.map1.wcs', '(1.0 / self._p)'], {}), '(self.map1.shape, self.map1.wcs, 1.0 / self._p)\n', (42177, 42224), False, 'from pixell import enmap, utils\n'), ((42379, 42402), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (42390, 42402), False, 'from pixell import enmap, utils\n'), ((42429, 42452), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (42440, 42452), False, 'from pixell import enmap, utils\n'), ((42479, 42502), 'pixell.enmap.zeros', 'enmap.zeros', (['pshp', 'pwcs'], {}), '(pshp, pwcs)\n', (42490, 42502), False, 'from pixell import enmap, utils\n'), ((42527, 42563), 'numpy.zeros', 'np.zeros', (['(pshp[-2], pshp[-1], p, p)'], {}), '((pshp[-2], pshp[-1], p, p))\n', (42535, 42563), True, 'import numpy as np\n'), ((42588, 42624), 'numpy.zeros', 'np.zeros', (['(pshp[-2], pshp[-1], p, p)'], {}), '((pshp[-2], pshp[-1], p, p))\n', (42596, 42624), True, 'import numpy as np\n'), ((2330, 2350), 'scipy.special.gamma', 'gamma', (['((N - 1) / 2.0)'], {}), '((N - 1) / 2.0)\n', (2335, 2350), False, 'from scipy.special import gamma\n'), ((3453, 3473), 'scipy.optimize.fsolve', 'fsolve', (['minb', 'bguess'], {}), '(minb, bguess)\n', (3459, 3473), False, 'from scipy.optimize import curve_fit, fsolve\n'), ((7001, 7030), 'numpy.arange', 'np.arange', (['self._thcl[0].size'], {}), '(self._thcl[0].size)\n', (7010, 7030), True, 'import numpy as np\n'), ((7155, 7184), 'numpy.arange', 'np.arange', 
(['self._thcl[1].size'], {}), '(self._thcl[1].size)\n', (7164, 7184), True, 'import numpy as np\n'), ((8493, 8554), 'numpy.where', 'np.where', (['((self._lmod > self.lmin) & (self._lmod < self.lmax))'], {}), '((self._lmod > self.lmin) & (self._lmod < self.lmax))\n', (8501, 8554), True, 'import numpy as np\n'), ((10714, 10775), 'numpy.where', 'np.where', (['((self._lmod > self.lmin) & (self._lmod < self.lmax))'], {}), '((self._lmod > self.lmin) & (self._lmod < self.lmax))\n', (10722, 10775), True, 'import numpy as np\n'), ((10927, 10964), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.fid[0]', 'self.fid[1]'], {}), '(self.fid[0], self.fid[1])\n', (10938, 10964), False, 'from scipy.interpolate import CubicSpline\n'), ((13586, 13647), 'numpy.where', 'np.where', (['((self._lmod > self.lmin) & (self._lmod < self.lmax))'], {}), '((self._lmod > self.lmin) & (self._lmod < self.lmax))\n', (13594, 13647), True, 'import numpy as np\n'), ((13799, 13836), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.fid[0]', 'self.fid[1]'], {}), '(self.fid[0], self.fid[1])\n', (13810, 13836), False, 'from scipy.interpolate import CubicSpline\n'), ((18004, 18065), 'numpy.where', 'np.where', (['((self._lmod > self.lmin) & (self._lmod < self.lmax))'], {}), '((self._lmod > self.lmin) & (self._lmod < self.lmax))\n', (18012, 18065), True, 'import numpy as np\n'), ((20889, 20903), 'numpy.zeros', 'np.zeros', (['bins'], {}), '(bins)\n', (20897, 20903), True, 'import numpy as np\n'), ((21720, 21747), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (21730, 21747), True, 'import matplotlib.pyplot as plt\n'), ((22134, 22151), 'matplotlib.pyplot.xscale', 'plt.xscale', (['scale'], {}), '(scale)\n', (22144, 22151), True, 'import matplotlib.pyplot as plt\n'), ((22165, 22217), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$|\\\\nabla T|^2~[\\\\mu{\\\\rm K}^2/rad^2$]"""'], {}), "('$|\\\\nabla T|^2~[\\\\mu{\\\\rm K}^2/rad^2$]')\n", (22175, 22217), 
True, 'import matplotlib.pyplot as plt\n'), ((22229, 22273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma_T^2~[\\\\mu{\\\\rm K}^2]$"""'], {}), "('$\\\\sigma_T^2~[\\\\mu{\\\\rm K}^2]$')\n", (22239, 22273), True, 'import matplotlib.pyplot as plt\n'), ((22285, 22303), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22301, 22303), True, 'import matplotlib.pyplot as plt\n'), ((22399, 22409), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22407, 22409), True, 'import matplotlib.pyplot as plt\n'), ((22423, 22434), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22432, 22434), True, 'import matplotlib.pyplot as plt\n'), ((24713, 24747), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[0] * db * dm)'], {}), '(Pgrid * pgrid[0] * db * dm)\n', (24719, 24747), True, 'import numpy as np\n'), ((24769, 24815), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[0] - mI) ** 2 * db * dm)'], {}), '(Pgrid * (pgrid[0] - mI) ** 2 * db * dm)\n', (24775, 24815), True, 'import numpy as np\n'), ((24835, 24869), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[1] * db * dm)'], {}), '(Pgrid * pgrid[1] * db * dm)\n', (24841, 24869), True, 'import numpy as np\n'), ((24891, 24937), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[1] - mS) ** 2 * db * dm)'], {}), '(Pgrid * (pgrid[1] - mS) ** 2 * db * dm)\n', (24897, 24937), True, 'import numpy as np\n'), ((25286, 25313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (25296, 25313), True, 'import matplotlib.pyplot as plt\n'), ((25587, 25619), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Likelihood"""'}), "(label='Likelihood')\n", (25599, 25619), True, 'import matplotlib.pyplot as plt\n'), ((25646, 25712), 'matplotlib.pyplot.contour', 'plt.contour', (['pgrid[0]', 'pgrid[1]', '(Pgrid / norm)', 'nsigs'], {'colors': '"""red"""'}), "(pgrid[0], pgrid[1], Pgrid / norm, nsigs, colors='red')\n", (25657, 25712), True, 'import matplotlib.pyplot as plt\n'), 
((25929, 25985), 'matplotlib.pyplot.clabel', 'plt.clabel', (['ncontours'], {'fmt': 'fmt', 'inline': '(True)', 'fontsize': '(20)'}), '(ncontours, fmt=fmt, inline=True, fontsize=20)\n', (25939, 25985), True, 'import matplotlib.pyplot as plt\n'), ((25999, 26030), 'matplotlib.pyplot.axvline', 'plt.axvline', (['self._th[0]'], {'c': '"""k"""'}), "(self._th[0], c='k')\n", (26010, 26030), True, 'import matplotlib.pyplot as plt\n'), ((26044, 26075), 'matplotlib.pyplot.axhline', 'plt.axhline', (['self._th[1]'], {'c': '"""k"""'}), "(self._th[1], c='k')\n", (26055, 26075), True, 'import matplotlib.pyplot as plt\n'), ((26089, 26126), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Intercept [$\\\\mu$K$^2$]"""'], {}), "('Intercept [$\\\\mu$K$^2$]')\n", (26099, 26126), True, 'import matplotlib.pyplot as plt\n'), ((26140, 26169), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Slope [rad$^2$]"""'], {}), "('Slope [rad$^2$]')\n", (26150, 26169), True, 'import matplotlib.pyplot as plt\n'), ((26261, 26271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26269, 26271), True, 'import matplotlib.pyplot as plt\n'), ((26285, 26296), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (26294, 26296), True, 'import matplotlib.pyplot as plt\n'), ((30843, 30864), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(2)'}), '(Pgrid, axis=2)\n', (30849, 30864), True, 'import numpy as np\n'), ((30954, 30993), 'numpy.abs', 'np.abs', (['(pgrid[2][0, 0, :] - self.pP3[2])'], {}), '(pgrid[2][0, 0, :] - self.pP3[2])\n', (30960, 30993), True, 'import numpy as np\n'), ((31495, 31516), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(0)'}), '(Pgrid, axis=0)\n', (31501, 31516), True, 'import numpy as np\n'), ((31606, 31645), 'numpy.abs', 'np.abs', (['(pgrid[0][:, 0, 0] - self.pP3[0])'], {}), '(pgrid[0][:, 0, 0] - self.pP3[0])\n', (31612, 31645), True, 'import numpy as np\n'), ((31947, 31968), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(1)'}), '(Pgrid, axis=1)\n', (31953, 31968), True, 'import numpy 
as np\n'), ((32058, 32102), 'numpy.abs', 'np.abs', (['(self.pgrid[1][0, :, 0] - self.pP3[1])'], {}), '(self.pgrid[1][0, :, 0] - self.pP3[1])\n', (32064, 32102), True, 'import numpy as np\n'), ((32373, 32399), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(1, 2)'}), '(Pgrid, axis=(1, 2))\n', (32379, 32399), True, 'import numpy as np\n'), ((32648, 32674), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(0, 1)'}), '(Pgrid, axis=(0, 1))\n', (32654, 32674), True, 'import numpy as np\n'), ((32928, 32954), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(0, 2)'}), '(Pgrid, axis=(0, 2))\n', (32934, 32954), True, 'import numpy as np\n'), ((34169, 34208), 'numpy.abs', 'np.abs', (['(pgrid[2][0, 0, :] - self.pP3[2])'], {}), '(pgrid[2][0, 0, :] - self.pP3[2])\n', (34175, 34208), True, 'import numpy as np\n'), ((34917, 34956), 'numpy.abs', 'np.abs', (['(pgrid[0][:, 0, 0] - self.pP3[0])'], {}), '(pgrid[0][:, 0, 0] - self.pP3[0])\n', (34923, 34956), True, 'import numpy as np\n'), ((35415, 35459), 'numpy.abs', 'np.abs', (['(self.pgrid[1][0, :, 0] - self.pP3[1])'], {}), '(self.pgrid[1][0, :, 0] - self.pP3[1])\n', (35421, 35459), True, 'import numpy as np\n'), ((35956, 35982), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(1, 2)'}), '(Pgrid, axis=(1, 2))\n', (35962, 35982), True, 'import numpy as np\n'), ((36315, 36341), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(0, 1)'}), '(Pgrid, axis=(0, 1))\n', (36321, 36341), True, 'import numpy as np\n'), ((36595, 36621), 'numpy.sum', 'np.sum', (['Pgrid'], {'axis': '(0, 2)'}), '(Pgrid, axis=(0, 2))\n', (36601, 36621), True, 'import numpy as np\n'), ((38991, 39007), 'numpy.empty', 'np.empty', (['npatch'], {}), '(npatch)\n', (38999, 39007), True, 'import numpy as np\n'), ((39027, 39043), 'numpy.empty', 'np.empty', (['npatch'], {}), '(npatch)\n', (39035, 39043), True, 'import numpy as np\n'), ((39839, 39868), 'numpy.concatenate', 'np.concatenate', (['Pgrid'], {'axis': '(0)'}), '(Pgrid, axis=0)\n', (39853, 39868), True, 'import numpy as np\n'), ((39947, 
39961), 'numpy.mean', 'np.mean', (['Pgrid'], {}), '(Pgrid)\n', (39954, 39961), True, 'import numpy as np\n'), ((39983, 40004), 'numpy.exp', 'np.exp', (['(Pgrid - Pmean)'], {}), '(Pgrid - Pmean)\n', (39989, 40004), True, 'import numpy as np\n'), ((40023, 40051), 'numpy.sum', 'np.sum', (['(Pgrid * db * dm * dN)'], {}), '(Pgrid * db * dm * dN)\n', (40029, 40051), True, 'import numpy as np\n'), ((40129, 40168), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[0] * db * dm * dN)'], {}), '(Pgrid * pgrid[0] * db * dm * dN)\n', (40135, 40168), True, 'import numpy as np\n'), ((40187, 40238), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[0] - mI) ** 2 * db * dm * dN)'], {}), '(Pgrid * (pgrid[0] - mI) ** 2 * db * dm * dN)\n', (40193, 40238), True, 'import numpy as np\n'), ((40255, 40294), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[1] * db * dm * dN)'], {}), '(Pgrid * pgrid[1] * db * dm * dN)\n', (40261, 40294), True, 'import numpy as np\n'), ((40313, 40364), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[1] - mS) ** 2 * db * dm * dN)'], {}), '(Pgrid * (pgrid[1] - mS) ** 2 * db * dm * dN)\n', (40319, 40364), True, 'import numpy as np\n'), ((40381, 40420), 'numpy.sum', 'np.sum', (['(Pgrid * pgrid[2] * db * dm * dN)'], {}), '(Pgrid * pgrid[2] * db * dm * dN)\n', (40387, 40420), True, 'import numpy as np\n'), ((40439, 40490), 'numpy.sum', 'np.sum', (['(Pgrid * (pgrid[2] - mN) ** 2 * db * dm * dN)'], {}), '(Pgrid * (pgrid[2] - mN) ** 2 * db * dm * dN)\n', (40445, 40490), True, 'import numpy as np\n'), ((40513, 40535), 'numpy.array', 'np.array', (['[mI, mS, mN]'], {}), '([mI, mS, mN])\n', (40521, 40535), True, 'import numpy as np\n'), ((40561, 40583), 'numpy.array', 'np.array', (['[dI, dS, dN]'], {}), '([dI, dS, dN])\n', (40569, 40583), True, 'import numpy as np\n'), ((41566, 41627), 'numpy.where', 'np.where', (['((self._lmod > self.lmin) & (self._lmod < self.lmax))'], {}), '((self._lmod > self.lmin) & (self._lmod < self.lmax))\n', (41574, 41627), True, 'import numpy as np\n'), ((41779, 41816), 
'scipy.interpolate.CubicSpline', 'CubicSpline', (['self.fid[0]', 'self.fid[1]'], {}), '(self.fid[0], self.fid[1])\n', (41790, 41816), False, 'from scipy.interpolate import CubicSpline\n'), ((3360, 3383), 'numpy.abs', 'np.abs', (['(Px[imode:] - Pa)'], {}), '(Px[imode:] - Pa)\n', (3366, 3383), True, 'import numpy as np\n'), ((7052, 7102), 'numpy.sum', 'np.sum', (['(ell * self._thcl[0])[self.lmin:self.lmax]'], {}), '((ell * self._thcl[0])[self.lmin:self.lmax])\n', (7058, 7102), True, 'import numpy as np\n'), ((7352, 7381), 'numpy.arange', 'np.arange', (['self._thcl[0].size'], {}), '(self._thcl[0].size)\n', (7361, 7381), True, 'import numpy as np\n'), ((7509, 7538), 'numpy.arange', 'np.arange', (['self._thcl[1].size'], {}), '(self._thcl[1].size)\n', (7518, 7538), True, 'import numpy as np\n'), ((7684, 7713), 'numpy.arange', 'np.arange', (['self._thcl[0].size'], {}), '(self._thcl[0].size)\n', (7693, 7713), True, 'import numpy as np\n'), ((7838, 7867), 'numpy.arange', 'np.arange', (['self._thcl[1].size'], {}), '(self._thcl[1].size)\n', (7847, 7867), True, 'import numpy as np\n'), ((11746, 11802), 'numpy.mean', 'np.mean', (['self._dTy[i * p:(i + 1) * p, j * p:(j + 1) * p]'], {}), '(self._dTy[i * p:(i + 1) * p, j * p:(j + 1) * p])\n', (11753, 11802), True, 'import numpy as np\n'), ((11888, 11944), 'numpy.mean', 'np.mean', (['self._dTx[i * p:(i + 1) * p, j * p:(j + 1) * p]'], {}), '(self._dTx[i * p:(i + 1) * p, j * p:(j + 1) * p])\n', (11895, 11944), True, 'import numpy as np\n'), ((12620, 12657), 'numpy.var', 'np.var', (['self._T_sub[i, j, 5:-5, 5:-5]'], {}), '(self._T_sub[i, j, 5:-5, 5:-5])\n', (12626, 12657), True, 'import numpy as np\n'), ((14911, 14983), 'numpy.mean', 'np.mean', (['self._dTy[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b]'], {}), '(self._dTy[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b])\n', (14918, 14983), True, 'import numpy as np\n'), ((15061, 15133), 'numpy.mean', 'np.mean', (['self._dTx[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * 
p - b]'], {}), '(self._dTx[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b])\n', (15068, 15133), True, 'import numpy as np\n'), ((15212, 15283), 'numpy.std', 'np.std', (['self._dTy[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b]'], {}), '(self._dTy[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b])\n', (15218, 15283), True, 'import numpy as np\n'), ((15362, 15433), 'numpy.std', 'np.std', (['self._dTx[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b]'], {}), '(self._dTx[i * p + b:(i + 1) * p - b, j * p + b:(j + 1) * p - b])\n', (15368, 15433), True, 'import numpy as np\n'), ((18797, 18853), 'numpy.mean', 'np.mean', (['self._dTy[i * p:(i + 1) * p, j * p:(j + 1) * p]'], {}), '(self._dTy[i * p:(i + 1) * p, j * p:(j + 1) * p])\n', (18804, 18853), True, 'import numpy as np\n'), ((18939, 18995), 'numpy.mean', 'np.mean', (['self._dTx[i * p:(i + 1) * p, j * p:(j + 1) * p]'], {}), '(self._dTx[i * p:(i + 1) * p, j * p:(j + 1) * p])\n', (18946, 18995), True, 'import numpy as np\n'), ((19705, 19742), 'numpy.var', 'np.var', (['self._T_sub[i, j, 3:-3, 3:-3]'], {}), '(self._T_sub[i, j, 3:-3, 3:-3])\n', (19711, 19742), True, 'import numpy as np\n'), ((21484, 21498), 'numpy.mean', 'np.mean', (['T_ord'], {}), '(T_ord)\n', (21491, 21498), True, 'import numpy as np\n'), ((21500, 21523), 'numpy.mean', 'np.mean', (['(T_ord / dT_ord)'], {}), '(T_ord / dT_ord)\n', (21507, 21523), True, 'import numpy as np\n'), ((25348, 25435), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['pgrid[0]', 'pgrid[1]', '(Pgrid / norm)'], {'cmap': 'cmr.ocean_r', 'shading': '"""auto"""'}), "(pgrid[0], pgrid[1], Pgrid / norm, cmap=cmr.ocean_r, shading=\n 'auto')\n", (25362, 25435), True, 'import matplotlib.pyplot as plt\n'), ((26226, 26247), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (26237, 26247), True, 'import matplotlib.pyplot as plt\n'), ((37939, 37958), 'numpy.diag', 'np.diag', (['self.dline'], {}), '(self.dline)\n', (37946, 37958), True, 'import 
numpy as np\n'), ((42768, 42824), 'numpy.mean', 'np.mean', (['self._dTy[i * p:(i + 1) * p, j * p:(j + 1) * p]'], {}), '(self._dTy[i * p:(i + 1) * p, j * p:(j + 1) * p])\n', (42775, 42824), True, 'import numpy as np\n'), ((42910, 42966), 'numpy.mean', 'np.mean', (['self._dTx[i * p:(i + 1) * p, j * p:(j + 1) * p]'], {}), '(self._dTx[i * p:(i + 1) * p, j * p:(j + 1) * p])\n', (42917, 42966), True, 'import numpy as np\n'), ((7208, 7263), 'numpy.sum', 'np.sum', (['(ell ** 3 * self._thcl[1])[self.lmin:self.lmax]'], {}), '((ell ** 3 * self._thcl[1])[self.lmin:self.lmax])\n', (7214, 7263), True, 'import numpy as np\n'), ((7735, 7785), 'numpy.sum', 'np.sum', (['(ell * self._thcl[0])[self.lmin:self.lmax]'], {}), '((ell * self._thcl[0])[self.lmin:self.lmax])\n', (7741, 7785), True, 'import numpy as np\n'), ((7889, 7944), 'numpy.sum', 'np.sum', (['(ell ** 3 * self._thcl[1])[self.lmin:self.lmax]'], {}), '((ell ** 3 * self._thcl[1])[self.lmin:self.lmax])\n', (7895, 7944), True, 'import numpy as np\n'), ((16956, 16970), 'numpy.cov', 'np.cov', (['T1', 'T2'], {}), '(T1, T2)\n', (16962, 16970), True, 'import numpy as np\n'), ((20772, 20798), 'numpy.arange', 'np.arange', (['bins'], {'dtype': 'int'}), '(bins, dtype=int)\n', (20781, 20798), True, 'import numpy as np\n'), ((21270, 21290), 'numpy.where', 'np.where', (['(dT_ord > b)'], {}), '(dT_ord > b)\n', (21278, 21290), True, 'import numpy as np\n'), ((23750, 23775), 'numpy.sqrt', 'np.sqrt', (['self.dline[0][0]'], {}), '(self.dline[0][0])\n', (23757, 23775), True, 'import numpy as np\n'), ((23824, 23849), 'numpy.sqrt', 'np.sqrt', (['self.dline[0][0]'], {}), '(self.dline[0][0])\n', (23831, 23849), True, 'import numpy as np\n'), ((23961, 23986), 'numpy.sqrt', 'np.sqrt', (['self.dline[1][1]'], {}), '(self.dline[1][1])\n', (23968, 23986), True, 'import numpy as np\n'), ((24035, 24060), 'numpy.sqrt', 'np.sqrt', (['self.dline[1][1]'], {}), '(self.dline[1][1])\n', (24042, 24060), True, 'import numpy as np\n'), ((25511, 25575), 
'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['pgrid[0]', 'pgrid[1]', '(Pgrid / norm)'], {'shading': '"""auto"""'}), "(pgrid[0], pgrid[1], Pgrid / norm, shading='auto')\n", (25525, 25575), True, 'import matplotlib.pyplot as plt\n'), ((27325, 27350), 'numpy.sqrt', 'np.sqrt', (['self.dline[0][0]'], {}), '(self.dline[0][0])\n', (27332, 27350), True, 'import numpy as np\n'), ((27400, 27425), 'numpy.sqrt', 'np.sqrt', (['self.dline[0][0]'], {}), '(self.dline[0][0])\n', (27407, 27425), True, 'import numpy as np\n'), ((27537, 27562), 'numpy.sqrt', 'np.sqrt', (['self.dline[1][1]'], {}), '(self.dline[1][1])\n', (27544, 27562), True, 'import numpy as np\n'), ((27612, 27637), 'numpy.sqrt', 'np.sqrt', (['self.dline[1][1]'], {}), '(self.dline[1][1])\n', (27619, 27637), True, 'import numpy as np\n'), ((29658, 29675), 'numpy.exp', 'np.exp', (['(P - Pmean)'], {}), '(P - Pmean)\n', (29664, 29675), True, 'import numpy as np\n'), ((32570, 32591), 'numpy.sqrt', 'np.sqrt', (['self.dpP3[0]'], {}), '(self.dpP3[0])\n', (32577, 32591), True, 'import numpy as np\n'), ((32845, 32866), 'numpy.sqrt', 'np.sqrt', (['self.dpP3[2]'], {}), '(self.dpP3[2])\n', (32852, 32866), True, 'import numpy as np\n'), ((33134, 33155), 'numpy.sqrt', 'np.sqrt', (['self.dpP3[1]'], {}), '(self.dpP3[1])\n', (33141, 33155), True, 'import numpy as np\n'), ((36237, 36258), 'numpy.sqrt', 'np.sqrt', (['self.dpP3[0]'], {}), '(self.dpP3[0])\n', (36244, 36258), True, 'import numpy as np\n'), ((36512, 36533), 'numpy.sqrt', 'np.sqrt', (['self.dpP3[2]'], {}), '(self.dpP3[2])\n', (36519, 36533), True, 'import numpy as np\n'), ((36885, 36906), 'numpy.sqrt', 'np.sqrt', (['self.dpP3[1]'], {}), '(self.dpP3[1])\n', (36892, 36906), True, 'import numpy as np\n'), ((7405, 7455), 'numpy.sum', 'np.sum', (['(ell * self._thcl[0])[self.lmin:self.lmax]'], {}), '((ell * self._thcl[0])[self.lmin:self.lmax])\n', (7411, 7455), True, 'import numpy as np\n'), ((7562, 7617), 'numpy.sum', 'np.sum', (['(ell ** 3 * 
self._thcl[1])[self.lmin:self.lmax]'], {}), '((ell ** 3 * self._thcl[1])[self.lmin:self.lmax])\n', (7568, 7617), True, 'import numpy as np\n'), ((12318, 12380), 'numpy.sqrt', 'np.sqrt', (['(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)'], {}), '(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)\n', (12325, 12380), True, 'import numpy as np\n'), ((16373, 16393), 'numpy.ones', 'np.ones', (['lsubx.shape'], {}), '(lsubx.shape)\n', (16380, 16393), True, 'import numpy as np\n'), ((29542, 29553), 'numpy.sqrt', 'np.sqrt', (['dI'], {}), '(dI)\n', (29549, 29553), True, 'import numpy as np\n'), ((40791, 40808), 'numpy.exp', 'np.exp', (['(P - Pmean)'], {}), '(P - Pmean)\n', (40797, 40808), True, 'import numpy as np\n'), ((43914, 43934), 'numpy.ones', 'np.ones', (['lsubx.shape'], {}), '(lsubx.shape)\n', (43921, 43934), True, 'import numpy as np\n'), ((12504, 12517), 'pixell.enmap.fft', 'enmap.fft', (['Tp'], {}), '(Tp)\n', (12513, 12517), False, 'from pixell import enmap, utils\n'), ((15912, 15974), 'numpy.sqrt', 'np.sqrt', (['(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)'], {}), '(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)\n', (15919, 15974), True, 'import numpy as np\n'), ((16476, 16490), 'pixell.enmap.fft', 'enmap.fft', (['Tp1'], {}), '(Tp1)\n', (16485, 16490), False, 'from pixell import enmap, utils\n'), ((16552, 16566), 'pixell.enmap.fft', 'enmap.fft', (['Tp2'], {}), '(Tp2)\n', (16561, 16566), False, 'from pixell import enmap, utils\n'), ((19404, 19466), 'numpy.sqrt', 'np.sqrt', (['(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)'], {}), '(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)\n', (19411, 19466), True, 'import numpy as np\n'), ((19589, 19602), 'pixell.enmap.fft', 'enmap.fft', (['Tp'], {}), '(Tp)\n', (19598, 19602), False, 'from pixell import enmap, utils\n'), ((39506, 39518), 'numpy.abs', 'np.abs', (['dT2p'], {}), '(dT2p)\n', (39512, 39518), True, 'import numpy as np\n'), ((40667, 40678), 'numpy.sqrt', 'np.sqrt', 
(['dI'], {}), '(dI)\n', (40674, 40678), True, 'import numpy as np\n'), ((43453, 43515), 'numpy.sqrt', 'np.sqrt', (['(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)'], {}), '(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)\n', (43460, 43515), True, 'import numpy as np\n'), ((44017, 44031), 'pixell.enmap.fft', 'enmap.fft', (['Tp1'], {}), '(Tp1)\n', (44026, 44031), False, 'from pixell import enmap, utils\n'), ((44093, 44107), 'pixell.enmap.fft', 'enmap.fft', (['Tp2'], {}), '(Tp2)\n', (44102, 44107), False, 'from pixell import enmap, utils\n'), ((16216, 16278), 'numpy.sqrt', 'np.sqrt', (['(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)'], {}), '(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)\n', (16223, 16278), True, 'import numpy as np\n'), ((43757, 43819), 'numpy.sqrt', 'np.sqrt', (['(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)'], {}), '(self._dTxpatch[i, j] ** 2 + self._dTypatch[i, j] ** 2)\n', (43764, 43819), True, 'import numpy as np\n'), ((25167, 25178), 'numpy.sqrt', 'np.sqrt', (['dS'], {}), '(dS)\n', (25174, 25178), True, 'import numpy as np\n')] |
import numpy as np
'''
raining,friend,distance
Data = [[0,0,1],
[0,0,1],
[0,1,0],
[0,1,1],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,0],
[0,0,0],
[1,1,1]]
Result = [0,1, 1, 1,0, 0,0, 0,1, 1,0]
'''
# Data = [[0, 0, 1],
# [0, 0, 1],
# [0, 1, 0],
# [0, 1, 1],
# [0, 1, 1],
# [1, 0, 0],
# [1, 0, 1],
# [1, 1, 0],
# [1, 1, 0],
# [0, 0, 0],
# [1, 1, 1]]
# Result = [0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0]
# Training set: each row is [raining, friend, distance] (see the module
# docstring above).  Note Result[i] equals the third column of Data[i],
# so the targets are linearly separable on the "distance" feature alone.
Data = [[0, 0, 1],
        [0, 1, 0],
        [0, 1, 1],
        [1, 0, 0],
        [1, 0, 1],
        [1, 1, 0],
        [0, 0, 0],
        [1, 1, 1]]
# Binary training targets, one per row of Data.
Result = [1, 0, 1, 0, 1, 0, 0, 1]
# class NN1(object):
# '''with 2 layers'''
# def __init__(self, n_layers, x, y):
# self.n_layers = n_layers
# self.x = x
# self.y = y
# self.weights = [[0.0000001],[0.0000001],[0.0000001]]
# self.bias = 0
# self.output = 0
# self._y =0
# def gradient(self, epochs=10,learning_rate=0.001,*args):
# N = float(len(self.y))
# for _ in range(epochs):
# for i,value in enumerate(self.x):
# self._y = np.dot( value,self.weights) + self.bias
# cost = sum([data**2 for data in (self.y[i]-self._y)]) / N
# w_gradient = -(2/N) * sum(value * (self.y[i] - self._y))
# b_gradient = -(2/N) * sum(self.y[i] - self._y)
# self.weights = self.weights - (learning_rate * w_gradient)
# self.bias = self.bias - (learning_rate * b_gradient)
# return self.weights, self.bias, cost
# def predict(self, test):
# predicted_data = np.dot(test,self.weights) + self.bias
# return predicted_data
class NN2(object):
    """Single-layer perceptron with a sigmoid activation.

    Trains on binary feature rows ``x`` against binary targets ``y`` with
    the delta rule.  Bug fix: ``self.weights`` used to be a plain list of
    lists, so ``self.weights += <ndarray>`` called ``list.extend`` and
    *grew* the list from 3 to 6 entries instead of adding element-wise,
    which made the very next ``np.dot`` raise a shape error.  The weights
    are now held as a (3, 1) ndarray so ``+=`` performs vector addition.
    """

    def __init__(self, n_layers, x, y):
        # n_layers is stored for interface compatibility but unused by
        # this single-layer implementation.
        self.n_layers = n_layers
        self.x = x
        self.y = y
        # (3, 1) weight column, same initial value (1e-7) as before.
        self.weights = np.full((3, 1), 0.0000001)
        self.bias = 0
        self.output = 0
        self._y = 0

    def gradient(self, epochs=10, learning_rate=0.001, *args):
        """Run ``epochs`` passes of the delta rule and return the weights.

        ``learning_rate`` is accepted for interface compatibility but, as
        in the original implementation, is not applied to the update.
        """
        for _ in range(epochs):
            for i, value in enumerate(self.x):
                # Sigmoid activation of the linear combination.
                self._y = 1 / (1 + np.exp(-np.dot(value, self.weights)))
                error = self.y[i] - self._y
                # Delta rule: w += x^T * error * sigma'(z).
                self.weights += np.dot(value[np.newaxis].T,
                                        error * (self._y * (1 - self._y)[np.newaxis]))
        return self.weights

    def predict(self, test):
        """Return the raw linear activation for ``test`` (no sigmoid)."""
        predicted_data = np.dot(test, self.weights)
        return predicted_data
# Train the network on the hard-coded dataset and echo the learned weights.
nn = NN2(1,np.array(Data),np.array(Result))
print(nn.gradient(100))
# Read one comma-separated feature row from stdin (e.g. "0,1,1") and
# print the raw (pre-sigmoid) prediction for it.
asd = [int(i) for i in input().split(',')]
print(nn.predict(np.array(asd)))
| [
"numpy.dot",
"numpy.array"
] | [((2626, 2640), 'numpy.array', 'np.array', (['Data'], {}), '(Data)\n', (2634, 2640), True, 'import numpy as np\n'), ((2641, 2657), 'numpy.array', 'np.array', (['Result'], {}), '(Result)\n', (2649, 2657), True, 'import numpy as np\n'), ((2557, 2583), 'numpy.dot', 'np.dot', (['test', 'self.weights'], {}), '(test, self.weights)\n', (2563, 2583), True, 'import numpy as np\n'), ((2743, 2756), 'numpy.array', 'np.array', (['asd'], {}), '(asd)\n', (2751, 2756), True, 'import numpy as np\n'), ((2402, 2476), 'numpy.dot', 'np.dot', (['value[np.newaxis].T', '(error * (self._y * (1 - self._y)[np.newaxis]))'], {}), '(value[np.newaxis].T, error * (self._y * (1 - self._y)[np.newaxis]))\n', (2408, 2476), True, 'import numpy as np\n'), ((2296, 2323), 'numpy.dot', 'np.dot', (['value', 'self.weights'], {}), '(value, self.weights)\n', (2302, 2323), True, 'import numpy as np\n')] |
import numpy as np
import itertools as it
from warnings import warn
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
# Alias Biopython's BLOSUM62 substitution matrix and symmetrise it:
# matlist stores only one orientation of each amino-acid pair, so register
# the reversed tuple for every pair that lacks one.
blosum62 = matlist.blosum62
terms = {}
for pair, score in blosum62.items():
    flipped = pair[::-1]
    if flipped not in blosum62:
        terms[flipped] = score
blosum62.update(terms)
def sigmoid (z):
    """
    The sigmoid, or logistic, function defined for all real numbers z.
    Returns values on the interval [0., 1.] and is bijective.

    NOTE(review): this actually computes 1/(1+exp(z)) == sigma(-z), the
    *decreasing* logistic curve, so large positive inputs map to values
    near 0.  blosum62_distance relies on that orientation (high similarity
    score -> distance near 0), so the sign must not be "corrected" here.
    """
    return 1 / (1+np.exp(z))
def blosum62_score(s1, s2):
    """
    Lazily yield the BLOSUM62 similarity score for each aligned character
    pair of the two sequences.  Scores range from -4 to 11; pairs absent
    from the matrix raise a RuntimeWarning and score -4 (the matrix
    minimum).  Iteration stops at the shorter of the two sequences.
    """
    for pair in zip(s1, s2):
        if pair in matlist.blosum62:
            yield matlist.blosum62[pair]
        else:
            warn('unknown amino acid substitution encountered: {}'
                 .format(pair),
                 RuntimeWarning, stacklevel=2)
            yield -4
def blosum62_distance(s1, s2, weights=None, allowed_gaps=0):
    """
    Returns the distance between two sequences as measured by a logistic
    function on the domain [0., 1.].  A distance of 0. implies the
    sequences are the same, while a distance of 1. implies that the
    sequences are infinitely different.

    Fixes over the previous revision:
      * length/count comparisons use ``!=`` instead of ``is not`` (int
        identity is only reliable inside CPython's small-int cache);
      * ``list.sort`` returns None, so the alignment list is now ordered
        with ``sorted`` before filtering (the old code always fell into
        the except branch for unequal-length inputs);
      * ``itertools.filterfalse`` is not subscriptable; the first
        surviving alignment is taken with ``next``;
      * ``weights=None`` is no longer converted to ``np.array(None)``,
        which previously crashed the weighted branch.
    """
    if 0 in (len(s1), len(s2)):
        warn('empty sequence passed', RuntimeWarning, stacklevel=2)
        return 1.
    if len(s1) != len(s2):
        # Align the sequences and visit the highest-scoring alignments first.
        pairs = sorted(pairwise2.align.globaldx(s1, s2, matlist.blosum62),
                       key=lambda p: p[2], reverse=True)
        try:
            # Keep the best alignment with exactly `allowed_gaps` gaps.
            s1, s2 = next(it.filterfalse(
                lambda p: p[0].count('-') != allowed_gaps,
                pairs))[:2]
        except StopIteration:
            # No alignment satisfies the gap constraint.
            warn('insufficient or invalid number of allowed gaps',
                 RuntimeWarning, stacklevel=2)
            return 1.
    if weights is not None:
        if len(weights) != len(s1):
            print('{}, {}'.format(len(weights), len(s1)))
            raise ValueError('not enough weights for test data')
        if not isinstance(weights, (list, tuple)):
            weights = np.array(weights)
        return sigmoid(weights @ np.fromiter(blosum62_score(s1, s2), int))
    return sigmoid(sum(blosum62_score(s1, s2)))
| [
"warnings.warn",
"numpy.array",
"numpy.exp",
"Bio.pairwise2.align.globaldx"
] | [((1599, 1658), 'warnings.warn', 'warn', (['"""empty sequence passed"""', 'RuntimeWarning'], {'stacklevel': '(2)'}), "('empty sequence passed', RuntimeWarning, stacklevel=2)\n", (1603, 1658), False, 'from warnings import warn\n'), ((547, 556), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (553, 556), True, 'import numpy as np\n'), ((2630, 2647), 'numpy.array', 'np.array', (['weights'], {}), '(weights)\n', (2638, 2647), True, 'import numpy as np\n'), ((1795, 1845), 'Bio.pairwise2.align.globaldx', 'pairwise2.align.globaldx', (['s1', 's2', 'matlist.blosum62'], {}), '(s1, s2, matlist.blosum62)\n', (1819, 1845), False, 'from Bio import pairwise2\n'), ((2266, 2354), 'warnings.warn', 'warn', (['"""insufficient or invalid number of allowed gaps"""', 'RuntimeWarning'], {'stacklevel': '(2)'}), "('insufficient or invalid number of allowed gaps', RuntimeWarning,\n stacklevel=2)\n", (2270, 2354), False, 'from warnings import warn\n')] |
#!/usr/bin/python
from math import pi, sin, tan, sqrt
import numpy as np
from scipy import integrate
from neutpy.tools import isclose
import sys
def f(phi, xi, x_comp, y_comp, x_coords, y_coords, reg, mfp, fromcell, tocell, throughcell, Ki3_fit, li):
    """Integrand for the side-to-side transmission-coefficient integral.

    Evaluates (2 / (pi * L)) * sin(phi) * Ki3(l / mfp), where L is the
    length of the emitting side (-x_comp[-1]) and l = li(...) is the chord
    length from point xi on that side along angle phi.

    If the evaluation fails (typically because l / mfp falls outside the
    Ki3 fit's domain), diagnostics are printed and the Ki3 argument is
    clamped to 100 — Ki3 is effectively zero beyond ~100 mean free paths.
    Fixes over the previous revision: the bare ``except:`` is narrowed to
    ``Exception``, and the error path always returns a float (it used to
    implicitly return None whenever l / mfp <= 100, which would poison the
    surrounding integration).
    """
    try:
        result = (2.0 / (pi * -1 * x_comp[-1])) * sin(phi) * Ki3_fit(li(phi, xi, x_coords, y_coords, reg) / mfp)
        return result
    except Exception:
        print()
        print('something went wrong when evaluating A transmission coefficient:')
        print('li = ', li(phi, xi, x_coords, y_coords, reg))
        print('mfp = ', mfp)
        print('li/mfp = ', li(phi, xi, x_coords, y_coords, reg) / mfp)
        print('fromcell = ', fromcell)
        print('tocell = ', tocell)
        print('throughcell = ', throughcell)
        print()
        # Clamp the optical depth so the fit is evaluated inside its domain.
        ratio = min(li(phi, xi, x_coords, y_coords, reg) / mfp, 100.0)
        result = (2.0 / (pi * -1 * x_comp[-1])) * sin(phi) * Ki3_fit(ratio)
        return result
def li(phi, xi, x_coords, y_coords, reg):
    """Chord length from the point xi on the emitting side, along angle
    phi, to the cell side the ray intersects.

    The vertex abscissae are shifted so the evaluation point sits at the
    origin, the side subtending phi is located from the vertex angles,
    and the Euclidean distance to the intersection of the ray
    y = tan(phi) * x with that side is returned.  The incoming ``reg`` is
    recomputed locally from phi (as in the original implementation).
    """
    shifted_x = x_coords - xi
    vertex_angles = np.arctan2(y_coords, shifted_x)
    # Pin the end vertices to the exact angular limits of the half-plane.
    vertex_angles[0] = 0
    vertex_angles[-1] = pi
    offset = 1 if phi < pi else 2
    reg = np.searchsorted(vertex_angles, phi, side='right') - offset
    # Endpoints of the side the ray crosses; its line is
    # y = ((y2 - y1) / (x2 - x1)) * (x - x1) + y1.
    ax, ay = shifted_x[reg], y_coords[reg]
    bx, by = shifted_x[reg + 1], y_coords[reg + 1]
    if isclose(bx, ax):
        # Vertical side: the intersection abscissa is fixed.
        ix = ax
        iy = tan(phi) * ix
    else:
        # Solve tan(phi) * x = slope * (x - ax) + ay for x, then for y.
        slope = (by - ay) / (bx - ax)
        ix = (slope * ax - ay) / (slope - tan(phi))
        iy = tan(phi) * ix
    return sqrt(ix ** 2 + iy ** 2)
def phi_limits(xi, x_comp, y_comp, x_coords, y_coords, reg, mfp, fromcell, tocell, throughcell, Ki3_fit, li):
    """Angular integration limits [phi_lo, phi_hi] subtended by side
    segment ``reg`` as seen from the point xi on the emitting side."""
    shifted = x_coords - xi
    angles = np.arctan2(y_coords, shifted)
    # Pin the end vertices to the exact angular limits of the half-plane.
    angles[0] = 0
    angles[-1] = pi
    return [angles[reg], angles[reg + 1]]
def xi_limits(x_comp, y_comp, x_coords, y_coords, reg, mfp, fromcell, tocell, throughcell, Ki3_fit, li):
    """Integration limits along the emitting side: 0 to its length, which
    is the negated x-component of the final (closing) side."""
    return [0, -x_comp[-1]]
def coeff_calc(inputs, *args, **kwargs):
    """Compute first-flight transmission coefficients T(from -> to via i).

    ``inputs[0]`` is an (i, j, k) index triple: cell i, "from" side j and
    "to" side k.  All geometry and result arrays arrive through **kwargs;
    the T_from/T_to/T_via/T_coef_* arrays are mutated in place as well as
    returned.  The double integral over (xi, phi) is evaluated either
    with the local midpoint rule or with scipy's adaptive
    ``integrate.nquad``, once per mean free path (``_s`` and ``_t`` —
    presumably slow/thermal neutral populations; TODO confirm against
    the caller).
    """
    i = inputs[0][0]
    j = inputs[0][1]
    k = inputs[0][2]
    def midpoint2D(f, f_limx, f_limy, nx, ny, **kwargs):
        """calculates a double integral using the midpoint rule"""
        I = 0
        # start with outside (y) limits of integration
        c, d = f_limy(**kwargs)
        hy = (d - c) / float(ny)
        for j in range(ny):
            yj = c + hy / 2 + j * hy
            # for each j, calculate inside limits of integration
            a, b = f_limx(yj, **kwargs)
            hx = (b - a) / float(nx)
            for i in range(nx):
                xi = a + hx / 2 + i * hx
                I += hx * hy * f(xi, yj, **kwargs)
        return I
    # Unpack the shared state passed in by the driver.
    nSides = kwargs['nSides']
    adjCell = kwargs['adjCell']
    lsides = kwargs['lsides']
    T_from = kwargs['T_from']
    T_to = kwargs['T_to']
    T_via = kwargs['T_via']
    int_method = kwargs['int_method']
    T_coef_s = kwargs['T_coef_s']
    T_coef_t = kwargs['T_coef_t']
    face_mfp_t = kwargs['face_mfp_t']
    face_mfp_s = kwargs['face_mfp_s']
    print_progress = kwargs['print_progress']
    outof = kwargs['outof']
    selfAngles = kwargs['angles']
    Ki3_fit = kwargs['Ki3_fit']
    li = kwargs['li']
    # progress = nSides[i] ** 2 * i # + self.nSides[i]*j + k
    # Rotate the per-cell arrays so index 0 is the "from" side j.
    L_sides = np.roll(lsides[i, :nSides[i]], -(j + 1))  # begins with length of the current "from" side
    adj_cells = np.roll(adjCell[i, :nSides[i]], -j)
    angles = np.roll(selfAngles[i, :nSides[i]], -j) * 2 * pi / 360  # converted to radians
    # NOTE(review): 2*pi - (pi - a) simplifies to pi + a; kept as written.
    angles[1:] = 2 * pi - (pi - angles[1:])
    if k < adj_cells.size and j < adj_cells.size:
        T_from[i, j, k] = adj_cells[0]
        T_to[i, j, k] = adj_cells[k - j]
        T_via[i, j, k] = i
        if j == k:
            # All flux from a side back through itself must have at least one collision
            T_coef_s[i, j, k] = 0.0
            T_coef_t[i, j, k] = 0.0
            # trans_coef_file.write(
            #     ('{:>6d}' * 3 + '{:>12.3E}' * 4 + '\n').format(int(T_from[i, j, k]), int(T_to[i, j, k]),
            #                                                    int(T_via[i, j, k]), T_coef_s[i, j, k],
            #                                                    T_coef_t[i, j, k], face.mfp.s[i, k], face.mfp.t[i, k]))
        else:
            # Build the local coordinate system: walk the rotated sides to
            # get the (x, y) vertex coordinates relative to the "from" side.
            side_thetas = np.cumsum(angles)
            x_comp = np.cos(side_thetas) * L_sides
            y_comp = np.sin(side_thetas) * L_sides
            y_coords = np.roll(np.flipud(np.cumsum(y_comp)), -1)
            x_coords = np.roll(np.flipud(np.cumsum(x_comp)),
                               -1)  # this gets adjusted for xi later, as part of the integration process
            reg = np.where(np.flipud(adj_cells[1:]) == T_to[i, j, k])[0][0]
            if int_method == 'midpoint':
                kwargs_s = {"x_comp": x_comp,
                            "y_comp": y_comp,
                            "x_coords": x_coords,
                            "y_coords": y_coords,
                            "reg": reg,
                            "mfp": face_mfp_s[i, j],  # not sure if this is j or k
                            "fromcell": adj_cells[0],
                            "tocell": adj_cells[k - j],
                            "throughcell": i}
                kwargs_t = {"x_comp": x_comp,
                            "y_comp": y_comp,
                            "x_coords": x_coords,
                            "y_coords": y_coords,
                            "reg": reg,
                            "mfp": face_mfp_t[i, j],
                            "fromcell": adj_cells[0],
                            "tocell": adj_cells[k - j],
                            "throughcell": i}
                nx = 10
                ny = 10
                T_coef_t[i, j, k] = midpoint2D(f, phi_limits, xi_limits, nx, ny, **kwargs_t)
                T_coef_s[i, j, k] = midpoint2D(f, phi_limits, xi_limits, nx, ny, **kwargs_s)
            elif int_method == 'quad':
                # T_coef_s[i, j, k] = 0
                # T_coef_t[i, j, k] = 0
                # Adaptive quadrature with loose tolerances ('limit', 2)
                # to bound the cost of each coefficient.
                T_coef_s[i, j, k] = integrate.nquad(f, [phi_limits, xi_limits],
                                                    args=(x_comp,
                                                          y_comp,
                                                          x_coords,
                                                          y_coords,
                                                          reg,
                                                          face_mfp_s[i, j],
                                                          adj_cells[0],
                                                          adj_cells[k - j],
                                                          i,
                                                          Ki3_fit,
                                                          li),
                                                    opts=dict([('epsabs', 1.49e-2),
                                                               ('epsrel', 10.00e-4),
                                                               ('limit', 2)]))[0]
                T_coef_t[i, j, k] = integrate.nquad(f, [phi_limits, xi_limits],
                                                    args=(x_comp,
                                                          y_comp,
                                                          x_coords,
                                                          y_coords,
                                                          reg,
                                                          face_mfp_t[i, j],
                                                          adj_cells[0],
                                                          adj_cells[k - j],
                                                          i,
                                                          Ki3_fit,
                                                          li),
                                                    opts=dict([('epsabs', 1.49e-2),
                                                               ('epsrel', 10.00e-4),
                                                               ('limit', 2)]))[0]
            # stop if nan is detected
            if np.isnan(T_coef_t[i, j, k]) or np.isnan(T_coef_s[i, j, k]):
                print('T_coef = nan detected')
                print('i, j, k = ', i, j, k)
                print(('T_coef_t[i, j, k] = ', (T_coef_t[i, j, k])))
                print(('T_coef_s[i, j, k] = ', (T_coef_s[i, j, k])))
                print()
                print('x_comp = ', x_comp)
                print('y_comp = ', y_comp)
                print('x_coords = ', x_coords)
                print('y_coords = ', y_coords)
                print('reg = ', reg)
                # = print 'face.mfp.t[i, j] = ', face.mfp.t[i, j]
                print('adj_cells[0] = ', adj_cells[0])
                print('adj_cells[k-j] = ', adj_cells[k - j])
                sys.exit()
            else:
                pass
    #
    # trans_coef_file.write(('{:>6d}' * 3 + '{:>12.3E}' * 4 + '\n').format(int(T_from[i, j, k]),
    #                                                                      int(T_to[i, j, k]),
    #                                                                      int(T_via[i, j, k]),
    #                                                                      T_coef_s[i, j, k],
    #                                                                      T_coef_t[i, j, k],
    #                                                                      face.mfp.s[i, j],
    #                                                                      face.mfp.t[i, j]))
    return i, j, k, T_coef_s[i, j, k], T_coef_t[i, j, k], T_from[i, j, k], T_to[i, j, k], T_via[i, j, k]
"numpy.arctan2",
"math.sqrt",
"numpy.roll",
"math.tan",
"numpy.searchsorted",
"math.sin",
"numpy.isnan",
"numpy.flipud",
"numpy.cumsum",
"numpy.sin",
"numpy.cos",
"neutpy.tools.isclose",
"sys.exit"
] | [((1059, 1089), 'numpy.arctan2', 'np.arctan2', (['y_coords', 'x_coords'], {}), '(y_coords, x_coords)\n', (1069, 1089), True, 'import numpy as np\n'), ((1550, 1565), 'neutpy.tools.isclose', 'isclose', (['x2', 'x1'], {}), '(x2, x1)\n', (1557, 1565), False, 'from neutpy.tools import isclose\n'), ((1951, 1980), 'math.sqrt', 'sqrt', (['(x_int ** 2 + y_int ** 2)'], {}), '(x_int ** 2 + y_int ** 2)\n', (1955, 1980), False, 'from math import pi, sin, tan, sqrt\n'), ((2138, 2168), 'numpy.arctan2', 'np.arctan2', (['y_coords', 'x_coords'], {}), '(y_coords, x_coords)\n', (2148, 2168), True, 'import numpy as np\n'), ((3712, 3752), 'numpy.roll', 'np.roll', (['lsides[i, :nSides[i]]', '(-(j + 1))'], {}), '(lsides[i, :nSides[i]], -(j + 1))\n', (3719, 3752), True, 'import numpy as np\n'), ((3818, 3853), 'numpy.roll', 'np.roll', (['adjCell[i, :nSides[i]]', '(-j)'], {}), '(adjCell[i, :nSides[i]], -j)\n', (3825, 3853), True, 'import numpy as np\n'), ((1166, 1211), 'numpy.searchsorted', 'np.searchsorted', (['vert_phis', 'phi'], {'side': '"""right"""'}), "(vert_phis, phi, side='right')\n", (1181, 1211), True, 'import numpy as np\n'), ((1240, 1285), 'numpy.searchsorted', 'np.searchsorted', (['vert_phis', 'phi'], {'side': '"""right"""'}), "(vert_phis, phi, side='right')\n", (1255, 1285), True, 'import numpy as np\n'), ((1627, 1635), 'math.tan', 'tan', (['phi'], {}), '(phi)\n', (1630, 1635), False, 'from math import pi, sin, tan, sqrt\n'), ((1922, 1930), 'math.tan', 'tan', (['phi'], {}), '(phi)\n', (1925, 1930), False, 'from math import pi, sin, tan, sqrt\n'), ((4737, 4754), 'numpy.cumsum', 'np.cumsum', (['angles'], {}), '(angles)\n', (4746, 4754), True, 'import numpy as np\n'), ((311, 319), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (314, 319), False, 'from math import pi, sin, tan, sqrt\n'), ((1896, 1904), 'math.tan', 'tan', (['phi'], {}), '(phi)\n', (1899, 1904), False, 'from math import pi, sin, tan, sqrt\n'), ((3867, 3905), 'numpy.roll', 'np.roll', (['selfAngles[i, :nSides[i]]', 
'(-j)'], {}), '(selfAngles[i, :nSides[i]], -j)\n', (3874, 3905), True, 'import numpy as np\n'), ((4777, 4796), 'numpy.cos', 'np.cos', (['side_thetas'], {}), '(side_thetas)\n', (4783, 4796), True, 'import numpy as np\n'), ((4828, 4847), 'numpy.sin', 'np.sin', (['side_thetas'], {}), '(side_thetas)\n', (4834, 4847), True, 'import numpy as np\n'), ((8688, 8715), 'numpy.isnan', 'np.isnan', (['T_coef_t[i, j, k]'], {}), '(T_coef_t[i, j, k])\n', (8696, 8715), True, 'import numpy as np\n'), ((8719, 8746), 'numpy.isnan', 'np.isnan', (['T_coef_s[i, j, k]'], {}), '(T_coef_s[i, j, k])\n', (8727, 8746), True, 'import numpy as np\n'), ((9420, 9430), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9428, 9430), False, 'import sys\n'), ((4900, 4917), 'numpy.cumsum', 'np.cumsum', (['y_comp'], {}), '(y_comp)\n', (4909, 4917), True, 'import numpy as np\n'), ((4965, 4982), 'numpy.cumsum', 'np.cumsum', (['x_comp'], {}), '(x_comp)\n', (4974, 4982), True, 'import numpy as np\n'), ((917, 925), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (920, 925), False, 'from math import pi, sin, tan, sqrt\n'), ((5119, 5143), 'numpy.flipud', 'np.flipud', (['adj_cells[1:]'], {}), '(adj_cells[1:])\n', (5128, 5143), True, 'import numpy as np\n')] |
import os
import numpy as np
import pickle
import re
def load_dict(dict_path):
    """Load and return a pickled dictionary from *dict_path*."""
    with open(dict_path, 'rb') as handle:
        return pickle.load(handle)
def parse_search(workspace,corpus_name,node,pos,case_sensitive,regexp):
    """Parses the input search and returns a tuple with encoded word and pos node.

    Parameters:
        workspace: base directory containing the corpora
        corpus_name: corpus sub-directory name
        node: search expression, whitespace-separated word tokens
        pos: optional part-of-speech constraint; a list of tags or a
            whitespace-separated string, or None to disable POS filtering
        case_sensitive: if False, the lowercase word index is used and the
            node is lowercased before encoding
        regexp: if True, each node token is treated as a regular expression
            matched against every entry of the word index

    Returns:
        (encoded_node, encoded_pos): lists with one int ndarray of candidate
        ids per token position; encoded_node is None when any token has no
        match, encoded_pos is None when any tag has no match or pos is None.
    """
    # load dictionaries for encoding words to numbers
    if case_sensitive == False:
        dict_words = load_dict(workspace+corpus_name+'/data/idx/wordslc.pickle') # lowercase
    else:
        dict_words = load_dict(workspace+corpus_name+'/data/idx/words.pickle')
    # invert so the mapping goes word -> id(s)
    # NOTE(review): the values are later passed to np.array(..., dtype=int)
    # and extended with += in the regexp branch, so ids are presumably stored
    # as lists -- confirm against the index builder.
    dict_words = {v: k for k, v in dict_words.items()}
    dict_tags = load_dict(workspace+corpus_name+'/data/idx/tags.pickle')
    dict_tags = {v: k for k, v in dict_tags.items()}
    # parse node: split on blanks, drop empty tokens
    n = []
    if case_sensitive == False:
        node = node.lower()
    for w in node.strip().split(' '):
        if len(w.strip()) != 0:
            n.append(w.strip())
    # parse pos: accepts either a list of tags or a blank-separated string
    if pos != None:
        p = []
        if type(pos) == list:
            for t in pos:
                if len(t.strip())!= 0:
                    p.append(t.strip())
        else:
            for t in pos.strip().split(' '):
                if len(t.strip())!=0:
                    p.append(t.strip())
    # encode node: one ndarray of word ids per token position
    encoded_node = []
    if regexp == False:
        for w in n:
            if w in dict_words:
                arr = np.array(dict_words[w],dtype=int)
                encoded_node.append(arr)
            else: # if any word has no match, return None
                encoded_node = None
                break
    else: # use regexp: collect the ids of every index entry the pattern matches
        patterns = []
        for ptrn in n:
            patterns.append(re.compile(ptrn))
        for ptrn in patterns:
            a = []
            for k in dict_words:
                if ptrn.match(k) != None:
                    a+=dict_words[k]
            if len(a)!=0:
                arr = np.array(a,dtype=int)
                encoded_node.append(arr)
            else: # if any pattern has no match, return None
                encoded_node = None
                break
    # encode pos: one ndarray of tag ids per position
    if pos != None:
        encoded_pos = []
        for t in p:
            if t in dict_tags:
                arr = np.array(dict_tags[t],dtype=int)
                encoded_pos.append(arr)
            else: # if any tag has no match, return None
                encoded_pos = None
                break
    else:
        encoded_pos = None
    return (encoded_node, encoded_pos)
def search_node(npy_path, encoded_node, encoded_pos, horizon):
    """Search the node in every .npy file under *npy_path* and collect contexts."""
    size = len(encoded_node)
    # zero padding on both ends prevents index-out-of-bounds when extracting
    # contexts near a file boundary (np.concatenate copies, so it can be shared)
    pad = np.zeros((horizon + size, 4), dtype=int)
    contexts = []
    for filename in os.listdir(npy_path):
        arr = np.concatenate([pad, np.load(npy_path + filename), pad])
        # indexes of all matches, then the surrounding contexts
        matches = arr_search_indexes(arr, encoded_node, encoded_pos)
        contexts.extend(get_context(arr, matches, horizon, size))
    return contexts
def arr_search_indexes(arr, search_w, search_t):
    """Search the node in arr and return the matching start indexes.

    Generalized to any phrase length: the original hand-unrolled the logic
    for sizes 1-3 and raised NameError (unbound `ix`) for longer phrases.

    Parameters:
        arr: (N, 4) int ndarray; column 0 holds word ids, column 1 tag ids
        search_w: list of int ndarrays, acceptable word ids per token position
        search_t: list of int ndarrays (acceptable tag ids per position),
            or None to search words only

    Returns:
        int ndarray of row indexes where the full sequence matches.
    """
    size = len(search_w)
    n = arr.shape[0]
    # a phrase of `size` tokens can only start in the first n-size+1 rows
    span = max(n - size + 1, 0)
    match = np.ones((span, 1), dtype=bool)
    for offset in range(size):
        w_hit = np.isin(arr[:, [0]], search_w[offset])
        if not np.any(w_hit):
            # word absent everywhere: empty index array (preserves the
            # original early-return value, np.where of an all-False mask)
            return np.where(w_hit)[0]
        # align the mask for token position `offset` with the start rows
        match &= w_hit[offset:offset + span]
        if search_t is not None:
            t_hit = np.isin(arr[:, [1]], search_t[offset])
            if not np.any(t_hit):
                return np.where(t_hit)[0]
            match &= t_hit[offset:offset + span]
    return np.where(match)[0]
def get_context(arr, indexes, horizon, size):
    """Build (left, node, right, token_id, sent_id, file_id) tuples for each
    match index, taking *horizon* tokens on each side of a *size*-token node."""
    left_off = np.arange(-horizon, 0)
    node_off = np.arange(size)
    right_off = np.arange(size, size + horizon)
    contexts = []
    for i in indexes:
        contexts.append((arr[i + left_off, 0],    # left horizon words
                         arr[i + node_off, 0],    # node words
                         arr[i + right_off, 0],   # right horizon words
                         i,                       # token id
                         arr[i, 2],               # sentence id
                         arr[i, 3]))              # file id
    return contexts
def translate(contexts, dict_words, dict_files):
    """Translate numbers back to words in a list format for kitconc KWIC object.

    Parameters:
        contexts: tuples (left_ids, node_ids, right_ids, token_id, sent_id, file_id)
        dict_words: mapping word id -> word string
        dict_files: mapping filename record -> file id (inverted here)

    Returns:
        list of rows (row_number, left, node, right, filename, token, sent, file).
    """
    dict_files = {v: k for k, v in dict_files.items()}  # reverse keys
    kwic = []
    counter = 0
    for context in contexts:
        counter += 1
        left, node, right = [], [], []
        # BUG FIX: the original reused `i` as both the row counter and the
        # inner token-loop variable, so the emitted row number was the last
        # token id instead of the running counter. 0 is the padding id.
        for token in context[0]:
            if token != 0:
                left.append(dict_words[token])
        for token in context[1]:
            if token != 0:
                node.append(dict_words[token])
        for token in context[2]:
            if token != 0:
                right.append(dict_words[token])
        kwic.append((counter, ' '.join(left), ' '.join(node), ' '.join(right),
                     dict_files[context[5]][0],
                     str(context[3]), str(context[4]), str(context[5])))
    return kwic
def make_kwic(workspace, corpus_name, node, pos, case_sensitive, regexp, horizon):
    """Build KWIC (keyword in context) rows for *node* in the given corpus.

    Returns:
        (node_length, kwic_rows); node_length is 0 and the rows empty when
        the search node has no match in the corpus dictionaries.
    """
    encoded_node, encoded_pos = parse_search(workspace, corpus_name, node, pos,
                                             case_sensitive, regexp)
    # idiom fix: compare with None by identity, not equality (equality can
    # trigger element-wise semantics on array-like values)
    if encoded_node is None:
        # no match in search node
        return (0, [])
    nl = len(encoded_node)
    contexts = search_node(workspace + corpus_name + '/data/npy/',
                           encoded_node, encoded_pos, horizon)
    if not contexts:
        return (nl, [])
    dict_words = load_dict(workspace + corpus_name + '/data/idx/words.pickle')
    dict_files = load_dict(workspace + corpus_name + '/data/idx/filenames.pickle')
    return (nl, translate(contexts, dict_words, dict_files))
| [
"numpy.isin",
"numpy.load",
"numpy.zeros",
"numpy.any",
"pickle.load",
"numpy.array",
"numpy.arange",
"numpy.where",
"os.listdir",
"re.compile"
] | [((2745, 2765), 'os.listdir', 'os.listdir', (['npy_path'], {}), '(npy_path)\n', (2755, 2765), False, 'import os\n'), ((6874, 6899), 'numpy.arange', 'np.arange', (['horizon', '(0)', '(-1)'], {}), '(horizon, 0, -1)\n', (6883, 6899), True, 'import numpy as np\n'), ((6908, 6932), 'numpy.arange', 'np.arange', (['(0)', 'horizon', '(1)'], {}), '(0, horizon, 1)\n', (6917, 6932), True, 'import numpy as np\n'), ((6943, 6961), 'numpy.arange', 'np.arange', (['(0)', 'size'], {}), '(0, size)\n', (6952, 6961), True, 'import numpy as np\n'), ((180, 195), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (191, 195), False, 'import pickle\n'), ((3489, 3519), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w'], {}), '(arr[:, [0]], search_w)\n', (3496, 3519), True, 'import numpy as np\n'), ((4819, 4849), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w'], {}), '(arr[:, [0]], search_w)\n', (4826, 4849), True, 'import numpy as np\n'), ((4865, 4895), 'numpy.isin', 'np.isin', (['arr[:, [1]]', 'search_t'], {}), '(arr[:, [1]], search_t)\n', (4872, 4895), True, 'import numpy as np\n'), ((1479, 1513), 'numpy.array', 'np.array', (['dict_words[w]'], {'dtype': 'int'}), '(dict_words[w], dtype=int)\n', (1487, 1513), True, 'import numpy as np\n'), ((1767, 1783), 're.compile', 're.compile', (['ptrn'], {}), '(ptrn)\n', (1777, 1783), False, 'import re\n'), ((1994, 2016), 'numpy.array', 'np.array', (['a'], {'dtype': 'int'}), '(a, dtype=int)\n', (2002, 2016), True, 'import numpy as np\n'), ((2312, 2345), 'numpy.array', 'np.array', (['dict_tags[t]'], {'dtype': 'int'}), '(dict_tags[t], dtype=int)\n', (2320, 2345), True, 'import numpy as np\n'), ((2931, 2971), 'numpy.zeros', 'np.zeros', (['(horizon + size, 4)'], {'dtype': 'int'}), '((horizon + size, 4), dtype=int)\n', (2939, 2971), True, 'import numpy as np\n'), ((2968, 2996), 'numpy.load', 'np.load', (['(npy_path + filename)'], {}), '(npy_path + filename)\n', (2975, 2996), True, 'import numpy as np\n'), ((2997, 3037), 'numpy.zeros', 
'np.zeros', (['(horizon + size, 4)'], {'dtype': 'int'}), '((horizon + size, 4), dtype=int)\n', (3005, 3037), True, 'import numpy as np\n'), ((3535, 3547), 'numpy.where', 'np.where', (['ix'], {}), '(ix)\n', (3543, 3547), True, 'import numpy as np\n'), ((3609, 3642), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[0]'], {}), '(arr[:, [0]], search_w[0])\n', (3616, 3642), True, 'import numpy as np\n'), ((3749, 3782), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[1]'], {}), '(arr[:, [0]], search_w[1])\n', (3756, 3782), True, 'import numpy as np\n'), ((3898, 3927), 'numpy.arange', 'np.arange', (['(0)', '(x1.shape[0] - 1)'], {}), '(0, x1.shape[0] - 1)\n', (3907, 3927), True, 'import numpy as np\n'), ((3941, 3966), 'numpy.arange', 'np.arange', (['(1)', 'x1.shape[0]'], {}), '(1, x1.shape[0])\n', (3950, 3966), True, 'import numpy as np\n'), ((4911, 4928), 'numpy.where', 'np.where', (['(iw & it)'], {}), '(iw & it)\n', (4919, 4928), True, 'import numpy as np\n'), ((4993, 5026), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[0]'], {}), '(arr[:, [0]], search_w[0])\n', (5000, 5026), True, 'import numpy as np\n'), ((5042, 5075), 'numpy.isin', 'np.isin', (['arr[:, [1]]', 'search_t[0]'], {}), '(arr[:, [1]], search_t[0])\n', (5049, 5075), True, 'import numpy as np\n'), ((5258, 5291), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[1]'], {}), '(arr[:, [0]], search_w[1])\n', (5265, 5291), True, 'import numpy as np\n'), ((5307, 5340), 'numpy.isin', 'np.isin', (['arr[:, [1]]', 'search_t[1]'], {}), '(arr[:, [1]], search_t[1])\n', (5314, 5340), True, 'import numpy as np\n'), ((5529, 5558), 'numpy.arange', 'np.arange', (['(0)', '(x1.shape[0] - 1)'], {}), '(0, x1.shape[0] - 1)\n', (5538, 5558), True, 'import numpy as np\n'), ((5572, 5597), 'numpy.arange', 'np.arange', (['(1)', 'x1.shape[0]'], {}), '(1, x1.shape[0])\n', (5581, 5597), True, 'import numpy as np\n'), ((3656, 3666), 'numpy.any', 'np.any', (['x1'], {}), '(x1)\n', (3662, 3666), True, 'import numpy as np\n'), 
((3796, 3806), 'numpy.any', 'np.any', (['x2'], {}), '(x2)\n', (3802, 3806), True, 'import numpy as np\n'), ((4012, 4035), 'numpy.where', 'np.where', (['(x1[x] & x2[y])'], {}), '(x1[x] & x2[y])\n', (4020, 4035), True, 'import numpy as np\n'), ((4120, 4153), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[0]'], {}), '(arr[:, [0]], search_w[0])\n', (4127, 4153), True, 'import numpy as np\n'), ((4259, 4292), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[1]'], {}), '(arr[:, [0]], search_w[1])\n', (4266, 4292), True, 'import numpy as np\n'), ((4398, 4431), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[2]'], {}), '(arr[:, [0]], search_w[2])\n', (4405, 4431), True, 'import numpy as np\n'), ((4547, 4576), 'numpy.arange', 'np.arange', (['(0)', '(x1.shape[0] - 2)'], {}), '(0, x1.shape[0] - 2)\n', (4556, 4576), True, 'import numpy as np\n'), ((4590, 4619), 'numpy.arange', 'np.arange', (['(1)', '(x1.shape[0] - 1)'], {}), '(1, x1.shape[0] - 1)\n', (4599, 4619), True, 'import numpy as np\n'), ((4633, 4658), 'numpy.arange', 'np.arange', (['(2)', 'x1.shape[0]'], {}), '(2, x1.shape[0])\n', (4642, 4658), True, 'import numpy as np\n'), ((5089, 5099), 'numpy.any', 'np.any', (['x1'], {}), '(x1)\n', (5095, 5099), True, 'import numpy as np\n'), ((5162, 5172), 'numpy.any', 'np.any', (['t1'], {}), '(t1)\n', (5168, 5172), True, 'import numpy as np\n'), ((5354, 5364), 'numpy.any', 'np.any', (['x2'], {}), '(x2)\n', (5360, 5364), True, 'import numpy as np\n'), ((5427, 5437), 'numpy.any', 'np.any', (['t2'], {}), '(t2)\n', (5433, 5437), True, 'import numpy as np\n'), ((5644, 5683), 'numpy.where', 'np.where', (['(x1[x] & t1[x] & x2[y] & t2[y])'], {}), '(x1[x] & t1[x] & x2[y] & t2[y])\n', (5652, 5683), True, 'import numpy as np\n'), ((5748, 5781), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[0]'], {}), '(arr[:, [0]], search_w[0])\n', (5755, 5781), True, 'import numpy as np\n'), ((5797, 5830), 'numpy.isin', 'np.isin', (['arr[:, [1]]', 'search_t[0]'], {}), '(arr[:, [1]], 
search_t[0])\n', (5804, 5830), True, 'import numpy as np\n'), ((6012, 6045), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[1]'], {}), '(arr[:, [0]], search_w[1])\n', (6019, 6045), True, 'import numpy as np\n'), ((6061, 6094), 'numpy.isin', 'np.isin', (['arr[:, [1]]', 'search_t[1]'], {}), '(arr[:, [1]], search_t[1])\n', (6068, 6094), True, 'import numpy as np\n'), ((6276, 6309), 'numpy.isin', 'np.isin', (['arr[:, [0]]', 'search_w[2]'], {}), '(arr[:, [0]], search_w[2])\n', (6283, 6309), True, 'import numpy as np\n'), ((6325, 6358), 'numpy.isin', 'np.isin', (['arr[:, [1]]', 'search_t[2]'], {}), '(arr[:, [1]], search_t[2])\n', (6332, 6358), True, 'import numpy as np\n'), ((6547, 6576), 'numpy.arange', 'np.arange', (['(0)', '(x1.shape[0] - 2)'], {}), '(0, x1.shape[0] - 2)\n', (6556, 6576), True, 'import numpy as np\n'), ((6590, 6619), 'numpy.arange', 'np.arange', (['(1)', '(x1.shape[0] - 1)'], {}), '(1, x1.shape[0] - 1)\n', (6599, 6619), True, 'import numpy as np\n'), ((6633, 6658), 'numpy.arange', 'np.arange', (['(2)', 'x1.shape[0]'], {}), '(2, x1.shape[0])\n', (6642, 6658), True, 'import numpy as np\n'), ((3698, 3710), 'numpy.where', 'np.where', (['x1'], {}), '(x1)\n', (3706, 3710), True, 'import numpy as np\n'), ((3838, 3850), 'numpy.where', 'np.where', (['x2'], {}), '(x2)\n', (3846, 3850), True, 'import numpy as np\n'), ((4167, 4177), 'numpy.any', 'np.any', (['x1'], {}), '(x1)\n', (4173, 4177), True, 'import numpy as np\n'), ((4306, 4316), 'numpy.any', 'np.any', (['x2'], {}), '(x2)\n', (4312, 4316), True, 'import numpy as np\n'), ((4445, 4455), 'numpy.any', 'np.any', (['x3'], {}), '(x3)\n', (4451, 4455), True, 'import numpy as np\n'), ((4704, 4735), 'numpy.where', 'np.where', (['(x1[x] & x2[y] & x3[z])'], {}), '(x1[x] & x2[y] & x3[z])\n', (4712, 4735), True, 'import numpy as np\n'), ((5131, 5143), 'numpy.where', 'np.where', (['x1'], {}), '(x1)\n', (5139, 5143), True, 'import numpy as np\n'), ((5204, 5216), 'numpy.where', 'np.where', (['t1'], {}), '(t1)\n', 
(5212, 5216), True, 'import numpy as np\n'), ((5396, 5408), 'numpy.where', 'np.where', (['x2'], {}), '(x2)\n', (5404, 5408), True, 'import numpy as np\n'), ((5469, 5481), 'numpy.where', 'np.where', (['t2'], {}), '(t2)\n', (5477, 5481), True, 'import numpy as np\n'), ((5844, 5854), 'numpy.any', 'np.any', (['x1'], {}), '(x1)\n', (5850, 5854), True, 'import numpy as np\n'), ((5917, 5927), 'numpy.any', 'np.any', (['t1'], {}), '(t1)\n', (5923, 5927), True, 'import numpy as np\n'), ((6108, 6118), 'numpy.any', 'np.any', (['x2'], {}), '(x2)\n', (6114, 6118), True, 'import numpy as np\n'), ((6181, 6191), 'numpy.any', 'np.any', (['t2'], {}), '(t2)\n', (6187, 6191), True, 'import numpy as np\n'), ((6372, 6382), 'numpy.any', 'np.any', (['x3'], {}), '(x3)\n', (6378, 6382), True, 'import numpy as np\n'), ((6445, 6455), 'numpy.any', 'np.any', (['t3'], {}), '(t3)\n', (6451, 6455), True, 'import numpy as np\n'), ((6704, 6759), 'numpy.where', 'np.where', (['(x1[x] & t1[x] & x2[y] & t2[y] & x3[z] & t3[z])'], {}), '(x1[x] & t1[x] & x2[y] & t2[y] & x3[z] & t3[z])\n', (6712, 6759), True, 'import numpy as np\n'), ((4209, 4221), 'numpy.where', 'np.where', (['x1'], {}), '(x1)\n', (4217, 4221), True, 'import numpy as np\n'), ((4348, 4360), 'numpy.where', 'np.where', (['x2'], {}), '(x2)\n', (4356, 4360), True, 'import numpy as np\n'), ((4487, 4499), 'numpy.where', 'np.where', (['x3'], {}), '(x3)\n', (4495, 4499), True, 'import numpy as np\n'), ((5886, 5898), 'numpy.where', 'np.where', (['x1'], {}), '(x1)\n', (5894, 5898), True, 'import numpy as np\n'), ((5959, 5971), 'numpy.where', 'np.where', (['t1'], {}), '(t1)\n', (5967, 5971), True, 'import numpy as np\n'), ((6150, 6162), 'numpy.where', 'np.where', (['x2'], {}), '(x2)\n', (6158, 6162), True, 'import numpy as np\n'), ((6223, 6235), 'numpy.where', 'np.where', (['t2'], {}), '(t2)\n', (6231, 6235), True, 'import numpy as np\n'), ((6414, 6426), 'numpy.where', 'np.where', (['x3'], {}), '(x3)\n', (6422, 6426), True, 'import numpy as np\n'), 
((6487, 6499), 'numpy.where', 'np.where', (['t3'], {}), '(t3)\n', (6495, 6499), True, 'import numpy as np\n')] |
from __future__ import division, print_function
import numpy as np
from scipy.constants.codata import physical_constants
import xraylib
def energy(crystal_str, hkl, deg):
    '''
    Photon energy (keV) of the Bragg reflection for the given crystal,
    reflection indices hkl and Bragg angle in degrees, using the kinematic
    approximation.
    '''
    hc = (physical_constants['Planck constant in eV s'][0]
          * physical_constants['speed of light in vacuum'][0] * 1e6)  # keV*nm
    crystal = xraylib.Crystal_GetCrystal(crystal_str)
    d_spacing = xraylib.Crystal_dSpacing(crystal, *hkl) * 1e-1  # nm
    theta = np.radians(deg)
    return hc / (2 * d_spacing * np.sin(theta))  # keV
def angle(crystal_str, hkl, energy):
    '''
    Bragg angle in degrees for the given photon energy (keV), crystal, and
    reflection, using the kinematic approximation. Falls back to 90 degrees
    when the requested energy lies below the backscattering energy.
    '''
    hc = (physical_constants['Planck constant in eV s'][0]
          * physical_constants['speed of light in vacuum'][0] * 1e6)  # keV*nm
    crystal = xraylib.Crystal_GetCrystal(crystal_str)
    d_spacing = xraylib.Crystal_dSpacing(crystal, *hkl) * 1e-1  # nm
    sin_theta = hc / (2 * d_spacing * energy)
    if sin_theta > 1:
        print('Given energy below the backscattering energy!')
        print('Setting theta to 90 deg.')
        return 90.0
    return np.degrees(np.arcsin(sin_theta))
| [
"numpy.radians",
"xraylib.Crystal_GetCrystal",
"numpy.degrees",
"numpy.arcsin",
"numpy.sin",
"xraylib.Crystal_dSpacing"
] | [((367, 406), 'xraylib.Crystal_GetCrystal', 'xraylib.Crystal_GetCrystal', (['crystal_str'], {}), '(crystal_str)\n', (393, 406), False, 'import xraylib\n'), ((598, 613), 'numpy.radians', 'np.radians', (['deg'], {}), '(deg)\n', (608, 613), True, 'import numpy as np\n'), ((932, 971), 'xraylib.Crystal_GetCrystal', 'xraylib.Crystal_GetCrystal', (['crystal_str'], {}), '(crystal_str)\n', (958, 971), False, 'import xraylib\n'), ((1376, 1391), 'numpy.degrees', 'np.degrees', (['th0'], {}), '(th0)\n', (1386, 1391), True, 'import numpy as np\n'), ((539, 578), 'xraylib.Crystal_dSpacing', 'xraylib.Crystal_dSpacing', (['crystal', '*hkl'], {}), '(crystal, *hkl)\n', (563, 578), False, 'import xraylib\n'), ((1104, 1143), 'xraylib.Crystal_dSpacing', 'xraylib.Crystal_dSpacing', (['crystal', '*hkl'], {}), '(crystal, *hkl)\n', (1128, 1143), False, 'import xraylib\n'), ((1202, 1234), 'numpy.arcsin', 'np.arcsin', (['(hc / (2 * d * energy))'], {}), '(hc / (2 * d * energy))\n', (1211, 1234), True, 'import numpy as np\n'), ((634, 644), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (640, 644), True, 'import numpy as np\n')] |
"""Data to test interpolation functions."""
from pathlib import Path
import numpy as np
DIR = Path(__file__).parent

# Scalar fields, one file each.
scalar_projection = np.loadtxt(DIR / 'scalar_projection.csv')
scalar_slice = np.loadtxt(DIR / 'scalar_slice.csv')

# Vector fields are stored one component per file; stack x then y.
vector_projection = np.array([
    np.loadtxt(DIR / 'vector_x_projection.csv'),
    np.loadtxt(DIR / 'vector_y_projection.csv'),
])
vector_slice = np.array([
    np.loadtxt(DIR / 'vector_x_slice.csv'),
    np.loadtxt(DIR / 'vector_y_slice.csv'),
])
| [
"pathlib.Path",
"numpy.loadtxt"
] | [((140, 181), 'numpy.loadtxt', 'np.loadtxt', (["(DIR / 'scalar_projection.csv')"], {}), "(DIR / 'scalar_projection.csv')\n", (150, 181), True, 'import numpy as np\n'), ((197, 233), 'numpy.loadtxt', 'np.loadtxt', (["(DIR / 'scalar_slice.csv')"], {}), "(DIR / 'scalar_slice.csv')\n", (207, 233), True, 'import numpy as np\n'), ((97, 111), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'from pathlib import Path\n'), ((278, 321), 'numpy.loadtxt', 'np.loadtxt', (["(DIR / 'vector_x_projection.csv')"], {}), "(DIR / 'vector_x_projection.csv')\n", (288, 321), True, 'import numpy as np\n'), ((331, 374), 'numpy.loadtxt', 'np.loadtxt', (["(DIR / 'vector_y_projection.csv')"], {}), "(DIR / 'vector_y_projection.csv')\n", (341, 374), True, 'import numpy as np\n'), ((414, 452), 'numpy.loadtxt', 'np.loadtxt', (["(DIR / 'vector_x_slice.csv')"], {}), "(DIR / 'vector_x_slice.csv')\n", (424, 452), True, 'import numpy as np\n'), ((454, 492), 'numpy.loadtxt', 'np.loadtxt', (["(DIR / 'vector_y_slice.csv')"], {}), "(DIR / 'vector_y_slice.csv')\n", (464, 492), True, 'import numpy as np\n')] |
import numpy as np
def getDistance(p, q, ndim=3, scale=None):
    ''' Return Euclidean distance between 'p' and 'q'
    Parameters
    ----------------
    p: (N,) list, tuple or ndarray
        start position of distance measure
    q: (N,) list, tuple or ndarray
        end position of distance measure
    (optional) ndim: integer
        number of coordinates used (2, otherwise 3)
    (optional) scale: (N,) list, tuple or ndarray
        per-axis weights applied to the displacement
    Returns
    ----------------
    distance: float
        Euclidean distance between 'p' and 'q'
    '''
    # harmonize input types before element-wise access
    if not isinstance(p, type(q)):
        p, q = np.array(p), np.array(q)
    axes = 2 if ndim == 2 else 3
    # displacement, optionally weighted per axis
    if scale is None:
        dr = np.array([q[k] - p[k] for k in range(axes)])
    else:
        dr = np.array([scale[k] * (q[k] - p[k]) for k in range(axes)])
    return np.sqrt(np.sum(dr ** 2.))
def getMahaDistance(p, q, invCov, ndim=3, scale=None):
    ''' Return Mahalanobis distance between 'p' and 'q'
    Parameters
    ----------------
    p: (N,) list, tuple or ndarray
        start position of distance measure
    q: (N,) list, tuple or ndarray
        end position of distance measure
    invCov: (N, N) list, ndarray
        inverse of covariance matrix
    (optional) ndim: integer
        number of coordinates used (2, otherwise 3)
    (optional) scale: (N,) list, tuple or ndarray
        per-axis weights applied to the displacement
    Returns
    ----------------
    distance: float
        Mahalanobis distance between 'p' and 'q'
    '''
    if not isinstance(p, type(q)):
        p, q = np.array(p), np.array(q)
    axes = 2 if ndim == 2 else 3
    if scale is None:
        dr = np.array([q[k] - p[k] for k in range(axes)])
    else:
        dr = np.array([scale[k] * (q[k] - p[k]) for k in range(axes)])
    # sqrt(dr^T * invCov * dr)
    return np.sqrt(np.dot(dr, np.matmul(invCov, dr)))
def getManhattanDistance(p, q, ndim=3, scale=None):
    ''' Return Manhattan distance between 'p' and 'q'
    Parameters
    ----------------
    p: (N,) list, tuple or ndarray
        start position of distance measure
    q: (N,) list, tuple or ndarray
        end position of distance measure
    (optional) ndim: integer
        number of coordinates used (2, otherwise 3)
    (optional) scale: (N,) list, tuple or ndarray
        per-axis weights applied to the displacement
    Returns
    ----------------
    distance: float
        Manhattan distance between 'p' and 'q'
    '''
    if not isinstance(p, type(q)):
        p, q = np.array(p), np.array(q)
    axes = 2 if ndim == 2 else 3
    if scale is None:
        dr = np.array([q[k] - p[k] for k in range(axes)])
    else:
        dr = np.array([scale[k] * (q[k] - p[k]) for k in range(axes)])
    # sum of absolute per-axis displacements
    return np.abs(dr).sum()
def getMinkowskiDistance(p, q, degree=2, ndim=3, scale=None):
    ''' Return Minkowski distance between 'p' and 'q'
    Parameters
    ----------------
    p: (N,) list, tuple or ndarray
        start position of distance measure
    q: (N,) list, tuple or ndarray
        end position of distance measure
    (optional) degree: integer
        degree of the distance measure; 0/None falls back to Manhattan,
        2 to the Euclidean distance
    (optional) ndim: integer
        number of coordinates used (2, otherwise 3)
    (optional) scale: (N,) list, tuple or ndarray
        per-axis weights applied to the displacement
    Returns
    ----------------
    distance: float
        Minkowski distance between 'p' and 'q'
    '''
    # delegate the degenerate degrees to the specialized measures
    if not degree:
        return getManhattanDistance(p, q, ndim=ndim, scale=scale)
    if degree == 2:
        return getDistance(p, q, ndim=ndim, scale=scale)
    if not isinstance(p, type(q)):
        p, q = np.array(p), np.array(q)
    axes = 2 if ndim == 2 else 3
    if scale is None:
        dr = np.array([q[k] - p[k] for k in range(axes)])
    else:
        dr = np.array([scale[k] * (q[k] - p[k]) for k in range(axes)])
    return np.power(np.sum(np.power(dr, degree)), 1. / degree)
def getProjDistance(p, q, norm, scale=None, bVertical=False, ndim=3):
    ''' Return projection distance between 'p' and 'q' onto 'norm'
    Parameters
    ----------------
    p: (N,) list, tuple or ndarray
        start position of distance measure
    q: (N,) list, tuple or ndarray
        end position of distance measure
    norm: (N,) ndarray
        direction vector the displacement is projected onto
    (optional) scale: (N,) list, tuple or ndarray
        scale vector giving weights for each axes
    (optional) bVertical: boolean
        boolean whether to also return the perpendicular component
    (optional) ndim: integer
        specified input dimension.
        BUG FIX: the original body referenced `ndim` without declaring it
        as a parameter, raising NameError on every call; it is appended as
        a keyword with the same default the sibling functions use, so all
        existing call signatures keep working.
    Returns
    ----------------
    distance: float (or (distance, vertical) when bVertical is True)
        Projection distance between 'p' and 'q' onto 'norm'
    '''
    if not isinstance(p, type(q)):
        p, q = np.array(p), np.array(q)
    axes = 2 if ndim == 2 else 3
    # displacement, optionally weighted per axis
    if scale is None:
        dr = np.array([q[k] - p[k] for k in range(axes)])
    else:
        dr = np.array([scale[k] * (q[k] - p[k]) for k in range(axes)])
    # signed length of the projection onto 'norm'
    distance = np.sum(norm * dr)
    if bVertical:
        # component perpendicular to 'norm' (Pythagoras on the full length)
        Euclid = getDistance(p, q, ndim=ndim, scale=scale)
        vertical = np.sqrt(Euclid ** 2. - distance ** 2.)
        return distance, vertical
    return distance
def getNormVector(p, q, ndim=3, scale=None):
    ''' Return unit norm from 'p' to 'q'
    Parameters
    ----------------
    p: (N,) list, tuple or ndarray
        start position of distance measure
    q: (N,) list, tuple or ndarray
        end position of distance measure
    (optional) ndim: integer
        number of coordinates used (2, otherwise 3)
    (optional) scale: (N,) list, tuple or ndarray
        per-axis weights applied to the displacement
    Returns
    ----------------
    norm: (N,) ndarray
        unit vector pointing from 'p' to 'q'
    '''
    if not isinstance(p, type(q)):
        p, q = np.array(p), np.array(q)
    axes = 2 if ndim == 2 else 3
    if scale is None:
        dr = np.array([q[k] - p[k] for k in range(axes)])
    else:
        dr = np.array([scale[k] * (q[k] - p[k]) for k in range(axes)])
    # normalize the displacement to unit length
    return dr / np.sqrt(np.sum(dr ** 2.))
def getInterAngle(u, v, ndim=3, scale=None):
    ''' Return intersection angle between 'u' to 'v'
    Parameters
    ----------------
    u: (N,) list, tuple or ndarray
        first vector
    v: (N,) list, tuple or ndarray
        second vector
    (optional) ndim: integer
        specified input dimension
    (optional) scale: (N,) list, tuple or ndarray
        scale vector giving weights for each axes
    Returns
    ----------------
    angle: float
        intersection angle in radian
    '''
    # Check whether input types are matched
    if not isinstance(u, type(v)):
        u, v = np.array(u), np.array(v)
    # BUG FIX: the original applied the scale when scale WAS None
    # (`if isinstance(scale, type(None)): u = scale*u`), so every call with
    # the default raised TypeError. Apply it only when a scale is given.
    if scale is not None:
        u, v = scale * u, scale * v
    # Normalize both vectors, then read the angle off their dot product
    u = getNormVector(np.zeros(len(u)), u, ndim=ndim, scale=None)
    v = getNormVector(np.zeros(len(v)), v, ndim=ndim, scale=None)
    return np.arccos(np.sum(u * v))
| [
"numpy.abs",
"numpy.sum",
"numpy.power",
"numpy.array",
"numpy.matmul",
"numpy.sqrt"
] | [((2942, 2963), 'numpy.matmul', 'np.matmul', (['invCov', 'dr'], {}), '(invCov, dr)\n', (2951, 2963), True, 'import numpy as np\n'), ((7806, 7823), 'numpy.sum', 'np.sum', (['(norm * dr)'], {}), '(norm * dr)\n', (7812, 7823), True, 'import numpy as np\n'), ((1424, 1441), 'numpy.sum', 'np.sum', (['(dr ** 2.0)'], {}), '(dr ** 2.0)\n', (1430, 1441), True, 'import numpy as np\n'), ((2987, 3010), 'numpy.matmul', 'np.matmul', (['dr', 'distance'], {}), '(dr, distance)\n', (2996, 3010), True, 'import numpy as np\n'), ((4434, 4444), 'numpy.abs', 'np.abs', (['dr'], {}), '(dr)\n', (4440, 4444), True, 'import numpy as np\n'), ((7957, 7997), 'numpy.sqrt', 'np.sqrt', (['(Euclid ** 2.0 - distance ** 2.0)'], {}), '(Euclid ** 2.0 - distance ** 2.0)\n', (7964, 7997), True, 'import numpy as np\n'), ((10495, 10508), 'numpy.sum', 'np.sum', (['(u * v)'], {}), '(u * v)\n', (10501, 10508), True, 'import numpy as np\n'), ((751, 762), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (759, 762), True, 'import numpy as np\n'), ((764, 775), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (772, 775), True, 'import numpy as np\n'), ((873, 909), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1]]'], {}), '([q[0] - p[0], q[1] - p[1]])\n', (881, 909), True, 'import numpy as np\n'), ((968, 1017), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1], q[2] - p[2]]'], {}), '([q[0] - p[0], q[1] - p[1], q[2] - p[2]])\n', (976, 1017), True, 'import numpy as np\n'), ((1121, 1183), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])])\n', (1129, 1183), True, 'import numpy as np\n'), ((1238, 1331), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q[2] - p[2])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q\n [2] - p[2])])\n', (1246, 1331), True, 'import numpy as np\n'), ((2275, 2286), 'numpy.array', 'np.array', (['p'], {}), 
'(p)\n', (2283, 2286), True, 'import numpy as np\n'), ((2288, 2299), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (2296, 2299), True, 'import numpy as np\n'), ((2397, 2433), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1]]'], {}), '([q[0] - p[0], q[1] - p[1]])\n', (2405, 2433), True, 'import numpy as np\n'), ((2492, 2541), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1], q[2] - p[2]]'], {}), '([q[0] - p[0], q[1] - p[1], q[2] - p[2]])\n', (2500, 2541), True, 'import numpy as np\n'), ((2645, 2707), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])])\n', (2653, 2707), True, 'import numpy as np\n'), ((2762, 2855), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q[2] - p[2])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q\n [2] - p[2])])\n', (2770, 2855), True, 'import numpy as np\n'), ((3762, 3773), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (3770, 3773), True, 'import numpy as np\n'), ((3775, 3786), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (3783, 3786), True, 'import numpy as np\n'), ((3884, 3920), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1]]'], {}), '([q[0] - p[0], q[1] - p[1]])\n', (3892, 3920), True, 'import numpy as np\n'), ((3979, 4028), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1], q[2] - p[2]]'], {}), '([q[0] - p[0], q[1] - p[1], q[2] - p[2]])\n', (3987, 4028), True, 'import numpy as np\n'), ((4132, 4194), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])])\n', (4140, 4194), True, 'import numpy as np\n'), ((4249, 4342), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q[2] - p[2])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q\n [2] - p[2])])\n', (4257, 4342), True, 
'import numpy as np\n'), ((5466, 5477), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (5474, 5477), True, 'import numpy as np\n'), ((5479, 5490), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (5487, 5490), True, 'import numpy as np\n'), ((5588, 5624), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1]]'], {}), '([q[0] - p[0], q[1] - p[1]])\n', (5596, 5624), True, 'import numpy as np\n'), ((5683, 5732), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1], q[2] - p[2]]'], {}), '([q[0] - p[0], q[1] - p[1], q[2] - p[2]])\n', (5691, 5732), True, 'import numpy as np\n'), ((5836, 5898), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])])\n', (5844, 5898), True, 'import numpy as np\n'), ((5953, 6046), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q[2] - p[2])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q\n [2] - p[2])])\n', (5961, 6046), True, 'import numpy as np\n'), ((6147, 6167), 'numpy.power', 'np.power', (['dr', 'degree'], {}), '(dr, degree)\n', (6155, 6167), True, 'import numpy as np\n'), ((7141, 7152), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (7149, 7152), True, 'import numpy as np\n'), ((7154, 7165), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (7162, 7165), True, 'import numpy as np\n'), ((7263, 7299), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1]]'], {}), '([q[0] - p[0], q[1] - p[1]])\n', (7271, 7299), True, 'import numpy as np\n'), ((7358, 7407), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1], q[2] - p[2]]'], {}), '([q[0] - p[0], q[1] - p[1], q[2] - p[2]])\n', (7366, 7407), True, 'import numpy as np\n'), ((7511, 7573), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])])\n', (7519, 7573), True, 'import numpy as np\n'), ((7628, 7721), 'numpy.array', 
'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q[2] - p[2])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q\n [2] - p[2])])\n', (7636, 7721), True, 'import numpy as np\n'), ((8783, 8794), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (8791, 8794), True, 'import numpy as np\n'), ((8796, 8807), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8804, 8807), True, 'import numpy as np\n'), ((8905, 8941), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1]]'], {}), '([q[0] - p[0], q[1] - p[1]])\n', (8913, 8941), True, 'import numpy as np\n'), ((9000, 9049), 'numpy.array', 'np.array', (['[q[0] - p[0], q[1] - p[1], q[2] - p[2]]'], {}), '([q[0] - p[0], q[1] - p[1], q[2] - p[2]])\n', (9008, 9049), True, 'import numpy as np\n'), ((9153, 9215), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1])])\n', (9161, 9215), True, 'import numpy as np\n'), ((9270, 9363), 'numpy.array', 'np.array', (['[scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q[2] - p[2])]'], {}), '([scale[0] * (q[0] - p[0]), scale[1] * (q[1] - p[1]), scale[2] * (q\n [2] - p[2])])\n', (9278, 9363), True, 'import numpy as np\n'), ((9457, 9474), 'numpy.sum', 'np.sum', (['(dr ** 2.0)'], {}), '(dr ** 2.0)\n', (9463, 9474), True, 'import numpy as np\n'), ((10165, 10176), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (10173, 10176), True, 'import numpy as np\n'), ((10178, 10189), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (10186, 10189), True, 'import numpy as np\n')] |
import random
import numpy
def set_random_seeds(value):
    """
    Seed both the stdlib ``random`` module and NumPy's global RNG so
    that subsequent draws are reproducible.
    """
    # Apply the same seed to each generator used internally.
    for seeder in (random.seed, numpy.random.seed):
        seeder(value)
| [
"numpy.random.seed",
"random.seed"
] | [((155, 173), 'random.seed', 'random.seed', (['value'], {}), '(value)\n', (166, 173), False, 'import random\n'), ((178, 202), 'numpy.random.seed', 'numpy.random.seed', (['value'], {}), '(value)\n', (195, 202), False, 'import numpy\n')] |
import numpy as np
import numpy.linalg as la
def ta_1(n, p, r):
    """
    Build a p x n rank-r test matrix from noisy sine/cosine factors,
    giving a high-dimensional spiral/curve structure.
    """
    grid_p = np.linspace(-4 * np.pi, 4 * np.pi, num=p)
    left = np.zeros((p, r))
    # Each factor column varies at a different speed, plus uniform noise.
    for k in range(r):
        left[:, k] = np.sin((grid_p + k) / (k + 1)) + np.random.rand(p) / 2
    grid_n = np.linspace(-2 * np.pi, 2 * np.pi, num=n)
    right = np.zeros((n, r))
    for k in range(r):
        right[:, k] = np.cos((grid_n + k) / (k + 1)) + np.random.rand(n) / 2
    # The clean (low-rank) part of M.
    return np.dot(left, right.T)
def ta_2(n, p, r):
    """
    Build a p x n rank-r test matrix from two random-walk factors.

    A random start vector is nudged r times to simulate a high-D path
    for each factor; the scaled factors are multiplied to give a
    lower-rank matrix.
    """
    walk = np.random.rand(p) - 0.5
    left = np.zeros((p, r))
    for k in range(r):
        # Take a small random step, then record the scaled position.
        walk = walk + (np.random.rand(p) - 0.5) * 0.2
        left[:, k] = walk * (r / n)
    walk = np.random.rand(n) - 0.5
    right = np.zeros((n, r))
    for k in range(r):
        walk = walk + (np.random.rand(n) - 0.5) * 0.2
        right[:, k] = walk * (r / p)
    return np.dot(left, right.T)
def ta_3(n, p, r):
    """
    Random-walk test data: start from a random p-vector and take n small
    random steps, storing each position as a column.  The walk produces
    loose low-dimensional structure without an exact low rank.
    ``r`` is accepted for interface compatibility but unused.
    """
    out = np.zeros((p, n))
    pos = np.random.rand(p) - 0.5
    for col in range(n):
        pos = pos + (np.random.rand(p) - 0.5) * 0.2
        out[:, col] = pos
    return out
def ta_4(n, p, r):
    """
    Simple test data: one sinewave with per-row Gaussian phase jitter,
    replicated across p rows.  ``r`` is accepted for interface
    compatibility but unused.
    """
    phase = np.arange(n) * (8 / 100) * np.pi
    # Each row gets its own phase jitter before taking the sine.
    rows = [np.sin(phase + (np.random.randn(n) - 0.5) * 0.2) for _ in range(p)]
    return np.array(rows)
def ta_5(n, p, r):
    """
    Frequency-modulated sinewave test data: with probability ~0.1 per
    sample the instantaneous rate is re-modulated by a random factor,
    then the wave is replicated across p rows with a small per-row
    offset.  ``r`` is accepted for interface compatibility but unused.
    """
    waves = int(8 * n / 100)  # number of waves per n samples
    rate = 1
    step_noise = np.random.rand(n)
    _unused_noise = np.random.rand(p)  # drawn (and discarded) to match the reference RNG stream
    phase = 0
    phases = []
    for i in range(n):
        # Occasionally re-modulate the frequency.
        if np.random.rand() < 0.1:
            rate = rate * (np.random.rand() + 0.5) ** 2
        phase = phase + rate + step_noise[i]
        phases.append(phase * waves * np.pi / n)
    wave = np.sin(np.array(phases))
    # Small constant offset per row so the rows are not identical.
    offsets = np.sin(np.arange(p) * np.pi) * 0.1 - 0.05
    return np.outer(np.ones(p), wave) + np.outer(offsets, np.ones(n))
def ta_6(n, p, r, pow=10):
    """
    Generate a Mackey-Glass time series (tau = 30) integrated with a
    fourth-order Runge-Kutta step.  Returns a p x n array (each row is
    one series).  ``r`` is accepted for interface compatibility but
    unused.
    """
    a, b, tau = 0.2, 0.1, 30
    deltat = 6                   # sampling interval
    history = int(tau / deltat)  # delay expressed in samples

    def deriv(x_t, x_lag):
        # Mackey-Glass delay differential right-hand side.
        return -b * x_t + a * x_lag / (1 + x_lag ** pow)

    def rk4_step(x_t, x_lag):
        # Classic RK4 update (lagged term held fixed over the step).
        k1 = deltat * deriv(x_t, x_lag)
        k2 = deltat * deriv(x_t + 0.5 * k1, x_lag)
        k3 = deltat * deriv(x_t + 0.5 * k2, x_lag)
        k4 = deltat * deriv(x_t + k3, x_lag)
        return x_t + k1 / 6 + k2 / 3 + k3 / 3 + k4 / 6

    state = np.random.rand(p) + 0.9
    series = []
    for i in range(n):
        series.append(state)
        # Lagged state is zero until enough history has accumulated.
        lagged = series[-history] if i > history else 0
        state = rk4_step(state, lagged)
    return np.array(series).T
def normalise(X):
    """
    Min-max rescale ``X`` so its minimum maps to 0 and its maximum to 1.
    """
    hi, lo = np.max(X), np.min(X)
    # (X - hi)/(hi - lo) lies in [-1, 0]; shifting by +1 gives [0, 1].
    return (X - hi) / (hi - lo) + 1
def get_outlier(type, L, out_ind):
    """
    Generate a single outlying p-vector from the base data ``L``.

    Parameters
    ----------
    type : str
        'point'   : random vector with random sign per feature.
        'context' : a column copied from a distant part of the series.
        'stutter' : the column at ``out_ind`` itself (for repeats).
    L : np.ndarray
        Base data of shape (p, n).
    out_ind : int
        Column index the outlier will replace.

    Returns
    -------
    rv : np.ndarray
        The outlier vector of length p.

    Raises
    ------
    ValueError
        If ``type`` is not one of the recognised kinds.
    """
    p, n = L.shape
    if type == 'point':
        rv = (np.random.rand(p) + 1) / 2                # magnitudes in [0.5, 1)
        rv = rv * np.sign(np.random.rand(p) - 0.5)      # random sign per feature
    elif type == 'context':
        # Choose a column index guaranteed to be far from out_ind.
        if out_ind < n / 4:
            n_copy = n - int(np.ceil(np.random.rand() * n * 3 / 4)) - 1
        elif out_ind > 3 * n / 4:
            n_copy = int(np.ceil(np.random.rand() * n * 3 / 4)) - 1
        else:
            n_copy = int(out_ind + n * (np.random.rand() - 0.5) / 2)
        rv = L[:, n_copy]
    elif type == 'stutter':
        rv = L[:, out_ind]
    else:
        # Was a bare `raise` (a contextless RuntimeError); name the problem.
        raise ValueError('unknown outlier type: %s' % type)
    return rv
def generate_test(n,p,r, p_frac, p_quant,gamma, noise=0.1, ta=1, nz_cols=None, outlier_type='point'):
    """
    generates test case of multidimensional manifold. outliers to be seeded
    as a fraction (gamma*n) of outliers perturbed by a certain fraction of
    parameters (p_frac) by a certain amount (p_quant)
    keeps the same features perturbed for each outlier column
    returns the data set, and indices of outliers

    Parameters
    ----------
    n, p, r : int
        Samples, features and rank passed to the chosen ta_* generator.
    p_frac, p_quant : float
        Fraction of features perturbed / perturbation magnitude (used by
        the 'point' outlier type; at least one feature is perturbed).
    gamma : float
        Fraction of columns seeded as outliers (at least one).
    noise : float
        Amplitude of uniform noise added to every column.
    ta : int
        1-6, selects ta_1 .. ta_6 as the base-data generator.
    nz_cols : None, 'last half' or 'first_qu'
        Restricts where outlier columns may be placed.
    outlier_type : str
        'point', 'context' or 'stutter' (see get_outlier).

    Returns
    -------
    M : np.ndarray, shape (n, p)
        The data matrix (transpose of the internal (p, n) layout).
    nz_cols : np.ndarray of int
        Indices of the seeded outlier columns.
    """
    ta_lst = [ta_1, ta_2, ta_3, ta_4, ta_5, ta_6]
    ta_fn = ta_lst[ta-1]  # ta is 1-based
    # Get base data
    L_o = ta_fn(n,p, r)
    # Add noise
    # print(L_o.shape, ta)
    # for i in range(n):
    #
    #     L_o[:,i] = L_o[:,i] + (np.random.rand(p)-0.5)*noise
    # Normalise
    L_o = normalise(L_o)
    # Add uniform noise to every column after normalising.
    for i in range(n):
        L_o[:,i] = L_o[:,i] + (np.random.rand(p)-0.5)*noise
    # chose at random columns to be outliers
    n_outs = max(int(gamma*n), 1)
    n_feats = max(int(p_frac*p), 1)
    if not nz_cols:
        nz_cols = np.random.choice(np.arange(n), size=(n_outs), replace=False)
    elif nz_cols == 'last half':
        nz_cols = np.random.choice(np.arange(int(n/2), n), size=(n_outs), replace=False)
    elif nz_cols == 'first_qu':
        nz_cols = np.random.choice(np.arange(0, int(n/4)), size=(n_outs), replace=False)
    else:
        # Unrecognised placement keyword: bare raise -> RuntimeError.
        print('oops')
        raise
    #chose certain random features to be perturbed, generates mask
    perturb_feats = np.random.choice(np.arange(p), size=n_feats, replace=False)
    mask = np.zeros(p)
    mask[perturb_feats]=1
    to_add = np.array([])
    #start working on the main data matrix M
    M_o = np.copy(L_o)
    for col in nz_cols:
        rv = get_outlier(outlier_type, L_o, col)
        if outlier_type == 'stutter':
            # Repeat the same column ~log10(n)+1 times, ending at `col`;
            # the extra repeated columns are recorded as outliers too.
            snum = int(np.ceil(np.log10(n)))+1
            # print(n,snum)
            for i in range(1, snum):
                M_o[:,(col-snum)+i] = L_o[:,col] #+ rv*p_quant*mask
                to_add = np.append(to_add, int((col-snum)+i) )
        elif outlier_type=='context':
            M_o[:,col] = rv
        else:
            # 'point': perturb only the masked features.
            M_o[:,col] = L_o[:,col] + rv*p_quant*mask
    nz_cols = np.append(nz_cols, to_add)
    nz_cols = nz_cols.astype(int)
    # print(nz_cols)
    return M_o.T, nz_cols
if __name__ == '__main__':
    # Visual smoke test: generate a 1-D series with outliers seeded in
    # the first quarter and plot them.
    import matplotlib.pyplot as plt
    n = 200
    p = 1
    r = 20
    gamma = 0.05
    p_frac = 0.3
    p_quant = 0.3
    noise = 1  # NOTE(review): unused — the call below passes noise=0
    ta = 3
    nz_cols = 'first_qu'
    data, outs = generate_test(n,p,r, p_frac, p_quant,gamma,noise=0,ta=ta,nz_cols=nz_cols)
    x = np.arange(n)
    plt.plot(x,data, '.')
    plt.plot(outs, data[outs], 'ro')  # outlier columns highlighted in red
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.copy",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.zeros",
"numpy.ones",
"numpy.log10",
"numpy.append",
"numpy.max",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"numpy.random.rand",
"numpy.dot",
"numpy.min"
] | [((172, 213), 'numpy.linspace', 'np.linspace', (['(-4 * np.pi)', '(4 * np.pi)'], {'num': 'p'}), '(-4 * np.pi, 4 * np.pi, num=p)\n', (183, 213), True, 'import numpy as np\n'), ((218, 234), 'numpy.zeros', 'np.zeros', (['(p, r)'], {}), '((p, r))\n', (226, 234), True, 'import numpy as np\n'), ((394, 410), 'numpy.zeros', 'np.zeros', (['(n, r)'], {}), '((n, r))\n', (402, 410), True, 'import numpy as np\n'), ((418, 459), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(2 * np.pi)'], {'num': 'n'}), '(-2 * np.pi, 2 * np.pi, num=n)\n', (429, 459), True, 'import numpy as np\n'), ((618, 632), 'numpy.dot', 'np.dot', (['A', 'B.T'], {}), '(A, B.T)\n', (624, 632), True, 'import numpy as np\n'), ((902, 918), 'numpy.zeros', 'np.zeros', (['(p, n)'], {}), '((p, n))\n', (910, 918), True, 'import numpy as np\n'), ((927, 943), 'numpy.zeros', 'np.zeros', (['(p, r)'], {}), '((p, r))\n', (935, 943), True, 'import numpy as np\n'), ((1076, 1092), 'numpy.zeros', 'np.zeros', (['(n, r)'], {}), '((n, r))\n', (1084, 1092), True, 'import numpy as np\n'), ((1227, 1241), 'numpy.dot', 'np.dot', (['A', 'B.T'], {}), '(A, B.T)\n', (1233, 1241), True, 'import numpy as np\n'), ((1555, 1571), 'numpy.zeros', 'np.zeros', (['(p, n)'], {}), '((p, n))\n', (1563, 1571), True, 'import numpy as np\n'), ((2101, 2114), 'numpy.array', 'np.array', (['lst'], {}), '(lst)\n', (2109, 2114), True, 'import numpy as np\n'), ((2450, 2467), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2464, 2467), True, 'import numpy as np\n'), ((2483, 2500), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (2497, 2500), True, 'import numpy as np\n'), ((2684, 2698), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2692, 2698), True, 'import numpy as np\n'), ((2707, 2719), 'numpy.sin', 'np.sin', (['data'], {}), '(data)\n', (2713, 2719), True, 'import numpy as np\n'), ((4127, 4138), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4135, 4138), True, 'import numpy as np\n'), ((7012, 7023), 
'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (7020, 7023), True, 'import numpy as np\n'), ((7063, 7075), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (7071, 7075), True, 'import numpy as np\n'), ((7132, 7144), 'numpy.copy', 'np.copy', (['L_o'], {}), '(L_o)\n', (7139, 7144), True, 'import numpy as np\n'), ((7650, 7676), 'numpy.append', 'np.append', (['nz_cols', 'to_add'], {}), '(nz_cols, to_add)\n', (7659, 7676), True, 'import numpy as np\n'), ((8057, 8069), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (8066, 8069), True, 'import numpy as np\n'), ((8075, 8097), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data', '"""."""'], {}), "(x, data, '.')\n", (8083, 8097), True, 'import matplotlib.pyplot as plt\n'), ((8101, 8133), 'matplotlib.pyplot.plot', 'plt.plot', (['outs', 'data[outs]', '"""ro"""'], {}), "(outs, data[outs], 'ro')\n", (8109, 8133), True, 'import matplotlib.pyplot as plt\n'), ((8138, 8148), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8146, 8148), True, 'import matplotlib.pyplot as plt\n'), ((952, 969), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (966, 969), True, 'import numpy as np\n'), ((1101, 1118), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1115, 1118), True, 'import numpy as np\n'), ((1580, 1597), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (1594, 1597), True, 'import numpy as np\n'), ((2027, 2038), 'numpy.sin', 'np.sin', (['x_n'], {}), '(x_n)\n', (2033, 2038), True, 'import numpy as np\n'), ((2854, 2864), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (2861, 2864), True, 'import numpy as np\n'), ((3176, 3193), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (3190, 3193), True, 'import numpy as np\n'), ((6958, 6970), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (6967, 6970), True, 'import numpy as np\n'), ((342, 367), 'numpy.sin', 'np.sin', (['((x + i) / (i + 1))'], {}), '((x + i) / (i + 1))\n', (348, 367), True, 'import numpy as np\n'), ((564, 589), 
'numpy.cos', 'np.cos', (['((x + i) / (i + 1))'], {}), '((x + i) / (i + 1))\n', (570, 589), True, 'import numpy as np\n'), ((1851, 1863), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1860, 1863), True, 'import numpy as np\n'), ((2535, 2551), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2549, 2551), True, 'import numpy as np\n'), ((2897, 2907), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2904, 2907), True, 'import numpy as np\n'), ((4348, 4357), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (4354, 4357), True, 'import numpy as np\n'), ((4362, 4371), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (4368, 4371), True, 'import numpy as np\n'), ((4374, 4383), 'numpy.min', 'np.min', (['X'], {}), '(X)\n', (4380, 4383), True, 'import numpy as np\n'), ((6520, 6532), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6529, 6532), True, 'import numpy as np\n'), ((366, 383), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (380, 383), True, 'import numpy as np\n'), ((588, 605), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (602, 605), True, 'import numpy as np\n'), ((4853, 4870), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (4867, 4870), True, 'import numpy as np\n'), ((1015, 1032), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (1029, 1032), True, 'import numpy as np\n'), ((1164, 1181), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1178, 1181), True, 'import numpy as np\n'), ((1643, 1660), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (1657, 1660), True, 'import numpy as np\n'), ((1985, 2003), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (2000, 2003), True, 'import numpy as np\n'), ((2769, 2781), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (2778, 2781), True, 'import numpy as np\n'), ((4940, 4957), 'numpy.random.rand', 'np.random.rand', (['p'], {}), '(p)\n', (4954, 4957), True, 'import numpy as np\n'), ((6321, 6338), 'numpy.random.rand', 
'np.random.rand', (['p'], {}), '(p)\n', (6335, 6338), True, 'import numpy as np\n'), ((2578, 2594), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2592, 2594), True, 'import numpy as np\n'), ((7289, 7300), 'numpy.log10', 'np.log10', (['n'], {}), '(n)\n', (7297, 7300), True, 'import numpy as np\n'), ((5355, 5371), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5369, 5371), True, 'import numpy as np\n'), ((5112, 5128), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5126, 5128), True, 'import numpy as np\n'), ((5203, 5219), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5217, 5219), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import random
from sklearn.model_selection import train_test_split
import pdb
def split_data(data):
    """
    Split the dataset dict into 80/20 train/test partitions.

    The three parallel sequences ('input_lstm', 'input', 'target') are
    split together, with a fixed random_state for reproducibility.
    """
    splits = train_test_split(
        data['input_lstm'], data['input'], data['target'], test_size=0.2, random_state=42)
    lstm_tr, lstm_te, in_tr, in_te, tgt_tr, tgt_te = splits
    train = {'input_lstm': lstm_tr, 'input': in_tr, 'target': tgt_tr}
    test = {'input_lstm': lstm_te, 'input': in_te, 'target': tgt_te}
    return train, test
def augment_data(train_data):
    """
    Augment the training set by splicing together pairs of same-emotion
    utterances whose durations sum to roughly 8 seconds.

    For each of the four emotions, utterances shorter than 8 s are
    bucketed by whole seconds of duration; complementary buckets
    (0-1 s with 7-8 s, 1-2 s with 6-7 s, ...) are cross-paired and 2000
    random pairs per bucket combination are concatenated into new
    samples.  The augmented lists are shuffled together before being
    returned.

    Parameters
    ----------
    train_data : dict
        Keys 'input_lstm' (array-like of per-utterance feature
        sequences, must support .tolist()), 'input' (list of raw-audio
        sample arrays) and 'target' (list of one-hot emotion labels).

    Returns
    -------
    dict
        Same keys, with the augmented and shuffled data.
    """
    sr = 16000  # sample rate used to convert sample counts to seconds
    happy = []
    neutral = []
    angry = []
    sad = []
    input_lstm = train_data['input_lstm'].tolist()
    input = train_data['input']
    target = train_data['target']
    # Group (lstm-features, raw-audio) pairs by one-hot emotion label.
    # NOTE(review): the `label == [1,0,0,0]` comparisons assume labels are
    # plain lists; numpy arrays would not compare equal to a list — confirm.
    for utterance_lstm, utterance, label in zip(input_lstm, input, target):
        if label == [1,0,0,0]:
            happy.append((utterance_lstm, utterance))
        elif label == [0,1,0,0]:
            neutral.append((utterance_lstm, utterance))
        elif label == [0,0,1,0]:
            angry.append((utterance_lstm, utterance))
        else:
            sad.append((utterance_lstm, utterance))
    emotions = [happy, neutral, angry, sad]
    # New spliced samples, accumulated across all emotions.
    additions_lstm = []
    additions = []
    for x, emotion in enumerate(emotions):
        # One bucket per whole second of duration (raw audio and the
        # matching lstm-feature sequences kept in parallel lists).
        utterances_0_1 = []
        utterances_1_2 = []
        utterances_2_3 = []
        utterances_3_4 = []
        utterances_4_5 = []
        utterances_5_6 = []
        utterances_6_7 = []
        utterances_7_8 = []
        utterances_0_1_lstm = []
        utterances_1_2_lstm = []
        utterances_2_3_lstm = []
        utterances_3_4_lstm = []
        utterances_4_5_lstm = []
        utterances_5_6_lstm = []
        utterances_6_7_lstm = []
        utterances_7_8_lstm = []
        for y, (utterance_lstm, utterance) in enumerate(emotion):
            print("Sample " + str(y) + " from emotion " + str(x))
            # Utterances of 8 s or longer are ignored for augmentation.
            if len(utterance)/sr < 8:
                if len(utterance)/sr > 7:
                    utterances_7_8_lstm.append(utterance_lstm)
                    utterances_7_8.append(utterance)
                elif len(utterance)/sr > 6:
                    utterances_6_7_lstm.append(utterance_lstm)
                    utterances_6_7.append(utterance)
                elif len(utterance)/sr > 5:
                    utterances_5_6_lstm.append(utterance_lstm)
                    utterances_5_6.append(utterance)
                elif len(utterance)/sr > 4:
                    utterances_4_5_lstm.append(utterance_lstm)
                    utterances_4_5.append(utterance)
                elif len(utterance)/sr > 3:
                    utterances_3_4_lstm.append(utterance_lstm)
                    utterances_3_4.append(utterance)
                elif len(utterance)/sr > 2:
                    utterances_2_3_lstm.append(utterance_lstm)
                    utterances_2_3.append(utterance)
                elif len(utterance)/sr > 1:
                    utterances_1_2_lstm.append(utterance_lstm)
                    utterances_1_2.append(utterance)
                else:
                    utterances_0_1_lstm.append(utterance_lstm)
                    utterances_0_1.append(utterance)
        # Cross-product index matrices for the complementary buckets.
        matrix_0_8 = []
        for i in range(len(utterances_0_1)):
            for j in range(len(utterances_7_8)):
                matrix_0_8.append((i, j))
        matrix_1_7 = []
        for i in range(len(utterances_1_2)):
            for j in range(len(utterances_6_7)):
                matrix_1_7.append((i, j))
        matrix_2_6 = []
        for i in range(len(utterances_2_3)):
            for j in range(len(utterances_5_6)):
                matrix_2_6.append((i, j))
        matrix_3_5 = []
        for i in range(len(utterances_3_4)):
            for j in range(len(utterances_4_5)):
                matrix_3_5.append((i, j))
        # NOTE(review): np.random.choice raises ValueError if a bucket
        # pair is empty for some emotion — presumably the real data
        # always populates every bucket; confirm.
        rand_0_8 = np.random.choice(np.array(matrix_0_8, dtype='i,i'), size=2000)
        for (i, j) in rand_0_8:
            additions_lstm.append(np.append(utterances_0_1_lstm[i], utterances_7_8_lstm[j], axis=0))
            additions.append(np.append(utterances_0_1[i], utterances_7_8[j]))
            # New sample inherits the current emotion as a one-hot target.
            a = np.zeros(4)
            np.put(a, x, 1)
            target.append(a)
        rand_1_7 = np.random.choice(np.array(matrix_1_7, dtype='i,i'), size=2000)
        for (i, j) in rand_1_7:
            additions_lstm.append(np.append(utterances_1_2_lstm[i], utterances_6_7_lstm[j], axis=0))
            additions.append(np.append(utterances_1_2[i], utterances_6_7[j]))
            a = np.zeros(4)
            np.put(a, x, 1)
            target.append(a)
        rand_2_6 = np.random.choice(np.array(matrix_2_6, dtype='i,i'), size=2000)
        for (i, j) in rand_2_6:
            additions_lstm.append(np.append(utterances_2_3_lstm[i], utterances_5_6_lstm[j], axis=0))
            additions.append(np.append(utterances_2_3[i], utterances_5_6[j]))
            a = np.zeros(4)
            np.put(a, x, 1)
            target.append(a)
        rand_3_5 = np.random.choice(np.array(matrix_3_5, dtype='i,i'), size=2000)
        for (i, j) in rand_3_5:
            additions_lstm.append(np.append(utterances_3_4_lstm[i], utterances_4_5_lstm[j], axis=0))
            additions.append(np.append(utterances_3_4[i], utterances_4_5[j]))
            a = np.zeros(4)
            np.put(a, x, 1)
            target.append(a)
    input_lstm += additions_lstm
    input += additions
    if len(input_lstm) == len(input) == len(target):
        print("equal lengths")
    # Shuffle all three lists with the same permutation.
    x = list(range(len(input_lstm)))
    random.shuffle(x)
    input_lstm_new = []
    input_new = []
    target_new = []
    for i in x:
        input_lstm_new.append(input_lstm[i])
        input_new.append(input[i])
        target_new.append(target[i])
    train_new = {'input_lstm': input_lstm_new, 'input': input_new, 'target': target_new}
    return train_new
def combine():
    """
    Load the hand-crafted-feature and raw-audio IEMOCAP pickles, verify
    their label order matches, then split, augment and write the
    train/test pickles back out.
    """
    with open('/scratch/speech/datasets/IEMOCAP_39_FOUR_EMO_full.pkl', 'rb') as fh:
        feats = pickle.load(fh)
    with open('/scratch/speech/raw_audio_dataset/raw_audio_full.pkl', 'rb') as fh:
        raw = pickle.load(fh)
    # The two datasets must list utterances in the same order: compare
    # the argmax (class index) of every one-hot target pairwise.
    consistent = all(
        np.argmax(feats["target"][i]) == np.argmax(raw["target"][i])
        for i in range(len(feats["target"])))
    if not consistent:
        raise ValueError("Datasets inconsistent")
    print("Datasets consistent")
    merged = {"input_lstm": feats["input"], "input": raw["input"], "target": raw["target"]}
    train, test = split_data(merged)
    train_new = augment_data(train)
    with open('/scratch/speech/hand_raw_dataset/EMO39_raw_audio_augmented_train.pkl', 'wb') as fh:
        pickle.dump(train_new, fh)
    with open('/scratch/speech/hand_raw_dataset/EMO39_raw_audio_augmented_test.pkl', 'wb') as fh:
        pickle.dump(test, fh)
    print("Successfully copied to pickle.")
if __name__ == '__main__':
    # Run the full load -> verify -> split -> augment -> save pipeline.
    combine()
| [
"pickle.dump",
"numpy.put",
"numpy.argmax",
"sklearn.model_selection.train_test_split",
"random.shuffle",
"numpy.zeros",
"numpy.append",
"pickle.load",
"numpy.array"
] | [((226, 329), 'sklearn.model_selection.train_test_split', 'train_test_split', (["data['input_lstm']", "data['input']", "data['target']"], {'test_size': '(0.2)', 'random_state': '(42)'}), "(data['input_lstm'], data['input'], data['target'],\n test_size=0.2, random_state=42)\n", (242, 329), False, 'from sklearn.model_selection import train_test_split\n'), ((5555, 5572), 'random.shuffle', 'random.shuffle', (['x'], {}), '(x)\n', (5569, 5572), False, 'import random\n'), ((5997, 6014), 'pickle.load', 'pickle.load', (['out1'], {}), '(out1)\n', (6008, 6014), False, 'import pickle\n'), ((6116, 6133), 'pickle.load', 'pickle.load', (['out2'], {}), '(out2)\n', (6127, 6133), False, 'import pickle\n'), ((6886, 6911), 'pickle.dump', 'pickle.dump', (['train_new', 'f'], {}), '(train_new, f)\n', (6897, 6911), False, 'import pickle\n'), ((7017, 7037), 'pickle.dump', 'pickle.dump', (['test', 'f'], {}), '(test, f)\n', (7028, 7037), False, 'import pickle\n'), ((3894, 3927), 'numpy.array', 'np.array', (['matrix_0_8'], {'dtype': '"""i,i"""'}), "(matrix_0_8, dtype='i,i')\n", (3902, 3927), True, 'import numpy as np\n'), ((4167, 4178), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4175, 4178), True, 'import numpy as np\n'), ((4191, 4206), 'numpy.put', 'np.put', (['a', 'x', '(1)'], {}), '(a, x, 1)\n', (4197, 4206), True, 'import numpy as np\n'), ((4273, 4306), 'numpy.array', 'np.array', (['matrix_1_7'], {'dtype': '"""i,i"""'}), "(matrix_1_7, dtype='i,i')\n", (4281, 4306), True, 'import numpy as np\n'), ((4546, 4557), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4554, 4557), True, 'import numpy as np\n'), ((4570, 4585), 'numpy.put', 'np.put', (['a', 'x', '(1)'], {}), '(a, x, 1)\n', (4576, 4585), True, 'import numpy as np\n'), ((4652, 4685), 'numpy.array', 'np.array', (['matrix_2_6'], {'dtype': '"""i,i"""'}), "(matrix_2_6, dtype='i,i')\n", (4660, 4685), True, 'import numpy as np\n'), ((4925, 4936), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4933, 4936), True, 'import 
numpy as np\n'), ((4949, 4964), 'numpy.put', 'np.put', (['a', 'x', '(1)'], {}), '(a, x, 1)\n', (4955, 4964), True, 'import numpy as np\n'), ((5031, 5064), 'numpy.array', 'np.array', (['matrix_3_5'], {'dtype': '"""i,i"""'}), "(matrix_3_5, dtype='i,i')\n", (5039, 5064), True, 'import numpy as np\n'), ((5304, 5315), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5312, 5315), True, 'import numpy as np\n'), ((5328, 5343), 'numpy.put', 'np.put', (['a', 'x', '(1)'], {}), '(a, x, 1)\n', (5334, 5343), True, 'import numpy as np\n'), ((6204, 6233), 'numpy.argmax', 'np.argmax', (["dict1['target'][i]"], {}), "(dict1['target'][i])\n", (6213, 6233), True, 'import numpy as np\n'), ((6237, 6266), 'numpy.argmax', 'np.argmax', (["dict2['target'][i]"], {}), "(dict2['target'][i])\n", (6246, 6266), True, 'import numpy as np\n'), ((4006, 4071), 'numpy.append', 'np.append', (['utterances_0_1_lstm[i]', 'utterances_7_8_lstm[j]'], {'axis': '(0)'}), '(utterances_0_1_lstm[i], utterances_7_8_lstm[j], axis=0)\n', (4015, 4071), True, 'import numpy as np\n'), ((4102, 4149), 'numpy.append', 'np.append', (['utterances_0_1[i]', 'utterances_7_8[j]'], {}), '(utterances_0_1[i], utterances_7_8[j])\n', (4111, 4149), True, 'import numpy as np\n'), ((4385, 4450), 'numpy.append', 'np.append', (['utterances_1_2_lstm[i]', 'utterances_6_7_lstm[j]'], {'axis': '(0)'}), '(utterances_1_2_lstm[i], utterances_6_7_lstm[j], axis=0)\n', (4394, 4450), True, 'import numpy as np\n'), ((4481, 4528), 'numpy.append', 'np.append', (['utterances_1_2[i]', 'utterances_6_7[j]'], {}), '(utterances_1_2[i], utterances_6_7[j])\n', (4490, 4528), True, 'import numpy as np\n'), ((4764, 4829), 'numpy.append', 'np.append', (['utterances_2_3_lstm[i]', 'utterances_5_6_lstm[j]'], {'axis': '(0)'}), '(utterances_2_3_lstm[i], utterances_5_6_lstm[j], axis=0)\n', (4773, 4829), True, 'import numpy as np\n'), ((4860, 4907), 'numpy.append', 'np.append', (['utterances_2_3[i]', 'utterances_5_6[j]'], {}), '(utterances_2_3[i], utterances_5_6[j])\n', 
(4869, 4907), True, 'import numpy as np\n'), ((5143, 5208), 'numpy.append', 'np.append', (['utterances_3_4_lstm[i]', 'utterances_4_5_lstm[j]'], {'axis': '(0)'}), '(utterances_3_4_lstm[i], utterances_4_5_lstm[j], axis=0)\n', (5152, 5208), True, 'import numpy as np\n'), ((5239, 5286), 'numpy.append', 'np.append', (['utterances_3_4[i]', 'utterances_4_5[j]'], {}), '(utterances_3_4[i], utterances_4_5[j])\n', (5248, 5286), True, 'import numpy as np\n')] |
"""
The Blahut-Arimoto algorithm for solving the rate-distortion problem.
"""
from __future__ import division
import numpy as np
from .distortions import hamming_distortion
from .rate_distortion import RateDistortionResult
from ..divergences.pmf import relative_entropy
from ..math.sampling import sample_simplex
###############################################################################
# Rate-Distortion
def _blahut_arimoto(p_x, beta, q_y_x, distortion, max_iters=100):
    """
    Perform the Blahut-Arimoto algorithm.

    Parameters
    ----------
    p_x : np.ndarray
        The pmf to work with.
    beta : float
        The beta value for the optimization.
    q_y_x : np.ndarray
        The initial conditional distribution q(y|x).
    distortion : func
        Callable producing the distortion matrix from (p_x, q_y_x).
    max_iters : int
        The maximum number of iterations.

    Returns
    -------
    result : RateDistortionResult
        A rate, distortion pair.
    q_xy : np.ndarray
        The joint distribution q(x, y).
    """
    def marginal(cond):
        # q(y) = \sum_x p(x) q(y|x)
        return np.matmul(p_x, cond)

    def reweight(q_y, cond):
        # q(y|x) proportional to q(y) 2^{-beta d(x,y)}, renormalized over y.
        unnorm = q_y * np.exp2(-beta * distortion(p_x, cond))
        return unnorm / unnorm.sum(axis=1, keepdims=True)

    def mean_distortion(cond):
        # <d> = \sum_{x,y} p(x) q(y|x) d(x,y)
        return np.matmul(p_x, cond * distortion(p_x, cond)).sum()

    q_y = marginal(q_y_x)
    prev_d, d = 0, mean_distortion(q_y_x)
    iters = 0
    # Iterate the BA fixed-point equations until the average distortion
    # stops changing (or the iteration budget runs out).
    while iters < max_iters and not np.isclose(prev_d, d):
        iters += 1
        prev_d = d
        q_y = marginal(q_y_x)
        q_y_x = reweight(q_y, q_y_x)
        d = mean_distortion(q_y_x)
    # Joint q(x, y) = p(x) q(y|x); rate = I(X;Y) from joint vs marginals.
    joint = p_x[:, np.newaxis] * q_y_x
    outer = joint.sum(axis=0, keepdims=True) * joint.sum(axis=1, keepdims=True)
    rate = np.nansum(joint * np.log2(joint / outer))
    return RateDistortionResult(rate, d), joint
def blahut_arimoto(p_x, beta, distortion=hamming_distortion, max_iters=100, restarts=100):
    """
    Perform a robust form of the Blahut-Arimoto algorithm.

    Runs the basic algorithm from several initial conditions — the
    uniform channel first, then a deterministic channel, then random
    draws from the simplex — and keeps the solution minimizing
    rate + beta * distortion.

    Parameters
    ----------
    p_x : np.ndarray
        The pmf to work with.
    beta : float
        The beta value for the optimization.
    distortion : func
        The distortion matrix constructor.
    max_iters : int
        The maximum number of iterations per restart.
    restarts : int
        The number of initial conditions to try.

    Returns
    -------
    result : RateDistortionResult
        The rate, distortion pair.
    q_xy : np.ndarray
        The distribution p(x, y) which achieves the optimal rate, distortion.
    """
    n = len(p_x)

    def initial_condition(i):
        # Uniform channel, then a deterministic one, then random draws.
        if i == 0:
            return np.ones((n, n)) / n
        if i == 1:
            init = np.zeros((n, n))
            init[0, :] = 1
            return init
        return sample_simplex(n, n)

    candidates = [_blahut_arimoto(p_x=p_x,
                                  beta=beta,
                                  q_y_x=initial_condition(i),
                                  distortion=distortion,
                                  max_iters=max_iters)
                  for i in range(restarts)]
    # Pick the candidate with the smallest rate-distortion objective.
    return min(candidates, key=lambda cand: cand[0].rate + beta * cand[0].distortion)
###############################################################################
# Information Bottleneck
def blahut_arimoto_ib(p_xy, beta, divergence=relative_entropy, max_iters=100, restarts=250):  # pragma: no cover
    """
    Perform a robust form of the Blahut-Arimoto algorithms.
    Parameters
    ----------
    p_xy : np.ndarray
        The pmf to work with.
    beta : float
        The beta value for the optimization.
    q_y_x : np.ndarray
        The initial condition to work with.
    divergence : func
        The divergence measure to construct a distortion from: D(p(Y|x)||q(Y|t)).
    max_iters : int
        The maximum number of iterations.
    restarts : int
        The number of initial conditions to try.
    Returns
    -------
    result : RateDistortionResult
        The rate, distortion pair.
    q_xyt : np.ndarray
        The distribution p(x, y, t) which achieves the optimal rate, distortion.
    """
    # Marginal p(x) and conditional p(y|x) derived from the joint.
    p_x = p_xy.sum(axis=1)
    p_y_x = p_xy / p_xy.sum(axis=1, keepdims=True)
    def next_q_y_t(q_t_x):
        """
        q(y|t) = (\sum_x p(x, y) * q(t|x)) / q(t)
        """
        q_xyt = q_t_x[:, np.newaxis, :] * p_xy[:, :, np.newaxis]
        q_ty = q_xyt.sum(axis=0).T
        q_y_t = q_ty / q_ty.sum(axis=1, keepdims=True)
        # A cluster t with zero mass gives 0/0 above; patch those entries.
        q_y_t[np.isnan(q_y_t)] = 1
        return q_y_t
    def distortion(p_x, q_t_x):
        """
        d(x, t) = D[ p(Y|x) || q(Y|t) ]
        """
        q_y_t = next_q_y_t(q_t_x)
        # NOTE(review): the list has |X|*|T| entries but is reshaped to
        # q_y_t.shape = (|T|, |Y|); this only lines up when |X| == |Y|,
        # which blahut_arimoto's square (n, n) channels provide — confirm.
        distortions = np.asarray([divergence(a, b) for a in p_y_x for b in q_y_t]).reshape(q_y_t.shape)
        return distortions
    rd, q_xt = blahut_arimoto(p_x=p_x,
                              beta=beta,
                              distortion=distortion,
                              max_iters=max_iters,
                              restarts=restarts
                              )
    # Recover q(t|x) from the optimal joint, then build q(x, y, t).
    q_t_x = q_xt / q_xt.sum(axis=1, keepdims=True)
    q_xyt = p_xy[:, :, np.newaxis] * q_t_x[:, np.newaxis, :]
    return rd, q_xyt
###############################################################################
# TODO: Deterministic Forms | [
"numpy.exp2",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"numpy.isclose",
"numpy.matmul"
] | [((1259, 1280), 'numpy.matmul', 'np.matmul', (['p_x', 'q_y_x'], {}), '(p_x, q_y_x)\n', (1268, 1280), True, 'import numpy as np\n'), ((1461, 1479), 'numpy.exp2', 'np.exp2', (['(-beta * d)'], {}), '(-beta * d)\n', (1468, 1479), True, 'import numpy as np\n'), ((2072, 2093), 'numpy.isclose', 'np.isclose', (['prev_d', 'd'], {}), '(prev_d, d)\n', (2082, 2093), True, 'import numpy as np\n'), ((5109, 5124), 'numpy.isnan', 'np.isnan', (['q_y_t'], {}), '(q_y_t)\n', (5117, 5124), True, 'import numpy as np\n'), ((1663, 1691), 'numpy.matmul', 'np.matmul', (['p_x', '(q_y_x * dist)'], {}), '(p_x, q_y_x * dist)\n', (1672, 1691), True, 'import numpy as np\n'), ((3244, 3259), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (3251, 3259), True, 'import numpy as np\n'), ((3305, 3321), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (3313, 3321), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd
import subprocess
import time
from argparse import ArgumentParser
import socket
from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
from sklearn.cluster import DBSCAN
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
import sqlite3
import gc
import psutil
from glob import glob
def updatecoldb(selcolname,selcoldata,updcolname,updcoldata,table,dbfile):
    """ Update one column of a table in an SQLite database.

    Parameters
    ----------
    selcolname : str
        Name of the column used to select rows (e.g. a unique ID).
    selcoldata : sequence
        Selection values, one per row to update.
    updcolname : str
        Name of the column to update.
    updcoldata : sequence
        New values, parallel to ``selcoldata``.
    table : str
        Table name.
    dbfile : str
        Path to the SQLite database file.
    """
    print('Updating '+updcolname+' column in '+table+' table using '+selcolname)
    t0 = time.time()
    # Let sqlite3 accept numpy scalar types directly.
    sqlite3.register_adapter(np.int16, int)
    sqlite3.register_adapter(np.int64, int)
    sqlite3.register_adapter(np.float64, float)
    sqlite3.register_adapter(np.float32, float)
    # Renamed the connection from `db` (which shadowed the dlnpyutils
    # `db` module import) and made the close() exception-safe.
    conn = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
    try:
        c = conn.cursor()
        data = list(zip(updcoldata,selcoldata))
        # Identifiers cannot be bound parameters; the values are bound.
        c.executemany('''UPDATE '''+table+''' SET '''+updcolname+'''=? WHERE '''+selcolname+'''=?''', data)
        conn.commit()
    finally:
        conn.close()
    print('updating done after '+str(time.time()-t0)+' sec')
def breakup_idstr(dbfile):
    """ Break-up idstr file into separate measid/objectid lists per exposure on /data0.

    Parameters
    ----------
    dbfile : str or list of str
        Path(s) to ``*_idstr.db`` SQLite database file(s).

    For each database, all (measid, exposure, objectid) rows are read and one
    ``.npy`` file per exposure is written under
    ``/data0/.../idstr/<instrument>/<night>/<exposure>/``.
    """
    t00 = time.time()
    outdir = '/data0/dnidever/nsc/instcal/v3/idstr/'
    # Load the exposures table
    expcat = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz',1)
    # Make sure it's a list
    if type(dbfile) is str: dbfile=[dbfile]
    print('Breaking up '+str(len(dbfile))+' database files')
    # Loop over files
    for i,dbfile1 in enumerate(dbfile):
        print(str(i+1)+' '+dbfile1)
        if os.path.exists(dbfile1):
            t0 = time.time()
            dbbase1 = os.path.basename(dbfile1)[0:-9] # remove _idstr.db ending
            # Get existing index names for this database
            d = sqlite3.connect(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
            # NOTE(review): connection `d` is never closed; consider d.close()
            # once the rows have been fetched.
            cur = d.cursor()
            cmd = 'select measid,exposure,objectid from idstr'
            t1 = time.time()
            data = cur.execute(cmd).fetchall()
            print(' '+str(len(data))+' rows read in %5.1f sec. ' % (time.time()-t1))
            # Break up data into lists
            measid,exposure,objectid = list(zip(*data))
            measid = np.array(measid)
            objectid = np.array(objectid)
            exposure = np.array(exposure)
            # Index rows by exposure so each exposure's rows are contiguous
            eindex = dln.create_index(exposure)
            # Match exposures to exposure catalog
            ind1,ind2 = dln.match(expcat['EXPOSURE'],eindex['value'])
            # Loop over exposures and write output files
            nexp = len(eindex['value'])
            print(' '+str(nexp)+' exposures')
            # Size the string columns to fit the longest values found
            measid_maxlen = np.max(dln.strlen(measid))
            objectid_maxlen = np.max(dln.strlen(objectid))
            df = np.dtype([('measid',np.str,measid_maxlen+1),('objectid',np.str,objectid_maxlen+1)])
            # Loop over the exposures and write out the files
            for k in range(nexp):
                if nexp>100:
                    if k % 100 == 0: print(' '+str(k+1))
                ind = eindex['index'][eindex['lo'][k]:eindex['hi'][k]+1]
                cat = np.zeros(len(ind),dtype=df)
                cat['measid'] = measid[ind]
                cat['objectid'] = objectid[ind]
                # Output directory is <instrument>/<night>/<exposure>, with the
                # night (YYYYMMDD) derived from the exposure's DATEOBS
                instcode = expcat['INSTRUMENT'][ind1[k]]
                dateobs = expcat['DATEOBS'][ind1[k]]
                night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
                if os.path.exists(outdir+instcode+'/'+night+'/'+eindex['value'][k]) is False:
                    # Sometimes this crashes because another process is making the directory at the same time
                    try:
                        os.makedirs(outdir+instcode+'/'+night+'/'+eindex['value'][k])
                    except:
                        pass
                outfile = outdir+instcode+'/'+night+'/'+eindex['value'][k]+'/'+eindex['value'][k]+'__'+dbbase1+'.npy'
                np.save(outfile,cat)
            print(' dt = %6.1f sec. ' % (time.time()-t0))
        else:
            print(' '+dbfile1+' NOT FOUND')
    print('dt = %6.1f sec.' % (time.time()-t00))
if __name__ == "__main__":
    # Combine the higher-resolution HEALPix object catalogs belonging to one
    # parent (nside=128) pixel into a single FITS catalog, renumber the
    # objectIDs globally, and break up the per-pixel idstr databases.
    parser = ArgumentParser(description='Combine NSC data for one healpix region.')
    parser.add_argument('pix', type=str, nargs=1, help='HEALPix pixel number')
    args = parser.parse_args()
    parentpix = args.pix[0]
    version = 'v3'
    nside = 128
    t0 = time.time()
    # Output filename
    outdir = '/net/dl2/dnidever/nsc/instcal/'+version+'/combine/'
    outbase = str(parentpix)
    subdir = str(int(parentpix)//1000) # use the thousands to create subdirectory grouping
    if os.path.exists(outdir+'/'+subdir) is False: os.mkdir(outdir+'/'+subdir)
    outfile = outdir+'/'+subdir+'/'+str(parentpix)+'.fits'
    # Get higher-resolution object filenames, sorted for deterministic order
    outfiles = glob('/net/dl2/dnidever/nsc/instcal/v3/combine/'+str(int(parentpix)//1000)+'/'+str(parentpix)+'_n*.fits.gz')
    outfiles = np.array(outfiles)
    si = np.argsort(outfiles)
    outfiles = outfiles[si]
    # Filenames look like <parentpix>_n<hinside>_<pix>.fits.gz
    base = [os.path.basename(f)[:-8] for f in outfiles]
    hinside = int(base[0].split('_')[1][1:])
    allpix = [b.split('_')[2] for b in base]
    # Load and concatenate all of the files
    print('Combining all of the object catalogs for '+parentpix)
    allmeta = None
    allobj = None
    nobjects = []
    totobjects = 0
    for i in range(len(allpix)):
        pix1 = allpix[i]
        outfile1 = outfiles[i]
        if os.path.exists(outfile1) is False:
            print(outfile1+' NOT FOUND')
            sys.exit()
        # meta columns different: nobjects there'll be repeats
        meta1 = fits.getdata(outfile1,1)
        if allmeta is None:
            allmeta = meta1
        else:
            allmeta = np.hstack((allmeta,meta1))
        hd1 = fits.getheader(outfile1,2)
        print(str(i+1)+' '+outfile1+' '+str(hd1['naxis2']))
        obj1 = fits.getdata(outfile1,2)
        nobj1 = len(obj1)
        # Update the objectIDs: new IDs are <parentpix>.<running count>, so
        # they are unique across all sub-pixels of this parent pixel
        dbfile_idstr1 = outfile1.replace('.fits.gz','_idstr.db')
        objectid_orig = obj1['objectid']
        objectid_new = dln.strjoin( str(parentpix)+'.', ((np.arange(nobj1)+1+totobjects).astype(np.str)) )
        # only update if they are different
        diff, = np.where(objectid_orig != objectid_new)
        if len(diff)>0:
            print('Updating objectid in '+dbfile_idstr1)
            updatecoldb('objectid',objectid_orig,'objectid',objectid_new,'idstr',dbfile_idstr1)
        # Update objectIDs in catalog
        obj1['objectid'] = objectid_new
        # Update objectIDs in high resolution HEALPix output file
        print('Updating objectIDs in '+outfile1)
        outfile1fits = outfile1.replace('.fits.gz','.fits')
        if os.path.exists(outfile1fits): os.remove(outfile1fits)
        Table(meta1).write(outfile1fits) # first, summary table
        # append other fits binary tables
        hdulist = fits.open(outfile1fits)
        hdu = fits.table_to_hdu(Table(obj1)) # second, catalog
        hdulist.append(hdu)
        hdulist.writeto(outfile1fits,overwrite=True)
        hdulist.close()
        # Replace the old gzipped file with the freshly written one
        if os.path.exists(outfile1): os.remove(outfile1)
        ret = subprocess.call(['gzip',outfile1fits]) # compress final catalog
        if allobj is None:
            allobj = obj1.copy()
        else:
            allobj = np.hstack((allobj,obj1.copy()))
        nobjects.append(nobj1)
        totobjects += nobj1
    # Deal with duplicate metas: keep one row per exposure base name and sum
    # the nobjects counts of the duplicates
    metaindex = dln.create_index(allmeta['base'])
    for i in range(len(metaindex['value'])):
        indx = metaindex['index'][metaindex['lo'][i]:metaindex['hi'][i]+1]
        meta1 = allmeta[indx[0]].copy()
        if len(indx)>1:
            meta1['nobjects'] = np.sum(allmeta['nobjects'][indx])
        if i==0:
            sumstr = meta1
        else:
            sumstr = np.hstack((sumstr,meta1))
    sumstr = Table(sumstr)
    # Write the output file
    print('Writing combined catalog to '+outfile)
    if os.path.exists(outfile): os.remove(outfile)
    sumstr.write(outfile) # first, summary table
    # append other fits binary tables
    hdulist = fits.open(outfile)
    hdu = fits.table_to_hdu(Table(allobj)) # second, catalog
    hdulist.append(hdu)
    hdulist.writeto(outfile,overwrite=True)
    hdulist.close()
    if os.path.exists(outfile+'.gz'): os.remove(outfile+'.gz')
    ret = subprocess.call(['gzip',outfile]) # compress final catalog
    dt = time.time()-t0
    print('dt = '+str(dt)+' sec.')
    # Finally, split each per-pixel idstr database into per-exposure .npy files
    print('Breaking-up IDSTR information')
    dbfiles_idstr = []
    for i in range(len(allpix)):
        outfile1 = outfiles[i]
        dbfile_idstr1 = outfile1.replace('.fits.gz','_idstr.db')
        dbfiles_idstr.append(dbfile_idstr1)
    breakup_idstr(dbfiles_idstr)
    sys.exit()
| [
"os.mkdir",
"os.remove",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.argsort",
"numpy.arange",
"astropy.io.fits.getdata",
"os.path.exists",
"dlnpyutils.utils.create_index",
"dlnpyutils.utils.match",
"numpy.save",
"os.path.basename",
"numpy.hstack",
"sqlite3.connect",
"dlnpyutils.db.cu... | [((1025, 1036), 'time.time', 'time.time', ([], {}), '()\n', (1034, 1036), False, 'import time\n'), ((1041, 1080), 'sqlite3.register_adapter', 'sqlite3.register_adapter', (['np.int16', 'int'], {}), '(np.int16, int)\n', (1065, 1080), False, 'import sqlite3\n'), ((1085, 1124), 'sqlite3.register_adapter', 'sqlite3.register_adapter', (['np.int64', 'int'], {}), '(np.int64, int)\n', (1109, 1124), False, 'import sqlite3\n'), ((1129, 1172), 'sqlite3.register_adapter', 'sqlite3.register_adapter', (['np.float64', 'float'], {}), '(np.float64, float)\n', (1153, 1172), False, 'import sqlite3\n'), ((1177, 1220), 'sqlite3.register_adapter', 'sqlite3.register_adapter', (['np.float32', 'float'], {}), '(np.float32, float)\n', (1201, 1220), False, 'import sqlite3\n'), ((1230, 1321), 'sqlite3.connect', 'sqlite3.connect', (['dbfile'], {'detect_types': '(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)'}), '(dbfile, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.\n PARSE_COLNAMES)\n', (1245, 1321), False, 'import sqlite3\n'), ((1323, 1334), 'dlnpyutils.db.cursor', 'db.cursor', ([], {}), '()\n', (1332, 1334), False, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((1487, 1498), 'dlnpyutils.db.commit', 'db.commit', ([], {}), '()\n', (1496, 1498), False, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((1503, 1513), 'dlnpyutils.db.close', 'db.close', ([], {}), '()\n', (1511, 1513), False, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((1706, 1717), 'time.time', 'time.time', ([], {}), '()\n', (1715, 1717), False, 'import time\n'), ((1817, 1909), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz"""', '(1)'], {}), "(\n '/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz', 1)\n", (1829, 1909), False, 'from astropy.io import fits\n'), ((4751, 4821), 'argparse.ArgumentParser', 
'ArgumentParser', ([], {'description': '"""Combine NSC data for one healpix region."""'}), "(description='Combine NSC data for one healpix region.')\n", (4765, 4821), False, 'from argparse import ArgumentParser\n'), ((5005, 5016), 'time.time', 'time.time', ([], {}), '()\n', (5014, 5016), False, 'import time\n'), ((5557, 5575), 'numpy.array', 'np.array', (['outfiles'], {}), '(outfiles)\n', (5565, 5575), True, 'import numpy as np\n'), ((5585, 5605), 'numpy.argsort', 'np.argsort', (['outfiles'], {}), '(outfiles)\n', (5595, 5605), True, 'import numpy as np\n'), ((9342, 9375), 'dlnpyutils.utils.create_index', 'dln.create_index', (["allmeta['base']"], {}), "(allmeta['base'])\n", (9358, 9375), True, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((9744, 9757), 'astropy.table.Table', 'Table', (['sumstr'], {}), '(sumstr)\n', (9749, 9757), False, 'from astropy.table import Table, vstack, Column\n'), ((9988, 10011), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (10002, 10011), False, 'import os\n'), ((10390, 10408), 'astropy.io.fits.open', 'fits.open', (['outfile'], {}), '(outfile)\n', (10399, 10408), False, 'from astropy.io import fits\n'), ((10676, 10707), 'os.path.exists', 'os.path.exists', (["(outfile + '.gz')"], {}), "(outfile + '.gz')\n", (10690, 10707), False, 'import os\n'), ((10742, 10776), 'subprocess.call', 'subprocess.call', (["['gzip', outfile]"], {}), "(['gzip', outfile])\n", (10757, 10776), False, 'import subprocess\n'), ((11242, 11252), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11250, 11252), False, 'import sys\n'), ((2149, 2172), 'os.path.exists', 'os.path.exists', (['dbfile1'], {}), '(dbfile1)\n', (2163, 2172), False, 'import os\n'), ((5240, 5277), 'os.path.exists', 'os.path.exists', (["(outdir + '/' + subdir)"], {}), "(outdir + '/' + subdir)\n", (5254, 5277), False, 'import os\n'), ((5284, 5315), 'os.mkdir', 'os.mkdir', (["(outdir + '/' + subdir)"], {}), "(outdir + '/' + subdir)\n", (5292, 5315), 
False, 'import os\n'), ((6480, 6505), 'astropy.io.fits.getdata', 'fits.getdata', (['outfile1', '(1)'], {}), '(outfile1, 1)\n', (6492, 6505), False, 'from astropy.io import fits\n'), ((6638, 6665), 'astropy.io.fits.getheader', 'fits.getheader', (['outfile1', '(2)'], {}), '(outfile1, 2)\n', (6652, 6665), False, 'from astropy.io import fits\n'), ((6740, 6765), 'astropy.io.fits.getdata', 'fits.getdata', (['outfile1', '(2)'], {}), '(outfile1, 2)\n', (6752, 6765), False, 'from astropy.io import fits\n'), ((7238, 7277), 'numpy.where', 'np.where', (['(objectid_orig != objectid_new)'], {}), '(objectid_orig != objectid_new)\n', (7246, 7277), True, 'import numpy as np\n'), ((10013, 10031), 'os.remove', 'os.remove', (['outfile'], {}), '(outfile)\n', (10022, 10031), False, 'import os\n'), ((10437, 10450), 'astropy.table.Table', 'Table', (['allobj'], {}), '(allobj)\n', (10442, 10450), False, 'from astropy.table import Table, vstack, Column\n'), ((10707, 10733), 'os.remove', 'os.remove', (["(outfile + '.gz')"], {}), "(outfile + '.gz')\n", (10716, 10733), False, 'import os\n'), ((10914, 10925), 'time.time', 'time.time', ([], {}), '()\n', (10923, 10925), False, 'import time\n'), ((2191, 2202), 'time.time', 'time.time', ([], {}), '()\n', (2200, 2202), False, 'import time\n'), ((2357, 2449), 'sqlite3.connect', 'sqlite3.connect', (['dbfile1'], {'detect_types': '(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)'}), '(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.\n PARSE_COLNAMES)\n', (2372, 2449), False, 'import sqlite3\n'), ((2552, 2563), 'time.time', 'time.time', ([], {}), '()\n', (2561, 2563), False, 'import time\n'), ((2813, 2829), 'numpy.array', 'np.array', (['measid'], {}), '(measid)\n', (2821, 2829), True, 'import numpy as np\n'), ((2853, 2871), 'numpy.array', 'np.array', (['objectid'], {}), '(objectid)\n', (2861, 2871), True, 'import numpy as np\n'), ((2895, 2913), 'numpy.array', 'np.array', (['exposure'], {}), '(exposure)\n', (2903, 2913), True, 'import numpy as 
np\n'), ((2935, 2961), 'dlnpyutils.utils.create_index', 'dln.create_index', (['exposure'], {}), '(exposure)\n', (2951, 2961), True, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((3036, 3082), 'dlnpyutils.utils.match', 'dln.match', (["expcat['EXPOSURE']", "eindex['value']"], {}), "(expcat['EXPOSURE'], eindex['value'])\n", (3045, 3082), True, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((3357, 3454), 'numpy.dtype', 'np.dtype', (["[('measid', np.str, measid_maxlen + 1), ('objectid', np.str, \n objectid_maxlen + 1)]"], {}), "([('measid', np.str, measid_maxlen + 1), ('objectid', np.str, \n objectid_maxlen + 1)])\n", (3365, 3454), True, 'import numpy as np\n'), ((5646, 5665), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (5662, 5665), False, 'import os\n'), ((6193, 6217), 'os.path.exists', 'os.path.exists', (['outfile1'], {}), '(outfile1)\n', (6207, 6217), False, 'import os\n'), ((6281, 6291), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6289, 6291), False, 'import sys\n'), ((6597, 6624), 'numpy.hstack', 'np.hstack', (['(allmeta, meta1)'], {}), '((allmeta, meta1))\n', (6606, 6624), True, 'import numpy as np\n'), ((7984, 8012), 'os.path.exists', 'os.path.exists', (['outfile1fits'], {}), '(outfile1fits)\n', (7998, 8012), False, 'import os\n'), ((8412, 8435), 'astropy.io.fits.open', 'fits.open', (['outfile1fits'], {}), '(outfile1fits)\n', (8421, 8435), False, 'from astropy.io import fits\n'), ((8744, 8768), 'os.path.exists', 'os.path.exists', (['outfile1'], {}), '(outfile1)\n', (8758, 8768), False, 'import os\n'), ((8808, 8847), 'subprocess.call', 'subprocess.call', (["['gzip', outfile1fits]"], {}), "(['gzip', outfile1fits])\n", (8823, 8847), False, 'import subprocess\n'), ((9592, 9625), 'numpy.sum', 'np.sum', (["allmeta['nobjects'][indx]"], {}), "(allmeta['nobjects'][indx])\n", (9598, 9625), True, 'import numpy as np\n'), ((9705, 9731), 'numpy.hstack', 'np.hstack', (['(sumstr, meta1)'], 
{}), '((sumstr, meta1))\n', (9714, 9731), True, 'import numpy as np\n'), ((2225, 2250), 'os.path.basename', 'os.path.basename', (['dbfile1'], {}), '(dbfile1)\n', (2241, 2250), False, 'import os\n'), ((3261, 3279), 'dlnpyutils.utils.strlen', 'dln.strlen', (['measid'], {}), '(measid)\n', (3271, 3279), True, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((3318, 3338), 'dlnpyutils.utils.strlen', 'dln.strlen', (['objectid'], {}), '(objectid)\n', (3328, 3338), True, 'from dlnpyutils import utils as dln, coords, bindata, db, job_daemon as jd\n'), ((4519, 4540), 'numpy.save', 'np.save', (['outfile', 'cat'], {}), '(outfile, cat)\n', (4526, 4540), True, 'import numpy as np\n'), ((4690, 4701), 'time.time', 'time.time', ([], {}), '()\n', (4699, 4701), False, 'import time\n'), ((8014, 8037), 'os.remove', 'os.remove', (['outfile1fits'], {}), '(outfile1fits)\n', (8023, 8037), False, 'import os\n'), ((8472, 8483), 'astropy.table.Table', 'Table', (['obj1'], {}), '(obj1)\n', (8477, 8483), False, 'from astropy.table import Table, vstack, Column\n'), ((8770, 8789), 'os.remove', 'os.remove', (['outfile1'], {}), '(outfile1)\n', (8779, 8789), False, 'import os\n'), ((4032, 4106), 'os.path.exists', 'os.path.exists', (["(outdir + instcode + '/' + night + '/' + eindex['value'][k])"], {}), "(outdir + instcode + '/' + night + '/' + eindex['value'][k])\n", (4046, 4106), False, 'import os\n'), ((8050, 8062), 'astropy.table.Table', 'Table', (['meta1'], {}), '(meta1)\n', (8055, 8062), False, 'from astropy.table import Table, vstack, Column\n'), ((1551, 1562), 'time.time', 'time.time', ([], {}), '()\n', (1560, 1562), False, 'import time\n'), ((4266, 4337), 'os.makedirs', 'os.makedirs', (["(outdir + instcode + '/' + night + '/' + eindex['value'][k])"], {}), "(outdir + instcode + '/' + night + '/' + eindex['value'][k])\n", (4277, 4337), False, 'import os\n'), ((4582, 4593), 'time.time', 'time.time', ([], {}), '()\n', (4591, 4593), False, 'import time\n'), ((2680, 
2691), 'time.time', 'time.time', ([], {}), '()\n', (2689, 2691), False, 'import time\n'), ((7129, 7145), 'numpy.arange', 'np.arange', (['nobj1'], {}), '(nobj1)\n', (7138, 7145), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Class for reading data from Micromed (.trc) files.
Inspired by the Matlab code for EEGLAB from <NAME>.
Completed with Matlab code from Guillaume Becq.
Supported : Read
Author: sgarcia
"""
import datetime
import os
import struct
# file no longer exists in Python3
try:
file
except NameError:
import io
file = io.BufferedReader
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import Segment, AnalogSignal, EpochArray, EventArray
class struct_file(file):
    """File subclass with a helper for reading fixed-size binary records."""
    def read_f(self, fmt):
        """Read exactly the bytes described by *fmt* and unpack them as a tuple."""
        nbytes = struct.calcsize(fmt)
        raw = self.read(nbytes)
        return struct.unpack(fmt, raw)
class MicromedIO(BaseIO):
    """
    Class for reading data from micromed (.trc).
    Usage:
        >>> from neo import io
        >>> r = io.MicromedIO(filename='File_micromed_1.TRC')
        >>> seg = r.read_segment(lazy=False, cascade=True)
        >>> print seg.analogsignals # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
        [<AnalogSignal(array([ -1.77246094e+02, -2.24707031e+02, -2.66015625e+02,
        ...
    """
    is_readable = True
    is_writable = False
    supported_objects = [ Segment , AnalogSignal , EventArray, EpochArray ]
    readable_objects = [Segment]
    writeable_objects = [ ]
    has_header = False
    is_streameable = False
    read_params = { Segment : [ ] }
    write_params = None
    name = None
    extensions = [ 'TRC' ]
    mode = 'file'

    def __init__(self , filename = None) :
        """
        This class read a micromed TRC file.

        Arguments:
            filename : the filename to read
        """
        BaseIO.__init__(self)
        self.filename = filename

    def read_segment(self, cascade = True, lazy = False,):
        """
        Read the Micromed TRC file and return a single Segment.

        Arguments:
            cascade : if False, return only the annotated, empty Segment.
            lazy : if True, skip loading the raw samples; signals, events and
                epochs carry a ``lazy_shape`` attribute instead of data.
        """
        f = struct_file(self.filename, 'rb')
        # -- Patient name: fixed-width, space-padded fields at offset 64
        f.seek(64,0)
        surname = f.read(22)
        while surname[-1] == ' ' :
            if len(surname) == 0 :break
            surname = surname[:-1]
        firstname = f.read(20)
        while firstname[-1] == ' ' :
            if len(firstname) == 0 :break
            firstname = firstname[:-1]
        # -- Recording date/time: six bytes at offset 128; year is stored as
        #    years since 1900
        f.seek(128,0)
        day, month, year, hour, minute, sec = f.read_f('bbbbbb')
        rec_datetime = datetime.datetime(year+1900 , month , day, hour, minute, sec)
        # -- Global acquisition parameters at offset 138
        f.seek(138,0)
        Data_Start_Offset , Num_Chan , Multiplexer , Rate_Min , Bytes = f.read_f('IHHHH')
        #~ print Num_Chan, Bytes
        #header version (only version 4 of the TRC format is supported)
        f.seek(175,0)
        header_version, = f.read_f('b')
        assert header_version == 4
        seg = Segment( name = firstname+' '+surname,
                       file_origin = os.path.basename(self.filename),
                       )
        seg.annotate(surname = surname)
        seg.annotate(firstname = firstname)
        seg.annotate(rec_datetime = rec_datetime)
        if not cascade:
            return seg
        # area : table of (name, offset, length) descriptors for each file zone
        f.seek(176,0)
        zone_names = ['ORDER', 'LABCOD', 'NOTE', 'FLAGS', 'TRONCA', 'IMPED_B', 'IMPED_E', 'MONTAGE',
                'COMPRESS', 'AVERAGE', 'HISTORY', 'DVIDEO', 'EVENT A', 'EVENT B', 'TRIGGER']
        zones = { }
        for zname in zone_names:
            zname2, pos, length = f.read_f('8sII')
            zones[zname] = zname2, pos, length
            #~ print zname2, pos, length
        # reading raw data : interleaved unsigned ints of `Bytes` bytes,
        # reshaped to (n_samples, Num_Chan)
        if not lazy:
            f.seek(Data_Start_Offset,0)
            rawdata = np.fromstring(f.read() , dtype = 'u'+str(Bytes))
            rawdata = rawdata.reshape(( rawdata.size/Num_Chan , Num_Chan))
        # Reading Code Info : maps each stored channel to its electrode record
        zname2, pos, length = zones['ORDER']
        f.seek(pos,0)
        code = np.fromfile(f, dtype='u2', count=Num_Chan)
        units = {-1: pq.nano*pq.V, 0:pq.uV, 1:pq.mV, 2:1, 100: pq.percent, 101:pq.dimensionless, 102:pq.dimensionless}
        for c in range(Num_Chan):
            # Each electrode record is 128 bytes inside the LABCOD zone
            zname2, pos, length = zones['LABCOD']
            f.seek(pos+code[c]*128+2,0)
            label = f.read(6).strip("\x00")
            ground = f.read(6).strip("\x00")
            logical_min , logical_max, logical_ground, physical_min, physical_max = f.read_f('iiiii')
            k, = f.read_f('h')
            if k in units.keys() :
                unit = units[k]
            else :
                unit = pq.uV
            f.seek(8,1)
            sampling_rate, = f.read_f('H') * pq.Hz
            sampling_rate *= Rate_Min
            if lazy:
                signal = [ ]*unit
            else:
                # Convert raw logical values to physical units
                factor = float(physical_max - physical_min) / float(logical_max-logical_min+1)
                signal = ( rawdata[:,c].astype('f') - logical_ground )* factor*unit
            anaSig = AnalogSignal(signal, sampling_rate=sampling_rate,
                                  name=label, channel_index=c)
            if lazy:
                anaSig.lazy_shape = None
            anaSig.annotate(ground = ground)
            seg.analogsignals.append( anaSig )
        sampling_rate = np.mean([ anaSig.sampling_rate for anaSig in seg.analogsignals ])*pq.Hz
        # Read trigger and notes
        for zname, label_dtype in [ ('TRIGGER', 'u2'), ('NOTE', 'S40') ]:
            zname2, pos, length = zones[zname]
            f.seek(pos,0)
            triggers = np.fromstring(f.read(length) , dtype = [('pos','u4'), ('label', label_dtype)] , )
            ea = EventArray(name =zname[0]+zname[1:].lower())
            if not lazy:
                # keep only events with a valid, in-range sample position
                keep = (triggers['pos']>=triggers['pos'][0]) & (triggers['pos']<rawdata.shape[0]) & (triggers['pos']!=0)
                triggers = triggers[keep]
                ea.labels = triggers['label'].astype('S')
                ea.times = (triggers['pos']/sampling_rate).rescale('s')
            else:
                ea.lazy_shape = triggers.size
            seg.eventarrays.append(ea)
        # Read Event A and B
        # Not so well tested
        for zname in ['EVENT A', 'EVENT B']:
            zname2, pos, length = zones[zname]
            f.seek(pos,0)
            epochs = np.fromstring(f.read(length) ,
                            dtype = [('label','u4'),('start','u4'),('stop','u4'),] )
            ep = EpochArray(name =zname[0]+zname[1:].lower())
            if not lazy:
                keep = (epochs['start']>0) & (epochs['start']<rawdata.shape[0]) & (epochs['stop']<rawdata.shape[0])
                epochs = epochs[keep]
                ep.labels = epochs['label'].astype('S')
                ep.times = (epochs['start']/sampling_rate).rescale('s')
                ep.durations = ((epochs['stop'] - epochs['start'])/sampling_rate).rescale('s')
            else:
                # BUGFIX: was ``triggers.size`` (copy-paste from the trigger loop
                # above), which reported the size of the leftover trigger array,
                # not this epoch array.
                ep.lazy_shape = epochs.size
            seg.epocharrays.append(ep)
        seg.create_many_to_one_relationship()
        return seg
| [
"neo.io.baseio.BaseIO.__init__",
"os.path.basename",
"numpy.fromfile",
"struct.calcsize",
"datetime.datetime",
"neo.core.AnalogSignal",
"numpy.mean"
] | [((1705, 1726), 'neo.io.baseio.BaseIO.__init__', 'BaseIO.__init__', (['self'], {}), '(self)\n', (1720, 1726), False, 'from neo.io.baseio import BaseIO\n'), ((2358, 2419), 'datetime.datetime', 'datetime.datetime', (['(year + 1900)', 'month', 'day', 'hour', 'minute', 'sec'], {}), '(year + 1900, month, day, hour, minute, sec)\n', (2375, 2419), False, 'import datetime\n'), ((3816, 3858), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""u2"""', 'count': 'Num_Chan'}), "(f, dtype='u2', count=Num_Chan)\n", (3827, 3858), True, 'import numpy as np\n'), ((4832, 4910), 'neo.core.AnalogSignal', 'AnalogSignal', (['signal'], {'sampling_rate': 'sampling_rate', 'name': 'label', 'channel_index': 'c'}), '(signal, sampling_rate=sampling_rate, name=label, channel_index=c)\n', (4844, 4910), False, 'from neo.core import Segment, AnalogSignal, EpochArray, EventArray\n'), ((5126, 5189), 'numpy.mean', 'np.mean', (['[anaSig.sampling_rate for anaSig in seg.analogsignals]'], {}), '([anaSig.sampling_rate for anaSig in seg.analogsignals])\n', (5133, 5189), True, 'import numpy as np\n'), ((612, 632), 'struct.calcsize', 'struct.calcsize', (['fmt'], {}), '(fmt)\n', (627, 632), False, 'import struct\n'), ((2793, 2824), 'os.path.basename', 'os.path.basename', (['self.filename'], {}), '(self.filename)\n', (2809, 2824), False, 'import os\n')] |
import unittest
import numpy as np
import tensorflow as tf
import deepchem as dc
import deepchem.models.tensorgraph.layers as layers
from deepchem.data import NumpyDataset
from deepchem.models.text_cnn import default_dict
from scipy.io import loadmat
from flaky import flaky
import os
class TestEstimators(unittest.TestCase):
"""
Test converting TensorGraphs to Estimators.
"""
# def test_multi_task_classifier(self):
# """Test creating an Estimator from a MultitaskClassifier."""
# n_samples = 10
# n_features = 3
# n_tasks = 2
#
# # Create a dataset and an input function for processing it.
#
# np.random.seed(123)
# X = np.random.rand(n_samples, n_features)
# y = np.zeros((n_samples, n_tasks))
# w = np.ones((n_samples, n_tasks))
# dataset = dc.data.NumpyDataset(X, y, w)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# return {'x': x, 'weights': weights}, y
#
# # Create a TensorGraph model.
#
# model = dc.models.MultitaskClassifier(n_tasks, n_features, dropouts=0)
#
# # Create an estimator from it.
#
# x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
#
# def accuracy(labels, predictions, weights):
# return tf.metrics.accuracy(labels, tf.round(predictions), weights)
#
# metrics = {'accuracy': accuracy}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
#
# estimator.train(input_fn=lambda: input_fn(100))
#
# # Evaluate the model.
#
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 1e-4
# assert results['accuracy'] > 0.9
#
# def test_multi_task_regressor(self):
# """Test creating an Estimator from a MultitaskRegressor."""
# n_samples = 10
# n_features = 3
# n_tasks = 2
#
# # Create a dataset and an input function for processing it.
#
# np.random.seed(123)
# X = np.random.rand(n_samples, n_features)
# y = np.zeros((n_samples, n_tasks))
# w = np.ones((n_samples, n_tasks))
# dataset = dc.data.NumpyDataset(X, y, w)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# return {'x': x, 'weights': weights}, y
#
# # Create a TensorGraph model.
#
# model = dc.models.MultitaskRegressor(n_tasks, n_features, dropouts=0)
#
# # Create an estimator from it.
#
# x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
# metrics = {'error': tf.metrics.mean_absolute_error}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
#
# estimator.train(input_fn=lambda: input_fn(100))
#
# # Evaluate the model.
#
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 1e-3
# assert results['error'] < 0.1
#
# def test_robust_multi_task_classifier(self):
# """Test creating an Estimator from a MultitaskClassifier."""
# n_samples = 10
# n_features = 3
# n_tasks = 2
#
# # Create a dataset and an input function for processing it.
#
# np.random.seed(123)
# X = np.random.rand(n_samples, n_features)
# y = np.zeros((n_samples, n_tasks))
# w = np.ones((n_samples, n_tasks))
# dataset = dc.data.NumpyDataset(X, y, w)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# return {'x': x, 'weights': weights}, y
#
# # Create a TensorGraph model.
#
# model = dc.models.RobustMultitaskClassifier(
# n_tasks,
# n_features,
# layer_sizes=[50],
# bypass_layer_sizes=[10],
# dropouts=0,
# bypass_dropouts=0,
# learning_rate=0.003)
#
# # Create an estimator from it.
#
# x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
#
# def accuracy(labels, predictions, weights):
# return tf.metrics.accuracy(labels, tf.round(predictions), weights)
#
# metrics = {'accuracy': accuracy}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
#
# estimator.train(input_fn=lambda: input_fn(500))
#
# # Evaluate the model.
#
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 1e-2
# assert results['accuracy'] > 0.9
#
# def test_robust_multi_task_regressor(self):
# """Test creating an Estimator from a MultitaskRegressor."""
# n_samples = 10
# n_features = 3
# n_tasks = 2
#
# # Create a dataset and an input function for processing it.
#
# np.random.seed(123)
# X = np.random.rand(n_samples, n_features)
# y = np.zeros((n_samples, n_tasks))
# w = np.ones((n_samples, n_tasks))
# dataset = dc.data.NumpyDataset(X, y, w)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# return {'x': x, 'weights': weights}, y
#
# # Create a TensorGraph model.
#
# model = dc.models.RobustMultitaskRegressor(
# n_tasks,
# n_features,
# layer_sizes=[50],
# bypass_layer_sizes=[10],
# dropouts=0,
# bypass_dropouts=0,
# learning_rate=0.003)
#
# # Create an estimator from it.
#
# x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
# metrics = {'error': tf.metrics.mean_absolute_error}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
#
# estimator.train(input_fn=lambda: input_fn(500))
#
# # Evaluate the model.
#
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 1e-2
# assert results['error'] < 1e-2
def test_sequential(self):
"""Test creating an Estimator from a Sequential model."""
n_samples = 20
n_features = 2
# Create a dataset and an input function for processing it.
X = np.random.rand(n_samples, n_features)
y = np.array([[0.5] for x in range(n_samples)])
dataset = dc.data.NumpyDataset(X, y)
def input_fn(epochs):
x, y, weights = dataset.make_iterator(
batch_size=n_samples, epochs=epochs).get_next()
return {'x': x}, y
# Create the model.
model = dc.models.Sequential(loss="mse", learning_rate=0.01)
model.add(layers.Dense(out_channels=1))
# Create an estimator from it.
x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
metrics = {'error': tf.metrics.mean_absolute_error}
estimator = model.make_estimator(feature_columns=[x_col], metrics=metrics)
# Train the model.
estimator.train(input_fn=lambda: input_fn(1000))
# Evaluate the model.
results = estimator.evaluate(input_fn=lambda: input_fn(1))
assert results['loss'] < 1e-2
assert results['error'] < 0.1
# def test_irv(self):
# """Test creating an Estimator from a IRVClassifier."""
# n_samples = 50
# n_features = 3
# n_tasks = 2
#
# # Create a dataset and an input function for processing it.
#
# np.random.seed(123)
# X = np.random.rand(n_samples, n_features)
# y = np.zeros((n_samples, n_tasks))
# w = np.ones((n_samples, n_tasks))
# dataset = dc.data.NumpyDataset(X, y, w)
# transformers = [dc.trans.IRVTransformer(10, n_tasks, dataset)]
#
# for transformer in transformers:
# dataset = transformer.transform(dataset)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# return {'x': x, 'weights': weights}, y
#
# # Create a TensorGraph model.
#
# model = dc.models.TensorflowMultitaskIRVClassifier(
# n_tasks, K=10, learning_rate=0.001, penalty=0.05, batch_size=50)
# model.build()
# # Create an estimator from it.
#
# x_col = tf.feature_column.numeric_column('x', shape=(2 * 10 * n_tasks,))
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
#
# def accuracy(labels, predictions, weights):
# return tf.metrics.accuracy(labels, tf.round(predictions[:, :, 1]),
# weights)
#
# metrics = {'accuracy': accuracy}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
#
# estimator.train(input_fn=lambda: input_fn(100))
#
# # Evaluate the model.
#
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['accuracy'] > 0.9
#
# def test_textcnn_classification(self):
# """Test creating an Estimator from TextCNN for classification."""
# n_tasks = 2
# n_samples = 5
#
# # Create a TensorGraph model.
# seq_length = 20
# model = dc.models.TextCNNModel(
# n_tasks=n_tasks,
# char_dict=default_dict,
# seq_length=seq_length,
# kernel_sizes=[5, 5],
# num_filters=[20, 20])
#
# np.random.seed(123)
# smile_ids = ["CCCCC", "CCC(=O)O", "CCC", "CC(=O)O", "O=C=O"]
# X = smile_ids
# y = np.zeros((n_samples, n_tasks))
# w = np.ones((n_samples, n_tasks))
# dataset = NumpyDataset(X, y, w, smile_ids)
#
# def accuracy(labels, predictions, weights):
# return tf.metrics.accuracy(labels, tf.round(predictions), weights)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# smiles_seq = tf.py_func(model.smiles_to_seq_batch, inp=[x], Tout=tf.int32)
# return {'x': smiles_seq, 'weights': weights}, y
#
# # Create an estimator from it.
# x_col = tf.feature_column.numeric_column(
# 'x', shape=(seq_length,), dtype=tf.int32)
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
# metrics = {'accuracy': accuracy}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
# estimator.train(input_fn=lambda: input_fn(100))
#
# # Evaluate results
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 1e-2
# assert results['accuracy'] > 0.9
#
# def test_textcnn_regression(self):
# """Test creating an Estimator from TextCNN for regression."""
# n_tasks = 2
# n_samples = 10
#
# # Create a TensorGraph model.
# seq_length = 20
# model = dc.models.TextCNNModel(
# n_tasks=n_tasks,
# char_dict=default_dict,
# seq_length=seq_length,
# kernel_sizes=[5, 5],
# num_filters=[20, 20],
# mode="regression")
#
# np.random.seed(123)
# smile_ids = ["CCCCC", "CCC(=O)O", "CCC", "CC(=O)O", "O=C=O"]
# X = smile_ids
# y = np.zeros((n_samples, n_tasks, 1), dtype=np.float32)
# w = np.ones((n_samples, n_tasks))
# dataset = NumpyDataset(X, y, w, smile_ids)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# smiles_seq = tf.py_func(model.smiles_to_seq_batch, inp=[x], Tout=tf.int32)
# return {'x': smiles_seq, 'weights': weights}, y
#
# # Create an estimator from it.
# x_col = tf.feature_column.numeric_column(
# 'x', shape=(seq_length,), dtype=tf.int32)
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
# metrics = {'error': tf.metrics.mean_absolute_error}
# estimator = model.make_estimator(
# feature_columns=[x_col], weight_column=weight_col, metrics=metrics)
#
# # Train the model.
# estimator.train(input_fn=lambda: input_fn(100))
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 1e-1
# assert results['error'] < 0.1
#
# def test_scscore(self):
# """Test creating an Estimator from a ScScoreModel."""
# n_samples = 10
# n_features = 3
# n_tasks = 1
#
# # Create a dataset and an input function for processing it.
#
# np.random.seed(123)
# X = np.random.rand(n_samples, 2, n_features)
# y = np.zeros((n_samples, n_tasks))
# dataset = dc.data.NumpyDataset(X, y)
#
# def input_fn(epochs):
# x, y, weights = dataset.make_iterator(
# batch_size=n_samples, epochs=epochs).get_next()
# x1 = x[:, 0]
# x2 = x[:, 1]
# return {'x1': x1, 'x2': x2, 'weights': weights}, y
#
# # Create a TensorGraph model.
#
# model = dc.models.ScScoreModel(n_features, dropouts=0)
# del model.outputs[:]
# model.outputs.append(model.difference)
#
# def accuracy(labels, predictions, weights):
# predictions = tf.nn.relu(tf.sign(predictions))
# return tf.metrics.accuracy(labels, predictions, weights)
#
# # Create an estimator from it.
#
# x_col1 = tf.feature_column.numeric_column('x1', shape=(n_features,))
# x_col2 = tf.feature_column.numeric_column('x2', shape=(n_features,))
# weight_col = tf.feature_column.numeric_column('weights', shape=(1,))
#
# estimator = model.make_estimator(
# feature_columns=[x_col1, x_col2],
# metrics={'accuracy': accuracy},
# weight_column=weight_col)
#
# # Train the model.
#
# estimator.train(input_fn=lambda: input_fn(100))
#
# # Evaluate the model.
#
# results = estimator.evaluate(input_fn=lambda: input_fn(1))
# assert results['loss'] < 0.5
# assert results['accuracy'] > 0.6
def test_tensorboard(self):
"""Test creating an Estimator from a TensorGraph that logs information to TensorBoard."""
n_samples = 10
n_features = 3
n_tasks = 2
# Create a dataset and an input function for processing it.
np.random.seed(123)
X = np.random.rand(n_samples, n_features)
y = np.zeros((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y)
def input_fn(epochs):
x, y, weights = dataset.make_iterator(
batch_size=n_samples, epochs=epochs).get_next()
return {'x': x, 'weights': weights}, y
# Create a TensorGraph model.
model = dc.models.TensorGraph()
features = layers.Feature(shape=(None, n_features))
dense = layers.Dense(out_channels=n_tasks, in_layers=features)
dense.set_summary('histogram')
model.add_output(dense)
labels = layers.Label(shape=(None, n_tasks))
loss = layers.ReduceMean(layers.L2Loss(in_layers=[labels, dense]))
model.set_loss(loss)
# Create an estimator from it.
x_col = tf.feature_column.numeric_column('x', shape=(n_features,))
estimator = model.make_estimator(feature_columns=[x_col])
# Train the model.
estimator.train(input_fn=lambda: input_fn(100))
# @flaky
# def test_dtnn_regression_model(self):
# """Test creating an estimator for DTNNGraphModel for regression"""
# current_dir = os.path.dirname(os.path.abspath(__file__))
# input_file = os.path.join(current_dir, "example_DTNN.mat")
# dataset = loadmat(input_file)
#
# num_vals_to_use = 20
#
# np.random.seed(123)
# X = dataset['X'][:num_vals_to_use]
# y = dataset['T'][:num_vals_to_use].astype(np.float32)
# w = np.ones_like(y)
# dataset = dc.data.NumpyDataset(X, y, w, ids=None)
# n_tasks = y.shape[1]
# n_samples = y.shape[0]
#
# dtypes = [tf.int32, tf.float32, tf.int32, tf.int32, tf.int32]
#
# model = dc.models.DTNNModel(
# n_tasks,
# n_embedding=20,
# n_distance=100,
# learning_rate=1.0,
# mode="regression")
#
# def mean_relative_error(labels, predictions, weights):
# error = tf.abs(1 - tf.math.divide(labels, predictions))
# error_val, update_op = tf.metrics.mean(error)
# return error_val, update_op
#
# def input_fn(batch_size, epochs):
# X, y, weights = dataset.make_iterator(
# batch_size=batch_size, epochs=epochs).get_next()
# features = tf.py_func(
# model.compute_features_on_batch, inp=[X], Tout=dtypes)
#
# assert len(features) == 5
# feature_dict = dict()
# feature_dict['atom_num'] = features[0]
# feature_dict['distance'] = features[1]
# feature_dict['dist_mem_i'] = features[2]
# feature_dict['dist_mem_j'] = features[3]
# feature_dict['atom_mem'] = features[4]
# feature_dict['weights'] = weights
#
# return feature_dict, y
#
# atom_number = tf.feature_column.numeric_column(
# 'atom_num', shape=[], dtype=dtypes[0])
# distance = tf.feature_column.numeric_column(
# 'distance', shape=(model.n_distance,), dtype=dtypes[1])
# atom_mem = tf.feature_column.numeric_column(
# 'atom_mem', shape=[], dtype=dtypes[2])
# dist_mem_i = tf.feature_column.numeric_column(
# 'dist_mem_i', shape=[], dtype=dtypes[3])
# dist_mem_j = tf.feature_column.numeric_column(
# 'dist_mem_j', shape=[], dtype=dtypes[4])
#
# weight_col = tf.feature_column.numeric_column('weights', shape=(n_tasks,))
# metrics = {'error': mean_relative_error}
#
# feature_cols = [atom_number, distance, dist_mem_i, dist_mem_j, atom_mem]
# estimator = model.make_estimator(
# feature_columns=feature_cols, weight_column=weight_col, metrics=metrics)
# estimator.train(input_fn=lambda: input_fn(100, 250))
#
# results = estimator.evaluate(input_fn=lambda: input_fn(n_samples, 1))
# assert results['error'] < 0.1
  def test_bpsymm_regression_model(self):
    """Test creating an estimator for BPSymmetry Regression model.

    Loads a small slice of QM7, featurizes it with an ANITransformer,
    wraps a BPSymmetryFunctionRegression model as a tf.estimator.Estimator
    and checks that training and evaluation run end to end.
    """
    tasks, dataset, transformers = dc.molnet.load_qm7_from_mat(
        featurizer='BPSymmetryFunctionInput', move_mean=False)
    # Keep the test fast: only the first 5 training samples are used.
    num_samples_to_use = 5
    train, _, _ = dataset
    X = train.X[:num_samples_to_use]
    y = train.y[:num_samples_to_use]
    w = train.w[:num_samples_to_use]
    ids = train.ids[:num_samples_to_use]
    dataset = dc.data.NumpyDataset(X, y, w, ids)
    max_atoms = 23
    batch_size = 16
    layer_structures = [128, 128, 64]
    ANItransformer = dc.trans.ANITransformer(
        max_atoms=max_atoms, atomic_number_differentiated=False)
    dataset = ANItransformer.transform(dataset)
    # NOTE(review): one feature column is dropped here — presumably one the
    # model handles separately; confirm against ANITransformer docs.
    n_feat = ANItransformer.get_num_feats() - 1
    model = dc.models.BPSymmetryFunctionRegression(
        len(tasks),
        max_atoms,
        n_feat,
        layer_structures=layer_structures,
        batch_size=batch_size,
        learning_rate=0.001,
        use_queue=False,
        mode="regression")
    metrics = {'error': tf.metrics.mean_absolute_error}
    def input_fn(epochs):
      # Featurize each batch inside the TF graph via py_func, then flatten
      # the per-atom features/flags into fixed-width estimator columns.
      X, y, w = dataset.make_iterator(
          batch_size=batch_size, epochs=epochs).get_next()
      atom_feats, atom_flags = tf.py_func(
          model.compute_features_on_batch, [X], Tout=[tf.float32, tf.float32])
      atom_feats = tf.reshape(
          atom_feats,
          shape=(tf.shape(atom_feats)[0], model.max_atoms * model.n_feat))
      atom_flags = tf.reshape(
          atom_flags,
          shape=(tf.shape(atom_flags)[0], model.max_atoms * model.max_atoms))
      features = dict()
      features['atom_feats'] = atom_feats
      features['atom_flags'] = atom_flags
      features['weights'] = w
      return features, y
    atom_feats = tf.feature_column.numeric_column(
        'atom_feats', shape=(max_atoms * n_feat,), dtype=tf.float32)
    # NOTE(review): shape=(max_atoms * max_atoms) is an int, not a 1-tuple;
    # tf.feature_column accepts either — confirm the intent.
    atom_flags = tf.feature_column.numeric_column(
        'atom_flags', shape=(max_atoms * max_atoms), dtype=tf.float32)
    weight_col = tf.feature_column.numeric_column(
        'weights', shape=(len(tasks),), dtype=tf.float32)
    estimator = model.make_estimator(
        feature_columns=[atom_feats, atom_flags],
        weight_column=weight_col,
        metrics=metrics)
    # Train, then check the mean absolute error is small.
    estimator.train(input_fn=lambda: input_fn(100))
    results = estimator.evaluate(input_fn=lambda: input_fn(1))
    assert results['error'] < 0.1
  def test_ani_regression(self):
    """Test creating an estimator for ANI Regression.

    Builds a two-molecule toy dataset by hand, wraps an ANIRegression
    model as a tf.estimator.Estimator and checks that training and
    evaluation run end to end with small error.
    """
    max_atoms = 4
    # Two molecules padded to max_atoms rows. First column matches the
    # atom_number_cases below (1, 6, 7, 8); the remaining three columns
    # are presumably x/y/z coordinates — confirm against ANIRegression docs.
    # All-zero rows are padding for missing atoms.
    X = np.array(
        [[
            [1, 5.0, 3.2, 1.1],
            [6, 1.0, 3.4, -1.1],
            [1, 2.3, 3.4, 2.2],
            [0, 0, 0, 0],
        ], [
            [8, 2.0, -1.4, -1.1],
            [7, 6.3, 2.4, 3.2],
            [0, 0, 0, 0],
            [0, 0, 0, 0],
        ]],
        dtype=np.float32)
    y = np.array([[2.0], [1.1]], dtype=np.float32)
    layer_structures = [128, 128, 64]
    atom_number_cases = [1, 6, 7, 8]
    kwargs = {
        "n_tasks": 1,
        "max_atoms": max_atoms,
        "layer_structures": layer_structures,
        "atom_number_cases": atom_number_cases,
        "batch_size": 2,
        "learning_rate": 0.001,
        "use_queue": False,
        "mode": "regression"
    }
    model = dc.models.ANIRegression(**kwargs)
    dataset = dc.data.NumpyDataset(X, y, n_tasks=1)
    metrics = {'error': tf.metrics.mean_absolute_error}
    def input_fn(epochs):
      # Featurize each batch inside the TF graph via py_func and flatten
      # the per-atom outputs into fixed-width estimator columns.
      X, y, w = dataset.make_iterator(batch_size=2, epochs=epochs).get_next()
      atom_feats, atom_numbers, atom_flags = tf.py_func(
          model.compute_features_on_batch, [X],
          Tout=[tf.float32, tf.int32, tf.float32])
      atom_feats = tf.reshape(
          atom_feats, shape=(tf.shape(atom_feats)[0], model.max_atoms * 4))
      atom_numbers = tf.reshape(
          atom_numbers, shape=(tf.shape(atom_numbers)[0], model.max_atoms))
      atom_flags = tf.reshape(
          atom_flags,
          shape=(tf.shape(atom_flags)[0], model.max_atoms * model.max_atoms))
      features = dict()
      features['atom_feats'] = atom_feats
      features['atom_numbers'] = atom_numbers
      features['atom_flags'] = atom_flags
      features['weights'] = w
      return features, y
    atom_feats = tf.feature_column.numeric_column(
        'atom_feats', shape=(max_atoms * 4,), dtype=tf.float32)
    atom_numbers = tf.feature_column.numeric_column(
        'atom_numbers', shape=(max_atoms,), dtype=tf.int32)
    # NOTE(review): shape=(max_atoms * max_atoms) is an int, not a 1-tuple;
    # tf.feature_column accepts either — confirm the intent.
    atom_flags = tf.feature_column.numeric_column(
        'atom_flags', shape=(max_atoms * max_atoms), dtype=tf.float32)
    weight_col = tf.feature_column.numeric_column(
        'weights', shape=(kwargs["n_tasks"],), dtype=tf.float32)
    estimator = model.make_estimator(
        feature_columns=[atom_feats, atom_numbers, atom_flags],
        weight_column=weight_col,
        metrics=metrics)
    # Train, then check the mean absolute error is small.
    estimator.train(input_fn=lambda: input_fn(100))
    results = estimator.evaluate(input_fn=lambda: input_fn(1))
    assert results['error'] < 0.1
| [
"deepchem.models.ANIRegression",
"numpy.random.seed",
"deepchem.models.tensorgraph.layers.Dense",
"tensorflow.feature_column.numeric_column",
"deepchem.molnet.load_qm7_from_mat",
"deepchem.models.Sequential",
"tensorflow.py_func",
"numpy.zeros",
"deepchem.models.tensorgraph.layers.Label",
"tensorf... | [((6710, 6747), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (6724, 6747), True, 'import numpy as np\n'), ((6814, 6840), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y'], {}), '(X, y)\n', (6834, 6840), True, 'import deepchem as dc\n'), ((7034, 7086), 'deepchem.models.Sequential', 'dc.models.Sequential', ([], {'loss': '"""mse"""', 'learning_rate': '(0.01)'}), "(loss='mse', learning_rate=0.01)\n", (7054, 7086), True, 'import deepchem as dc\n'), ((7180, 7238), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""x"""'], {'shape': '(n_features,)'}), "('x', shape=(n_features,))\n", (7212, 7238), True, 'import tensorflow as tf\n'), ((14562, 14581), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (14576, 14581), True, 'import numpy as np\n'), ((14590, 14627), 'numpy.random.rand', 'np.random.rand', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (14604, 14627), True, 'import numpy as np\n'), ((14636, 14666), 'numpy.zeros', 'np.zeros', (['(n_samples, n_tasks)'], {}), '((n_samples, n_tasks))\n', (14644, 14666), True, 'import numpy as np\n'), ((14681, 14707), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y'], {}), '(X, y)\n', (14701, 14707), True, 'import deepchem as dc\n'), ((14931, 14954), 'deepchem.models.TensorGraph', 'dc.models.TensorGraph', ([], {}), '()\n', (14952, 14954), True, 'import deepchem as dc\n'), ((14970, 15010), 'deepchem.models.tensorgraph.layers.Feature', 'layers.Feature', ([], {'shape': '(None, n_features)'}), '(shape=(None, n_features))\n', (14984, 15010), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((15023, 15077), 'deepchem.models.tensorgraph.layers.Dense', 'layers.Dense', ([], {'out_channels': 'n_tasks', 'in_layers': 'features'}), '(out_channels=n_tasks, in_layers=features)\n', (15035, 15077), True, 'import deepchem.models.tensorgraph.layers as layers\n'), 
((15154, 15189), 'deepchem.models.tensorgraph.layers.Label', 'layers.Label', ([], {'shape': '(None, n_tasks)'}), '(shape=(None, n_tasks))\n', (15166, 15189), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((15335, 15393), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""x"""'], {'shape': '(n_features,)'}), "('x', shape=(n_features,))\n", (15367, 15393), True, 'import tensorflow as tf\n'), ((18410, 18497), 'deepchem.molnet.load_qm7_from_mat', 'dc.molnet.load_qm7_from_mat', ([], {'featurizer': '"""BPSymmetryFunctionInput"""', 'move_mean': '(False)'}), "(featurizer='BPSymmetryFunctionInput', move_mean\n =False)\n", (18437, 18497), True, 'import deepchem as dc\n'), ((18723, 18757), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (18743, 18757), True, 'import deepchem as dc\n'), ((18858, 18943), 'deepchem.trans.ANITransformer', 'dc.trans.ANITransformer', ([], {'max_atoms': 'max_atoms', 'atomic_number_differentiated': '(False)'}), '(max_atoms=max_atoms, atomic_number_differentiated=False\n )\n', (18881, 18943), True, 'import deepchem as dc\n'), ((20052, 20149), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""atom_feats"""'], {'shape': '(max_atoms * n_feat,)', 'dtype': 'tf.float32'}), "('atom_feats', shape=(max_atoms * n_feat,),\n dtype=tf.float32)\n", (20084, 20149), True, 'import tensorflow as tf\n'), ((20172, 20269), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""atom_flags"""'], {'shape': '(max_atoms * max_atoms)', 'dtype': 'tf.float32'}), "('atom_flags', shape=max_atoms * max_atoms,\n dtype=tf.float32)\n", (20204, 20269), True, 'import tensorflow as tf\n'), ((20803, 20988), 'numpy.array', 'np.array', (['[[[1, 5.0, 3.2, 1.1], [6, 1.0, 3.4, -1.1], [1, 2.3, 3.4, 2.2], [0, 0, 0, 0]\n ], [[8, 2.0, -1.4, -1.1], [7, 6.3, 2.4, 3.2], [0, 0, 0, 0], [0, 0, 0, 0]]]'], {'dtype': 'np.float32'}), 
'([[[1, 5.0, 3.2, 1.1], [6, 1.0, 3.4, -1.1], [1, 2.3, 3.4, 2.2], [0,\n 0, 0, 0]], [[8, 2.0, -1.4, -1.1], [7, 6.3, 2.4, 3.2], [0, 0, 0, 0], [0,\n 0, 0, 0]]], dtype=np.float32)\n', (20811, 20988), True, 'import numpy as np\n'), ((21125, 21167), 'numpy.array', 'np.array', (['[[2.0], [1.1]]'], {'dtype': 'np.float32'}), '([[2.0], [1.1]], dtype=np.float32)\n', (21133, 21167), True, 'import numpy as np\n'), ((21541, 21574), 'deepchem.models.ANIRegression', 'dc.models.ANIRegression', ([], {}), '(**kwargs)\n', (21564, 21574), True, 'import deepchem as dc\n'), ((21589, 21626), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y'], {'n_tasks': '(1)'}), '(X, y, n_tasks=1)\n', (21609, 21626), True, 'import deepchem as dc\n'), ((22520, 22612), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""atom_feats"""'], {'shape': '(max_atoms * 4,)', 'dtype': 'tf.float32'}), "('atom_feats', shape=(max_atoms * 4,),\n dtype=tf.float32)\n", (22552, 22612), True, 'import tensorflow as tf\n'), ((22637, 22726), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""atom_numbers"""'], {'shape': '(max_atoms,)', 'dtype': 'tf.int32'}), "('atom_numbers', shape=(max_atoms,), dtype=\n tf.int32)\n", (22669, 22726), True, 'import tensorflow as tf\n'), ((22748, 22845), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""atom_flags"""'], {'shape': '(max_atoms * max_atoms)', 'dtype': 'tf.float32'}), "('atom_flags', shape=max_atoms * max_atoms,\n dtype=tf.float32)\n", (22780, 22845), True, 'import tensorflow as tf\n'), ((22870, 22963), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""weights"""'], {'shape': "(kwargs['n_tasks'],)", 'dtype': 'tf.float32'}), "('weights', shape=(kwargs['n_tasks'],),\n dtype=tf.float32)\n", (22902, 22963), True, 'import tensorflow as tf\n'), ((7101, 7129), 'deepchem.models.tensorgraph.layers.Dense', 'layers.Dense', ([], 
{'out_channels': '(1)'}), '(out_channels=1)\n', (7113, 7129), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((15219, 15259), 'deepchem.models.tensorgraph.layers.L2Loss', 'layers.L2Loss', ([], {'in_layers': '[labels, dense]'}), '(in_layers=[labels, dense])\n', (15232, 15259), True, 'import deepchem.models.tensorgraph.layers as layers\n'), ((19520, 19599), 'tensorflow.py_func', 'tf.py_func', (['model.compute_features_on_batch', '[X]'], {'Tout': '[tf.float32, tf.float32]'}), '(model.compute_features_on_batch, [X], Tout=[tf.float32, tf.float32])\n', (19530, 19599), True, 'import tensorflow as tf\n'), ((21834, 21927), 'tensorflow.py_func', 'tf.py_func', (['model.compute_features_on_batch', '[X]'], {'Tout': '[tf.float32, tf.int32, tf.float32]'}), '(model.compute_features_on_batch, [X], Tout=[tf.float32, tf.int32,\n tf.float32])\n', (21844, 21927), True, 'import tensorflow as tf\n'), ((19681, 19701), 'tensorflow.shape', 'tf.shape', (['atom_feats'], {}), '(atom_feats)\n', (19689, 19701), True, 'import tensorflow as tf\n'), ((19809, 19829), 'tensorflow.shape', 'tf.shape', (['atom_flags'], {}), '(atom_flags)\n', (19817, 19829), True, 'import tensorflow as tf\n'), ((22005, 22025), 'tensorflow.shape', 'tf.shape', (['atom_feats'], {}), '(atom_feats)\n', (22013, 22025), True, 'import tensorflow as tf\n'), ((22116, 22138), 'tensorflow.shape', 'tf.shape', (['atom_numbers'], {}), '(atom_numbers)\n', (22124, 22138), True, 'import tensorflow as tf\n'), ((22231, 22251), 'tensorflow.shape', 'tf.shape', (['atom_flags'], {}), '(atom_flags)\n', (22239, 22251), True, 'import tensorflow as tf\n')] |
import torch
import numpy as np
import random
def setup_seed(seed):
    """Seed every RNG source (random, numpy, torch CPU and CUDA) with `seed`.

    Also forces deterministic cuDNN kernels so repeated runs reproduce
    the same results.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN algorithm selection for reproducibility.
    torch.backends.cudnn.deterministic = True
def target_format(target, loss_mode, device):
    """Move `target` to `device` with the dtype expected by the loss.

    Args:
        target: target tensor.
        loss_mode: 'mse' (float targets) or 'ce' (long class indices).
        device: destination device.

    Returns:
        The converted tensor.

    Raises:
        ValueError: if loss_mode is not 'mse' or 'ce' (previously an
            unknown mode silently returned None).
    """
    if loss_mode == 'mse':
        return target.to(device, dtype=torch.float)
    if loss_mode == 'ce':
        return target.to(device, dtype=torch.long)
    raise ValueError(f"Unsupported loss_mode: {loss_mode!r}")
def get_acc(x, target, loss_mode):
    """Count correct predictions in a batch.

    Args:
        x: model outputs of shape (batch, classes).
        target: one-hot targets for 'mse', class indices for 'ce'.
        loss_mode: 'mse' or 'ce'.

    Returns:
        int: number of samples whose argmax prediction matches the target.

    Raises:
        ValueError: if loss_mode is not 'mse' or 'ce' (previously an
            unknown mode silently returned None).
    """
    # The predicted class is the argmax over dim 1 in both modes.
    pred = x.data.max(1, keepdim=True)[1]
    if loss_mode == 'mse':
        # MSE targets are one-hot: compare argmax against argmax.
        tar = target.data.max(1, keepdim=True)[1]
        return pred.eq(tar.data).sum().item()
    if loss_mode == 'ce':
        # Cross-entropy targets are class indices already.
        return pred.eq(target.data.view_as(pred)).sum().item()
    raise ValueError(f"Unsupported loss_mode: {loss_mode!r}")
| [
"torch.manual_seed",
"torch.cuda.manual_seed_all",
"numpy.random.seed",
"random.seed"
] | [((80, 103), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (97, 103), False, 'import torch\n'), ((109, 141), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (135, 141), False, 'import torch\n'), ((147, 167), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (161, 167), True, 'import numpy as np\n'), ((173, 190), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (184, 190), False, 'import random\n')] |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2021-2021 Arm Ltd.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Basic functions solving FPE.
Based on
<NAME>. (2018).
Switching probability of all-perpendicular spin valve nanopillars.
AIP Advances, 8(5).
https://doi.org/10.1063/1.5003832
FPE is expanded into Legendre polynomials:
ρ(θ, τ) = sum[ r_n (τ) Pn(cos θ)
P_n(x) are the Legendre polynomials solution to eq (3) in [1]
∂ρ/∂τ = sum[ sum [r_n a_{n+k, n} P_{n+k}] ] ->
∂r/∂τ = Ar⇒r(τ) = e^(Aτ)r(0) eq (11) at[1]
A is the pentadiagonal matrix with Legendre coefficients (at generate_A())
Notes:
ρ area over theta should be 1/2pi, following [1]
as it integration should be over phi dimension as well in spherical coord
(i.e. int( 2*pi*f(theta)*sin(theta)d_theta )
"""
import numpy as np
import scipy as scipy
import scipy.special
from scipy.stats import maxwell
from scipy import optimize
import fvm.mtj_fp_fvm as fvm
import python_compact_model.sllgs_solver as sllgs_solver
def sph_area_rho_over_theta(rho, theta, axis=-1):
    """
    Compute the area of rho over theta in a spherical system.

    a) d_line_theta = sin(theta) dtheta
    b) d_solid_angle = sin(theta) dtheta dphi
    c) surface element in a surface of polar angle θ constant
       (a cone with vertex the origin):
       d_s_theta = r sin(theta) dphy dr
    d) surface element in a surface of azimuth φ const (a vertical half-plane):
       d_s_phi = r dr dtheta
    Expects theta between [PI, 0] (coming from z in [-1, 1]).

    Args:
        rho: distribution values sampled at theta (possibly batched).
        theta: 1-D polar-angle sample points.
        axis: axis of rho along which theta varies (default: last).

    Returns:
        Absolute value of the trapezoidal integral of rho*sin(theta).
    """
    # np.trapz was renamed to np.trapezoid in NumPy 2.0; support both.
    trapezoid = getattr(np, 'trapezoid', None)
    if trapezoid is None:
        trapezoid = np.trapz
    rho = np.asarray(rho)
    # Bug fix: 'axis' was accepted but silently ignored. Broadcast
    # sin(theta) along the requested axis and forward axis to the
    # integrator so batched distributions integrate correctly.
    sin_shape = [1] * max(rho.ndim, 1)
    sin_shape[axis] = np.size(theta)
    weighted = rho * np.sin(theta).reshape(sin_shape)
    return np.abs(trapezoid(weighted, x=theta, axis=axis))
def generate_A(i, h, delta, j):
    """Compute the pentadiagonal matrix.

    Builds the (j+1)x(j+1) matrix A coupling the Legendre coefficients
    r_n in ∂r/∂τ = A r (eq (11) of [1]).

    Args:
        i: normalized current.
        h: normalized field (only the difference i - h enters A).
        delta: thermal stability factor.
        j: highest Legendre order kept in the expansion.

    Returns:
        np.ndarray of shape (j+1, j+1) with the coupling coefficients.
    """
    # print(f'[debug] i: {i}, h: {h}, delta: {delta}')
    field_diff = i - h
    inv_2_delta = 1.0/(2.0*delta)
    A = np.zeros([j+1, j+1], dtype=float)
    # fill matrix: interior columns n = 2 .. j-2 follow the generic
    # five-term recurrence (entries at row offsets -2 .. +2 around n)
    for n in range(2, j-1):
        n2 = 2*n
        A[n-2, n] = -1.0*n*(n-1)*(n-2)/((n2+1)*(n2-1))
        A[n-1, n] = field_diff*n*(n-1)/(n2+1)
        A[n, n] = -1.0*n*(n+1)*(inv_2_delta-1/((n2+3)*(n2-1)))
        A[n+1, n] = -1.0*field_diff*(n+1)*(n+2)/(n2+1)
        A[n+2, n] = (n+1)*(n+2)*(n+3)/((n2+1)*(n2+3))
    # initial cases and last cases: columns 0, 1, j-1 and j drop the
    # recurrence terms that would fall outside the matrix
    # [0,0] = 0
    A[1, 0] = -1.0*field_diff*2
    A[2, 0] = 2.0
    # [0,1] = 0
    A[1, 1] = -2.0*(inv_2_delta - 1/5)
    A[2, 1] = -1.0*field_diff*2
    A[3, 1] = 2*3*4/(3*5)
    A[j-3, j-1] = -1.0*(j-1)*(j-2)*(j-3)/((2*j-1)*(2*j-3))
    A[j-2, j-1] = field_diff*(j-1)*(j-2)/(2*j-1)
    A[j-1, j-1] = -1.0*(j-1)*j*(inv_2_delta - 1/((2*j+1)*(2*j-3)))
    A[j, j-1] = -1.0*field_diff*j*(j+1)/(2*j-1)
    A[j-2, j] = -1.0*j*(j-1)*(j-2)/((2*j+1)*(2*j-1))
    A[j-1, j] = field_diff*j*(j-1)/(2*j+1)
    A[j, j] = -1.0*j*(j+1)*(inv_2_delta - 1/((2*j+3)*(2*j-1)))
    return A
def get_state(tau, i=None, h=None, delta=None, state_0=None, a_matrix=None):
    """
    Compute the magnetization state.

    r(τ) = e^(Aτ)r(0) eq (11) at [1]

    Args:
        tau: normalized evolution time.
        i, h, delta: current/field/stability parameters used to build A
            when a precomputed matrix is not supplied.
        state_0: initial Legendre-coefficient vector r(0).
        a_matrix: optional precomputed matrix A; building A is expensive,
            so callers may share one across calls.

    Returns:
        The evolved coefficient vector r(tau).
    """
    # Bug fix: scipy.linalg is a submodule that 'import scipy' alone does
    # not load; import expm explicitly so this works regardless of which
    # scipy submodules other code happened to import.
    from scipy.linalg import expm
    if a_matrix is not None:
        # get state from a known A matrix
        return np.matmul(expm(tau*a_matrix), state_0)
    return np.matmul(expm(
        tau*generate_A(i, h, delta, state_0.size-1)), state_0)
def untangle_state(m_state,
                   lin_space_z=False,
                   dim_points=2000):
    """Evaluate the Legendre series described by its coefficients.

    Returns a (theta, rho) pair sampled on dim_points points, either
    equidistant in z = cos(theta) (lin_space_z=True) or equidistant
    in theta itself.
    """
    if lin_space_z:
        z_grid = np.linspace(-1, 1, dim_points)
        rho = np.polynomial.legendre.legval(z_grid, m_state)
        return np.arccos(z_grid), rho
    theta_grid = np.linspace(np.pi, 0, dim_points)
    rho = np.polynomial.legendre.legval(np.cos(theta_grid), m_state)
    return theta_grid, rho
# @np.vectorize
def get_time_to_sw(a_matrix, s_rho_0,
                   rho_0_at_pi,
                   target_sw_prob=0.5,
                   target_tolerance=1e-3,
                   t_max=10,
                   do_manual_sw_prob=False,
                   sn=None):
    """Compute time to switch for a given MTJ for a given A matrix.

    Two phases: first t_max is grown by decades until the switching
    probability exceeds target_sw_prob, then [t_min, t_max] is bisected
    until the probability is within target_tolerance of the target.

    Args:
        a_matrix: precomputed pentadiagonal matrix A (see generate_A()).
        s_rho_0: initial Legendre-coefficient state vector r(0).
        rho_0_at_pi: forwarded to compute_sw_prob() on the manual path.
        target_sw_prob: probability defining the switching time.
        target_tolerance: accepted deviation from target_sw_prob.
        t_max: initial upper bound of the search (grown if insufficient).
        do_manual_sw_prob: if True, integrate rho(theta) numerically;
            otherwise use get_analytical_sw_prob() with matrix sn.
        sn: optional precomputed integration matrix for the analytical
            path (built with get_sn() when omitted).

    Returns:
        The normalized switching time, or np.inf when no time below the
        internal cap reaches the target probability.
    """
    dim_points = 10000
    max_tau_considered = 1e15
    max_iteration = int(1e2)
    t_min = 0
    # initialize integration matrix (only needed for the analytical path)
    if not do_manual_sw_prob and sn is None:
        sn = get_sn(s_rho_0.shape[0]-1)
    # Phase 1: grow t_max by decades until switching is ensured.
    while True:
        new_state = get_state(t_max, a_matrix=a_matrix, state_0=s_rho_0)
        if do_manual_sw_prob:
            theta, data = untangle_state(new_state, dim_points=dim_points)
            sw_prob = compute_sw_prob(data=data,
                                      theta=theta,
                                      rho_0_at_pi=rho_0_at_pi,
                                      normalize=True)
        else:
            sw_prob = 1 - get_analytical_sw_prob(new_state, sn)
        if sw_prob > target_sw_prob:
            break
        if t_max > max_tau_considered:
            print('tmax does not meet requirements')
            return np.inf
        t_max *= 10
    # Phase 2: bisection on t.
    iteration = 0
    while iteration < max_iteration:
        t = (t_min + t_max)/2
        new_state = get_state(t, a_matrix=a_matrix, state_0=s_rho_0)
        if do_manual_sw_prob:
            # Perf fix: the 10000-point untangle_state() evaluation was
            # previously done unconditionally each iteration and then
            # discarded on the analytical path; only do it when needed.
            theta, data = untangle_state(new_state, dim_points=dim_points)
            sw_prob = compute_sw_prob(data=data,
                                      theta=theta,
                                      rho_0_at_pi=rho_0_at_pi,
                                      normalize=True)
        else:
            sw_prob = 1 - get_analytical_sw_prob(new_state, sn)
        if sw_prob < 0:
            print(f'[error] negative sw t: {t}, sw_prob: {sw_prob}')
            return np.inf
        if np.abs(sw_prob - target_sw_prob) < target_tolerance:
            print(f'\t\tfound t: {t}, sw prob: {sw_prob}')
            return t
        if iteration > 1 and iteration % 50 == 1:
            print(f'iteration: {iteration}, t: {t}, sw_prob: {sw_prob}'
                  f' t_max: {t_max}, t_min: {t_min}')
        if sw_prob < target_sw_prob:
            t_min = t
        else:
            t_max = t
        iteration += 1
    # max iterations exhausted: return the last bisection midpoint
    return t
def get_time_to_sw_fitting(c, delta, nu, alpha, h_k,
                           temperature=300,
                           rho_0_at_pi=False):
    """
    Compute time to switch for a given MTJ/current for curve fitting.

    h=0 and only current/delta can vary.

    Args:
        c: current(s) [A], array-like.
        delta: thermal stability factor.
        nu: spin-transfer efficiency parameter.
        alpha: Gilbert damping.
        h_k: anisotropy field.
        temperature: temperature [K]. Bug fix: this parameter used to be
            unconditionally overwritten with 300, so passing any other
            value had no effect; it is now honored (default unchanged).
        rho_0_at_pi: whether the initial distribution is centered at
            theta = pi (forwarded to get_state_rho_0/get_time_to_sw).

    Returns:
        Switching times in seconds (np.ndarray), or -1 on error.
    """
    L0_max = 150
    lin_space_z = False
    # initial Legendre-coefficient state r(0)
    _, _, s_rho_0 = get_state_rho_0(delta=delta,
                                    do_maxwell=False,
                                    L0=L0_max,
                                    rho_0_at_pi=rho_0_at_pi,
                                    lin_space_z=lin_space_z)
    print(f'nu passed: {nu} alpha passed: {alpha} h_k: {h_k}')
    # critical current i_c and characteristic time t_d
    i_c = delta*(4*alpha*sllgs_solver.c_E*sllgs_solver.c_KB *
                 temperature)/(nu*sllgs_solver.c_hbar)
    t_d = (1+alpha*alpha)/(alpha*sllgs_solver.c_gamma_0*sllgs_solver.c_U0*h_k)
    i = c/i_c
    sw_t = np.zeros(i.shape)
    print(f'delta: {delta} for ic: {i_c}, t_d {t_d}')
    for ii_idx, ii in enumerate(i):
        # Build A once per current and share it within the bisection.
        A = generate_A(ii, 0.0, delta, s_rho_0.size-1)
        sw_t[ii_idx] = get_time_to_sw(A, s_rho_0, rho_0_at_pi)
    # convert normalized times to seconds
    sw_t *= t_d
    if np.any(sw_t <= 0):
        print('[error] negative sw time')
        return -1
    return sw_t
def basic_error_fn(error_mode, _get_time_to_sw_fitting, currents, _times):
    """Specify basic error fn.

    Builds the scalar error minimized during fitting:
        0: mean squared relative error
        1: mean squared absolute error
        2: mean absolute relative error
        3: mean absolute error

    Args:
        error_mode: one of 0-3 above.
        _get_time_to_sw_fitting: model function called as f(currents, *p).
        currents: currents at which times were measured.
        _times: measured (possibly log-transformed) switching times.

    Returns:
        A callable err(params) -> float.

    Raises:
        ValueError: for any other error_mode. Previously 'err' was left
            undefined, surfacing later as an UnboundLocalError.
    """
    if error_mode == 0:
        print('[fitting] Error mode 0: doing abs(err/x)^2')

        def err(p):
            return np.mean(
                ((_get_time_to_sw_fitting(currents, *p)-_times)/_times)**2)
    elif error_mode == 1:
        print('[fitting] Error mode 1: doing abs(err)^2')

        def err(p):
            return np.mean(
                (_get_time_to_sw_fitting(currents, *p)-_times)**2)
    elif error_mode == 2:
        print('[fitting] Error mode 2: doing abs(err/x)')

        def err(p):
            return np.mean(np.abs(
                (_get_time_to_sw_fitting(currents, *p)-_times)/_times))
    elif error_mode == 3:
        print('[fitting] Error mode 3: doing abs(err)')

        def err(p):
            return np.mean(np.abs(
                _get_time_to_sw_fitting(currents, *p)-_times))
    else:
        raise ValueError(f'unknown error_mode: {error_mode}')
    return err
def _minimize_error(err, minimize_mode, p0, bounds):
"""
Perform minimization.
Mode 0: global using basinhopping
Mode 1: global using shgo
Mode 2: global using brute force
Mode 3: local using minimize
"""
# choose minimization scheme
if minimize_mode == 0:
print('[fitting] Optimization algorithm: Basin Hopping')
# basinhopping for global minima
res = optimize.basinhopping(err,
p0,
# stepsize=0.5,
niter=500,
# accept_test=my_bounds,
minimizer_kwargs={'bounds': bounds}
)
print(f'[res] Mode {minimize_mode}: {res}')
popt = res.x
return popt
elif minimize_mode == 1:
print('[fitting] Optimization algorithm: SHGO')
# shgo
res = optimize.shgo(err,
bounds=bounds,
iters=200)
print(f'[res] Mode {minimize_mode}: {res}')
popt = res.x
return popt
elif minimize_mode == 2:
print('[fitting] Optimization algorithm: Brute')
# shgo
res = optimize.brute(err,
ranges=bounds,
finish=optimize.fmin)
print(f'[res] Mode {minimize_mode}: {res}')
# if full_output is set to True
# res[0] has the params, res[1] the error evaluation
# otherwise:
popt = res
return popt
elif minimize_mode == 3:
print('[fitting] Optimization using local algorithm')
# minimize for local minima
res = optimize.minimize(err,
x0=p0,
bounds=bounds,
method='L-BFGS-B',
options={'eps': 1e-13},
# method='dogbox',
)
print(f'[res] Mode {minimize_mode}: {res}')
popt = res.x
return popt
def _fit_current_time_points(currents,
                             times,
                             rho_0_at_pi,
                             p0=None,
                             bounds=((0., 0.), (np.inf, np.inf)),
                             do_log=False,
                             minimize_mode=0,
                             error_mode=0):
    """Fit current/time 2d array to i/h params.

    Fits the physical parameters (delta, nu, alpha, h_k) so that the
    FPE-predicted switching times match the measured `times` at the given
    `currents`, using the error function selected by `error_mode` and the
    optimizer selected by `minimize_mode` (see basic_error_fn() and
    _minimize_error()). When do_log is True the fit is done on
    log-transformed times.

    Returns:
        (popt, fitted_times): the optimal parameters and the switching
        times the model predicts with them.
    """
    # generate s_rho_0
    L0_max = 150
    lin_space_z = False
    temperature = 300
    # define internal fn to not pass extra params like rho_0_at_pi
    # delta = 55
    # def _get_time_to_sw_fitting(c, nu, alpha, h_k):
    def _get_time_to_sw_fitting(c, delta, nu, alpha, h_k):
        """
        Compute time to switch for a given MTJ/current for curve fitting.

        h=0 and only current/delta can vary.
        parameters are: delta, nu, alpha, h_k
        """
        print(f'delta: {delta}, nu: {nu} alpha: {alpha} h_k: {h_k}')
        # Guard: optimizers can probe NaN/inf parameter vectors.
        _p = np.array([delta, nu, alpha, h_k])
        if np.any(np.isnan(_p)) or np.any(np.isinf(_p)):
            print('[warning] optimizer passing NaN')
            return np.nan
        # initial state cannot be passed
        _, _, s_rho_0 = get_state_rho_0(delta=delta,
                                        do_maxwell=False,
                                        L0=L0_max,
                                        rho_0_at_pi=rho_0_at_pi,
                                        lin_space_z=lin_space_z)
        # i
        i_c = delta*(4*alpha*sllgs_solver.c_E*sllgs_solver.c_KB *
                     temperature)/(nu*sllgs_solver.c_hbar)
        t_d = (1+alpha*alpha) / \
            (alpha*sllgs_solver.c_gamma_0*sllgs_solver.c_U0*h_k)
        i = c/i_c
        sw_t = np.zeros(i.shape)
        # alpha = 0.02
        # nu = 0.3
        # temperature = 300
        # delta = nu*llg.c_hbar * i_c / (4*alpha*llg.c_E*llg.c_KB*temperature)
        print(f'delta: {delta} for ic: {i_c}, t_d {t_d}')
        for ii_idx, ii in enumerate(i):
            # share A
            A = generate_A(ii, 0.0, delta, s_rho_0.size-1)
            sw_t[ii_idx] = get_time_to_sw(A, s_rho_0, rho_0_at_pi)
        sw_t *= t_d
        if np.any(sw_t <= 0):
            print('[error] negative sw time')
            return -np.inf
        if do_log:
            return np.log(sw_t)
        return sw_t
    _times = times
    if do_log:
        _times = np.log(times)
    # minimize approach
    err = basic_error_fn(error_mode=error_mode,
                         _get_time_to_sw_fitting=_get_time_to_sw_fitting,
                         currents=currents,
                         _times=_times)
    # minimize
    popt = _minimize_error(err, minimize_mode, p0, bounds)
    # Undo the log transform so callers always get times in linear scale.
    if do_log:
        return popt, np.exp(_get_time_to_sw_fitting(currents, *popt))
    return popt, _get_time_to_sw_fitting(currents, *popt)
def get_nc(delta):
    """Return the critical value N_c = sqrt(delta/2 + 1) - 1/2."""
    return np.sqrt(delta / 2 + 1) - 0.5
def get_state_rho_0(delta,
                    L0=150,
                    rho_0_at_pi=False,
                    lin_space_z=False,
                    do_maxwell=False,
                    maxwell_loc=None,
                    maxwell_scale=None,
                    dim_points=2000):
    """
    Build the initial distribution rho_0 and its Legendre expansion.

    Parameters
    ----------
    delta : float
        Thermal-stability factor shaping the distribution.
    L0 : int, optional
        Degree of the Legendre fit.
    rho_0_at_pi : bool, optional
        If True, mirror the distribution so it is centered at theta = pi.
    lin_space_z : bool, optional
        Grid equidistant in z = cos(theta) instead of in theta.
    do_maxwell : bool, optional
        Use a Maxwell pdf over theta instead of the Boltzmann-like form.
    maxwell_loc, maxwell_scale : float, optional
        Maxwell pdf location/scale; both default from theta_0 = 1/sqrt(2*delta)
        when either is missing.
    dim_points : int, optional
        Number of grid points.

    Returns
    -------
    tuple
        (z, rho_0, s_rho_0): the z grid, the sampled distribution and its
        Legendre coefficients.
    """
    if lin_space_z:
        # equidistant grid in z = cos(theta); best suited for the legfit below
        z = np.linspace(-1, 1, dim_points)
        theta = np.arccos(z)
    else:
        # equidistant grid in theta, running pi -> 0
        theta = np.linspace(np.pi, 0, dim_points)
        z = np.cos(theta)
    if do_maxwell:
        if maxwell_loc is None or maxwell_scale is None:
            # default both parameters from the characteristic angle theta_0
            theta_0 = 1/np.sqrt(2*delta)
            maxwell_scale = theta_0
            maxwell_loc = -theta_0
        rho_0 = maxwell.pdf(theta,
                            loc=maxwell_loc,
                            scale=maxwell_scale)
    else:
        sin_theta = np.sin(theta)
        rho_0 = np.exp(-delta*sin_theta*sin_theta)*np.heaviside(np.pi/2-theta,
                                                               0.5)
        # Normalize so the area over theta equals 1/(2*pi), following
        # "Switching probability of all-perpendicular spin valve nanopillars",
        # AIP Advances 8(5), 2018, https://doi.org/10.1063/1.5003832
        area = sph_area_rho_over_theta(rho_0, theta)
        rho_0 = rho_0 / (2*np.pi*area)
    if rho_0_at_pi:
        # flip so the distribution is centered at theta = pi
        rho_0 = rho_0[::-1]
    print(f'[debug] fitting to delta {delta}')
    # Legendre expansion of rho_0 over z
    s_rho_0 = np.polynomial.legendre.legfit(x=z,
                                            y=rho_0,
                                            deg=L0)
    return z, rho_0, s_rho_0
def get_sn(L0):
    """
    Compute s_n vector from eq (12) in [1]:  s_n = int_0^1 P_n(x) dx.

    Known values: s_0 = 1, s_1 = 1/2, s_n = 0 for even n >= 2, and for
    odd n >= 3 the sign alternates starting negative (s_3 = -1/8,
    s_5 = 1/16, ...).

    Parameters
    ----------
    L0 : int
        Maximum Legendre degree; the result has L0 + 1 entries.

    Returns
    -------
    ndarray
        Float64 vector of s_n values, computed in extended precision.
    """
    dtype = np.longdouble
    # `np.int` was removed in NumPy 1.24; use the builtin int for the exact
    # integer arguments that factorial2 requires.
    fact_type = int
    print('using exact factorial2')
    sn = np.arange(L0+1, dtype=dtype)
    # alternating signs for odd n = 3, 5, 7, ...: -, +, -, ...
    sign = -1.0 * np.ones(sn[3::2].shape[0]).astype(dtype)
    sign[1::2] = 1.0
    # double factorials: n!! for odd n >= 3 and (n-1)!! for the matching even
    nf = np.array([scipy.special.factorial2(f, exact=True)
                   for f in sn[3::2].astype(fact_type)]).astype(dtype)
    nfm1 = np.array([scipy.special.factorial2(f, exact=True)
                     for f in sn[2:-1:2].astype(fact_type)]).astype(dtype)
    # odd n >= 3: s_n = sign * n!! / (n * (n+1) * (n-1)!!)
    sn[3::2] = sign * nf / (sn[3::2]*(sn[3::2]+1)*nfm1)
    # even n >= 2 integrate to zero over [0, 1]
    sn[2::2] = 0.0
    # n==0 and n==1
    sn[0] = 1.0
    sn[1] = 0.5
    return sn.astype(float)
def get_analytical_sw_prob(state, sn=None):
    """
    Switching probability from a Legendre state vector, eq (12) in [1].

    Parameters
    ----------
    state : ndarray
        Legendre coefficients of the current distribution.
    sn : ndarray, optional
        Precomputed s_n weight vector; derived via get_sn when omitted.
    """
    weights = get_sn(state.shape[0]) if sn is None else sn
    return np.dot(2.0*np.pi*weights, state)
def compute_sw_prob(data, theta, rho_0_at_pi, normalize=False):
    """
    Compute switching probability as a spherical-area integral over theta.

    FVM and the Legendre analytical solver work over z = cos(theta), so the
    integral int_-1^1 f(z) dz is evaluated here as the hemisphere integral
    int f(theta) sin(theta) dtheta, scaled by 2*pi to account for the
    (uniform) integration over the azimuthal angle phi.

    Parameters
    ----------
    data : ndarray
        Either a [time, theta] stack or a single [theta] snapshot; a 1-d
        input is promoted to one row.
    theta : ndarray
        Polar-angle grid matching the last axis of `data`.
    rho_0_at_pi : bool
        When True the initial state sits at theta = pi, so the switched
        hemisphere is theta < pi/2; otherwise it is theta > pi/2.
    normalize : bool, optional
        Accepted for interface compatibility; not used.
    """
    if data.ndim == 1:
        # promote a single snapshot to a one-row stack
        data = data[np.newaxis, :]
    if rho_0_at_pi:
        switched = theta < np.pi/2
    else:
        switched = theta > np.pi/2
    return 2*np.pi*sph_area_rho_over_theta(data[:, switched],
                                             theta[switched])
def get_sw_prob(s_rho_0, tau, delta, i, h,
                rho_0_at_pi=False,
                compute_fvm=False,
                compute_analytical_sw=False,
                compute_analytical_manual=False,
                dim_points=500,
                lin_space_z=False,
                t_step=1e-3,
                sn=None):
    """
    Evolve FPE and compute switching probability over theta.

    FVM and legendre analytical compute over z.
    Transformation is from int^1_-1 [ f(z)dz ] to
    int^hemisphere [ f(theta) sin(theta) dtheta ].

    Three independent estimates can be requested:
      * manual_prob: hemisphere integral of the analytically evolved rho
        (compute_analytical_manual).
      * fvm_prob: the same integral on a finite-volume solution
        (compute_fvm).
      * analytical_prob: 1 minus the eq (12) series estimate
        (compute_analytical_sw).
    Estimates that are not requested are returned as -1.
    """
    # analytical rho computation
    new_state = get_state(tau, i, h, delta, s_rho_0)
    # NOTE(review): the theta grid here is hard-coded to 5000 points rather
    # than using `dim_points` -- confirm this is intentional.
    theta, data = untangle_state(new_state, dim_points=5000)
    if compute_analytical_manual:
        manual_prob = compute_sw_prob(data=data,
                                      theta=theta,
                                      rho_0_at_pi=rho_0_at_pi,
                                      normalize=True)
    else:
        manual_prob = -1
    if compute_fvm:
        # rebuild the initial distribution on the FVM grid
        theta, rho_init = untangle_state(s_rho_0,
                                         dim_points=dim_points,
                                         lin_space_z=lin_space_z)
        rho_init = np.array(rho_init)
        fvm_data = fvm.solve_mtj_fp(rho_init=rho_init,
                                    delta=delta,
                                    i0=i,
                                    h=h,
                                    T=tau,
                                    dim_points=dim_points+1,
                                    t_step=t_step,
                                    do_3d=False,
                                    lin_space_z=lin_space_z)
        theta = np.arccos(fvm_data['z0'])
        data = fvm_data['rho']
        fvm_prob = compute_sw_prob(data=data,
                                   theta=theta,
                                   rho_0_at_pi=rho_0_at_pi,
                                   normalize=True)
    else:
        fvm_prob = -1
    if compute_analytical_sw:
        # complement of the eq (12) series estimate
        analytical_prob = 1 - get_analytical_sw_prob(new_state, sn)
    else:
        analytical_prob = -1
    return {'manual_prob': manual_prob,
            'fvm_prob': fvm_prob,
            'analytical_prob': analytical_prob}
def get_sw_continuous_prob(s_rho_0, tau, delta, i, h,
                           rho_0_at_pi=False,
                           compute_fvm=False,
                           compute_analytical_sw=False,
                           compute_analytical_manual=False,
                           dim_points=1000,
                           lin_space_z=False,
                           t_step=1e-3,
                           sn=None):
    """Evolve FPE and compute switching probability over time between 0-tau.

    Returns a dict with the time grid, the theta grid, the evolving
    distribution ('pdf') and up to three probability traces (manual,
    FVM-based and eq (12)-based); traces that were not requested are
    filled with -1.
    """
    # analytical rho computation
    time = np.arange(0, tau, t_step)
    fvm_time = np.arange(0, tau, t_step)
    manual_prob = np.zeros(time.shape[0])
    distribution = np.zeros((time.shape[0], dim_points))
    analytical_prob = np.zeros(time.shape[0])
    # share A across all time steps; only exp(t*A) changes per step
    A = generate_A(i, h, delta, s_rho_0.size-1)
    for t_idx, t in enumerate(time):
        new_state = get_state(t, a_matrix=A, state_0=s_rho_0)
        theta, data = untangle_state(new_state, dim_points=dim_points)
        distribution[t_idx] = data
        if compute_analytical_manual:
            manual_prob[t_idx] = compute_sw_prob(data=data,
                                                 theta=theta,
                                                 rho_0_at_pi=rho_0_at_pi,
                                                 normalize=True)
        else:
            manual_prob[t_idx] = -1
        if compute_analytical_sw:
            # complement of the eq (12) series estimate
            analytical_prob[t_idx] = 1 - get_analytical_sw_prob(new_state, sn)
        else:
            analytical_prob[t_idx] = -1
    # default to the analytical theta grid; overwritten when FVM runs
    fvm_theta = theta
    if compute_fvm:
        # rebuild the initial distribution on the FVM grid
        theta, rho_init = untangle_state(s_rho_0,
                                         dim_points=dim_points,
                                         lin_space_z=lin_space_z)
        rho_init = np.array(rho_init)
        fvm_data = fvm.solve_mtj_fp(rho_init=rho_init,
                                    delta=delta,
                                    i0=i,
                                    h=h,
                                    T=tau,
                                    dim_points=dim_points+1,
                                    t_step=t_step,
                                    do_3d=True,
                                    lin_space_z=lin_space_z)
        fvm_theta = np.arccos(fvm_data['z0'])
        data = fvm_data['rho'].T
        fvm_time = fvm_data['t0']
        fvm_prob = compute_sw_prob(data=data,
                                   theta=fvm_theta,
                                   rho_0_at_pi=rho_0_at_pi,
                                   normalize=True)
    else:
        fvm_prob = -1*np.ones(time.shape[0])
    return {'time': time,
            'theta': theta,
            'manual_prob': manual_prob,
            'pdf': distribution,
            'fvm_time': fvm_time,
            'fvm_theta': fvm_theta,
            'fvm_prob': fvm_prob,
            'analytical_prob': analytical_prob}
| [
"numpy.polynomial.legendre.legfit",
"numpy.heaviside",
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"numpy.sin",
"numpy.arange",
"numpy.exp",
"scipy.optimize.basinhopping",
"scipy.optimize.minimize",
"scipy.optimize.shgo",
"numpy.linspace",
"numpy.polynomial.legendre.legval",
"numpy.arccos",
... | [((1792, 1829), 'numpy.zeros', 'np.zeros', (['[j + 1, j + 1]'], {'dtype': 'float'}), '([j + 1, j + 1], dtype=float)\n', (1800, 1829), True, 'import numpy as np\n'), ((3581, 3614), 'numpy.linspace', 'np.linspace', (['np.pi', '(0)', 'dim_points'], {}), '(np.pi, 0, dim_points)\n', (3592, 3614), True, 'import numpy as np\n'), ((7107, 7124), 'numpy.zeros', 'np.zeros', (['i.shape'], {}), '(i.shape)\n', (7115, 7124), True, 'import numpy as np\n'), ((7508, 7525), 'numpy.any', 'np.any', (['(sw_t <= 0)'], {}), '(sw_t <= 0)\n', (7514, 7525), True, 'import numpy as np\n'), ((15280, 15331), 'numpy.polynomial.legendre.legfit', 'np.polynomial.legendre.legfit', ([], {'x': 'z', 'y': 'rho_0', 'deg': 'L0'}), '(x=z, y=rho_0, deg=L0)\n', (15309, 15331), True, 'import numpy as np\n'), ((15736, 15766), 'numpy.arange', 'np.arange', (['(L0 + 1)'], {'dtype': 'dtype'}), '(L0 + 1, dtype=dtype)\n', (15745, 15766), True, 'import numpy as np\n'), ((16489, 16520), 'numpy.dot', 'np.dot', (['(2.0 * np.pi * sn)', 'state'], {}), '(2.0 * np.pi * sn, state)\n', (16495, 16520), True, 'import numpy as np\n'), ((20845, 20870), 'numpy.arange', 'np.arange', (['(0)', 'tau', 't_step'], {}), '(0, tau, t_step)\n', (20854, 20870), True, 'import numpy as np\n'), ((20886, 20911), 'numpy.arange', 'np.arange', (['(0)', 'tau', 't_step'], {}), '(0, tau, t_step)\n', (20895, 20911), True, 'import numpy as np\n'), ((20930, 20953), 'numpy.zeros', 'np.zeros', (['time.shape[0]'], {}), '(time.shape[0])\n', (20938, 20953), True, 'import numpy as np\n'), ((20973, 21010), 'numpy.zeros', 'np.zeros', (['(time.shape[0], dim_points)'], {}), '((time.shape[0], dim_points))\n', (20981, 21010), True, 'import numpy as np\n'), ((21033, 21056), 'numpy.zeros', 'np.zeros', (['time.shape[0]'], {}), '(time.shape[0])\n', (21041, 21056), True, 'import numpy as np\n'), ((3445, 3475), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'dim_points'], {}), '(-1, 1, dim_points)\n', (3456, 3475), True, 'import numpy as np\n'), ((3492, 3504), 
'numpy.arccos', 'np.arccos', (['z'], {}), '(z)\n', (3501, 3504), True, 'import numpy as np\n'), ((8908, 8986), 'scipy.optimize.basinhopping', 'optimize.basinhopping', (['err', 'p0'], {'niter': '(500)', 'minimizer_kwargs': "{'bounds': bounds}"}), "(err, p0, niter=500, minimizer_kwargs={'bounds': bounds})\n", (8929, 8986), False, 'from scipy import optimize\n'), ((11587, 11620), 'numpy.array', 'np.array', (['[delta, nu, alpha, h_k]'], {}), '([delta, nu, alpha, h_k])\n', (11595, 11620), True, 'import numpy as np\n'), ((12359, 12376), 'numpy.zeros', 'np.zeros', (['i.shape'], {}), '(i.shape)\n', (12367, 12376), True, 'import numpy as np\n'), ((12804, 12821), 'numpy.any', 'np.any', (['(sw_t <= 0)'], {}), '(sw_t <= 0)\n', (12810, 12821), True, 'import numpy as np\n'), ((13020, 13033), 'numpy.log', 'np.log', (['times'], {}), '(times)\n', (13026, 13033), True, 'import numpy as np\n'), ((13539, 13561), 'numpy.sqrt', 'np.sqrt', (['(delta / 2 + 1)'], {}), '(delta / 2 + 1)\n', (13546, 13561), True, 'import numpy as np\n'), ((14000, 14030), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'dim_points'], {}), '(-1, 1, dim_points)\n', (14011, 14030), True, 'import numpy as np\n'), ((14047, 14059), 'numpy.arccos', 'np.arccos', (['z'], {}), '(z)\n', (14056, 14059), True, 'import numpy as np\n'), ((14124, 14157), 'numpy.linspace', 'np.linspace', (['np.pi', '(0)', 'dim_points'], {}), '(np.pi, 0, dim_points)\n', (14135, 14157), True, 'import numpy as np\n'), ((14170, 14183), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (14176, 14183), True, 'import numpy as np\n'), ((14521, 14577), 'scipy.stats.maxwell.pdf', 'maxwell.pdf', (['theta'], {'loc': 'maxwell_loc', 'scale': 'maxwell_scale'}), '(theta, loc=maxwell_loc, scale=maxwell_scale)\n', (14532, 14577), False, 'from scipy.stats import maxwell\n'), ((14664, 14677), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (14670, 14677), True, 'import numpy as np\n'), ((17026, 17054), 'numpy.expand_dims', 'np.expand_dims', (['data'], 
{'axis': '(0)'}), '(data, axis=0)\n', (17040, 17054), True, 'import numpy as np\n'), ((19250, 19268), 'numpy.array', 'np.array', (['rho_init'], {}), '(rho_init)\n', (19258, 19268), True, 'import numpy as np\n'), ((19288, 19443), 'fvm.mtj_fp_fvm.solve_mtj_fp', 'fvm.solve_mtj_fp', ([], {'rho_init': 'rho_init', 'delta': 'delta', 'i0': 'i', 'h': 'h', 'T': 'tau', 'dim_points': '(dim_points + 1)', 't_step': 't_step', 'do_3d': '(False)', 'lin_space_z': 'lin_space_z'}), '(rho_init=rho_init, delta=delta, i0=i, h=h, T=tau,\n dim_points=dim_points + 1, t_step=t_step, do_3d=False, lin_space_z=\n lin_space_z)\n', (19304, 19443), True, 'import fvm.mtj_fp_fvm as fvm\n'), ((19737, 19762), 'numpy.arccos', 'np.arccos', (["fvm_data['z0']"], {}), "(fvm_data['z0'])\n", (19746, 19762), True, 'import numpy as np\n'), ((22081, 22099), 'numpy.array', 'np.array', (['rho_init'], {}), '(rho_init)\n', (22089, 22099), True, 'import numpy as np\n'), ((22119, 22273), 'fvm.mtj_fp_fvm.solve_mtj_fp', 'fvm.solve_mtj_fp', ([], {'rho_init': 'rho_init', 'delta': 'delta', 'i0': 'i', 'h': 'h', 'T': 'tau', 'dim_points': '(dim_points + 1)', 't_step': 't_step', 'do_3d': '(True)', 'lin_space_z': 'lin_space_z'}), '(rho_init=rho_init, delta=delta, i0=i, h=h, T=tau,\n dim_points=dim_points + 1, t_step=t_step, do_3d=True, lin_space_z=\n lin_space_z)\n', (22135, 22273), True, 'import fvm.mtj_fp_fvm as fvm\n'), ((22571, 22596), 'numpy.arccos', 'np.arccos', (["fvm_data['z0']"], {}), "(fvm_data['z0'])\n", (22580, 22596), True, 'import numpy as np\n'), ((3101, 3134), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(tau * a_matrix)'], {}), '(tau * a_matrix)\n', (3118, 3134), True, 'import scipy as scipy\n'), ((3527, 3568), 'numpy.polynomial.legendre.legval', 'np.polynomial.legendre.legval', (['z', 'm_state'], {}), '(z, m_state)\n', (3556, 3568), True, 'import numpy as np\n'), ((3663, 3676), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3669, 3676), True, 'import numpy as np\n'), ((5709, 5741), 'numpy.abs', 
'np.abs', (['(sw_prob - target_sw_prob)'], {}), '(sw_prob - target_sw_prob)\n', (5715, 5741), True, 'import numpy as np\n'), ((9452, 9496), 'scipy.optimize.shgo', 'optimize.shgo', (['err'], {'bounds': 'bounds', 'iters': '(200)'}), '(err, bounds=bounds, iters=200)\n', (9465, 9496), False, 'from scipy import optimize\n'), ((12935, 12947), 'numpy.log', 'np.log', (['sw_t'], {}), '(sw_t)\n', (12941, 12947), True, 'import numpy as np\n'), ((14694, 14732), 'numpy.exp', 'np.exp', (['(-delta * sin_theta * sin_theta)'], {}), '(-delta * sin_theta * sin_theta)\n', (14700, 14732), True, 'import numpy as np\n'), ((14729, 14765), 'numpy.heaviside', 'np.heaviside', (['(np.pi / 2 - theta)', '(0.5)'], {}), '(np.pi / 2 - theta, 0.5)\n', (14741, 14765), True, 'import numpy as np\n'), ((22905, 22927), 'numpy.ones', 'np.ones', (['time.shape[0]'], {}), '(time.shape[0])\n', (22912, 22927), True, 'import numpy as np\n'), ((1569, 1582), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1575, 1582), True, 'import numpy as np\n'), ((9761, 9817), 'scipy.optimize.brute', 'optimize.brute', (['err'], {'ranges': 'bounds', 'finish': 'optimize.fmin'}), '(err, ranges=bounds, finish=optimize.fmin)\n', (9775, 9817), False, 'from scipy import optimize\n'), ((11639, 11651), 'numpy.isnan', 'np.isnan', (['_p'], {}), '(_p)\n', (11647, 11651), True, 'import numpy as np\n'), ((11663, 11675), 'numpy.isinf', 'np.isinf', (['_p'], {}), '(_p)\n', (11671, 11675), True, 'import numpy as np\n'), ((14285, 14303), 'numpy.sqrt', 'np.sqrt', (['(2 * delta)'], {}), '(2 * delta)\n', (14292, 14303), True, 'import numpy as np\n'), ((15783, 15809), 'numpy.ones', 'np.ones', (['sn[3::2].shape[0]'], {}), '(sn[3::2].shape[0])\n', (15790, 15809), True, 'import numpy as np\n'), ((10230, 10322), 'scipy.optimize.minimize', 'optimize.minimize', (['err'], {'x0': 'p0', 'bounds': 'bounds', 'method': '"""L-BFGS-B"""', 'options': "{'eps': 1e-13}"}), "(err, x0=p0, bounds=bounds, method='L-BFGS-B', options={\n 'eps': 1e-13})\n", (10247, 
10322), False, 'from scipy import optimize\n'), ((15881, 15920), 'scipy.special.factorial2', 'scipy.special.factorial2', (['f'], {'exact': '(True)'}), '(f, exact=True)\n', (15905, 15920), True, 'import scipy as scipy\n'), ((16013, 16052), 'scipy.special.factorial2', 'scipy.special.factorial2', (['f'], {'exact': '(True)'}), '(f, exact=True)\n', (16037, 16052), True, 'import scipy as scipy\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
# Resolve the project layout from the PLASTICC_DIR environment variable.
# NOTE(review): os.getenv returns None when the variable is unset, which
# would make the os.path.join calls below raise -- confirm the variable is
# always exported before running this script.
ROOT_DIR = os.getenv('PLASTICC_DIR')
WORK_DIR = os.path.join(ROOT_DIR, 'plasticc')
DATA_DIR = os.path.join(ROOT_DIR, 'plasticc_data')
# make the plasticc package importable
sys.path.append(WORK_DIR)
import numpy as np
import plasticc
import plasticc.get_data
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import to_hex
from matplotlib.backends.backend_pdf import PdfPages
def main():
    """Plot per-model redshift sanity checks into one multi-page PDF.

    For every model id below 80 in the sntypes table, draw two joint plots:
    true redshift vs host photo-z, and host photo-z vs its error.  Scatter
    plots are used for small samples (<= 2500 objects) and hex-bin plots
    otherwise.  One page per figure is appended to
    ``<fig_dir>/redshift_checks_<data_release>_<field>.pdf``.
    """
    kwargs = plasticc.get_data.parse_getdata_options()
    print("This config ", kwargs)
    data_release = kwargs.pop('data_release')
    fig_dir = os.path.join(WORK_DIR, 'Figures', data_release, 'rate_analysis')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
    # the model id is set per-iteration below
    _ = kwargs.pop('model')
    out_field = kwargs.get('field')
    kwargs['columns'] = ['objid', 'ptrobs_min', 'ptrobs_max',
                         'hostgal_photoz', 'hostgal_photoz_err',
                         'sim_redshift_host', ]
    sntypes = plasticc.get_data.GetData.get_sntypes()
    getter = plasticc.get_data.GetData(data_release)
    cmap = plt.cm.tab20
    keys = np.array(list(sntypes.keys()))
    nlines = len(keys[keys < 80])
    # NOTE(review): the iterator only holds nlines - 2 colours; if fewer
    # than two of the models below get skipped, next(color) raises
    # StopIteration -- confirm against the sntypes table.
    color = iter(cmap(np.linspace(0, 1, nlines - 2)))
    with PdfPages(f'{fig_dir}/redshift_checks_{data_release}_{out_field}.pdf') as pdf:
        for i, model in enumerate(sntypes.keys()):
            if model >= 80:
                # models >= 80 are out of scope for this check
                break
            kwargs['model'] = model
            kwargs['big'] = True
            head = getter.get_lcs_headers(**kwargs)
            model_name = sntypes[model]
            head = list(head)
            nobs = len(head)
            if nobs <= 1:
                # not enough objects to plot
                continue
            c = to_hex(next(color), keep_alpha=False)
            objid, _, _, hz, dhz, z = zip(*head)
            if nobs <= 2500:
                # small samples: scatter stays readable
                g1 = (sns.jointplot(z, hz, color=c, kind='scatter',
                                    xlim=(0, 3.), ylim=(0, 3.), height=8)
                      .set_axis_labels("z", "hostz"))
                g2 = (sns.jointplot(hz, dhz, color=c, kind='scatter',
                                    xlim=(0, 3.), height=8)
                      .set_axis_labels("hostz", "hostz_err"))
            else:
                # large samples: hex-binning avoids over-plotting
                g1 = (sns.jointplot(z, hz, color=c, kind='hex',
                                    xlim=(0, 3.), ylim=(0, 3.), height=8)
                      .set_axis_labels("z", "hostz"))
                # x data is the photo-z here, so label it "hostz" to match
                # the scatter branch (it was mislabelled "z")
                g2 = (sns.jointplot(hz, dhz, color=c, kind='hex',
                                    xlim=(0, 3.), height=8)
                      .set_axis_labels("hostz", "hostz_err"))
            fig1 = g1.fig
            fig2 = g2.fig
            fig1.suptitle(f'{model_name}_{model}')
            fig2.suptitle(f'{model_name}_{model}')
            # leave headroom for the suptitle
            fig1.tight_layout(rect=[0, 0, 1, 0.97])
            fig2.tight_layout(rect=[0, 0, 1, 0.97])
            pdf.savefig(fig1)
            pdf.savefig(fig2)
            plt.close(fig1)
            plt.close(fig2)
if __name__=='__main__':
    # use main()'s return value (None -> exit status 0) as the exit code
    sys.exit(main())
| [
"sys.path.append",
"matplotlib.backends.backend_pdf.PdfPages",
"os.makedirs",
"matplotlib.pyplot.close",
"plasticc.get_data.GetData.get_sntypes",
"os.path.exists",
"plasticc.get_data.GetData",
"numpy.arange",
"numpy.linspace",
"seaborn.jointplot",
"plasticc.get_data.parse_getdata_options",
"os... | [((157, 182), 'os.getenv', 'os.getenv', (['"""PLASTICC_DIR"""'], {}), "('PLASTICC_DIR')\n", (166, 182), False, 'import os\n'), ((194, 228), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""plasticc"""'], {}), "(ROOT_DIR, 'plasticc')\n", (206, 228), False, 'import os\n'), ((240, 279), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""plasticc_data"""'], {}), "(ROOT_DIR, 'plasticc_data')\n", (252, 279), False, 'import os\n'), ((280, 305), 'sys.path.append', 'sys.path.append', (['WORK_DIR'], {}), '(WORK_DIR)\n', (295, 305), False, 'import sys\n'), ((541, 582), 'plasticc.get_data.parse_getdata_options', 'plasticc.get_data.parse_getdata_options', ([], {}), '()\n', (580, 582), False, 'import plasticc\n'), ((678, 742), 'os.path.join', 'os.path.join', (['WORK_DIR', '"""Figures"""', 'data_release', '"""rate_analysis"""'], {}), "(WORK_DIR, 'Figures', data_release, 'rate_analysis')\n", (690, 742), False, 'import os\n'), ((1008, 1047), 'plasticc.get_data.GetData.get_sntypes', 'plasticc.get_data.GetData.get_sntypes', ([], {}), '()\n', (1045, 1047), False, 'import plasticc\n'), ((1061, 1100), 'plasticc.get_data.GetData', 'plasticc.get_data.GetData', (['data_release'], {}), '(data_release)\n', (1086, 1100), False, 'import plasticc\n'), ((1277, 1301), 'numpy.arange', 'np.arange', (['(0)', '(3.01)', '(0.01)'], {}), '(0, 3.01, 0.01)\n', (1286, 1301), True, 'import numpy as np\n'), ((754, 777), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (768, 777), False, 'import os\n'), ((787, 807), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (798, 807), False, 'import os\n'), ((1312, 1381), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['f"""{fig_dir}/redshift_checks_{data_release}_{out_field}.pdf"""'], {}), "(f'{fig_dir}/redshift_checks_{data_release}_{out_field}.pdf')\n", (1320, 1381), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((1224, 1253), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(nlines - 2)'], 
{}), '(0, 1, nlines - 2)\n', (1235, 1253), True, 'import numpy as np\n'), ((2812, 2827), 'matplotlib.pyplot.close', 'plt.close', (['fig1'], {}), '(fig1)\n', (2821, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2855), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (2849, 2855), True, 'import matplotlib.pyplot as plt\n'), ((1960, 2049), 'seaborn.jointplot', 'sns.jointplot', (['z', 'hz'], {'color': 'c', 'kind': '"""scatter"""', 'xlim': '(0, 3.0)', 'ylim': '(0, 3.0)', 'height': '(8)'}), "(z, hz, color=c, kind='scatter', xlim=(0, 3.0), ylim=(0, 3.0),\n height=8)\n", (1973, 2049), True, 'import seaborn as sns\n'), ((2096, 2168), 'seaborn.jointplot', 'sns.jointplot', (['hz', 'dhz'], {'color': 'c', 'kind': '"""scatter"""', 'xlim': '(0, 3.0)', 'height': '(8)'}), "(hz, dhz, color=c, kind='scatter', xlim=(0, 3.0), height=8)\n", (2109, 2168), True, 'import seaborn as sns\n'), ((2247, 2332), 'seaborn.jointplot', 'sns.jointplot', (['z', 'hz'], {'color': 'c', 'kind': '"""hex"""', 'xlim': '(0, 3.0)', 'ylim': '(0, 3.0)', 'height': '(8)'}), "(z, hz, color=c, kind='hex', xlim=(0, 3.0), ylim=(0, 3.0),\n height=8)\n", (2260, 2332), True, 'import seaborn as sns\n'), ((2379, 2447), 'seaborn.jointplot', 'sns.jointplot', (['hz', 'dhz'], {'color': 'c', 'kind': '"""hex"""', 'xlim': '(0, 3.0)', 'height': '(8)'}), "(hz, dhz, color=c, kind='hex', xlim=(0, 3.0), height=8)\n", (2392, 2447), True, 'import seaborn as sns\n')] |
"""
.. _example3:
Third Example: Injecting varying ``sample_weight`` vectors to a linear regression model for GridSearchCV
-------------------------------------------------------------------------------------------------------------------
This example illustrates a case in which a varying vector is injected to a linear regression model as ``sample_weight`` in order to evaluate them and obtain the sample_weight that generates the best results.
Let's imagine we have a sample_weight vector and different powers of the vector are needed to be evaluated. To perform such experiment, the following issues appear:
- The shape of the graph is not a linear sequence as those that can be implemented using Pipeline.
- More than two variables (typically: ``X`` and ``y``) need to be accordingly split in order to perform the cross validation with GridSearchCV, in this case: ``X``, ``y`` and ``sample_weight``.
- The information provided to the ``sample_weight`` parameter of the LinearRegression step varies on the different scenarios explored by GridSearchCV. In a GridSearchCV with Pipeline, ``sample_weight`` can't vary because it is treated as a ``fit_param`` instead of a variable.
Steps of the **PipeGraph**:
- **selector**: Featuring a :class:`ColumnSelector` custom step. This is not a sklearn original object but a custom class that allows to split an array into columns. In this case, ``X`` augmented data is column-wise divided as specified in a mapping dictionary. We previously created an augmented ``X`` in which all data but ``y`` is concatenated and it will be used by :class:`GridSearchCV` to make the cross validation splits. **selector** step de-concatenates such data.
- **custom_power**: Featuring a :class:`CustomPower` custom class. A simple transformation of the input data that is powered to a specified power as indicated in ``param_grid``.
- **scaler**: implements :class:`MinMaxScaler` class
- **polynomial_features**: Contains a :class:`PolynomialFeatures` object
- **linear_model**: Contains a :class:`LinearRegression` model
.. figure:: https://raw.githubusercontent.com/mcasl/PipeGraph/master/examples/images/Diapositiva3.png
Figure 1. PipeGraph diagram showing the steps and their connections
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from pipegraph.base import PipeGraph, ColumnSelector, Reshape
from pipegraph.demo_blocks import CustomPower
import matplotlib.pyplot as plt
###############################################################################
# We create an augmented ``X`` in which all data but ``y`` is concatenated. In this case, we concatenate ``X`` and ``sample_weight`` vector.
# Augmented X: the true inputs and the sample_weight vector are concatenated
# column-wise so GridSearchCV splits them consistently during CV.
X = pd.DataFrame(dict(X=np.array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
                   sample_weight=np.array([0.01, 0.95, 0.10, 0.95, 0.95, 0.10, 0.10, 0.95, 0.95, 0.95, 0.01])))
y = np.array( [ 10, 4, 20, 16, 25 , -60, 85, 64, 81, 100, 150])
###############################################################################
# Next we define the steps and we use :class:`PipeGraphRegressor` as estimator for :class:`GridSearchCV`.
scaler = MinMaxScaler()
polynomial_features = PolynomialFeatures()
linear_model = LinearRegression()
custom_power = CustomPower()
# The selector splits the augmented X back into the original X (column 0)
# and the sample_weight vector (column 1).
selector = ColumnSelector(mapping={'X': slice(0, 1),
                                'sample_weight': slice(1,2)})
steps = [('selector', selector),
         ('custom_power', custom_power),
         ('scaler', scaler),
         ('polynomial_features', polynomial_features),
         ('linear_model', linear_model)]
pgraph = PipeGraph(steps=steps)
# Wire the graph: X flows through scaler -> polynomial_features while the
# powered sample_weight feeds linear_model's ``sample_weight`` input.
(pgraph.inject(sink='selector', sink_var='X', source='_External', source_var='X')
       .inject('custom_power', 'X', 'selector', 'sample_weight')
       .inject('scaler', 'X', 'selector', 'X')
       .inject('polynomial_features', 'X', 'scaler')
       .inject('linear_model', 'X', 'polynomial_features')
       .inject('linear_model', 'y', source_var='y')
       .inject('linear_model', 'sample_weight', 'custom_power'))
###############################################################################
# Then we define ``param_grid`` as expected by :class:`GridSearchCV` exploring a few possibilities
# of varying parameters, including the power applied to sample_weight.
param_grid = {'polynomial_features__degree': range(1, 3),
              'linear_model__fit_intercept': [True, False],
              'custom_power__power': [1, 5, 10, 20, 30]}
grid_search_regressor = GridSearchCV(estimator=pgraph, param_grid=param_grid, refit=True)
grid_search_regressor.fit(X, y)
y_pred = grid_search_regressor.predict(X)
# Plot the original targets against the best model's predictions.
plt.scatter(X.loc[:,'X'], y)
plt.scatter(X.loc[:,'X'], y_pred)
plt.show()
power = grid_search_regressor.best_estimator_.get_params()['custom_power']
print('Power that obtains the best results in the linear model: \n {}'.format(power))
###############################################################################
# This example displayed a non-linear workflow successfully implemented by **PipeGraph**, while at the same time showing a way to circumvent current limitations of standard :class:`GridSearchCV`, in particular, the restriction on the number of input parameters.
# :ref:`Next examples <example4>` show more elaborated examples in increasing complexity order.
| [
"sklearn.model_selection.GridSearchCV",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.PolynomialFeatures",
"pipegraph.demo_blocks.CustomPower",
"numpy.array",
"pipegraph.base.PipeGraph"
] | [((3046, 3102), 'numpy.array', 'np.array', (['[10, 4, 20, 16, 25, -60, 85, 64, 81, 100, 150]'], {}), '([10, 4, 20, 16, 25, -60, 85, 64, 81, 100, 150])\n', (3054, 3102), True, 'import numpy as np\n'), ((3341, 3355), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (3353, 3355), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3378, 3398), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {}), '()\n', (3396, 3398), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((3414, 3432), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3430, 3432), False, 'from sklearn.linear_model import LinearRegression\n'), ((3448, 3461), 'pipegraph.demo_blocks.CustomPower', 'CustomPower', ([], {}), '()\n', (3459, 3461), False, 'from pipegraph.demo_blocks import CustomPower\n'), ((3790, 3812), 'pipegraph.base.PipeGraph', 'PipeGraph', ([], {'steps': 'steps'}), '(steps=steps)\n', (3799, 3812), False, 'from pipegraph.base import PipeGraph, ColumnSelector, Reshape\n'), ((4647, 4712), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'pgraph', 'param_grid': 'param_grid', 'refit': '(True)'}), '(estimator=pgraph, param_grid=param_grid, refit=True)\n', (4659, 4712), False, 'from sklearn.model_selection import GridSearchCV\n'), ((4788, 4817), 'matplotlib.pyplot.scatter', 'plt.scatter', (["X.loc[:, 'X']", 'y'], {}), "(X.loc[:, 'X'], y)\n", (4799, 4817), True, 'import matplotlib.pyplot as plt\n'), ((4817, 4851), 'matplotlib.pyplot.scatter', 'plt.scatter', (["X.loc[:, 'X']", 'y_pred'], {}), "(X.loc[:, 'X'], y_pred)\n", (4828, 4851), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4859, 4861), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2906), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n', (2869, 2906), True, 'import numpy as 
np\n'), ((2963, 3036), 'numpy.array', 'np.array', (['[0.01, 0.95, 0.1, 0.95, 0.95, 0.1, 0.1, 0.95, 0.95, 0.95, 0.01]'], {}), '([0.01, 0.95, 0.1, 0.95, 0.95, 0.1, 0.1, 0.95, 0.95, 0.95, 0.01])\n', (2971, 3036), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Simple example of using ArenaRllibEnv, which is a interface that
convert a arena environment to a MultiAgentEnv
(see: https://ray.readthedocs.io/en/latest/rllib-env.html#multi-agent-and-hierarchical)
interface by rllib.
"""
import yaml
import cv2
import logging
import arena
import numpy as np
from copy import deepcopy as dcopy
# abbreviate numpy array printing in the log output below
np.set_printoptions(edgeitems=1)
logger = logging.getLogger(__name__)
def run(args, parser):
with open(args.config_file) as f:
experiments = yaml.safe_load(f)
env = arena.get_one_from_grid_search(
dcopy(experiments["Arena-Benchmark"]["env"])
)
env_config = dcopy(experiments["Arena-Benchmark"]["config"]["env_config"])
env_config["sensors"] = arena.get_one_from_grid_search(
env_config["sensors"]
)
env_config["multi_agent_obs"] = arena.get_one_from_grid_search(
env_config["multi_agent_obs"]
)
env_config["train_mode"] = False
logger.info(env)
# Tennis-Sparse-2T1P-Discrete
logger.info(env_config)
# {'is_shuffle_agents': True, 'train_mode': True, 'sensors': 'visual_FP'}
env = arena.ArenaRllibEnv(
env=env,
env_config=env_config,
)
logger.info(env.observation_space)
logger.info(env.action_space)
obs_rllib = env.reset()
logger.info("obs_rllib: {}".format(obs_rllib))
episode_video = {}
while True:
# Actions should be provided for each agent that returned an observation.
obs_rllib, rewards_rllib, dones_rllib, infos_rllib = env.step(
# actions={"agent_0": 0, "agent_1": 7}
actions_rllib={
"agent_0": 0,
"agent_1": 5,
"agent_2": 6,
"agent_3": 3,
}
)
logger.info("obs_rllib: {}".format(obs_rllib))
logger.info("rewards_rllib: {}".format(rewards_rllib))
logger.info("dones_rllib: {}".format(dones_rllib))
logger.info("infos_rllib: {}".format(infos_rllib))
if dones_rllib["__all__"]:
for episode_video_key in episode_video.keys():
# initialize video writer
fourcc = cv2.VideoWriter_fourcc(
'M', 'J', 'P', 'G'
)
fps = 15
video_filename = "../{}.avi".format(
episode_video_key,
)
video_size = (
np.shape(episode_video[episode_video_key])[2],
np.shape(episode_video[episode_video_key])[1]
)
video_writer = cv2.VideoWriter(
video_filename, fourcc, fps, video_size
)
for frame_i in range(np.shape(episode_video[episode_video_key])[0]):
video_writer.write(
episode_video[episode_video_key][frame_i]
)
video_writer.release()
episode_video = {}
input('episode end, keep going?')
else:
for agent_id in obs_rllib.keys():
obs_each_agent = obs_rllib[agent_id]
if isinstance(obs_each_agent, dict):
obs_keys = obs_each_agent.keys()
else:
obs_keys = ["default_own_obs"]
for obs_key in obs_keys:
if isinstance(obs_each_agent, dict):
obs_each_key = obs_each_agent[obs_key]
else:
obs_each_key = obs_each_agent
obs_each_channel = {}
if len(np.shape(obs_each_key)) == 1:
# vector observation
obs_each_channel["default_channel"] = arena.get_img_from_fig(
arena.plot_feature(
obs_each_key
)
)
elif len(np.shape(obs_each_key)) == 3:
# visual observation
for channel_i in range(np.shape(obs_each_key)[2]):
gray = obs_each_key[
:, :, channel_i
]
rgb = cv2.merge([gray, gray, gray])
rgb = (rgb * 255.0).astype(np.uint8)
obs_each_channel["{}_channel".format(
channel_i
)] = rgb
else:
raise NotImplementedError
for channel_key in obs_each_channel.keys():
temp = np.expand_dims(
obs_each_channel[channel_key],
0
)
episode_video_key = "agent_{}-obs_{}-channel-{}".format(
agent_id,
obs_key,
channel_key,
)
if episode_video_key not in episode_video.keys():
episode_video[episode_video_key] = temp
else:
episode_video[episode_video_key] = np.concatenate(
(episode_video[episode_video_key], temp)
)
if __name__ == "__main__":
parser = arena.create_parser()
args = parser.parse_args()
run(args, parser)
| [
"arena.ArenaRllibEnv",
"copy.deepcopy",
"numpy.set_printoptions",
"arena.create_parser",
"cv2.VideoWriter_fourcc",
"arena.plot_feature",
"numpy.concatenate",
"numpy.expand_dims",
"numpy.shape",
"yaml.safe_load",
"cv2.VideoWriter",
"cv2.merge",
"arena.get_one_from_grid_search",
"logging.get... | [((444, 476), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'edgeitems': '(1)'}), '(edgeitems=1)\n', (463, 476), True, 'import numpy as np\n'), ((487, 514), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (504, 514), False, 'import logging\n'), ((739, 800), 'copy.deepcopy', 'dcopy', (["experiments['Arena-Benchmark']['config']['env_config']"], {}), "(experiments['Arena-Benchmark']['config']['env_config'])\n", (744, 800), True, 'from copy import deepcopy as dcopy\n'), ((830, 883), 'arena.get_one_from_grid_search', 'arena.get_one_from_grid_search', (["env_config['sensors']"], {}), "(env_config['sensors'])\n", (860, 883), False, 'import arena\n'), ((934, 995), 'arena.get_one_from_grid_search', 'arena.get_one_from_grid_search', (["env_config['multi_agent_obs']"], {}), "(env_config['multi_agent_obs'])\n", (964, 995), False, 'import arena\n'), ((1221, 1272), 'arena.ArenaRllibEnv', 'arena.ArenaRllibEnv', ([], {'env': 'env', 'env_config': 'env_config'}), '(env=env, env_config=env_config)\n', (1240, 1272), False, 'import arena\n'), ((5566, 5587), 'arena.create_parser', 'arena.create_parser', ([], {}), '()\n', (5585, 5587), False, 'import arena\n'), ((601, 618), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (615, 618), False, 'import yaml\n'), ((670, 714), 'copy.deepcopy', 'dcopy', (["experiments['Arena-Benchmark']['env']"], {}), "(experiments['Arena-Benchmark']['env'])\n", (675, 714), True, 'from copy import deepcopy as dcopy\n'), ((2270, 2312), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (2292, 2312), False, 'import cv2\n'), ((2699, 2755), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_filename', 'fourcc', 'fps', 'video_size'], {}), '(video_filename, fourcc, fps, video_size)\n', (2714, 2755), False, 'import cv2\n'), ((2537, 2579), 'numpy.shape', 'np.shape', (['episode_video[episode_video_key]'], {}), 
'(episode_video[episode_video_key])\n', (2545, 2579), True, 'import numpy as np\n'), ((2604, 2646), 'numpy.shape', 'np.shape', (['episode_video[episode_video_key]'], {}), '(episode_video[episode_video_key])\n', (2612, 2646), True, 'import numpy as np\n'), ((2832, 2874), 'numpy.shape', 'np.shape', (['episode_video[episode_video_key]'], {}), '(episode_video[episode_video_key])\n', (2840, 2874), True, 'import numpy as np\n'), ((4814, 4862), 'numpy.expand_dims', 'np.expand_dims', (['obs_each_channel[channel_key]', '(0)'], {}), '(obs_each_channel[channel_key], 0)\n', (4828, 4862), True, 'import numpy as np\n'), ((3740, 3762), 'numpy.shape', 'np.shape', (['obs_each_key'], {}), '(obs_each_key)\n', (3748, 3762), True, 'import numpy as np\n'), ((3931, 3963), 'arena.plot_feature', 'arena.plot_feature', (['obs_each_key'], {}), '(obs_each_key)\n', (3949, 3963), False, 'import arena\n'), ((5405, 5461), 'numpy.concatenate', 'np.concatenate', (['(episode_video[episode_video_key], temp)'], {}), '((episode_video[episode_video_key], temp))\n', (5419, 5461), True, 'import numpy as np\n'), ((4082, 4104), 'numpy.shape', 'np.shape', (['obs_each_key'], {}), '(obs_each_key)\n', (4090, 4104), True, 'import numpy as np\n'), ((4397, 4426), 'cv2.merge', 'cv2.merge', (['[gray, gray, gray]'], {}), '([gray, gray, gray])\n', (4406, 4426), False, 'import cv2\n'), ((4206, 4228), 'numpy.shape', 'np.shape', (['obs_each_key'], {}), '(obs_each_key)\n', (4214, 4228), True, 'import numpy as np\n')] |
"""
<NAME>, <EMAIL>
This code is an adaption of a python module for regularized kernel canonical correlation analysis, which can be found on
https://github.com/gallantlab/pyrcca
"""
import sys
import h5py
import joblib
import numpy as np
from scipy.linalg import eigh
from sklearn.cross_decomposition.pls_ import _center_scale_xy
sys.path.append("../") # go to parent dir
sys.path.append("../..") # go to parent dir
class _CCABase(object):
def __init__(self, reg=None, n_components=None, cutoff=1e-15):
self.reg = reg
self.n_components = n_components
self.cutoff = cutoff
def fit(self, X, Y):
# print('Training CCA, regularization = %0.4f, %d components' % (self.reg, self.n_components))
X = X.copy()
Y = Y.copy()
# Subtract mean and divide by std
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (_center_scale_xy(X, Y))
data = [X, Y]
# Get dimensions of data and number of canonical components
kernel = [d.T for d in data]
nDs = len(kernel)
nFs = [k.shape[0] for k in kernel]
numCC = min([k.shape[1] for k in kernel]) if self.n_components is None else self.n_components
# Get the auto- and cross-covariance matrices
crosscovs = [np.dot(ki, kj.T) / len(ki.T - 1) for ki in kernel for kj in kernel]
# Allocate left-hand side (LH) and right-hand side (RH):
LH = np.zeros((sum(nFs), sum(nFs)))
RH = np.zeros((sum(nFs), sum(nFs)))
# Fill the left and right sides of the eigenvalue problem
# Eq. (7) in https://www.frontiersin.org/articles/10.3389/fninf.2016.00049/full
for ii in range(nDs):
RH[sum(nFs[:ii]):sum(nFs[:ii + 1]), sum(nFs[:ii]):sum(nFs[:ii + 1])] = \
(crosscovs[ii * (nDs + 1)] + self.reg[ii] * np.eye(nFs[ii]))
for jj in range(nDs):
if ii != jj:
LH[sum(nFs[:jj]): sum(nFs[:jj + 1]), sum(nFs[:ii]): sum(nFs[:ii + 1])] = crosscovs[nDs * jj + ii]
# The matrices are symmetric, i.e. A = A^T, this makes sure that small differences are evened out.
LH = (LH + LH.T) / 2.
RH = (RH + RH.T) / 2.
maxCC = LH.shape[0]
# Solve the generalized eigenvalue problem for the two symmetric matrices
# Returns the eigenvalues and the eigenvectors
r, Vs = eigh(LH, RH, eigvals=(maxCC - numCC, maxCC - 1))
r[np.isnan(r)] = 0
rindex = np.argsort(r)[::-1]
comp = []
Vs = Vs[:, rindex]
for ii in range(nDs):
comp.append(Vs[sum(nFs[:ii]):sum(nFs[:ii + 1]), :numCC])
self.x_weights_ = comp[0]
self.y_weights_ = comp[1]
self.x_loadings_ = np.dot(self.x_weights_.T, crosscovs[0]).T
self.y_loadings_ = np.dot(self.y_weights_.T, crosscovs[-1]).T
return self
def transform(self, X, Y):
check_is_fitted(self, 'x_mean_')
check_is_fitted(self, 'y_mean_')
X = X.copy()
Y = Y.copy()
X -= self.x_mean_
X /= self.x_std_
Y -= self.y_mean_
Y /= self.y_std_
x_scores = np.dot(X, self.x_weights_)
y_scores = np.dot(Y, self.y_weights_)
return x_scores, y_scores
def transform2(self, X, Y):
check_is_fitted(self, 'x_mean_')
check_is_fitted(self, 'y_mean_')
X = X.copy()
Y = Y.copy()
X -= self.x_mean_
X /= self.x_std_
Y -= self.y_mean_
Y /= self.y_std_
x_scores = np.dot(self.x_weights_.T, X.T)
y_scores = np.dot(self.y_weights_.T, Y.T)
return x_scores, y_scores
def train(self, data):
print('Training CCA, regularization = %0.4f, %d components' % (self.reg, self.n_components))
comps = kcca(data, self.reg, self.n_components)
self.cancorrs, self.ws, self.comps = recon(data, comps)
if len(data) == 2:
self.cancorrs = self.cancorrs[np.nonzero(self.cancorrs)]
return self
def validate(self, vdata):
vdata = [np.nan_to_num(_zscore(d)) for d in vdata]
if not hasattr(self, 'x_weights_'):
raise NameError('Algorithm has not been trained.')
self.preds, self.corrs = predict(vdata, [self.x_weights_, self.y_weights_], self.cutoff)
return self.corrs
def compute_ev(self, vdata):
"""
This function estimates the variance explained (R^2) in the test data by each of the canonical components.
:param vdata:
:return:
"""
nD = len(vdata)
nC = self.ws[0].shape[1]
nF = [d.shape[1] for d in vdata]
self.ev = [np.zeros((nC, f)) for f in nF]
for cc in range(nC):
ccs = cc + 1
print('Computing explained variance for component #%d' % ccs)
preds, corrs = predict(vdata, [w[:, ccs - 1:ccs] for w in self.ws], self.cutoff)
resids = [abs(d[0] - d[1]) for d in zip(vdata, preds)]
for s in range(nD):
ev = abs(vdata[s].var(0) - resids[s].var(0)) / vdata[s].var(0)
ev[np.isnan(ev)] = 0.
self.ev[s][cc] = ev
return self.ev
def save(self, filename):
h5 = h5py.File(filename, 'a')
for key, value in self.__dict__.items():
if value is not None:
if isinstance(value, list):
for di in range(len(value)):
grpname = 'dataset%d' % di
dgrp = h5.require_group(grpname)
try:
dgrp.create_dataset(key, data=value[di])
except RuntimeError:
del h5[grpname][key]
dgrp.create_dataset(key, data=value[di])
else:
h5.attrs[key] = value
h5.close()
def load(self, filename):
h5 = h5py.File(filename, 'a')
for key, value in h5.attrs.items():
setattr(self, key, value)
for di in range(len(h5.keys())):
ds = 'dataset%d' % di
for key, value in h5[ds].items():
if di == 0:
setattr(self, key, [])
self.__getattribute__(key).append(value.value)
class CCACrossValidate(_CCABase):
"""
Attributes:
numCV (int): number of cross-validation folds
regs (list or numpy.array): regularization param array.
Default: np.logspace(-3, 1, 10)
numCCs (list or numpy.array): list of numbers of canonical dimensions
to keep. Default is np.range(5, 10).
Returns:
ws (list): canonical weights
comps (list): canonical components
cancorrs (list): correlations of the canonical components
on the training dataset
corrs (list): correlations on the validation dataset
preds (list): predictions on the validation dataset
ev (list): explained variance for each canonical dimension
"""
def __init__(self, numCV=None, regs=None, numCCs=None, select=0.2, cutoff=1e-15):
self.numCCs = np.arange(5, 10) if numCCs is None else numCCs
self.select = select
self.regs = np.array(np.logspace(-3, 1, 10)) if regs is None else regs
self.numCV = 10 if numCV is None else numCV
super(CCACrossValidate, self).__init__(cutoff=cutoff)
def train(self, data, parallel=True):
"""
Train CCA with cross-validation for a set of regularization
coefficients and/or numbers of CCs
Attributes:
data (list): training data matrices
(number of samples X number of features).
Number of samples must match across datasets.
parallel (bool): use joblib to train cross-validation folds
in parallel
"""
corr_mat = np.zeros((len(self.regs), len(self.numCCs)))
selection = max(int(self.select * min([d.shape[1] for d in data])), 1)
for ri, reg in enumerate(self.regs):
for ci, numCC in enumerate(self.numCCs):
running_corr_mean_sum = 0.
# Run in parallel
if parallel:
fold_corr_means = joblib.Parallel(n_jobs=self.numCV)(joblib.delayed(train_cvfold)
(data=data, reg=reg, numCC=numCC,
cutoff=self.cutoff, selection=selection
) for fold in range(self.numCV))
running_corr_mean_sum += sum(fold_corr_means)
# Run in sequential
else:
for cvfold in range(self.numCV):
fold_corr_mean = train_cvfold(data=data, reg=reg, numCC=numCC, cutoff=self.cutoff, selection=selection)
running_corr_mean_sum += fold_corr_mean
corr_mat[ri, ci] = running_corr_mean_sum / self.numCV
best_ri, best_ci = np.where(corr_mat == corr_mat.max())
self.best_reg = self.regs[best_ri[0]]
self.best_numCC = self.numCCs[best_ci[0]]
comps = kcca(data, self.best_reg, self.best_numCC)
self.cancorrs, self.ws, self.comps = recon(data, comps)
if len(data) == 2:
self.cancorrs = self.cancorrs[np.nonzero(self.cancorrs)]
return self
def train_cvfold(data, reg, numCC, cutoff, selection):
"""
Train a cross-validation fold of CCA
"""
nT = data[0].shape[0]
chunklen = 10 if nT > 50 else 1
nchunks = int(0.2 * nT / chunklen)
indchunks = list(zip(*[iter(range(nT))] * chunklen))
np.random.shuffle(indchunks)
heldinds = [ind for chunk in indchunks[:nchunks] for ind in chunk]
notheldinds = list(set(range(nT)) - set(heldinds))
comps = kcca([d[notheldinds] for d in data], reg, numCC)
cancorrs, ws, ccomps = recon([d[notheldinds] for d in data], comps)
preds, corrs = predict([d[heldinds] for d in data], ws, cutoff=cutoff)
fold_corr_mean = []
for corr in corrs:
corr_idx = np.argsort(corr)[::-1]
corr_mean = corr[corr_idx][:selection].mean()
fold_corr_mean.append(corr_mean)
return np.mean(fold_corr_mean)
class CCA(_CCABase):
"""Attributes:
reg (float): regularization parameter. Default is 0.1.
n_components (int): number of canonical dimensions to keep. Default is 10.
kernelcca (bool): kernel or non-kernel CCA. Default is True.
ktype (string): type of kernel used if kernelcca is True.
Value can be 'linear' (default) or 'gaussian'.
verbose (bool): default is True.
Returns:
ws (list): canonical weights
comps (list): canonical components
cancorrs (list): correlations of the canonical components
on the training dataset
corrs (list): correlations on the validation dataset
preds (list): predictions on the validation dataset
ev (list): explained variance for each canonical dimension
"""
def __init__(self, reg1=0.2, reg2=0.2, n_components=10, cutoff=1e-15):
reg = [reg1, reg2]
super(CCA, self).__init__(reg=reg, n_components=n_components, cutoff=cutoff)
def train(self, data):
# # Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ = (_center_scale_xy(data[0], data[1]))
return super(CCA, self).train([X, Y])
def predict(vdata, ws, cutoff=1e-15):
"""Get predictions for each dataset based on the other datasets
and weights. Find correlations with actual dataset."""
iws = [np.linalg.pinv(w.T, rcond=cutoff) for w in ws]
ccomp = _listdot([d.T for d in vdata], ws)
ccomp = np.array(ccomp)
preds = []
corrs = []
for dnum in range(len(vdata)):
idx = np.ones((len(vdata),))
idx[dnum] = False
proj = ccomp[idx > 0].mean(0)
pred = np.dot(iws[dnum], proj.T).T
pred = np.nan_to_num(_zscore(pred))
preds.append(pred)
cs = np.nan_to_num(_rowcorr(vdata[dnum].T, pred.T))
corrs.append(cs)
return preds, corrs
def kcca(data, reg=0., numCC=None):
"""Set up and solve the kernel CCA eigenproblem
"""
kernel = [d.T for d in data]
nDs = len(kernel)
nFs = [k.shape[0] for k in kernel]
numCC = min([k.shape[1] for k in kernel]) if numCC is None else numCC
# Get the auto- and cross-covariance matrices
crosscovs = [np.dot(ki, kj.T) / len(ki.T - 1) for ki in kernel for kj in kernel]
# Allocate left-hand side (LH) and right-hand side (RH):
LH = np.zeros((sum(nFs), sum(nFs)))
RH = np.zeros((sum(nFs), sum(nFs)))
# Fill the left and right sides of the eigenvalue problem
# Eq. (7) in https://www.frontiersin.org/articles/10.3389/fninf.2016.00049/full
for ii in range(nDs):
RH[sum(nFs[:ii]):sum(nFs[:ii + 1]), sum(nFs[:ii]):sum(nFs[:ii + 1])] = \
(crosscovs[ii * (nDs + 1)] + reg * np.eye(nFs[ii]))
for jj in range(nDs):
if ii != jj:
LH[sum(nFs[:jj]): sum(nFs[:jj + 1]), sum(nFs[:ii]): sum(nFs[:ii + 1])] = crosscovs[nDs * jj + ii]
LH = (LH + LH.T) / 2.
RH = (RH + RH.T) / 2.
maxCC = LH.shape[0]
# Solve the generalized eigenvalue problem for the two symmetric matrices
r, Vs = eigh(LH, RH, eigvals=(maxCC - numCC, maxCC - 1))
r[np.isnan(r)] = 0
rindex = np.argsort(r)[::-1]
comp = []
Vs = Vs[:, rindex]
for ii in range(nDs):
comp.append(Vs[sum(nFs[:ii]):sum(nFs[:ii + 1]), :numCC])
return comp
def recon(data, comp, corronly=False):
# Get canonical variates and CCs
ws = comp
ccomp = _listdot([d.T for d in data], ws)
corrs = _listcorr(ccomp)
if corronly:
return corrs
else:
return corrs, ws, ccomp
def _zscore(d): return (d - d.mean(0)) / d.std(0)
def _demean(d): return d - d.mean(0)
def _listdot(d1, d2): return [np.dot(x[0].T, x[1]) for x in zip(d1, d2)]
def _listcorr(a):
"""Returns pairwise row correlations for all items in array as a list of matrices
"""
corrs = np.zeros((a[0].shape[1], len(a), len(a)))
for i in range(len(a)):
for j in range(len(a)):
if j > i:
corrs[:, i, j] = [np.nan_to_num(np.corrcoef(ai, aj)[0, 1]) for (ai, aj) in zip(a[i].T, a[j].T)]
return corrs
def _rowcorr(a, b):
"""Correlations between corresponding matrix rows"""
cs = np.zeros((a.shape[0]))
for idx in range(a.shape[0]):
cs[idx] = np.corrcoef(a[idx], b[idx])[0, 1]
return cs
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg.:
``["coef_", "estimator_", ...], "coef_"``
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
Returns
-------
None
Raises
------
NotFittedError
If the attributes are not found.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
if __name__ == '__main__':
pass
| [
"numpy.logspace",
"numpy.isnan",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"numpy.linalg.pinv",
"sys.path.append",
"sklearn.cross_decomposition.pls_._center_scale_xy",
"scipy.linalg.eigh",
"numpy.random.shuffle",
"h5py.File",
"numpy.corrcoef",
"numpy.dot",
"numpy.zeros",
"numpy.nonz... | [((333, 355), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (348, 355), False, 'import sys\n'), ((376, 400), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (391, 400), False, 'import sys\n'), ((9876, 9904), 'numpy.random.shuffle', 'np.random.shuffle', (['indchunks'], {}), '(indchunks)\n', (9893, 9904), True, 'import numpy as np\n'), ((10434, 10457), 'numpy.mean', 'np.mean', (['fold_corr_mean'], {}), '(fold_corr_mean)\n', (10441, 10457), True, 'import numpy as np\n'), ((11978, 11993), 'numpy.array', 'np.array', (['ccomp'], {}), '(ccomp)\n', (11986, 11993), True, 'import numpy as np\n'), ((13585, 13633), 'scipy.linalg.eigh', 'eigh', (['LH', 'RH'], {'eigvals': '(maxCC - numCC, maxCC - 1)'}), '(LH, RH, eigvals=(maxCC - numCC, maxCC - 1))\n', (13589, 13633), False, 'from scipy.linalg import eigh\n'), ((14715, 14735), 'numpy.zeros', 'np.zeros', (['a.shape[0]'], {}), '(a.shape[0])\n', (14723, 14735), True, 'import numpy as np\n'), ((892, 914), 'sklearn.cross_decomposition.pls_._center_scale_xy', '_center_scale_xy', (['X', 'Y'], {}), '(X, Y)\n', (908, 914), False, 'from sklearn.cross_decomposition.pls_ import _center_scale_xy\n'), ((2393, 2441), 'scipy.linalg.eigh', 'eigh', (['LH', 'RH'], {'eigvals': '(maxCC - numCC, maxCC - 1)'}), '(LH, RH, eigvals=(maxCC - numCC, maxCC - 1))\n', (2397, 2441), False, 'from scipy.linalg import eigh\n'), ((3157, 3183), 'numpy.dot', 'np.dot', (['X', 'self.x_weights_'], {}), '(X, self.x_weights_)\n', (3163, 3183), True, 'import numpy as np\n'), ((3203, 3229), 'numpy.dot', 'np.dot', (['Y', 'self.y_weights_'], {}), '(Y, self.y_weights_)\n', (3209, 3229), True, 'import numpy as np\n'), ((3542, 3572), 'numpy.dot', 'np.dot', (['self.x_weights_.T', 'X.T'], {}), '(self.x_weights_.T, X.T)\n', (3548, 3572), True, 'import numpy as np\n'), ((3592, 3622), 'numpy.dot', 'np.dot', (['self.y_weights_.T', 'Y.T'], {}), '(self.y_weights_.T, Y.T)\n', (3598, 3622), True, 'import numpy as 
np\n'), ((5245, 5269), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (5254, 5269), False, 'import h5py\n'), ((5942, 5966), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (5951, 5966), False, 'import h5py\n'), ((11612, 11646), 'sklearn.cross_decomposition.pls_._center_scale_xy', '_center_scale_xy', (['data[0]', 'data[1]'], {}), '(data[0], data[1])\n', (11628, 11646), False, 'from sklearn.cross_decomposition.pls_ import _center_scale_xy\n'), ((11872, 11905), 'numpy.linalg.pinv', 'np.linalg.pinv', (['w.T'], {'rcond': 'cutoff'}), '(w.T, rcond=cutoff)\n', (11886, 11905), True, 'import numpy as np\n'), ((13640, 13651), 'numpy.isnan', 'np.isnan', (['r'], {}), '(r)\n', (13648, 13651), True, 'import numpy as np\n'), ((13670, 13683), 'numpy.argsort', 'np.argsort', (['r'], {}), '(r)\n', (13680, 13683), True, 'import numpy as np\n'), ((14205, 14225), 'numpy.dot', 'np.dot', (['x[0].T', 'x[1]'], {}), '(x[0].T, x[1])\n', (14211, 14225), True, 'import numpy as np\n'), ((2452, 2463), 'numpy.isnan', 'np.isnan', (['r'], {}), '(r)\n', (2460, 2463), True, 'import numpy as np\n'), ((2486, 2499), 'numpy.argsort', 'np.argsort', (['r'], {}), '(r)\n', (2496, 2499), True, 'import numpy as np\n'), ((2747, 2786), 'numpy.dot', 'np.dot', (['self.x_weights_.T', 'crosscovs[0]'], {}), '(self.x_weights_.T, crosscovs[0])\n', (2753, 2786), True, 'import numpy as np\n'), ((2816, 2856), 'numpy.dot', 'np.dot', (['self.y_weights_.T', 'crosscovs[-1]'], {}), '(self.y_weights_.T, crosscovs[-1])\n', (2822, 2856), True, 'import numpy as np\n'), ((4674, 4691), 'numpy.zeros', 'np.zeros', (['(nC, f)'], {}), '((nC, f))\n', (4682, 4691), True, 'import numpy as np\n'), ((7214, 7230), 'numpy.arange', 'np.arange', (['(5)', '(10)'], {}), '(5, 10)\n', (7223, 7230), True, 'import numpy as np\n'), ((10305, 10321), 'numpy.argsort', 'np.argsort', (['corr'], {}), '(corr)\n', (10315, 10321), True, 'import numpy as np\n'), ((12176, 12201), 'numpy.dot', 'np.dot', 
(['iws[dnum]', 'proj.T'], {}), '(iws[dnum], proj.T)\n', (12182, 12201), True, 'import numpy as np\n'), ((12718, 12734), 'numpy.dot', 'np.dot', (['ki', 'kj.T'], {}), '(ki, kj.T)\n', (12724, 12734), True, 'import numpy as np\n'), ((14790, 14817), 'numpy.corrcoef', 'np.corrcoef', (['a[idx]', 'b[idx]'], {}), '(a[idx], b[idx])\n', (14801, 14817), True, 'import numpy as np\n'), ((1291, 1307), 'numpy.dot', 'np.dot', (['ki', 'kj.T'], {}), '(ki, kj.T)\n', (1297, 1307), True, 'import numpy as np\n'), ((3976, 4001), 'numpy.nonzero', 'np.nonzero', (['self.cancorrs'], {}), '(self.cancorrs)\n', (3986, 4001), True, 'import numpy as np\n'), ((7319, 7341), 'numpy.logspace', 'np.logspace', (['(-3)', '(1)', '(10)'], {}), '(-3, 1, 10)\n', (7330, 7341), True, 'import numpy as np\n'), ((9553, 9578), 'numpy.nonzero', 'np.nonzero', (['self.cancorrs'], {}), '(self.cancorrs)\n', (9563, 9578), True, 'import numpy as np\n'), ((13229, 13244), 'numpy.eye', 'np.eye', (['nFs[ii]'], {}), '(nFs[ii])\n', (13235, 13244), True, 'import numpy as np\n'), ((1843, 1858), 'numpy.eye', 'np.eye', (['nFs[ii]'], {}), '(nFs[ii])\n', (1849, 1858), True, 'import numpy as np\n'), ((5123, 5135), 'numpy.isnan', 'np.isnan', (['ev'], {}), '(ev)\n', (5131, 5135), True, 'import numpy as np\n'), ((8366, 8400), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'self.numCV'}), '(n_jobs=self.numCV)\n', (8381, 8400), False, 'import joblib\n'), ((14546, 14565), 'numpy.corrcoef', 'np.corrcoef', (['ai', 'aj'], {}), '(ai, aj)\n', (14557, 14565), True, 'import numpy as np\n'), ((8401, 8429), 'joblib.delayed', 'joblib.delayed', (['train_cvfold'], {}), '(train_cvfold)\n', (8415, 8429), False, 'import joblib\n')] |
from datetime import datetime
from pdb import set_trace
from time import time
import numpy as np
import tensorflow as tf
import torch
from deep_lagrangian_networks.replay_memory import PyTorchReplayMemory
from deep_lagrangian_networks.utils import init_env, load_dataset
from DeLaN_tensorflow_ddq import DeepLagrangianNetwork
from DeLaN_utils import plot_test2
class Train:
def __init__(self):
# Read the dataset:
n_dof = 2
cuda = 0
# train_data, test_data, self.divider = load_dataset()
train_data, test_data, self.divider = load_dataset(
filename="data/sine_track2.pickle"
)
(
self.train_labels,
self.train_qp,
self.train_qv,
self.train_qa,
self.train_tau,
) = train_data
(
self.test_labels,
self.test_qp,
self.test_qv,
self.test_qa,
self.test_tau,
self.test_m,
self.test_c,
self.test_g,
) = test_data
self.hyper = {
"n_width": 64,
"n_depth": 2,
"diagonal_epsilon": 0.01,
"activation": "softplus",
"b_init": 1.0e-4,
"b_diag_init": 0.001,
"w_init": "xavier_normal",
"gain_hidden": np.sqrt(2.0),
"gain_output": 0.1,
"n_minibatch": 512,
"learning_rate": 5.0e-04,
"weight_decay": 1.0e-5,
"max_epoch": 50000,
}
self.model = DeepLagrangianNetwork(n_dof, **self.hyper)
self.optimizer = tf.keras.optimizers.Adam(
learning_rate=self.hyper["learning_rate"], amsgrad=True
)
# Generate Replay Memory:
mem_dim = ((n_dof,), (n_dof,), (n_dof,), (n_dof,))
self.mem = PyTorchReplayMemory(
self.train_qp.shape[0], self.hyper["n_minibatch"], mem_dim, cuda
)
self.mem.add_samples(
[self.train_qp, self.train_qv, self.train_qa, self.train_tau]
)
# Information for saving model
self.checkpoint = tf.train.Checkpoint(net=self.model)
self.stamp = datetime.fromtimestamp(time()).strftime("%Y%m%d-%H%M%S")
def train(self):
# Print training information
print("\n\n################################################")
print("Characters:")
print(" Test Characters = {0}".format(self.test_labels))
print(" Train Characters = {0}".format(self.train_labels))
print("# Training Samples = {0:05d}".format(int(self.train_qp.shape[0])))
print("")
# Training Parameters:
print("\n################################################")
print("Training Deep Lagrangian Networks (DeLaN):")
for epoch_i in range(self.hyper["max_epoch"]):
l_mem, n_batches = 0.0, 0.0
for q, qd, qdd, tau in self.mem:
q_tf, qd_tf, qdd_tf, tau_tf = self.convert_to_tf(q, qd, qdd, tau)
loss = self.opt(q_tf, qd_tf, qdd_tf, tau_tf)
l_mem += loss
n_batches += 1
l_mem /= float(n_batches)
# if epoch_i == 1 or np.mod(epoch_i, 50) == 0:
print("Epoch {0:05d}: ".format(epoch_i), end=" ")
print("Loss = {0:.3e}\n".format(l_mem))
if np.mod(epoch_i + 1, 1000) == 0:
filename = "trained_models/{}/tf_model_{}".format(
self.stamp, epoch_i + 1
)
self.checkpoint.save("{}".format(filename))
def test(self, filename=None):
if filename is not None:
# Load pre-trained model
self.checkpoint.restore(filename)
# Get test data
q = tf.cast(self.test_qp, tf.float32)
dq = tf.cast(self.test_qv, tf.float32)
tau = tf.cast(self.test_tau, tf.float32)
# Calculate torque using test data
delan_ddq, delan_M, delan_C, delan_G = self.model(q, dq, tau)
delan_Mddq = tf.squeeze(delan_M @ self.test_qa[:, :, tf.newaxis])
# Get test error
mean_coeff = 1.0 / float(self.test_qp.shape[0])
err_g = mean_coeff * np.sum((delan_G - self.test_g) ** 2)
err_m = mean_coeff * np.sum((delan_Mddq - self.test_m) ** 2)
err_c = mean_coeff * np.sum((delan_C - self.test_c) ** 2)
err_qa = mean_coeff * np.sum((delan_ddq - self.test_qa) ** 2)
print("\nPerformance:")
print(" ddq MSE = {0:.3e}".format(err_qa))
print(" Inertial MSE = {0:.3e}".format(err_m))
print("Coriolis & Centrifugal MSE = {0:.3e}".format(err_c))
print(" Gravitational MSE = {0:.3e}".format(err_g))
plot_test2(
delan_ddq,
delan_Mddq,
delan_C,
delan_G,
self.test_qa,
self.test_m,
self.test_c,
self.test_g,
self.divider,
self.test_labels,
)
@tf.function
def opt(self, q_tf, qd_tf, qdd_tf, tau_tf):
with tf.GradientTape() as tape:
qdd_hat, _, _, _ = self.model(q_tf, qd_tf, tau_tf)
err = tf.math.reduce_sum(tf.square(qdd_hat - qdd_tf), axis=1)
loss = tf.reduce_mean(err)
grads = tape.gradient(loss, self.model.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
return loss
def convert_to_tf(self, q, qd, qdd, tau):
q_tf = tf.convert_to_tensor(q.cpu().numpy())
qd_tf = tf.convert_to_tensor(qd.cpu().numpy())
qdd_tf = tf.convert_to_tensor(qdd.cpu().numpy())
tau_tf = tf.convert_to_tensor(tau.cpu().numpy())
return q_tf, qd_tf, qdd_tf, tau_tf
def main():
tf.keras.backend.set_floatx("float32")
tf.debugging.set_log_device_placement(True)
gpus = tf.config.experimental.list_physical_devices("GPU")
tf.config.experimental.set_visible_devices(gpus[1], "GPU")
tf.config.experimental.set_memory_growth(gpus[1], True)
with tf.device("/device:GPU:1"):
train = Train()
# train.train()
# train.test()
train.test("trained_models/20210628-081643/tf_model_10000-10")
if __name__ == "__main__":
main()
| [
"DeLaN_tensorflow_ddq.DeepLagrangianNetwork",
"numpy.sum",
"deep_lagrangian_networks.replay_memory.PyTorchReplayMemory",
"tensorflow.train.Checkpoint",
"tensorflow.cast",
"tensorflow.keras.optimizers.Adam",
"tensorflow.squeeze",
"tensorflow.config.experimental.set_visible_devices",
"DeLaN_utils.plot... | [((5820, 5858), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float32"""'], {}), "('float32')\n", (5847, 5858), True, 'import tensorflow as tf\n'), ((5863, 5906), 'tensorflow.debugging.set_log_device_placement', 'tf.debugging.set_log_device_placement', (['(True)'], {}), '(True)\n', (5900, 5906), True, 'import tensorflow as tf\n'), ((5918, 5969), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (5962, 5969), True, 'import tensorflow as tf\n'), ((5974, 6032), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['gpus[1]', '"""GPU"""'], {}), "(gpus[1], 'GPU')\n", (6016, 6032), True, 'import tensorflow as tf\n'), ((6037, 6092), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[1]', '(True)'], {}), '(gpus[1], True)\n', (6077, 6092), True, 'import tensorflow as tf\n'), ((574, 622), 'deep_lagrangian_networks.utils.load_dataset', 'load_dataset', ([], {'filename': '"""data/sine_track2.pickle"""'}), "(filename='data/sine_track2.pickle')\n", (586, 622), False, 'from deep_lagrangian_networks.utils import init_env, load_dataset\n'), ((1559, 1601), 'DeLaN_tensorflow_ddq.DeepLagrangianNetwork', 'DeepLagrangianNetwork', (['n_dof'], {}), '(n_dof, **self.hyper)\n', (1580, 1601), False, 'from DeLaN_tensorflow_ddq import DeepLagrangianNetwork\n'), ((1627, 1713), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': "self.hyper['learning_rate']", 'amsgrad': '(True)'}), "(learning_rate=self.hyper['learning_rate'], amsgrad\n =True)\n", (1651, 1713), True, 'import tensorflow as tf\n'), ((1844, 1933), 'deep_lagrangian_networks.replay_memory.PyTorchReplayMemory', 'PyTorchReplayMemory', (['self.train_qp.shape[0]', "self.hyper['n_minibatch']", 'mem_dim', 'cuda'], {}), "(self.train_qp.shape[0], 
self.hyper['n_minibatch'],\n mem_dim, cuda)\n", (1863, 1933), False, 'from deep_lagrangian_networks.replay_memory import PyTorchReplayMemory\n'), ((2132, 2167), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'net': 'self.model'}), '(net=self.model)\n', (2151, 2167), True, 'import tensorflow as tf\n'), ((3784, 3817), 'tensorflow.cast', 'tf.cast', (['self.test_qp', 'tf.float32'], {}), '(self.test_qp, tf.float32)\n', (3791, 3817), True, 'import tensorflow as tf\n'), ((3831, 3864), 'tensorflow.cast', 'tf.cast', (['self.test_qv', 'tf.float32'], {}), '(self.test_qv, tf.float32)\n', (3838, 3864), True, 'import tensorflow as tf\n'), ((3879, 3913), 'tensorflow.cast', 'tf.cast', (['self.test_tau', 'tf.float32'], {}), '(self.test_tau, tf.float32)\n', (3886, 3913), True, 'import tensorflow as tf\n'), ((4049, 4101), 'tensorflow.squeeze', 'tf.squeeze', (['(delan_M @ self.test_qa[:, :, tf.newaxis])'], {}), '(delan_M @ self.test_qa[:, :, tf.newaxis])\n', (4059, 4101), True, 'import tensorflow as tf\n'), ((4770, 4911), 'DeLaN_utils.plot_test2', 'plot_test2', (['delan_ddq', 'delan_Mddq', 'delan_C', 'delan_G', 'self.test_qa', 'self.test_m', 'self.test_c', 'self.test_g', 'self.divider', 'self.test_labels'], {}), '(delan_ddq, delan_Mddq, delan_C, delan_G, self.test_qa, self.\n test_m, self.test_c, self.test_g, self.divider, self.test_labels)\n', (4780, 4911), False, 'from DeLaN_utils import plot_test2\n'), ((6103, 6129), 'tensorflow.device', 'tf.device', (['"""/device:GPU:1"""'], {}), "('/device:GPU:1')\n", (6112, 6129), True, 'import tensorflow as tf\n'), ((1343, 1355), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1350, 1355), True, 'import numpy as np\n'), ((4213, 4249), 'numpy.sum', 'np.sum', (['((delan_G - self.test_g) ** 2)'], {}), '((delan_G - self.test_g) ** 2)\n', (4219, 4249), True, 'import numpy as np\n'), ((4279, 4318), 'numpy.sum', 'np.sum', (['((delan_Mddq - self.test_m) ** 2)'], {}), '((delan_Mddq - self.test_m) ** 2)\n', (4285, 4318), True, 'import 
numpy as np\n'), ((4348, 4384), 'numpy.sum', 'np.sum', (['((delan_C - self.test_c) ** 2)'], {}), '((delan_C - self.test_c) ** 2)\n', (4354, 4384), True, 'import numpy as np\n'), ((4415, 4454), 'numpy.sum', 'np.sum', (['((delan_ddq - self.test_qa) ** 2)'], {}), '((delan_ddq - self.test_qa) ** 2)\n', (4421, 4454), True, 'import numpy as np\n'), ((5117, 5134), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5132, 5134), True, 'import tensorflow as tf\n'), ((5300, 5319), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['err'], {}), '(err)\n', (5314, 5319), True, 'import tensorflow as tf\n'), ((3374, 3399), 'numpy.mod', 'np.mod', (['(epoch_i + 1)', '(1000)'], {}), '(epoch_i + 1, 1000)\n', (3380, 3399), True, 'import numpy as np\n'), ((5244, 5271), 'tensorflow.square', 'tf.square', (['(qdd_hat - qdd_tf)'], {}), '(qdd_hat - qdd_tf)\n', (5253, 5271), True, 'import tensorflow as tf\n'), ((2212, 2218), 'time.time', 'time', ([], {}), '()\n', (2216, 2218), False, 'from time import time\n')] |
from __future__ import division
import numpy as np
import pybullet as p
"returns list of vertices"
def make_polygon(n, side_length):
    """Return the vertices of a regular n-gon with the given side length.

    The vertices lie on the circumscribed circle of radius
    ``side_length / (2*sin(pi/n))``; the first vertex sits at angle
    ``pi/n`` and subsequent vertices follow counter-clockwise.

    :param n: number of sides (and vertices)
    :param side_length: length of each polygon edge
    :return: list of (x, y) tuples, one per vertex
    """
    half_step = np.pi / n                            # half the central angle per edge
    radius = side_length / (2 * np.sin(half_step))   # circumradius of the polygon
    angles = half_step + 2 * half_step * np.arange(n)
    return [(radius * np.cos(a), radius * np.sin(a)) for a in angles]
def make_cylinder(n, side_length, height, width, CORRECTION_ANGLE=-0.29):
    """Build a hollow n-sided prism ("cylinder") as a pybullet multibody.

    One collision shape is created per polygon edge and attached as a fixed,
    massless link to a massless base, so the links form the side wall of the
    prism and the whole body is static.

    :param n: number of sides of the base polygon
    :param side_length: edge length of the base polygon
    :param height: wall height (scales the mesh's y dimension)
    :param width: wall thickness (scales the mesh's z dimension)
    :param CORRECTION_ANGLE: empirical offset in radians added to each face's
        orientation to line the mesh up with its edge -- origin of the value
        -0.29 is not documented here, TODO confirm
    :return: the body unique id returned by ``p.createMultiBody``
    """
    verts = make_polygon(n, side_length)
    verts = np.vstack(verts)  # (n, 2) array of base-polygon vertices
    # reference dimensions of the source .obj mesh; used to derive scale factors
    mesh_sl = 0.2
    mesh_h = 1
    mesh_w =0.05
    # NOTE(review): this first list of GEOM_BOX shapes is immediately
    # overwritten by the mesh shapes below, so the boxes are created but unused
    shape_indices = [p.createCollisionShape(p.GEOM_BOX, halfExtents = [side_length/2., height/2., width/2.]) for _ in verts]
    #shape_indices = [p.createCollisionShape(p.GEOM_MESH,fileName="../models/test.obj", meshScale=[side_length/mesh_sl, height/mesh_h, width/mesh_w]) for _ in verts]
    shape_indices = [p.createCollisionShape(p.GEOM_MESH,fileName="../../models/face8wide.obj", meshScale=[side_length/mesh_sl, height/mesh_h, width/mesh_w]) for _ in verts]
    #shape_indices = shape_indices[0:2]
    # interior angle of a regular n-gon, converted to radians
    angle = ((n-2)*180)/n
    angle = np.deg2rad(angle)
    phi = np.pi-angle  # exterior angle of the polygon
    # NOTE(review): this list is discarded and rebuilt empty two lines below
    link_orientations = [p.getQuaternionFromEuler((0,angle,0)) for i in shape_indices]
    link_positions = []
    link_orientations=[]
    # half-diagonal of one face's cross-section
    h = ((side_length/2)**2+(width/2)**2)**0.5
    # NOTE(review): rel_x / rel_y are computed but never used below
    rel_x = side_length/2+h*np.cos(phi)
    rel_y = width/2+h*np.sin(phi)
    curr_angle = angle
    curr_x = 0
    curr_y = 0
    for i in range(len(shape_indices)):
        #link_positions.append((verts[1],0, verts[0]))
        #link_positions.append((rel_y, rel_x,0))
        #link_positions.append((0.9, 0,-1.1))
        #link_positions.append((side_length-width+(i/10.), 0,-(side_length+width)))
        #curr_x += side_length*np.cos(angle)
        #curr_y += side_length*np.sin(angle)
        #curr_angle += angle
        # midpoint and direction vector of edge i; the last edge wraps back
        # around to vertex 0
        if i == len(shape_indices)-1:
            midpt =np.mean(np.vstack([verts[i], verts[0]]),axis=0)
            diff = verts[0]-verts[i]
        else:
            midpt =np.mean(np.vstack([verts[i], verts[i+1]]),axis=0)
            diff = verts[i+1]-verts[i]
        # orient the face along its edge; CORRECTION_ANGLE compensates for the
        # mesh's own orientation
        curr_angle = np.arctan2(diff[1],diff[0])-(np.pi/2.0)+np.pi+CORRECTION_ANGLE
        link_orientations.append(p.getQuaternionFromEuler((0,curr_angle,0)))
        x_width_shift = width
        y_width_shift = width
        # place the face at the edge midpoint, offset by the wall thickness;
        # note the (y, 0, x) axis order expected by the body frame here
        link_positions.append((midpt[1]+y_width_shift,0,midpt[0]+x_width_shift))
    #num_shapes = len(shape_indices)
    #for j in range(num_shapes):
    #    link_orientations.append(p.getQuaternionFromEuler((0,curr_angle,0)))
    #    link_positions.append(p.getQuaternionFromEuler((0,curr_angle,0)))
    #shape_indices = shape_indices[0:2]
    #link_orientations = link_orientations[0:2]
    #link_positions = link_positions[0:2]
    # every link hangs directly off the base (index 0)
    parent_indices = [0]*len(shape_indices)
    assert(len(shape_indices)==len(link_orientations)==len(link_positions))
    #p.createMultiBody(linkCollisionShapeIndices=shape_indices, linkPositions=link_positions, linkOrientations=link_orientations)
    # massless, shapeless base; each face is a fixed link, making the body static
    body = p.createMultiBody(baseMass=0,
                    baseCollisionShapeIndex=-1,
                    baseVisualShapeIndex=-1,
                    basePosition=[0,0,0],
                    baseOrientation=[1,0,0,0],
                    linkMasses=[0 for i in shape_indices],
                    linkCollisionShapeIndices=shape_indices,
                    linkVisualShapeIndices=[-1 for i in shape_indices],
                    linkPositions=link_positions,
                    linkOrientations = link_orientations,
                    linkInertialFramePositions=[[0,0,0,0] for _ in shape_indices],
                    linkInertialFrameOrientations=[[1,0,0,0] for _ in shape_indices],
                    linkParentIndices=parent_indices,
                    linkJointTypes=[p.JOINT_FIXED for _ in shape_indices],
                    linkJointAxis=[[1,0,0] for _ in shape_indices])
    return body
#verts = make_polygon(16, 1)
if __name__ == "__main__":
    # connect to the physics server with a GUI window so the result is visible
    p.connect(p.GUI)
    # build a 12-sided prism: side length 1, height 3, wall thickness 0.1
    make_cylinder(12,1,3,0.1)
    # drop into an interactive debugger so the scene can be inspected
    import ipdb; ipdb.set_trace()
#xs = [pt[0] for pt in verts]
#ys = [pt[1] for pt in verts]
#plt.plot(xs, ys)
#plt.show()
| [
"pybullet.getQuaternionFromEuler",
"pybullet.createMultiBody",
"numpy.arctan2",
"ipdb.set_trace",
"numpy.deg2rad",
"numpy.sin",
"numpy.cos",
"pybullet.connect",
"pybullet.createCollisionShape",
"numpy.vstack"
] | [((515, 531), 'numpy.vstack', 'np.vstack', (['verts'], {}), '(verts)\n', (524, 531), True, 'import numpy as np\n'), ((1125, 1142), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (1135, 1142), True, 'import numpy as np\n'), ((3052, 3702), 'pybullet.createMultiBody', 'p.createMultiBody', ([], {'baseMass': '(0)', 'baseCollisionShapeIndex': '(-1)', 'baseVisualShapeIndex': '(-1)', 'basePosition': '[0, 0, 0]', 'baseOrientation': '[1, 0, 0, 0]', 'linkMasses': '[(0) for i in shape_indices]', 'linkCollisionShapeIndices': 'shape_indices', 'linkVisualShapeIndices': '[(-1) for i in shape_indices]', 'linkPositions': 'link_positions', 'linkOrientations': 'link_orientations', 'linkInertialFramePositions': '[[0, 0, 0, 0] for _ in shape_indices]', 'linkInertialFrameOrientations': '[[1, 0, 0, 0] for _ in shape_indices]', 'linkParentIndices': 'parent_indices', 'linkJointTypes': '[p.JOINT_FIXED for _ in shape_indices]', 'linkJointAxis': '[[1, 0, 0] for _ in shape_indices]'}), '(baseMass=0, baseCollisionShapeIndex=-1,\n baseVisualShapeIndex=-1, basePosition=[0, 0, 0], baseOrientation=[1, 0,\n 0, 0], linkMasses=[(0) for i in shape_indices],\n linkCollisionShapeIndices=shape_indices, linkVisualShapeIndices=[(-1) for\n i in shape_indices], linkPositions=link_positions, linkOrientations=\n link_orientations, linkInertialFramePositions=[[0, 0, 0, 0] for _ in\n shape_indices], linkInertialFrameOrientations=[[1, 0, 0, 0] for _ in\n shape_indices], linkParentIndices=parent_indices, linkJointTypes=[p.\n JOINT_FIXED for _ in shape_indices], linkJointAxis=[[1, 0, 0] for _ in\n shape_indices])\n', (3069, 3702), True, 'import pybullet as p\n'), ((4205, 4221), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (4214, 4221), True, 'import pybullet as p\n'), ((4269, 4285), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (4283, 4285), False, 'import ipdb\n'), ((603, 701), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['p.GEOM_BOX'], {'halfExtents': 
'[side_length / 2.0, height / 2.0, width / 2.0]'}), '(p.GEOM_BOX, halfExtents=[side_length / 2.0, height /\n 2.0, width / 2.0])\n', (625, 701), True, 'import pybullet as p\n'), ((894, 1040), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['p.GEOM_MESH'], {'fileName': '"""../../models/face8wide.obj"""', 'meshScale': '[side_length / mesh_sl, height / mesh_h, width / mesh_w]'}), "(p.GEOM_MESH, fileName='../../models/face8wide.obj',\n meshScale=[side_length / mesh_sl, height / mesh_h, width / mesh_w])\n", (916, 1040), True, 'import pybullet as p\n'), ((1190, 1229), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['(0, angle, 0)'], {}), '((0, angle, 0))\n', (1214, 1229), True, 'import pybullet as p\n'), ((183, 200), 'numpy.sin', 'np.sin', (['int_angle'], {}), '(int_angle)\n', (189, 200), True, 'import numpy as np\n'), ((1376, 1387), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1382, 1387), True, 'import numpy as np\n'), ((1410, 1421), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1416, 1421), True, 'import numpy as np\n'), ((2251, 2295), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['(0, curr_angle, 0)'], {}), '((0, curr_angle, 0))\n', (2275, 2295), True, 'import pybullet as p\n'), ((282, 295), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (288, 295), True, 'import numpy as np\n'), ((301, 314), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (307, 314), True, 'import numpy as np\n'), ((1933, 1964), 'numpy.vstack', 'np.vstack', (['[verts[i], verts[0]]'], {}), '([verts[i], verts[0]])\n', (1942, 1964), True, 'import numpy as np\n'), ((2052, 2087), 'numpy.vstack', 'np.vstack', (['[verts[i], verts[i + 1]]'], {}), '([verts[i], verts[i + 1]])\n', (2061, 2087), True, 'import numpy as np\n'), ((2155, 2183), 'numpy.arctan2', 'np.arctan2', (['diff[1]', 'diff[0]'], {}), '(diff[1], diff[0])\n', (2165, 2183), True, 'import numpy as np\n')] |
import numpy as np
from scipy import integrate, stats
import autofit.graphical.messages.fixed
import autofit.graphical.messages.gamma
import autofit.graphical.messages.normal
import autofit.mapper.variable
from autofit import graphical as mp
def _test():
## define parameters of model
np.random.seed(1)
alpha, beta, gamma, delta = 2 / 3, 4 / 3, 1, 1
r = np.array([alpha, - gamma])
A = np.array([
[0., beta / alpha],
[delta / gamma, 0.]])
K = 1
noise = 0.1
# starting composition
y0 = np.array([1., 1.])
n_species = len(y0)
n_obs = 30
t_space = 1.
t_obs = np.r_[
0,
(np.arange(n_obs - 1) * t_space +
np.random.rand(n_obs - 1)) * t_space]
def lotka_volterra(t, z, r=r, A=A, K=K):
return z * r * (1 - A.dot(z) / K)
def calc_lotka_volterra(y0, r, A, K, t_obs):
res = integrate.solve_ivp(
lotka_volterra,
(t_obs[0], t_obs[-1]), y0, t_eval=t_obs,
args=(r, A, K),
method='BDF')
y_ = res.y
n = y_.shape[1]
n_obs = len(t_obs)
# make sure output is correct dimension
if n != n_obs:
y_ = np.c_[
y_, np.repeat(y_[:, [-1]],
n_obs - n, axis=1)][:, :n_obs]
if y_.shape[1] != n_obs:
raise Exception
return y_
y_true = calc_lotka_volterra(y0, r, A, K, t_obs)
y = y_true + noise * np.random.randn(n_species, n_obs)
## Specifying dimensions of problem
obs = autofit.mapper.variable.Plate(name='obs')
species = autofit.mapper.variable.Plate(name='species')
# Need to specify a second plate for species because
# A is (species, species) and we need a second plate
# to unique specify the second dimension
speciesA = autofit.mapper.variable.Plate(name='species')
dims = autofit.mapper.variable.Plate(name='dims')
## Specifying variables
r_ = autofit.mapper.variable.Variable('r', species)
A_ = autofit.mapper.variable.Variable('A', species, speciesA)
K_ = autofit.mapper.variable.Variable('K')
y0_ = autofit.mapper.variable.Variable('y0', species)
y_ = autofit.mapper.variable.Variable('y', species, obs)
y_obs_ = autofit.mapper.variable.Variable('y_obs', species, obs)
t_obs_ = autofit.mapper.variable.Variable('t_obs', obs)
_norm = stats.norm(loc=0, scale=noise)
_prior = stats.norm(loc=0, scale=10)
_prior_exp = stats.expon(loc=0, scale=1)
def _likelihood(y_obs, y):
return _norm.logpdf(y_obs - y)
## Specifying factors
likelihood = mp.Factor(_likelihood, y_obs=y_obs_, y=y_)
prior_A = mp.Factor(_prior.logpdf, 'prior_A', x=A_)
prior_r = mp.Factor(_prior.logpdf, 'prior_r', x=r_)
prior_y0 = mp.Factor(_prior_exp.logpdf, 'prior_y0', x=y0_)
# calc_lotka_volterra does not vectorise over
# multiple inputs, see `FactorNode._py_vec_call`
LV = mp.Factor(
calc_lotka_volterra, 'LV',
vectorised=False,
y0=y0_,
r=r_,
A=A_,
K=K_,
t_obs=t_obs_
) == y_
## Defining model
priors = prior_A * prior_r * prior_y0
LV_model = (likelihood * LV) * priors
LV_model._name = 'LV_model'
model_approx = mp.EPMeanField.from_kws(
LV_model,
{
A_: autofit.graphical.messages.normal.NormalMessage.from_mode(A, 100.),
r_: autofit.graphical.messages.normal.NormalMessage.from_mode(r, 100.),
y0_: autofit.graphical.messages.gamma.GammaMessage.from_mode(np.ones_like(y0), 1),
y_: autofit.graphical.messages.normal.NormalMessage.from_mode(y, 1),
K_: autofit.graphical.messages.fixed.FixedMessage(1),
y_obs_: autofit.graphical.messages.fixed.FixedMessage(y),
t_obs_: autofit.graphical.messages.fixed.FixedMessage(t_obs)
},
)
history = {}
n_iter = 1
factors = [f for f in LV_model.factors if f not in (LV,)]
np.random.seed(1)
opt = mp.optimise.LaplaceOptimiser(
n_iter=n_iter
)
for i in range(n_iter):
# perform least squares fit for LV model
model_approx, status = mp.lstsq_laplace_factor_approx(
model_approx,
LV
)
# perform laplace non linear fit for other factors
for factor in factors:
model_approx, status = mp.optimise.laplace_factor_approx(
model_approx,
factor,
status=status,
)
history[i, factor] = model_approx
# model_mean = {v: d.mean for v, d in model_approx.mean_field.items()}
# y_pred = LV_model(model_mean).deterministic_values[y_]
y_pred = model_approx.mean_field[y_].mean
assert np.square(y_pred - y).mean()**0.5 < 2
| [
"autofit.graphical.optimise.laplace_factor_approx",
"scipy.stats.norm",
"numpy.random.seed",
"autofit.graphical.Factor",
"numpy.ones_like",
"numpy.random.randn",
"numpy.square",
"scipy.integrate.solve_ivp",
"autofit.graphical.optimise.LaplaceOptimiser",
"numpy.array",
"autofit.graphical.lstsq_la... | [((296, 313), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (310, 313), True, 'import numpy as np\n'), ((374, 399), 'numpy.array', 'np.array', (['[alpha, -gamma]'], {}), '([alpha, -gamma])\n', (382, 399), True, 'import numpy as np\n'), ((409, 462), 'numpy.array', 'np.array', (['[[0.0, beta / alpha], [delta / gamma, 0.0]]'], {}), '([[0.0, beta / alpha], [delta / gamma, 0.0]])\n', (417, 462), True, 'import numpy as np\n'), ((541, 561), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (549, 561), True, 'import numpy as np\n'), ((2413, 2443), 'scipy.stats.norm', 'stats.norm', ([], {'loc': '(0)', 'scale': 'noise'}), '(loc=0, scale=noise)\n', (2423, 2443), False, 'from scipy import integrate, stats\n'), ((2457, 2484), 'scipy.stats.norm', 'stats.norm', ([], {'loc': '(0)', 'scale': '(10)'}), '(loc=0, scale=10)\n', (2467, 2484), False, 'from scipy import integrate, stats\n'), ((2502, 2529), 'scipy.stats.expon', 'stats.expon', ([], {'loc': '(0)', 'scale': '(1)'}), '(loc=0, scale=1)\n', (2513, 2529), False, 'from scipy import integrate, stats\n'), ((2646, 2688), 'autofit.graphical.Factor', 'mp.Factor', (['_likelihood'], {'y_obs': 'y_obs_', 'y': 'y_'}), '(_likelihood, y_obs=y_obs_, y=y_)\n', (2655, 2688), True, 'from autofit import graphical as mp\n'), ((2703, 2744), 'autofit.graphical.Factor', 'mp.Factor', (['_prior.logpdf', '"""prior_A"""'], {'x': 'A_'}), "(_prior.logpdf, 'prior_A', x=A_)\n", (2712, 2744), True, 'from autofit import graphical as mp\n'), ((2759, 2800), 'autofit.graphical.Factor', 'mp.Factor', (['_prior.logpdf', '"""prior_r"""'], {'x': 'r_'}), "(_prior.logpdf, 'prior_r', x=r_)\n", (2768, 2800), True, 'from autofit import graphical as mp\n'), ((2816, 2863), 'autofit.graphical.Factor', 'mp.Factor', (['_prior_exp.logpdf', '"""prior_y0"""'], {'x': 'y0_'}), "(_prior_exp.logpdf, 'prior_y0', x=y0_)\n", (2825, 2863), True, 'from autofit import graphical as mp\n'), ((4023, 4040), 'numpy.random.seed', 
'np.random.seed', (['(1)'], {}), '(1)\n', (4037, 4040), True, 'import numpy as np\n'), ((4052, 4095), 'autofit.graphical.optimise.LaplaceOptimiser', 'mp.optimise.LaplaceOptimiser', ([], {'n_iter': 'n_iter'}), '(n_iter=n_iter)\n', (4080, 4095), True, 'from autofit import graphical as mp\n'), ((888, 998), 'scipy.integrate.solve_ivp', 'integrate.solve_ivp', (['lotka_volterra', '(t_obs[0], t_obs[-1])', 'y0'], {'t_eval': 't_obs', 'args': '(r, A, K)', 'method': '"""BDF"""'}), "(lotka_volterra, (t_obs[0], t_obs[-1]), y0, t_eval=t_obs,\n args=(r, A, K), method='BDF')\n", (907, 998), False, 'from scipy import integrate, stats\n'), ((2977, 3075), 'autofit.graphical.Factor', 'mp.Factor', (['calc_lotka_volterra', '"""LV"""'], {'vectorised': '(False)', 'y0': 'y0_', 'r': 'r_', 'A': 'A_', 'K': 'K_', 't_obs': 't_obs_'}), "(calc_lotka_volterra, 'LV', vectorised=False, y0=y0_, r=r_, A=A_,\n K=K_, t_obs=t_obs_)\n", (2986, 3075), True, 'from autofit import graphical as mp\n'), ((4219, 4267), 'autofit.graphical.lstsq_laplace_factor_approx', 'mp.lstsq_laplace_factor_approx', (['model_approx', 'LV'], {}), '(model_approx, LV)\n', (4249, 4267), True, 'from autofit import graphical as mp\n'), ((1491, 1524), 'numpy.random.randn', 'np.random.randn', (['n_species', 'n_obs'], {}), '(n_species, n_obs)\n', (1506, 1524), True, 'import numpy as np\n'), ((4428, 4498), 'autofit.graphical.optimise.laplace_factor_approx', 'mp.optimise.laplace_factor_approx', (['model_approx', 'factor'], {'status': 'status'}), '(model_approx, factor, status=status)\n', (4461, 4498), True, 'from autofit import graphical as mp\n'), ((3593, 3609), 'numpy.ones_like', 'np.ones_like', (['y0'], {}), '(y0)\n', (3605, 3609), True, 'import numpy as np\n'), ((698, 723), 'numpy.random.rand', 'np.random.rand', (['(n_obs - 1)'], {}), '(n_obs - 1)\n', (712, 723), True, 'import numpy as np\n'), ((4807, 4828), 'numpy.square', 'np.square', (['(y_pred - y)'], {}), '(y_pred - y)\n', (4816, 4828), True, 'import numpy as np\n'), ((656, 676), 
'numpy.arange', 'np.arange', (['(n_obs - 1)'], {}), '(n_obs - 1)\n', (665, 676), True, 'import numpy as np\n'), ((1235, 1276), 'numpy.repeat', 'np.repeat', (['y_[:, [-1]]', '(n_obs - n)'], {'axis': '(1)'}), '(y_[:, [-1]], n_obs - n, axis=1)\n', (1244, 1276), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import sys
import numpy as np
import torch
import tritonclient.http as http_client
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
from dlrm.data.datasets import SyntheticDataset, SplitCriteoDataset
def get_data_loader(batch_size, *, data_path, model_config):
    """Create a DataLoader over Criteo split data or a synthetic stand-in.

    Categorical feature cardinalities are read from the JSON file named by
    ``model_config.dataset_config``. When ``data_path`` is given, a
    ``SplitCriteoDataset`` is built over it; otherwise a ``SyntheticDataset``
    of ``batch_size * 1024`` entries is used. If ``model_config.test_batches``
    is positive, the dataset is truncated to that many batches.

    :param batch_size: number of samples per inference batch
    :param data_path: path to the split Criteo data, or falsy for synthetic data
    :param model_config: namespace with dataset_config, drop_last_batch,
        num_numerical_features and test_batches attributes
    :return: a ``torch.utils.data.DataLoader`` yielding pre-batched samples
        (``batch_size=None``; batching is presumably done by the dataset itself
        -- confirm against the dataset classes)
    """
    with open(model_config.dataset_config) as config_file:
        feature_sizes = list(json.load(config_file).values())

    if data_path:
        dataset = SplitCriteoDataset(
            data_path=data_path,
            batch_size=batch_size,
            numerical_features=True,
            categorical_features=range(len(feature_sizes)),
            categorical_feature_sizes=feature_sizes,
            prefetch_depth=1,
            drop_last_batch=model_config.drop_last_batch,
        )
    else:
        dataset = SyntheticDataset(
            num_entries=batch_size * 1024,
            batch_size=batch_size,
            numerical_features=model_config.num_numerical_features,
            categorical_feature_sizes=feature_sizes,
            device="cpu",
        )

    limit = model_config.test_batches
    if limit > 0:
        # keep only the first `limit` batches for quick test runs
        dataset = torch.utils.data.Subset(dataset, list(range(limit)))

    return torch.utils.data.DataLoader(
        dataset, batch_size=None, num_workers=0, pin_memory=False)
def run_infer(model_name, model_version, numerical_features, categorical_features, headers=None):
    """Send one inference request to the Triton server and return the result.

    NOTE(review): relies on the module-level ``triton_client`` created in the
    ``__main__`` block below -- the function is not usable before that runs.

    :param model_name: name of the deployed Triton model
    :param model_version: model version; -1 means "latest" (empty version string)
    :param numerical_features: numpy array bound to 'input__0' (FP16 or FP32,
        chosen from its dtype)
    :param categorical_features: numpy array bound to 'input__1' as INT64
    :param headers: optional dict of extra HTTP headers
    :return: the InferResult returned by ``triton_client.infer``
    """
    num_type = "FP16" if numerical_features.dtype == np.float16 else "FP32"

    numeric_input = http_client.InferInput('input__0', numerical_features.shape, num_type)
    numeric_input.set_data_from_numpy(numerical_features, binary_data=True)

    categorical_input = http_client.InferInput('input__1', categorical_features.shape, "INT64")
    categorical_input.set_data_from_numpy(categorical_features, binary_data=False)

    requested_outputs = [http_client.InferRequestedOutput('output__0', binary_data=True)]
    version_string = str(model_version) if model_version != -1 else ''

    return triton_client.infer(model_name,
                               [numeric_input, categorical_input],
                               model_version=version_string,
                               outputs=requested_outputs,
                               headers=headers)
if __name__ == '__main__':
    # ---- command-line interface ------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--triton-server-url',
                        type=str,
                        required=True,
                        help='URL adress of triton server (with port)')
    parser.add_argument('--triton-model-name', type=str, required=True,
                        help='Triton deployed model name')
    parser.add_argument('--triton-model-version', type=int, default=-1,
                        help='Triton model version')
    parser.add_argument('-v',
                        '--verbose',
                        action="store_true",
                        required=False,
                        default=False,
                        help='Enable verbose output')
    parser.add_argument('-H', dest='http_headers', metavar="HTTP_HEADER",
                        required=False, action='append',
                        help='HTTP headers to add to inference server requests. ' +
                        'Format is -H"Header:Value".')
    parser.add_argument("--dataset_config", type=str, required=True)
    parser.add_argument("--inference_data", type=str,
                        help="Path to file with inference data.")
    parser.add_argument("--batch_size", type=int, default=1,
                        help="Inference request batch size")
    parser.add_argument("--drop_last_batch", type=bool, default=True,
                        help="Drops the last batch size if it's not full")
    parser.add_argument("--fp16", action="store_true", default=False,
                        help="Use 16bit for numerical input")
    parser.add_argument("--test_batches", type=int, default=0,
                        help="Specifies number of batches used in the inference")
    FLAGS = parser.parse_args()
    # ---- connect to the Triton server ------------------------------------
    try:
        triton_client = http_client.InferenceServerClient(url=FLAGS.triton_server_url, verbose=FLAGS.verbose)
    except Exception as e:
        print("channel creation failed: " + str(e))
        sys.exit(1)
    # parse repeated -H"Header:Value" options into a dict, if any were given
    if FLAGS.http_headers is not None:
        headers_dict = {l.split(':')[0]: l.split(':')[1]
                        for l in FLAGS.http_headers}
    else:
        headers_dict = None
    # ensure the model is loaded and ready before sending requests
    triton_client.load_model(FLAGS.triton_model_name)
    if not triton_client.is_model_ready(FLAGS.triton_model_name):
        sys.exit(1)
    dataloader = get_data_loader(FLAGS.batch_size,
                                 data_path=FLAGS.inference_data,
                                 model_config=FLAGS)
    results = []
    tgt_list = []
    # ---- run inference batch by batch ------------------------------------
    for numerical_features, categorical_features, target in tqdm(dataloader):
        numerical_features = numerical_features.cpu().numpy()
        # Triton input dtype must match the FP16/FP32 choice made in run_infer
        numerical_features = numerical_features.astype(np.float16 if FLAGS.fp16 else np.float32)
        categorical_features = categorical_features.long().cpu().numpy()
        output = run_infer(FLAGS.triton_model_name, FLAGS.triton_model_version,
                           numerical_features, categorical_features, headers_dict)
        results.append(output.as_numpy('output__0'))
        tgt_list.append(target.cpu().numpy())
    # ---- evaluate predictions against the targets ------------------------
    results = np.concatenate(results).squeeze()
    tgt_list = np.concatenate(tgt_list)
    score = roc_auc_score(tgt_list, results)
    print(f"Model score: {score}")
    statistics = triton_client.get_inference_statistics(model_name=FLAGS.triton_model_name, headers=headers_dict)
    print(statistics)
    # exactly one model's stats are expected; anything else signals a problem
    if len(statistics['model_stats']) != 1:
        print("FAILED: Inference Statistics")
        sys.exit(1)
| [
"tqdm.tqdm",
"json.load",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"dlrm.data.datasets.SyntheticDataset",
"sys.exit",
"sklearn.metrics.roc_auc_score",
"tritonclient.http.InferInput",
"tritonclient.http.InferenceServerClient",
"tritonclient.http.InferRequestedOutput",
"numpy.conc... | [((2762, 2849), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data'], {'batch_size': 'None', 'num_workers': '(0)', 'pin_memory': '(False)'}), '(data, batch_size=None, num_workers=0,\n pin_memory=False)\n', (2789, 2849), False, 'import torch\n'), ((3962, 3987), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3985, 3987), False, 'import argparse\n'), ((6531, 6547), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (6535, 6547), False, 'from tqdm import tqdm\n'), ((7109, 7133), 'numpy.concatenate', 'np.concatenate', (['tgt_list'], {}), '(tgt_list)\n', (7123, 7133), True, 'import numpy as np\n'), ((7147, 7179), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['tgt_list', 'results'], {}), '(tgt_list, results)\n', (7160, 7179), False, 'from sklearn.metrics import roc_auc_score\n'), ((2370, 2563), 'dlrm.data.datasets.SyntheticDataset', 'SyntheticDataset', ([], {'num_entries': '(batch_size * 1024)', 'batch_size': 'batch_size', 'numerical_features': 'model_config.num_numerical_features', 'categorical_feature_sizes': 'categorical_sizes', 'device': '"""cpu"""'}), "(num_entries=batch_size * 1024, batch_size=batch_size,\n numerical_features=model_config.num_numerical_features,\n categorical_feature_sizes=categorical_sizes, device='cpu')\n", (2386, 2563), False, 'from dlrm.data.datasets import SyntheticDataset, SplitCriteoDataset\n'), ((3190, 3260), 'tritonclient.http.InferInput', 'http_client.InferInput', (['"""input__0"""', 'numerical_features.shape', 'num_type'], {}), "('input__0', numerical_features.shape, num_type)\n", (3212, 3260), True, 'import tritonclient.http as http_client\n'), ((3280, 3351), 'tritonclient.http.InferInput', 'http_client.InferInput', (['"""input__1"""', 'categorical_features.shape', '"""INT64"""'], {}), "('input__1', categorical_features.shape, 'INT64')\n", (3302, 3351), True, 'import tritonclient.http as http_client\n'), ((3547, 3610), 'tritonclient.http.InferRequestedOutput', 
'http_client.InferRequestedOutput', (['"""output__0"""'], {'binary_data': '(True)'}), "('output__0', binary_data=True)\n", (3579, 3610), True, 'import tritonclient.http as http_client\n'), ((5751, 5841), 'tritonclient.http.InferenceServerClient', 'http_client.InferenceServerClient', ([], {'url': 'FLAGS.triton_server_url', 'verbose': 'FLAGS.verbose'}), '(url=FLAGS.triton_server_url, verbose=\n FLAGS.verbose)\n', (5784, 5841), True, 'import tritonclient.http as http_client\n'), ((6253, 6264), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6261, 6264), False, 'import sys\n'), ((7450, 7461), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7458, 7461), False, 'import sys\n'), ((5924, 5935), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5932, 5935), False, 'import sys\n'), ((7060, 7083), 'numpy.concatenate', 'np.concatenate', (['results'], {}), '(results)\n', (7074, 7083), True, 'import numpy as np\n'), ((1946, 1958), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1955, 1958), False, 'import json\n')] |
"""
Parameter_Extraction_For_Random Forest_Batch
============================================
Purpose:
--------
This module is used to extract parameters from the heatmaps of Normal, Tumor and Test slides
based on different thresholds (0.5, 0.6, 0.7, 0.8, 0.9).
Description:
------------
The input files:
- A directory:
- A csv file: it has the slide name and its label as normal or tumor, which serves as
ground truth.
The output files:
- A csv file: it included the following parameters for whole-slide image classification task:
::
global features
1. The ratio between the area of metastatic regions and the tissue area.
2. The sum of all cancer metastasis probabilities detected in the metastasis identification task, divided by the
   tissue area. Both features are calculated at 5 different thresholds (0.5, 0.6, 0.7, 0.8, 0.9), giving 10 global features in total.
local features
Based on 2 largest metastatic candidate regions (select them based on a threshold of 0.5). 9 features were extracted
from the 2 largest regions:
1. Area: the area of connected region
2. Eccentricity: The eccentricity of the ellipse that has the same second-moments as the region
3. Extent: The ratio of region area over the total bounding box area
4. Bounding box area
5. Major axis length: the length of the major axis of the ellipse that has the same normalized second central
moments as the region
6. Max/mean/min intensity: The max/mean/minimum probability value in the region
7. Aspect ratio of the bounding box
8. Solidity: Ratio of region area over the surrounding convex area
"""
import csv
import glob
import os
import random
import cv2
import numpy as np
import scipy.stats.stats as st
from skimage.measure import label
from skimage.measure import regionprops
from skimage.segmentation import clear_border
from skimage.morphology import closing, square
from matplotlib import cm
from tqdm import tqdm
from skimage.filters import threshold_otsu
from keras.models import load_model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os.path as osp
import openslide
from pathlib import Path
from skimage.filters import threshold_otsu
import glob
import math
# before importing HDFStore, make sure 'tables' is installed by pip3 install tables
from pandas import HDFStore
from openslide.deepzoom import DeepZoomGenerator
from sklearn.model_selection import StratifiedShuffleSplit
from keras.utils.np_utils import to_categorical
import os.path as osp
import openslide
from pathlib import Path
import numpy as np
import skimage.io as io
import skimage.transform as trans
import re
################################################################################################
# for global features
# ##############################################################################################
def glob_features(slide_path, heatmap):
    """
    Generate global (slide-level) features from a prediction heatmap.

    :param slide_path: The path for an individual WSI, not the path for the WSIs folder.
    :type slide_path: str
    :param heatmap: the heatmap created by the trained model during the prediction. The heatmap here
                    is the final assembled array from many small arrays.
    :type heatmap: array
    :return: global features (five threshold-ratio values followed by five
             probability-excess values, for thresholds 0.5 through 0.9)
    :rtype: list
    """
    # Read a 16x-downsampled thumbnail of the slide to segment tissue.
    with openslide.open_slide(slide_path) as slide:
        dtotal = (slide.dimensions[0] / 16, slide.dimensions[1] / 16)
        thumbnail = slide.get_thumbnail((dtotal[0], dtotal[1]))
    thum = np.array(thumbnail)
    # Otsu-threshold the H and S channels in HSV space to isolate tissue.
    hsv_image = cv2.cvtColor(thum, cv2.COLOR_RGB2HSV)
    h, s, v = cv2.split(hsv_image)
    hthresh = threshold_otsu(h)
    sthresh = threshold_otsu(s)
    # V channel lower bound fixed at 0 (could be tuned later).
    minhsv = np.array([hthresh, sthresh, 0], np.uint8)
    maxhsv = np.array([180, 255, 255], np.uint8)
    rgbbinary = cv2.inRange(hsv_image, minhsv, maxhsv)
    # Tissue area in pixels, used to normalise all tumor measures below.
    rgbbinaryarea = cv2.countNonZero(rgbbinary)

    thresholds = [0.5, 0.6, 0.7, 0.8, 0.9]
    # Fraction of tissue pixels whose tumor probability exceeds each threshold.
    ratio_cancer_tissue = [
        cv2.countNonZero((heatmap > t) * 1) / rgbbinaryarea
        for t in thresholds
    ]
    # Sum of the above-threshold probability excess, normalised by tissue area.
    ratio_sum_tissue = [
        (heatmap - t).clip(min=0).sum() / rgbbinaryarea
        for t in thresholds
    ]
    return ratio_cancer_tissue + ratio_sum_tissue
# ##########################################################################################
# for local features
# ##########################################################################################
def get_region_props(heatmapbinary, heatmap):
    """
    Label connected tumor regions and measure their properties.

    This is the most fundamental function for local feature extraction.
    :param heatmapbinary: the binary array generated by a threshold, for example, 0.5.
    :type heatmapbinary: array
    :param heatmap: The heatmap created by the trained model during the prediction. The heatmap here
                    is the final assembled array from many small arrays.
    :type heatmap: array
    :return: region properties
    :rtype: dataframe
    """
    # Drop regions touching the border, label connected components, then
    # measure each component against the raw probability map.
    cleaned = clear_border(heatmapbinary)
    return regionprops(label(cleaned), intensity_image=heatmap)
# 1. Area: the area of connected region
def get_largest_tumor_index_area(region_props):
    """
    Locate the largest tumor region.

    :param region_props: the results from get_region_props.
    :type region_props: dataframe
    :return: the largest tumor region's (index, area); (-1, -1) when empty.
    :rtype: tuple
    """
    best_index, best_area = -1, -1
    for idx, props in enumerate(region_props):
        # Strict '>' keeps the first region on ties, as before.
        if props['area'] > best_area:
            best_index, best_area = idx, props['area']
    return (best_index, best_area)
def get_second_largest_tumor_index_area(region_props, largest_index):
    """
    Locate the second largest tumor region.

    :param region_props: the results from get_region_props.
    :type region_props: dataframe
    :param largest_index: the index of the largest region
    :type largest_index: int
    :return: the second largest tumor region's (index, area); (-1, -1) when
             no strictly smaller region exists.
    :rtype: tuple
    """
    runner_up_index = -1
    runner_up_area = -1
    if region_props:
        # Regions equal in area to the largest are excluded (strict '<').
        largest_area = region_props[largest_index]['area']
        for idx, props in enumerate(region_props):
            area = props['area']
            if runner_up_area < area < largest_area:
                runner_up_index, runner_up_area = idx, area
    return (runner_up_index, runner_up_area)
# Major axis length: the length of the major axis of the ellipse that has the same normalized second central moments
# as the region
# def get_longest_axis_in_largest_tumor_region(region_props, tumor_region_index):
# tumor_region = region_props[tumor_region_index]
# return max(tumor_region['major_axis_length'], tumor_region['minor_axis_length'])
def local_features(heatmap, threshold):
    """
    The function is to be used for generation of local_features from heatmap
    :param heatmap: The heatmap created by the trained model during the prediction. The heatmap here
                    is the final assembled array from many small arrays.
    :type heatmap: array
    :param threshold: the threshold set for generating binary arrays.
    :type threshold: float
    :return: local features (10 measures for the largest lesion followed by
             10 for the second largest); a list of N_FEATURES zeros when no
             region is found. N_FEATURES is a module-level constant set in
             the __main__ block.
    :rtype: list
    """
    # Binarise the heatmap at the supplied probability threshold.
    # I used 0.5 as threshold here, but Dayong may use another value, for example 0.9
    heatmapbinary = (heatmap > threshold)*1
    #heatmapbinary = (heatmap > 0.5)*1
    # NOTE(review): 'features' is only appended to once below and never
    # returned; the returned list is 'localfeatures' — dead code to confirm.
    features = []
    # extract parameters from regionprops function of scikit-image
    region_props_largest = get_region_props(heatmapbinary, heatmap)
    number_tumor_region = len(region_props_largest)
    if number_tumor_region == 0:
        return [0.00] * N_FEATURES
    # else:
    # 1. Area: the area of connected region
    # the area and index of largest lesion:
    largest_lesion = get_largest_tumor_index_area(region_props_largest)
    largest_area = largest_lesion[1]
    largest_index = largest_lesion[0]
    # print(largest_area)
    # features.append(largest_area)
    # 2. Eccentricity: The eccentricity of the ellipse that has the same second-moments as the region
    eccentricity_largest = region_props_largest[largest_index]['eccentricity']
    # features.append(eccentricity_largest)
    # 3. Extend: The ratio of region area over the total bounding box area
    extend_largest = region_props_largest[largest_index]['extent']
    # features.append(extent_largest)
    # 4. Bounding box area
    area_bbox_largest = region_props_largest[largest_index]['bbox_area']
    # features.append(area_bbox_largest)
    # 5. Major axis length: the length of the major axis of the ellipse that has the same normalized second central
    # moments as the region
    major_axis_length_largest = region_props_largest[largest_index]['major_axis_length']
    features.append(major_axis_length_largest)
    # 6. Max/mean/min intensity: The max/mean/minimum probability value in the region
    maxprob_largest = region_props_largest[largest_index]['max_intensity']
    minprob_largest = region_props_largest[largest_index]['min_intensity']
    aveprob_largest = region_props_largest[largest_index]['mean_intensity']
    #features.append(maxprob_largest, minprob_largest, aveprob_largest)
    # 7. Aspect ratio of the bounding box (bbox is (min_row, min_col, max_row, max_col))
    coordinates_of_bbox_largest = region_props_largest[largest_index]['bbox']
    aspect_ratio_bbox_largest = (coordinates_of_bbox_largest[2]-coordinates_of_bbox_largest[0])/(
        coordinates_of_bbox_largest[3]-coordinates_of_bbox_largest[1])
    # features.append(aspect_ratio_bbox_largest)
    # 8. Solidity: Ratio of region area over the surrounding convex area
    solidity_largest = region_props_largest[largest_index]['solidity']
    # features.append(solidity_largest)
    # Repeat measures 1-8 for the second largest lesion.
    # NOTE(review): when only one region exists, second_largest_index stays -1
    # and region_props_largest[-1] silently reuses the last (i.e. only) region,
    # duplicating the largest lesion's features — confirm this is intended.
    second_largest_lesion = get_second_largest_tumor_index_area(
        region_props_largest, largest_index=largest_lesion[0])
    second_largest_area = second_largest_lesion[1]
    second_largest_index = second_largest_lesion[0]
    # features.append(second_largest_area)
    # 2. Eccentricity: The eccentricity of the ellipse that has the same second-moments as the region
    eccentricity_second_largest = region_props_largest[second_largest_index]['eccentricity']
    # features.append(eccentricity_second_largest)
    # 3. Extend: The ratio of region area over the total bounding box area
    extend_second_largest = region_props_largest[second_largest_index]['extent']
    # features.append(extent_second_largest)
    # 4. Bounding box area
    area_bbox_second_largest = region_props_largest[second_largest_index]['bbox_area']
    # features.append(area_bbox_second_largest)
    # 5. Major axis length: the length of the major axis of the ellipse that has the same normalized second central
    # moments as the region
    major_axis_length_second_largest = region_props_largest[
        second_largest_index]['major_axis_length']
    # features.append(major_axis_length_second_largest)
    # 6. Max/mean/min intensity: The max/mean/minimum probability value in the region
    maxprob_second_largest = region_props_largest[second_largest_index]['max_intensity']
    minprob_second_largest = region_props_largest[second_largest_index]['min_intensity']
    aveprob_second_largest = region_props_largest[second_largest_index]['mean_intensity']
    #features.append(maxprob_second_largest, minprob_second_largest, aveprob_second_largest)
    # 7. Aspect ratio of the bounding box
    coordinates_of_bbox_second_largest = region_props_largest[second_largest_index]['bbox']
    aspect_ratio_bbox_second_largest = (coordinates_of_bbox_second_largest[2]-coordinates_of_bbox_second_largest[0])/(
        coordinates_of_bbox_second_largest[3]-coordinates_of_bbox_second_largest[1])
    # features.extend(aspect_ratio_bbox_second_largest)
    # 8. Solidity: Ratio of region area over the surrounding convex area
    solidity_second_largest = region_props_largest[second_largest_index]['solidity']
    # features.append(solidity_second_largest)
    localfeatures = [largest_area, eccentricity_largest, extend_largest, area_bbox_largest, major_axis_length_largest,
                     maxprob_largest, minprob_largest, aveprob_largest, aspect_ratio_bbox_largest, solidity_largest,
                     second_largest_area, eccentricity_second_largest, extend_second_largest, area_bbox_second_largest,
                     major_axis_length_second_largest, maxprob_second_largest, minprob_second_largest,
                     aveprob_second_largest, aspect_ratio_bbox_second_largest, solidity_second_largest]
    return localfeatures
def create_folder(result_folder, sub_folder):
    """
    Create (if needed) and return the path ``result_folder/sub_folder``.

    :param sub_folder: the folder to be created.
    :type sub_folder: str
    :param result_folder: the folder to store the results
    :return: folder_to_create, the full path (whether newly created or pre-existing)
    :rtype: str
    """
    folder_to_create = osp.join(result_folder, sub_folder)
    print(folder_to_create)
    try:
        os.makedirs(folder_to_create)
    except FileExistsError:
        # Only swallow the "already exists" case; other OS errors
        # (e.g. permissions) should surface instead of being hidden
        # by the previous bare 'except:'.
        print('Folder exists, skip folder creation')
    return folder_to_create
if __name__ == "__main__":
    # Build one CSV of global + local heatmap features per (threshold, dataset)
    # combination, for downstream random-forest training/evaluation.
    N_FEATURES = 30
    # heatmap_path = '/home/wli/Downloads/pred/'
    # heatmap_paths = glob.glob(osp.join(heatmap_path, '*.npy'))
    # slide_path = '/home/wli/Downloads/Camelyon16/training/tumor'
    # result_folder = '/home/wzli/Downloads/RF_parameters_64stride'
    result_folder = '/raidb/wli/testing_1219/test_roc'
    # Column names: slide name, tumor label, then the 30 feature columns.
    cols = ['name', 'tumor', 'ratio_cancer_tissue50', 'ratio_cancer_tissue60', 'ratio_cancer_tissue70', 'ratio_cancer_tissue80', 'ratio_cancer_tissue90', 'ratio_sum_tissue50', 'ratio_sum_tissue60', 'ratio_sum_tissue70', 'ratio_sum_tissue80', 'ratio_sum_tissue90', 'largest_area', 'eccentricity_largest', 'extend_largest', 'area_bbox_largest', 'major_axis_length_largest', 'maxprob_largest',
            'minprob_largest', 'aveprob_largest', 'aspect_ratio_bbox_largest', 'solidity_largest', 'second_largest_area', 'eccentricity_second_largest', 'extend_second_largest', 'area_bbox_second_largest', 'major_axis_length_second_largest', 'maxprob_second_largest', 'minprob_second_largest', 'aveprob_second_largest', 'aspect_ratio_bbox_second_largest', 'solidity_second_largest']
    # Probability thresholds to binarise the heatmaps with.
    list_thres = [0.5, 0.6, 0.7, 0.8, 0.9]
    list_slide_path = ['/raida/wjc/CAMELYON16/training/normal/',
                       '/raida/wjc/CAMELYON16/training/tumor/', '/raida/wjc/CAMELYON16/testing/images/']
    #list_heatmap_path = ['/home/wzli/Method_II_Model_I_HNM_no_norm/normal_0425_new', '/home/wzli/Method_II_Model_I_HNM_no_norm/tumor_0425_new', '/home/wzli/Method_II_Model_I_HNM_no_norm/test_0425_new_new']
    list_heatmap_path = ['/raidb/wli/Final_Results/Heat_map/Method_II/color_noise_color_normalization/Method_II_Model_I_norm/normal_0506',
                         '/raidb/wli/Final_Results/Heat_map/Method_II/color_noise_color_normalization/Method_II_Model_I_norm/tumor_0506',
                         '/raidb/wli/Final_Results/Heat_map/Method_II/color_noise_color_normalization/Method_II_Model_I_norm/test_0506']
    list_names = ['normal', 'tumor', 'test']
    for m in list_thres:
        # One result sub-folder per threshold (e.g. '5.0' for 0.5).
        new_folder = create_folder(result_folder, str(m*10))
        print(new_folder)
        for n in [0, 1, 2]:
            # Slides and heatmaps are paired later by matching basenames.
            slide_paths = glob.glob(osp.join(list_slide_path[n], '*.tif'))
            slide_paths.sort()
            # print(slide_paths)
            heatmap_paths = glob.glob(osp.join(list_heatmap_path[n], '*.npy'))
            heatmap_paths.sort()
            # print(heatmap_paths)
            totalfeatures = []
            for i in range(len(heatmap_paths)):
                try:
                    heatmap = np.load(heatmap_paths[i])
                # the except here will give two same entry in the final table.
                # NOTE(review): reassigning 'i' only affects this iteration;
                # the next loop pass resets it, so the skipped-to file is
                # processed twice — matches the comment above, confirm intended.
                except ValueError:
                    i = i+1
                    heatmap = np.load(heatmap_paths[i])
                # downsample the heatmap by 4, which is equal to stride 56
                heatmap = heatmap[0::4, 0::4]
                # Find the slide whose name matches this heatmap's basename.
                new_slide_path = [x for x in slide_paths if re.search(
                    osp.basename(heatmap_paths[i]).replace('.npy', '.tif'), x)]
                slide_path = new_slide_path[0]
                #slide_path = glob.glob(osp.join(slide_path, os.rename(split(basename(heatmap_path[i])))))
                #data_sheet_for_random_forest.at[i, 'name'] = osp.basename(slide_paths[i])
                heatmapbinary_lesion = (heatmap > m)*1
                #heatmapbinary_lesion = (heatmap > 0.5)*1
                # Skip feature extraction when no lesion region survives the threshold.
                number_lesion = len(get_region_props(
                    heatmapbinary_lesion, heatmap))
                if number_lesion == 0:
                    features = [0.00]*N_FEATURES
                else:
                    features = glob_features(
                        slide_path, heatmap) + local_features(heatmap, m)
                # Label: 1 for tumor slides, NaN for test slides (unknown), 0 for normal.
                slide_contains_tumor = osp.basename(
                    slide_path).startswith('tumor_')
                slide_test = osp.basename(slide_path).startswith('test_')
                if slide_contains_tumor:
                    features = [1] + features
                    #data_sheet_for_random_forest.at[i, 'tumor'] = 1
                elif slide_test:
                    features = [np.nan] + features
                else:
                    features = [0] + features
                    #data_sheet_for_random_forest.at[i, 'tumor'] = 0
                # add file name to the first column.
                features = [osp.basename(slide_path)] + features
                #data_sheet_for_random_forest = data_sheet_for_random_forest.append(features)
                print(features)
                totalfeatures.append(features)
                #data_sheet_for_random_forest.append(pd.Series(features, index=cols[:]), ignore_index=True)
                #data_sheet_for_random_forest = data_sheet_for_random_forest.append(pd.Series(features, index=cols[2:]), ignore_index=True)
                #data_sheet_for_random_forest.at[i, 'name'] = osp.basename(slide_paths[i])
            # Persist one CSV per (threshold, dataset) pair.
            data_sheet_for_random_forest = pd.DataFrame(
                totalfeatures, columns=cols)
            data_sheet_for_random_forest.to_csv(
                '%s/data_sheet_for_random_forest_56_strike_%s_%d_1023_Method_II_Model_I_norm.csv' % (new_folder, list_names[n], m*10))
| [
"pandas.DataFrame",
"numpy.load",
"skimage.filters.threshold_otsu",
"os.makedirs",
"cv2.countNonZero",
"cv2.cvtColor",
"os.path.basename",
"openslide.open_slide",
"cv2.inRange",
"skimage.segmentation.clear_border",
"skimage.measure.label",
"numpy.array",
"cv2.split",
"os.path.join",
"ski... | [((6990, 7017), 'skimage.segmentation.clear_border', 'clear_border', (['heatmapbinary'], {}), '(heatmapbinary)\n', (7002, 7017), False, 'from skimage.segmentation import clear_border\n'), ((7036, 7056), 'skimage.measure.label', 'label', (['heatmapbinary'], {}), '(heatmapbinary)\n', (7041, 7056), False, 'from skimage.measure import label\n'), ((7068, 7117), 'skimage.measure.regionprops', 'regionprops', (['labeled_img'], {'intensity_image': 'heatmap'}), '(labeled_img, intensity_image=heatmap)\n', (7079, 7117), False, 'from skimage.measure import regionprops\n'), ((15293, 15328), 'os.path.join', 'osp.join', (['result_folder', 'sub_folder'], {}), '(result_folder, sub_folder)\n', (15301, 15328), True, 'import os.path as osp\n'), ((3430, 3462), 'openslide.open_slide', 'openslide.open_slide', (['slide_path'], {}), '(slide_path)\n', (3450, 3462), False, 'import openslide\n'), ((3622, 3641), 'numpy.array', 'np.array', (['thumbnail'], {}), '(thumbnail)\n', (3630, 3641), True, 'import numpy as np\n'), ((3728, 3765), 'cv2.cvtColor', 'cv2.cvtColor', (['thum', 'cv2.COLOR_RGB2HSV'], {}), '(thum, cv2.COLOR_RGB2HSV)\n', (3740, 3765), False, 'import cv2\n'), ((3843, 3863), 'cv2.split', 'cv2.split', (['hsv_image'], {}), '(hsv_image)\n', (3852, 3863), False, 'import cv2\n'), ((3882, 3899), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['h'], {}), '(h)\n', (3896, 3899), False, 'from skimage.filters import threshold_otsu\n'), ((3918, 3935), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['s'], {}), '(s)\n', (3932, 3935), False, 'from skimage.filters import threshold_otsu\n'), ((4037, 4078), 'numpy.array', 'np.array', (['[hthresh, sthresh, 0]', 'np.uint8'], {}), '([hthresh, sthresh, 0], np.uint8)\n', (4045, 4078), True, 'import numpy as np\n'), ((4154, 4189), 'numpy.array', 'np.array', (['[180, 255, 255]', 'np.uint8'], {}), '([180, 255, 255], np.uint8)\n', (4162, 4189), True, 'import numpy as np\n'), ((4286, 4330), 'cv2.inRange', 'cv2.inRange', (['hsv_image', 
'thresh[0]', 'thresh[1]'], {}), '(hsv_image, thresh[0], thresh[1])\n', (4297, 4330), False, 'import cv2\n'), ((4355, 4382), 'cv2.countNonZero', 'cv2.countNonZero', (['rgbbinary'], {}), '(rgbbinary)\n', (4371, 4382), False, 'import cv2\n'), ((15374, 15403), 'os.makedirs', 'os.makedirs', (['folder_to_create'], {}), '(folder_to_create)\n', (15385, 15403), False, 'import os\n'), ((4617, 4654), 'cv2.countNonZero', 'cv2.countNonZero', (['(predthreshold50 * 1)'], {}), '(predthreshold50 * 1)\n', (4633, 4654), False, 'import cv2\n'), ((4712, 4749), 'cv2.countNonZero', 'cv2.countNonZero', (['(predthreshold60 * 1)'], {}), '(predthreshold60 * 1)\n', (4728, 4749), False, 'import cv2\n'), ((4807, 4844), 'cv2.countNonZero', 'cv2.countNonZero', (['(predthreshold70 * 1)'], {}), '(predthreshold70 * 1)\n', (4823, 4844), False, 'import cv2\n'), ((4902, 4939), 'cv2.countNonZero', 'cv2.countNonZero', (['(predthreshold80 * 1)'], {}), '(predthreshold80 * 1)\n', (4918, 4939), False, 'import cv2\n'), ((4997, 5034), 'cv2.countNonZero', 'cv2.countNonZero', (['(predthreshold90 * 1)'], {}), '(predthreshold90 * 1)\n', (5013, 5034), False, 'import cv2\n'), ((20508, 20549), 'pandas.DataFrame', 'pd.DataFrame', (['totalfeatures'], {'columns': 'cols'}), '(totalfeatures, columns=cols)\n', (20520, 20549), True, 'import pandas as pd\n'), ((17691, 17728), 'os.path.join', 'osp.join', (['list_slide_path[n]', '"""*.tif"""'], {}), "(list_slide_path[n], '*.tif')\n", (17699, 17728), True, 'import os.path as osp\n'), ((17833, 17872), 'os.path.join', 'osp.join', (['list_heatmap_path[n]', '"""*.npy"""'], {}), "(list_heatmap_path[n], '*.npy')\n", (17841, 17872), True, 'import os.path as osp\n'), ((18073, 18098), 'numpy.load', 'np.load', (['heatmap_paths[i]'], {}), '(heatmap_paths[i])\n', (18080, 18098), True, 'import numpy as np\n'), ((18271, 18296), 'numpy.load', 'np.load', (['heatmap_paths[i]'], {}), '(heatmap_paths[i])\n', (18278, 18296), True, 'import numpy as np\n'), ((19309, 19333), 'os.path.basename', 
'osp.basename', (['slide_path'], {}), '(slide_path)\n', (19321, 19333), True, 'import os.path as osp\n'), ((19405, 19429), 'os.path.basename', 'osp.basename', (['slide_path'], {}), '(slide_path)\n', (19417, 19429), True, 'import os.path as osp\n'), ((19914, 19938), 'os.path.basename', 'osp.basename', (['slide_path'], {}), '(slide_path)\n', (19926, 19938), True, 'import os.path as osp\n'), ((18510, 18540), 'os.path.basename', 'osp.basename', (['heatmap_paths[i]'], {}), '(heatmap_paths[i])\n', (18522, 18540), True, 'import os.path as osp\n')] |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import PyNvCodec as nvc
import numpy as np
gpuID = 0

# Use context managers so the input/output file handles are always closed,
# even if encoding raises (the original left both files open).
with open("big_buck_bunny_1080p_h264.nv12", "rb") as decFile, \
        open("big_buck_bunny_1080p_h264.h264", "wb") as encFile:
    nvEnc = nvc.PyNvEncoder({'preset': 'hq', 'codec': 'h264', 's': '1920x1080'}, gpuID)
    nvUpl = nvc.PyFrameUploader(nvEnc.Width(), nvEnc.Height(), nvEnc.PixelFormat(), gpuID)

    # Size of raw Full HD NV12 frame is 1920 * (1080 + 540)
    nv12FrameSize = 1920 * (1080 + 540)

    while True:
        # Read one raw NV12 frame; an empty read means end of input.
        rawFrame = np.fromfile(decFile, dtype=np.uint8, count=nv12FrameSize)
        if not rawFrame.size:
            break

        rawSurface = nvUpl.UploadSingleFrame(rawFrame)
        if rawSurface.Empty():
            break

        # Encoder may buffer frames; write out whatever it returns now.
        encFrame = nvEnc.EncodeSingleSurface(rawSurface)
        if encFrame.size:
            encByteArray = bytearray(encFrame)
            encFile.write(encByteArray)

    # Encoder is asynchronous, so we need to flush the buffered frames
    # before the output file closes.
    encFrames = nvEnc.Flush()
    for encFrame in encFrames:
        if encFrame.size:
            encByteArray = bytearray(encFrame)
            encFile.write(encByteArray)
| [
"numpy.fromfile",
"PyNvCodec.PyNvEncoder"
] | [((759, 834), 'PyNvCodec.PyNvEncoder', 'nvc.PyNvEncoder', (["{'preset': 'hq', 'codec': 'h264', 's': '1920x1080'}", 'gpuID'], {}), "({'preset': 'hq', 'codec': 'h264', 's': '1920x1080'}, gpuID)\n", (774, 834), True, 'import PyNvCodec as nvc\n'), ((1043, 1100), 'numpy.fromfile', 'np.fromfile', (['decFile'], {'dtype': 'np.uint8', 'count': 'nv12FrameSize'}), '(decFile, dtype=np.uint8, count=nv12FrameSize)\n', (1054, 1100), True, 'import numpy as np\n')] |
from __future__ import print_function
import json
import numpy as np
from networkx.readwrite import json_graph
from argparse import ArgumentParser
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error, r2_score, explained_variance_score
from sklearn.dummy import DummyRegressor
''' To evaluate the embeddings, we run a logistic regression.
Run this script after running unsupervised training.
Baseline of using features-only can be run by setting data_dir as 'feat'
Example:
python eval_scripts/lkpg-osm_eval.py ../data/lkpg-osm unsup-lkpg-osm/n2v_big_0.000010 test
'''
def run_dummy_regressor(train_embeds, train_targets, test_embeds, test_targets, scaler=None):
    """Fit a median DummyRegressor baseline and print its test-set metrics.

    :param train_embeds: training embeddings/features.
    :param train_targets: training target values.
    :param test_embeds: test embeddings/features.
    :param test_targets: test target values.
    :param scaler: optional fitted target scaler; when given, the RMSE is
        mapped back to the original target units via ``inverse_transform``.
    """
    dummy = DummyRegressor(strategy="median")
    dummy.fit(train_embeds, train_targets)
    # Predict once and reuse (the original re-predicted for every metric).
    predictions = dummy.predict(test_embeds)

    rmse = mean_squared_error(test_targets, predictions, squared=False)
    # 'is not None' instead of '!= None'; the two original branches were
    # otherwise identical, so only the inverse-transform step is conditional.
    if scaler is not None:
        rmse = scaler.inverse_transform(np.array(rmse).reshape(1, -1))[0][0]
    print('Dummy regressor RMSE:', rmse)
    print('Dummy regressor MAPE:', mean_absolute_percentage_error(test_targets, predictions))
    print('Dummy regressor R2:', r2_score(test_targets, predictions))
def _regression_metrics(targets, predictions, scaler=None):
    """Return [RMSE, MAPE, R2, explained variance] for one data split.

    When ``scaler`` is given, the RMSE is mapped back to original target
    units via ``inverse_transform`` (matching the previous inline logic).
    """
    rmse = mean_squared_error(targets, predictions, squared=False)
    if scaler is not None:
        rmse = scaler.inverse_transform(np.array(rmse).reshape(1, -1))[0][0]
    return [rmse,
            mean_absolute_percentage_error(targets, predictions),
            r2_score(targets, predictions),
            explained_variance_score(targets, predictions)]


def run_regression(train_embeds, train_targets, test_embeds, test_targets, scaler=None):
    """Fit an SGD regressor on the embeddings and compute train/test metrics.

    :param train_embeds: training embeddings/features.
    :param train_targets: training target values (2-D; ravelled for fitting).
    :param test_embeds: test embeddings/features.
    :param test_targets: test target values.
    :param scaler: optional fitted target scaler, forwarded to the metrics.
    :return: (train_metrics, test_metrics), each [RMSE, MAPE, R2, expl_var].
    """
    print('Ravelling train targets...')
    train_targets_ravelled = train_targets.ravel()
    print('Fitting squared loss Regressor...')
    print(np.shape(train_embeds), np.shape(train_targets_ravelled))
    regressor = SGDRegressor(loss="squared_loss", max_iter=1000, tol=1e-3)
    regressor.fit(train_embeds, train_targets_ravelled)
    print('Predicting outputs...')
    # Predict once per split; the duplicated metric code is now the helper.
    tr_res = _regression_metrics(train_targets, regressor.predict(train_embeds), scaler)
    ts_res = _regression_metrics(test_targets, regressor.predict(test_embeds), scaler)
    return tr_res, ts_res
def calculate_results(tr_res, ts_res, setting):
    """Aggregate repeated-run metrics into mean/std dictionaries.

    :param tr_res: array of shape (n_runs, 4) with train metrics per run
        (columns: RMSE, MAPE, R2, expl_var).
    :param ts_res: array of shape (n_runs, 4) with val/test metrics per run.
    :param setting: key prefix for the second dictionary ('val' or 'test').
    :return: [train_stats, eval_stats] — two dicts keyed
        '<prefix>_<metric>_avg' and '<prefix>_<metric>_std'.
    """
    cols = ['RMSE', 'MAPE', 'R2', 'expl_var']
    stats = []
    for prefix, result in zip(['train', setting], [tr_res, ts_res]):
        # 'res' replaces the original variable named 'dict', which
        # shadowed the builtin.
        res = {}
        for i, col in enumerate(cols):
            res[prefix + '_' + col + '_avg'] = np.mean(result[:, i])
            res[prefix + '_' + col + '_std'] = np.std(result[:, i])
        stats.append(res)
    return stats
def evaluate(train_prefix, embed_dir, *, setting ='val',n_iter=100, scaling=False):
    """Evaluate node embeddings (or raw features) with repeated regressions.

    :param train_prefix: path prefix of the dataset files
        ('<prefix>-G.json', '<prefix>-target_map.json', and, for the
        feature baseline, '<prefix>-feats.npy' / '<prefix>-id_map.json').
    :param embed_dir: directory with 'val.npy'/'val.txt' embeddings, or the
        literal string 'feat' to use raw node features instead.
    :param setting: which held-out split to score, 'val' or 'test'.
    :param n_iter: number of regression re-runs (for mean/std statistics).
    :param scaling: when True, min-max scale the targets (fit on train).
    :return: (train_stats, eval_stats) dicts from calculate_results.
    """
    print("Loading data...")
    # NOTE(review): G.node is the networkx<2.4 attribute API — confirm the
    # pinned networkx version before upgrading.
    G = json_graph.node_link_graph(json.load(open(train_prefix + "-G.json")))
    targets = json.load(open(train_prefix + "-target_map.json"))
    # targets = {int(i): l for i, l in targets.iteritems()}
    targets = {int(i): float(l) for i, l in targets.items()}
    # Train nodes are those flagged neither 'val' nor 'test'.
    train_ids = [n for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']]
    test_ids = [n for n in G.nodes() if G.node[n][setting]]
    train_targets = np.array([targets[i] for i in train_ids])
    if train_targets.ndim == 1:
        train_targets = np.expand_dims(train_targets, 1)
    test_targets = np.array([targets[i] for i in test_ids])
    if test_targets.ndim == 1:
        test_targets = np.expand_dims(test_targets, 1)
    if scaling:
        print('Scaling targets...')
        from sklearn.preprocessing import StandardScaler, MinMaxScaler
        # Fit the scaler on train targets only, then apply to both splits.
        target_scaler = MinMaxScaler()
        target_scaler.fit(train_targets)
        train_targets = target_scaler.transform(train_targets)
        test_targets = target_scaler.transform(test_targets)
    else:
        target_scaler=None
    print('train targets:', np.shape(train_targets))
    print('test targets:', np.shape(test_targets))
    if embed_dir == "feat":
        # Raw-features baseline: no learned embeddings at all.
        print("Using only features..")
        feats = np.load(train_prefix + "-feats.npy")
        #TODO: Test using histograms only
        # feats = feats[:,:19]
        # print('----- FEATURES----')
        # for val in feats[0]:
        #     print(val)
        # print('^^^^^ FEATURES ^^^^^')
        # ## Logistic gets thrown off by big counts, so log transform num comments and score
        # feats[:, 0] = np.log(feats[:, 0] + 1.0)
        # feats[:, 1] = np.log(feats[:, 1] - min(np.min(feats[:, 1]), -1))
        feat_id_map = json.load(open( train_prefix + "-id_map.json"))
        feat_id_map = {int(id): val for id, val in feat_id_map.items()}
        train_feats = feats[[feat_id_map[id] for id in train_ids]]
        test_feats = feats[[feat_id_map[id] for id in test_ids]]
        print('train features:',np.shape(train_feats))
        print('test features:',np.shape(test_feats))
        print('Scaling features...')
        from sklearn.preprocessing import StandardScaler
        scaler = StandardScaler()
        # scaler.fit(train_feats)
        # train_feats = scaler.transform(train_feats)
        # test_feats = scaler.transform(test_feats)
        # Exclude histogram features from scaling!
        # Only the first 19 columns are standardised; the rest are
        # presumably histogram bins — TODO confirm against the dataset.
        scaler.fit(train_feats[:,:19])
        train_feats[:,:19] = scaler.transform(train_feats[:,:19])
        test_feats[:,:19] = scaler.transform(test_feats[:,:19])
        print("Running regression...")
        run_dummy_regressor(train_feats, train_targets, test_feats, test_targets, target_scaler)
        all_tr_res = []
        all_ts_res = []
        # Repeat the (stochastic) SGD regression n_iter times.
        for i in range(n_iter):
            tr_res, ts_res = run_regression(train_feats, train_targets, test_feats, test_targets, target_scaler)
            all_tr_res.append(tr_res)
            all_ts_res.append(ts_res)
            # print('(%d/%d) %s_f1_avg %.4f' % (i, n_iter, setting, np.mean(np.array(all_ts_res)[:,0])), end='\r')
    else:
        # Learned-embedding path: load the embedding matrix and its
        # node-id -> row-index map.
        embeds = np.load(embed_dir + "/val.npy")
        id_map = {}
        with open(embed_dir + "/val.txt") as fp:
            for i, line in enumerate(fp):
                id_map[int(line.strip())] = i
        train_embeds = embeds[[id_map[id] for id in train_ids]]
        test_embeds = embeds[[id_map[id] for id in test_ids]]
        print("Running regression...")
        all_tr_res = []
        all_ts_res = []
        for i in range(n_iter):
            tr_res, ts_res = run_regression(train_embeds, train_targets, test_embeds, test_targets, target_scaler)
            all_tr_res.append(tr_res)
            all_ts_res.append(ts_res)
            # print('(%d/%d) %s_f1_avg %.4f' % (i, n_iter, setting, np.mean(np.array(all_ts_res)[:,0])), end='\r')
    tr_res_dict, ts_res_dict = calculate_results(np.array(all_tr_res), np.array(all_ts_res), setting)
    return tr_res_dict, ts_res_dict
if __name__ == '__main__':
    #TODO: remove the fix seed
    np.random.seed(1)
    parser = ArgumentParser("Run evaluation on osm regression data.")
    # Positional CLI arguments, declared data-driven for brevity.
    arg_specs = [
        ("train_prefix", "Path to directory containing the dataset."),
        ("embed_dir", "Path to directory containing the learned node embeddings. Set to 'feat' for raw features."),
        ("setting", "Either val or test."),
        ("n_iter", "Number of re-runs (for avg and std)."),
        ("scaling", "Wheter to scale target values."),
    ]
    for arg_name, arg_help in arg_specs:
        parser.add_argument(arg_name, help=arg_help)
    args = parser.parse_args()

    train_prefix = args.train_prefix
    embed_dir = args.embed_dir
    setting = args.setting
    n_iter = int(args.n_iter)
    scaling = args.scaling == "true"

    # Echo the parsed configuration before running.
    print('train prefix:', train_prefix)
    print('embed dir:', embed_dir)
    print('setting:', setting)
    print('n iter:', n_iter, type(n_iter))
    print('scaling:', scaling)
    print('\n\n')

    tr_res_dict, ts_res_dict = evaluate(train_prefix, embed_dir, setting=setting, n_iter=n_iter, scaling=scaling)
    print('Train stats')
    print(tr_res_dict)
    print(setting + ' stats')
    print(ts_res_dict)
| [
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"sklearn.preprocessing.StandardScaler",
"sklearn.linear_model.SGDRegressor",
"sklearn.dummy.DummyRegressor",
"numpy.std",
"sklearn.preprocessing.MinMaxScaler",
"numpy.expand_dims",
"numpy.shape",
"numpy.mean",
"numpy.array"
] | [((753, 786), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {'strategy': '"""median"""'}), "(strategy='median')\n", (767, 786), False, 'from sklearn.dummy import DummyRegressor\n'), ((1677, 1736), 'sklearn.linear_model.SGDRegressor', 'SGDRegressor', ([], {'loss': '"""squared_loss"""', 'max_iter': '(1000)', 'tol': '(0.001)'}), "(loss='squared_loss', max_iter=1000, tol=0.001)\n", (1689, 1736), False, 'from sklearn.linear_model import SGDRegressor\n'), ((4113, 4154), 'numpy.array', 'np.array', (['[targets[i] for i in train_ids]'], {}), '([targets[i] for i in train_ids])\n', (4121, 4154), True, 'import numpy as np\n'), ((4263, 4303), 'numpy.array', 'np.array', (['[targets[i] for i in test_ids]'], {}), '([targets[i] for i in test_ids])\n', (4271, 4303), True, 'import numpy as np\n'), ((7783, 7800), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7797, 7800), True, 'import numpy as np\n'), ((7814, 7870), 'argparse.ArgumentParser', 'ArgumentParser', (['"""Run evaluation on osm regression data."""'], {}), "('Run evaluation on osm regression data.')\n", (7828, 7870), False, 'from argparse import ArgumentParser\n'), ((1603, 1625), 'numpy.shape', 'np.shape', (['train_embeds'], {}), '(train_embeds)\n', (1611, 1625), True, 'import numpy as np\n'), ((1627, 1659), 'numpy.shape', 'np.shape', (['train_targets_ravelled'], {}), '(train_targets_ravelled)\n', (1635, 1659), True, 'import numpy as np\n'), ((4211, 4243), 'numpy.expand_dims', 'np.expand_dims', (['train_targets', '(1)'], {}), '(train_targets, 1)\n', (4225, 4243), True, 'import numpy as np\n'), ((4358, 4389), 'numpy.expand_dims', 'np.expand_dims', (['test_targets', '(1)'], {}), '(test_targets, 1)\n', (4372, 4389), True, 'import numpy as np\n'), ((4538, 4552), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4550, 4552), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((4788, 4811), 'numpy.shape', 'np.shape', (['train_targets'], {}), 
'(train_targets)\n', (4796, 4811), True, 'import numpy as np\n'), ((4840, 4862), 'numpy.shape', 'np.shape', (['test_targets'], {}), '(test_targets)\n', (4848, 4862), True, 'import numpy as np\n'), ((4948, 4984), 'numpy.load', 'np.load', (["(train_prefix + '-feats.npy')"], {}), "(train_prefix + '-feats.npy')\n", (4955, 4984), True, 'import numpy as np\n'), ((5905, 5921), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5919, 5921), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6839, 6870), 'numpy.load', 'np.load', (["(embed_dir + '/val.npy')"], {}), "(embed_dir + '/val.npy')\n", (6846, 6870), True, 'import numpy as np\n'), ((7631, 7651), 'numpy.array', 'np.array', (['all_tr_res'], {}), '(all_tr_res)\n', (7639, 7651), True, 'import numpy as np\n'), ((7653, 7673), 'numpy.array', 'np.array', (['all_ts_res'], {}), '(all_ts_res)\n', (7661, 7673), True, 'import numpy as np\n'), ((3416, 3437), 'numpy.mean', 'np.mean', (['result[:, i]'], {}), '(result[:, i])\n', (3423, 3437), True, 'import numpy as np\n'), ((3522, 3542), 'numpy.std', 'np.std', (['result[:, i]'], {}), '(result[:, i])\n', (3528, 3542), True, 'import numpy as np\n'), ((5717, 5738), 'numpy.shape', 'np.shape', (['train_feats'], {}), '(train_feats)\n', (5725, 5738), True, 'import numpy as np\n'), ((5771, 5791), 'numpy.shape', 'np.shape', (['test_feats'], {}), '(test_feats)\n', (5779, 5791), True, 'import numpy as np\n'), ((1087, 1101), 'numpy.array', 'np.array', (['rmse'], {}), '(rmse)\n', (1095, 1101), True, 'import numpy as np\n'), ((2126, 2140), 'numpy.array', 'np.array', (['rmse'], {}), '(rmse)\n', (2134, 2140), True, 'import numpy as np\n'), ((2724, 2738), 'numpy.array', 'np.array', (['rmse'], {}), '(rmse)\n', (2732, 2738), True, 'import numpy as np\n')] |
"""Base and child classes to handle models
used to fit light curves
Author: <NAME>
Email: <EMAIL>
"""
import copy
import inspect
import os
import astropy.units as q
import batman
from bokeh.plotting import figure, show
import numpy as np
from .parameters import Parameters
from ..limb_darkening.limb_darkening_fit import ld_profile
class Model:
    def __init__(self, **kwargs):
        """Create a model instance.

        Parameters
        ----------
        **kwargs
            Arbitrary attributes to store on the instance.
        """
        # Set up model attributes
        self.name = 'New Model'
        self._time = None
        self._flux = None
        self._units = q.day
        self._parameters = Parameters()
        self.components = None
        self.fmt = None

        # Store the arguments as attributes
        for arg, val in kwargs.items():
            setattr(self, arg, val)

    def __mul__(self, other):
        """Multiply model components to make a combined model

        Parameters
        ----------
        other: ExoCTK.lightcurve_fitting.models.Model
            The model to multiply

        Returns
        -------
        ExoCTK.lightcurve_fitting.lightcurve.Model
            The combined model
        """
        # Duck-type check: anything with units/flux/time quacks like a Model
        attrs = ['units', 'flux', 'time']
        if not all([hasattr(other, attr) for attr in attrs]):
            raise TypeError('Only another Model instance may be multiplied.')

        # Combine the model parameters too
        params = self.parameters + other.parameters

        return CompositeModel([copy.copy(self), other], parameters=params)

    @property
    def flux(self):
        """A getter for the flux"""
        return self._flux

    @flux.setter
    def flux(self, flux_array):
        """A setter for the flux

        Parameters
        ----------
        flux_array: sequence
            The flux array

        Raises
        ------
        TypeError
            If ``flux_array`` is not array-like.
        """
        # Check the type
        if not isinstance(flux_array, (np.ndarray, tuple, list)):
            raise TypeError("flux axis must be a tuple, list, or numpy array.")

        # Set the array
        self._flux = np.array(flux_array)

    def interp(self, new_time):
        """Interpolate the flux to a new time axis

        Parameters
        ----------
        new_time: sequence, astropy.units.quantity.Quantity
            The time array
        """
        # Check the type
        if not isinstance(new_time, (np.ndarray, tuple, list)):
            raise TypeError("Time axis must be a tuple, list, or numpy array")

        # Calculate the new flux
        self.flux = np.interp(new_time, self.time, self.flux)

        # Set the new time axis
        self.time = new_time

    @property
    def parameters(self):
        """A getter for the parameters"""
        return self._parameters

    @parameters.setter
    def parameters(self, params):
        """A setter for the parameters

        Parameters
        ----------
        params: str, parameters.Parameters, None
            Path to a parameter file, or a Parameters instance, or None.
        """
        # Load from file if a path to an existing file was given.
        # BUGFIX: was os.file.exists(params) -- os has no `file` attribute,
        # so any string argument raised AttributeError instead of loading.
        if isinstance(params, str) and os.path.exists(params):
            params = Parameters(params)

        # Or a Parameters instance
        if not isinstance(params, (Parameters, type(None))):
            raise TypeError("'params' argument must be a JSON file, ascii\
                file, or parameters.Parameters instance.")

        # Set the parameters attribute
        self._parameters = params

    def plot(self, time, components=False, fig=None, draw=False, **kwargs):
        """Plot the model

        Parameters
        ----------
        time: array-like
            The time axis to use
        components: bool
            Plot all model components
        fig: bokeh.plotting.figure (optional)
            The figure to plot on
        draw: bool
            If True, show the figure instead of returning it

        Returns
        -------
        bokeh.plotting.figure
            The figure (only when ``draw`` is False)
        """
        # Make the figure
        if fig is None:
            fig = figure()

        # Set the time
        self.time = time

        # Plot the model
        fig.line(self.time, self.eval(**kwargs), legend=self.name)
        if components and self.components is not None:
            for comp in self.components:
                fig = comp.plot(self.time, fig=fig, draw=False, **kwargs)

        # Format axes
        fig.xaxis.axis_label = str(self.units)
        fig.yaxis.axis_label = 'Flux'

        if draw:
            show(fig)
        else:
            return fig

    @property
    def time(self):
        """A getter for the time"""
        return self._time

    @time.setter
    def time(self, time_array, units='MJD'):
        """A setter for the time

        Parameters
        ----------
        time_array: sequence, astropy.units.quantity.Quantity
            The time array
        units: str
            The units of the input time_array, ['MJD', 'BJD', 'phase'].
            NOTE(review): property setters receive a single value, so this
            argument always takes its default when assigned via
            ``self.time = x``.
        """
        # Check the type
        if not isinstance(time_array, (np.ndarray, tuple, list)):
            raise TypeError("Time axis must be a tuple, list, or numpy array.")

        # Set the units
        self.units = units

        # Set the array
        self._time = time_array

    @property
    def units(self):
        """A getter for the units"""
        return self._units

    @units.setter
    def units(self, units):
        """A setter for the units

        Parameters
        ----------
        units: str
            The time units ['BJD', 'MJD', 'phase']
        """
        # Check the type
        if units not in ['BJD', 'MJD', 'phase']:
            raise TypeError("units axis must be 'BJD', 'MJD', or 'phase'.")

        self._units = units
class CompositeModel(Model):
    """A model built from the product of several component models."""

    def __init__(self, models, **kwargs):
        """Store the component models.

        Parameters
        ----------
        models: sequence
            The list of Model instances to combine
        """
        # Initialize the base Model with any extra keyword arguments
        super().__init__(**kwargs)

        # Keep the component models for evaluation and plotting
        self.components = models

    def eval(self, **kwargs):
        """Evaluate every component and return the product of their fluxes."""
        # Fall back to a time axis supplied at call time
        if self.time is None:
            self.time = kwargs.get('time')

        # Multiply the component fluxes together, starting from unity
        total_flux = 1.
        for component in self.components:
            total_flux = total_flux * component.eval(**kwargs)

        return total_flux
class PolynomialModel(Model):
    """Polynomial Model"""

    def __init__(self, **kwargs):
        """Initialize the polynomial model.

        Coefficients may be supplied either via a ``parameters`` keyword
        (a Parameters instance) or as individual ``c#`` keyword arguments.
        """
        # Inherit from Model class
        super().__init__(**kwargs)

        # Check for Parameters instance
        self.parameters = kwargs.get('parameters')

        # Generate parameters from kwargs if necessary
        if self.parameters is None:
            self._parse_coeffs(kwargs)

    def _parse_coeffs(self, coeff_dict):
        """Convert dict of 'c#' coefficients into a list
        of coefficients in decreasing order, i.e. ['c2','c1','c0'].

        Sets ``self.parameters`` and ``self.coeffs``; returns nothing.

        Parameters
        ----------
        coeff_dict: dict
            The dictionary of coefficients
        """
        params = {cN: coeff for cN, coeff in coeff_dict.items()
                  if cN.startswith('c') and cN[1:].isdigit()}
        self.parameters = Parameters(**params)

        # Parse 'c#' keyword arguments as coefficients, indexed by power
        coeffs = np.zeros(100)
        for k, v in self.parameters.dict.items():
            if k.lower().startswith('c') and k[1:].isdigit():
                coeffs[int(k[1:])] = v[0]

        # BUGFIX: trim only the unused high-order zeros ('b' = back) before
        # reversing into decreasing order. The old default trimmed BOTH ends,
        # so a legitimate zero constant term (e.g. c0=0, c1=1) was stripped
        # from the front, silently shifting every power down by one.
        self.coeffs = np.trim_zeros(coeffs, 'b')[::-1]

    def eval(self, **kwargs):
        """Evaluate the polynomial at the model's time axis."""
        # Get the time
        if self.time is None:
            self.time = kwargs.get('time')

        # Create the polynomial from the coeffs
        poly = np.poly1d(self.coeffs)

        # Evaluate about the mean time for numerical stability
        time_local = self.time - self.time.mean()

        return np.polyval(poly, time_local)
class TransitModel(Model):
    """Transit Model"""

    def __init__(self, **kwargs):
        """Initialize the transit model.

        Parameters may be supplied through a ``parameters`` keyword or as
        individual keyword arguments.
        """
        super().__init__(**kwargs)

        # Check for Parameters instance
        self.parameters = kwargs.get('parameters')
        if self.parameters is None:
            self.parameters = Parameters(**kwargs)

        # Store the limb darkening profile and count its coefficients
        # (the first signature argument is the independent variable, so
        # it is skipped)
        self.ld_func = ld_profile(self.parameters.limb_dark.value)
        n_args = len(inspect.signature(self.ld_func).parameters)
        self.coeffs = ['u{}'.format(i) for i in range(1, n_args)]

    def eval(self, **kwargs):
        """Evaluate the transit light curve with batman."""
        # Fall back to a time axis supplied at call time
        if self.time is None:
            self.time = kwargs.get('time')

        # Populate the batman parameter object
        bm_params = batman.TransitParams()
        for param in self.parameters.list:
            setattr(bm_params, param[0], param[1])

        # Collect the limb darkening coefficients
        bm_params.u = [getattr(self.parameters, name).value
                       for name in self.coeffs]

        # batman calls the 4-parameter law 'nonlinear'
        if self.parameters.limb_dark.value == '4-parameter':
            bm_params.limb_dark = 'nonlinear'

        # Build the eclipse model and evaluate the light curve
        transit_type = self.parameters.transittype.value
        eclipse_model = batman.TransitModel(bm_params, self.time,
                                            transittype=transit_type)
        return eclipse_model.light_curve(bm_params)
| [
"numpy.poly1d",
"bokeh.plotting.figure",
"batman.TransitParams",
"numpy.trim_zeros",
"os.file.exists",
"numpy.polyval",
"numpy.zeros",
"copy.copy",
"numpy.array",
"bokeh.plotting.show",
"inspect.signature",
"numpy.interp",
"batman.TransitModel"
] | [((2048, 2068), 'numpy.array', 'np.array', (['flux_array'], {}), '(flux_array)\n', (2056, 2068), True, 'import numpy as np\n'), ((2513, 2554), 'numpy.interp', 'np.interp', (['new_time', 'self.time', 'self.flux'], {}), '(new_time, self.time, self.flux)\n', (2522, 2554), True, 'import numpy as np\n'), ((7317, 7330), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (7325, 7330), True, 'import numpy as np\n'), ((7818, 7840), 'numpy.poly1d', 'np.poly1d', (['self.coeffs'], {}), '(self.coeffs)\n', (7827, 7840), True, 'import numpy as np\n'), ((7974, 8002), 'numpy.polyval', 'np.polyval', (['poly', 'time_local'], {}), '(poly, time_local)\n', (7984, 8002), True, 'import numpy as np\n'), ((8923, 8945), 'batman.TransitParams', 'batman.TransitParams', ([], {}), '()\n', (8943, 8945), False, 'import batman\n'), ((9418, 9475), 'batman.TransitModel', 'batman.TransitModel', (['bm_params', 'self.time'], {'transittype': 'tt'}), '(bm_params, self.time, transittype=tt)\n', (9437, 9475), False, 'import batman\n'), ((2916, 2938), 'os.file.exists', 'os.file.exists', (['params'], {}), '(params)\n', (2930, 2938), False, 'import os\n'), ((3807, 3815), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (3813, 3815), False, 'from bokeh.plotting import figure, show\n'), ((4267, 4276), 'bokeh.plotting.show', 'show', (['fig'], {}), '(fig)\n', (4271, 4276), False, 'from bokeh.plotting import figure, show\n'), ((7541, 7562), 'numpy.trim_zeros', 'np.trim_zeros', (['coeffs'], {}), '(coeffs)\n', (7554, 7562), True, 'import numpy as np\n'), ((1500, 1515), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (1509, 1515), False, 'import copy\n'), ((8571, 8602), 'inspect.signature', 'inspect.signature', (['self.ld_func'], {}), '(self.ld_func)\n', (8588, 8602), False, 'import inspect\n')] |
# solutions.py
"""Volume II: Interior Point II (Quadratic Optimization). Test Driver."""
# Wrappers ====================================================================
import signal
from functools import wraps
from matplotlib import pyplot as plt
def _autoclose(func):
"""Decorator for closing figures automatically."""
@wraps(func)
def wrapper(*args, **kwargs):
try:
plt.ion()
return func(*args, **kwargs)
finally:
plt.close('all')
plt.ioff()
return wrapper
def _timeout(seconds):
"""Decorator for preventing a function from running for too long.
Inputs:
seconds (int): The number of seconds allowed.
Notes:
This decorator uses signal.SIGALRM, which is only available on Unix.
"""
assert isinstance(seconds, int), "@timeout(sec) requires an int"
class TimeoutError(Exception):
pass
def _handler(signum, frame):
"""Handle the alarm by raising a custom exception."""
raise TimeoutError("Timeout after {0} seconds".format(seconds))
def decorator(func):
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handler)
signal.alarm(seconds) # Set the alarm.
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0) # Turn the alarm off.
return result
return wraps(func)(wrapper)
return decorator
# Test Script and Class =======================================================
import numpy as np
from real_solutions import portfolio
def test(student_module):
    """Grade a student's entire solutions file.

    20 points for problems 1-2
    10 points for problem 3
    10 points for problem 4

    Inputs:
        student_module: the imported module for the student's file.

    Returns:
        score (int): the student's score, out of 40.
        feedback (str): a printout of test results for the student.
    """
    driver = _testDriver()
    driver.test_all(student_module)
    return driver.score, driver.feedback
class _testDriver(object):
    """Class for testing a student's work.

    Attributes:
        score (int): the accumulated score.
        feedback (str): a printout of test results.
    """

    # Constructor -------------------------------------------------------------
    def __init__(self):
        """Initialize the feedback attribute."""
        self.feedback = ""

    # Main routine -----------------------------------------------------------
    def test_all(self, student_module, total=40):
        """Grade the provided module on each problem and compile feedback."""
        # Reset feedback and score.
        self.feedback = ""
        self.score = 0

        def test_one(problem, number, value):
            """Test a single problem, checking for errors."""
            try:
                self.feedback += "\n\nProblem {} ({} points):".format(
                                                                number, value)
                points = problem(student_module)
                self.score += points
                self.feedback += "\nScore += {}".format(points)
            except BaseException as e:
                self.feedback += "\n{}: {}".format(self._errType(e), e)

        # Grade each problem.
        test_one(self.problem2, 2, 20)   # Problems 1-2: 20 points.
        test_one(self.problem3, 3, 10)   # Problem 3: 10 points.
        test_one(self.problem4, 4, 10)   # Problem 4: 10 points.

        # Report final score.
        percentage = (100. * self.score) / total
        self.feedback += "\n\nTotal score: {}/{} = {}%".format(
                                    self.score, total, round(percentage, 2))
        if percentage >= 98: self.feedback += "\n\nExcellent!"
        elif percentage >= 90: self.feedback += "\n\nGreat job!"

        # Add comments (optionally).
        # BUGFIX: was raw_input(), which is Python 2 only and raises
        # NameError under Python 3 (the rest of the file uses print()/input()).
        print(self.feedback)
        comments = str(input("Comments: "))
        if len(comments) > 0:
            self.feedback += '\n\n\nComments:\n\t{}'.format(comments)

    # Helper Functions --------------------------------------------------------
    @staticmethod
    def _errType(error):
        """Get just the name of the exception 'error' in string format."""
        return str(type(error).__name__)

    def _eqTest(self, correct, student, message):
        """Test to see if 'correct' and 'student' are equal.
        Report the given 'message' if they are not.

        Returns 1 for a match, 0 otherwise.
        """
        if not isinstance(student, np.ndarray):
            raise TypeError("Failed to return a NumPy array")
        if np.allclose(correct, student):
            return 1
        else:
            self.feedback += "\n{}".format(message)
            self.feedback += "\n\tCorrect response: {}".format(correct)
            self.feedback += "\n\tStudent response: {}".format(student)
            return 0

    def _grade(self, points, message=None):
        """Manually grade a problem worth 'points'. Return the score.
        If full points are not earned, get feedback on the problem.
        """
        credit = -1
        while credit > points or credit < 0:
            try:
                credit = int(input("\nScore out of {}: ".format(points)))
            except:
                credit = -1
        if credit != points:
            # Add comments (optionally),
            # BUGFIX: raw_input() -> input() for Python 3.
            comments = input("Comments: ")
            if len(comments) > 0:
                self.feedback += "\n{}".format(comments)
            # Or add a predetermined error message.
            elif message is not None:
                self.feedback += "\n{}".format(message)
        return credit

    # Problems ----------------------------------------------------------------
    @_timeout(5)
    def problem2(self, s):
        """Test qInteriorPoint(). 20 points."""
        Q = np.array([[1, -1.], [-1, 2]])
        c = np.array([-2, -6.])
        A = np.array([[-1, -1], [1, -2.], [-2, -1], [1, 0], [0, 1]])
        b = np.array([-2, -2, -3., 0, 0])
        x0 = np.array([.5, .5])
        y0 = np.ones(5)
        m0 = np.ones(5)
        point, value = s.qInteriorPoint(Q=Q, c=c, A=A, b=b, guess=(x0, y0, m0))
        return 20 * self._eqTest(np.array([2/3., 4/3.]), point,
                            "qInteriorPoint() failed for the QP in Problem 2")

    @_autoclose
    def problem3(self, s):
        """Test the circus tent problem. 10 points."""
        s.circus(n=15)
        return self._grade(10, "Incorrect circus tent graph with n=15")

    @_timeout(5)
    def problem4(self, s):
        """Test the portfolio optimization problem. 10 points."""
        try:
            s1, s2 = s.portfolio(filename="portfolio.txt")
            s1, s2 = np.ravel(s1), np.ravel(s2)
        except ValueError as e:
            # BUGFIX: Python 3 phrases this as "too many values to unpack
            # (expected 2)", so the old equality check never matched and the
            # raw error leaked through. Use substring matching instead.
            if "too many values to unpack" in str(e):
                raise ValueError("Failed to return two NumPy arrays")
            else: raise
        except TypeError as e:
            if "is not iterable" in str(e):
                raise TypeError("Failed to return two NumPy arrays")
            else: raise
        c1, c2 = portfolio(filename="portfolio.txt")
        points = 5 * self._eqTest(c1, s1,
                            "Incorrect percentages (with short selling)")
        # NOTE(review): this second message also says "with short selling";
        # it looks like a copy-paste of the first (probably meant "without").
        # Left unchanged since the intended wording cannot be confirmed here.
        points += 5 * self._eqTest(c2, s2,
                            "Incorrect percentages (with short selling)")
        return points
# Validation ==================================================================
if __name__ == '__main__':
    # Validate the test driver by running it on the local solutions file.
    # BUGFIX: previously called test(real_solutions), but `real_solutions`
    # is not a bound name here (only `portfolio` is imported from that
    # module at the top of the file), so this raised NameError.
    import solutions
    test(solutions)
| [
"real_solutions.portfolio",
"matplotlib.pyplot.ioff",
"numpy.ravel",
"matplotlib.pyplot.close",
"numpy.allclose",
"numpy.ones",
"matplotlib.pyplot.ion",
"numpy.array",
"functools.wraps",
"signal.alarm",
"signal.signal"
] | [((333, 344), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (338, 344), False, 'from functools import wraps\n'), ((4637, 4666), 'numpy.allclose', 'np.allclose', (['correct', 'student'], {}), '(correct, student)\n', (4648, 4666), True, 'import numpy as np\n'), ((5881, 5911), 'numpy.array', 'np.array', (['[[1, -1.0], [-1, 2]]'], {}), '([[1, -1.0], [-1, 2]])\n', (5889, 5911), True, 'import numpy as np\n'), ((5920, 5940), 'numpy.array', 'np.array', (['[-2, -6.0]'], {}), '([-2, -6.0])\n', (5928, 5940), True, 'import numpy as np\n'), ((5951, 6008), 'numpy.array', 'np.array', (['[[-1, -1], [1, -2.0], [-2, -1], [1, 0], [0, 1]]'], {}), '([[-1, -1], [1, -2.0], [-2, -1], [1, 0], [0, 1]])\n', (5959, 6008), True, 'import numpy as np\n'), ((6019, 6049), 'numpy.array', 'np.array', (['[-2, -2, -3.0, 0, 0]'], {}), '([-2, -2, -3.0, 0, 0])\n', (6027, 6049), True, 'import numpy as np\n'), ((6062, 6082), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (6070, 6082), True, 'import numpy as np\n'), ((6094, 6104), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (6101, 6104), True, 'import numpy as np\n'), ((6118, 6128), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (6125, 6128), True, 'import numpy as np\n'), ((7149, 7184), 'real_solutions.portfolio', 'portfolio', ([], {'filename': '"""portfolio.txt"""'}), "(filename='portfolio.txt')\n", (7158, 7184), False, 'from real_solutions import portfolio\n'), ((404, 413), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (411, 413), True, 'from matplotlib import pyplot as plt\n'), ((484, 500), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (493, 500), True, 'from matplotlib import pyplot as plt\n'), ((513, 523), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (521, 523), True, 'from matplotlib import pyplot as plt\n'), ((1167, 1206), 'signal.signal', 'signal.signal', (['signal.SIGALRM', '_handler'], {}), '(signal.SIGALRM, _handler)\n', (1180, 1206), False, 'import 
signal\n'), ((1219, 1240), 'signal.alarm', 'signal.alarm', (['seconds'], {}), '(seconds)\n', (1231, 1240), False, 'import signal\n'), ((1468, 1479), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1473, 1479), False, 'from functools import wraps\n'), ((1373, 1388), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (1385, 1388), False, 'import signal\n'), ((6240, 6268), 'numpy.array', 'np.array', (['[2 / 3.0, 4 / 3.0]'], {}), '([2 / 3.0, 4 / 3.0])\n', (6248, 6268), True, 'import numpy as np\n'), ((6757, 6769), 'numpy.ravel', 'np.ravel', (['s1'], {}), '(s1)\n', (6765, 6769), True, 'import numpy as np\n'), ((6771, 6783), 'numpy.ravel', 'np.ravel', (['s2'], {}), '(s2)\n', (6779, 6783), True, 'import numpy as np\n')] |
import numpy as np
from skimage.filters import prewitt_h, prewitt_v, laplace
class Feature:
    """Extract per-spot image features from four aligned exposures."""

    # create new Feature object storing 4 images
    def __init__(self, no_flash1, no_flash2, flash, lst_flash, n=5, boxsize=3, r=3):
        """Store the images and locate the n brightest flash-only spots.

        Parameters
        ----------
        no_flash1, no_flash2, flash, lst_flash : np.ndarray
            The four input images (assumed same shape -- TODO confirm).
        n : int
            Number of bright spots to track.
        boxsize : int
            Half-width of the square patch cut around each spot.
        r : int
            Radius of the circular patch around each spot.
        """
        self.n = n
        self.boxsize = boxsize
        self.r = r
        self.locs = self.getmaxlocs(flash - no_flash2, n=self.n)
        self.comb_img = [no_flash1, no_flash2, flash, flash - no_flash1,
                         flash - no_flash2, flash - lst_flash]

    # get the top n brightest spots
    def getmaxlocs(self, img, n):
        """Return an (n, 3) array of [row, col, value] for the n brightest
        spots, ignoring a border of 20 pixels around the image."""
        deleteradius = 20
        locs = []
        cut_img = img[deleteradius:(img.shape[0] - deleteradius),
                      deleteradius:(img.shape[1] - deleteradius)].copy()
        for _ in range(n):
            loc = np.unravel_index(cut_img.argmax(), cut_img.shape)
            v = cut_img[loc[0], loc[1]]
            locs.append([loc[0] + deleteradius, loc[1] + deleteradius, v])
            # Suppress the neighbourhood of this maximum so the next
            # iteration finds a different spot.
            cut_img[(loc[0] - deleteradius):(loc[0] + deleteradius),
                    (loc[1] - deleteradius):(loc[1] + deleteradius)] = -10000
        return np.array(locs)

    # get the boxsize by boxsize cut image of top n brightest dots
    def get_square_cut(self, img, boxsize):
        """Return an (n, 2*boxsize+1, 2*boxsize+1) stack of square patches
        centred on the stored bright spots."""
        cut = []
        for brightest in self.locs:
            x1 = int(brightest[0] - boxsize)
            x2 = int(brightest[0] + boxsize) + 1
            y1 = int(brightest[1] - boxsize)
            y2 = int(brightest[1] + boxsize) + 1
            cut.append(img[x1:x2, y1:y2].copy())
        return np.array(cut)

    # Get the mean of an image
    def get_mean(self, cut):
        """Return the per-patch mean as a length-n array."""
        return np.array([np.mean(img) for img in cut])

    # get the standard deviation of an image
    def get_std(self, cut):
        """Return the per-patch standard deviation as a length-n array."""
        return np.array([np.std(img) for img in cut])

    # get the mean and std of concentric ring of top n brightest dots
    def get_concentric_ring(self, img, r):
        """Mean and std of the disc of radius r around EACH bright spot.

        BUGFIX: the original returned from inside the loop, so only the
        first spot was ever processed (and an accumulated `cut` list was
        dead code); its 1-based index loops also skipped the first
        row/column of the disc. Now every spot is processed and per-spot
        arrays are returned, which makes the column assignments in
        get_feature() meaningful.

        Returns
        -------
        (np.ndarray, np.ndarray)
            Length-n arrays of disc means and standard deviations.
        """
        boxsize = int(r)
        means = []
        stds = []
        for brightest in self.locs:
            x1 = int(brightest[0] - boxsize)
            x2 = int(brightest[0] + boxsize) + 1
            y1 = int(brightest[1] - boxsize)
            y2 = int(brightest[1] + boxsize) + 1
            circle = img[x1:x2, y1:y2].copy()
            # Collect pixels inside the disc of radius r centred on the spot.
            pixels = []
            for i in range(circle.shape[0]):
                for j in range(circle.shape[1]):
                    if (i - r)**2 + (j - r)**2 <= r**2:
                        pixels.append(circle[i, j])
            pixels = np.array(pixels)
            means.append(np.mean(pixels))
            stds.append(np.std(pixels))
        return np.array(means), np.array(stds)

    # edge detection by convolution, and get its mean and std
    def detect_edge(self, cut):
        """Laplacian edge response for each patch plus its mean and std."""
        laplacians = []
        mean = []
        std = []
        for img in cut:
            # convolute with proper kernels
            laplacian = laplace(img)
            laplacians.append(laplacian)
            mean.append(np.mean(laplacian))
            std.append(np.std(laplacian))
        return np.array(laplacians), np.array(mean), np.array(std)

    # get the feature
    def get_feature(self):
        """Assemble the full (n, num_features) feature matrix: raw patches,
        edge patches, their means/stds, and per-spot disc statistics."""
        m = len(self.comb_img)
        img_size = (self.boxsize * 2 + 1)**2
        feature_cut = np.zeros((self.n, (img_size + 2) * m))
        feature_edge = np.zeros((self.n, (img_size + 2) * m))
        feature_ring = np.zeros((self.n, 2 * m))
        for i in range(m):
            img = self.comb_img[i]
            cut = self.get_square_cut(img, boxsize=self.boxsize)
            feature_cut[:, i*img_size:(i+1)*img_size] = cut.reshape((self.n, img_size))
            feature_cut[:, img_size*m + i] = self.get_mean(cut)
            feature_cut[:, img_size*m + m + i] = self.get_std(cut)
            edge, edge_mean, edge_std = self.detect_edge(cut)
            feature_edge[:, i*img_size:(i+1)*img_size] = edge.reshape((self.n, img_size))
            feature_edge[:, img_size*m + i] = edge_mean
            feature_edge[:, img_size*m + m + i] = edge_std
            # Per-spot disc statistics (length-n arrays per column).
            feature_ring[:, i], feature_ring[:, m + i] = self.get_concentric_ring(img, r=self.r)
        feature = np.concatenate((feature_cut, feature_edge, feature_ring), axis=1)
        return feature

    # get the index window of each feature
    def get_index(self):
        """Return the cumulative boundary indices of each feature group."""
        index = []
        m = len(self.comb_img)
        img_size = (self.boxsize * 2 + 1)**2
        temp_index = 0
        # Patch-pixel blocks for the raw cuts and the edge responses.
        for j in range(2):
            for i in range(m):
                index.append(temp_index + img_size * (i + 1))
            temp_index = index[-1]
        # Scalar mean/std columns.
        for i in range(2 * m):
            index.append(temp_index + i + 1)
        temp_index = index[-1]
        for i in range(2 * m):
            index.append(temp_index + 1 + i)
        index = [0] + index
        return index

    # get the number of all features
    def get_num_feature(self):
        """Return the total number of feature columns."""
        m = len(self.comb_img)
        img_size = (self.boxsize * 2 + 1)**2
        number = m * img_size * 2 + m * 2 * 3
        return number

    def get_belong_feature(self, index):
        """Print which image and feature type each feature index belongs to."""
        index_window = self.get_index()
        feature_type = ['cut', 'cut_mean', 'cut_std', 'edge', 'edge_mean', 'edge_std', 'ring_mean', 'ring_std']
        image_type = ['no_flash1', 'no_flash2', 'flash', 'flash-no_flash1', 'flash-no_flash2', 'flash-lst_flash']
        len_type = len(feature_type)
        len_image = len(image_type)
        for i in index:
            for window in index_window[1:]:
                if i < window:
                    order = index_window.index(window) - 1
                    image_order = order % len_image
                    feature_order = order // len_image % len_type
                    print('the image is %s and the type of feature is %s' % (image_type[image_order], feature_type[feature_order]))
                    break
        return 0
| [
"skimage.filters.laplace",
"numpy.std",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.concatenate"
] | [((1036, 1050), 'numpy.array', 'np.array', (['locs'], {}), '(locs)\n', (1044, 1050), True, 'import numpy as np\n'), ((1458, 1471), 'numpy.array', 'np.array', (['cut'], {}), '(cut)\n', (1466, 1471), True, 'import numpy as np\n'), ((1629, 1643), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (1637, 1643), True, 'import numpy as np\n'), ((1811, 1824), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (1819, 1824), True, 'import numpy as np\n'), ((3173, 3211), 'numpy.zeros', 'np.zeros', (['(self.n, (img_size + 2) * m)'], {}), '((self.n, (img_size + 2) * m))\n', (3181, 3211), True, 'import numpy as np\n'), ((3231, 3269), 'numpy.zeros', 'np.zeros', (['(self.n, (img_size + 2) * m)'], {}), '((self.n, (img_size + 2) * m))\n', (3239, 3269), True, 'import numpy as np\n'), ((3289, 3314), 'numpy.zeros', 'np.zeros', (['(self.n, 2 * m)'], {}), '((self.n, 2 * m))\n', (3297, 3314), True, 'import numpy as np\n'), ((4030, 4095), 'numpy.concatenate', 'np.concatenate', (['(feature_cut, feature_edge, feature_ring)'], {'axis': '(1)'}), '((feature_cut, feature_edge, feature_ring), axis=1)\n', (4044, 4095), True, 'import numpy as np\n'), ((2501, 2520), 'numpy.array', 'np.array', (['store_lst'], {}), '(store_lst)\n', (2509, 2520), True, 'import numpy as np\n'), ((2536, 2554), 'numpy.mean', 'np.mean', (['store_lst'], {}), '(store_lst)\n', (2543, 2554), True, 'import numpy as np\n'), ((2556, 2573), 'numpy.std', 'np.std', (['store_lst'], {}), '(store_lst)\n', (2562, 2573), True, 'import numpy as np\n'), ((2821, 2833), 'skimage.filters.laplace', 'laplace', (['img'], {}), '(img)\n', (2828, 2833), False, 'from skimage.filters import prewitt_h, prewitt_v, laplace\n'), ((2976, 2996), 'numpy.array', 'np.array', (['laplacians'], {}), '(laplacians)\n', (2984, 2996), True, 'import numpy as np\n'), ((2998, 3012), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (3006, 3012), True, 'import numpy as np\n'), ((3014, 3027), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (3022, 
3027), True, 'import numpy as np\n'), ((1600, 1612), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (1607, 1612), True, 'import numpy as np\n'), ((1783, 1794), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (1789, 1794), True, 'import numpy as np\n'), ((2899, 2917), 'numpy.mean', 'np.mean', (['laplacian'], {}), '(laplacian)\n', (2906, 2917), True, 'import numpy as np\n'), ((2942, 2959), 'numpy.std', 'np.std', (['laplacian'], {}), '(laplacian)\n', (2948, 2959), True, 'import numpy as np\n')] |
import numpy as np
from keras.layers.core import Flatten, Dense, Activation, Lambda
from keras.models import Model
from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D
from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization
# PatchGAN-discriminator
def discriminator(img_shape, disc_img_shape, patch_num):
    """Build a PatchGAN discriminator over per-image patches.

    A shared CNN ("PatchGAN") scores each (generated, raw) patch pair, and a
    final dense layer combines the per-patch scores into one 2-way softmax.

    Parameters
    ----------
    img_shape : tuple
        Shape of the full input image; only the channel count (last entry)
        is used here.
    disc_img_shape : tuple
        Shape of each discriminator patch.
    patch_num : int
        Number of patches fed per image.

    Returns
    -------
    keras.models.Model
        Model taking patch_num generated patches followed by patch_num raw
        patches and returning a 2-way softmax.
    """
    # Raw patches keep the channel count of the original image.
    disc_raw_img_shape = (disc_img_shape[0], disc_img_shape[1], img_shape[-1])
    list_input = [Input(shape=disc_img_shape, name='disc_input_'+str(i)) for i in range(patch_num)] # => DCT
    list_raw_input = [Input(shape=disc_raw_img_shape, name='disc_raw_input_'+str(i)) for i in range(patch_num)] # => Input Image
    filter_num = 64
    # One stride-2 conv per halving of the patch width: log2(width) layers,
    # with filter counts capped at 8x the base.
    conv_num = int(np.floor(np.log(disc_img_shape[1]) / np.log(2)))
    list_filters = [filter_num*min(8, (2**i)) for i in range(conv_num)]
    # First_Convolution_for_generated_images
    generated_patch_input = Input(shape=disc_img_shape, name="discriminator_dct_input") # DCT
    xg = Conv2D(list_filters[0], kernel_size=(3, 3), strides=(2, 2), name="disc_conv2d_1", padding="same")(generated_patch_input)
    xg = BatchNormalization(axis=-1)(xg)
    xg = LeakyReLU(0.2)(xg)
    # first_Convolution_for_predicted_guetzli_DCT
    # NOTE(review): "dic_dct_conv2d_1" looks like a typo for "disc_...", but
    # renaming it would change saved-weight keys, so it is left as is.
    raw_patch_input = Input(shape=disc_raw_img_shape, name="discriminator_image_input") # Raw Input
    xr = Conv2D(list_filters[0], kernel_size=(3, 3), strides=(2, 2), name="dic_dct_conv2d_1", padding="same")(raw_patch_input)
    xr = BatchNormalization(axis=-1)(xr)
    xr = LeakyReLU(0.2)(xr)
    # Next Conv: fuse the two streams, then keep downsampling.
    for i, f in enumerate(list_filters[1:]):
        x = Concatenate(axis=-1)([xg, xr])
        x = Conv2D(f, (3, 3), strides=(2, 2), padding='same')(x)
        x = BatchNormalization(axis=-1)(x)
        x = LeakyReLU(0.2)(x)
    x_flat = Flatten()(x)
    x = Dense(2, activation='softmax', name='discriminator_dense')(x_flat)
    # The shared per-patch scorer.
    PatchGAN = Model(inputs=[generated_patch_input, raw_patch_input], outputs=[x], name='PatchGAN')
    print('PatchGAN Summary')
    PatchGAN.summary()
    # Apply the shared PatchGAN to every (generated, raw) patch pair.
    x = [PatchGAN([list_input[i], list_raw_input[i]]) for i in range(patch_num)]
    if len(x) > 1:
        x = Concatenate(axis=-1)(x)
    else:
        x = x[0]
    x_out = Dense(2, activation='softmax', name='discriminator_output')(x)
    discriminator_model = Model(inputs=(list_input+list_raw_input), outputs=[x_out], name="Discriminator")
    return discriminator_model
def generator_3layer(input_size):
    """Build a 3-level U-Net-style generator.

    Encoder: three stride-2 downsampling convolutions; decoder: three
    upsampling blocks with skip connections back to the matching encoder
    activations.

    Parameters
    ----------
    input_size : tuple
        (height, width) of the input; the channel count is fixed at 3.

    Returns
    -------
    keras.models.Model
        Model mapping an (H, W, 3) image to an (H, W, 3) sigmoid output.
    """
    inputs = Input((input_size[0], input_size[1], 3))

    # Encoder side
    conv1 = Conv2D(32, (3, 3), padding='same')(inputs)
    conv1 = LeakyReLU(0.2)(conv1)
    conv2 = Conv2D(64, (3, 3), strides=(2, 2), padding='same')(conv1)
    conv2 = LeakyReLU(0.2)(conv2)
    conv2 = BatchNormalization(axis=-1)(conv2)
    conv3 = Conv2D(128, (3, 3), strides=(2, 2), padding='same')(conv2)
    conv3 = LeakyReLU(0.2)(conv3)
    conv3 = BatchNormalization(axis=-1)(conv3)
    conv4 = Conv2D(256, (3, 3), strides=(2, 2), padding='same')(conv3)
    conv4 = Conv2D(256, (3, 3), padding="same")(conv4)
    conv4 = Activation('relu')(conv4)

    # Decoder Side (skip connections to conv3/conv2/conv1)
    up1 = concatenate([UpSampling2D(size=(2, 2), data_format="channels_last")(conv4), conv3], axis=3)
    conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(up1)
    conv5 = BatchNormalization(axis=-1)(conv5)
    up2 = concatenate([UpSampling2D(size=(2, 2), data_format="channels_last")(conv5), conv2], axis=3)
    conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(up2)
    conv6 = BatchNormalization(axis=-1)(conv6)
    up3 = concatenate([UpSampling2D(size=(2, 2), data_format="channels_last")(conv6), conv1], axis=3)
    conv7 = Conv2D(32, (3, 3), activation='relu', padding='same')(up3)
    conv7 = BatchNormalization(axis=-1)(conv7)
    conv8 = Conv2D(3, (1, 1), activation='sigmoid', data_format="channels_last")(conv7)

    # FIX: use the `inputs`/`outputs` keywords (the singular forms are
    # deprecated and removed in newer Keras), consistent with discriminator().
    fcn = Model(inputs=inputs, outputs=conv8)
    return fcn
def generator_2layer(input_size):
    """Build a 2-level U-Net-style generator.

    Same layout as generator_3layer() but with one fewer
    downsampling/upsampling level.

    Parameters
    ----------
    input_size : tuple
        (height, width) of the input; the channel count is fixed at 3.

    Returns
    -------
    keras.models.Model
        Model mapping an (H, W, 3) image to an (H, W, 3) sigmoid output.
    """
    inputs = Input((input_size[0], input_size[1], 3))

    # Encoder side
    conv1 = Conv2D(32, (3, 3), padding='same')(inputs)
    conv1 = LeakyReLU(0.2)(conv1)
    conv2 = Conv2D(64, (3, 3), strides=(2, 2), padding='same')(conv1)
    conv2 = LeakyReLU(0.2)(conv2)
    conv2 = BatchNormalization(axis=-1)(conv2)
    conv3 = Conv2D(128, (3, 3), strides=(2, 2), padding='same')(conv2)
    conv3 = LeakyReLU(0.2)(conv3)
    conv3 = BatchNormalization(axis=-1)(conv3)

    # Decoder Side (skip connections to conv2/conv1); the dead commented-out
    # third level from the original has been removed.
    up1 = concatenate([UpSampling2D(size=(2, 2), data_format="channels_last")(conv3), conv2], axis=3)
    conv5 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv5 = BatchNormalization(axis=-1)(conv5)
    up2 = concatenate([UpSampling2D(size=(2, 2), data_format="channels_last")(conv5), conv1], axis=3)
    conv6 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv6 = BatchNormalization(axis=-1)(conv6)
    conv7 = Conv2D(3, (1, 1), activation='sigmoid', data_format="channels_last")(conv6)

    # FIX: use the `inputs`/`outputs` keywords (the singular forms are
    # deprecated and removed in newer Keras), consistent with discriminator().
    fcn = Model(inputs=inputs, outputs=conv7)
    return fcn
def generator_discriminator(generator, discriminator, img_shape, patch_size):
    """Chain a generator and a patch discriminator into one DCGAN model.

    The full image is run through the generator, then both the raw input and
    the generated image are sliced into non-overlapping patch_size x
    patch_size patches that are fed to the discriminator.

    Parameters
    ----------
    generator : keras.models.Model
        Maps an img_shape image to a generated image of the same size.
    discriminator : keras.models.Model
        Takes the generated patches followed by the raw patches.
    img_shape : tuple
        (height, width, channels) of the input image.
    patch_size : int
        Side length of the square patches.

    Returns
    -------
    keras.models.Model
        Model with outputs [generated_image, discriminator_output].
    """
    raw_input = Input(shape=img_shape, name='DCGAN_input')
    generated_image = generator(raw_input)
    height, width = img_shape[:-1]
    pheight, pwidth = patch_size, patch_size
    # split input patchsize: (start, end) index pairs for each patch row/col
    list_row_idx = [(i*pheight, (i+1)*pheight) for i in range(height//pheight)]
    list_col_idx = [(i*pwidth, (i+1)*pwidth) for i in range(width//pwidth)]
    list_gen_patch = []
    list_raw_patch = []
    for row_idx in list_row_idx:
        for col_idx in list_col_idx:
            # NOTE(review): the lambdas close over the loop variables
            # row_idx/col_idx (late binding). The slices are taken while the
            # graph is built here, but if these Lambda layers were ever
            # re-invoked later (e.g. after model deserialization) the
            # closures would all resolve to the last indices — verify.
            raw_patch = Lambda(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])(raw_input)
            list_raw_patch.append(raw_patch)
            x_patch = Lambda(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])(generated_image)
            list_gen_patch.append(x_patch)
    # Generated patches first, then raw patches, matching discriminator().
    DCGAN_output = discriminator(list_gen_patch+list_raw_patch)
    DCGAN = Model(inputs=[raw_input],
                  outputs=[generated_image, DCGAN_output],
                  name="DCGAN")
    return DCGAN
def generator_butteraugli(input_size):
    """Build a U-Net style generator with 5 downsampling stages.

    Parameters
    ----------
    input_size : sequence, ``(height, width)`` of the 3-channel input.

    Returns
    -------
    keras Model mapping the input image to a same-size 3-channel image
    with sigmoid activation.
    """
    inputs = Input((input_size[0], input_size[1], 3))
    # Stem: full resolution, no batch norm.
    x = Conv2D(32, (3, 3), padding='same', data_format="channels_last")(inputs)
    x = LeakyReLU(0.2)(x)
    skips = [x]
    # Encoder: each stage halves the spatial resolution (stride 2).
    for filters in (64, 128, 256, 512):
        x = Conv2D(filters, (3, 3), strides=(2, 2), padding='same',
                   data_format="channels_last")(x)
        x = LeakyReLU(0.2)(x)
        x = BatchNormalization(axis=-1)(x)
        skips.append(x)
    # Bottleneck: one more downsampling conv, then a stride-1 conv + ReLU.
    x = Conv2D(1024, (3, 3), strides=(2, 2), padding='same',
               data_format="channels_last")(x)
    x = Conv2D(1024, (3, 3), padding='same', data_format="channels_last")(x)
    x = Activation('relu')(x)
    # Decoder: upsample and fuse the matching encoder feature map
    # (deepest skip first).
    for filters, skip in zip((512, 256, 128, 64, 32), reversed(skips)):
        x = concatenate(
            [UpSampling2D(size=(2, 2), data_format="channels_last")(x), skip],
            axis=3)
        x = Conv2D(filters, (3, 3), activation='relu', padding='same',
                   data_format="channels_last")(x)
        x = BatchNormalization(axis=-1)(x)
    out = Conv2D(3, (1, 1), activation='sigmoid',
                 data_format="channels_last")(x)
    # 'inputs'/'outputs' (plural): the singular keyword arguments are
    # deprecated and removed in current Keras; this matches
    # generator_discriminator's usage.
    fcn = Model(inputs=inputs, outputs=out)
    return fcn
def get_generator(img_shape):
    """Build the 3-layer generator for ``img_shape`` and print its summary."""
    net = generator_3layer(img_shape)
    net.summary()
    return net
def get_discriminator(img_shape, disc_shape, patch_num):
    """Build the patch discriminator and print its layer summary."""
    net = discriminator(img_shape, disc_shape, patch_num)
    net.summary()
    return net
def get_GAN(generator, discriminator, img_shape, patch_size):
    """Return the combined generator + patch-discriminator (DCGAN) model."""
    model = generator_discriminator(generator, discriminator, img_shape, patch_size)
return model | [
"keras.layers.core.Lambda",
"keras.layers.core.Dense",
"keras.layers.LeakyReLU",
"numpy.log",
"keras.layers.convolutional.UpSampling2D",
"keras.layers.core.Activation",
"keras.models.Model",
"keras.layers.Concatenate",
"keras.layers.convolutional.Conv2D",
"keras.layers.core.Flatten",
"keras.laye... | [((928, 987), 'keras.layers.Input', 'Input', ([], {'shape': 'disc_img_shape', 'name': '"""discriminator_dct_input"""'}), "(shape=disc_img_shape, name='discriminator_dct_input')\n", (933, 987), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1266, 1331), 'keras.layers.Input', 'Input', ([], {'shape': 'disc_raw_img_shape', 'name': '"""discriminator_image_input"""'}), "(shape=disc_raw_img_shape, name='discriminator_image_input')\n", (1271, 1331), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1905, 1994), 'keras.models.Model', 'Model', ([], {'inputs': '[generated_patch_input, raw_patch_input]', 'outputs': '[x]', 'name': '"""PatchGAN"""'}), "(inputs=[generated_patch_input, raw_patch_input], outputs=[x], name=\n 'PatchGAN')\n", (1910, 1994), False, 'from keras.models import Model\n'), ((2319, 2404), 'keras.models.Model', 'Model', ([], {'inputs': '(list_input + list_raw_input)', 'outputs': '[x_out]', 'name': '"""Discriminator"""'}), "(inputs=list_input + list_raw_input, outputs=[x_out], name='Discriminator'\n )\n", (2324, 2404), False, 'from keras.models import Model\n'), ((2479, 2519), 'keras.layers.Input', 'Input', (['(input_size[0], input_size[1], 3)'], {}), '((input_size[0], input_size[1], 3))\n', (2484, 2519), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((3865, 3898), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'conv8'}), '(input=inputs, output=conv8)\n', (3870, 3898), False, 'from keras.models import Model\n'), ((3964, 4004), 'keras.layers.Input', 'Input', (['(input_size[0], input_size[1], 3)'], {}), '((input_size[0], input_size[1], 3))\n', (3969, 4004), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((5377, 5410), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'conv7'}), '(input=inputs, 
output=conv7)\n', (5382, 5410), False, 'from keras.models import Model\n'), ((5523, 5565), 'keras.layers.Input', 'Input', ([], {'shape': 'img_shape', 'name': '"""DCGAN_input"""'}), "(shape=img_shape, name='DCGAN_input')\n", (5528, 5565), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((6382, 6467), 'keras.models.Model', 'Model', ([], {'inputs': '[raw_input]', 'outputs': '[generated_image, DCGAN_output]', 'name': '"""DCGAN"""'}), "(inputs=[raw_input], outputs=[generated_image, DCGAN_output], name='DCGAN'\n )\n", (6387, 6467), False, 'from keras.models import Model\n'), ((6587, 6627), 'keras.layers.Input', 'Input', (['(input_size[0], input_size[1], 3)'], {}), '((input_size[0], input_size[1], 3))\n', (6592, 6627), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((9079, 9113), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'conv12'}), '(input=inputs, output=conv12)\n', (9084, 9113), False, 'from keras.models import Model\n'), ((1003, 1105), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['list_filters[0]'], {'kernel_size': '(3, 3)', 'strides': '(2, 2)', 'name': '"""disc_conv2d_1"""', 'padding': '"""same"""'}), "(list_filters[0], kernel_size=(3, 3), strides=(2, 2), name=\n 'disc_conv2d_1', padding='same')\n", (1009, 1105), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((1133, 1160), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1151, 1160), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1174, 1188), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1183, 1188), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1353, 1458), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['list_filters[0]'], {'kernel_size': '(3, 3)', 
'strides': '(2, 2)', 'name': '"""dic_dct_conv2d_1"""', 'padding': '"""same"""'}), "(list_filters[0], kernel_size=(3, 3), strides=(2, 2), name=\n 'dic_dct_conv2d_1', padding='same')\n", (1359, 1458), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((1480, 1507), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1498, 1507), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1521, 1535), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1530, 1535), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1801, 1810), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (1808, 1810), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((1822, 1880), 'keras.layers.core.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""discriminator_dense"""'}), "(2, activation='softmax', name='discriminator_dense')\n", (1827, 1880), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((2229, 2288), 'keras.layers.core.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""discriminator_output"""'}), "(2, activation='softmax', name='discriminator_output')\n", (2234, 2288), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((2532, 2566), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (2538, 2566), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((2587, 2601), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2596, 2601), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((2622, 2672), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], 
{'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (3, 3), strides=(2, 2), padding='same')\n", (2628, 2672), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((2692, 2706), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2701, 2706), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((2726, 2753), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2744, 2753), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((2774, 2825), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (3, 3), strides=(2, 2), padding='same')\n", (2780, 2825), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((2845, 2859), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2854, 2859), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((2879, 2906), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2897, 2906), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((2927, 2978), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(256, (3, 3), strides=(2, 2), padding='same')\n", (2933, 2978), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((2998, 3033), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (3004, 3033), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((3053, 3071), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), 
"('relu')\n", (3063, 3071), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((3213, 3267), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (3219, 3267), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((3285, 3312), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (3303, 3312), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((3435, 3488), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (3441, 3488), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((3506, 3533), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (3524, 3533), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((3660, 3713), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (3666, 3713), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((3731, 3758), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (3749, 3758), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((3779, 3847), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'activation': '"""sigmoid"""', 'data_format': '"""channels_last"""'}), "(3, (1, 1), activation='sigmoid', data_format='channels_last')\n", (3785, 3847), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, 
ZeroPadding2D, UpSampling2D\n'), ((4017, 4051), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (4023, 4051), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((4072, 4086), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4081, 4086), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((4107, 4157), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (3, 3), strides=(2, 2), padding='same')\n", (4113, 4157), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((4177, 4191), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4186, 4191), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((4211, 4238), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4229, 4238), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((4259, 4310), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (3, 3), strides=(2, 2), padding='same')\n", (4265, 4310), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((4330, 4344), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4339, 4344), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((4364, 4391), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4382, 4391), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((4714, 4767), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 
3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (4720, 4767), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((4785, 4812), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4803, 4812), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((4935, 4988), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (4941, 4988), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((5006, 5033), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5024, 5033), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((5291, 5359), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'activation': '"""sigmoid"""', 'data_format': '"""channels_last"""'}), "(3, (1, 1), activation='sigmoid', data_format='channels_last')\n", (5297, 5359), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((6641, 6704), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(32, (3, 3), padding='same', data_format='channels_last')\n", (6647, 6704), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((6725, 6739), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (6734, 6739), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((6760, 6839), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': 
'"""channels_last"""'}), "(64, (3, 3), strides=(2, 2), padding='same', data_format='channels_last')\n", (6766, 6839), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((6859, 6873), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (6868, 6873), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((6893, 6920), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (6911, 6920), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((6941, 7026), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(128, (3, 3), strides=(2, 2), padding='same', data_format='channels_last'\n )\n", (6947, 7026), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7041, 7055), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (7050, 7055), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((7075, 7102), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (7093, 7102), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((7123, 7208), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(256, (3, 3), strides=(2, 2), padding='same', data_format='channels_last'\n )\n", (7129, 7208), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7223, 7237), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (7232, 7237), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((7257, 
7284), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (7275, 7284), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((7305, 7390), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(512, (3, 3), strides=(2, 2), padding='same', data_format='channels_last'\n )\n", (7311, 7390), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7405, 7419), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (7414, 7419), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((7439, 7466), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (7457, 7466), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((7487, 7573), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(1024, (3, 3), strides=(2, 2), padding='same', data_format=\n 'channels_last')\n", (7493, 7573), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7588, 7653), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(1024)', '(3, 3)'], {'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(1024, (3, 3), padding='same', data_format='channels_last')\n", (7594, 7653), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7673, 7691), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7683, 7691), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((7833, 7921), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'activation': 
'"""relu"""', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(512, (3, 3), activation='relu', padding='same', data_format=\n 'channels_last')\n", (7839, 7921), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7934, 7961), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (7952, 7961), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((8084, 8172), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(256, (3, 3), activation='relu', padding='same', data_format=\n 'channels_last')\n", (8090, 8172), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8185, 8212), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8203, 8212), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((8335, 8423), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(128, (3, 3), activation='relu', padding='same', data_format=\n 'channels_last')\n", (8341, 8423), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8436, 8463), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8454, 8463), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((8587, 8674), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(64, (3, 3), activation='relu', padding='same', data_format=\n 'channels_last')\n", (8593, 8674), False, 'from 
keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8688, 8715), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8706, 8715), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((8841, 8928), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'data_format': '"""channels_last"""'}), "(32, (3, 3), activation='relu', padding='same', data_format=\n 'channels_last')\n", (8847, 8928), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8942, 8969), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (8960, 8969), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((8992, 9060), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'activation': '"""sigmoid"""', 'data_format': '"""channels_last"""'}), "(3, (1, 1), activation='sigmoid', data_format='channels_last')\n", (8998, 9060), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((1614, 1634), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1625, 1634), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1657, 1706), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['f', '(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(f, (3, 3), strides=(2, 2), padding='same')\n", (1663, 1706), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((1722, 1749), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1740, 1749), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((1765, 1779), 
'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (1774, 1779), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((2161, 2181), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2172, 2181), False, 'from keras.layers import Input, Concatenate, concatenate, LeakyReLU, BatchNormalization\n'), ((742, 767), 'numpy.log', 'np.log', (['disc_img_shape[1]'], {}), '(disc_img_shape[1])\n', (748, 767), True, 'import numpy as np\n'), ((770, 779), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (776, 779), True, 'import numpy as np\n'), ((3122, 3176), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (3134, 3176), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((3344, 3398), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (3356, 3398), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((3569, 3623), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (3581, 3623), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((4623, 4677), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (4635, 4677), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((4844, 4898), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), 
data_format='channels_last')\n", (4856, 4898), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((6018, 6089), 'keras.layers.core.Lambda', 'Lambda', (['(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])'], {}), '(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])\n', (6024, 6089), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((6168, 6239), 'keras.layers.core.Lambda', 'Lambda', (['(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])'], {}), '(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:col_idx[1], :])\n', (6174, 6239), False, 'from keras.layers.core import Flatten, Dense, Activation, Lambda\n'), ((7742, 7796), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (7754, 7796), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((7993, 8047), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (8005, 8047), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8244, 8298), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (8256, 8298), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8495, 8549), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (8507, 8549), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n'), ((8748, 8802), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', 
([], {'size': '(2, 2)', 'data_format': '"""channels_last"""'}), "(size=(2, 2), data_format='channels_last')\n", (8760, 8802), False, 'from keras.layers.convolutional import Conv2D, Deconv2D, ZeroPadding2D, UpSampling2D\n')] |
"""
Tests for leap_ec.distrib.evaluate.
"""
from distributed import Client
import numpy as np
from leap_ec.individual import Individual
from leap_ec.binary_rep.problems import MaxOnes
from leap_ec.distrib.evaluate import evaluate
from leap_ec.global_vars import context
def test_good_eval():
    """Evaluate a well-formed individual through leap_ec.distrib.evaluate.

    Confirms distributed evaluation works in the ordinary, non-failing
    case.
    """
    # Spin up a local dask cluster for the duration of the test.
    with Client() as client:
        # A hand-built individual whose evaluation should succeed:
        # a two-gene all-ones genome under the MaxOnes problem.
        genome = np.array([1, 1])
        individual = Individual(genome, problem=MaxOnes())
        # Ship the evaluation to the cluster and wait for the result.
        future = client.submit(evaluate(context=context), individual)
        result = future.result()
        assert result.fitness == 2
def test_broken_individual_eval():
    """Evaluate an individual that raises during evaluation.

    Such an individual should end up marked as non-viable.

    TODO implement this
    """
    # Planned steps:
    #   1. start a local dask cluster
    #   2. build an individual whose evaluation raises an exception
    #   3. evaluate that individual
    #   4. assert the individual's state is sane given it is non-viable
    pass
| [
"leap_ec.distrib.evaluate.evaluate",
"numpy.array",
"distributed.Client",
"leap_ec.binary_rep.problems.MaxOnes"
] | [((501, 509), 'distributed.Client', 'Client', ([], {}), '()\n', (507, 509), False, 'from distributed import Client\n'), ((665, 681), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (673, 681), True, 'import numpy as np\n'), ((766, 791), 'leap_ec.distrib.evaluate.evaluate', 'evaluate', ([], {'context': 'context'}), '(context=context)\n', (774, 791), False, 'from leap_ec.distrib.evaluate import evaluate\n'), ((723, 732), 'leap_ec.binary_rep.problems.MaxOnes', 'MaxOnes', ([], {}), '()\n', (730, 732), False, 'from leap_ec.binary_rep.problems import MaxOnes\n')] |
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.transformation import optimizer
from dace.transformation.dataflow import GPUTransformMap
import numpy as np
import pytest
# DaCe program: element-wise doubling, B[i] = A[i] * 2 for i in [0, 32).
@dace.program
def prog1(A: dace.float32[32], B: dace.float32[32]):
    # Parallel map over all 32 elements (DaCe dataflow DSL).
    @dace.map
    def work1(i: _[0:32]):
        a << A[i]  # read A[i] into scalar 'a'
        b >> B[i]  # scalar 'b' is written out to B[i]
        b = a * 2.0
# DaCe program: element-wise halving, B[i] = A[i] / 2 for i in [0, 32).
@dace.program
def prog2(A: dace.float32[32], B: dace.float32[32]):
    # Parallel map over all 32 elements (DaCe dataflow DSL).
    @dace.map
    def work2(i: _[0:32]):
        a << A[i]  # read A[i] into scalar 'a'
        b >> B[i]  # scalar 'b' is written out to B[i]
        b = a / 2.0
######################################
@pytest.mark.gpu
def test_multiprogram():
    """Compile and run two GPU programs in the same process.

    prog2 halves what prog1 doubled, so C should reproduce A.
    """
    print('Multi-program CUDA test')
    # Three independent float32 vectors; B and C are overwritten below.
    A, B, C = (np.random.rand(32).astype(np.float32) for _ in range(3))
    # Lower both programs to SDFGs and map them onto the GPU.
    sdfg_double = prog1.to_sdfg()
    sdfg_double.apply_transformations(GPUTransformMap)
    sdfg_half = prog2.to_sdfg()
    sdfg_half.apply_transformations(GPUTransformMap)
    run_double = sdfg_double.compile()
    run_half = sdfg_half.compile()
    # B = 2*A, then C = B/2.
    run_double(A=A, B=B)
    run_half(A=B, B=C)
    error = np.linalg.norm(A - C)
    print('Difference:', error)
    assert error <= 1e-5
if __name__ == '__main__':
    # BUGFIX: previously called undefined `test()`; the test function
    # defined above is `test_multiprogram`.
    test_multiprogram()
| [
"numpy.random.rand",
"numpy.linalg.norm"
] | [((1063, 1084), 'numpy.linalg.norm', 'np.linalg.norm', (['(A - C)'], {}), '(A - C)\n', (1077, 1084), True, 'import numpy as np\n'), ((681, 699), 'numpy.random.rand', 'np.random.rand', (['(32)'], {}), '(32)\n', (695, 699), True, 'import numpy as np\n'), ((727, 745), 'numpy.random.rand', 'np.random.rand', (['(32)'], {}), '(32)\n', (741, 745), True, 'import numpy as np\n'), ((773, 791), 'numpy.random.rand', 'np.random.rand', (['(32)'], {}), '(32)\n', (787, 791), True, 'import numpy as np\n')] |
import numpy as np
import scipy.io
from fealpy.mesh import TriangleMesh
from fealpy.writer import MeshWriter
def mat2vtk(mfile, vfile):
    """Convert a MATLAB triangle mesh file (`node`, `elem` arrays) to a VTK file."""
    mat = scipy.io.loadmat(mfile)
    node = np.array(mat['node'], dtype=np.float64)
    # MATLAB element indices are 1-based; shift to 0-based.
    cell = np.array(mat['elem'] - 1, dtype=np.int_)
    writer = MeshWriter(TriangleMesh(node, cell))
    writer.write(fname=vfile)
| [
"fealpy.writer.MeshWriter",
"numpy.array",
"fealpy.mesh.TriangleMesh"
] | [((186, 226), 'numpy.array', 'np.array', (["data['node']"], {'dtype': 'np.float64'}), "(data['node'], dtype=np.float64)\n", (194, 226), True, 'import numpy as np\n'), ((238, 279), 'numpy.array', 'np.array', (["(data['elem'] - 1)"], {'dtype': 'np.int_'}), "(data['elem'] - 1, dtype=np.int_)\n", (246, 279), True, 'import numpy as np\n'), ((291, 315), 'fealpy.mesh.TriangleMesh', 'TriangleMesh', (['node', 'cell'], {}), '(node, cell)\n', (303, 315), False, 'from fealpy.mesh import TriangleMesh\n'), ((329, 345), 'fealpy.writer.MeshWriter', 'MeshWriter', (['mesh'], {}), '(mesh)\n', (339, 345), False, 'from fealpy.writer import MeshWriter\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
Author(s): <NAME>
See LICENCE.txt for licensing and contact information.
"""
__all__ = ['ColoredRenderer', 'TexturedRenderer']
import numpy as np
import pdb
import cv2
import time
import platform
import scipy.sparse as sp
from copy import deepcopy
from opendr import common
from opendr.topology import get_vertices_per_edge, get_faces_per_edge
# if platform.system()=='Darwin':
# from opendr.contexts.ctx_mac import OsContext
# else:
# from opendr.contexts.ctx_mesa import OsContext
import OpenGL.GL as GL
import OpenGL.GL.shaders as shaders
from OpenGL.arrays import vbo
from PIL import Image
# import pdb
import matplotlib.pyplot as plt
from chumpy import *
# from opendr.contexts._constants import *
from chumpy.utils import row, col
import time
pixel_center_offset = 0.5
class BaseRenderer(Ch):
terms = ['f', 'frustum','overdraw', 'win', 'f_list', 'v_list', 'vn_list', 'vc_list']
dterms = ['camera', 'v']
    def makeCurrentContext(self):
        """Bind this renderer's GL context (GLFW window or OSMesa buffer) to the calling thread."""
        if self.glMode == 'glfw':
            import glfw
            glfw.make_context_current(self.win)
        else:
            # Software (OSMesa) path: re-attach the context to our pixel buffer.
            from OpenGL import arrays
            from OpenGL.raw.osmesa import mesa
            mesa.OSMesaMakeCurrent(self.ctx, GL.GLuint(self.mesap), GL.GL_UNSIGNED_BYTE, self.frustum['width'], self.frustum['height'])
def clear(self):
try:
self.win
except:
# print ("Clearing when not initialized.")
return
if self.win:
try:
# print ("Clearing base renderer.")
GL.glDeleteProgram(self.colorProgram)
self.makeCurrentContext()
self.vbo_indices.set_array(np.array([]))
self.vbo_indices.bind()
self.vbo_indices.unbind()
self.vbo_indices.delete()
self.vbo_indices_range.set_array(np.array([]))
self.vbo_indices_range.bind()
self.vbo_indices_range.unbind()
self.vbo_indices_range.delete()
self.vbo_indices_dyn.set_array(np.array([]))
self.vbo_indices_dyn.bind()
self.vbo_indices_dyn.unbind()
self.vbo_indices_dyn.delete()
self.vbo_verts.set_array(np.array([]))
self.vbo_verts.bind()
self.vbo_verts.unbind()
self.vbo_verts.delete()
self.vbo_verts_face.set_array(np.array([]))
self.vbo_verts_face.bind()
self.vbo_verts_face.unbind()
self.vbo_verts_face.delete()
self.vbo_verts_dyn.set_array(np.array([]))
self.vbo_verts_dyn.bind()
self.vbo_verts_dyn.unbind()
self.vbo_verts_dyn.delete()
self.vbo_colors_ub.set_array(np.array([]))
self.vbo_colors_ub.bind()
self.vbo_colors_ub.unbind()
self.vbo_colors_ub.delete()
self.vbo_colors.set_array(np.array([]))
self.vbo_colors.bind()
self.vbo_colors.unbind()
self.vbo_colors.delete()
self.vbo_colors_face.set_array(np.array([]))
self.vbo_colors_face.bind()
self.vbo_colors_face.unbind()
self.vbo_colors_face.delete()
GL.glDeleteVertexArrays(1, [self.vao_static.value])
GL.glDeleteVertexArrays(1, [self.vao_static_face.value])
GL.glDeleteVertexArrays(1, [self.vao_dyn.value])
GL.glDeleteVertexArrays(1, [self.vao_dyn_ub.value])
GL.glDeleteRenderbuffers(1, [int(self.render_buf)])
GL.glDeleteRenderbuffers(1, [int(self.z_buf)])
if self.msaa:
GL.glDeleteRenderbuffers(1, [int(self.render_buf_ms)])
GL.glDeleteRenderbuffers(1, [int(self.z_buf_ms)])
GL.glDeleteFramebuffers(1, [int(self.fbo)])
GL.glDeleteFramebuffers(1, [int(self.fbo_noms)])
if self.msaa:
GL.glDeleteFramebuffers(1, [int(self.fbo_ms)])
# print("Finished clearning base renderer")
except:
pdb.set_trace()
def initGL(self):
try:
self.frustum
self.f
self.v
self.vc
self.glMode
except:
print ("Necessary variables have not been set (frustum, f, v, or vc).")
return
if self.glMode == 'glfw':
import glfw
glfw.init()
print("Initializing GLFW.")
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
# glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL.GL_TRUE)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.DEPTH_BITS,32)
glfw.window_hint(glfw.VISIBLE, GL.GL_FALSE)
self.win = glfw.create_window(self.frustum['width'], self.frustum['height'], "test", None, self.sharedWin)
glfw.make_context_current(self.win)
else: #Mesa
from OpenGL import arrays
from OpenGL.raw.osmesa import mesa
try:
self.sharedWin
except:
self.sharedWin = None
self.ctx = mesa.OSMesaCreateContext(GL.GL_RGBA, self.sharedWin)
self.win = self.ctx
self.buf = arrays.GLubyteArray.zeros((self.frustum['height'], self.frustum['width'], 3))
self.mesap = arrays.ArrayDatatype.dataPointer(self.buf)
assert(mesa.OSMesaMakeCurrent(self.ctx, GL.GLuint(self.mesap), GL.GL_UNSIGNED_BYTE, self.frustum['width'], self.frustum['height']))
GL.USE_ACCELERATE = True
GL.glViewport(0, 0, self.frustum['width'], self.frustum['height'])
#FBO_f
self.fbo = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
self.render_buf = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER,self.render_buf)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_DRAW_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buf)
self.z_buf = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf)
self.line_width = 1.
#FBO_f
# if self.msaa and self.glMode == 'glfw':
if self.msaa:
try:
self.nsamples
except:
self.nsamples = 8
try:
self.overdraw
except:
self.overdraw = True
self.fbo_ms = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_ms )
self.render_buf_ms = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER,self.render_buf_ms)
GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER, self.nsamples, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_DRAW_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buf_ms)
self.z_buf_ms = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_ms)
GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_ms)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glDisable(GL.GL_CULL_FACE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print ("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER,0)
self.fbo_noms = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_noms )
self.render_buf_noms = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER,self.render_buf_noms)
GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER,0, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_DRAW_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buf_noms)
self.z_buf_noms = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_noms)
GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER,0 , GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_noms)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glDisable(GL.GL_CULL_FACE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print ("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER,0)
# GL.glClear(GL.GL_COLOR_BUFFER_BIT)
# GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
############################
# ENABLE SHADER
FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
// Interpolated values from the vertex shaders
in vec3 theColor;
// Ouput data
out vec3 color;
void main(){
color = theColor;
}""", GL.GL_FRAGMENT_SHADER)
VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
uniform mat4 MVP;
out vec3 theColor;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
theColor = color;
}""", GL.GL_VERTEX_SHADER)
self.colorProgram = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
shaders.glUseProgram(self.colorProgram)
FRAGMENT_SHADER_NOPERSP = shaders.compileShader("""#version 330 core
// Interpolated values from the vertex shaders
in vec3 theColor;
//noperspective in vec3 theColor;
// Ouput data
out vec3 color;
void main(){
color = color.xyz;
}""", GL.GL_FRAGMENT_SHADER)
VERTEX_SHADER_NOPERSP = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
uniform mat4 MVP;
out vec3 theColor;
//noperspective out vec3 theColor;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
theColor = color;
}""", GL.GL_VERTEX_SHADER)
self.colorProgram_noperspective = shaders.compileProgram(VERTEX_SHADER_NOPERSP,FRAGMENT_SHADER_NOPERSP)
# self.colorProgram = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
position_location = GL.glGetAttribLocation(self.colorProgram, 'position')
color_location = GL.glGetAttribLocation(self.colorProgram, 'color')
# color_location_ub = GL.glGetAttribLocation(self.colorProgram, 'color')
self.MVP_location = GL.glGetUniformLocation(self.colorProgram, 'MVP')
#
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
indices = np.array(self.f, dtype=np.uint32)
self.vbo_indices = vbo.VBO(indices, target=GL.GL_ELEMENT_ARRAY_BUFFER)
self.vbo_indices_range = vbo.VBO(np.arange(self.f.size, dtype=np.uint32).ravel(), target=GL.GL_ELEMENT_ARRAY_BUFFER)
self.vbo_indices_dyn = vbo.VBO(indices, target=GL.GL_ELEMENT_ARRAY_BUFFER)
self.vbo_verts = vbo.VBO(np.array(self.v, dtype=np.float32))
# glGenBuffers(1, &vboID);
# glBindBuffer(GL_VERTEX_ARRAY, vboID);
# glBufferData(GL_VERTEX_ARRAY, 3 * sizeof(Vertex), &vertices[0], GL_STATIC_DRAW);
# glBindBuffer(GL_VERTEX_ARRAY, NULL);
self.vbo_verts_face = vbo.VBO(self.verts_by_face.astype(np.float32))
self.vbo_verts_dyn = vbo.VBO(np.array(self.v, dtype=np.float32))
self.vbo_colors = vbo.VBO(np.array(self.vc, dtype=np.float32))
self.vbo_colors_face = vbo.VBO(np.array(self.vc_by_face, dtype=np.float32))
self.vao_static = GL.GLuint(0)
GL.glGenVertexArrays(1, self.vao_static)
GL.glBindVertexArray(self.vao_static)
self.vbo_indices.bind()
self.vbo_verts.bind()
GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
self.vbo_colors.bind()
GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
self.vao_static_face = GL.GLuint(0)
GL.glGenVertexArrays(1, self.vao_static_face)
GL.glBindVertexArray(self.vao_static_face)
#Can arrays be empty?
self.vbo_indices_range.bind()
self.vbo_verts_face.bind()
GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
self.vbo_colors_face.bind()
GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
self.vao_dyn = GL.GLuint(0)
GL.glGenVertexArrays(1, self.vao_dyn)
GL.glBindVertexArray(self.vao_dyn)
#Can arrays be empty?
self.vbo_indices_dyn.bind()
self.vbo_verts_dyn.bind()
GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
self.vbo_colors.bind()
GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
self.vao_dyn_ub = GL.GLuint(0)
GL.glGenVertexArrays(1, self.vao_dyn_ub)
GL.glBindVertexArray(self.vao_dyn_ub)
self.vbo_indices_dyn.bind()
self.vbo_verts_dyn.bind()
GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
self.vbo_colors_ub = vbo.VBO(np.array(np.array(self.vc, dtype=np.uint8)))
self.vbo_colors_ub.bind()
GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(color_location, 3, GL.GL_UNSIGNED_BYTE, GL.GL_TRUE, 0, None)
self.initialized = True
print('glValidateProgram: ' + str(GL.glValidateProgram(self.colorProgram)))
print('glGetProgramInfoLog ' + str(GL.glGetProgramInfoLog(self.colorProgram)))
print('GL_MAX_VERTEX_ATTRIBS: ' + str(GL.glGetInteger(GL.GL_MAX_VERTEX_ATTRIBS)))
print (GL.glGetError())
@depends_on('f') # not v: specifically, it depends only on the number of vertices, not on the values in v
def primitives_per_edge(self):
v=self.v.r.reshape((-1,3))
f=self.f
vpe = get_vertices_per_edge(v, f)
fpe = get_faces_per_edge(v, f, vpe)
return fpe, vpe
@depends_on('f', 'frustum', 'camera', 'overdraw')
def barycentric_image(self):
self._call_on_changed()
return self.draw_barycentric_image(self.boundarybool_image if self.overdraw else None)
@depends_on(terms+dterms)
def boundaryid_image(self):
self._call_on_changed()
return self.draw_boundaryid_image( self.v.r, self.f, self.vpe, self.fpe, self.camera)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def visibility_image(self):
self._call_on_changed()
return self.draw_visibility_image(self.v.r, self.f, self.boundarybool_image if self.overdraw else None)
@depends_on(terms+dterms)
def boundarybool_image(self):
self._call_on_changed()
boundaryid_image = self.boundaryid_image
return np.asarray(boundaryid_image != 4294967295, np.uint32).reshape(boundaryid_image.shape)
@depends_on(terms+dterms)
def boundarybool_image_aa(self):
self._call_on_changed()
boundaryid_image = self.boundaryid_image_aa
return np.asarray(boundaryid_image != 4294967295, np.uint32).reshape(boundaryid_image.shape)
@property
def shape(self):
raise NotImplementedError('Should be implemented in inherited class.')
# @v.setter
# def v(self, newval):
# self.camera.v = newval
@property
def vpe(self):
return self.primitives_per_edge[1]
@depends_on('f', 'v')
def verts_by_face(self):
verts_by_face = self.v.reshape((-1,3))[self.f.ravel()]
return np.asarray(verts_by_face, dtype=np.float64, order='C')
@depends_on('f', 'v')
def vc_by_face(self):
return np.asarray(np.tile(np.eye(3)[:self.f.shape[1], :], (self.verts_by_face.shape[0]//self.f.shape[1], 1)), dtype=np.float64, order='C')
@depends_on('f', 'v', 'vn')
def tn(self):
from opendr.geometry import TriNormals
# return TriNormals(self.v, self.f).r.reshape((-1,3))
tn = np.mean(self.vn.r[self.f.ravel()].reshape([-1, 3, 3]), 1)
return tn
@property
def fpe(self):
return self.primitives_per_edge[0]
@depends_on(terms+dterms)
def boundary_neighborhood(self):
return common.boundary_neighborhood(self.boundarybool_image)
def _setup_camera(self, cx, cy, fx, fy, w, h, near, far, view_matrix, k):
k = np.asarray(k)
#Make Projection matrix.
self.projectionMatrix = np.array([[fx/cx, 0,0,0], [0, fy/cy, 0,0], [0,0, -(near + far)/(far - near), -2*near*far/(far-near)], [0,0, -1, 0]], dtype=np.float32)
# self.projectionMatrix = np.array([[fx/w, 0,0,0], [0, fy/cy, 0,0], [0,0, -(near + far)/(far - near), -2*near*far/(far-near)], [0,0,-1,1]], dtype=np.float64)
    def draw_colored_verts(self, vc):
        """Draw the static mesh with per-vertex colors `vc` into the bound FBO.

        vc is (n_verts, c); if c != 3 the available channels are replicated
        out to three columns before upload.
        """
        GL.glUseProgram(self.colorProgram)
        GL.glDisable(GL.GL_CULL_FACE)
        if vc.shape[1] != 3:
            # Replicate available channels (modulo index) into 3 columns.
            vc = np.vstack((vc[:,0], vc[:,1%vc.shape[1]], vc[:,2%vc.shape[1]])).T.copy()
        assert(vc.shape[1]==3)
        GL.glBindVertexArray(self.vao_static)
        self.vbo_colors.set_array(vc.astype(np.float32))
        self.vbo_colors.bind()
        # MVP = projection @ (openglMat @ [view_matrix; 0 0 0 1]).
        view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
        GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
        GL.glDrawElements(GL.GL_TRIANGLES, len(self.vbo_indices)*3, GL.GL_UNSIGNED_INT, None)
        GL.glDisable(GL.GL_CULL_FACE)
def draw_noncolored_verts(self, v, f):
if self.msaa:
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
else:
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
# GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
shaders.glUseProgram(self.colorProgram)
GL.glBindVertexArray(self.vao_static)
self.vbo_colors.set_array(np.zeros_like(v.reshape((-1,3))[f.ravel()], dtype=np.float32, order='C'))
self.vbo_color.bind()
GL.glDrawElements(GL.GL_TRIANGLES, len(self.vbo_indices)*3, GL.GL_UNSIGNED_INT, None)
def draw_edge_visibility(self, v, e, f, hidden_wireframe=True):
"""Assumes camera is set up correctly in gl context."""
shaders.glUseProgram(self.colorProgram)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glDepthMask(GL.GL_TRUE)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
GL.glPolygonOffset(1, 1)
self.draw_colored_verts(np.zeros_like(self.vc.r))
GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
# GL.glClear(GL.GL_COLOR_BUFFER_BIT)
ec = np.arange(1, len(e)+1)
ec = np.tile(ec.reshape((-1,1)), (1, 3))
ec[:, 0] = ec[:, 0] & 255
ec[:, 1] = (ec[:, 1] >> 8 ) & 255
ec[:, 2] = (ec[:, 2] >> 16 ) & 255
ec = np.asarray(ec, dtype=np.uint8)
# GL.glDepthFunc(GL.GL_GREATER)
# GL.glEnable(GL.GL_POLYGON_OFFSET_LINE)
# GL.glPolygonOffset(-10000.0, -10000.0)
# GL.glDepthMask(GL.GL_FALSE)
# self.projectionMatrix[2, 2] += 0.0000001
GL.glDepthFunc(GL.GL_LEQUAL)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
self.draw_colored_primitives(self.vao_dyn_ub, v, e, ec)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glDepthFunc(GL.GL_LESS)
# self.projectionMatrix[2, 2] -= 0.0000001
# GL.glDisable(GL.GL_POLYGON_OFFSET_LINE)
# GL.glDepthMask(GL.GL_TRUE)
# if hidden_wireframe:
# GL.glEnable(GL.GL_DEPTH_TEST)
# GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
# #Pol change it to a smaller number to avoid double edges in my teapot.
# GL.glPolygonOffset(1.0, 1.0)
# GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
# # self.draw_colored_primitives(self.vao_dyn_ub, v, f, fc=np.zeros(f.shape).astype(np.uint8))
# self.draw_colored_verts(np.zeros_like(self.vc.r))
# # self.draw_colored_primitives(self.vaoub, v, e, np.zeros_like(ec).astype(np.uint8))
# # self.projectionMatrix[2,2] -= delta
# GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
raw = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['height'],3).astype(np.uint32))
raw = raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256 - 1
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
return raw
def draw_edge_visibility_aa(self, v, e, f, hidden_wireframe=True):
"""Assumes camera is set up correctly in gl context."""
shaders.glUseProgram(self.colorProgram)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glDepthMask(GL.GL_TRUE)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
GL.glPolygonOffset(1, 1)
self.draw_colored_verts(np.zeros_like(self.vc.r))
GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
# GL.glClear(GL.GL_COLOR_BUFFER_BIT)
ec = np.arange(1, len(e)+1)
ec = np.tile(ec.reshape((-1,1)), (1, 3))
ec[:, 0] = ec[:, 0] & 255
ec[:, 1] = (ec[:, 1] >> 8 ) & 255
ec[:, 2] = (ec[:, 2] >> 16 ) & 255
ec = np.asarray(ec, dtype=np.uint8)
ec = np.ones_like(ec, dtype=np.uint8)*255
# GL.glDepthFunc(GL.GL_GREATER)
# GL.glEnable(GL.GL_POLYGON_OFFSET_LINE)
# GL.glPolygonOffset(-10000.0, -10000.0)
# GL.glDepthMask(GL.GL_FALSE)
# self.projectionMatrix[2, 2] += 0.0000001
GL.glDepthFunc(GL.GL_LEQUAL)
GL.glEnable(GL.GL_MULTISAMPLE)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glEnable(GL.GL_LINE_SMOOTH)
GL.glEnable(GL.GL_BLEND)
# GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)
GL.glLineWidth(1)
self.draw_colored_primitives(self.vao_dyn_ub, v, e, ec)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glLineWidth(self.line_width)
GL.glDisable(GL.GL_MULTISAMPLE)
GL.glDisable(GL.GL_LINE_SMOOTH)
GL.glDisable(GL.GL_BLEND)
GL.glDepthFunc(GL.GL_LESS)
# self.projectionMatrix[2, 2] -= 0.0000001
# GL.glDisable(GL.GL_POLYGON_OFFSET_LINE)
# GL.glDepthMask(GL.GL_TRUE)
# if hidden_wireframe:
# GL.glEnable(GL.GL_DEPTH_TEST)
# GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
# #Pol change it to a smaller number to avoid double edges in my teapot.
# GL.glPolygonOffset(1.0, 1.0)
# GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
# # self.draw_colored_primitives(self.vao_dyn_ub, v, f, fc=np.zeros(f.shape).astype(np.uint8))
# self.draw_colored_verts(np.zeros_like(self.vc.r))
# # self.draw_colored_primitives(self.vaoub, v, e, np.zeros_like(ec).astype(np.uint8))
# # self.projectionMatrix[2,2] -= delta
# GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
raw = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['height'],3).astype(np.uint32))
raw = raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256
plt.imsave('raw.png',raw)
import ipdb; ipdb.set_trace()
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
return raw
    # this assumes that fc is either "by faces" or "verts by face", not "by verts"
    def draw_colored_primitives(self, vao, v, f, fc=None):
        """Draw primitives `f` (edges if 2 columns, triangles if 3) over
        vertices `v` with colors `fc`, into self.fbo.

        fc may be per-primitive (repeated out to the corners) or per-corner;
        float colors go through vbo_colors, uint8 colors through
        vbo_colors_ub.  fc=None draws in black.
        """
        GL.glUseProgram(self.colorProgram)
        # Gather one vertex position per primitive corner.
        verts_by_face = np.asarray(v.reshape((-1,3))[f.ravel()], dtype=np.float64, order='C')
        GL.glBindVertexArray(vao)
        self.vbo_verts_dyn.set_array(verts_by_face.astype(np.float32))
        self.vbo_verts_dyn.bind()
        if fc is not None:
            if fc.size == verts_by_face.size:
                vc_by_face = fc
            else:
                # One color per primitive: repeat it for every corner.
                vc_by_face = np.repeat(fc, f.shape[1], axis=0)
            if vc_by_face.size != verts_by_face.size:
                raise Exception('fc must have either rows=(#rows in faces) or rows=(# elements in faces)')
            if isinstance(fc[0,0], np.float32) or isinstance(fc[0,0], np.float64):
                vc_by_face = np.asarray(vc_by_face, dtype=np.float32, order='C')
                self.vbo_colors.set_array(vc_by_face)
                self.vbo_colors.bind()
            elif isinstance(fc[0,0], np.uint8):
                vc_by_face = np.asarray(vc_by_face, dtype=np.uint8, order='C')
                self.vbo_colors_ub.set_array(vc_by_face)
                self.vbo_colors_ub.bind()
            else:
                raise Exception('Unknown color type for fc')
        else:
            self.vbo_colors.set_array(np.zeros_like(verts_by_face, dtype=np.float32))
            self.vbo_colors.bind()
        if f.shape[1]==2:
            primtype = GL.GL_LINES
        else:
            primtype = GL.GL_TRIANGLES
        self.vbo_indices_dyn.set_array(np.arange(f.size, dtype=np.uint32).ravel())
        self.vbo_indices_dyn.bind()
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        # MVP = projection @ (openglMat @ [view_matrix; 0 0 0 1]).
        view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
        GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
        GL.glDrawElements(primtype, len(self.vbo_indices_dyn), GL.GL_UNSIGNED_INT, None)
        #Pol: FIX THIS (UNCOMMENT)
        if primtype == GL.GL_LINES:
            # NOTE(review): this reloads the dynamic VBOs with each edge's
            # endpoints swapped, but the corresponding re-draw call is
            # commented out below -- the only lasting effect is that the
            # flipped data stays bound.  Confirm intent before uncommenting.
            f = np.fliplr(f).copy()
            verts_by_edge = v.reshape((-1,3))[f.ravel()]
            verts_by_edge = np.asarray(verts_by_edge, dtype=np.float32, order='C')
            self.vbo_verts_dyn.set_array(verts_by_edge)
            self.vbo_verts_dyn.bind()
            self.vbo_indices_dyn.set_array(np.arange(f.size, dtype=np.uint32).ravel())
            self.vbo_indices_dyn.bind()
            # GL.glDrawElements(GL.GL_LINES, len(self.vbo_indices_dyn), GL.GL_UNSIGNED_INT, None)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    def compute_vpe_boundary_idxs(self, v, f, camera, fpe):
        """Find silhouette edges: edges whose two adjacent faces face the
        camera differently.

        Returns (silhouette_edge_indices, per_face_front_facing_mask) and
        caches self.vis_silhouette_face: for each silhouette edge, which of
        its two faces is the front-facing one.
        """
        # Figure out which edges are on pairs of differently visible triangles
        # Camera center in world coordinates: -R^T * t.
        campos = -cv2.Rodrigues(camera.rt.r)[0].T.dot(camera.t.r)
        rays_to_verts = v.reshape((-1,3)) - row(campos)
        # Un-normalized ray to each face: sum of the rays to its three vertices.
        rays_to_faces = rays_to_verts.take(f[:,0],axis=0) +rays_to_verts.take(f[:,1],axis=0) +rays_to_verts.take(f[:,2],axis=0)
        # dot(ray, mean face normal); the sign encodes facing (the variable
        # name indicates positive == facing away from the camera).
        faces_invisible = np.sum(rays_to_faces * self.tn, axis=1)
        # Product of the two adjacent faces' values: <= 0 means they disagree
        # in sign, i.e. the edge lies on the silhouette.
        dps = faces_invisible.take(fpe[:,0]) * faces_invisible.take(fpe[:,1])
        silhouette_edges = np.asarray(np.nonzero(dps<=0.)[0], np.uint32)
        self.vis_silhouette_face = np.c_[faces_invisible.take(fpe[:, 0])[dps <= 0.], faces_invisible.take(fpe[:, 1])[dps <= 0.]] < 0
        return silhouette_edges, faces_invisible < 0
def draw_boundaryid_image(self, v, f, vpe, fpe, camera):
GL.glUseProgram(self.colorProgram)
if False:
visibility = self.draw_edge_visibility(v, vpe, f, hidden_wireframe=True)
return visibility
if True:
#try:
view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT);
silhouette_edges, faces_facing_camera = self.compute_vpe_boundary_idxs(v, f, camera, fpe)
# self.faces_facing_camera = faces_facing_camera
self.silhouette_edges = silhouette_edges
lines_e = vpe[silhouette_edges]
self.lines_e = lines_e
lines_v = v
if len(lines_e)==0:
return np.ones((self.frustum['height'], self.frustum['width'])).astype(np.int32) * 4294967295
# fpe = fpe[np.any(np.in1d(fpe, np.unique(self.visibility_image[self.visibility_image != 4294967295])).reshape([-1, 2]), 1)]
visibility = self.draw_edge_visibility(lines_v, lines_e, f, hidden_wireframe=True)
visibility_edge = visibility.copy()
# plt.imsave("opendr_boundary_edge_visibility.png", visibility)
shape = visibility.shape
visibility = visibility.ravel()
visible = np.nonzero(visibility.ravel() != 4294967295)[0]
visibility[visible] = silhouette_edges.take(visibility.take(visible))
self.frontFacingEdgeFaces = np.zeros([visibility_edge.shape[0],visibility_edge.shape[1], 2]).astype(np.int32).reshape([-1,2])
self.frontFacingEdgeFaces[visible] = self.vis_silhouette_face[visibility_edge.ravel().take(visible)]
# plt.imsave("opendr_boundary_edge_visibility_result.png", visibility.reshape(shape))
return visibility.reshape(shape)
def draw_boundaryid_image_aa(self, v, f, vpe, fpe, camera):
GL.glUseProgram(self.colorProgram)
if False:
visibility = self.draw_edge_visibility(v, vpe, f, hidden_wireframe=True)
return visibility
if True:
#try:
view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT);
silhouette_edges, faces_facing_camera = self.compute_vpe_boundary_idxs(v, f, camera, fpe)
# self.faces_facing_camera = faces_facing_camera
self.silhouette_edges = silhouette_edges
lines_e = vpe[silhouette_edges]
self.lines_e = lines_e
lines_v = v
if len(lines_e)==0:
return np.ones((self.frustum['height'], self.frustum['width'])).astype(np.int32) * 4294967295
# fpe = fpe[np.any(np.in1d(fpe, np.unique(self.visibility_image[self.visibility_image != 4294967295])).reshape([-1, 2]), 1)]
visibility = self.draw_edge_visibility_aa(lines_v, lines_e, f, hidden_wireframe=True)
visibility_edge = visibility.copy()
# plt.imsave("opendr_boundary_edge_visibility.png", visibility)
shape = visibility.shape
visibility = visibility.ravel()
visible = np.nonzero(visibility.ravel() != 4294967295)[0]
visibility[visible] = silhouette_edges.take(visibility.take(visible))
self.frontFacingEdgeFaces = np.zeros([visibility_edge.shape[0],visibility_edge.shape[1], 2]).astype(np.int32).reshape([-1,2])
self.frontFacingEdgeFaces[visible] = self.vis_silhouette_face[visibility_edge.ravel().take(visible)]
# plt.imsave("opendr_boundary_edge_visibility_result.png", visibility.reshape(shape))
return visibility.reshape(shape)
def draw_visibility_image(self, v, f, boundarybool_image=None):
    """Render a per-pixel face-visibility image.

    Each pixel holds the index of the face visible at that pixel, with
    4294967295 (2**32 - 1) marking background.  When ``boundarybool_image``
    is given, a second wireframe pass is composited onto boundary pixels so
    they map to the face whose edge crosses them.

    :param v: vertex array (converted with np.asarray).
    :param f: face index array.
    :param boundarybool_image: optional 0/1 mask of boundary pixels.
    :returns: (height, width) uint32 image of face indices.
    """
    v = np.asarray(v)
    shaders.glUseProgram(self.colorProgram)
    self.makeCurrentContext()
    result = self.draw_visibility_image_internal(v, f)
    if boundarybool_image is None:
        return result
    rr = result.ravel()
    faces_to_draw = np.unique(rr[rr != 4294967295])
    if len(faces_to_draw) == 0:
        # Nothing visible at all: return an all-background image.
        result = np.ones((self.frustum['height'], self.frustum['width'])).astype(np.uint32) * 4294967295
        return result
    # Second pass in wireframe mode captures which face owns each edge pixel.
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
    result2 = self.draw_visibility_image_internal(v, f)
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    bbi = boundarybool_image
    result2 = result2.reshape(result.shape[:2])
    # Composite: wireframe result on boundary pixels, filled result elsewhere.
    # (Removed an unreachable `if False:` debug-visualization block that
    # imported matplotlib and dropped into pdb.)
    return result2 * bbi + result * (1 - bbi)
def draw_visibility_image_internal(self, v, f):
    """Rasterise face ids into the color buffer and decode them.

    Face index i is encoded as the three little-endian bytes of (i + 1),
    rendered flat-shaded, read back, and decoded.  Background pixels decode
    to 4294967295 (0 - 1 wrapped in uint32 space).
    Assumes the camera is set up correctly.
    """
    GL.glUseProgram(self.colorProgram)
    # Attach FBO
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # Encode face id + 1 into the three color bytes (0 is background).
    fc = np.arange(1, len(f)+1)
    fc = np.tile(fc.reshape((-1,1)), (1, 3))
    fc[:, 0] = fc[:, 0] & 255
    fc[:, 1] = (fc[:, 1] >> 8 ) & 255
    fc[:, 2] = (fc[:, 2] >> 16 ) & 255
    fc = np.asarray(fc, dtype=np.uint8)
    self.draw_colored_primitives(self.vao_dyn_ub, v, f, fc)
    # Read image.
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape previously used (height, height, 3); glReadPixels
    # returns width*height*3 bytes, so the second axis must be width or
    # non-square viewports fail to reshape / decode garbage.
    raw = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3).astype(np.uint32))
    # Decode the little-endian bytes and remove the +1 offset.
    return raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256 - 1
def draw_barycentric_image(self, boundarybool_image=None):
    """Render the barycentric-coordinate image.

    Without a boundary mask, the plain filled rendering is returned.
    With a mask, the scene is re-rendered in wireframe mode and blended:
    wireframe values on boundary pixels, filled values everywhere else.
    """
    GL.glDisable(GL.GL_CULL_FACE)
    filled = self.draw_barycentric_image_internal()
    if boundarybool_image is None:
        return filled
    # Overdraw pass: render edges only, then composite on the boundary mask.
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
    wire = self.draw_barycentric_image_internal()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    mask = np.atleast_3d(boundarybool_image)
    return mask * wire + (1. - mask) * filled
def draw_barycentric_image_internal(self):
    """Draw the static-face VAO (whose colors encode barycentric
    coordinates) and read the result back as float64 in [0, 1]."""
    GL.glUseProgram(self.colorProgram)
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
    GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
    GL.glBindVertexArray(self.vao_static_face)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glDrawElements(GL.GL_TRIANGLES if self.f.shape[1]==3 else GL.GL_LINES, len(self.vbo_indices_range), GL.GL_UNSIGNED_INT, None)
    # Read image.
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape previously used (height, height, 3); the second axis
    # must be width, since glReadPixels returns width*height*3 bytes.
    return np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3).astype(np.float64))/255.0
def setup_camera(self, camera):
    """Build the 4x4 OpenGL projection matrix from the camera intrinsics.

    Uses fixed near/far clip planes (0.01 / 10).  Focal lengths are
    normalised by the principal point so the frustum maps onto NDC.
    Stores the result in ``self.projectionMatrix`` as float32.
    """
    near, far = 0.01, 10
    focal = camera.f.r
    center = camera.c.r
    depth_scale = -(near + far) / (far - near)
    depth_offset = -2 * near * far / (far - near)
    self.projectionMatrix = np.array(
        [[focal[0] / center[0], 0, 0, 0],
         [0, focal[1] / center[1], 0, 0],
         [0, 0, depth_scale, depth_offset],
         [0, 0, -1, 0]], dtype=np.float32)
def setup_camera_old(self, camera, frustum):
    """Forward camera intrinsics and frustum bounds to the legacy helper."""
    cx, cy = camera.c.r[0], camera.c.r[1]
    fx, fy = camera.f.r[0], camera.f.r[1]
    self._setup_camera(cx, cy, fx, fy,
                       frustum['width'], frustum['height'],
                       frustum['near'], frustum['far'],
                       camera.view_matrix,
                       camera.k.r)
class ColoredRenderer(BaseRenderer):
    """Differentiable renderer producing an image from per-vertex colors.

    Chumpy-style node: ``terms`` are non-differentiable inputs, ``dterms``
    are differentiable ones.  ``compute_r`` returns the rendered image and
    ``compute_dr_wrt`` returns sparse Jacobians of the image w.r.t. the
    camera, the vertex colors (``vc``) or the background color
    (``bgcolor``).
    """
    terms = 'f', 'frustum', 'background_image', 'overdraw', 'num_channels'
    dterms = 'vc', 'camera', 'bgcolor' , 'v'

    @depends_on('vc')
    def num_channels(self):
        # Channel count tracks the vertex-color array when available.
        if hasattr(self, 'vc'):
            return self.vc.shape[1]
        return 3

    def clear(self):
        super().clear()

    @property
    def shape(self):
        """Output image shape: (H, W) for one channel, else (H, W, C)."""
        if not hasattr(self, 'num_channels'):
            self.num_channels = 3
        if self.num_channels > 1:
            return (self.frustum['height'], self.frustum['width'], self.num_channels)
        else:
            return (self.frustum['height'], self.frustum['width'])

    def compute_r(self):
        return self.color_image

    def compute_dr_wrt(self, wrt):
        """Sparse Jacobian of the rendered image w.r.t. camera, vc or bgcolor;
        returns None for any other input."""
        if wrt is not self.camera and wrt is not self.vc and wrt is not self.bgcolor:
            return None
        visibility = self.visibility_image
        shape = visibility.shape
        color = self.color_image
        # 4294967295 (2**32 - 1) marks background pixels.
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        num_visible = len(visible)
        barycentric = self.barycentric_image
        if wrt is self.camera:
            if self.overdraw:
                # Boundary-aware derivative when edge overdraw is enabled.
                return common.dImage_wrt_2dVerts_bnd(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f, self.boundaryid_image != 4294967295)
            else:
                return common.dImage_wrt_2dVerts(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f)
        elif wrt is self.vc:
            return common.dr_wrt_vc(visible, visibility, self.f, barycentric, self.frustum, self.vc.size, num_channels=self.num_channels)
        elif wrt is self.bgcolor:
            return common.dr_wrt_bgcolor(visibility, self.frustum, num_channels=self.num_channels)

    def on_changed(self, which):
        """React to changed terms: refresh the camera and lazily install
        defaults for num_channels, bgcolor and overdraw."""
        if 'frustum' in which:
            w = self.frustum['width']
            h = self.frustum['height']
        if 'frustum' in which or 'camera' in which:
            self.setup_camera(self.camera)
        if not hasattr(self, 'num_channels'):
            self.num_channels = 3
        if not hasattr(self, 'bgcolor'):
            # Default to a mid-grey background.
            self.bgcolor = Ch(np.array([.5]*self.num_channels))
            which.add('bgcolor')
        if not hasattr(self, 'overdraw'):
            self.overdraw = True

    def flow_to(self, v_next, cam_next=None):
        """Optical flow from the current frame to (v_next, cam_next)."""
        return common.flow_to(self, v_next, cam_next)

    def filter_for_triangles(self, which_triangles):
        """Crop the color image to the bounding box of the given triangle
        ids, zeroing pixels covered by any other triangle."""
        cim = self.color_image
        vim = self.visibility_image+1
        arr = np.zeros(len(self.f)+1)
        arr[which_triangles+1] = 1
        relevant_pixels = arr[vim.ravel()]
        cim2 = cim.copy() * np.atleast_3d(relevant_pixels.reshape(vim.shape))
        relevant_pixels = np.nonzero(arr[vim.ravel()])[0]
        xs = relevant_pixels % vim.shape[1]
        # BUGFIX: integer division; true division yields float row indices,
        # which are invalid for slicing under Python 3.
        ys = relevant_pixels // vim.shape[1]
        return cim2[np.min(ys):np.max(ys), np.min(xs):np.max(xs), :]

    def draw_color_image(self):
        """Rasterise the mesh with vertex colors (MSAA-resolved when
        enabled) and composite over ``background_image`` if present.

        :returns: (H, W, 3) float64 image in [0, 1].
        """
        self.makeCurrentContext()
        self._call_on_changed()
        try:
            GL.glEnable(GL.GL_MULTISAMPLE)
            if hasattr(self, 'bgcolor'):
                # Modulo indexing tolerates fewer than 3 channels.
                GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1%self.num_channels], self.bgcolor.r[2%self.num_channels], 1.)
            # FIXME: this won't work for 2 channels
            GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
            if self.msaa:
                GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
            else:
                GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
            self.draw_colored_verts(self.vc.r)
            # Resolve the (possibly multisampled) buffer into self.fbo.
            if self.msaa:
                GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)
            else:
                GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)
            GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
            GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'], GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
            GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
            GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
            # BUGFIX: reshape previously used (height, height, 3); the
            # second axis must be width for non-square viewports.
            result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3).astype(np.float64))/255.0
            GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
            GL.glDisable(GL.GL_MULTISAMPLE)
            GL.glClearColor(0.,0.,0., 1.)
            if hasattr(self, 'background_image'):
                # Background pixels come from background_image, foreground
                # pixels from the rendering.
                bg_px = np.tile(np.atleast_3d(self.visibility_image) == 4294967295, (1,1,self.num_channels)).squeeze()
                fg_px = 1 - bg_px
                result = bg_px * self.background_image + fg_px * result
            return result
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; drop into the debugger on GL failure.
            import pdb; pdb.set_trace()
class TexturedRenderer(ColoredRenderer):
    """Differentiable renderer with per-mesh UV-mapped textures on top of
    vertex colors.

    Extends ColoredRenderer with a texture shader program, per-mesh
    VAO/VBO/texture lists, and a Jacobian of the image w.r.t. the flat
    ``texture_stack`` of all texture texels.
    """
    terms = 'f', 'frustum', 'vt', 'ft', 'background_image', 'ft_list', 'haveUVs_list', 'textures_list', 'vc_list'
    dterms = 'vc', 'camera', 'bgcolor', 'texture_stack', 'v'

    def clear(self):
        """Release all VBOs, VAOs, textures and the texture shader program."""
        try:
            GL.glFlush()
            GL.glFinish()
            [vbo.set_array(np.array([])) for sublist in self.vbo_indices_mesh_list for vbo in sublist]
            [vbo.bind() for sublist in self.vbo_indices_mesh_list for vbo in sublist]
            [vbo.unbind() for sublist in self.vbo_indices_mesh_list for vbo in sublist]
            [vbo.delete() for sublist in self.vbo_indices_mesh_list for vbo in sublist]
            [vbo.set_array(np.array([])) for vbo in self.vbo_colors_mesh]
            [vbo.bind() for vbo in self.vbo_colors_mesh]
            [vbo.delete() for vbo in self.vbo_colors_mesh]
            [vbo.unbind() for vbo in self.vbo_colors_mesh]
            [vbo.delete() for vbo in self.vbo_verts_mesh]
            [vbo.set_array(np.array([])) for vbo in self.vbo_uvs_mesh]
            [vbo.bind() for vbo in self.vbo_uvs_mesh]
            [vbo.unbind() for vbo in self.vbo_uvs_mesh]
            [vbo.delete() for vbo in self.vbo_uvs_mesh]
            [GL.glDeleteVertexArrays(1, [vao.value]) for sublist in self.vao_tex_mesh_list for vao in sublist]
            self.release_textures()
            if self.glMode == 'glfw':
                import glfw
                glfw.make_context_current(self.win)
            GL.glDeleteProgram(self.colorTextureProgram)
            super().clear()
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            pdb.set_trace()
            print("Program had not been initialized")

    def initGLTexture(self):
        """Compile the texture shader program and build per-mesh VAOs, VBOs
        and GL textures from v_list / vc_list / ft_list / f_list."""
        print("Initializing Texture OpenGL.")
        GL.glLineWidth(1.)
        FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
        // Interpolated values from the vertex shaders
        //#extension GL_EXT_shader_image_load_store : enable
        in vec3 theColor;
        in vec2 UV;
        uniform sampler2D myTextureSampler;
        // Ouput data
        out vec3 color;
        void main(){
            color = theColor * texture2D( myTextureSampler, UV).rgb;
        }""", GL.GL_FRAGMENT_SHADER)
        VERTEX_SHADER = shaders.compileShader("""#version 330 core
        // Input vertex data, different for all executions of this shader.
        layout (location = 0) in vec3 position;
        layout (location = 1) in vec3 color;
        layout(location = 2) in vec2 vertexUV;
        uniform mat4 MVP;
        out vec3 theColor;
        out vec2 UV;
        // Values that stay constant for the whole mesh.
        void main(){
            // Output position of the vertex, in clip space : MVP * position
            gl_Position = MVP* vec4(position,1);
            theColor = color;
            UV = vertexUV;
        }""", GL.GL_VERTEX_SHADER)
        self.colorTextureProgram = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
        # Attribute/uniform locations in the texture program.
        position_location = GL.glGetAttribLocation(self.colorTextureProgram, 'position')
        color_location = GL.glGetAttribLocation(self.colorTextureProgram, 'color')
        uvs_location = GL.glGetAttribLocation(self.colorTextureProgram, 'vertexUV')
        self.MVP_texture_location = GL.glGetUniformLocation(self.colorTextureProgram, 'MVP')
        self.vbo_indices_mesh_list = []
        self.vbo_colors_mesh = []
        self.vbo_verts_mesh = []
        self.vao_tex_mesh_list = []
        self.vbo_uvs_mesh = []
        self.textureID_mesh_list = []
        for mesh in range(len(self.f_list)):
            vbo_verts = vbo.VBO(np.array(self.v_list[mesh]).astype(np.float32))
            vbo_colors = vbo.VBO(np.array(self.vc_list[mesh]).astype(np.float32))
            vbo_uvs = vbo.VBO(np.array(self.ft_list[mesh]).astype(np.float32))
            self.vbo_colors_mesh = self.vbo_colors_mesh + [vbo_colors]
            self.vbo_verts_mesh = self.vbo_verts_mesh + [vbo_verts]
            self.vbo_uvs_mesh = self.vbo_uvs_mesh + [vbo_uvs]
            vaos_mesh = []
            vbo_indices_mesh = []
            textureIDs_mesh = []
            for polygons in range(len(self.f_list[mesh])):
                vao = GL.GLuint(0)
                GL.glGenVertexArrays(1, vao)
                GL.glBindVertexArray(vao)
                vbo_indices = vbo.VBO(np.array(self.f_list[mesh][polygons]).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
                vbo_indices.bind()
                vbo_verts.bind()
                GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
                GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
                vbo_colors.bind()
                GL.glEnableVertexAttribArray(color_location) # from 'location = 1' in shader
                GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
                if self.haveUVs_list[mesh][polygons]:
                    vbo_uvs.bind()
                    GL.glEnableVertexAttribArray(uvs_location) # from 'location = 2' in shader
                    GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
                # Textures:
                texture = None
                if self.haveUVs_list[mesh][polygons]:
                    texture = GL.GLuint(0)
                    GL.glGenTextures( 1, texture )
                    GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
                    GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
                    GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
                    GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
                    GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
                    GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                    # Textures are uploaded V-flipped relative to image rows.
                    image = np.array(np.flipud((self.textures_list[mesh][polygons])), order='C', dtype=np.float32)
                    GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
                    GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
                textureIDs_mesh = textureIDs_mesh + [texture]
                vbo_indices_mesh = vbo_indices_mesh + [vbo_indices]
                vaos_mesh = vaos_mesh + [vao]
            self.textureID_mesh_list = self.textureID_mesh_list + [textureIDs_mesh]
            self.vao_tex_mesh_list = self.vao_tex_mesh_list + [vaos_mesh]
            self.vbo_indices_mesh_list = self.vbo_indices_mesh_list + [vbo_indices_mesh]
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glBindVertexArray(0)
        self.textureID = GL.glGetUniformLocation(self.colorTextureProgram, "myTextureSampler")

    @property
    def shape(self):
        """Rendered image shape: always (H, W, 3)."""
        return (self.frustum['height'], self.frustum['width'], 3)

    @property
    def num_channels(self):
        return 3

    def release_textures(self):
        """Delete every GL texture created in initGLTexture."""
        if hasattr(self, 'textureID_mesh_list'):
            if self.textureID_mesh_list != []:
                for texture_mesh in self.textureID_mesh_list:
                    if texture_mesh != []:
                        for texture in texture_mesh:
                            if texture != None:
                                GL.glDeleteTextures(1, [texture.value])
        self.textureID_mesh_list = []

    def compute_r(self):
        return self.color_image

    @depends_on(dterms+terms)
    def color_image(self):
        """Textured rendering; boundary pixels are overdrawn in line mode
        unless MSAA already antialiases them."""
        self._call_on_changed()
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        no_overdraw = self.draw_color_image(with_vertex_colors=True, with_texture_on=True)
        if not self.overdraw or self.msaa:
            return no_overdraw
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        overdraw = self.draw_color_image()
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        boundarybool_image = self.boundarybool_image
        if self.num_channels > 1:
            boundarybool_image = np.atleast_3d(boundarybool_image)
        return np.asarray((overdraw*boundarybool_image + no_overdraw*(1-boundarybool_image)), order='C')

    def image_mesh_bool(self, meshes):
        """Boolean mask of pixels covered by any of the given mesh ids."""
        self.makeCurrentContext()
        self._call_on_changed()
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        self._call_on_changed()
        GL.glClearColor(0.,0.,0., 1.)
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glUseProgram(self.colorProgram)
        for mesh in meshes:
            self.draw_index(mesh)
        GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        # BUGFIX: reshape previously used (height, height, 3); the second
        # axis must be width for non-square viewports.
        result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3).astype(np.uint32))[:,:,0]
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        return result!=0

    @depends_on(dterms+terms)
    def indices_image(self):
        """Per-pixel mesh-index image (0 is background)."""
        self._call_on_changed()
        self.makeCurrentContext()
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        self._call_on_changed()
        GL.glClearColor(0.,0.,0., 1.)
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glUseProgram(self.colorProgram)
        for index in range(len(self.f_list)):
            self.draw_index(index)
        GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        # BUGFIX: reshape previously used (height, height, 3); the second
        # axis must be width for non-square viewports.
        result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3).astype(np.uint32))[:,:,0]
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        return result

    def draw_index(self, index):
        """Draw mesh ``index`` flat-colored with value index/255 (id pass)."""
        mesh = index
        vbo_color = self.vbo_colors_mesh[mesh]
        vc = self.vc_list[mesh]
        colors = np.array(np.ones_like(vc)*(index)/255.0, dtype=np.float32)
        # Pol: Make a static zero vbo_color to make it more efficient?
        vbo_color.set_array(colors)
        view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
        MVP = np.dot(self.projectionMatrix, view_mtx)
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            vbo_color.bind()
            if self.f.shape[1]==2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawElements(primtype, len(vbo_f)*vbo_f.data.shape[1], GL.GL_UNSIGNED_INT, None)

    def draw_texcoord_image(self, v, f, ft, boundarybool_image=None):
        """Render per-pixel texture coordinates; the third color channel
        encodes the (normalised) texture index for mapping back to texels."""
        self.makeCurrentContext()
        shaders.glUseProgram(self.colorProgram)
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        colors = ft
        # Use the third channel to identify the corresponding texture.
        color3 = np.vstack([np.ones([self.ft_list[mesh].shape[0],1])*mesh for mesh in range(len(self.ft_list))]).astype(np.float32) / len(self.ft_list)
        colors = np.asarray(np.hstack((colors, color3)), np.float64, order='C')
        self.draw_colored_primitives(self.vao_dyn, v, f, colors)
        if boundarybool_image is not None:
            # Overdraw pass for boundary pixels.
            GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
            self.draw_colored_primitives(self.vao_dyn, v, f, colors)
            GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
        GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        # BUGFIX: reshape previously used (height, height, 3); the second
        # axis must be width for non-square viewports.
        result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3)[:,:,:3].astype(np.float64))/255.0
        # Flip V back to image convention.
        result[:,:,1] = 1. - result[:,:,1]
        return result

    def compute_dr_wrt(self, wrt):
        """Extend the ColoredRenderer Jacobians: modulate d/dvc by texture
        colors and add the sparse d/dtexture_stack."""
        result = super().compute_dr_wrt(wrt)
        if wrt is self.vc:
            # Texture-only rendering scales the vertex-color Jacobian.
            cim = self.draw_color_image(with_vertex_colors=False).ravel()
            cim = sp.spdiags(row(cim), [0], cim.size, cim.size)
            result = cim.dot(result)
        elif wrt is self.texture_stack:
            IS = np.nonzero(self.visibility_image.ravel() != 4294967295)[0]
            texcoords, texidx = self.texcoord_image_quantized
            vis_texidx = texidx.ravel()[IS]
            vis_texcoords = texcoords.ravel()[IS]
            JS = vis_texcoords * np.tile(col(vis_texidx), [1,2]).ravel()
            clr_im = self.draw_color_image(with_vertex_colors=True, with_texture_on=False)
            # (Removed an unreachable `if False:` cv2 debug-display block.)
            r = clr_im[:,:,0].ravel()[IS]
            g = clr_im[:,:,1].ravel()[IS]
            b = clr_im[:,:,2].ravel()[IS]
            data = np.concatenate((r,g,b))
            IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
            JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
            return sp.csc_matrix((data, (IS, JS)), shape=(self.r.size, wrt.r.size))
        return result

    def on_changed(self, which):
        """Propagate changes; push new vertex/color arrays into the VBOs and
        re-upload textures when texture_stack changes."""
        super().on_changed(which)
        if 'v' in which:
            for mesh in range(len(self.f_list)):
                self.vbo_verts_mesh[mesh].set_array(np.array(self.v_list[mesh]).astype(np.float32))
                self.vbo_colors_mesh[mesh].set_array(np.array(self.vc_list[mesh]).astype(np.float32))
                self.vbo_verts_mesh[mesh].bind()
                self.vbo_colors_mesh[mesh].bind()
        if 'f' in which:
            self.vbo_indices.set_array(self.f.astype(np.uint32))
            self.vbo_indices.bind()
            self.vbo_indices_range.set_array(np.arange(self.f.size, dtype=np.uint32).ravel())
            self.vbo_indices_range.bind()
        if 'texture_stack' in which:
            if self.initialized:
                textureCoordIdx = 0
                for mesh in range(len(self.f_list)):
                    for polygons in range(len(self.f_list[mesh])):
                        texture = None
                        if self.haveUVs_list[mesh][polygons]:
                            texture = self.textureID_mesh_list[mesh][polygons]
                            GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                            # Update the OpenGL textures with all the textures.
                            # (Inefficient as many might not have changed.)
                            # Slice this texture's span out of the flat stack.
                            image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                            self.textures_list[mesh][polygons] = self.texture_stack[textureCoordIdx:image.size+textureCoordIdx].reshape(image.shape)
                            textureCoordIdx = textureCoordIdx + image.size
                            image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                            # BUGFIX: ndarray.tostring() was removed from
                            # NumPy; tobytes() returns identical bytes.
                            GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                               image.reshape([image.shape[1], image.shape[0], -1]).ravel().tobytes())

    @depends_on('ft', 'textures')
    def mesh_tex_coords(self):
        """Texture coordinates with V flipped to OpenGL convention.
        NOTE(review): mutates self.ft in place (original behaviour kept)."""
        ftidxs = self.ft.ravel()
        data = self.ft
        # Pol: careful with this:
        data[:,1] = 1.0 - 1.0*data[:,1]
        return data

    # Depends on 'f' because vpe/fpe depend on f
    @depends_on( 'ft', 'f')
    def wireframe_tex_coords(self):
        """Per-edge texture coordinates gathered through vertex tex coords."""
        print("wireframe_tex_coords is being computed!")
        # BUGFIX: integer division; a float count is not a valid array shape
        # under Python 3.
        vvt = np.zeros((self.v.r.size//3,2), dtype=np.float64, order='C')
        vvt[self.f.flatten()] = self.mesh_tex_coords
        edata = np.zeros((self.vpe.size,2), dtype=np.float64, order='C')
        edata = vvt[self.ma.ravel()]
        return edata

    @depends_on(dterms+terms)
    def boundaryid_image(self):
        """Boundary-edge id image, rendered with the plain color program."""
        self._call_on_changed()
        self.makeCurrentContext()
        GL.glUseProgram(self.colorProgram)
        result = self.draw_boundaryid_image(self.v.r, self.f, self.vpe, self.fpe, self.camera)
        GL.glUseProgram(self.colorTextureProgram)
        return result

    def draw_color_image(self, with_vertex_colors=True, with_texture_on=True):
        """Rasterise all meshes with optional vertex colors and textures
        (MSAA-resolved), compositing over background_image if present.

        :returns: (H, W, 3) float64 image in [0, 1].
        """
        self.makeCurrentContext()
        self._call_on_changed()
        GL.glEnable(GL.GL_MULTISAMPLE)
        if hasattr(self, 'bgcolor'):
            GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1%self.num_channels], self.bgcolor.r[2%self.num_channels], 1.)
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        if self.msaa:
            GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
        else:
            GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
        MVP = np.dot(self.projectionMatrix, view_mtx)
        for mesh in range(len(self.f_list)):
            vbo_color = self.vbo_colors_mesh[mesh]
            vc = self.vc_list[mesh]
            colors = None
            if with_vertex_colors:
                colors = vc.r.astype(np.float32)
            else:
                # Texture only: neutral white vertex colors.
                colors = np.ones_like(vc).astype(np.float32)
            # Pol: Make a static zero vbo_color to make it more efficient?
            vbo_color.set_array(colors)
            for polygons in np.arange(len(self.f_list[mesh])):
                vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
                vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
                GL.glBindVertexArray(vao_mesh)
                vbo_color.bind()
                if self.f.shape[1]==2:
                    primtype = GL.GL_LINES
                else:
                    primtype = GL.GL_TRIANGLES
                if with_texture_on and self.haveUVs_list[mesh][polygons]:
                    GL.glUseProgram(self.colorTextureProgram)
                    texture = self.textureID_mesh_list[mesh][polygons]
                    GL.glActiveTexture(GL.GL_TEXTURE0)
                    GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                    GL.glUniform1i(self.textureID, 0)
                else:
                    GL.glUseProgram(self.colorProgram)
                GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
                GL.glDrawElements(primtype, len(vbo_f)*vbo_f.data.shape[1], GL.GL_UNSIGNED_INT, None)
        # Resolve the (possibly multisampled) buffer into self.fbo.
        if self.msaa:
            GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)
        else:
            GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'], GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
        GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        # BUGFIX: reshape previously used (height, height, 3); the second
        # axis must be width for non-square viewports.
        result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['width'],3).astype(np.float64))/255.0
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
        GL.glDisable(GL.GL_MULTISAMPLE)
        GL.glClearColor(0.,0.,0., 1.)
        if hasattr(self, 'background_image'):
            bg_px = np.tile(np.atleast_3d(self.visibility_image) == 4294967295, (1,1,3))
            fg_px = 1 - bg_px
            result = bg_px * self.background_image + fg_px * result
        return result

    @depends_on('ft', 'f', 'frustum', 'camera')
    def texcoord_image_quantized(self):
        """Quantise the texcoord image into flat texel indices plus a
        per-pixel texture index."""
        texcoord_image = self.texcoord_image[:,:, :2].copy()
        # Temporary:
        self.texture_image = self.textures_list[0][0].r.copy()
        texcoord_image[:,:,0] *= self.texture_image.shape[1]-1
        texcoord_image[:,:,1] *= self.texture_image.shape[0]-1
        texture_idx = (self.texcoord_image[:,:,2]*len(self.ft_list)).astype(np.uint32)
        texcoord_image = np.round(texcoord_image)
        texcoord_image = texcoord_image[:,:,0] + texcoord_image[:,:,1]*self.texture_image.shape[1]
        return texcoord_image, texture_idx

    def checkBufferNum(self):
        GL.glGenBuffers(1)

    @depends_on('ft', 'f', 'frustum', 'camera')
    def texcoord_image(self):
        """Per-pixel texture-coordinate image (with boundary overdraw when
        enabled)."""
        return self.draw_texcoord_image(self.v.r, self.f, self.ft, self.boundarybool_image if self.overdraw else None)
class AnalyticRenderer(ColoredRenderer):
terms = 'f', 'frustum', 'vt', 'ft', 'background_image', 'overdraw', 'ft_list', 'haveUVs_list', 'textures_list', 'vc_list' , 'imageGT'
dterms = 'vc', 'camera', 'bgcolor', 'texture_stack', 'v'
def __init__(self):
    """Delegate to ColoredRenderer initialisation; no extra state here."""
    super().__init__()
def clear(self):
    """Release every GL resource owned by this renderer: all per-mesh
    VBO lists, VAOs, textures and the texture shader program.

    Best-effort: on any failure (e.g. the GL context was never
    initialized) this drops into the debugger, matching the original
    behaviour.
    """
    def _release_all(vbo_lists):
        # Phase order preserved from the original comprehensions:
        # detach data from ALL buffers, then bind ALL, unbind ALL,
        # and finally delete ALL.
        for sublist in vbo_lists:
            for buf in sublist:
                buf.set_array(np.array([]))
        for sublist in vbo_lists:
            for buf in sublist:
                buf.bind()
        for sublist in vbo_lists:
            for buf in sublist:
                buf.unbind()
        for sublist in vbo_lists:
            for buf in sublist:
                buf.delete()
    try:
        GL.glFlush()
        GL.glFinish()
        # Replaced side-effect list comprehensions with explicit loops.
        _release_all(self.vbo_indices_mesh_list)
        _release_all(self.vbo_colors_mesh)
        _release_all(self.vbo_verts_mesh)
        _release_all(self.vbo_uvs_mesh)
        _release_all(self.vbo_face_ids_list)
        for sublist in self.vao_tex_mesh_list:
            for vao in sublist:
                GL.glDeleteVertexArrays(1, [vao.value])
        self.release_textures()
        if self.glMode == 'glfw':
            import glfw
            glfw.make_context_current(self.win)
        GL.glDeleteProgram(self.colorTextureProgram)
        super().clear()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        import pdb
        pdb.set_trace()
        print("Program had not been initialized")
def initGLTexture(self):
    """Compile the textured-color shader program and build per-mesh GL buffers.

    Creates ``self.colorTextureProgram`` (interpolated vertex color modulated
    by a 2D texture sample) and, for every mesh in ``self.f_list`` and every
    polygon group within it, a VAO with vertex/color/UV/index VBOs plus —
    when the group has UVs — an RGB32F texture uploaded from
    ``self.textures_list``.  Results are accumulated per mesh in
    ``self.vbo_*_mesh`` / ``self.vao_tex_mesh_list`` /
    ``self.textureID_mesh_list`` (one sub-list per mesh).
    """
    print("Initializing Texture OpenGL.")
    # Fragment shader: vertex color modulated by the sampled texture.
    FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
// Interpolated values from the vertex shaders
//#extension GL_EXT_shader_image_load_store : enable
in vec3 theColor;
in vec2 UV;
uniform sampler2D myTextureSampler;
// Ouput data
out vec3 color;
void main(){
color = theColor * texture2D( myTextureSampler, UV).rgb;
}""", GL.GL_FRAGMENT_SHADER)
    # Vertex shader: standard MVP transform; passes color and UV through.
    VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout(location = 2) in vec2 vertexUV;
uniform mat4 MVP;
out vec3 theColor;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
theColor = color;
UV = vertexUV;
}""", GL.GL_VERTEX_SHADER)
    self.colorTextureProgram = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
    #Define the other VAO/VBOs and shaders.
    #Text VAO and bind color, vertex indices AND uvbuffer:
    position_location = GL.glGetAttribLocation(self.colorTextureProgram, 'position')
    color_location = GL.glGetAttribLocation(self.colorTextureProgram, 'color')
    uvs_location = GL.glGetAttribLocation(self.colorTextureProgram, 'vertexUV')
    # color_location_ub = GL.glGetAttribLocation(self.colorProgram, 'color')
    self.MVP_texture_location = GL.glGetUniformLocation(self.colorTextureProgram, 'MVP')
    # Per-mesh accumulators: each entry is a list with one element per polygon group.
    self.vbo_indices_mesh_list = []
    self.vbo_colors_mesh = []
    self.vbo_verts_mesh = []
    self.vao_tex_mesh_list = []
    self.vbo_uvs_mesh = []
    self.textureID_mesh_list = []
    # GL.glEnable(GL.GL_LINE_SMOOTH)
    # GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)
    GL.glLineWidth(2.)
    for mesh in range(len(self.f_list)):
        vaos_mesh = []
        vbo_indices_mesh = []
        vbo_face_ids_mesh = []
        vbo_colors_mesh = []
        vbo_vertices_mesh = []
        vbo_uvs_mesh = []
        textureIDs_mesh = []
        for polygons in range(len(self.f_list[mesh])):
            vao = GL.GLuint(0)
            GL.glGenVertexArrays(1, vao)
            GL.glBindVertexArray(vao)
            f = self.f_list[mesh][polygons]
            # Expand per-vertex attributes to "verts by face" layout so every
            # face corner carries its own attribute row.
            verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_verts = vbo.VBO(np.array(verts_by_face).astype(np.float32))
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_colors = vbo.VBO(np.array(colors_by_face).astype(np.float32))
            uvs_by_face = np.asarray(self.ft_list[mesh].reshape((-1, 2))[f.ravel()], dtype=np.float32, order='C')
            vbo_uvs = vbo.VBO(np.array(uvs_by_face).astype(np.float32))
            vbo_indices = vbo.VBO(np.array(self.f_list[mesh][polygons]).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
            vbo_indices.bind()
            vbo_verts.bind()
            GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
            GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_colors.bind()
            GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
            GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            if self.haveUVs_list[mesh][polygons]:
                vbo_uvs.bind()
                GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
                GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            #Textures:
            # Only polygon groups with UVs get a real texture; others keep None
            # (callers presumably fall back to a white texture — see usage).
            texture = None
            if self.haveUVs_list[mesh][polygons]:
                texture = GL.GLuint(0)
                GL.glGenTextures( 1, texture )
                GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                # Upload flipped vertically: OpenGL's texture origin is bottom-left.
                image = np.array(np.flipud((self.textures_list[mesh][polygons])), order='C', dtype=np.float32)
                GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
                # GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image.reshape([image.shape[1], image.shape[0], -1]).ravel().tostring())
            textureIDs_mesh = textureIDs_mesh + [texture]
            vbo_indices_mesh = vbo_indices_mesh + [vbo_indices]
            vbo_colors_mesh = vbo_colors_mesh + [vbo_colors]
            vbo_vertices_mesh = vbo_vertices_mesh + [vbo_verts]
            vbo_uvs_mesh = vbo_uvs_mesh + [vbo_uvs]
            vaos_mesh = vaos_mesh + [vao]
        self.textureID_mesh_list = self.textureID_mesh_list + [textureIDs_mesh]
        self.vao_tex_mesh_list = self.vao_tex_mesh_list + [vaos_mesh]
        self.vbo_indices_mesh_list = self.vbo_indices_mesh_list + [vbo_indices_mesh]
        self.vbo_colors_mesh = self.vbo_colors_mesh + [vbo_colors_mesh]
        self.vbo_verts_mesh = self.vbo_verts_mesh + [vbo_vertices_mesh]
        self.vbo_uvs_mesh = self.vbo_uvs_mesh + [vbo_uvs_mesh]
    # Leave a clean GL state behind.
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glBindVertexArray(0)
    self.textureID = GL.glGetUniformLocation(self.colorTextureProgram, "myTextureSampler")
def initGL_AnalyticRenderer(self):
    """Set up shaders, FBOs and per-mesh VAOs for the analytic (error) renderer.

    On top of :meth:`initGLTexture`, this builds:

    * ``self.errorTextureProgram`` — renders color plus per-sample screen
      position, face id and barycentric coordinates into 5 MRT attachments
      of the multisample FBO ``self.fbo_ms_errors``.
    * ``self.fetchSamplesProgram`` — a fullscreen pass (point expanded to a
      quad by a geometry shader) that copies one chosen sample of each
      multisample texture into the single-sample FBO ``self.fbo_sample_fetch``.
    * ``self.fbo_errors_nonms`` — a non-multisample FBO used for blits.
    * One VAO per mesh/polygon group carrying vertex, color, UV, face-id and
      barycentric attributes (``self.vao_errors_mesh_list``), plus the
      retaining lists ``self.vbo_face_ids_list`` / ``self.vbo_barycentric_list``.

    Bug fixed relative to the previous revision: the barycentric VBO list
    appended ``vbo_face_ids`` instead of ``vbo_barycentric`` (see loop below).
    """
    self.initGLTexture()
    self.updateRender = True
    self.updateDerivatives = True
    # Force per-sample shading so each MS sample carries its own attributes.
    GL.glEnable(GL.GL_MULTISAMPLE)
    # GL.glHint(GL.GL_MULTISAMPLE_FILTER_HINT_NV, GL.GL_NICEST);
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    GL.glMinSampleShading(1.0)
    VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 colorIn;
layout(location = 2) in vec2 vertexUV;
layout(location = 3) in uint face_id;
layout(location = 4) in vec3 barycentric;
uniform mat4 MVP;
out vec3 theColor;
out vec4 pos;
flat out uint face_out;
out vec3 barycentric_vert_out;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
pos = MVP * vec4(position,1);
//pos = pos4.xyz;
theColor = colorIn;
UV = vertexUV;
face_out = face_id;
barycentric_vert_out = barycentric;
}""", GL.GL_VERTEX_SHADER)
    ERRORS_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
//layout(early_fragment_tests) in;
// Interpolated values from the vertex shaders
in vec3 theColor;
in vec2 UV;
flat in uint face_out;
in vec4 pos;
in vec3 barycentric_vert_out;
layout(location = 3) uniform sampler2D myTextureSampler;
uniform float ww;
uniform float wh;
// Ouput data
layout(location = 0) out vec3 color;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 barycentric1;
layout(location = 4) out vec2 barycentric2;
void main(){
vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;
color = finalColor.rgb;
sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);
sample_face = face_out;
barycentric1 = barycentric_vert_out.xy;
barycentric2 = vec2(barycentric_vert_out.z, 0.);
}""", GL.GL_FRAGMENT_SHADER)
    self.errorTextureProgram = shaders.compileProgram(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)
    FETCH_VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
void main() {}
""", GL.GL_VERTEX_SHADER)
    # Geometry shader: expands a single point into a fullscreen triangle strip.
    FETCH_GEOMETRY_SHADER = shaders.compileShader("""#version 330 core
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
const vec2 data[4] = vec2[]
(
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2( 1.0, -1.0)
);
void main() {
for (int i = 0; i < 4; ++i) {
gl_Position = vec4( data[i], 0.0, 1.0 );
EmitVertex();
}
EndPrimitive();
}""", GL.GL_GEOMETRY_SHADER)
    FETCH_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
layout(location = 2) uniform sampler2DMS colors;
layout(location = 3) uniform sampler2DMS sample_positions;
layout(location = 4) uniform usampler2DMS sample_faces;
layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;
layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;
uniform float ww;
uniform float wh;
uniform int sample;
// Ouput data
layout(location = 0) out vec3 colorFetchOut;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 sample_barycentric1;
layout(location = 4) out vec2 sample_barycentric2;
//out int gl_SampleMask[];
const int all_sample_mask = 0xffff;
void main(){
ivec2 texcoord = ivec2(gl_FragCoord.xy);
colorFetchOut = texelFetch(colors, texcoord, sample).xyz;
sample_pos = texelFetch(sample_positions, texcoord, sample).xy;
sample_face = texelFetch(sample_faces, texcoord, sample).r;
sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;
sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;
}""", GL.GL_FRAGMENT_SHADER)
    # Keep float readbacks unclamped so positions/barycentrics survive glReadPixels.
    GL.glClampColor(GL.GL_CLAMP_READ_COLOR, False)
    # GL.glClampColor(GL.GL_CLAMP_VERTEX_COLOR, False)
    # GL.glClampColor(GL.GL_CLAMP_FRAGMENT_COLOR, False)
    self.fetchSamplesProgram = shaders.compileProgram(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER, FETCH_FRAGMENT_SHADER)
    self.textureGT = GL.GLuint(0)
    # GL.glActiveTexture(GL.GL_TEXTURE1)
    # GL.glGenTextures(1, self.textureGT)
    # GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureGT)
    # self.textureGTLoc = GL.glGetUniformLocation(self.errorTextureProgram, "imageGT")
    # GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
    # GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
    # GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
    # #
    # try:
    #     if self.imageGT.r is not None and self.imageGT.r.size != 0: #if GT image is defined.
    #         image = np.array(np.flipud((self.imageGT.r)), order='C', dtype=np.float32)
    #         GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
    #         GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
    # except:
    #     pass
    # GL.glGenTextures(1, self.textureEdges)
    # GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureEdges)
    # GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
    # GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
    # GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
    # GL.glActiveTexture(GL.GL_TEXTURE0)
    # 1x1 white texture used for meshes without UVs/textures.
    whitePixel = np.ones([1,1,3])
    self.whitePixelTextureID = GL.GLuint(0)
    GL.glGenTextures( 1, self.whitePixelTextureID )
    GL.glBindTexture(GL.GL_TEXTURE_2D, self.whitePixelTextureID)
    image = np.array(np.flipud((whitePixel)), order='C', dtype=np.float32)
    GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
    GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
    # Multisample FBO with 5 MRT attachments (color, sample pos, face id,
    # barycentric xy, barycentric z) plus a depth texture.
    self.fbo_ms_errors = GL.glGenFramebuffers(1)
    GL.glDepthMask(GL.GL_TRUE)
    GL.glEnable(GL.GL_MULTISAMPLE)
    # GL.glHint(GL.GL_MULTISAMPLE_FILTER_HINT_NV, GL.GL_NICEST);
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    GL.glMinSampleShading(1.0)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)
    self.texture_errors_render = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RGB8, self.frustum['width'], self.frustum['height'], False)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)
    self.texture_errors_sample_position = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)
    self.texture_errors_sample_faces = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_R32UI, self.frustum['width'], self.frustum['height'], False)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)
    #
    self.texture_errors_sample_barycentric1 = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1, 0)
    self.texture_errors_sample_barycentric2 = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2, 0)
    self.z_buf_ms_errors = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'], False)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
    # GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)
    # self.z_buf_ms_errors = GL.glGenRenderbuffers(1)
    # GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_ms_errors)
    # GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
    # GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_ms_errors)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    # GL.glDisable(GL.GL_CULL_FACE)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)
    GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
    print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
    assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    # Single-sample FBO used by the sample-fetch pass (one renderbuffer per MRT).
    self.fbo_sample_fetch = GL.glGenFramebuffers(1)
    GL.glDepthMask(GL.GL_TRUE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)
    self.render_buffer_fetch_sample_render = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
    self.render_buffer_fetch_sample_position = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
    self.render_buffer_fetch_sample_face = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
    #
    self.render_buffer_fetch_sample_barycentric1 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
    self.render_buffer_fetch_sample_barycentric2 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
    self.z_buf_samples_errors = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    GL.glDisable(GL.GL_CULL_FACE)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)
    GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
    print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
    assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    #FBO_f
    # Non-multisample FBO (same 5 attachments) used as a blit target.
    self.fbo_errors_nonms = GL.glGenFramebuffers(1)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)
    render_buf_errors_render = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_render)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, render_buf_errors_render)
    render_buf_errors_sample_position = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
    render_buf_errors_sample_face = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
    #
    render_buf_errors_sample_barycentric1 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
    render_buf_errors_sample_barycentric2 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
    #
    z_buf_samples_errors = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, z_buf_samples_errors)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, z_buf_samples_errors)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)
    GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
    print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
    assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    self.textureObjLoc = GL.glGetUniformLocation(self.errorTextureProgram, "myTextureSampler")
    #Add background cube:
    position_location = GL.glGetAttribLocation(self.errorTextureProgram, 'position')
    color_location = GL.glGetAttribLocation(self.errorTextureProgram, 'colorIn')
    uvs_location = GL.glGetAttribLocation(self.errorTextureProgram, 'vertexUV')
    face_ids_location = GL.glGetAttribLocation(self.errorTextureProgram, 'face_id')
    barycentric_location = GL.glGetAttribLocation(self.errorTextureProgram, 'barycentric')
    # self.vbo_verts_cube= vbo.VBO(np.array(self.v_bgCube).astype(np.float32))
    # self.vbo_colors_cube= vbo.VBO(np.array(self.vc_bgCube).astype(np.float32))
    # self.vbo_uvs_cube = vbo.VBO(np.array(self.ft_bgCube).astype(np.float32))
    # self.vao_bgCube = GL.GLuint(0)
    # GL.glGenVertexArrays(1, self.vao_bgCube)
    #
    # GL.glBindVertexArray(self.vao_bgCube)
    # self.vbo_f_bgCube = vbo.VBO(np.array(self.f_bgCube).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
    # self.vbo_f_bgCube.bind()
    # self.vbo_verts_cube.bind()
    # GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
    # GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
    # self.vbo_colors_cube.bind()
    # GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
    # GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
    # self.vbo_uvs_cube.bind()
    # GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
    # GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
    #
    # f = self.f_bgCube
    # fc = np.tile(np.arange(len(self.f), len(self.f) + len(f))[:, None], [1, 3]).ravel()
    # # fc[:, 0] = fc[:, 0] & 255
    # # fc[:, 1] = (fc[:, 1] >> 8) & 255
    # # fc[:, 2] = (fc[:, 2] >> 16) & 255
    # fc = np.asarray(fc, dtype=np.uint32)
    # vbo_face_ids_cube = vbo.VBO(fc)
    # vbo_face_ids_cube.bind()
    # GL.glEnableVertexAttribArray(face_ids_location) # from 'location = 0' in shader
    # GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
    #
    # #Barycentric cube:
    # f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
    # vbo_barycentric_cube = vbo.VBO(f_barycentric)
    # vbo_barycentric_cube.bind()
    # GL.glEnableVertexAttribArray(barycentric_location) # from 'location = 0' in shader
    # GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
    GL.glBindVertexArray(0)
    # Empty VAO used to issue the single-point fullscreen fetch pass.
    self.vao_quad = GL.GLuint(0)
    GL.glGenVertexArrays(1, self.vao_quad)
    GL.glBindVertexArray(self.vao_quad)
    #Bind VAO
    self.vbo_face_ids_list = []
    self.vbo_barycentric_list = []
    self.vao_errors_mesh_list = []
    flen = 1  # global 1-based face-id offset across all meshes/polygon groups
    for mesh in range(len(self.f_list)):
        vaos_mesh = []
        vbo_face_ids_mesh = []
        vbo_barycentric_mesh = []
        for polygons in np.arange(len(self.f_list[mesh])):
            vao = GL.GLuint(0)
            GL.glGenVertexArrays(1, vao)
            GL.glBindVertexArray(vao)
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            vbo_f.bind()
            vbo_verts = self.vbo_verts_mesh[mesh][polygons]
            vbo_verts.bind()
            GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
            GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_colors = self.vbo_colors_mesh[mesh][polygons]
            vbo_colors.bind()
            GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
            GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_uvs = self.vbo_uvs_mesh[mesh][polygons]
            vbo_uvs.bind()
            GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
            GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            f = self.f_list[mesh][polygons]
            # One global face id per face corner (uint vertex attribute).
            fc = np.tile(np.arange(flen, flen + len(f))[:,None], [1,3]).ravel()
            # fc[:, 0] = fc[:, 0] & 255
            # fc[:, 1] = (fc[:, 1] >> 8) & 255
            # fc[:, 2] = (fc[:, 2] >> 16) & 255
            fc = np.asarray(fc, dtype=np.uint32)
            vbo_face_ids = vbo.VBO(fc)
            vbo_face_ids.bind()
            GL.glEnableVertexAttribArray(face_ids_location) # from 'location = 0' in shader
            GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
            # Barycentric coordinates: the 3x3 identity tiled once per face.
            f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
            vbo_barycentric = vbo.VBO(f_barycentric)
            vbo_barycentric.bind()
            GL.glEnableVertexAttribArray(barycentric_location) # from 'location = 0' in shader
            GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            flen += len(f)
            vaos_mesh += [vao]
            vbo_face_ids_mesh += [vbo_face_ids]
            # FIX: previously appended vbo_face_ids here, so the barycentric
            # VBOs were never retained in self.vbo_barycentric_list.
            vbo_barycentric_mesh += [vbo_barycentric]
        GL.glBindVertexArray(0)
        self.vbo_face_ids_list += [vbo_face_ids_mesh]
        self.vbo_barycentric_list += [vbo_barycentric_mesh]
        self.vao_errors_mesh_list += [vaos_mesh]
def render_image_buffers(self):
    """Render the scene into the MS error FBO, then fetch every sample.

    Pass 1 (``errorTextureProgram``): draws all meshes into the 5 MRT
    attachments of ``self.fbo_ms_errors`` (color, sample position, face id,
    barycentric xy / z).
    Pass 2 (``fetchSamplesProgram``): for each of ``self.nsamples`` MS
    samples, resolves that sample into ``self.fbo_sample_fetch`` and reads it
    back into the numpy arrays ``self.renders`` / ``self.renders_sample_pos``
    / ``self.renders_faces`` / ``self.renders_sample_barycentric*``, all laid
    out as (nsamples, height, width, channels) images.
    Finally sets ``self.render_resolved`` to the per-pixel mean over samples.

    Bug fixed relative to the previous revision: readbacks were reshaped to
    (height, height) and buffers allocated as (width, height), which only
    worked for square viewports (non-square raised ValueError on reshape).
    Everything now uses the standard (height, width) image layout — identical
    results for square frustums.
    """
    GL.glEnable(GL.GL_MULTISAMPLE)
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    GL.glMinSampleShading(1.0)
    self.makeCurrentContext()
    if hasattr(self, 'bgcolor'):
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1%self.num_channels], self.bgcolor.r[2%self.num_channels], 1.)
    GL.glUseProgram(self.errorTextureProgram)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3, GL.GL_COLOR_ATTACHMENT4]
    GL.glDrawBuffers(5, drawingBuffers)
    # GL.glClearBufferiv(GL.GL_COLOR, 0, 0)
    GL.glClearColor(0., 0., 0., 0.)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    wwLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'ww')
    whLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'wh')
    GL.glUniform1f(wwLoc, self.frustum['width'])
    GL.glUniform1f(whLoc, self.frustum['height'])
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_errors_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            # vbo_color.bind()
            f = self.f_list[mesh][polygons]
            # Refresh per-corner colors from the (possibly updated) vertex colors.
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
            self.vbo_colors_mesh[mesh][polygons].bind()
            if self.f.shape[1]==2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            # The analytic renderer only supports triangles.
            assert(primtype == GL.GL_TRIANGLES)
            # GL.glUseProgram(self.errorTextureProgram)
            # Groups without UVs are drawn with the 1x1 white texture.
            if self.haveUVs_list[mesh][polygons]:
                texture = self.textureID_mesh_list[mesh][polygons]
            else:
                texture = self.whitePixelTextureID
            GL.glActiveTexture(GL.GL_TEXTURE0)
            GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
            GL.glUniform1i(self.textureObjLoc, 0)
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f)*vbo_f.data.shape[1])
    # # #Background cube:
    # GL.glBindVertexArray(self.vao_bgCube)
    # self.vbo_f_bgCube.bind()
    # texture = self.whitePixelTextureID
    # self.vbo_uvs_cube.bind()
    #
    # GL.glActiveTexture(GL.GL_TEXTURE0)
    # GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
    # GL.glUniform1i(self.textureObjLoc, 0)
    # GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
    #
    # GL.glDrawElements(primtype, len(self.vbo_f_bgCube)*self.vbo_f_bgCube.data.shape[1], GL.GL_UNSIGNED_INT, None)
    # self.draw_visibility_image_ms(self.v, self.f)
    # GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    #
    # GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms_errors)
    # GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)
    # GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_errors_nonms)
    # GL.glDrawBuffer(GL.GL_COLOR_ATTACHMENT0)
    # GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'],GL.GL_COLOR_BUFFER_BIT, GL.GL_NEAREST)
    # GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_errors_nonms)
    # GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # # result_blit = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
    # result_blit2 = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
    #
    # GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms_errors)
    # GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)
    # GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
    # GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_errors_nonms)
    # GL.glDrawBuffer(GL.GL_COLOR_ATTACHMENT1)
    # GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'],GL.GL_COLOR_BUFFER_BIT, GL.GL_NEAREST)
    # GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_errors_nonms)
    # GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
    # result_blit_pos = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
    GL.glUseProgram(self.fetchSamplesProgram)
    # GL.glDisable(GL.GL_MULTISAMPLE)
    self.colorsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "colors")
    self.sample_positionsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_positions")
    self.sample_facesLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_faces")
    self.sample_barycentric1Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords1")
    self.sample_barycentric2Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords2")
    # GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # GL.glActiveTexture(GL.GL_TEXTURE2)
    # GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_face)
    # GL.glUniform1i(self.sample_facesLoc, 2)
    wwLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'ww')
    whLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'wh')
    GL.glUniform1f(wwLoc, self.frustum['width'])
    GL.glUniform1f(whLoc, self.frustum['height'])
    # Per-sample readback buffers in (nsamples, height, width, channels) image
    # layout.  (Was (nsamples, width, height, C) with square-only reshapes.)
    self.renders = np.zeros([self.nsamples, self.frustum['height'], self.frustum['width'], 3])
    self.renders_sample_pos = np.zeros([self.nsamples, self.frustum['height'], self.frustum['width'], 2])
    self.renders_faces = np.zeros([self.nsamples, self.frustum['height'], self.frustum['width']]).astype(np.uint32)
    self.renders_sample_barycentric1 = np.zeros([self.nsamples, self.frustum['height'], self.frustum['width'], 2])
    self.renders_sample_barycentric2 = np.zeros([self.nsamples, self.frustum['height'], self.frustum['width'], 1])
    self.renders_sample_barycentric = np.zeros([self.nsamples, self.frustum['height'], self.frustum['width'], 3])
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3,
                      GL.GL_COLOR_ATTACHMENT4]
    GL.glDrawBuffers(5, drawingBuffers)
    GL.glClearColor(0., 0., 0., 0.)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    for sample in np.arange(self.nsamples):
        sampleLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'sample')
        GL.glUniform1i(sampleLoc, sample)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
        GL.glUniform1i(self.colorsLoc, 0)
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
        GL.glUniform1i(self.sample_positionsLoc, 1)
        GL.glActiveTexture(GL.GL_TEXTURE2)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
        GL.glUniform1i(self.sample_facesLoc, 2)
        GL.glActiveTexture(GL.GL_TEXTURE3)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
        GL.glUniform1i(self.sample_barycentric1Loc, 3)
        GL.glActiveTexture(GL.GL_TEXTURE4)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
        GL.glUniform1i(self.sample_barycentric2Loc, 4)
        # Fullscreen fetch pass: one point expanded by the geometry shader.
        GL.glBindVertexArray(self.vao_quad)
        GL.glDrawArrays(GL.GL_POINTS, 0, 1)
        # GL.glBindVertexArray(self.vao_bgCube)
        # # self.vbo_f_bgCube.bind()
        # GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
        #
        # GL.glDrawElements(primtype, len(self.vbo_f_bgCube) * self.vbo_f_bgCube.data.shape[1], GL.GL_UNSIGNED_INT, None)
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)
        # glReadPixels returns rows bottom-up, height rows of width pixels;
        # flipud converts to top-down image layout.  (Reshape was previously
        # (height, height): square-viewport-only bug.)
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['width'], 3)[:,:,0:3].astype(np.float64))
        self.renders[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['width'], 3)[:,:,0:2].astype(np.float64))
        self.renders_sample_pos[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RED_INTEGER, GL.GL_UNSIGNED_INT), np.uint32).reshape(self.frustum['height'], self.frustum['width'])[:,:].astype(np.uint32))
        self.renders_faces[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT3)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['width'], 3)[:,:,0:2].astype(np.float64))
        self.renders_sample_barycentric1[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT4)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['width'], 3)[:,:,0:1].astype(np.float64))
        self.renders_sample_barycentric2[sample] = result
        # Recombine the split barycentrics (xy + z) into a single 3-vector image.
        self.renders_sample_barycentric[sample] = np.concatenate([self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2[sample][:,:,0:1]], 2)
        # GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        # GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
        # result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
        # self.renders_faces[sample] = result
    GL.glBindVertexArray(0)
    GL.glClearColor(0.,0.,0., 1.)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glDisable(GL.GL_MULTISAMPLE)
    ##Finally return image and derivatives
    self.render_resolved = np.mean(self.renders, 0)
    self.updateRender = True
    self.updateDerivatives_verts = True
    self.updateDerivatives_vc = True
def draw_visibility_image_ms(self, v, f):
    """Render a face-id ("visibility") image into the multisampled FBO.

    Face indices are encoded 1-based into three uint8 color channels
    (id & 255, (id >> 8) & 255, (id >> 16) & 255), so 0 means "no face".
    Assumes camera is set up correctly.

    NOTE(review): the call below invokes this very method with unchanged
    arguments, so it recurses unconditionally until RecursionError. It
    almost certainly should call a different setup/draw helper -- confirm
    the intended target before using this path.
    """
    GL.glUseProgram(self.visibilityProgram_ms)
    v = np.asarray(v)
    # NOTE(review): unconditional self-recursion -- see docstring.
    self.draw_visibility_image_ms(v, f)
    #Attach FBO
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # Pack 1-based face ids into three bytes (low, middle, high).
    fc = np.arange(1, len(f)+1)
    fc = np.tile(fc.reshape((-1,1)), (1, 3))
    fc[:, 0] = fc[:, 0] & 255
    fc[:, 1] = (fc[:, 1] >> 8 ) & 255
    fc[:, 2] = (fc[:, 2] >> 16 ) & 255
    fc = np.asarray(fc, dtype=np.uint8)
    self.draw_colored_primitives_ms(self.vao_dyn_ub, v, f, fc)
# this assumes that fc is either "by faces" or "verts by face", not "by verts"
def draw_colored_primitives_ms(self, vao, v, f, fc=None):
    """Draw triangles into the multisampled-errors FBO with given colors.

    :param vao: GL vertex array object to bind (dynamic buffers).
    :param v: vertex array, reshaped to (-1, 3); expanded "by face" via f.
    :param f: integer face array (one row of vertex indices per triangle).
    :param fc: optional uint8 colors -- either one row per face (repeated
        to one per face-vertex) or already one row per face-vertex.
    :raises Exception: if fc cannot be expanded to one color per face-vertex.
    """
    # gl.EnableClientState(GL_VERTEX_ARRAY)
    verts_by_face = np.asarray(v.reshape((-1,3))[f.ravel()], dtype=np.float64, order='C')
    # gl.VertexPointer(verts_by_face)
    GL.glBindVertexArray(vao)
    self.vbo_verts_dyn.set_array(verts_by_face.astype(np.float32))
    self.vbo_verts_dyn.bind()
    if fc is not None:
        # gl.EnableClientState(GL_COLOR_ARRAY)
        if fc.size == verts_by_face.size:
            vc_by_face = fc
        else:
            # One color per face: repeat so each face-vertex gets the face color.
            vc_by_face = np.repeat(fc, f.shape[1], axis=0)
        if vc_by_face.size != verts_by_face.size:
            raise Exception('fc must have either rows=(#rows in faces) or rows=(# elements in faces)')
        vc_by_face = np.asarray(vc_by_face, dtype=np.uint8, order='C')
        self.vbo_colors_ub.set_array(vc_by_face)
        self.vbo_colors_ub.bind()
    primtype = GL.GL_TRIANGLES
    self.vbo_indices_dyn.set_array(np.arange(f.size, dtype=np.uint32).ravel())
    self.vbo_indices_dyn.bind()
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT2]
    GL.glDrawBuffers(1, drawingBuffers)
    # MVP = projection * [view; 0 0 0 1] in OpenGL convention.
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
    GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
    # Depth test is disabled for this draw and re-enabled afterwards.
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glDrawElements(primtype, len(self.vbo_indices_dyn), GL.GL_UNSIGNED_INT, None)
    GL.glEnable(GL.GL_DEPTH_TEST)
def compute_dr_wrt(self, wrt):
    """Return the Jacobian of the rendered image with respect to ``wrt``.

    Dispatches on object identity: the camera yields vertex derivatives,
    ``self.vc`` yields per-vertex-color derivatives; the bgcolor and
    texture branches are marked non-functional in the original code.
    Anything else returns None.
    """
    visibility = self.visibility_image

    if wrt is self.camera:
        return self.get_derivatives_verts()

    if wrt is self.vc:
        return self.get_derivatives_vc()

    # Not working atm.:
    if wrt is self.bgcolor:
        residual = (self.imageGT.r - self.render_image).ravel()
        return 2. * residual * common.dr_wrt_bgcolor(visibility, self.frustum, num_channels=self.num_channels)

    # Not working atm.:
    if wrt is self.texture_stack:
        # Rows: visible pixels; cols: quantized texture coordinates.
        rows = np.nonzero(self.visibility_image.ravel() != 4294967295)[0]
        texcoords, texidx = self.texcoord_image_quantized
        vis_texidx = texidx.ravel()[rows]
        vis_texcoords = texcoords.ravel()[rows]
        cols = vis_texcoords * np.tile(col(vis_texidx), [1, 2]).ravel()
        clr_im = -2. * (self.imageGT.r - self.render_image) * self.renderWithoutTexture
        if False:
            cv2.imshow('clr_im', clr_im)
            # cv2.imshow('texmap', self.texture_image.r)
            cv2.waitKey(1)
        # Stack the three channels (r, g, b order) for the sparse triplets.
        data = np.concatenate([clr_im[:, :, channel].ravel()[rows] for channel in range(3)])
        IS = np.concatenate((rows * 3, rows * 3 + 1, rows * 3 + 2))
        JS = np.concatenate((cols * 3, cols * 3 + 1, cols * 3 + 2))
        return sp.csc_matrix((data, (IS, JS)), shape=(self.r.size, wrt.r.size))

    return None
def compute_r(self):
    """Chumpy hook: the value of this node is the rendered image."""
    rendered = self.render()
    return rendered
@depends_on(dterms+terms)
def renderWithoutColor(self):
    """Return the cached color-free render (``self.render_nocolor``).

    ``depends_on`` caches the value until a listed term changes;
    ``_call_on_changed`` presumably triggers any pending recomputation --
    confirm against the chumpy base class.
    """
    self._call_on_changed()
    return self.render_nocolor
@depends_on(dterms+terms)
def renderWithoutTexture(self):
    """Return the cached texture-free render (``self.render_notexture``).

    ``depends_on`` caches the value until a listed term changes;
    ``_call_on_changed`` presumably triggers any pending recomputation --
    confirm against the chumpy base class.
    """
    self._call_on_changed()
    return self.render_notexture
# @depends_on(dterms+terms)
def render(self):
    """Return the final composited image, recomputing only when stale.

    The heavy lifting happens in ``compute_image``; its output is cached
    in ``self.render_result`` and invalidated via ``self.updateRender``.
    """
    self._call_on_changed()
    # These property reads are kept (in the original order) even where the
    # values are unused here, since they may have caching side effects.
    vis_image = self.visibility_image
    _resolved = self.render_resolved
    visible_px = np.nonzero(vis_image.ravel() != 4294967295)[0]
    _bary = self.barycentric_image
    if self.updateRender:
        self.render_result = self.compute_image(visible_px, vis_image, self.f)
        self.updateRender = False
    return self.render_result
def get_derivatives_verts(self):
    """Return (lazily cached) derivatives of the image w.r.t. vertices.

    Guarded by ``self.updateDerivatives_verts``; the result is stored in
    ``self.derivatives_verts``. If the image itself is stale, a render is
    forced first so the derivatives match the current image.
    """
    self._call_on_changed()
    # Property reads kept in original order: they may have caching side effects.
    visibility = self.visibility_image
    color = self.render_resolved
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateDerivatives_verts:
        if self.updateRender:
            self.render()
        # Fix: use floor division -- v.r.size is 3 * num_verts and the count
        # must be an int (true division yields a float on Python 3).
        derivatives_verts = self.compute_derivatives_verts(
            color, visible, visibility, barycentric,
            self.frustum['width'], self.frustum['height'],
            self.v.r.size // 3, self.f)
        self.derivatives_verts = derivatives_verts
        self.updateDerivatives_verts = False
    return self.derivatives_verts
def get_derivatives_vc(self):
    """Return (lazily cached) derivatives of the image w.r.t. vertex colors.

    Guarded by ``self.updateDerivatives_vc``; the result is stored in
    ``self.derivatives_vc``. If the image itself is stale, a render is
    forced first so the derivatives match the current image.
    """
    self._call_on_changed()
    # Property reads kept in original order: they may have caching side effects.
    visibility = self.visibility_image
    color = self.render_resolved
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateDerivatives_vc:
        if self.updateRender:
            self.render()
        # Fix: use floor division -- v.r.size is 3 * num_verts and the count
        # must be an int (true division yields a float on Python 3).
        derivatives_vc = self.compute_derivatives_vc(
            color, visible, visibility, barycentric,
            self.frustum['width'], self.frustum['height'],
            self.v.r.size // 3, self.f)
        self.derivatives_vc = derivatives_vc
        self.updateDerivatives_vc = False
    return self.derivatives_vc
# # @depends_on(dterms+terms)
# def image_and_derivatives(self):
# # self._call_on_changed()
# visibility = self.visibility_image
#
# color = self.render_resolved
#
# visible = np.nonzero(visibility.ravel() != 4294967295)[0]
# num_visible = len(visible)
#
# barycentric = self.barycentric_image
#
# if self.updateRender:
# render, derivatives = self.compute_image_and_derivatives(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)
# self.render = render
# self.derivatives = derivatives
# self.updateRender = False
#
# return self.render, self.derivatives
#
def barycentricDerivatives(self, vertices, faces, verts):
    """Derivatives of per-pixel color w.r.t. projected vertex positions.

    Chains three pieces, all computed with vectorized NumPy:
      1. d(view-space point)/d(image coords) per triangle vertex (dxdp_*),
      2. d(barycentric coords)/d(view-space triangle vertices) (db*dp*wrt),
      3. the triangle's (clamped) vertex colors (visTriVC),
    and contracts them into ``didp``.

    :param vertices: per-face vertex array; reshaped to (-1, 3, 3) after
        projection (assumes 3 vertices per face -- confirm with caller).
    :param faces: face index array; used to gather vertex colors.
    :param verts: per-pixel surface points; reshaped to (-1, 3).
    :returns: didp -- derivative tensor contracted over barycentric and
        spatial axes (see final einsum-like reductions below).
    """
    import chumpy as ch
    # Homogeneous coordinates for the per-face vertices and sample points.
    vertices = np.concatenate([vertices, np.ones([vertices.size // 3, 1])], axis=1)
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    # viewVerts = negYMat.dot(view_mtx.dot(verts_hom.T).T[:, :3].T).T.reshape([-1, 3])
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerticesNonBnd = camMtx[0:3, 0:3].dot(view_mtx.dot(vertices.T).T[:, :3].T).T.reshape([-1, 3, 3])
    # (A large commented-out chumpy autodiff block that cross-checked the
    # analytic derivatives below was elided here; see version control for
    # the original verification code.)
    # NOTE: the four lines below recompute the same quantities as above.
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerts = projVerts
    projVerts = projVerts[:, :2] / projVerts[:, 2:3]
    # Triangle vertices in view space and their perspective projections.
    p0 = viewVerticesNonBnd[:, 0, :]
    p1 = viewVerticesNonBnd[:, 1, :]
    p2 = viewVerticesNonBnd[:, 2, :]
    p0_proj = p0[:,0:2]/p0[:,2:3]
    p1_proj = p1[:,0:2]/p1[:,2:3]
    p2_proj = p2[:,0:2]/p2[:,2:3]
    # Unnormalized and unit triangle normals.
    nt = np.cross(p1 - p0, p2 - p0)
    nt_norm = nt / np.linalg.norm(nt, axis=1)[:, None]
    cam_f = 1
    # d(view point)/d(u, v) for each triangle vertex, evaluated at that
    # vertex's projection (u = x/z, v = y/z).
    u = p0[:, 0]/p0[:, 2]
    v = p0[:, 1]/p0[:, 2]
    xu = np.c_[p0[:, 2][:,None], np.zeros([len(p0),1]), (-p0[:,0]/u**2)[:,None]]
    xv = np.c_[np.zeros([len(p0),1]), p0[:, 2][:,None], (-p0[:,1]/v**2)[:,None]]
    dxdp_0 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p1[:, 0]/p1[:, 2]
    v = p1[:, 1]/p1[:, 2]
    xu = np.c_[p1[:, 2][:,None], np.zeros([len(p1),1]), (-p1[:,0]/u**2)[:,None]]
    xv = np.c_[np.zeros([len(p1),1]), p1[:, 2][:,None], (-p1[:,1]/v**2)[:,None]]
    dxdp_1 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p2[:, 0]/p2[:, 2]
    v = p2[:, 1]/p2[:, 2]
    xu = np.c_[p2[:, 2][:,None], np.zeros([len(p2),1]), (-p2[:,0]/u**2)[:,None]]
    xv = np.c_[np.zeros([len(p2),1]), p2[:, 2][:,None], (-p2[:,1]/v**2)[:,None]]
    dxdp_2 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    # Triangle area and normal magnitude (|nt| = 2 * area).
    A = 0.5*np.linalg.norm(np.cross(p1 - p0, p2 - p0),axis=1)
    nt_mag = A*2
    p = viewVerts
    pre1 = -1/(nt_mag[:,None]**2) * nt_norm
    ident = np.identity(3)
    ident = np.tile(ident[None,:],[len(p2),1,1])
    # d(nt)/d(p0..p2) via the product rule on the cross product.
    dntdp0 = np.cross((p2-p0)[:,None,:], -ident) + np.cross(-ident, (p1-p0)[:,None,:])
    dntdp1 = np.cross((p2-p0)[:,None,:],ident)
    dntdp2 = np.cross(ident,(p1-p0)[:,None,:])
    #Pol check this!:
    # Jacobian of the normalization x -> x/|x| applied to nt.
    dntnorm = (ident - np.einsum('ij,ik->ijk',nt_norm,nt_norm))/nt_mag[:,None,None]
    dntnormdp0 = np.einsum('ijk,ikl->ijl',dntnorm, dntdp0)
    dntnormdp1 = np.einsum('ijk,ikl->ijl',dntnorm, dntdp1)
    dntnormdp2 = np.einsum('ijk,ikl->ijl',dntnorm, dntdp2)
    dpart1p0 = np.einsum('ij,ijk->ik', pre1, dntdp0)
    dpart1p1 = np.einsum('ij,ijk->ik', pre1, dntdp1)
    dpart1p2 = np.einsum('ij,ijk->ik', pre1, dntdp2)
    # Barycentric coordinate b0 and its derivatives w.r.t. p0, p1, p2
    # (product rule over the 1/|nt| prefactor and the cross-product term).
    b0 = np.sum(np.cross(nt_norm, p2 - p1) * (p - p1), axis=1)[:,None]
    db0part2p0 = np.einsum('ikj,ij->ik',np.cross(dntnormdp0.swapaxes(1,2), (p2 - p1)[:, None, :]), p - p1)
    db0part2p1 = np.einsum('ikj,ij->ik',np.cross(dntnormdp1.swapaxes(1,2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :],-ident), p - p1) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p2-p1), -ident)
    db0part2p2 = np.einsum('ikj,ij->ik',np.cross(dntnormdp2.swapaxes(1,2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], ident), p - p1)
    db0dp0wrtpart1 = dpart1p0*b0
    db0dp1wrtpart1 = dpart1p1*b0
    db0dp2wrtpart1 = dpart1p2*b0
    db0dp0wrtpart2 = 1./(nt_mag[:,None])*db0part2p0
    db0dp1wrtpart2 = 1./(nt_mag[:,None])*db0part2p1
    db0dp2wrtpart2 = 1./(nt_mag[:,None])*db0part2p2
    db0dp0wrt = db0dp0wrtpart1 + db0dp0wrtpart2
    db0dp1wrt = db0dp1wrtpart1 + db0dp1wrtpart2
    db0dp2wrt = db0dp2wrtpart1 + db0dp2wrtpart2
    ######
    # Same structure for barycentric coordinate b1.
    b1 = np.sum(np.cross(nt_norm, p0 - p2) * (p - p2), axis=1)[:, None]
    db1part2p0 = np.einsum('ikj,ij->ik',np.cross(dntnormdp0.swapaxes(1, 2),(p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], ident), p - p2)
    db1part2p1 = np.einsum('ikj,ij->ik',np.cross(dntnormdp1.swapaxes(1, 2),(p0 - p2)[:, None, :]), p - p2)
    db1part2p2 = np.einsum('ikj,ij->ik',np.cross(dntnormdp2.swapaxes(1, 2),(p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], -ident), p - p2) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p0-p2), -ident)
    db1dp0wrtpart1 = dpart1p0*b1
    db1dp1wrtpart1 = dpart1p1*b1
    db1dp2wrtpart1 = dpart1p2*b1
    db1dp0wrtpart2 = 1./(nt_mag[:,None])*db1part2p0
    db1dp1wrtpart2 = 1./(nt_mag[:,None])*db1part2p1
    db1dp2wrtpart2 = 1./(nt_mag[:,None])*db1part2p2
    db1dp0wrt = db1dp0wrtpart1 + db1dp0wrtpart2
    db1dp1wrt = db1dp1wrtpart1 + db1dp1wrtpart2
    db1dp2wrt = db1dp2wrtpart1 + db1dp2wrtpart2
    ######
    # Same structure for barycentric coordinate b2.
    b2 = np.sum(np.cross(nt_norm, p1 - p0) * (p - p0), axis=1)[:, None]
    db2part2p0 = np.einsum('ikj,ij->ik',np.cross(dntnormdp0.swapaxes(1, 2),(p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], -ident), p - p0) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p1 - p0), -ident)
    db2part2p1 = np.einsum('ikj,ij->ik',np.cross(dntnormdp1.swapaxes(1, 2),(p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], ident), p - p0)
    db2part2p2 = np.einsum('ikj,ij->ik',np.cross(dntnormdp2.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0)
    db2dp0wrtpart1 = dpart1p0*b2
    db2dp1wrtpart1 = dpart1p1*b2
    db2dp2wrtpart1 = dpart1p2*b2
    db2dp0wrtpart2 = 1./(nt_mag[:,None])*db2part2p0
    db2dp1wrtpart2 = 1./(nt_mag[:,None])*db2part2p1
    db2dp2wrtpart2 = 1./(nt_mag[:,None])*db2part2p2
    db2dp0wrt = db2dp0wrtpart1 + db2dp0wrtpart2
    db2dp1wrt = db2dp1wrtpart1 + db2dp1wrtpart2
    db2dp2wrt = db2dp2wrtpart1 + db2dp2wrtpart2
    # Stack per-vertex derivatives: axis 1 indexes (b0, b1, b2).
    dp0 = np.concatenate([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]], axis=1)
    dp1 = np.concatenate([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]], axis=1)
    dp2 = np.concatenate([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]], axis=1)
    #
    dp = np.concatenate([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)
    #If dealing with degenerate triangles, ignore that gradient.
    # dp[nt_mag<=1e-15] = 0
    dp = dp[None, :]
    nFaces = len(faces)
    # Gather per-face vertex colors, clamped to [0, 1].
    vc = self.vc.r[faces.ravel()].reshape([nFaces, 3, 3]).transpose([2, 0, 1])[:, :, :, None, None]
    vc[vc > 1] = 1
    vc[vc < 0] = 0
    visTriVC = vc
    dxdp = np.concatenate([dxdp_0[:,None,:],dxdp_1[:,None,:],dxdp_2[:,None,:]], axis=1)
    dxdp = dxdp[None, :, None]
    # Contract: color * d(barycentric)/d(view point) * d(view point)/d(uv).
    dbvc = dp * visTriVC
    didp = np.sum(dbvc[:, :, :, :, :, None] * dxdp, 4).sum(2)
    #output should be shape: VC x Ninput x Tri Points x UV
    return didp
def compute_image(self, visible, visibility, f):
    """Compose the final anti-aliased color image.

    Non-boundary pixels come straight from ``self.color_image``. For
    boundary pixels (boundarybool_image & valid visibility), per-sample
    colors from the multisampled renders are blended with a color
    interpolated along the occluding edge, weighted by each sample's
    distance to that edge, then averaged over samples. The result is
    clamped to [0, 1].

    NOTE(review): the original (inaccurate) docstring described a sparse
    Jacobian; this method returns an image. The stashed ``self.*``
    intermediates (linedist, lnorm, d, dist, ...) are consumed later by
    the derivative computations.

    :param visible: flat indices of visible pixels (currently unused here).
    :param visibility: per-pixel face-id image; 4294967295 means background.
    :param f: face array used to look up projected triangle vertices.
    :returns: (height, width, 3) float image clamped to [0, 1].
    """
    width = self.frustum['width']
    height = self.frustum['height']
    num_channels = 3
    n_channels = num_channels
    vc_size = self.vc.size
    # NOTE(review): np.bool was removed in NumPy >= 1.24; this code assumes
    # an older NumPy.
    boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
    rangeIm = np.arange(self.boundarybool_image.size)
    zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
    edge_visibility = self.boundaryid_image
    nsamples = self.nsamples
    if np.any(boundaryImage):
        # Identify boundary pixels whose samples never hit an edge face
        # (wrongBnd); the pruning of those pixels is commented out below.
        boundaryFaces = visibility[(boundaryImage) & (visibility != 4294967295)]
        nBndFaces = len(boundaryFaces)
        projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
        sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
        # NOTE(review): tile factor 8 is hard-coded here while other tiles
        # use self.nsamples -- presumably assumes nsamples == 8; confirm.
        edgeFaces= np.tile(self.fpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]][None, :, :], [8, 1, 1])
        edgeSampled = np.any((edgeFaces[:,:, 0]== sampleFaces) | (edgeFaces[:,:, 1]== sampleFaces),0)
        facesInsideBnd = projFacesBndTiled == sampleFaces
        wrongBnd = ~edgeSampled
        # wrongBnd = np.all(facesInsideBnd, 0)
        whereBnd = np.where(boundaryImage.ravel())[0]
        # boundaryImage.ravel()[whereBnd[wrongBnd]] = False
    if np.any(boundaryImage):
        # Gather per-sample positions and colors at boundary pixels.
        sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 2])
        sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
        boundaryFaces = visibility[(boundaryImage)&(visibility !=4294967295 )]
        nBndFaces = len(boundaryFaces)
        projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
        # Samples covered by the pixel's own face vs. by another face.
        facesInsideBnd = projFacesBndTiled == sampleFaces
        facesOutsideBnd = ~facesInsideBnd
        vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
        vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1,1,1])
        vertsProjBndSamplesOutside = vertsProjBndSamples[facesOutsideBnd]
        frontFacing = self.frontFacingEdgeFaces[(zerosIm * boundaryImage).ravel().astype(np.bool)].astype(np.bool)
        frontFacingEdgeFaces = self.fpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]][frontFacing]
        vertsPerFaceProjBnd = self.camera.r[f[frontFacingEdgeFaces.ravel()].ravel()].reshape([1, -1, 2])
        vertsPerFaceProjBnd = np.tile(vertsPerFaceProjBnd, [self.nsamples, 1,1])
        vertsPerFaceProjBnd = vertsPerFaceProjBnd.reshape([-1,3,2])[facesOutsideBnd.ravel()]
        # Projected triangle areas (via 3x3 determinants), capped at 1:
        # edge faces, outside-sample faces, inside-sample faces.
        nv = len(vertsPerFaceProjBnd)
        p0_proj = np.c_[vertsPerFaceProjBnd[:,0,:], np.ones([nv,1])]
        p1_proj = np.c_[vertsPerFaceProjBnd[:,1,:], np.ones([nv,1])]
        p2_proj = np.c_[vertsPerFaceProjBnd[:,2,:], np.ones([nv,1])]
        t_area_bnd_edge = np.abs(np.linalg.det(np.concatenate([p0_proj[:,None], p1_proj[:,None], p2_proj[:,None]], axis=1))*0.5)
        t_area_bnd_edge[t_area_bnd_edge > 1] = 1
        faces = f[sampleFaces[facesOutsideBnd]].ravel()
        vertsPerFaceProjBnd = self.camera.r[faces].reshape([-1, 3, 2])
        nv = len(vertsPerFaceProjBnd)
        p0_proj = np.c_[vertsPerFaceProjBnd[:,0,:], np.ones([nv,1])]
        p1_proj = np.c_[vertsPerFaceProjBnd[:,1,:], np.ones([nv,1])]
        p2_proj = np.c_[vertsPerFaceProjBnd[:,2,:], np.ones([nv,1])]
        t_area_bnd_outside = np.abs(np.linalg.det(np.concatenate([p0_proj[:,None], p1_proj[:,None], p2_proj[:,None]], axis=1))*0.5)
        t_area_bnd_outside[t_area_bnd_outside > 1] = 1
        faces = f[sampleFaces[facesInsideBnd]].ravel()
        vertsPerFaceProjBnd = self.camera.r[faces].reshape([-1, 3, 2])
        nv = len(vertsPerFaceProjBnd)
        p0_proj = np.c_[vertsPerFaceProjBnd[:,0,:], np.ones([nv,1])]
        p1_proj = np.c_[vertsPerFaceProjBnd[:,1,:], np.ones([nv,1])]
        p2_proj = np.c_[vertsPerFaceProjBnd[:,2,:], np.ones([nv,1])]
        t_area_bnd_inside = np.abs(np.linalg.det(np.concatenate([p0_proj[:,None], p1_proj[:,None], p2_proj[:,None]], axis=1))*0.5)
        t_area_bnd_inside[t_area_bnd_inside > 1] = 1
        #Trick to cap to 1 while keeping gradients.
        # Project each outside sample onto its occluding edge segment.
        p1 = vertsProjBndSamplesOutside[:,0,:]
        p2 = vertsProjBndSamplesOutside[:,1,:]
        p = sampleV[facesOutsideBnd]
        l = (p2 - p1)
        linedist = np.sqrt((np.sum(l**2,axis=1)))[:,None]
        self.linedist = linedist
        lnorm = l/linedist
        self.lnorm = lnorm
        v1 = p - p1
        self.v1 = v1
        d = v1[:,0]* lnorm[:,0] + v1[:,1]* lnorm[:,1]
        self.d = d
        intersectPoint = p1 + d[:,None] * lnorm
        self.intersectPoint = intersectPoint
        v2 = p - p2
        self.v2 = v2
        l12 = (p1 - p2)
        linedist12 = np.sqrt((np.sum(l12**2,axis=1)))[:,None]
        lnorm12 = l12/linedist12
        d2 = v2[:,0]* lnorm12[:,0] + v2[:,1]* lnorm12[:,1]
        # If the foot of the perpendicular falls outside the segment,
        # snap to the nearest endpoint instead.
        nonIntersect = (d2 < 0) | (d<0)
        self.nonIntersect = nonIntersect
        argminDistNonIntersect = np.argmin(np.c_[d[nonIntersect], d2[nonIntersect]], 1)
        self.argminDistNonIntersect = argminDistNonIntersect
        intersectPoint[nonIntersect] = vertsProjBndSamplesOutside[nonIntersect][np.arange(nonIntersect.sum()), argminDistNonIntersect]
        lineToPoint = (p - intersectPoint)
        n=lineToPoint
        dist = np.sqrt((np.sum(lineToPoint ** 2, axis=1)))[:, None]
        n_norm = lineToPoint /dist
        self.n_norm = n_norm
        self.dist = dist
        d_final = dist.squeeze()
        # 3D edge endpoints and their (clamped) vertex colors.
        verticesBnd = self.v.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2 , 3])
        verticesBndSamples = np.tile(verticesBnd[None,:,:],[self.nsamples,1,1, 1])
        verticesBndOutside = verticesBndSamples[facesOutsideBnd]
        vc = self.vc.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2 , 3])
        vc[vc > 1] = 1
        vc[vc < 0] = 0
        vcBnd = vc
        vcBndSamples = np.tile(vcBnd[None,:,:],[self.nsamples,1,1,1])
        vcBndOutside = vcBndSamples[facesOutsideBnd]
        invViewMtx = np.linalg.inv(np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])])
        camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
        view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
        # Lift the 2D intersection point back onto the 3D edge to get an
        # interpolation weight between the two edge vertices.
        verticesBndOutside = np.concatenate([verticesBndOutside.reshape([-1,3]), np.ones([verticesBndOutside.size//3, 1])], axis=1)
        projVerticesBndOutside = (camMtx.dot(view_mtx)).dot(verticesBndOutside.T).T[:,:3].reshape([-1,2,3])
        projVerticesBndDir = projVerticesBndOutside[:,1,:] - projVerticesBndOutside[:,0,:]
        projVerticesBndDir = projVerticesBndDir/np.sqrt((np.sum(projVerticesBndDir ** 2, 1)))[:, None]
        dproj = (intersectPoint[:,0]* projVerticesBndOutside[:,0,2] - projVerticesBndOutside[:,0,0]) / (projVerticesBndDir[:,0] - projVerticesBndDir[:,2]*intersectPoint[:,0])
        # Code to check computation that dproj == dproj_y was removed here.
        projPoint = projVerticesBndOutside[:,0,:][:,: ] + dproj[:,None]*projVerticesBndDir[:,:]
        projPointVec4 = np.concatenate([projPoint, np.ones([projPoint.shape[0],1])], axis=1)
        viewPointIntersect = (invViewMtx.dot(np.linalg.inv(camMtx)).dot(projPointVec4.T.reshape([4,-1])).reshape([4,-1])).T[:,:3]
        barycentricVertsDistIntesect = np.linalg.norm(viewPointIntersect - verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,0,:], axis=1)
        barycentricVertsDistIntesect2 = np.linalg.norm(viewPointIntersect - verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,1,:], axis=1)
        # dist1 + dist2 == edge length iff the point lies on the segment.
        barycentricVertsDistEdge = np.linalg.norm(verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,0,:] - verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,1,:], axis=1)
        nonIntersect = np.abs(barycentricVertsDistIntesect + barycentricVertsDistIntesect2 - barycentricVertsDistEdge) > 1e-4
        argminDistNonIntersect = np.argmin(np.c_[barycentricVertsDistIntesect[nonIntersect], barycentricVertsDistIntesect2[nonIntersect]],1)
        barycentricVertsIntersect = barycentricVertsDistIntesect2 / (barycentricVertsDistIntesect + barycentricVertsDistIntesect2)
        barycentricVertsIntersect[nonIntersect] = np.array(argminDistNonIntersect == 0).astype(np.float64)
        self.barycentricVertsIntersect = barycentricVertsIntersect
        self.viewPointIntersect = viewPointIntersect
        self.viewPointIntersect[nonIntersect] = verticesBndOutside.reshape([-1, 2, 4])[nonIntersect, :, 0:3][np.arange(nonIntersect.sum()), argminDistNonIntersect, :]
        vcEdges1 = barycentricVertsIntersect[:, None] * vcBndOutside.reshape([-1, 2, 3])[:, 0, :]
        self.barycentricVertsIntersect = barycentricVertsIntersect
        vcEdges2 = (1-barycentricVertsIntersect[:,None]) * vcBndOutside.reshape([-1,2,3])[:,1,:]
        #Color:
        colorVertsEdge = vcEdges1 + vcEdges2
        #Point IN edge barycentric
        # Blend weight: distance to the edge, capped at 1 pixel.
        d_finalNP = np.minimum(d_final.copy(),1.)
        self.d_final_outside = d_finalNP
        self.t_area_bnd_outside = t_area_bnd_outside
        self.t_area_bnd_edge = t_area_bnd_edge
        self.t_area_bnd_inside = t_area_bnd_inside
        areaWeights = np.zeros([nsamples, nBndFaces])
        areaWeights[facesOutsideBnd] = (1-d_finalNP)*t_area_bnd_edge + d_finalNP *t_area_bnd_outside
        areaWeights[facesInsideBnd] = t_area_bnd_inside
        areaWeightsTotal = areaWeights.sum(0)
        # areaWeightsTotal[areaWeightsTotal < 1] = 1
        self.areaWeightsTotal = areaWeightsTotal
        # Per-sample boundary colors: outside samples blend sample color
        # (weight d) with edge color (weight 1-d); inside samples average.
        finalColorBndOutside = np.zeros([self.nsamples, boundaryFaces.size, 3])
        finalColorBndOutside_edge = np.zeros([self.nsamples, boundaryFaces.size, 3])
        finalColorBndInside = np.zeros([self.nsamples, boundaryFaces.size, 3])
        sampleColorsOutside = sampleColors[facesOutsideBnd]
        self.sampleColorsOutside = sampleColors.copy()
        finalColorBndOutside[facesOutsideBnd] = sampleColorsOutside
        finalColorBndOutside[facesOutsideBnd] = sampleColorsOutside / self.nsamples
        self.finalColorBndOutside_for_dr = finalColorBndOutside.copy()
        # finalColorBndOutside[facesOutsideBnd] *= d_finalNP[:, None] * t_area_bnd_outside[:, None]
        finalColorBndOutside[facesOutsideBnd] *= d_finalNP[:, None]
        finalColorBndOutside_edge[facesOutsideBnd] = colorVertsEdge
        finalColorBndOutside_edge[facesOutsideBnd] = colorVertsEdge/ self.nsamples
        self.finalColorBndOutside_edge_for_dr = finalColorBndOutside_edge.copy()
        # finalColorBndOutside_edge[facesOutsideBnd] *= (1 - d_finalNP[:, None]) * t_area_bnd_edge[:, None]
        finalColorBndOutside_edge[facesOutsideBnd] *= (1 - d_finalNP[:, None])
        sampleColorsInside = sampleColors[facesInsideBnd]
        self.sampleColorsInside = sampleColorsInside.copy()
        # finalColorBndInside[facesInsideBnd] = sampleColorsInside * self.t_area_bnd_inside[:, None]
        finalColorBndInside[facesInsideBnd] = sampleColorsInside / self.nsamples
        finalColorBnd = finalColorBndOutside + finalColorBndOutside_edge + finalColorBndInside
        # finalColorBnd /= areaWeightsTotal[None, :, None]
        bndColorsImage = np.zeros_like(self.render_resolved)
        bndColorsImage[(zerosIm * boundaryImage), :] = np.sum(finalColorBnd, axis=0)
        finalColorImageBnd = bndColorsImage
    if np.any(boundaryImage):
        # Composite: boundary pixels from the blended image, rest untouched.
        finalColor = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * finalColorImageBnd
    else:
        finalColor = self.color_image
    finalColor[finalColor>1] = 1
    finalColor[finalColor<0] = 0
    return finalColor
def compute_derivatives_verts(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
width = self.frustum['width']
height = self.frustum['height']
num_channels = 3
n_channels = num_channels
vc_size = self.vc.size
n_norm = self.n_norm
dist = self.dist
linedist = self.linedist
d = self.d
v1 = self.v1
lnorm = self.lnorm
finalColorBndOutside_for_dr = self.finalColorBndOutside_for_dr
finalColorBndOutside_edge_for_dr = self.finalColorBndOutside_edge_for_dr
d_final_outside = self.d_final_outside
barycentricVertsIntersect = self.barycentricVertsIntersect
# xdiff = dEdx
# ydiff = dEdy
nVisF = len(visibility.ravel()[visible])
# projVertices = self.camera.r[f[visibility.ravel()[visible]].ravel()].reshape([nVisF,3, 2])
boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility!=4294967295)
rangeIm = np.arange(self.boundarybool_image.size)
zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
edge_visibility = self.boundaryid_image
vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
nsamples = self.nsamples
sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape(
[nsamples, -1, 2])
sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
nonBoundaryFaces = visibility[zerosIm * (~boundaryImage)&(visibility !=4294967295 )]
if np.any(boundaryImage):
boundaryFaces = visibility[boundaryImage]
nBndFaces = len(boundaryFaces)
projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
facesInsideBnd = projFacesBndTiled == sampleFaces
facesOutsideBnd = ~facesInsideBnd
# vertsProjBnd[None, :] - sampleV[:,None,:]
vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1,1,1])
vertsProjBndSamplesOutside = vertsProjBndSamples[facesOutsideBnd]
p1 = vertsProjBndSamplesOutside[:, 0, :]
p2 = vertsProjBndSamplesOutside[:, 1, :]
p = sampleV[facesOutsideBnd]
#Computing gradients:
#A multisampled pixel color is given by: w R + (1-w) R' thus:
#1 derivatives samples outside wrt v 1: (dw * (svc) - dw (bar'*vc') )/ nsamples for face sample
#2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
#3 derivatives samples outside wrt v bar edge: (1-w) (dbar'*vc') )/ nsamples for faces edge (barv1', barv2', 0)
#4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
#5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
#6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
#7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
#for every boundary pixel i,j we have list of sample faces. compute gradients at each and sum them according to face identity, options:
# - Best: create sparse matrix for every matrix. sum them! same can be done with boundary.
#Finally, stack data, and IJ of nonbnd with bnd on both dwrt_v and dwrt_vc.
######## 1 derivatives samples outside wrt v 1: (dw * (bar*vc) - dw (bar'*vc') )/ nsamples for face sample
# #Chumpy autodiff code to check derivatives here:
# chEdgeVerts = ch.Ch(vertsProjBndSamplesOutside)
#
# chEdgeVerts1 = chEdgeVerts[:,0,:]
# chEdgeVerts2 = chEdgeVerts[:,1,:]
#
# chSampleVerts = ch.Ch(sampleV[facesOutsideBnd])
# # c1 = (chEdgeVerts1 - chSampleVerts)
# # c2 = (chEdgeVerts2 - chSampleVerts)
# # n = (chEdgeVerts2 - chEdgeVerts1)
#
# #Code to check computation of distance below
# # d2 = ch.abs(c1[:,:,0]*c2[:,:,1] - c1[:,:,1]*c2[:,:,0]) / ch.sqrt((ch.sum(n**2,2)))
# # # np_mat = ch.dot(ch.array([[0,-1],[1,0]]), n)
# # np_mat2 = -ch.concatenate([-n[:,:,1][:,:,None], n[:,:,0][:,:,None]],2)
# # np_vec2 = np_mat2 / ch.sqrt((ch.sum(np_mat2**2,2)))[:,:,None]
# # d2 = d2 / ch.maximum(ch.abs(np_vec2[:,:,0]),ch.abs(np_vec2[:,:,1]))
#
# chl = (chEdgeVerts2 - chEdgeVerts1)
# chlinedist = ch.sqrt((ch.sum(chl**2,axis=1)))[:,None]
# chlnorm = chl/chlinedist
#
# chv1 = chSampleVerts - chEdgeVerts1
# chd = chv1[:,0]* chlnorm[:,0] + chv1[:,1]* chlnorm[:,1]
# chintersectPoint = chEdgeVerts1 + chd[:,None] * chlnorm
# # intersectPointDist1 = intersectPoint - chEdgeVerts1
# # intersectPointDist2 = intersectPoint - chEdgeVerts2
# # Code to check computation of distances below:
# # lengthIntersectToPoint1 = np.linalg.norm(intersectPointDist1.r,axis=1)
# # lengthIntersectToPoint2 = np.linalg.norm(intersectPointDist2.r,axis=1)
#
# chintersectPoint = chEdgeVerts1 + chd[:,None] * chlnorm
#
# chlineToPoint = (chSampleVerts - chintersectPoint)
# chn_norm = chlineToPoint / ch.sqrt((ch.sum(chlineToPoint ** 2, axis=1)))[:, None]
#
# chdist = chlineToPoint[:,0]*chn_norm[:,0] + chlineToPoint[:,1]*chn_norm[:,1]
#
# d_final_ch = chdist / ch.maximum(ch.abs(chn_norm[:, 0]), ch.abs(chn_norm[:, 1]))
#
# d_final_outside = d_final_ch.ravel()
# dwdv = d_final_outside.dr_wrt(chEdgeVerts1)
# rows = np.tile(np.arange(d_final_outside.shape[0])[None, :], [2, 1]).T.ravel()
# cols = np.arange(d_final_outside.shape[0] * 2)
#
# dwdv_r_v1 = np.array(dwdv[rows, cols]).reshape([-1, 2])
#
# dwdv = d_final_outside.dr_wrt(chEdgeVerts2)
# rows = np.tile(np.arange(d_final_ch.shape[0])[None, :], [2, 1]).T.ravel()
# cols = np.arange(d_final_ch.shape[0] * 2)
#
# dwdv_r_v2 = np.array(dwdv[rows, cols]).reshape([-1, 2])
nonIntersect = self.nonIntersect
argminDistNonIntersect = self.argminDistNonIntersect
max_dx_dy = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))
# d_final_np = dist / max_dx_dy
d_final_np = dist
ident = np.identity(2)
ident = np.tile(ident[None, :], [len(p2), 1, 1])
dlnorm = (ident - np.einsum('ij,ik->ijk', lnorm, lnorm)) / linedist[:, None]
dl_normdp1 = np.einsum('ijk,ikl->ijl', dlnorm, -ident)
dl_normdp2 = np.einsum('ijk,ikl->ijl', dlnorm, ident)
dv1dp1 = -ident
dv1dp2 = 0
dddp1 = np.einsum('ijk,ij->ik', dv1dp1, lnorm) + np.einsum('ij,ijl->il', v1, dl_normdp1)
dddp2 = 0 + np.einsum('ij,ijl->il', v1, dl_normdp2)
dipdp1 = ident + (dddp1[:,None,:]*lnorm[:,:,None]) + d[:,None,None]*dl_normdp1
dipdp2 = (dddp2[:,None,:]*lnorm[:,:,None]) + d[:,None,None]*dl_normdp2
dndp1 = -dipdp1
dndp2 = -dipdp2
dn_norm = (ident - np.einsum('ij,ik->ijk', n_norm, n_norm)) / dist[:,None]
dn_normdp1 = np.einsum('ijk,ikl->ijl', dn_norm, dndp1)
dn_normdp2 = np.einsum('ijk,ikl->ijl', dn_norm, dndp2)
ddistdp1 = np.einsum('ij,ijl->il', n_norm, dndp1)
ddistdp2 = np.einsum('ij,ijl->il', n_norm, dndp2)
argmax_nx_ny = np.argmax(np.abs(n_norm),axis=1)
dmax_nx_ny_p1 = np.sign(n_norm)[np.arange(len(n_norm)),argmax_nx_ny][:,None]*dn_normdp1[np.arange(len(dn_normdp1)),argmax_nx_ny]
dmax_nx_ny_p2 = np.sign(n_norm)[np.arange(len(n_norm)),argmax_nx_ny][:,None]*dn_normdp2[np.arange(len(dn_normdp2)),argmax_nx_ny]
# dd_final_dp1 = -1./max_dx_dy[:,None]**2 * dmax_nx_ny_p1 * dist + 1./max_dx_dy[:,None] * ddistdp1
# dd_final_dp2 = -1./max_dx_dy[:,None]**2 * dmax_nx_ny_p2 * dist + 1./max_dx_dy[:,None] * ddistdp2
dd_final_dp1 = ddistdp1
dd_final_dp2 = ddistdp2
#For those non intersecting points straight to the edge:
v1 = self.v1[nonIntersect][argminDistNonIntersect==0]
v1_norm = v1/np.sqrt((np.sum(v1**2,axis=1)))[:,None]
dd_final_dp1_nonintersect = -v1_norm
v2 = self.v2[nonIntersect][argminDistNonIntersect==1]
v2_norm = v2/np.sqrt((np.sum(v2**2,axis=1)))[:,None]
dd_final_dp2_nonintersect = -v2_norm
dd_final_dp1[nonIntersect][argminDistNonIntersect == 0] = dd_final_dp1_nonintersect
dd_final_dp1[nonIntersect][argminDistNonIntersect == 1] = 0
dd_final_dp2[nonIntersect][argminDistNonIntersect == 1] = dd_final_dp2_nonintersect
dd_final_dp2[nonIntersect][argminDistNonIntersect == 0] = 0
dImage_wrt_outside_v1 = finalColorBndOutside_for_dr[facesOutsideBnd][:,:,None]*dd_final_dp1[:,None,:] - dd_final_dp1[:,None,:]*finalColorBndOutside_edge_for_dr[facesOutsideBnd][:,:,None]
dImage_wrt_outside_v2 = finalColorBndOutside_for_dr[facesOutsideBnd][:,:,None]*dd_final_dp2[:,None,:] - dd_final_dp2[:,None,:]*finalColorBndOutside_edge_for_dr[facesOutsideBnd][:,:,None]
### Derivatives wrt V:
pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesOutsideBnd]
IS = np.tile(col(pixels), (1, 2*2)).ravel()
# faces = f[sampleFaces[facesOutsideBnd]].ravel()
faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
faces = np.tile(faces.reshape([1, -1, 2]), [self.nsamples, 1, 1])[facesOutsideBnd].ravel()
JS = col(faces)
JS = np.hstack((JS*2, JS*2+1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
data1 = dImage_wrt_outside_v1.transpose([1,0,2])
data2 = dImage_wrt_outside_v2.transpose([1,0,2])
data = np.concatenate([data1[:,:,None,:], data2[:,:,None,:]], 2)
data = data.ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_bnd_outside = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
######## 2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
######## 6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
verticesBnd = self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3])
sampleBarycentricBar = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([-1, 3, 1])
verts = np.sum(self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3, 3]) * sampleBarycentricBar, axis=1)
dImage_wrt_bar_v = self.barycentricDerivatives(verticesBnd, f[sampleFaces.ravel()], verts).swapaxes(0,1)
dImage_wrt_bar_v[facesOutsideBnd.ravel()] = dImage_wrt_bar_v[facesOutsideBnd.ravel()] * d_final_outside[:,None,None, None] * self.t_area_bnd_outside[:, None, None, None]
dImage_wrt_bar_v[facesInsideBnd.ravel()] = dImage_wrt_bar_v[facesInsideBnd.ravel()] * self.t_area_bnd_inside[:, None, None, None]
# dImage_wrt_bar_v /= np.tile(areaWeightsTotal[None,:], [self.nsamples,1]).ravel()[:, None,None, None]
dImage_wrt_bar_v /= self.nsamples
### Derivatives wrt V: 2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
# IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesOutsideBnd]
IS = np.tile(col(pixels), (1, 2*f.shape[1])).ravel()
faces = f[sampleFaces[facesOutsideBnd]].ravel()
JS = col(faces)
JS = np.hstack((JS*2, JS*2+1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
# data = np.tile(dImage_wrt_bar_v[facesOutsideBnd.ravel()][None,:],[3,1,1,1]).ravel()
data = np.transpose(dImage_wrt_bar_v[facesOutsideBnd.ravel()],[1,0,2,3]).ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_bar_outside = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
### Derivatives wrt V: 6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
# IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesInsideBnd]
IS = np.tile(col(pixels), (1, 2*f.shape[1])).ravel()
faces = f[sampleFaces[facesInsideBnd]].ravel()
JS = col(faces)
JS = np.hstack((JS*2, JS*2+1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
data = np.transpose(dImage_wrt_bar_v[facesInsideBnd.ravel()], [1, 0, 2, 3]).ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_bar_inside = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
####### 3 derivatives samples outside wrt v bar edge: (1-w) (dbar'*vc') )/ nsamples for faces edge (barv1', barv2', 0)
frontFacing = self.frontFacingEdgeFaces[(zerosIm * boundaryImage).ravel().astype(np.bool)].astype(np.bool)
frontFacingEdgeFaces = self.fpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]][frontFacing]
verticesBnd = self.v.r[f[frontFacingEdgeFaces.ravel()].ravel()].reshape([1, -1, 3])
verticesBnd = np.tile(verticesBnd, [self.nsamples, 1,1])
verticesBnd = verticesBnd.reshape([-1,3,3])[facesOutsideBnd.ravel()].reshape([-1,3])
verts = self.viewPointIntersect
fFrontEdge = np.tile(f[frontFacingEdgeFaces][None,:], [self.nsamples, 1, 1]).reshape([-1,3])[facesOutsideBnd.ravel()]
dImage_wrt_bar_v_edge = self.barycentricDerivatives(verticesBnd, fFrontEdge, verts).swapaxes(0, 1)
dImage_wrt_bar_v_edge = dImage_wrt_bar_v_edge * (1-d_final_outside[:,None,None, None]) * self.t_area_bnd_edge[:, None, None, None]
# dImage_wrt_bar_v_edge /= np.tile(self.areaWeightsTotal[None,:], [self.nsamples,1])[facesOutsideBnd][:, None, None,None]
dImage_wrt_bar_v_edge /= self.nsamples
### Derivatives wrt V:
pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesOutsideBnd]
IS = np.tile(col(pixels), (1, 3 * 2)).ravel()
# faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
faces = f[frontFacingEdgeFaces]
faces = np.tile(faces.reshape([1, -1, 3]), [self.nsamples, 1, 1])[facesOutsideBnd].ravel()
JS = col(faces)
JS = np.hstack((JS*2, JS*2+1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
data = np.transpose(dImage_wrt_bar_v_edge, [1, 0, 2, 3]).ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_bar_outside_edge = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
########### Non boundary derivatives: ####################
nNonBndFaces = nonBoundaryFaces.size
verticesNonBnd = self.v.r[f[nonBoundaryFaces].ravel()]
vertsPerFaceProjBnd = self.camera.r[f[nonBoundaryFaces].ravel()].reshape([-1,3,2])
nv = len(vertsPerFaceProjBnd)
p0_proj = np.c_[vertsPerFaceProjBnd[:, 0, :], np.ones([nv, 1])]
p1_proj = np.c_[vertsPerFaceProjBnd[:, 1, :], np.ones([nv, 1])]
p2_proj = np.c_[vertsPerFaceProjBnd[:, 2, :], np.ones([nv, 1])]
t_area_nonbnd = np.abs(np.linalg.det(np.concatenate([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)) * 0.5)
t_area_nonbnd[t_area_nonbnd> 1] = 1
bc = barycentric[((~boundaryImage)&(visibility !=4294967295 ))].reshape((-1, 3))
verts = np.sum(self.v.r[f[nonBoundaryFaces.ravel()].ravel()].reshape([-1, 3, 3]) * bc[:, :,None], axis=1)
didp = self.barycentricDerivatives(verticesNonBnd, f[nonBoundaryFaces.ravel()], verts)
didp = didp * t_area_nonbnd[None,:,None, None]
n_channels = np.atleast_3d(observed).shape[2]
shape = visibility.shape
####### 2: Take the data and copy the corresponding dxs and dys to these new pixels.
### Derivatives wrt V:
# IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
pixels = np.where(((~boundaryImage)&(visibility !=4294967295 )).ravel())[0]
IS = np.tile(col(pixels), (1, 2*f.shape[1])).ravel()
JS = col(f[nonBoundaryFaces].ravel())
JS = np.hstack((JS*2, JS*2+1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
# data = np.concatenate(((visTriVC[:,0,:] * dBar1dx[:,None])[:,:,None],(visTriVC[:, 0, :] * dBar1dy[:, None])[:,:,None], (visTriVC[:,1,:]* dBar2dx[:,None])[:,:,None], (visTriVC[:, 1, :] * dBar2dy[:, None])[:,:,None],(visTriVC[:,2,:]* dBar3dx[:,None])[:,:,None],(visTriVC[:, 2, :] * dBar3dy[:, None])[:,:,None]),axis=2).swapaxes(0,1).ravel()
data = didp.ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_nonbnd = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
# result_wrt_verts_nonbnd.sum_duplicates()
if np.any(boundaryImage):
result_wrt_verts = result_wrt_verts_bnd_outside + result_wrt_verts_bar_outside + result_wrt_verts_bar_inside + result_wrt_verts_bar_outside_edge + result_wrt_verts_nonbnd
# result_wrt_verts = result_wrt_verts_bnd_outside
else:
result_wrt_verts = result_wrt_verts_nonbnd
return result_wrt_verts
    def compute_derivatives_vc(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
        """Assemble the sparse Jacobian of the rendered image w.r.t. vertex colors.

        The pixel model is a multisampled blend ``w*R + (1-w)*R'`` (see the
        derivative breakdown comments below).  Boundary pixels get three
        contributions (terms 4, 5, 7) and interior pixels one barycentric
        term; each contribution is built as a `sp.csc_matrix` of shape
        ``(width*height*num_channels, vc_size)`` and summed.

        Parameters (shapes assumed from usage — not all are verifiable here):
            observed: observed image (unused in this method).
            visible: visible-pixel indices (unused in this method).
            visibility: per-pixel face id image; 4294967295 marks background.
            barycentric: per-pixel barycentric-coordinate image.
            image_width, image_height, num_verts: unused here (the result
                shape uses ``self.frustum`` and ``self.vc.size`` instead).
            f: face/vertex index array.

        Returns:
            scipy.sparse.csc_matrix mapping vertex-color perturbations to
            per-pixel color perturbations.
        """
        width = self.frustum['width']
        height = self.frustum['height']
        num_channels = 3
        n_channels = num_channels
        vc_size = self.vc.size
        # Precomputed boundary-sampling state (set elsewhere on this object).
        d_final_outside = self.d_final_outside
        barycentricVertsIntersect = self.barycentricVertsIntersect
        # Boundary pixels that actually cover geometry (4294967295 == background).
        boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility!=4294967295)
        zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
        edge_visibility = self.boundaryid_image
        # Projected endpoints of the visible boundary edge under each boundary pixel.
        vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
        nsamples = self.nsamples
        # Per-sample face ids at boundary pixels (stored 1-based; -1 rebases to 0).
        sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
        sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
        nonBoundaryFaces = visibility[zerosIm * (~boundaryImage)&(visibility !=4294967295 )]
        if np.any(boundaryImage):
            boundaryFaces = visibility[boundaryImage]
            nBndFaces = len(boundaryFaces)
            projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
            # Samples whose face matches the pixel's face are "inside"; others "outside".
            facesInsideBnd = projFacesBndTiled == sampleFaces
            facesOutsideBnd = ~facesInsideBnd
            # vertsProjBnd[None, :] - sampleV[:,None,:]
            vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1,1,1])
            vertsProjBndSamplesOutside = vertsProjBndSamples[facesOutsideBnd]
            #Computing gradients:
            #A multisampled pixel color is given by: w R + (1-w) R' thus:
            #1 derivatives samples outside wrt v 1: (dw * (svc) - dw (bar'*vc') )/ nsamples for face sample
            #2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
            #3 derivatives samples outside wrt v bar edge: (1-w) (dbar'*vc') )/ nsamples for faces edge (barv1', barv2', 0)
            #4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
            #5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
            #6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
            #7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
            #for every boundary pixel i,j we have list of sample faces. compute gradients at each and sum them according to face identity, options:
            # - Best: create sparse matrix for every matrix. sum them! same can be done with boundary.
            ####### 4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
            dImage_wrt_outside_vc_outside = d_final_outside[:,None] * sampleBarycentric[facesOutsideBnd] / self.nsamples
            ### Derivatives wrt VC:
            # Each pixel relies on three verts
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None,:], [self.nsamples, 1])[facesOutsideBnd]
            IS = np.tile(col(pixels), (1, 3)).ravel()
            faces = f[sampleFaces[facesOutsideBnd]].ravel()
            JS = col(faces)
            data = dImage_wrt_outside_vc_outside.ravel()
            # Replicate row/col/data per color channel (channels are interleaved).
            IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
            JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
            data = np.concatenate([data for i in range(num_channels)])
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
            result_wrt_vc_bnd_outside = result
            # result_wrt_vc_bnd_outside.sum_duplicates()
            ######## 5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
            # Edge barycentric weights: (t, 1-t) along the two edge endpoints.
            dImage_wrt_outside_vc_edge = (1-d_final_outside[:, None]) * np.c_[barycentricVertsIntersect, 1-barycentricVertsIntersect] / self.nsamples
            ### Derivatives wrt VC:
            # Each pixel relies on three verts
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None,:], [self.nsamples, 1])[facesOutsideBnd]
            IS = np.tile(col(pixels), (1, 2)).ravel()
            faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
            faces = np.tile(faces.reshape([1,-1,2]),[self.nsamples, 1, 1])[facesOutsideBnd].ravel()
            JS = col(faces)
            data = dImage_wrt_outside_vc_edge.ravel()
            IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
            JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
            data = np.concatenate([data for i in range(num_channels)])
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result_wrt_vc_bnd_outside_edge = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
            # result_wrt_vc_bnd_outside_edge.sum_duplicates()
            ######## 7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
            dImage_wrt_outside_vc_inside = sampleBarycentric[facesInsideBnd] / self.nsamples
            ### Derivatives wrt VC:
            # Each pixel relies on three verts
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None,:], [self.nsamples, 1])[facesInsideBnd]
            IS = np.tile(col(pixels), (1, 3)).ravel()
            faces = f[sampleFaces[facesInsideBnd]].ravel()
            JS = col(faces)
            data = dImage_wrt_outside_vc_inside.ravel()
            IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
            JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
            data = np.concatenate([data for i in range(num_channels)])
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result_wrt_vc_bnd_inside = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
            # result_wrt_vc_bnd_inside.sum_duplicates()
        ########### Non boundary derivatives: ####################
        nNonBndFaces = nonBoundaryFaces.size
        verticesNonBnd = self.v.r[f[nonBoundaryFaces].ravel()]
        # barySample = self.renders_sample_barycentric[0].reshape([-1,3])[(~boundaryImage)&(visibility !=4294967295 ).ravel().astype(np.bool), :]
        bc = barycentric[((~boundaryImage)&(visibility !=4294967295 ))].reshape((-1, 3))
        # barySample[barycentric[((~boundaryImage)&(visibility !=4294967295 ))].reshape((-1, 3))]
        ### Derivatives wrt VC:
        # Each pixel relies on three verts
        pixels = np.where(((~boundaryImage)&(visibility !=4294967295 )).ravel())[0]
        IS = np.tile(col(pixels), (1, 3)).ravel()
        JS = col(f[nonBoundaryFaces].ravel())
        # NOTE(review): bc is recomputed identically here; the assignment above
        # appears redundant — confirm before removing.
        bc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
        # bc = barySample.reshape((-1, 3))
        data = np.asarray(bc, order='C').ravel()
        IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
        JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
        data = np.concatenate([data for i in range(num_channels)])
        # IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
        # JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
        # data = np.concatenate((data, data, data))
        ij = np.vstack((IS.ravel(), JS.ravel()))
        result = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
        result_wrt_vc_nonbnd = result
        # result_wrt_vc_nonbnd.sum_duplicates()
        if np.any(boundaryImage):
            # result_wrt_verts = result_wrt_verts_bar_outside_edge
            # result_wrt_verts = result_wrt_verts_nonbnd
            result_wrt_vc = result_wrt_vc_bnd_outside + result_wrt_vc_bnd_outside_edge + result_wrt_vc_bnd_inside + result_wrt_vc_nonbnd
            # result_wrt_vc = sp.csc_matrix((width * height * num_channels, vc_size))
        else:
            # result_wrt_verts = sp.csc_matrix((image_width*image_height*n_channels, num_verts*2))
            result_wrt_vc = result_wrt_vc_nonbnd
            # result_wrt_vc = sp.csc_matrix((width * height * num_channels, vc_size))
        return result_wrt_vc
def on_changed(self, which):
super().on_changed(which)
if 'v' or 'camera' in which:
for mesh in range(len(self.f_list)):
for polygons in range(len(self.f_list[mesh])):
f = self.f_list[mesh][polygons]
verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
self.vbo_verts_mesh[mesh][polygons].set_array(verts_by_face.astype(np.float32))
self.vbo_verts_mesh[mesh][polygons].bind()
if 'vc' in which:
for mesh in range(len(self.f_list)):
for polygons in range(len(self.f_list[mesh])):
f = self.f_list[mesh][polygons]
colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
self.vbo_colors_mesh[mesh][polygons].bind()
if 'f' in which:
self.vbo_indices.set_array(self.f.astype(np.uint32))
self.vbo_indices.bind()
self.vbo_indices_range.set_array(np.arange(self.f.size, dtype=np.uint32).ravel())
self.vbo_indices_range.bind()
flen = 1
for mesh in range(len(self.f_list)):
for polygons in range(len(self.f_list[mesh])):
f = self.f_list[mesh][polygons]
# fc = np.arange(flen, flen + len(f))
fc = np.tile(np.arange(flen, flen + len(f))[:, None], [1, 3]).ravel()
# fc[:, 0] = fc[:, 0] & 255
# fc[:, 1] = (fc[:, 1] >> 8) & 255
# fc[:, 2] = (fc[:, 2] >> 16) & 255
fc = np.asarray(fc, dtype=np.uint32)
self.vbo_face_ids_list[mesh][polygons].set_array(fc)
self.vbo_face_ids_list[mesh][polygons].bind()
flen += len(f)
self.vbo_indices_mesh_list[mesh][polygons].set_array(np.array(self.f_list[mesh][polygons]).astype(np.uint32))
self.vbo_indices_mesh_list[mesh][polygons].bind()
if 'texture_stack' in which:
# gl = self.glf
# texture_data = np.array(self.texture_image*255., dtype='uint8', order='C')
# self.release_textures()
#
# for mesh in range(len(self.f_list)):
# textureIDs = []
# for polygons in range(len(self.f_list[mesh])):
# texture = None
# if self.haveUVs_list[mesh][polygons]:
# texture = GL.GLuint(0)
# GL.glGenTextures( 1, texture )
# GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_REPEAT)
# GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
# #Send texture.
# #Pol: Check if textures are float or uint from Blender import.
# image = (self.textures_list[mesh][polygons]*255.0).astype(np.uint8)
# GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8, image.shape[1], image.shape[0], 0, GL.GL_RGB, GL.GL_UNSIGNED_BYTE, image)
# textureIDs = textureIDs + [texture]
# self.textureID_mesh_list = self.textureID_mesh_list + [textureIDs]
# gl.GenTextures(1, tmp) # TODO: free after done
# self.textureID = tmp[0]
if self.initialized:
textureCoordIdx = 0
for mesh in range(len(self.f_list)):
for polygons in range(len(self.f_list[mesh])):
texture = None
if self.haveUVs_list[mesh][polygons]:
texture = self.textureID_mesh_list[mesh][polygons]
GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
#Update the OpenGL textures with all the textures. (Inefficient as many might not have changed).
image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
self.textures_list[mesh][polygons] = self.texture_stack[textureCoordIdx:image.size+textureCoordIdx].reshape(image.shape)
textureCoordIdx = textureCoordIdx + image.size
image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
image.reshape([image.shape[1], image.shape[0], -1]).ravel().tostring())
if 'v' or 'f' or 'vc' or 'ft' or 'camera' or 'texture_stack' in which:
self.render_image_buffers()
def release_textures(self):
if hasattr(self, 'textureID_mesh_list'):
if self.textureID_mesh_list != []:
for texture_mesh in self.textureID_mesh_list:
if texture_mesh != []:
for texture in texture_mesh:
if texture != None:
GL.glDeleteTextures(1, [texture.value])
self.textureID_mesh_list = []
@depends_on(dterms+terms)
def color_image(self):
self._call_on_changed()
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
no_overdraw = self.draw_color_image(with_vertex_colors=True, with_texture_on=True)
return no_overdraw
# if not self.overdraw:
# return no_overdraw
#
# GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
# overdraw = self.draw_color_image()
# GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
#
# # return overdraw * np.atleast_3d(self.boundarybool_image)
#
# boundarybool_image = self.boundarybool_image
# if self.num_channels > 1:
# boundarybool_image = np.atleast_3d(boundarybool_image)
#
# return np.asarray((overdraw*boundarybool_image + no_overdraw*(1-boundarybool_image)), order='C')
@depends_on('f', 'frustum', 'camera', 'overdraw')
def barycentric_image(self):
self._call_on_changed()
# Overload method to call without overdraw.
return self.draw_barycentric_image(self.boundarybool_image if self.overdraw else None)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def visibility_image(self):
self._call_on_changed()
#Overload method to call without overdraw.
return self.draw_visibility_image(self.v.r, self.f, self.boundarybool_image if self.overdraw else None)
def image_mesh_bool(self, meshes):
self.makeCurrentContext()
self._call_on_changed()
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
self._call_on_changed()
GL.glClearColor(0.,0.,0., 1.)
# use face colors if given
# FIXME: this won't work for 2 channels
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glUseProgram(self.colorProgram)
for mesh in meshes:
self.draw_index(mesh)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['height'],3).astype(np.uint32))[:,:,0]
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
return result!=0
@depends_on(dterms+terms)
def indices_image(self):
self._call_on_changed()
self.makeCurrentContext()
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
self._call_on_changed()
GL.glClearColor(0.,0.,0., 1.)
# use face colors if given
# FIXME: this won't work for 2 channels
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glUseProgram(self.colorProgram)
for index in range(len(self.f_list)):
self.draw_index(index)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['height'],3).astype(np.uint32))[:,:,0]
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
return result
def draw_index(self, index):
mesh = index
view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
MVP = np.dot(self.projectionMatrix, view_mtx)
vc = self.vc_list[mesh]
for polygons in np.arange(len(self.f_list[mesh])):
vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
GL.glBindVertexArray(vao_mesh)
f = self.f_list[mesh][polygons]
vbo_color = self.vbo_colors_mesh[mesh][polygons]
colors_by_face = np.asarray(vc.reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
colors = np.array(np.ones_like(colors_by_face) * (index) / 255.0, dtype=np.float32)
# Pol: Make a static zero vbo_color to make it more efficient?
vbo_color.set_array(colors)
vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
vbo_color.bind()
if self.f.shape[1]==2:
primtype = GL.GL_LINES
else:
primtype = GL.GL_TRIANGLES
GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, MVP)
GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
def draw_texcoord_image(self, v, f, ft, boundarybool_image=None):
# gl = glf
# gl.Disable(GL_TEXTURE_2D)
# gl.DisableClientState(GL_TEXTURE_COORD_ARR
self.makeCurrentContext()
shaders.glUseProgram(self.colorProgram)
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# want vtc: texture-coordinates per vertex (not per element in vc)
colors = ft
#use the third channel to identify the corresponding textures.
color3 = np.vstack([np.ones([self.ft_list[mesh].shape[0],1])*mesh for mesh in range(len(self.ft_list))]).astype(np.float32) / len(self.ft_list)
colors = np.asarray(np.hstack((colors, color3)), np.float64, order='C')
self.draw_colored_primitives(self.vao_dyn, v, f, colors)
#Why do we need this?
if boundarybool_image is not None:
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
self.draw_colored_primitives(self.vao_dyn, v, f, colors)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
result = np.flipud(np.frombuffer(GL.glReadPixels( 0,0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'],self.frustum['height'],3)[:,:,:3].astype(np.float64))/255.0
result[:,:,1] = 1. - result[:,:,1]
return result
@depends_on('ft', 'textures')
def mesh_tex_coords(self):
ftidxs = self.ft.ravel()
data = self.ft
# Pol: careful with this:
data[:,1] = 1.0 - 1.0*data[:,1]
return data
# Depends on 'f' because vpe/fpe depend on f
# Pol: Check that depends on works on other attributes that depend_on x, if x changes.
@depends_on( 'ft', 'f')
def wireframe_tex_coords(self):
print("wireframe_tex_coords is being computed!")
vvt = np.zeros((self.v.r.size/3,2), dtype=np.float64, order='C')
vvt[self.f.flatten()] = self.mesh_tex_coords
edata = np.zeros((self.vpe.size,2), dtype=np.float64, order='C')
edata = vvt[self.ma.ravel()]
return edata
    # TODO: can this not be inherited from base? turning off texture mapping in that instead?
    @depends_on(dterms+terms)
    def boundaryid_image(self):
        """Per-pixel boundary-edge-id image.

        Temporarily switches to the plain color program (boundary ids must not
        be texture-mapped), draws the boundary ids, then restores the color-
        texture program.  The bind/draw/restore order is significant.
        """
        self._call_on_changed()
        # self.texture_mapping_of
        self.makeCurrentContext()
        GL.glUseProgram(self.colorProgram)
        result = self.draw_boundaryid_image(self.v.r, self.f, self.vpe, self.fpe, self.camera)
        # Restore the textured program for subsequent color rendering.
        GL.glUseProgram(self.colorTextureProgram)
        # self.texture_mapping_on(with_vertex_colors=True)
        return result
def draw_color_image(self, with_vertex_colors=True, with_texture_on=True):
    """Render the scene (texture modulated by vertex colors) and return it
    as a float64 image in [0, 1] of shape (height, width, 3).

    with_vertex_colors: if False, upload white vertex colors so only the
        texture contributes.
    with_texture_on: if False (or a mesh has no UVs), draw with the plain
        color program instead of the textured one.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glEnable(GL.GL_MULTISAMPLE)
    if hasattr(self, 'bgcolor'):
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1%self.num_channels], self.bgcolor.r[2%self.num_channels], 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # Draw into the multisampled FBO when MSAA is on; blit to self.fbo later.
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            f = self.f_list[mesh][polygons]
            verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_color = self.vbo_colors_mesh[mesh][polygons]
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vc = colors_by_face
            if with_vertex_colors:
                colors = vc.astype(np.float32)
            else:
                # Texture only: white vertex colors leave the texel color unchanged.
                colors = np.ones_like(vc).astype(np.float32)
            # Pol: Make a static zero vbo_color to make it more efficient?
            vbo_color.set_array(colors)
            vbo_color.bind()
            if self.f.shape[1] == 2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            if with_texture_on and self.haveUVs_list[mesh][polygons]:
                GL.glUseProgram(self.colorTextureProgram)
                texture = self.textureID_mesh_list[mesh][polygons]
                GL.glActiveTexture(GL.GL_TEXTURE0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                GL.glUniform1i(self.textureID, 0)
            else:
                GL.glUseProgram(self.colorProgram)
            # NOTE(review): MVP_texture_location is reused for both programs;
            # this only works if 'MVP' has the same location in both shaders —
            # confirm against initGLTexture / the base color program.
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
    # Resolve the (possibly multisampled) render into self.fbo for readback.
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'], GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape to (height, width, 3).  The original reshaped to
    # (height, height, 3), which raises (or garbles the image) whenever the
    # viewport is not square — glReadPixels returns width*height*3 bytes.
    result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['width'], 3).astype(np.float64)) / 255.0
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glDisable(GL.GL_MULTISAMPLE)
    GL.glClearColor(0., 0., 0., 1.)
    if hasattr(self, 'background_image'):
        # Pixels whose visibility id equals the 0xFFFFFFFF sentinel are
        # background; composite the background image there.
        bg_px = np.tile(np.atleast_3d(self.visibility_image) == 4294967295, (1, 1, 3))
        fg_px = 1 - bg_px
        result = bg_px * self.background_image + fg_px * result
    return result
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image_quantized(self):
    """Quantize the per-pixel texture coordinates into flat texel indices.

    Returns (texel_index_image, texture_idx) where the texel index is a
    row-major offset into ``self.texture_image`` and ``texture_idx``
    identifies which texture of ``self.ft_list`` each pixel samples.
    """
    uv = self.texcoord_image[:, :, :2].copy()
    # Temporary: the first texture defines the quantization grid.
    self.texture_image = self.textures_list[0][0].r.copy()
    tex_h = self.texture_image.shape[0]
    tex_w = self.texture_image.shape[1]
    # Scale normalized UVs into pixel units of the texture.
    uv[:, :, 0] *= tex_w - 1
    uv[:, :, 1] *= tex_h - 1
    # The third channel encodes which texture the pixel belongs to.
    texture_idx = (self.texcoord_image[:, :, 2] * len(self.ft_list)).astype(np.uint32)
    rounded = np.round(uv)
    flat_index = rounded[:, :, 0] + rounded[:, :, 1] * tex_w
    return flat_index, texture_idx
def checkBufferNum(self):
    # NOTE(review): allocates one GL buffer id and discards it — presumably a
    # debug/sanity probe that the current GL context can allocate buffers;
    # confirm intent with callers before relying on it.
    GL.glGenBuffers(1)
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image(self):
    """Per-pixel texture-coordinate image rendered by ``draw_texcoord_image``;
    boundary overdraw is applied only when ``self.overdraw`` is set."""
    return self.draw_texcoord_image(self.v.r, self.f, self.ft, self.boundarybool_image if self.overdraw else None)
class AnalyticRendererOpenDR(ColoredRenderer):
    """Renderer computing analytic derivatives via multisampled error buffers."""
    # Non-differentiable inputs the renderer depends on.
    terms = 'f', 'frustum', 'vt', 'ft', 'background_image', 'overdraw', 'ft_list', 'haveUVs_list', 'textures_list', 'vc_list' , 'imageGT'
    # Differentiable inputs.  NOTE(review): the terms/dterms split follows the
    # chumpy/OpenDR convention — confirm against the ColoredRenderer base class.
    dterms = 'vc', 'camera', 'bgcolor', 'texture_stack', 'v'

    def __init__(self):
        super().__init__()
def clear(self):
    """Release all GL resources owned by this renderer: every VBO group,
    the per-mesh VAOs, the textures, and the textured shader program.

    Best-effort: if GL state was never initialized, cleanup is skipped
    with a message rather than raising.
    """
    try:
        GL.glFlush()
        GL.glFinish()
        # Empty, (un)bind and delete every VBO this renderer created.
        # (The original did this with side-effecting list comprehensions,
        # one pass per operation; a plain loop per buffer is equivalent.)
        vbo_groups = (self.vbo_indices_mesh_list,
                      self.vbo_colors_mesh,
                      self.vbo_verts_mesh,
                      self.vbo_uvs_mesh,
                      self.vbo_face_ids_list)
        for group in vbo_groups:
            for sublist in group:
                for buf in sublist:
                    buf.set_array(np.array([]))
                    buf.bind()
                    buf.unbind()
                    buf.delete()
        for sublist in self.vao_tex_mesh_list:
            for vao in sublist:
                GL.glDeleteVertexArrays(1, [vao.value])
        self.release_textures()
        if self.glMode == 'glfw':
            import glfw
            glfw.make_context_current(self.win)
        GL.glDeleteProgram(self.colorTextureProgram)
        super().clear()
    except Exception:
        # BUGFIX: the original used a bare `except:` and dropped into
        # pdb.set_trace(), which hangs any non-interactive run on failure.
        # Keep the best-effort contract without the debugger trap.
        print("Program had not been initialized")
def initGLTexture(self):
    """Compile the textured-color shader program and build, for every mesh
    and polygon group, the VAO/VBOs (indices, vertices, colors, UVs) and the
    GL texture object used by the textured draw path.

    Populates: colorTextureProgram, MVP_texture_location, textureID, and the
    per-mesh lists vbo_indices_mesh_list, vbo_colors_mesh, vbo_verts_mesh,
    vbo_uvs_mesh, vao_tex_mesh_list, textureID_mesh_list.
    """
    print("Initializing Texture OpenGL.")
    FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
// Interpolated values from the vertex shaders
//#extension GL_EXT_shader_image_load_store : enable
in vec3 theColor;
in vec2 UV;
uniform sampler2D myTextureSampler;
// Ouput data
out vec3 color;
void main(){
color = theColor * texture2D( myTextureSampler, UV).rgb;
}""", GL.GL_FRAGMENT_SHADER)
    VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout(location = 2) in vec2 vertexUV;
uniform mat4 MVP;
out vec3 theColor;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
theColor = color;
UV = vertexUV;
}""", GL.GL_VERTEX_SHADER)
    self.colorTextureProgram = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
    # Attribute/uniform locations of the freshly linked program.
    position_location = GL.glGetAttribLocation(self.colorTextureProgram, 'position')
    color_location = GL.glGetAttribLocation(self.colorTextureProgram, 'color')
    uvs_location = GL.glGetAttribLocation(self.colorTextureProgram, 'vertexUV')
    self.MVP_texture_location = GL.glGetUniformLocation(self.colorTextureProgram, 'MVP')
    self.vbo_indices_mesh_list = []
    self.vbo_colors_mesh = []
    self.vbo_verts_mesh = []
    self.vao_tex_mesh_list = []
    self.vbo_uvs_mesh = []
    self.textureID_mesh_list = []
    GL.glLineWidth(2.)
    for mesh in range(len(self.f_list)):
        vaos_mesh = []
        vbo_indices_mesh = []
        vbo_colors_mesh = []
        vbo_vertices_mesh = []
        vbo_uvs_mesh = []
        textureIDs_mesh = []
        for polygons in range(len(self.f_list[mesh])):
            vao = GL.GLuint(0)
            GL.glGenVertexArrays(1, vao)
            GL.glBindVertexArray(vao)
            f = self.f_list[mesh][polygons]
            # Expand per-vertex data to per-face-corner so each corner can
            # carry its own color/UV.
            verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_verts = vbo.VBO(np.array(verts_by_face).astype(np.float32))
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_colors = vbo.VBO(np.array(colors_by_face).astype(np.float32))
            uvs_by_face = np.asarray(self.ft_list[mesh].reshape((-1, 2))[f.ravel()], dtype=np.float32, order='C')
            vbo_uvs = vbo.VBO(np.array(uvs_by_face).astype(np.float32))
            vbo_indices = vbo.VBO(np.array(self.f_list[mesh][polygons]).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
            vbo_indices.bind()
            vbo_verts.bind()
            GL.glEnableVertexAttribArray(position_location)  # 'location = 0' in shader
            GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_colors.bind()
            GL.glEnableVertexAttribArray(color_location)  # 'location = 1' in shader
            GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            if self.haveUVs_list[mesh][polygons]:
                vbo_uvs.bind()
                GL.glEnableVertexAttribArray(uvs_location)  # 'location = 2' in shader
                GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            # Textures (only for polygon groups that have UVs):
            texture = None
            if self.haveUVs_list[mesh][polygons]:
                texture = GL.GLuint(0)
                GL.glGenTextures( 1, texture )
                GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                # Upload flipped vertically to match GL's bottom-left UV origin.
                image = np.array(np.flipud((self.textures_list[mesh][polygons])), order='C', dtype=np.float32)
                GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
            textureIDs_mesh = textureIDs_mesh + [texture]
            vbo_indices_mesh = vbo_indices_mesh + [vbo_indices]
            vbo_colors_mesh = vbo_colors_mesh + [vbo_colors]
            vbo_vertices_mesh = vbo_vertices_mesh + [vbo_verts]
            vbo_uvs_mesh = vbo_uvs_mesh + [vbo_uvs]
            vaos_mesh = vaos_mesh + [vao]
        self.textureID_mesh_list = self.textureID_mesh_list + [textureIDs_mesh]
        self.vao_tex_mesh_list = self.vao_tex_mesh_list + [vaos_mesh]
        self.vbo_indices_mesh_list = self.vbo_indices_mesh_list + [vbo_indices_mesh]
        self.vbo_colors_mesh = self.vbo_colors_mesh + [vbo_colors_mesh]
        self.vbo_verts_mesh = self.vbo_verts_mesh + [vbo_vertices_mesh]
        self.vbo_uvs_mesh = self.vbo_uvs_mesh + [vbo_uvs_mesh]
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glBindVertexArray(0)
    self.textureID = GL.glGetUniformLocation(self.colorTextureProgram, "myTextureSampler")
def initGL_AnalyticRenderer(self):
    """Set up the analytic-derivative pipeline: the error/sample shader
    programs, the multisampled error FBO, the single-sample fetch FBO, a
    non-multisampled fallback FBO, and per-mesh VAOs that additionally carry
    per-corner face ids and barycentric coordinates.
    """
    self.initGLTexture()
    self.updateRender = True
    self.updateDerivatives = True
    GL.glEnable(GL.GL_MULTISAMPLE)
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    GL.glMinSampleShading(1.0)
    VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 colorIn;
layout(location = 2) in vec2 vertexUV;
layout(location = 3) in uint face_id;
layout(location = 4) in vec3 barycentric;
uniform mat4 MVP;
out vec3 theColor;
out vec4 pos;
flat out uint face_out;
out vec3 barycentric_vert_out;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
pos = MVP * vec4(position,1);
//pos = pos4.xyz;
theColor = colorIn;
UV = vertexUV;
face_out = face_id;
barycentric_vert_out = barycentric;
}""", GL.GL_VERTEX_SHADER)
    ERRORS_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
//layout(early_fragment_tests) in;
// Interpolated values from the vertex shaders
in vec3 theColor;
in vec2 UV;
flat in uint face_out;
in vec4 pos;
in vec3 barycentric_vert_out;
layout(location = 3) uniform sampler2D myTextureSampler;
uniform float ww;
uniform float wh;
// Ouput data
layout(location = 0) out vec3 color;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 barycentric1;
layout(location = 4) out vec2 barycentric2;
void main(){
vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;
color = finalColor.rgb;
sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);
sample_face = face_out;
barycentric1 = barycentric_vert_out.xy;
barycentric2 = vec2(barycentric_vert_out.z, 0.);
}""", GL.GL_FRAGMENT_SHADER)
    self.errorTextureProgram = shaders.compileProgram(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)
    # Sample-fetch program: a fullscreen quad emitted by the geometry shader,
    # with the fragment shader reading one sample of each MS attachment.
    FETCH_VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
void main() {}
""", GL.GL_VERTEX_SHADER)
    FETCH_GEOMETRY_SHADER = shaders.compileShader("""#version 330 core
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
const vec2 data[4] = vec2[]
(
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2( 1.0, -1.0)
);
void main() {
for (int i = 0; i < 4; ++i) {
gl_Position = vec4( data[i], 0.0, 1.0 );
EmitVertex();
}
EndPrimitive();
}""", GL.GL_GEOMETRY_SHADER)
    FETCH_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
layout(location = 2) uniform sampler2DMS colors;
layout(location = 3) uniform sampler2DMS sample_positions;
layout(location = 4) uniform usampler2DMS sample_faces;
layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;
layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;
uniform float ww;
uniform float wh;
uniform int sample;
// Ouput data
layout(location = 0) out vec3 colorFetchOut;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 sample_barycentric1;
layout(location = 4) out vec2 sample_barycentric2;
//out int gl_SampleMask[];
const int all_sample_mask = 0xffff;
void main(){
ivec2 texcoord = ivec2(gl_FragCoord.xy);
colorFetchOut = texelFetch(colors, texcoord, sample).xyz;
sample_pos = texelFetch(sample_positions, texcoord, sample).xy;
sample_face = texelFetch(sample_faces, texcoord, sample).r;
sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;
sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;
}""", GL.GL_FRAGMENT_SHADER)
    # Keep float readbacks unclamped so sample positions/barycentrics survive.
    GL.glClampColor(GL.GL_CLAMP_READ_COLOR, False)
    self.fetchSamplesProgram = shaders.compileProgram(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER, FETCH_FRAGMENT_SHADER)
    self.textureGT = GL.GLuint(0)
    # 1x1 white fallback texture: sampling it leaves vertex colors unchanged
    # for meshes without UVs.
    whitePixel = np.ones([1,1,3])
    self.whitePixelTextureID = GL.GLuint(0)
    GL.glGenTextures( 1, self.whitePixelTextureID )
    GL.glBindTexture(GL.GL_TEXTURE_2D, self.whitePixelTextureID)
    image = np.array(np.flipud((whitePixel)), order='C', dtype=np.float32)
    GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
    GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
    # --- Multisampled error FBO: 5 MS color attachments (color, sample pos,
    # face id, barycentric xy, barycentric z) plus an MS depth texture.
    self.fbo_ms_errors = GL.glGenFramebuffers(1)
    GL.glDepthMask(GL.GL_TRUE)
    GL.glEnable(GL.GL_MULTISAMPLE)
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    GL.glMinSampleShading(1.0)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)
    self.texture_errors_render = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RGB8, self.frustum['width'], self.frustum['height'], False)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)
    self.texture_errors_sample_position = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)
    self.texture_errors_sample_faces = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_R32UI, self.frustum['width'], self.frustum['height'], False)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)
    self.texture_errors_sample_barycentric1 = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1, 0)
    self.texture_errors_sample_barycentric2 = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2, 0)
    self.z_buf_ms_errors = GL.glGenTextures(1)
    GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)
    GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'], False)
    GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)
    GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
    print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
    assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    # --- Single-sample fetch FBO: renderbuffer targets mirroring the MS
    # attachments; the fetch program resolves one sample at a time into it.
    self.fbo_sample_fetch = GL.glGenFramebuffers(1)
    GL.glDepthMask(GL.GL_TRUE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)
    self.render_buffer_fetch_sample_render = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
    self.render_buffer_fetch_sample_position = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
    self.render_buffer_fetch_sample_face = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
    self.render_buffer_fetch_sample_barycentric1 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
    self.render_buffer_fetch_sample_barycentric2 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
    self.z_buf_samples_errors = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    GL.glDisable(GL.GL_CULL_FACE)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)
    GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
    print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
    assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    # --- Non-multisampled fallback FBO with the same attachment layout.
    self.fbo_errors_nonms = GL.glGenFramebuffers(1)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)
    render_buf_errors_render = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_render)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, render_buf_errors_render)
    render_buf_errors_sample_position = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
    render_buf_errors_sample_face = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
    render_buf_errors_sample_barycentric1 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
    render_buf_errors_sample_barycentric2 = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
    z_buf_samples_errors = GL.glGenRenderbuffers(1)
    GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, z_buf_samples_errors)
    GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
    GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, z_buf_samples_errors)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT)
    GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
    print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
    assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
    self.textureObjLoc = GL.glGetUniformLocation(self.errorTextureProgram, "myTextureSampler")
    # Attribute locations of the error program.
    position_location = GL.glGetAttribLocation(self.errorTextureProgram, 'position')
    color_location = GL.glGetAttribLocation(self.errorTextureProgram, 'colorIn')
    uvs_location = GL.glGetAttribLocation(self.errorTextureProgram, 'vertexUV')
    face_ids_location = GL.glGetAttribLocation(self.errorTextureProgram, 'face_id')
    barycentric_location = GL.glGetAttribLocation(self.errorTextureProgram, 'barycentric')
    GL.glBindVertexArray(0)
    # Attribute-less VAO used to emit the fullscreen quad in the fetch pass.
    self.vao_quad = GL.GLuint(0)
    GL.glGenVertexArrays(1, self.vao_quad)
    GL.glBindVertexArray(self.vao_quad)
    self.vbo_face_ids_list = []
    self.vbo_barycentric_list = []
    self.vao_errors_mesh_list = []
    # Global face ids start at 1; presumably 0 is reserved for "no face" /
    # background — TODO confirm against the consumers of sample_face.
    flen = 1
    for mesh in range(len(self.f_list)):
        vaos_mesh = []
        vbo_face_ids_mesh = []
        vbo_barycentric_mesh = []
        for polygons in np.arange(len(self.f_list[mesh])):
            vao = GL.GLuint(0)
            GL.glGenVertexArrays(1, vao)
            GL.glBindVertexArray(vao)
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            vbo_f.bind()
            vbo_verts = self.vbo_verts_mesh[mesh][polygons]
            vbo_verts.bind()
            GL.glEnableVertexAttribArray(position_location)  # 'location = 0' in shader
            GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_colors = self.vbo_colors_mesh[mesh][polygons]
            vbo_colors.bind()
            GL.glEnableVertexAttribArray(color_location)  # 'location = 1' in shader
            GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_uvs = self.vbo_uvs_mesh[mesh][polygons]
            vbo_uvs.bind()
            GL.glEnableVertexAttribArray(uvs_location)  # 'location = 2' in shader
            GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            f = self.f_list[mesh][polygons]
            # One global face id per face corner (integer attribute).
            fc = np.tile(np.arange(flen, flen + len(f))[:,None], [1,3]).ravel()
            fc = np.asarray(fc, dtype=np.uint32)
            vbo_face_ids = vbo.VBO(fc)
            vbo_face_ids.bind()
            GL.glEnableVertexAttribArray(face_ids_location)  # 'location = 3' in shader
            GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
            # Per-corner barycentric coordinates: identity per triangle.
            f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
            vbo_barycentric = vbo.VBO(f_barycentric)
            vbo_barycentric.bind()
            GL.glEnableVertexAttribArray(barycentric_location)  # 'location = 4' in shader
            GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            flen += len(f)
            vaos_mesh += [vao]
            vbo_face_ids_mesh += [vbo_face_ids]
            # BUGFIX: the original appended vbo_face_ids here too, so the
            # barycentric VBOs were never kept alive (free to be garbage
            # collected while the VAO still referenced their GL buffers).
            vbo_barycentric_mesh += [vbo_barycentric]
        GL.glBindVertexArray(0)
        self.vbo_face_ids_list += [vbo_face_ids_mesh]
        self.vbo_barycentric_list += [vbo_barycentric_mesh]
        self.vao_errors_mesh_list += [vaos_mesh]
def render_image_buffers(self):
    """Render the scene into multisampled FBO attachments, then resolve every
    MSAA sample into per-sample CPU-side buffers.

    Two passes:
      1. `errorTextureProgram` draws all meshes into `fbo_ms_errors`, writing
         color, sample position, face id and barycentric coords to five
         color attachments.
      2. `fetchSamplesProgram` is run once per MSAA sample to copy that
         sample's values into `fbo_sample_fetch`, which is read back with
         glReadPixels.

    Side effects: fills self.renders, self.renders_sample_pos,
    self.renders_faces, self.renders_sample_barycentric{1,2,} and
    self.render_resolved, and sets the updateRender/updateDerivatives_* flags
    so downstream getters recompute.
    """
    # Per-sample shading: force the fragment shader to run for every sample.
    GL.glEnable(GL.GL_MULTISAMPLE)
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    GL.glMinSampleShading(1.0)
    self.makeCurrentContext()
    if hasattr(self, 'bgcolor'):
        # Modulo indexing lets a 1-channel bgcolor stand in for all 3 channels.
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1%self.num_channels], self.bgcolor.r[2%self.num_channels], 1.)
    # --- Pass 1: draw geometry into the multisampled "errors" FBO. ---
    GL.glUseProgram(self.errorTextureProgram)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3, GL.GL_COLOR_ATTACHMENT4]
    GL.glDrawBuffers(5, drawingBuffers)
    GL.glClearColor(0., 0., 0., 0.)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    wwLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'ww')
    whLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'wh')
    GL.glUniform1f(wwLoc, self.frustum['width'])
    GL.glUniform1f(whLoc, self.frustum['height'])
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_errors_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            # Re-upload vertex colors every frame (vc may have changed).
            f = self.f_list[mesh][polygons]
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
            self.vbo_colors_mesh[mesh][polygons].bind()
            if self.f.shape[1]==2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            # Lines are not supported by this pass.
            assert(primtype == GL.GL_TRIANGLES)
            # Fall back to a 1x1 white texture for untextured meshes so the
            # same shader works for both.
            if self.haveUVs_list[mesh][polygons]:
                texture = self.textureID_mesh_list[mesh][polygons]
            else:
                texture = self.whitePixelTextureID
            GL.glActiveTexture(GL.GL_TEXTURE0)
            GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
            GL.glUniform1i(self.textureObjLoc, 0)
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f)*vbo_f.data.shape[1])
    # (A large commented-out block here — background-cube draw and blit-based
    # readback experiments — was condensed for readability.)
    # --- Pass 2: fetch each MSAA sample's values via fetchSamplesProgram. ---
    GL.glUseProgram(self.fetchSamplesProgram)
    self.colorsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "colors")
    self.sample_positionsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_positions")
    self.sample_facesLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_faces")
    self.sample_barycentric1Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords1")
    self.sample_barycentric2Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords2")
    wwLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'ww')
    whLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'wh')
    GL.glUniform1f(wwLoc, self.frustum['width'])
    GL.glUniform1f(whLoc, self.frustum['height'])
    # Per-sample output buffers, one slab per MSAA sample.
    self.renders = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'],3])
    self.renders_sample_pos = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'],2])
    self.renders_faces = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height']]).astype(np.uint32)
    self.renders_sample_barycentric1 = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 2])
    self.renders_sample_barycentric2 = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'],1])
    self.renders_sample_barycentric = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'],3])
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3,
                      GL.GL_COLOR_ATTACHMENT4]
    GL.glDrawBuffers(5, drawingBuffers)
    GL.glClearColor(0., 0., 0., 0.)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    for sample in np.arange(self.nsamples):
        sampleLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'sample')
        GL.glUniform1i(sampleLoc, sample)
        # Bind the five multisample textures from pass 1 to units 0..4.
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
        GL.glUniform1i(self.colorsLoc, 0)
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
        GL.glUniform1i(self.sample_positionsLoc, 1)
        GL.glActiveTexture(GL.GL_TEXTURE2)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
        GL.glUniform1i(self.sample_facesLoc, 2)
        GL.glActiveTexture(GL.GL_TEXTURE3)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
        GL.glUniform1i(self.sample_barycentric1Loc, 3)
        GL.glActiveTexture(GL.GL_TEXTURE4)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
        GL.glUniform1i(self.sample_barycentric2Loc, 4)
        # Full-screen pass driven by a single point + geometry/vertex trick.
        GL.glBindVertexArray(self.vao_quad)
        GL.glDrawArrays(GL.GL_POINTS, 0, 1)
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)
        # NOTE(review): the reshape below uses frustum['height'] for BOTH
        # dimensions; this is only correct for square viewports — confirm.
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
        self.renders[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:2].astype(np.float64))
        self.renders_sample_pos[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RED_INTEGER, GL.GL_UNSIGNED_INT), np.uint32).reshape(self.frustum['height'], self.frustum['height'])[:,:].astype(np.uint32))
        self.renders_faces[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT3)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:2].astype(np.float64))
        self.renders_sample_barycentric1[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT4)
        result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:1].astype(np.float64))
        self.renders_sample_barycentric2[sample] = result
        # Third barycentric coordinate is stored separately; stitch back to 3.
        self.renders_sample_barycentric[sample] = np.concatenate([self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2[sample][:,:,0:1]], 2)
    # Restore default GL state.
    GL.glBindVertexArray(0)
    GL.glClearColor(0.,0.,0., 1.)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glDisable(GL.GL_MULTISAMPLE)
    # Resolve: the final image is the mean over all MSAA samples.
    self.render_resolved = np.mean(self.renders, 0)
    self.updateRender = True
    self.updateDerivatives_verts = True
    self.updateDerivatives_vc = True
def draw_visibility_image_ms(self, v, f):
    """Draw a face-visibility image into the multisampled FBO.

    Each face is drawn with a flat color encoding its (1-based) face index
    packed into the RGB bytes, so that reading the buffer back recovers
    which face covers each sample.

    Assumes the camera / GL context are already set up.

    :param v: vertex array, reshapeable to (-1, 3).
    :param f: integer face array, one row per face.
    """
    GL.glUseProgram(self.visibilityProgram_ms)
    v = np.asarray(v)
    # BUGFIX: the original body unconditionally called
    # self.draw_visibility_image_ms(v, f) here, which recursed forever and
    # could never complete; the call has been removed.
    # Attach FBO
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # Encode face index i+1 into three color bytes (little-endian split):
    # R = low byte, G = middle byte, B = high byte.
    fc = np.arange(1, len(f)+1)
    fc = np.tile(fc.reshape((-1,1)), (1, 3))
    fc[:, 0] = fc[:, 0] & 255
    fc[:, 1] = (fc[:, 1] >> 8 ) & 255
    fc[:, 2] = (fc[:, 2] >> 16 ) & 255
    fc = np.asarray(fc, dtype=np.uint8)
    self.draw_colored_primitives_ms(self.vao_dyn_ub, v, f, fc)
# this assumes that fc is either "by faces" or "verts by face", not "by verts"
def draw_colored_primitives_ms(self, vao, v, f, fc=None):
    """Draw triangles with flat per-face (or per-vertex-of-face) colors into
    the multisampled errors FBO, color attachment 2.

    :param vao: vertex array object to bind (typically self.vao_dyn_ub).
    :param v: vertex positions, reshapeable to (-1, 3).
    :param f: integer face array, shape (n_faces, verts_per_face).
    :param fc: optional uint8-compatible colors, either one row per face or
               one row per face-vertex; expanded to per-face-vertex here.
    :raises Exception: if fc cannot be broadcast to one color per face-vertex.
    """
    # Expand shared vertices so each face gets its own copies (flat shading).
    verts_by_face = np.asarray(v.reshape((-1,3))[f.ravel()], dtype=np.float64, order='C')
    GL.glBindVertexArray(vao)
    self.vbo_verts_dyn.set_array(verts_by_face.astype(np.float32))
    self.vbo_verts_dyn.bind()
    if fc is not None:
        if fc.size == verts_by_face.size:
            # Already one color per face-vertex.
            vc_by_face = fc
        else:
            # One color per face: repeat it for each vertex of the face.
            vc_by_face = np.repeat(fc, f.shape[1], axis=0)
            if vc_by_face.size != verts_by_face.size:
                raise Exception('fc must have either rows=(#rows in faces) or rows=(# elements in faces)')
        vc_by_face = np.asarray(vc_by_face, dtype=np.uint8, order='C')
        self.vbo_colors_ub.set_array(vc_by_face)
        self.vbo_colors_ub.bind()
    primtype = GL.GL_TRIANGLES
    # Vertices were flattened above, so indices are simply 0..f.size-1.
    self.vbo_indices_dyn.set_array(np.arange(f.size, dtype=np.uint32).ravel())
    self.vbo_indices_dyn.bind()
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT2]
    GL.glDrawBuffers(1, drawingBuffers)
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))),np.float32))
    GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
    # Depth test is disabled for this pass and restored afterwards.
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glDrawElements(primtype, len(self.vbo_indices_dyn), GL.GL_UNSIGNED_INT, None)
    GL.glEnable(GL.GL_DEPTH_TEST)
def compute_dr_wrt(self, wrt):
    """Chumpy hook: Jacobian of this renderer's output with respect to `wrt`.

    Supported differentiation targets: self.camera (2D vertex projections),
    self.vc (vertex colors), self.bgcolor and self.texture_stack (both marked
    not working below). Returns None for anything else.
    """
    # NOTE: accessing visibility_image may trigger a (cached) recompute.
    visibility = self.visibility_image
    if wrt is self.camera:
        derivatives_verts = self.get_derivatives_verts()
        return derivatives_verts
    elif wrt is self.vc:
        derivatives_vc = self.get_derivatives_vc()
        return derivatives_vc
    # Not working atm.:
    elif wrt is self.bgcolor:
        return 2. * (self.imageGT.r - self.render_image).ravel() * common.dr_wrt_bgcolor(visibility, self.frustum, num_channels=self.num_channels)
    #Not working atm.:
    elif wrt is self.texture_stack:
        # 4294967295 == 0xFFFFFFFF is the "no face visible" sentinel.
        IS = np.nonzero(self.visibility_image.ravel() != 4294967295)[0]
        texcoords, texidx = self.texcoord_image_quantized
        vis_texidx = texidx.ravel()[IS]
        vis_texcoords = texcoords.ravel()[IS]
        JS = vis_texcoords * np.tile(col(vis_texidx), [1,2]).ravel()
        clr_im = -2. * (self.imageGT.r - self.render_image) * self.renderWithoutTexture
        # Debug visualization, deliberately disabled.
        if False:
            cv2.imshow('clr_im', clr_im)
            # cv2.imshow('texmap', self.texture_image.r)
            cv2.waitKey(1)
        r = clr_im[:,:,0].ravel()[IS]
        g = clr_im[:,:,1].ravel()[IS]
        b = clr_im[:,:,2].ravel()[IS]
        data = np.concatenate((r,g,b))
        # Expand pixel indices to the three interleaved RGB channels.
        IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
        JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
        return sp.csc_matrix((data, (IS, JS)), shape=(self.r.size, wrt.r.size))
    return None
def compute_r(self):
    """Chumpy hook: produce this node's value by rendering the scene."""
    rendered = self.render()
    return rendered
@depends_on(dterms+terms)
def renderWithoutColor(self):
    """Return the cached color-free render, refreshing dependencies first."""
    self._call_on_changed()
    result = self.render_nocolor
    return result
@depends_on(dterms+terms)
def renderWithoutTexture(self):
    """Return the cached texture-free render, refreshing dependencies first."""
    self._call_on_changed()
    result = self.render_notexture
    return result
# @depends_on(dterms+terms)
def render(self):
    """Return the final rendered image, recomputing it only when the
    updateRender flag is set (it is raised by render_image_buffers)."""
    self._call_on_changed()
    # 4294967295 == 0xFFFFFFFF marks pixels where no face is visible.
    visibility = self.visibility_image
    # NOTE(review): `color` and `barycentric` are unused here; the attribute
    # accesses may exist only for their caching side effects — confirm
    # before removing.
    color = self.render_resolved
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateRender:
        render = self.compute_image(visible, visibility, self.f)
        self.render_result = render
        self.updateRender = False
    return self.render_result
def get_derivatives_verts(self):
    """Return d(image)/d(2D projected vertices), recomputing lazily.

    Uses the boundary-aware variant when self.overdraw is set; both paths
    delegate to helpers in the `common` module.
    """
    self._call_on_changed()
    visibility = self.visibility_image
    color = self.render_resolved
    # 0xFFFFFFFF sentinel = background pixel.
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateDerivatives_verts:
        # Derivatives depend on the current render; refresh it if stale.
        if self.updateRender:
            self.render()
        # NOTE(review): self.v.r.size/3 is float division under Python 3;
        # presumably the vertex count should be integral — confirm the
        # callee tolerates a float here.
        if self.overdraw:
            derivatives_verts = common.dImage_wrt_2dVerts_bnd(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f, self.boundaryid_image != 4294967295)
        else:
            derivatives_verts = common.dImage_wrt_2dVerts(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f)
        self.derivatives_verts = derivatives_verts
        self.updateDerivatives_verts = False
    return self.derivatives_verts
def get_derivatives_vc(self):
    """Return d(image)/d(vertex colors), recomputing lazily."""
    self._call_on_changed()
    visibility = self.visibility_image
    color = self.render_resolved
    # 0xFFFFFFFF sentinel = background pixel.
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateDerivatives_vc:
        # Derivatives depend on the current render; refresh it if stale.
        if self.updateRender:
            self.render()
        derivatives_vc = self.compute_derivatives_vc(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)
        self.derivatives_vc = derivatives_vc
        self.updateDerivatives_vc = False
    return self.derivatives_vc
# # @depends_on(dterms+terms)
# def image_and_derivatives(self):
# # self._call_on_changed()
# visibility = self.visibility_image
#
# color = self.render_resolved
#
# visible = np.nonzero(visibility.ravel() != 4294967295)[0]
# num_visible = len(visible)
#
# barycentric = self.barycentric_image
#
# if self.updateRender:
# render, derivatives = self.compute_image_and_derivatives(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)
# self.render = render
# self.derivatives = derivatives
# self.updateRender = False
#
# return self.render, self.derivatives
#
def barycentricDerivatives(self, vertices, faces, verts):
    """Analytic derivative of per-pixel color with respect to the projected
    screen positions of the triangle vertices, via the chain rule through
    barycentric coordinates.

    For each sample point p inside a triangle (p0, p1, p2) in camera space,
    the barycentric coordinate b_i is written as
        b_i = dot(cross(n_hat, e_i), p - q_i) / |n|
    where n = cross(p1-p0, p2-p0), n_hat = n/|n|, e_i is the opposite edge
    and q_i one of its endpoints. The code below differentiates that
    expression with respect to p0, p1, p2, then maps 3D camera-space
    derivatives to 2D screen derivatives (dxdp_*), and finally contracts
    with the face's vertex colors.

    :param vertices: per-face vertex positions, reshapeable to (-1, 3, 3)
                     after homogenization (assumed — TODO confirm caller).
    :param faces: (n_faces, 3) int indices into self.vc.
    :param verts: sample-point positions, reshapeable to (-1, 3).
    :returns: didp, derivative tensor combining color, barycentric and
              screen-projection terms (shape fixed by the einsum/sum below).
    """
    # `ch` is only needed by a (removed) commented-out autodiff check.
    import chumpy as ch
    # Homogenize and transform per-face vertices into camera space.
    vertices = np.concatenate([vertices, np.ones([vertices.size // 3, 1])], axis=1)
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerticesNonBnd = camMtx[0:3, 0:3].dot(view_mtx.dot(vertices.T).T[:, :3].T).T.reshape([-1, 3, 3])
    # (A ~60-line commented-out chumpy autodiff verification of the analytic
    # derivatives below was condensed for readability; see VCS history.)
    # NOTE(review): the next five lines duplicate the computation above
    # verbatim — presumably leftover; harmless but redundant.
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerts = projVerts
    projVerts = projVerts[:, :2] / projVerts[:, 2:3]
    # Camera-space triangle corners.
    p0 = viewVerticesNonBnd[:, 0, :]
    p1 = viewVerticesNonBnd[:, 1, :]
    p2 = viewVerticesNonBnd[:, 2, :]
    # NOTE(review): these perspective projections appear unused below.
    p0_proj = p0[:,0:2]/p0[:,2:3]
    p1_proj = p1[:,0:2]/p1[:,2:3]
    p2_proj = p2[:,0:2]/p2[:,2:3]
    # Triangle normal (unnormalized) and unit normal.
    nt = np.cross(p1 - p0, p2 - p0)
    nt_norm = nt / np.linalg.norm(nt, axis=1)[:, None]
    # NOTE(review): cam_f appears unused (leftover from the commented-out
    # plane-parameterized formulation).
    cam_f = 1
    # dxdp_k: derivative of camera-space point k with respect to its screen
    # coordinates (u, v) — one 3x2 block per face.
    u = p0[:, 0]/p0[:, 2]
    v = p0[:, 1]/p0[:, 2]
    xu = np.c_[p0[:, 2][:,None], np.zeros([len(p0),1]), (-p0[:,0]/u**2)[:,None]]
    xv = np.c_[np.zeros([len(p0),1]), p0[:, 2][:,None], (-p0[:,1]/v**2)[:,None]]
    dxdp_0 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p1[:, 0]/p1[:, 2]
    v = p1[:, 1]/p1[:, 2]
    xu = np.c_[p1[:, 2][:,None], np.zeros([len(p1),1]), (-p1[:,0]/u**2)[:,None]]
    xv = np.c_[np.zeros([len(p1),1]), p1[:, 2][:,None], (-p1[:,1]/v**2)[:,None]]
    dxdp_1 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p2[:, 0]/p2[:, 2]
    v = p2[:, 1]/p2[:, 2]
    xu = np.c_[p2[:, 2][:,None], np.zeros([len(p2),1]), (-p2[:,0]/u**2)[:,None]]
    xv = np.c_[np.zeros([len(p2),1]), p2[:, 2][:,None], (-p2[:,1]/v**2)[:,None]]
    dxdp_2 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    # Triangle area and normal magnitude (|n| = 2A).
    A = 0.5*np.linalg.norm(np.cross(p1 - p0, p2 - p0),axis=1)
    nt_mag = A*2
    p = viewVerts
    # d(1/|n|) factor used by the "part 1" product-rule terms.
    pre1 = -1/(nt_mag[:,None]**2) * nt_norm
    ident = np.identity(3)
    ident = np.tile(ident[None,:],[len(p2),1,1])
    # Derivatives of the unnormalized normal wrt each corner.
    dntdp0 = np.cross((p2-p0)[:,None,:], -ident) + np.cross(-ident, (p1-p0)[:,None,:])
    dntdp1 = np.cross((p2-p0)[:,None,:],ident)
    dntdp2 = np.cross(ident,(p1-p0)[:,None,:])
    # Jacobian of the normalization n -> n/|n| (projector onto n_hat's
    # orthogonal complement, scaled by 1/|n|).
    #Pol check this!:
    dntnorm = (ident - np.einsum('ij,ik->ijk',nt_norm,nt_norm))/nt_mag[:,None,None]
    dntnormdp0 = np.einsum('ijk,ikl->ijl',dntnorm, dntdp0)
    dntnormdp1 = np.einsum('ijk,ikl->ijl',dntnorm, dntdp1)
    dntnormdp2 = np.einsum('ijk,ikl->ijl',dntnorm, dntdp2)
    # "Part 1": numerator * d(1/|n|)/dp_k.
    dpart1p0 = np.einsum('ij,ijk->ik', pre1, dntdp0)
    dpart1p1 = np.einsum('ij,ijk->ik', pre1, dntdp1)
    dpart1p2 = np.einsum('ij,ijk->ik', pre1, dntdp2)
    # ----- b0 = dot(cross(n_hat, p2-p1), p-p1) / |n| and its derivatives.
    b0 = np.sum(np.cross(nt_norm, p2 - p1) * (p - p1), axis=1)[:,None]
    db0part2p0 = np.einsum('ikj,ij->ik',np.cross(dntnormdp0.swapaxes(1,2), (p2 - p1)[:, None, :]), p - p1)
    db0part2p1 = np.einsum('ikj,ij->ik',np.cross(dntnormdp1.swapaxes(1,2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :],-ident), p - p1) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p2-p1), -ident)
    db0part2p2 = np.einsum('ikj,ij->ik',np.cross(dntnormdp2.swapaxes(1,2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], ident), p - p1)
    db0dp0wrtpart1 = dpart1p0*b0
    db0dp1wrtpart1 = dpart1p1*b0
    db0dp2wrtpart1 = dpart1p2*b0
    db0dp0wrtpart2 = 1./(nt_mag[:,None])*db0part2p0
    db0dp1wrtpart2 = 1./(nt_mag[:,None])*db0part2p1
    db0dp2wrtpart2 = 1./(nt_mag[:,None])*db0part2p2
    db0dp0wrt = db0dp0wrtpart1 + db0dp0wrtpart2
    db0dp1wrt = db0dp1wrtpart1 + db0dp1wrtpart2
    db0dp2wrt = db0dp2wrtpart1 + db0dp2wrtpart2
    # ----- b1 = dot(cross(n_hat, p0-p2), p-p2) / |n| and its derivatives.
    b1 = np.sum(np.cross(nt_norm, p0 - p2) * (p - p2), axis=1)[:, None]
    db1part2p0 = np.einsum('ikj,ij->ik',np.cross(dntnormdp0.swapaxes(1, 2),(p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], ident), p - p2)
    db1part2p1 = np.einsum('ikj,ij->ik',np.cross(dntnormdp1.swapaxes(1, 2),(p0 - p2)[:, None, :]), p - p2)
    db1part2p2 = np.einsum('ikj,ij->ik',np.cross(dntnormdp2.swapaxes(1, 2),(p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], -ident), p - p2) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p0-p2), -ident)
    db1dp0wrtpart1 = dpart1p0*b1
    db1dp1wrtpart1 = dpart1p1*b1
    db1dp2wrtpart1 = dpart1p2*b1
    db1dp0wrtpart2 = 1./(nt_mag[:,None])*db1part2p0
    db1dp1wrtpart2 = 1./(nt_mag[:,None])*db1part2p1
    db1dp2wrtpart2 = 1./(nt_mag[:,None])*db1part2p2
    db1dp0wrt = db1dp0wrtpart1 + db1dp0wrtpart2
    db1dp1wrt = db1dp1wrtpart1 + db1dp1wrtpart2
    db1dp2wrt = db1dp2wrtpart1 + db1dp2wrtpart2
    # ----- b2 = dot(cross(n_hat, p1-p0), p-p0) / |n| and its derivatives.
    b2 = np.sum(np.cross(nt_norm, p1 - p0) * (p - p0), axis=1)[:, None]
    db2part2p0 = np.einsum('ikj,ij->ik',np.cross(dntnormdp0.swapaxes(1, 2),(p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], -ident), p - p0) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p1 - p0), -ident)
    db2part2p1 = np.einsum('ikj,ij->ik',np.cross(dntnormdp1.swapaxes(1, 2),(p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(nt_norm[:, None, :], ident), p - p0)
    db2part2p2 = np.einsum('ikj,ij->ik',np.cross(dntnormdp2.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0)
    db2dp0wrtpart1 = dpart1p0*b2
    db2dp1wrtpart1 = dpart1p1*b2
    db2dp2wrtpart1 = dpart1p2*b2
    db2dp0wrtpart2 = 1./(nt_mag[:,None])*db2part2p0
    db2dp1wrtpart2 = 1./(nt_mag[:,None])*db2part2p1
    db2dp2wrtpart2 = 1./(nt_mag[:,None])*db2part2p2
    db2dp0wrt = db2dp0wrtpart1 + db2dp0wrtpart2
    db2dp1wrt = db2dp1wrtpart1 + db2dp1wrtpart2
    db2dp2wrt = db2dp2wrtpart1 + db2dp2wrtpart2
    # Stack d(b0,b1,b2)/d(p0,p1,p2) into a single tensor.
    dp0 = np.concatenate([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]], axis=1)
    dp1 = np.concatenate([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]], axis=1)
    dp2 = np.concatenate([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]], axis=1)
    dp = np.concatenate([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)
    #If dealing with degenerate triangles, ignore that gradient.
    # dp[nt_mag<=1e-15] = 0
    dp = dp[None, :]
    nFaces = len(faces)
    # Gather and clamp the face vertex colors to [0, 1].
    vc = self.vc.r[faces.ravel()].reshape([nFaces, 3, 3]).transpose([2, 0, 1])[:, :, :, None, None]
    vc[vc > 1] = 1
    vc[vc < 0] = 0
    visTriVC = vc
    # Screen-space chain-rule factor for the three corners.
    dxdp = np.concatenate([dxdp_0[:,None,:],dxdp_1[:,None,:],dxdp_2[:,None,:]], axis=1)
    dxdp = dxdp[None, :, None]
    # Contract colors with barycentric derivatives, then with dxdp:
    # output is d(color)/d(screen position of each triangle corner).
    dbvc = dp * visTriVC
    didp = np.sum(dbvc[:, :, :, :, :, None] * dxdp, 4).sum(2)
    return didp
def compute_image(self, visible, visibility, f):
"""Construct a sparse jacobian that relates 2D projected vertex positions
(in the columns) to pixel values (in the rows). This can be done
in two steps."""
width = self.frustum['width']
height = self.frustum['height']
num_channels = 3
n_channels = num_channels
vc_size = self.vc.size
# xdiff = dEdx
# ydiff = dEdy
# projVertices = self.camera.r[f[visibility.ravel()[visible]].ravel()].reshape([nVisF,3, 2])
boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
rangeIm = np.arange(self.boundarybool_image.size)
zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
edge_visibility = self.boundaryid_image
nsamples = self.nsamples
if np.any(boundaryImage):
boundaryFaces = visibility[(boundaryImage) & (visibility != 4294967295)]
nBndFaces = len(boundaryFaces)
projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
edgeFaces= np.tile(self.fpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]][None, :, :], [8, 1, 1])
edgeSampled = np.any((edgeFaces[:,:, 0]== sampleFaces) | (edgeFaces[:,:, 1]== sampleFaces),0)
facesInsideBnd = projFacesBndTiled == sampleFaces
wrongBnd = ~edgeSampled
# wrongBnd = np.all(facesInsideBnd, 0)
whereBnd = np.where(boundaryImage.ravel())[0]
# boundaryImage.ravel()[whereBnd[wrongBnd]] = False
if np.any(boundaryImage):
sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 2])
# sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:,(zerosIm*boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
boundaryFaces = visibility[(boundaryImage)&(visibility !=4294967295 )]
nBndFaces = len(boundaryFaces)
projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
facesInsideBnd = projFacesBndTiled == sampleFaces
facesOutsideBnd = ~facesInsideBnd
vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1,1,1])
vertsProjBndSamplesOutside = vertsProjBndSamples[facesOutsideBnd]
frontFacing = self.frontFacingEdgeFaces[(zerosIm * boundaryImage).ravel().astype(np.bool)].astype(np.bool)
frontFacingEdgeFaces = self.fpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]][frontFacing]
vertsPerFaceProjBnd = self.camera.r[f[frontFacingEdgeFaces.ravel()].ravel()].reshape([1, -1, 2])
vertsPerFaceProjBnd = np.tile(vertsPerFaceProjBnd, [self.nsamples, 1,1])
vertsPerFaceProjBnd = vertsPerFaceProjBnd.reshape([-1,3,2])[facesOutsideBnd.ravel()]
nv = len(vertsPerFaceProjBnd)
p0_proj = np.c_[vertsPerFaceProjBnd[:,0,:], np.ones([nv,1])]
p1_proj = np.c_[vertsPerFaceProjBnd[:,1,:], np.ones([nv,1])]
p2_proj = np.c_[vertsPerFaceProjBnd[:,2,:], np.ones([nv,1])]
t_area_bnd_edge = np.abs(np.linalg.det(np.concatenate([p0_proj[:,None], p1_proj[:,None], p2_proj[:,None]], axis=1))*0.5)
t_area_bnd_edge[t_area_bnd_edge > 1] = 1
# if self.debug:
# import pdb; pdb.set_trace()
faces = f[sampleFaces[facesOutsideBnd]].ravel()
vertsPerFaceProjBnd = self.camera.r[faces].reshape([-1, 3, 2])
nv = len(vertsPerFaceProjBnd)
p0_proj = np.c_[vertsPerFaceProjBnd[:,0,:], np.ones([nv,1])]
p1_proj = np.c_[vertsPerFaceProjBnd[:,1,:], np.ones([nv,1])]
p2_proj = np.c_[vertsPerFaceProjBnd[:,2,:], np.ones([nv,1])]
t_area_bnd_outside = np.abs(np.linalg.det(np.concatenate([p0_proj[:,None], p1_proj[:,None], p2_proj[:,None]], axis=1))*0.5)
t_area_bnd_outside[t_area_bnd_outside > 1] = 1
faces = f[sampleFaces[facesInsideBnd]].ravel()
vertsPerFaceProjBnd = self.camera.r[faces].reshape([-1, 3, 2])
nv = len(vertsPerFaceProjBnd)
p0_proj = np.c_[vertsPerFaceProjBnd[:,0,:], np.ones([nv,1])]
p1_proj = np.c_[vertsPerFaceProjBnd[:,1,:], np.ones([nv,1])]
p2_proj = np.c_[vertsPerFaceProjBnd[:,2,:], np.ones([nv,1])]
t_area_bnd_inside = np.abs(np.linalg.det(np.concatenate([p0_proj[:,None], p1_proj[:,None], p2_proj[:,None]], axis=1))*0.5)
t_area_bnd_inside[t_area_bnd_inside > 1] = 1
#Trick to cap to 1 while keeping gradients.
p1 = vertsProjBndSamplesOutside[:,0,:]
p2 = vertsProjBndSamplesOutside[:,1,:]
p = sampleV[facesOutsideBnd]
l = (p2 - p1)
linedist = np.sqrt((np.sum(l**2,axis=1)))[:,None]
self.linedist = linedist
lnorm = l/linedist
self.lnorm = lnorm
v1 = p - p1
self.v1 = v1
d = v1[:,0]* lnorm[:,0] + v1[:,1]* lnorm[:,1]
self.d = d
intersectPoint = p1 + d[:,None] * lnorm
self.intersectPoint = intersectPoint
v2 = p - p2
self.v2 = v2
l12 = (p1 - p2)
linedist12 = np.sqrt((np.sum(l12**2,axis=1)))[:,None]
lnorm12 = l12/linedist12
d2 = v2[:,0]* lnorm12[:,0] + v2[:,1]* lnorm12[:,1]
nonIntersect = (d2 < 0) | (d<0)
self.nonIntersect = nonIntersect
argminDistNonIntersect = np.argmin(np.c_[d[nonIntersect], d2[nonIntersect]], 1)
self.argminDistNonIntersect = argminDistNonIntersect
intersectPoint[nonIntersect] = vertsProjBndSamplesOutside[nonIntersect][np.arange(nonIntersect.sum()), argminDistNonIntersect]
lineToPoint = (p - intersectPoint)
n=lineToPoint
dist = np.sqrt((np.sum(lineToPoint ** 2, axis=1)))[:, None]
n_norm = lineToPoint /dist
self.n_norm = n_norm
self.dist = dist
d_final = dist.squeeze()
# max_nx_ny = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))
# d_final = d_final/max_nx_ny
# d_final = d_final
verticesBnd = self.v.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2 , 3])
verticesBndSamples = np.tile(verticesBnd[None,:,:],[self.nsamples,1,1, 1])
verticesBndOutside = verticesBndSamples[facesOutsideBnd]
vc = self.vc.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2 , 3])
vc[vc > 1] = 1
vc[vc < 0] = 0
vcBnd = vc
vcBndSamples = np.tile(vcBnd[None,:,:],[self.nsamples,1,1,1])
vcBndOutside = vcBndSamples[facesOutsideBnd]
invViewMtx = np.linalg.inv(np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])])
#
camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
# invCamMtx = np.r_[np.c_[np.linalg.inv(self.camera.camera_mtx), np.array([0,0,0])], np.array([[0, 0, 0, 1]])]
view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
verticesBndOutside = np.concatenate([verticesBndOutside.reshape([-1,3]), np.ones([verticesBndOutside.size//3, 1])], axis=1)
projVerticesBndOutside = (camMtx.dot(view_mtx)).dot(verticesBndOutside.T).T[:,:3].reshape([-1,2,3])
projVerticesBndDir = projVerticesBndOutside[:,1,:] - projVerticesBndOutside[:,0,:]
projVerticesBndDir = projVerticesBndDir/np.sqrt((np.sum(projVerticesBndDir ** 2, 1)))[:, None]
dproj = (intersectPoint[:,0]* projVerticesBndOutside[:,0,2] - projVerticesBndOutside[:,0,0]) / (projVerticesBndDir[:,0] - projVerticesBndDir[:,2]*intersectPoint[:,0])
# Code to check computation that dproj == dprojy
# dproj_y = (intersectPoint[:,1]* projVerticesBndOutside[:,0,2] - projVerticesBndOutside[:,0,1]) / (projVerticesBndDir[:,1] - projVerticesBndDir[:,2]*intersectPoint[:,1])
projPoint = projVerticesBndOutside[:,0,:][:,: ] + dproj[:,None]*projVerticesBndDir[:,:]
projPointVec4 = np.concatenate([projPoint, np.ones([projPoint.shape[0],1])], axis=1)
viewPointIntersect = (invViewMtx.dot(np.linalg.inv(camMtx)).dot(projPointVec4.T.reshape([4,-1])).reshape([4,-1])).T[:,:3]
barycentricVertsDistIntesect = np.linalg.norm(viewPointIntersect - verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,0,:], axis=1)
barycentricVertsDistIntesect2 = np.linalg.norm(viewPointIntersect - verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,1,:], axis=1)
# Code to check barycentricVertsDistIntesect + barycentricVertsDistIntesect2 = barycentricVertsDistEdge
barycentricVertsDistEdge = np.linalg.norm(verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,0,:] - verticesBndOutside[:,0:3].reshape([-1, 2, 3])[:,1,:], axis=1)
nonIntersect = np.abs(barycentricVertsDistIntesect + barycentricVertsDistIntesect2 - barycentricVertsDistEdge) > 1e-4
argminDistNonIntersect = np.argmin(np.c_[barycentricVertsDistIntesect[nonIntersect], barycentricVertsDistIntesect2[nonIntersect]],1)
barycentricVertsIntersect = barycentricVertsDistIntesect2 / (barycentricVertsDistIntesect + barycentricVertsDistIntesect2)
barycentricVertsIntersect[nonIntersect] = np.array(argminDistNonIntersect == 0).astype(np.float64)
self.barycentricVertsIntersect = barycentricVertsIntersect
self.viewPointIntersect = viewPointIntersect
self.viewPointIntersect[nonIntersect] = verticesBndOutside.reshape([-1, 2, 4])[nonIntersect, :, 0:3][np.arange(nonIntersect.sum()), argminDistNonIntersect, :]
vcEdges1 = barycentricVertsIntersect[:, None] * vcBndOutside.reshape([-1, 2, 3])[:, 0, :]
self.barycentricVertsIntersect = barycentricVertsIntersect
vcEdges2 = (1-barycentricVertsIntersect[:,None]) * vcBndOutside.reshape([-1,2,3])[:,1,:]
#Color:
colorVertsEdge = vcEdges1 + vcEdges2
#Point IN edge barycentric
d_finalNP = np.minimum(d_final.copy(),1.)
self.d_final_outside = d_finalNP
self.t_area_bnd_outside = t_area_bnd_outside
self.t_area_bnd_edge = t_area_bnd_edge
self.t_area_bnd_inside = t_area_bnd_inside
areaWeights = np.zeros([nsamples, nBndFaces])
areaWeights[facesOutsideBnd] = (1-d_finalNP)*t_area_bnd_edge + d_finalNP *t_area_bnd_outside
areaWeights[facesInsideBnd] = t_area_bnd_inside
areaWeightsTotal = areaWeights.sum(0)
# areaWeightsTotal[areaWeightsTotal < 1] = 1
self.areaWeightsTotal = areaWeightsTotal
finalColorBndOutside = np.zeros([self.nsamples, boundaryFaces.size, 3])
finalColorBndOutside_edge = np.zeros([self.nsamples, boundaryFaces.size, 3])
finalColorBndInside = np.zeros([self.nsamples, boundaryFaces.size, 3])
sampleColorsOutside = sampleColors[facesOutsideBnd]
self.sampleColorsOutside = sampleColors.copy()
finalColorBndOutside[facesOutsideBnd] = sampleColorsOutside
finalColorBndOutside[facesOutsideBnd] = sampleColorsOutside / self.nsamples
self.finalColorBndOutside_for_dr = finalColorBndOutside.copy()
# finalColorBndOutside[facesOutsideBnd] *= d_finalNP[:, None] * t_area_bnd_outside[:, None]
finalColorBndOutside[facesOutsideBnd] *= d_finalNP[:, None]
finalColorBndOutside_edge[facesOutsideBnd] = colorVertsEdge
finalColorBndOutside_edge[facesOutsideBnd] = colorVertsEdge/ self.nsamples
self.finalColorBndOutside_edge_for_dr = finalColorBndOutside_edge.copy()
# finalColorBndOutside_edge[facesOutsideBnd] *= (1 - d_finalNP[:, None]) * t_area_bnd_edge[:, None]
finalColorBndOutside_edge[facesOutsideBnd] *= (1 - d_finalNP[:, None])
sampleColorsInside = sampleColors[facesInsideBnd]
self.sampleColorsInside = sampleColorsInside.copy()
# finalColorBndInside[facesInsideBnd] = sampleColorsInside * self.t_area_bnd_inside[:, None]
finalColorBndInside[facesInsideBnd] = sampleColorsInside / self.nsamples
# finalColorBnd = finalColorBndOutside + finalColorBndOutside_edge + finalColorBndInside
finalColorBnd = finalColorBndOutside + finalColorBndOutside_edge + finalColorBndInside
# finalColorBnd /= areaWeightsTotal[None, :, None]
bndColorsImage = np.zeros_like(self.render_resolved)
bndColorsImage[(zerosIm * boundaryImage), :] = np.sum(finalColorBnd, axis=0)
# bndColorsImage1 = np.zeros_like(self.render_resolved)
# bndColorsImage1[(zerosIm * boundaryImage), :] = np.sum(self.finalColorBndOutside_for_dr, axis=0)
#
# bndColorsImage2 = np.zeros_like(self.render_resolved)
# bndColorsImage2[(zerosIm * boundaryImage), :] = np.sum(self.finalColorBndOutside_edge_for_dr, axis=0)
#
# bndColorsImage3 = np.zeros_like(self.render_resolved)
# bndColorsImage3[(zerosIm * boundaryImage), :] = np.sum(finalColorBndInside, axis=0)
finalColorImageBnd = bndColorsImage
if np.any(boundaryImage):
finalColor = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * finalColorImageBnd
# finalColor1 = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * bndColorsImage1
# finalColor2 = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * bndColorsImage2
# finalColor3 = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * bndColorsImage3
else:
finalColor = self.color_image
finalColor[finalColor>1] = 1
finalColor[finalColor<0] = 0
return finalColor
    def compute_derivatives_verts(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
        """Assemble the sparse Jacobian of the rendered image w.r.t. projected 2D vertex positions.

        A multisampled boundary pixel is modelled as ``w*R + (1-w)*R'`` (sample
        colour blended with the colour on the occluding edge), so the Jacobian
        is accumulated from several sparse terms and summed at the end:
          * d(image)/d(edge endpoints) through the sample-to-edge distance w,
          * d(image)/d(verts) through barycentric coords of samples that fall
            outside the pixel's own face,
          * d(image)/d(verts) through barycentric coords of samples inside it,
          * d(image)/d(verts) for the point projected onto the occluding edge,
          * the plain barycentric term for all non-boundary pixels.

        Args (NOTE(review): shapes inferred from usage here — confirm):
            observed: image array; only its channel count is read (atleast_3d).
            visible: flat indices of visible pixels (only counted here).
            visibility: per-pixel face-id image; 4294967295 marks "no face".
            barycentric: per-pixel barycentric-coordinate image, (H, W, 3).
            image_width, image_height: output image dimensions.
            num_verts: number of mesh vertices (Jacobian columns are x/y pairs).
            f: (n_faces, 3) int array of vertex indices per triangle.

        Returns:
            scipy.sparse CSC matrix of shape
            (image_width*image_height*n_channels, num_verts*2).
        """
        width = self.frustum['width']
        height = self.frustum['height']
        num_channels = 3
        n_channels = num_channels
        vc_size = self.vc.size  # unused here; kept (mirrors compute_derivatives_vc)
        # Geometry cached by the forward colour pass (point-to-edge projection).
        n_norm = self.n_norm
        dist = self.dist
        linedist = self.linedist
        d = self.d
        v1 = self.v1
        lnorm = self.lnorm
        # Per-sample colour terms cached by the forward pass (already /nsamples).
        finalColorBndOutside_for_dr = self.finalColorBndOutside_for_dr
        finalColorBndOutside_edge_for_dr = self.finalColorBndOutside_edge_for_dr
        d_final_outside = self.d_final_outside
        barycentricVertsIntersect = self.barycentricVertsIntersect
        nVisF = len(visibility.ravel()[visible])  # NOTE(review): unused below
        # Boundary pixels = boundary-marked AND actually covered by a face.
        boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility!=4294967295)
        rangeIm = np.arange(self.boundarybool_image.size)  # NOTE(review): unused below
        zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
        edge_visibility = self.boundaryid_image
        # Projected 2D endpoints of the occluding edge at each boundary pixel.
        vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
        nsamples = self.nsamples
        # Per-sample positions / faces / barycentrics / colours restricted to
        # boundary pixels. Face ids are stored 1-based, hence the trailing -1.
        sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 2])
        sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
        sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
        sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
        nonBoundaryFaces = visibility[zerosIm * (~boundaryImage)&(visibility !=4294967295 )]
        if np.any(boundaryImage):
            boundaryFaces = visibility[boundaryImage]
            nBndFaces = len(boundaryFaces)  # NOTE(review): unused below
            projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
            # A sample is "inside" when it hit the same face the pixel belongs to.
            facesInsideBnd = projFacesBndTiled == sampleFaces
            facesOutsideBnd = ~facesInsideBnd
            vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1,1,1])
            vertsProjBndSamplesOutside = vertsProjBndSamples[facesOutsideBnd]
            p1 = vertsProjBndSamplesOutside[:, 0, :]
            p2 = vertsProjBndSamplesOutside[:, 1, :]
            p = sampleV[facesOutsideBnd]
            # Computing gradients.
            # A multisampled pixel colour is w R + (1-w) R', giving the terms:
            # 1 samples outside wrt v:          (dw*(svc) - dw*(bar'*vc'))/nsamples (face sample)
            # 2 samples outside wrt v bar out:  (w*(dbar*vc))/nsamples  (face sample)
            # 3 samples outside wrt v bar edge: ((1-w)*(dbar'*vc'))/nsamples (edge face)
            # 4 samples outside wrt vc:         (w*bar)/nsamples  (handled in compute_derivatives_vc)
            # 5 samples outside wrt vc:         ((1-w)*bar')/nsamples (handled in compute_derivatives_vc)
            # 6 samples inside wrt v:           (dbar'*vc')/nsamples (face sample)
            # 7 samples inside wrt vc:          (bar)/nsamples (handled in compute_derivatives_vc)
            # Per boundary pixel, gradients of every sample are built and summed
            # by face identity via one sparse matrix per term.
            # (A long block of commented-out chumpy autodiff code that was used
            # to verify the analytic edge-distance derivatives below has been
            # condensed away; it rebuilt d_final via chumpy Ch ops and compared
            # dw/dv against the hand-derived dd_final_dp1/dp2.)
            ######## 1: derivative of w (sample-to-edge distance) wrt edge endpoints p1, p2
            nonIntersect = self.nonIntersect
            argminDistNonIntersect = self.argminDistNonIntersect
            max_dx_dy = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))  # NOTE(review): unused (old normalisation, see commented lines)
            d_final_np = dist  # NOTE(review): unused below
            ident = np.identity(2)
            ident = np.tile(ident[None, :], [len(p2), 1, 1])
            # Chain rule through lnorm = l/|l|, d = v1·lnorm, ip = p1 + d*lnorm,
            # n = p - ip, n_norm = n/|n|, dist = |n|.
            dlnorm = (ident - np.einsum('ij,ik->ijk', lnorm, lnorm)) / linedist[:, None]
            dl_normdp1 = np.einsum('ijk,ikl->ijl', dlnorm, -ident)
            dl_normdp2 = np.einsum('ijk,ikl->ijl', dlnorm, ident)
            dv1dp1 = -ident
            dv1dp2 = 0
            dddp1 = np.einsum('ijk,ij->ik', dv1dp1, lnorm) + np.einsum('ij,ijl->il', v1, dl_normdp1)
            dddp2 = 0 + np.einsum('ij,ijl->il', v1, dl_normdp2)
            dipdp1 = ident + (dddp1[:,None,:]*lnorm[:,:,None]) + d[:,None,None]*dl_normdp1
            dipdp2 = (dddp2[:,None,:]*lnorm[:,:,None]) + d[:,None,None]*dl_normdp2
            dndp1 = -dipdp1
            dndp2 = -dipdp2
            dn_norm = (ident - np.einsum('ij,ik->ijk', n_norm, n_norm)) / dist[:,None]
            dn_normdp1 = np.einsum('ijk,ikl->ijl', dn_norm, dndp1)
            dn_normdp2 = np.einsum('ijk,ikl->ijl', dn_norm, dndp2)
            ddistdp1 = np.einsum('ij,ijl->il', n_norm, dndp1)
            ddistdp2 = np.einsum('ij,ijl->il', n_norm, dndp2)
            argmax_nx_ny = np.argmax(np.abs(n_norm),axis=1)
            # Derivative of max(|nx|,|ny|); kept for the disabled normalisation.
            dmax_nx_ny_p1 = np.sign(n_norm)[np.arange(len(n_norm)),argmax_nx_ny][:,None]*dn_normdp1[np.arange(len(dn_normdp1)),argmax_nx_ny]
            dmax_nx_ny_p2 = np.sign(n_norm)[np.arange(len(n_norm)),argmax_nx_ny][:,None]*dn_normdp2[np.arange(len(dn_normdp2)),argmax_nx_ny]
            # dd_final_dp1 = -1./max_dx_dy[:,None]**2 * dmax_nx_ny_p1 * dist + 1./max_dx_dy[:,None] * ddistdp1
            # dd_final_dp2 = -1./max_dx_dy[:,None]**2 * dmax_nx_ny_p2 * dist + 1./max_dx_dy[:,None] * ddistdp2
            dd_final_dp1 = ddistdp1
            dd_final_dp2 = ddistdp2
            # Samples whose perpendicular misses the segment: gradient points
            # straight at the nearest endpoint instead.
            v1 = self.v1[nonIntersect][argminDistNonIntersect==0]
            v1_norm = v1/np.sqrt((np.sum(v1**2,axis=1)))[:,None]
            dd_final_dp1_nonintersect = -v1_norm
            v2 = self.v2[nonIntersect][argminDistNonIntersect==1]
            v2_norm = v2/np.sqrt((np.sum(v2**2,axis=1)))[:,None]
            dd_final_dp2_nonintersect = -v2_norm
            # NOTE(review): the next four statements index with a boolean mask
            # first (which yields a COPY), then assign into that copy — so the
            # corrections never land in dd_final_dp1/dd_final_dp2. They look
            # like no-ops; confirm whether combined-mask assignment
            # (e.g. dd_final_dp1[mask0] = ...) was intended.
            dd_final_dp1[nonIntersect][argminDistNonIntersect == 0] = dd_final_dp1_nonintersect
            dd_final_dp1[nonIntersect][argminDistNonIntersect == 1] = 0
            dd_final_dp2[nonIntersect][argminDistNonIntersect == 1] = dd_final_dp2_nonintersect
            dd_final_dp2[nonIntersect][argminDistNonIntersect == 0] = 0
            # Term 1: (sample colour - edge colour) * dw/dp for each endpoint.
            dImage_wrt_outside_v1 = finalColorBndOutside_for_dr[facesOutsideBnd][:,:,None]*dd_final_dp1[:,None,:] - dd_final_dp1[:,None,:]*finalColorBndOutside_edge_for_dr[facesOutsideBnd][:,:,None]
            dImage_wrt_outside_v2 = finalColorBndOutside_for_dr[facesOutsideBnd][:,:,None]*dd_final_dp2[:,None,:] - dd_final_dp2[:,None,:]*finalColorBndOutside_edge_for_dr[facesOutsideBnd][:,:,None]
            ### Scatter term 1 into a sparse matrix (rows: pixels*channels,
            ### cols: edge-endpoint vertex x/y).
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesOutsideBnd]
            IS = np.tile(col(pixels), (1, 2*2)).ravel()
            faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
            faces = np.tile(faces.reshape([1, -1, 2]), [self.nsamples, 1, 1])[facesOutsideBnd].ravel()
            JS = col(faces)
            JS = np.hstack((JS*2, JS*2+1)).ravel()
            if n_channels > 1:
                IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
                JS = np.concatenate([JS for i in range(n_channels)])
            data1 = dImage_wrt_outside_v1.transpose([1,0,2])
            data2 = dImage_wrt_outside_v2.transpose([1,0,2])
            data = np.concatenate([data1[:,:,None,:], data2[:,:,None,:]], 2)
            data = data.ravel()
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result_wrt_verts_bnd_outside = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
            ######## Terms 2 and 6: barycentric derivatives at the sample's own face.
            verticesBnd = self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3])
            sampleBarycentricBar = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([-1, 3, 1])
            verts = np.sum(self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3, 3]) * sampleBarycentricBar, axis=1)
            dImage_wrt_bar_v = self.barycentricDerivatives(verticesBnd, f[sampleFaces.ravel()], verts).swapaxes(0,1)
            # Outside samples are weighted by w and face area; inside by area only.
            dImage_wrt_bar_v[facesOutsideBnd.ravel()] = dImage_wrt_bar_v[facesOutsideBnd.ravel()] * d_final_outside[:,None,None, None] * self.t_area_bnd_outside[:, None, None, None]
            dImage_wrt_bar_v[facesInsideBnd.ravel()] = dImage_wrt_bar_v[facesInsideBnd.ravel()] * self.t_area_bnd_inside[:, None, None, None]
            dImage_wrt_bar_v /= self.nsamples
            ### Scatter term 2 (outside samples).
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesOutsideBnd]
            IS = np.tile(col(pixels), (1, 2*f.shape[1])).ravel()
            faces = f[sampleFaces[facesOutsideBnd]].ravel()
            JS = col(faces)
            JS = np.hstack((JS*2, JS*2+1)).ravel()
            if n_channels > 1:
                IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
                JS = np.concatenate([JS for i in range(n_channels)])
            data = np.transpose(dImage_wrt_bar_v[facesOutsideBnd.ravel()],[1,0,2,3]).ravel()
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result_wrt_verts_bar_outside = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
            ### Scatter term 6 (inside samples).
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesInsideBnd]
            IS = np.tile(col(pixels), (1, 2*f.shape[1])).ravel()
            faces = f[sampleFaces[facesInsideBnd]].ravel()
            JS = col(faces)
            JS = np.hstack((JS*2, JS*2+1)).ravel()
            if n_channels > 1:
                IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
                JS = np.concatenate([JS for i in range(n_channels)])
            data = np.transpose(dImage_wrt_bar_v[facesInsideBnd.ravel()], [1, 0, 2, 3]).ravel()
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result_wrt_verts_bar_inside = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
            ####### Term 3: barycentric derivative of the point projected onto the occluding edge.
            frontFacing = self.frontFacingEdgeFaces[(zerosIm * boundaryImage).ravel().astype(np.bool)].astype(np.bool)
            frontFacingEdgeFaces = self.fpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]][frontFacing]
            verticesBnd = self.v.r[f[frontFacingEdgeFaces.ravel()].ravel()].reshape([1, -1, 3])
            verticesBnd = np.tile(verticesBnd, [self.nsamples, 1,1])
            verticesBnd = verticesBnd.reshape([-1,3,3])[facesOutsideBnd.ravel()].reshape([-1,3])
            verts = self.viewPointIntersect
            fFrontEdge = np.tile(f[frontFacingEdgeFaces][None,:], [self.nsamples, 1, 1]).reshape([-1,3])[facesOutsideBnd.ravel()]
            dImage_wrt_bar_v_edge = self.barycentricDerivatives(verticesBnd, fFrontEdge, verts).swapaxes(0, 1)
            dImage_wrt_bar_v_edge = dImage_wrt_bar_v_edge * (1-d_final_outside[:,None,None, None]) * self.t_area_bnd_edge[:, None, None, None]
            dImage_wrt_bar_v_edge /= self.nsamples
            ### Scatter term 3.
            pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])[facesOutsideBnd]
            IS = np.tile(col(pixels), (1, 3 * 2)).ravel()
            faces = f[frontFacingEdgeFaces]
            faces = np.tile(faces.reshape([1, -1, 3]), [self.nsamples, 1, 1])[facesOutsideBnd].ravel()
            JS = col(faces)
            JS = np.hstack((JS*2, JS*2+1)).ravel()
            if n_channels > 1:
                IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
                JS = np.concatenate([JS for i in range(n_channels)])
            data = np.transpose(dImage_wrt_bar_v_edge, [1, 0, 2, 3]).ravel()
            ij = np.vstack((IS.ravel(), JS.ravel()))
            result_wrt_verts_bar_outside_edge = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
        ########### Non boundary derivatives: ####################
        # Plain barycentric term for pixels fully covered by one face.
        nNonBndFaces = nonBoundaryFaces.size  # NOTE(review): unused below
        verticesNonBnd = self.v.r[f[nonBoundaryFaces].ravel()]
        vertsPerFaceProjBnd = self.camera.r[f[nonBoundaryFaces].ravel()].reshape([-1,3,2])
        nv = len(vertsPerFaceProjBnd)
        # Projected triangle area via the determinant of homogeneous 2D verts,
        # clamped to 1 (same trick as the boundary areas above).
        p0_proj = np.c_[vertsPerFaceProjBnd[:, 0, :], np.ones([nv, 1])]
        p1_proj = np.c_[vertsPerFaceProjBnd[:, 1, :], np.ones([nv, 1])]
        p2_proj = np.c_[vertsPerFaceProjBnd[:, 2, :], np.ones([nv, 1])]
        t_area_nonbnd = np.abs(np.linalg.det(np.concatenate([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)) * 0.5)
        t_area_nonbnd[t_area_nonbnd> 1] = 1
        bc = barycentric[((~boundaryImage)&(visibility !=4294967295 ))].reshape((-1, 3))
        verts = np.sum(self.v.r[f[nonBoundaryFaces.ravel()].ravel()].reshape([-1, 3, 3]) * bc[:, :,None], axis=1)
        didp = self.barycentricDerivatives(verticesNonBnd, f[nonBoundaryFaces.ravel()], verts)
        didp = didp * t_area_nonbnd[None,:,None, None]
        # NOTE(review): n_channels is re-derived from `observed` here, while the
        # boundary terms above used the hard-coded 3 — inconsistent if observed
        # ever has a different channel count; confirm.
        n_channels = np.atleast_3d(observed).shape[2]
        shape = visibility.shape  # NOTE(review): unused below
        ### Scatter the non-boundary term.
        pixels = np.where(((~boundaryImage)&(visibility !=4294967295 )).ravel())[0]
        IS = np.tile(col(pixels), (1, 2*f.shape[1])).ravel()
        JS = col(f[nonBoundaryFaces].ravel())
        JS = np.hstack((JS*2, JS*2+1)).ravel()
        if n_channels > 1:
            IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
            JS = np.concatenate([JS for i in range(n_channels)])
        data = didp.ravel()
        ij = np.vstack((IS.ravel(), JS.ravel()))
        result_wrt_verts_nonbnd = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))
        if np.any(boundaryImage):
            result_wrt_verts = result_wrt_verts_bnd_outside + result_wrt_verts_bar_outside + result_wrt_verts_bar_inside + result_wrt_verts_bar_outside_edge + result_wrt_verts_nonbnd
        else:
            result_wrt_verts = result_wrt_verts_nonbnd
        return result_wrt_verts
def compute_derivatives_vc(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
width = self.frustum['width']
height = self.frustum['height']
num_channels = 3
n_channels = num_channels
vc_size = self.vc.size
d_final_outside = self.d_final_outside
barycentricVertsIntersect = self.barycentricVertsIntersect
boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility!=4294967295)
zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
edge_visibility = self.boundaryid_image
vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
nsamples = self.nsamples
sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
nonBoundaryFaces = visibility[zerosIm * (~boundaryImage)&(visibility !=4294967295 )]
if np.any(boundaryImage):
boundaryFaces = visibility[boundaryImage]
nBndFaces = len(boundaryFaces)
projFacesBndTiled = np.tile(boundaryFaces[None, :], [self.nsamples, 1])
facesInsideBnd = projFacesBndTiled == sampleFaces
facesOutsideBnd = ~facesInsideBnd
# vertsProjBnd[None, :] - sampleV[:,None,:]
vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1,1,1])
vertsProjBndSamplesOutside = vertsProjBndSamples[facesOutsideBnd]
#Computing gradients:
#A multisampled pixel color is given by: w R + (1-w) R' thus:
#1 derivatives samples outside wrt v 1: (dw * (svc) - dw (bar'*vc') )/ nsamples for face sample
#2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
#3 derivatives samples outside wrt v bar edge: (1-w) (dbar'*vc') )/ nsamples for faces edge (barv1', barv2', 0)
#4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
#5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
#6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
#7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
#for every boundary pixel i,j we have list of sample faces. compute gradients at each and sum them according to face identity, options:
# - Best: create sparse matrix for every matrix. sum them! same can be done with boundary.
####### 4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
dImage_wrt_outside_vc_outside = d_final_outside[:,None] * sampleBarycentric[facesOutsideBnd] / self.nsamples
### Derivatives wrt VC:
# Each pixel relies on three verts
pixels = np.tile(np.where(boundaryImage.ravel())[0][None,:], [self.nsamples, 1])[facesOutsideBnd]
IS = np.tile(col(pixels), (1, 3)).ravel()
faces = f[sampleFaces[facesOutsideBnd]].ravel()
JS = col(faces)
data = dImage_wrt_outside_vc_outside.ravel()
IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
data = np.concatenate([data for i in range(num_channels)])
ij = np.vstack((IS.ravel(), JS.ravel()))
result = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
result_wrt_vc_bnd_outside = result
# result_wrt_vc_bnd_outside.sum_duplicates()
######## 5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
dImage_wrt_outside_vc_edge = (1-d_final_outside[:, None]) * np.c_[barycentricVertsIntersect, 1-barycentricVertsIntersect] / self.nsamples
### Derivatives wrt VC:
# Each pixel relies on three verts
pixels = np.tile(np.where(boundaryImage.ravel())[0][None,:], [self.nsamples, 1])[facesOutsideBnd]
IS = np.tile(col(pixels), (1, 2)).ravel()
faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
faces = np.tile(faces.reshape([1,-1,2]),[self.nsamples, 1, 1])[facesOutsideBnd].ravel()
JS = col(faces)
data = dImage_wrt_outside_vc_edge.ravel()
IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
data = np.concatenate([data for i in range(num_channels)])
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_vc_bnd_outside_edge = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
# result_wrt_vc_bnd_outside_edge.sum_duplicates()
######## 7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
dImage_wrt_outside_vc_inside = sampleBarycentric[facesInsideBnd] / self.nsamples
### Derivatives wrt VC:
# Each pixel relies on three verts
pixels = np.tile(np.where(boundaryImage.ravel())[0][None,:], [self.nsamples, 1])[facesInsideBnd]
IS = np.tile(col(pixels), (1, 3)).ravel()
faces = f[sampleFaces[facesInsideBnd]].ravel()
JS = col(faces)
data = dImage_wrt_outside_vc_inside.ravel()
IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
data = np.concatenate([data for i in range(num_channels)])
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_vc_bnd_inside = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
# result_wrt_vc_bnd_inside.sum_duplicates()
########### Non boundary derivatives: ####################
nNonBndFaces = nonBoundaryFaces.size
verticesNonBnd = self.v.r[f[nonBoundaryFaces].ravel()]
# barySample = self.renders_sample_barycentric[0].reshape([-1,3])[(~boundaryImage)&(visibility !=4294967295 ).ravel().astype(np.bool), :]
bc = barycentric[((~boundaryImage)&(visibility !=4294967295 ))].reshape((-1, 3))
# barySample[barycentric[((~boundaryImage)&(visibility !=4294967295 ))].reshape((-1, 3))]
### Derivatives wrt VC:
# Each pixel relies on three verts
pixels = np.where(((~boundaryImage)&(visibility !=4294967295 )).ravel())[0]
IS = np.tile(col(pixels), (1, 3)).ravel()
JS = col(f[nonBoundaryFaces].ravel())
bc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
# bc = barySample.reshape((-1, 3))
data = np.asarray(bc, order='C').ravel()
IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
data = np.concatenate([data for i in range(num_channels)])
# IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
# JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
# data = np.concatenate((data, data, data))
ij = np.vstack((IS.ravel(), JS.ravel()))
result = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
result_wrt_vc_nonbnd = result
# result_wrt_vc_nonbnd.sum_duplicates()
if np.any(boundaryImage):
# result_wrt_verts = result_wrt_verts_bar_outside_edge
# result_wrt_verts = result_wrt_verts_nonbnd
result_wrt_vc = result_wrt_vc_bnd_outside + result_wrt_vc_bnd_outside_edge + result_wrt_vc_bnd_inside + result_wrt_vc_nonbnd
# result_wrt_vc = sp.csc_matrix((width * height * num_channels, vc_size))
else:
# result_wrt_verts = sp.csc_matrix((image_width*image_height*n_channels, num_verts*2))
result_wrt_vc = result_wrt_vc_nonbnd
# result_wrt_vc = sp.csc_matrix((width * height * num_channels, vc_size))
return result_wrt_vc
def on_changed(self, which):
    """Refresh GPU-side buffers for whichever dependency terms changed.

    `which` is the collection of changed term names supplied by the
    dependency framework. Each branch re-uploads only the VBO/texture
    data affected by that term; finally the rendered buffers are redrawn
    if any render-relevant term changed.
    """
    super().on_changed(which)

    # BUGFIX: was `if 'v' or 'camera' in which:` — the truthy literal 'v'
    # made this branch run unconditionally. Intended membership test below.
    if 'v' in which or 'camera' in which:
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
                self.vbo_verts_mesh[mesh][polygons].set_array(verts_by_face.astype(np.float32))
                self.vbo_verts_mesh[mesh][polygons].bind()

    if 'vc' in which:
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
                self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
                self.vbo_colors_mesh[mesh][polygons].bind()

    if 'f' in which:
        self.vbo_indices.set_array(self.f.astype(np.uint32))
        self.vbo_indices.bind()
        self.vbo_indices_range.set_array(np.arange(self.f.size, dtype=np.uint32).ravel())
        self.vbo_indices_range.bind()
        # Assign a 1-based running face id, replicated to each of the 3
        # vertices, so shaders can recover which face produced a fragment.
        flen = 1
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                fc = np.tile(np.arange(flen, flen + len(f))[:, None], [1, 3]).ravel()
                fc = np.asarray(fc, dtype=np.uint32)
                self.vbo_face_ids_list[mesh][polygons].set_array(fc)
                self.vbo_face_ids_list[mesh][polygons].bind()
                flen += len(f)
                self.vbo_indices_mesh_list[mesh][polygons].set_array(np.array(self.f_list[mesh][polygons]).astype(np.uint32))
                self.vbo_indices_mesh_list[mesh][polygons].bind()

    if 'texture_stack' in which:
        if self.initialized:
            textureCoordIdx = 0
            for mesh in range(len(self.f_list)):
                for polygons in range(len(self.f_list[mesh])):
                    # NOTE(review): only UV-mapped polygons consume pixels
                    # from texture_stack here — confirm texture_stack layout
                    # skips non-UV polygons.
                    if self.haveUVs_list[mesh][polygons]:
                        texture = self.textureID_mesh_list[mesh][polygons]
                        GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                        # Update the GL textures with all the textures.
                        # (Inefficient: uploads even unchanged textures.)
                        image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                        self.textures_list[mesh][polygons] = self.texture_stack[textureCoordIdx:image.size + textureCoordIdx].reshape(image.shape)
                        textureCoordIdx = textureCoordIdx + image.size
                        image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                        # BUGFIX: ndarray.tostring() was removed in numpy 2.0;
                        # tobytes() yields the identical byte payload.
                        GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                           image.reshape([image.shape[1], image.shape[0], -1]).ravel().tobytes())

    # BUGFIX: was `if 'v' or 'f' or ... in which:` (always true).
    if any(term in which for term in ('v', 'f', 'vc', 'ft', 'camera', 'texture_stack')):
        self.render_image_buffers()
def release_textures(self):
    """Delete every GL texture tracked in textureID_mesh_list, then reset the list."""
    if not hasattr(self, 'textureID_mesh_list'):
        return
    if self.textureID_mesh_list != []:
        for texture_mesh in self.textureID_mesh_list:
            if texture_mesh == []:
                continue
            for texture in texture_mesh:
                # Non-UV polygons store None placeholders; skip those.
                if texture is not None:
                    GL.glDeleteTextures(1, [texture.value])
    self.textureID_mesh_list = []
@depends_on(dterms+terms)
def color_image(self):
    """Render and return the color image (vertex colors and textures on).

    The overdraw/boundary-antialiasing pass is intentionally skipped here.
    """
    self._call_on_changed()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    return self.draw_color_image(with_vertex_colors=True, with_texture_on=True)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def barycentric_image(self):
    """Per-pixel barycentric coordinates of the covering triangle."""
    self._call_on_changed()
    # Pass the boundary mask only when overdraw is enabled.
    mask = self.boundarybool_image if self.overdraw else None
    return self.draw_barycentric_image(mask)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def visibility_image(self):
    """Per-pixel ids of the visible triangles."""
    self._call_on_changed()
    # Pass the boundary mask only when overdraw is enabled.
    mask = self.boundarybool_image if self.overdraw else None
    return self.draw_visibility_image(self.v.r, self.f, mask)
def image_mesh_bool(self, meshes):
    """Return a boolean mask of the pixels covered by any mesh in `meshes`.

    Each mesh is drawn with a flat per-mesh id color; a pixel is True
    wherever the red channel of the readback is nonzero.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    self._call_on_changed()
    GL.glClearColor(0., 0., 0., 1.)
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glUseProgram(self.colorProgram)
    for mesh in meshes:
        self.draw_index(mesh)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape previously used frustum['height'] for both axes,
    # which raises for a non-square frustum. glReadPixels returns
    # height rows of width RGB pixels.
    result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['width'], 3).astype(np.uint32))[:, :, 0]
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    return result != 0
@depends_on(dterms+terms)
def indices_image(self):
    """Return a per-pixel image of mesh indices (red channel of the id render)."""
    self._call_on_changed()
    self.makeCurrentContext()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    self._call_on_changed()
    GL.glClearColor(0., 0., 0., 1.)
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glUseProgram(self.colorProgram)
    for index in range(len(self.f_list)):
        self.draw_index(index)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape previously used frustum['height'] for both axes,
    # which raises for a non-square frustum; the buffer is height x width.
    result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['width'], 3).astype(np.uint32))[:, :, 0]
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    return result
def draw_index(self, index):
    """Draw mesh `index` with every vertex colored index/255 (id/picking render)."""
    mesh = index
    view_mat = self.camera.openglMat.dot(
        np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    mvp = np.dot(self.projectionMatrix, view_mat)
    vc = self.vc_list[mesh]
    for poly_idx in np.arange(len(self.f_list[mesh])):
        GL.glBindVertexArray(self.vao_tex_mesh_list[mesh][poly_idx])
        faces = self.f_list[mesh][poly_idx]
        color_vbo = self.vbo_colors_mesh[mesh][poly_idx]
        face_colors = np.asarray(vc.reshape((-1, 3))[faces.ravel()], dtype=np.float32, order='C')
        # Flat color encodes the mesh index; shape matches the color VBO.
        flat_color = np.array(np.ones_like(face_colors) * index / 255.0, dtype=np.float32)
        color_vbo.set_array(flat_color)
        index_vbo = self.vbo_indices_mesh_list[mesh][poly_idx]
        color_vbo.bind()
        primtype = GL.GL_LINES if self.f.shape[1] == 2 else GL.GL_TRIANGLES
        GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, mvp)
        GL.glDrawArrays(primtype, 0, len(index_vbo) * index_vbo.data.shape[1])
def draw_texcoord_image(self, v, f, ft, boundarybool_image=None):
    """Render per-pixel texture coordinates.

    Channels 0/1 of the result hold (u, 1-v); channel 2 encodes the
    source texture index normalized by the number of textures.
    """
    self.makeCurrentContext()
    shaders.glUseProgram(self.colorProgram)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # want vtc: texture-coordinates per vertex (not per element in vc)
    colors = ft
    # Use the third channel to identify the corresponding texture.
    color3 = np.vstack([np.ones([self.ft_list[mesh].shape[0], 1]) * mesh for mesh in range(len(self.ft_list))]).astype(np.float32) / len(self.ft_list)
    colors = np.asarray(np.hstack((colors, color3)), np.float64, order='C')
    self.draw_colored_primitives(self.vao_dyn, v, f, colors)
    if boundarybool_image is not None:
        # Overdraw in line mode to refine boundary pixels.
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        self.draw_colored_primitives(self.vao_dyn, v, f, colors)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape previously used frustum['height'] for both axes,
    # which raises for a non-square frustum; buffer is height x width.
    result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['width'], 3)[:, :, :3].astype(np.float64)) / 255.0
    # Flip V to image convention.
    result[:, :, 1] = 1. - result[:, :, 1]
    return result
@depends_on('ft', 'textures')
def mesh_tex_coords(self):
    """Texture coordinates per face-vertex with the V axis flipped to image convention.

    BUGFIX: the original flipped `self.ft` in place (flagged by the old
    "careful with this" comment), corrupting the cached array on each
    recomputation; operate on a copy instead.
    """
    data = self.ft.copy()
    data[:, 1] = 1.0 - data[:, 1]
    return data
# Depends on 'f' because vpe/fpe depend on f
# Pol: Check that depends on works on other attributes that depend_on x, if x changes.
@depends_on('ft', 'f')
def wireframe_tex_coords(self):
    """Texture coordinates for wireframe edges, gathered per edge endpoint."""
    print("wireframe_tex_coords is being computed!")
    # BUGFIX: `size/3` is a float in Python 3 and makes np.zeros raise;
    # use floor division for the vertex count.
    vvt = np.zeros((self.v.r.size // 3, 2), dtype=np.float64, order='C')
    # Scatter per-face-vertex coords back to per-vertex slots.
    vvt[self.f.flatten()] = self.mesh_tex_coords
    # NOTE(review): indexes self.ma although the buffer was described via
    # self.vpe — confirm ma/vpe agree in shape. (A dead np.zeros
    # assignment to edata was removed.)
    edata = vvt[self.ma.ravel()]
    return edata
# TODO: can this not be inherited from base? turning off texture mapping in that instead?
@depends_on(dterms+terms)
def boundaryid_image(self):
    """Render an image where each pixel carries the id of the covering boundary edge.

    Temporarily switches to the plain color program for the draw, then
    restores the texture program so later draws keep texturing.
    """
    self._call_on_changed()
    # self.texture_mapping_of
    self.makeCurrentContext()
    GL.glUseProgram(self.colorProgram)
    result = self.draw_boundaryid_image(self.v.r, self.f, self.vpe, self.fpe, self.camera)
    GL.glUseProgram(self.colorTextureProgram)
    # self.texture_mapping_on(with_vertex_colors=True)
    return result
def draw_color_image(self, with_vertex_colors=True, with_texture_on=True):
    """Render the scene and return a float RGB image in [0, 1].

    Parameters
    ----------
    with_vertex_colors : bool
        If False, vertex colors are replaced by ones (texture only).
    with_texture_on : bool
        If True, UV-mapped polygons are drawn with their texture bound.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glEnable(GL.GL_MULTISAMPLE)
    if hasattr(self, 'bgcolor'):
        # Modulo guards against renderers with fewer than 3 channels.
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels], self.bgcolor.r[2 % self.num_channels], 1.)
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            f = self.f_list[mesh][polygons]
            vbo_color = self.vbo_colors_mesh[mesh][polygons]
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            if with_vertex_colors:
                colors = colors_by_face.astype(np.float32)
            else:
                # Texture only: neutral (all-ones) vertex colors.
                colors = np.ones_like(colors_by_face).astype(np.float32)
            vbo_color.set_array(colors)
            vbo_color.bind()
            primtype = GL.GL_LINES if self.f.shape[1] == 2 else GL.GL_TRIANGLES
            if with_texture_on and self.haveUVs_list[mesh][polygons]:
                GL.glUseProgram(self.colorTextureProgram)
                texture = self.textureID_mesh_list[mesh][polygons]
                GL.glActiveTexture(GL.GL_TEXTURE0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                GL.glUniform1i(self.textureID, 0)
            else:
                GL.glUseProgram(self.colorProgram)
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
    # Resolve the (possibly multisampled) framebuffer into self.fbo.
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'], GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape previously used frustum['height'] for both axes,
    # which raises for a non-square frustum; the buffer is height x width.
    result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['width'], 3).astype(np.float64)) / 255.0
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glDisable(GL.GL_MULTISAMPLE)
    GL.glClearColor(0., 0., 0., 1.)
    if hasattr(self, 'background_image'):
        # Composite the background behind pixels no triangle covered
        # (visibility sentinel 4294967295 == uint32 max).
        bg_px = np.tile(np.atleast_3d(self.visibility_image) == 4294967295, (1, 1, 3))
        fg_px = 1 - bg_px
        result = bg_px * self.background_image + fg_px * result
    return result
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image_quantized(self):
    """Quantize the texcoord image into flat pixel indices plus a texture-id map."""
    tc = self.texcoord_image[:, :, :2].copy()
    # Temporary: use the first texture as the reference resolution.
    self.texture_image = self.textures_list[0][0].r.copy()
    tex_h = self.texture_image.shape[0]
    tex_w = self.texture_image.shape[1]
    tc[:, :, 0] *= tex_w - 1
    tc[:, :, 1] *= tex_h - 1
    # Channel 2 encodes the normalized texture index; recover it.
    texture_idx = (self.texcoord_image[:, :, 2] * len(self.ft_list)).astype(np.uint32)
    tc = np.round(tc)
    flat_idx = tc[:, :, 0] + tc[:, :, 1] * tex_w
    return flat_idx, texture_idx
def checkBufferNum(self):
    """Probe GL by generating one buffer name (result is discarded)."""
    GL.glGenBuffers(1)
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image(self):
    """Per-pixel texture coordinates, via draw_texcoord_image."""
    # Only pass the boundary mask when overdraw is enabled.
    mask = self.boundarybool_image if self.overdraw else None
    return self.draw_texcoord_image(self.v.r, self.f, self.ft, mask)
class ResidualRenderer(ColoredRenderer):
    """Textured renderer variant; presumably renders residuals against `imageGT` — TODO confirm against the shader code below."""
    # Non-differentiable dependency terms tracked by the framework.
    terms = 'f', 'frustum', 'vt', 'ft', 'background_image', 'overdraw', 'ft_list', 'haveUVs_list', 'textures_list', 'vc_list', 'imageGT'
    # Differentiable terms; changes trigger derivative recomputation.
    dterms = 'vc', 'camera', 'bgcolor', 'texture_stack', 'v'
def __init__(self):
    """Forward to ColoredRenderer initialization; GL state is set up separately."""
    super().__init__()
def clear(self):
    """Tear down all GL resources owned by this renderer.

    Empties, binds, unbinds and deletes every VBO list, deletes the VAOs
    and textures, releases the texture shader program, then defers to the
    parent clear(). Errors are reported rather than raised because
    clear() may run on a renderer that was never fully initialized.
    """
    def _destroy(vbo_lists):
        # Run each teardown step as a full pass over all VBOs, preserving
        # the original set_array -> bind -> unbind -> delete ordering.
        flat = [buf for sublist in vbo_lists for buf in sublist]
        for buf in flat:
            buf.set_array(np.array([]))
        for buf in flat:
            buf.bind()
        for buf in flat:
            buf.unbind()
        for buf in flat:
            buf.delete()

    try:
        GL.glFlush()
        GL.glFinish()
        _destroy(self.vbo_indices_mesh_list)
        _destroy(self.vbo_colors_mesh)
        _destroy(self.vbo_verts_mesh)
        _destroy(self.vbo_uvs_mesh)
        _destroy(self.vbo_face_ids_list)
        for sublist in self.vao_tex_mesh_list:
            for vao in sublist:
                GL.glDeleteVertexArrays(1, [vao.value])
        self.release_textures()
        if self.glMode == 'glfw':
            import glfw
            glfw.make_context_current(self.win)
        GL.glDeleteProgram(self.colorTextureProgram)
        super().clear()
    except Exception:
        # BUGFIX: was a bare `except:` that dropped into pdb.set_trace();
        # a hard debugger stop does not belong in library cleanup.
        print("Program had not been initialized")
def initGLTexture(self):
    """Compile the textured-color shader program and build per-mesh GL resources.

    For every mesh/polygon-group: creates a VAO, uploads vertex, color,
    UV and index VBOs, and (for UV-mapped groups) allocates and fills a
    GL texture. Populates self.vbo_*_mesh lists, self.vao_tex_mesh_list,
    self.textureID_mesh_list and self.textureID. Must run with a current
    GL context.
    """
    print("Initializing Texture OpenGL.")
    FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
// Interpolated values from the vertex shaders
//#extension GL_EXT_shader_image_load_store : enable
in vec3 theColor;
in vec2 UV;
uniform sampler2D myTextureSampler;
// Ouput data
out vec3 color;
void main(){
color = theColor * texture2D( myTextureSampler, UV).rgb;
}""", GL.GL_FRAGMENT_SHADER)
    VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout(location = 2) in vec2 vertexUV;
uniform mat4 MVP;
out vec3 theColor;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
theColor = color;
UV = vertexUV;
}""", GL.GL_VERTEX_SHADER)
    self.colorTextureProgram = shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)

    # Define the other VAO/VBOs and shaders.
    # Text VAO and bind color, vertex indices AND uvbuffer:
    position_location = GL.glGetAttribLocation(self.colorTextureProgram, 'position')
    color_location = GL.glGetAttribLocation(self.colorTextureProgram, 'color')
    uvs_location = GL.glGetAttribLocation(self.colorTextureProgram, 'vertexUV')
    # color_location_ub = GL.glGetAttribLocation(self.colorProgram, 'color')
    self.MVP_texture_location = GL.glGetUniformLocation(self.colorTextureProgram, 'MVP')
    self.vbo_indices_mesh_list = []
    self.vbo_colors_mesh = []
    self.vbo_verts_mesh = []
    self.vao_tex_mesh_list = []
    self.vbo_uvs_mesh = []
    self.textureID_mesh_list = []
    # GL.glEnable(GL.GL_LINE_SMOOTH)
    # GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)
    GL.glLineWidth(2.)
    for mesh in range(len(self.f_list)):
        vaos_mesh = []
        vbo_indices_mesh = []
        vbo_face_ids_mesh = []
        vbo_colors_mesh = []
        vbo_vertices_mesh = []
        vbo_uvs_mesh = []
        textureIDs_mesh = []
        for polygons in range(len(self.f_list[mesh])):
            # One VAO per polygon group; all VBO bindings below are
            # recorded into it.
            vao = GL.GLuint(0)
            GL.glGenVertexArrays(1, vao)
            GL.glBindVertexArray(vao)
            f = self.f_list[mesh][polygons]
            # Gather per-face-vertex attributes (duplicated per face).
            verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_verts = vbo.VBO(np.array(verts_by_face).astype(np.float32))
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_colors = vbo.VBO(np.array(colors_by_face).astype(np.float32))
            uvs_by_face = np.asarray(self.ft_list[mesh].reshape((-1, 2))[f.ravel()], dtype=np.float32, order='C')
            vbo_uvs = vbo.VBO(np.array(uvs_by_face).astype(np.float32))
            vbo_indices = vbo.VBO(np.array(self.f_list[mesh][polygons]).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
            vbo_indices.bind()
            vbo_verts.bind()
            GL.glEnableVertexAttribArray(position_location)  # from 'location = 0' in shader
            GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_colors.bind()
            GL.glEnableVertexAttribArray(color_location)  # from 'location = 1' in shader
            GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            if self.haveUVs_list[mesh][polygons]:
                vbo_uvs.bind()
                GL.glEnableVertexAttribArray(uvs_location)  # from 'location = 2' in shader
                GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            # Textures: allocate immutable float RGB storage and upload
            # the (vertically flipped) texture for UV-mapped groups only.
            texture = None
            if self.haveUVs_list[mesh][polygons]:
                texture = GL.GLuint(0)
                GL.glGenTextures(1, texture)
                GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                image = np.array(np.flipud((self.textures_list[mesh][polygons])), order='C', dtype=np.float32)
                GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
                # GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image.reshape([image.shape[1], image.shape[0], -1]).ravel().tostring())
            textureIDs_mesh = textureIDs_mesh + [texture]
            vbo_indices_mesh = vbo_indices_mesh + [vbo_indices]
            vbo_colors_mesh = vbo_colors_mesh + [vbo_colors]
            vbo_vertices_mesh = vbo_vertices_mesh + [vbo_verts]
            vbo_uvs_mesh = vbo_uvs_mesh + [vbo_uvs]
            vaos_mesh = vaos_mesh + [vao]
        self.textureID_mesh_list = self.textureID_mesh_list + [textureIDs_mesh]
        self.vao_tex_mesh_list = self.vao_tex_mesh_list + [vaos_mesh]
        self.vbo_indices_mesh_list = self.vbo_indices_mesh_list + [vbo_indices_mesh]
        self.vbo_colors_mesh = self.vbo_colors_mesh + [vbo_colors_mesh]
        self.vbo_verts_mesh = self.vbo_verts_mesh + [vbo_vertices_mesh]
        self.vbo_uvs_mesh = self.vbo_uvs_mesh + [vbo_uvs_mesh]
    # Leave a clean binding state.
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glBindVertexArray(0)
    self.textureID = GL.glGetUniformLocation(self.colorTextureProgram, "myTextureSampler")
def initGL_AnalyticRenderer(self):
self.initGLTexture()
self.updateRender = True
self.updateDerivatives = True
GL.glEnable(GL.GL_MULTISAMPLE)
# GL.glHint(GL.GL_MULTISAMPLE_FILTER_HINT_NV, GL.GL_NICEST);
GL.glEnable(GL.GL_SAMPLE_SHADING)
GL.glMinSampleShading(1.0)
VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 colorIn;
layout(location = 2) in vec2 vertexUV;
layout(location = 3) in uint face_id;
layout(location = 4) in vec3 barycentric;
uniform mat4 MVP;
out vec3 theColor;
out vec4 pos;
flat out uint face_out;
out vec3 barycentric_vert_out;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
pos = MVP * vec4(position,1);
//pos = pos4.xyz;
theColor = colorIn;
UV = vertexUV;
face_out = face_id;
barycentric_vert_out = barycentric;
}""", GL.GL_VERTEX_SHADER)
ERRORS_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
//layout(early_fragment_tests) in;
// Interpolated values from the vertex shaders
in vec3 theColor;
in vec2 UV;
flat in uint face_out;
in vec4 pos;
in vec3 barycentric_vert_out;
layout(location = 3) uniform sampler2D myTextureSampler;
uniform float ww;
uniform float wh;
// Ouput data
layout(location = 0) out vec3 color;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 barycentric1;
layout(location = 4) out vec2 barycentric2;
void main(){
vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;
color = finalColor.rgb;
sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);
sample_face = face_out;
barycentric1 = barycentric_vert_out.xy;
barycentric2 = vec2(barycentric_vert_out.z, 0.);
}""", GL.GL_FRAGMENT_SHADER)
self.errorTextureProgram = shaders.compileProgram(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)
FETCH_VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
void main() {}
""", GL.GL_VERTEX_SHADER)
FETCH_GEOMETRY_SHADER = shaders.compileShader("""#version 330 core
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
const vec2 data[4] = vec2[]
(
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2( 1.0, -1.0)
);
void main() {
for (int i = 0; i < 4; ++i) {
gl_Position = vec4( data[i], 0.0, 1.0 );
EmitVertex();
}
EndPrimitive();
}""", GL.GL_GEOMETRY_SHADER)
FETCH_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
layout(location = 2) uniform sampler2DMS colors;
layout(location = 3) uniform sampler2DMS sample_positions;
layout(location = 4) uniform usampler2DMS sample_faces;
layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;
layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;
//layout(location = 7) uniform sampler2D imageGT;
uniform float ww;
uniform float wh;
uniform int sample;
// Ouput data
layout(location = 0) out vec3 colorFetchOut;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 sample_barycentric1;
layout(location = 4) out vec2 sample_barycentric2;
//layout(location = 5) out vec3 res;
//out int gl_SampleMask[];
const int all_sample_mask = 0xffff;
void main(){
ivec2 texcoord = ivec2(gl_FragCoord.xy);
colorFetchOut = texelFetch(colors, texcoord, sample).xyz;
sample_pos = texelFetch(sample_positions, texcoord, sample).xy;
sample_face = texelFetch(sample_faces, texcoord, sample).r;
sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;
sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;
//vec3 imgColor = texture2D(imageGT, gl_FragCoord.xy/vec2(ww,wh)).rgb;
//res = imgColor - colorFetchOut;
}""", GL.GL_FRAGMENT_SHADER)
GL.glClampColor(GL.GL_CLAMP_READ_COLOR, False)
# GL.glClampColor(GL.GL_CLAMP_VERTEX_COLOR, False)
# GL.glClampColor(GL.GL_CLAMP_FRAGMENT_COLOR, False)
self.fetchSamplesProgram = shaders.compileProgram(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER, FETCH_FRAGMENT_SHADER)
self.textureGT = GL.GLuint(0)
# GL.glActiveTexture(GL.GL_TEXTURE1)
# GL.glGenTextures(1, self.textureGT)
# GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureGT)
# self.textureGTLoc = GL.glGetUniformLocation(self.errorTextureProgram, "imageGT")
# GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
# #
# try:
# if self.imageGT.r is not None and self.imageGT.r.size != 0: #if GT image is defined.
# image = np.array(np.flipud((self.imageGT.r)), order='C', dtype=np.float32)
# GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
# GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
# except:
# pass
# GL.glGenTextures(1, self.textureEdges)
# GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureEdges)
# GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
GL.glActiveTexture(GL.GL_TEXTURE0)
whitePixel = np.ones([1, 1, 3])
self.whitePixelTextureID = GL.GLuint(0)
GL.glGenTextures(1, self.whitePixelTextureID)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.whitePixelTextureID)
image = np.array(np.flipud((whitePixel)), order='C', dtype=np.float32)
GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
self.fbo_ms_errors = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glEnable(GL.GL_MULTISAMPLE)
# GL.glHint(GL.GL_MULTISAMPLE_FILTER_HINT_NV, GL.GL_NICEST);
GL.glEnable(GL.GL_SAMPLE_SHADING)
GL.glMinSampleShading(1.0)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)
self.texture_errors_render = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RGB8, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)
self.texture_errors_sample_position = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)
self.texture_errors_sample_faces = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_R32UI, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)
#
self.texture_errors_sample_barycentric1 = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1,
0)
self.texture_errors_sample_barycentric2 = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2,
0)
self.z_buf_ms_errors = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'],
False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)
# self.z_buf_ms_errors = GL.glGenRenderbuffers(1)
# GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_ms_errors)
# GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
# GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_ms_errors)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
# GL.glDisable(GL.GL_CULL_FACE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
self.fbo_sample_fetch = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)
self.render_buffer_fetch_sample_render = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
self.render_buffer_fetch_sample_position = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
self.render_buffer_fetch_sample_face = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
#
self.render_buffer_fetch_sample_barycentric1 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
self.render_buffer_fetch_sample_barycentric2 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
self.z_buf_samples_errors = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glDisable(GL.GL_CULL_FACE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
# FBO_f
self.fbo_errors_nonms = GL.glGenFramebuffers(1)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)
render_buf_errors_render = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_render)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, render_buf_errors_render)
render_buf_errors_sample_position = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
render_buf_errors_sample_face = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
#
render_buf_errors_sample_barycentric1 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
render_buf_errors_sample_barycentric2 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
#
z_buf_samples_errors = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, z_buf_samples_errors)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, z_buf_samples_errors)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
self.textureObjLoc = GL.glGetUniformLocation(self.errorTextureProgram, "myTextureSampler")
# Add background cube:
position_location = GL.glGetAttribLocation(self.errorTextureProgram, 'position')
color_location = GL.glGetAttribLocation(self.errorTextureProgram, 'colorIn')
uvs_location = GL.glGetAttribLocation(self.errorTextureProgram, 'vertexUV')
face_ids_location = GL.glGetAttribLocation(self.errorTextureProgram, 'face_id')
barycentric_location = GL.glGetAttribLocation(self.errorTextureProgram, 'barycentric')
# self.vbo_verts_cube= vbo.VBO(np.array(self.v_bgCube).astype(np.float32))
# self.vbo_colors_cube= vbo.VBO(np.array(self.vc_bgCube).astype(np.float32))
# self.vbo_uvs_cube = vbo.VBO(np.array(self.ft_bgCube).astype(np.float32))
# self.vao_bgCube = GL.GLuint(0)
# GL.glGenVertexArrays(1, self.vao_bgCube)
#
# GL.glBindVertexArray(self.vao_bgCube)
# self.vbo_f_bgCube = vbo.VBO(np.array(self.f_bgCube).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
# self.vbo_f_bgCube.bind()
# self.vbo_verts_cube.bind()
# GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# self.vbo_colors_cube.bind()
# GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# self.vbo_uvs_cube.bind()
# GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
#
# f = self.f_bgCube
# fc = np.tile(np.arange(len(self.f), len(self.f) + len(f))[:, None], [1, 3]).ravel()
# # fc[:, 0] = fc[:, 0] & 255
# # fc[:, 1] = (fc[:, 1] >> 8) & 255
# # fc[:, 2] = (fc[:, 2] >> 16) & 255
# fc = np.asarray(fc, dtype=np.uint32)
# vbo_face_ids_cube = vbo.VBO(fc)
# vbo_face_ids_cube.bind()
# GL.glEnableVertexAttribArray(face_ids_location) # from 'location = 0' in shader
# GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
#
# #Barycentric cube:
# f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
# vbo_barycentric_cube = vbo.VBO(f_barycentric)
# vbo_barycentric_cube.bind()
# GL.glEnableVertexAttribArray(barycentric_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
self.vao_quad = GL.GLuint(0)
GL.glGenVertexArrays(1, self.vao_quad)
GL.glBindVertexArray(self.vao_quad)
# Bind VAO
self.vbo_face_ids_list = []
self.vbo_barycentric_list = []
self.vao_errors_mesh_list = []
flen = 1
for mesh in range(len(self.f_list)):
vaos_mesh = []
vbo_face_ids_mesh = []
vbo_barycentric_mesh = []
for polygons in np.arange(len(self.f_list[mesh])):
vao = GL.GLuint(0)
GL.glGenVertexArrays(1, vao)
GL.glBindVertexArray(vao)
vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
vbo_f.bind()
vbo_verts = self.vbo_verts_mesh[mesh][polygons]
vbo_verts.bind()
GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
vbo_colors = self.vbo_colors_mesh[mesh][polygons]
vbo_colors.bind()
GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
vbo_uvs = self.vbo_uvs_mesh[mesh][polygons]
vbo_uvs.bind()
GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
f = self.f_list[mesh][polygons]
fc = np.tile(np.arange(flen, flen + len(f))[:, None], [1, 3]).ravel()
# fc[:, 0] = fc[:, 0] & 255
# fc[:, 1] = (fc[:, 1] >> 8) & 255
# fc[:, 2] = (fc[:, 2] >> 16) & 255
fc = np.asarray(fc, dtype=np.uint32)
vbo_face_ids = vbo.VBO(fc)
vbo_face_ids.bind()
GL.glEnableVertexAttribArray(face_ids_location) # from 'location = 0' in shader
GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
vbo_barycentric = vbo.VBO(f_barycentric)
vbo_barycentric.bind()
GL.glEnableVertexAttribArray(barycentric_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
flen += len(f)
vaos_mesh += [vao]
vbo_face_ids_mesh += [vbo_face_ids]
vbo_barycentric_mesh += [vbo_face_ids]
GL.glBindVertexArray(0)
self.vbo_face_ids_list += [vbo_face_ids_mesh]
self.vbo_barycentric_list += [vbo_barycentric_mesh]
self.vao_errors_mesh_list += [vaos_mesh]
def render_image_buffers(self):
    """Render all meshes into the multisample "errors" FBO, then fetch every
    MSAA sub-sample's color / position / face-id / barycentric data into
    CPU-side per-sample arrays, finally resolving the colors into
    ``self.render_resolved`` by averaging across samples.

    Side effects: fills ``self.renders``, ``self.renders_sample_pos``,
    ``self.renders_faces``, ``self.renders_sample_barycentric1/2`` and
    ``self.renders_sample_barycentric``; sets ``self.render_resolved`` and
    marks the cached render/derivative outputs stale.
    """
    GL.glEnable(GL.GL_MULTISAMPLE)
    GL.glEnable(GL.GL_SAMPLE_SHADING)
    # Force the fragment shader to run for every sample, not once per pixel.
    GL.glMinSampleShading(1.0)
    self.makeCurrentContext()
    if hasattr(self, 'bgcolor'):
        # num_channels may be < 3; the modulo reuses channel 0 for grayscale.
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels], self.bgcolor.r[2 % self.num_channels], 1.)
    # --- Pass 1: draw the meshes into the 5-attachment multisample FBO. ---
    GL.glUseProgram(self.errorTextureProgram)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3, GL.GL_COLOR_ATTACHMENT4]
    GL.glDrawBuffers(5, drawingBuffers)
    GL.glClearColor(0., 0., 0., 0.)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # Bind the ground-truth image texture to unit 1 for the error shader.
    GL.glActiveTexture(GL.GL_TEXTURE1)
    GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureGT)
    self.textureGTLoc = GL.glGetUniformLocation(self.errorTextureProgram, "imageGT")
    GL.glUniform1i(self.textureGTLoc, 1)
    wwLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'ww')
    whLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'wh')
    GL.glUniform1f(wwLoc, self.frustum['width'])
    GL.glUniform1f(whLoc, self.frustum['height'])
    # Model-view-projection: a [0,0,0,1] row is appended to make view 4x4.
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_errors_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            f = self.f_list[mesh][polygons]
            # Re-upload per-face vertex colors (vc may have changed since last draw).
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
            self.vbo_colors_mesh[mesh][polygons].bind()
            if self.f.shape[1] == 2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            # This render path only supports triangle meshes.
            assert (primtype == GL.GL_TRIANGLES)
            if self.haveUVs_list[mesh][polygons]:
                texture = self.textureID_mesh_list[mesh][polygons]
            else:
                # Untextured meshes sample a 1x1 white pixel so one shader handles both cases.
                texture = self.whitePixelTextureID
            GL.glActiveTexture(GL.GL_TEXTURE0)
            GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
            GL.glUniform1i(self.textureObjLoc, 0)
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
    # (Removed large commented-out experiments: background-cube draw and
    #  MS -> non-MS blit readback tests.)
    # --- Pass 2: per-sample fetch of the MS attachments via the fetch shader. ---
    GL.glUseProgram(self.fetchSamplesProgram)
    self.colorsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "colors")
    self.sample_positionsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_positions")
    self.sample_facesLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_faces")
    self.sample_barycentric1Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords1")
    self.sample_barycentric2Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords2")
    wwLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'ww')
    whLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'wh')
    GL.glUniform1f(wwLoc, self.frustum['width'])
    GL.glUniform1f(whLoc, self.frustum['height'])
    # Per-sample CPU-side buffers.
    # NOTE(review): allocated as [nsamples, width, height, ...] but the
    # readbacks below reshape to (height, height, ...); this is consistent
    # only when width == height (square viewport) — confirm before using
    # non-square frusta.
    self.renders = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 3])
    self.renders_sample_pos = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 2])
    self.renders_faces = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height']]).astype(np.uint32)
    self.renders_sample_barycentric1 = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 2])
    self.renders_sample_barycentric2 = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 1])
    self.renders_sample_barycentric = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 3])
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3,
                      GL.GL_COLOR_ATTACHMENT4]
    GL.glDrawBuffers(5, drawingBuffers)
    GL.glClearColor(0., 0., 0., 0.)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    for sample in np.arange(self.nsamples):
        # Tell the fetch shader which MSAA sample index to read.
        sampleLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'sample')
        GL.glUniform1i(sampleLoc, sample)
        # Bind the five multisample source textures to units 0..4.
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
        GL.glUniform1i(self.colorsLoc, 0)
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
        GL.glUniform1i(self.sample_positionsLoc, 1)
        GL.glActiveTexture(GL.GL_TEXTURE2)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
        GL.glUniform1i(self.sample_facesLoc, 2)
        GL.glActiveTexture(GL.GL_TEXTURE3)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
        GL.glUniform1i(self.sample_barycentric1Loc, 3)
        GL.glActiveTexture(GL.GL_TEXTURE4)
        GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
        GL.glUniform1i(self.sample_barycentric2Loc, 4)
        # The geometry shader expands a single point into a fullscreen quad.
        GL.glBindVertexArray(self.vao_quad)
        GL.glDrawArrays(GL.GL_POINTS, 0, 1)
        # Read the fetched per-sample data back to the CPU, one attachment at a time.
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
        result = np.flipud(
            np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
                self.frustum['height'], self.frustum['height'], 3)[:, :, 0:3].astype(np.float64))
        self.renders[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
        result = np.flipud(
            np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
                self.frustum['height'], self.frustum['height'], 3)[:, :, 0:2].astype(np.float64))
        self.renders_sample_pos[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
        result = np.flipud(
            np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RED_INTEGER, GL.GL_UNSIGNED_INT),
                          np.uint32).reshape(self.frustum['height'], self.frustum['height'])[:, :].astype(np.uint32))
        self.renders_faces[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT3)
        result = np.flipud(
            np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
                self.frustum['height'], self.frustum['height'], 3)[:, :, 0:2].astype(np.float64))
        self.renders_sample_barycentric1[sample] = result
        GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT4)
        result = np.flipud(
            np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
                self.frustum['height'], self.frustum['height'], 3)[:, :, 0:1].astype(np.float64))
        self.renders_sample_barycentric2[sample] = result
        # Full barycentric triple = the two fetched coords + the third channel.
        self.renders_sample_barycentric[sample] = np.concatenate(
            [self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2[sample][:, :, 0:1]], 2)
    GL.glBindVertexArray(0)
    GL.glClearColor(0., 0., 0., 1.)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glDisable(GL.GL_MULTISAMPLE)
    # Resolve: average colors over samples; mark cached outputs stale.
    self.render_resolved = np.mean(self.renders, 0)
    self.updateRender = True
    self.updateDerivatives_verts = True
    self.updateDerivatives_vc = True
def draw_visibility_image_ms(self, v, f):
    """Render a face-visibility (face-id) image with the multisample pipeline.

    Assumes the camera/GL state is already set up. Face ids are 1-based
    (0 means "no face") and packed one byte per RGB channel before being
    drawn via ``draw_colored_primitives_ms``.

    :param v: vertex array (any array-like convertible by np.asarray).
    :param f: integer face-index array; one id is emitted per face.
    """
    GL.glUseProgram(self.visibilityProgram_ms)
    v = np.asarray(v)
    # BUG FIX: the original code unconditionally called
    # self.draw_visibility_image_ms(v, f) here — i.e. itself — which
    # recurses forever (RecursionError). The self-call is removed.
    # Attach FBO
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # 1-based face ids, split byte-wise across the three color channels.
    fc = np.arange(1, len(f) + 1)
    fc = np.tile(fc.reshape((-1, 1)), (1, 3))
    fc[:, 0] = fc[:, 0] & 255
    fc[:, 1] = (fc[:, 1] >> 8) & 255
    fc[:, 2] = (fc[:, 2] >> 16) & 255
    fc = np.asarray(fc, dtype=np.uint8)
    self.draw_colored_primitives_ms(self.vao_dyn_ub, v, f, fc)
# this assumes that fc is either "by faces" or "verts by face", not "by verts"
def draw_colored_primitives_ms(self, vao, v, f, fc=None):
    """Draw triangles ``v`` indexed by ``f`` (optionally flat-colored by
    ``fc``) into color attachment 2 of the multisample errors FBO.

    ``fc`` may be given either per-face (one row per face, repeated across
    the face's vertices here) or per vertex-of-face; it is uploaded as
    uint8 colors. Depth testing is disabled for the draw and re-enabled
    afterwards.

    :raises Exception: if fc's row count matches neither faces nor
        face-vertices.
    """
    # Expand vertices so each face has its own copy of its three corners.
    verts_by_face = np.asarray(v.reshape((-1, 3))[f.ravel()], dtype=np.float64, order='C')
    GL.glBindVertexArray(vao)
    self.vbo_verts_dyn.set_array(verts_by_face.astype(np.float32))
    self.vbo_verts_dyn.bind()
    if fc is not None:
        if fc.size == verts_by_face.size:
            # Already one color per vertex-of-face.
            vc_by_face = fc
        else:
            # One color per face: repeat it for each vertex of the face.
            vc_by_face = np.repeat(fc, f.shape[1], axis=0)
            if vc_by_face.size != verts_by_face.size:
                raise Exception('fc must have either rows=(#rows in faces) or rows=(# elements in faces)')
        vc_by_face = np.asarray(vc_by_face, dtype=np.uint8, order='C')
        self.vbo_colors_ub.set_array(vc_by_face)
        self.vbo_colors_ub.bind()
    primtype = GL.GL_TRIANGLES
    # Trivial 0..N-1 index buffer matching the expanded vertex array.
    self.vbo_indices_dyn.set_array(np.arange(f.size, dtype=np.uint32).ravel())
    self.vbo_indices_dyn.bind()
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
    drawingBuffers = [GL.GL_COLOR_ATTACHMENT2]
    GL.glDrawBuffers(1, drawingBuffers)
    # MVP from the camera; a [0,0,0,1] row makes the view matrix 4x4.
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glDrawElements(primtype, len(self.vbo_indices_dyn), GL.GL_UNSIGNED_INT, None)
    GL.glEnable(GL.GL_DEPTH_TEST)
def compute_dr_wrt(self, wrt):
    """Chumpy hook: derivative of this node's output with respect to ``wrt``.

    Supported: the camera (vertex derivatives) and the vertex colors.
    The ``bgcolor`` and ``texture_stack`` branches are explicitly marked
    as not working by the original author. Returns None for any other
    ``wrt``.
    """
    visibility = self.visibility_image
    if wrt is self.camera:
        derivatives_verts = self.get_derivatives_verts()
        return derivatives_verts
    elif wrt is self.vc:
        derivatives_vc = self.get_derivatives_vc()
        return derivatives_vc
    # Not working atm.:
    elif wrt is self.bgcolor:
        return 2. * (self.imageGT.r - self.render_image).ravel() * common.dr_wrt_bgcolor(visibility, self.frustum, num_channels=self.num_channels)
    # Not working atm.:
    elif wrt is self.texture_stack:
        # 4294967295 == 0xFFFFFFFF marks "no face visible" pixels.
        IS = np.nonzero(self.visibility_image.ravel() != 4294967295)[0]
        texcoords, texidx = self.texcoord_image_quantized
        vis_texidx = texidx.ravel()[IS]
        vis_texcoords = texcoords.ravel()[IS]
        # NOTE(review): `col` is an out-of-view helper — presumably reshapes
        # to a column vector; confirm against its definition.
        JS = vis_texcoords * np.tile(col(vis_texidx), [1, 2]).ravel()
        clr_im = -2. * (self.imageGT.r - self.render_image) * self.renderWithoutTexture
        if False:  # debug visualization, intentionally disabled
            cv2.imshow('clr_im', clr_im)
            cv2.waitKey(1)
        r = clr_im[:, :, 0].ravel()[IS]
        g = clr_im[:, :, 1].ravel()[IS]
        b = clr_im[:, :, 2].ravel()[IS]
        data = np.concatenate((r, g, b))
        # Expand pixel indices to interleaved RGB component indices.
        IS = np.concatenate((IS * 3, IS * 3 + 1, IS * 3 + 2))
        JS = np.concatenate((JS * 3, JS * 3 + 1, JS * 3 + 2))
        return sp.csc_matrix((data, (IS, JS)), shape=(self.r.size, wrt.r.size))
    return None
def compute_r(self):
    """Chumpy hook: produce this node's output image (render or residuals)."""
    result = self.render()
    return result
@depends_on(dterms + terms)
def renderWithoutColor(self):
    """Cached view of the render with no per-vertex color applied.

    Presumably chumpy's ``depends_on`` memoizes this until a dterm/term
    changes — confirm against the decorator's definition.
    """
    self._call_on_changed()
    return self.render_nocolor
@depends_on(dterms + terms)
def renderWithoutTexture(self):
    """Cached view of the render with no texture applied.

    Presumably chumpy's ``depends_on`` memoizes this until a dterm/term
    changes — confirm against the decorator's definition.
    """
    self._call_on_changed()
    return self.render_notexture
# @depends_on(dterms+terms)
def render(self):
    """Return the current output image: the plain render when no ground-truth
    image is set, otherwise the residuals image. Recomputes (and caches)
    both via compute_image only when self.updateRender is set.
    """
    self._call_on_changed()
    vis = self.visibility_image
    # 4294967295 == 0xFFFFFFFF marks pixels where no face is visible.
    visible_idx = np.nonzero(vis.ravel() != 4294967295)[0]
    if self.updateRender:
        rendered, resid = self.compute_image(visible_idx, vis, self.f)
        self.render_result = rendered
        self.residuals_result = resid
        self.updateRender = False
    return self.render_result if self.imageGT is None else self.residuals_result
def get_derivatives_verts(self):
    """Return (and cache) derivatives of the output w.r.t. projected vertices.

    Recomputes only when ``updateDerivatives_verts`` is set; refreshes the
    render first if it is stale, since the derivatives depend on it.
    """
    self._call_on_changed()
    visibility = self.visibility_image
    color = self.render_resolved
    # Pixels covered by geometry (4294967295 == 0xFFFFFFFF is background).
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateDerivatives_verts:
        if self.updateRender:
            self.render()
        # NOTE(review): self.v.r.size / 3 is float division on Python 3;
        # presumably the vertex count — confirm an int is not required.
        derivatives_verts = self.compute_derivatives_verts(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'],
                                                           self.v.r.size / 3, self.f)
        self.derivatives_verts = derivatives_verts
        self.updateDerivatives_verts = False
    return self.derivatives_verts
def get_derivatives_vc(self):
    """Return (and cache) derivatives of the output w.r.t. per-vertex colors.

    Mirrors :meth:`get_derivatives_verts`: recomputes only when
    ``updateDerivatives_vc`` is set, refreshing a stale render first.
    """
    self._call_on_changed()
    visibility = self.visibility_image
    color = self.render_resolved
    # Pixels covered by geometry (4294967295 == 0xFFFFFFFF is background).
    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    barycentric = self.barycentric_image
    if self.updateDerivatives_vc:
        if self.updateRender:
            self.render()
        # NOTE(review): self.v.r.size / 3 is float division on Python 3;
        # presumably the vertex count — confirm an int is not required.
        derivatives_vc = self.compute_derivatives_vc(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'],
                                                     self.v.r.size / 3, self.f)
        self.derivatives_vc = derivatives_vc
        self.updateDerivatives_vc = False
    return self.derivatives_vc
# # @depends_on(dterms+terms)
# def image_and_derivatives(self):
# # self._call_on_changed()
# visibility = self.visibility_image
#
# color = self.render_resolved
#
# visible = np.nonzero(visibility.ravel() != 4294967295)[0]
# num_visible = len(visible)
#
# barycentric = self.barycentric_image
#
# if self.updateRender:
# render, derivatives = self.compute_image_and_derivatives(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)
# self.render = render
# self.derivatives = derivatives
# self.updateRender = False
#
# return self.render, self.derivatives
#
def barycentricDerivatives(self, vertices, faces, verts):
    """Derivative of per-pixel interpolated color w.r.t. the 2D projections
    of the three corners of each face, through the barycentric coordinates.

    Parameters (NOTE(review): shapes inferred from the reshapes below — confirm):
        vertices: per-face corner positions, flattened to (3*F, 3).
        faces:    (F, 3) int indices into ``self.vc`` / ``self.v``.
        verts:    interpolated 3D point per face (F*3 floats).

    Returns ``didp`` — for each color channel and face, the derivative of the
    color w.r.t. the (u, v) image position of each of the three corners.
    """
    import chumpy as ch  # only needed by the (removed) autodiff checks below
    # Homogeneous object-space corners and camera matrices.
    vertices = np.concatenate([vertices, np.ones([vertices.size // 3, 1])], axis=1)
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    # viewVerts = negYMat.dot(view_mtx.dot(verts_hom.T).T[:, :3].T).T.reshape([-1, 3])
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerticesNonBnd = camMtx[0:3, 0:3].dot(view_mtx.dot(vertices.T).T[:, :3].T).T.reshape([-1, 3, 3])
    # (A long chumpy autodiff verification of the analytic barycentric
    # derivatives below was kept here as commented-out code; removed for
    # readability — see version control history if it is needed again.)
    # NOTE(review): the next four statements recompute view_mtx/camMtx/
    # verts_hom/projVerts identically to the block above — redundant work.
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    # viewVerts = negYMat.dot(view_mtx.dot(verts_hom.T).T[:, :3].T).T.reshape([-1, 3])
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerts = projVerts
    projVerts = projVerts[:, :2] / projVerts[:, 2:3]
    # viewVerticesNonBnd = negYMat.dot(view_mtx.dot(vertices.T).T[:, :3].T).T.reshape([-1, 3, 3])
    # The three (camera-space) triangle corners and their image projections.
    p0 = viewVerticesNonBnd[:, 0, :]
    p1 = viewVerticesNonBnd[:, 1, :]
    p2 = viewVerticesNonBnd[:, 2, :]
    p0_proj = p0[:, 0:2] / p0[:, 2:3]
    p1_proj = p1[:, 0:2] / p1[:, 2:3]
    p2_proj = p2[:, 0:2] / p2[:, 2:3]
    # D = np.linalg.det(np.concatenate([(p3 - p1).reshape([nNonBndFaces, 1, 3]), (p1 - p2).reshape([nNonBndFaces, 1, 3])], axis=1))
    # Unnormalized face normal and its unit version.
    nt = np.cross(p1 - p0, p2 - p0)
    nt_norm = nt / np.linalg.norm(nt, axis=1)[:, None]
    # a = -nt_norm[:, 0] / nt_norm[:, 2]
    # b = -nt_norm[:, 1] / nt_norm[:, 2]
    # c = np.sum(nt_norm * p0, 1) / nt_norm[:, 2]
    cam_f = 1
    # d(3D corner)/d(image u,v) for each corner, assembled as (F, 3, 2).
    u = p0[:, 0] / p0[:, 2]
    v = p0[:, 1] / p0[:, 2]
    # xudiv = (cam_f - a * u - b * v) ** 2
    # xu = np.c_[c * (cam_f - b * v) / xudiv, a * v * c / xudiv, a * cam_f * c / xudiv]
    # xv = np.c_[b * u * c / xudiv, c * (cam_f - a * u) / xudiv, b * cam_f * c / xudiv]
    xu = np.c_[p0[:, 2][:, None], np.zeros([len(p0), 1]), (-p0[:, 0] / u ** 2)[:, None]]
    xv = np.c_[np.zeros([len(p0), 1]), p0[:, 2][:, None], (-p0[:, 1] / v ** 2)[:, None]]
    dxdp_0 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p1[:, 0] / p1[:, 2]
    v = p1[:, 1] / p1[:, 2]
    # xudiv = (cam_f - a * u - b * v) ** 2
    # xu = np.c_[c * (cam_f - b * v) / xudiv, a * v * c / xudiv, a * cam_f * c / xudiv]
    # xv = np.c_[b * u * c / xudiv, c * (cam_f - a * u) / xudiv, b * cam_f * c / xudiv]
    xu = np.c_[p1[:, 2][:, None], np.zeros([len(p1), 1]), (-p1[:, 0] / u ** 2)[:, None]]
    xv = np.c_[np.zeros([len(p1), 1]), p1[:, 2][:, None], (-p1[:, 1] / v ** 2)[:, None]]
    dxdp_1 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p2[:, 0] / p2[:, 2]
    v = p2[:, 1] / p2[:, 2]
    # xudiv = (cam_f - a * u - b * v) ** 2
    # xu = np.c_[c * (cam_f - b * v) / xudiv, a * v * c / xudiv, a * cam_f * c / xudiv]
    # xv = np.c_[b * u * c / xudiv, c * (cam_f - a * u) / xudiv, b * cam_f * c / xudiv]
    xu = np.c_[p2[:, 2][:, None], np.zeros([len(p2), 1]), (-p2[:, 0] / u ** 2)[:, None]]
    xv = np.c_[np.zeros([len(p2), 1]), p2[:, 2][:, None], (-p2[:, 1] / v ** 2)[:, None]]
    dxdp_2 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    # x = u * c / (cam_f - a * u - b * v)
    # y = v*c/(cam_f - a*u - b*v)
    # z = c*cam_f/(cam_f - a*u - b*v)
    # Triangle area A and normal magnitude (|nt| = 2A).
    A = 0.5 * np.linalg.norm(np.cross(p1 - p0, p2 - p0), axis=1)
    nt_mag = A * 2
    # nt = nt / A
    # db1 = 0.5*np.cross(nt_norm, p2-p1)/A[:, None]
    # db2 = 0.5*np.cross(nt_norm, p0-p2)/A[:, None]
    # db3_2 = 0.5*np.cross(nt_norm, p1-p0)/A[:, None]
    # db3 = - db1 - db2
    p = viewVerts
    # d(1/|nt|) prefactor shared by the product-rule terms below.
    pre1 = -1 / (nt_mag[:, None] ** 2) * nt_norm
    ident = np.identity(3)
    ident = np.tile(ident[None, :], [len(p2), 1, 1])
    # d(nt)/d(corner) via the product rule on the two cross-product factors.
    dntdp0 = np.cross((p2 - p0)[:, None, :], -ident) + np.cross(-ident, (p1 - p0)[:, None, :])
    dntdp1 = np.cross((p2 - p0)[:, None, :], ident)
    dntdp2 = np.cross(ident, (p1 - p0)[:, None, :])
    # Pol check this!:
    # Jacobian of the normalization x -> x/|x| evaluated at nt.
    dntnorm = (ident - np.einsum('ij,ik->ijk', nt_norm, nt_norm)) / nt_mag[:, None, None]
    # dntnorm = (ident - np.einsum('ij,ik->ijk',nt_norm,nt_norm))/nt_mag[:,None,None]
    dntnormdp0 = np.einsum('ijk,ikl->ijl', dntnorm, dntdp0)
    dntnormdp1 = np.einsum('ijk,ikl->ijl', dntnorm, dntdp1)
    dntnormdp2 = np.einsum('ijk,ikl->ijl', dntnorm, dntdp2)
    dpart1p0 = np.einsum('ij,ijk->ik', pre1, dntdp0)
    dpart1p1 = np.einsum('ij,ijk->ik', pre1, dntdp1)
    dpart1p2 = np.einsum('ij,ijk->ik', pre1, dntdp2)
    # Barycentric coordinate b0 and its derivatives w.r.t. the three corners
    # (product rule: 1/|nt| factor times the cross-product numerator).
    b0 = np.sum(np.cross(nt_norm, p2 - p1) * (p - p1), axis=1)[:, None]
    db0part2p0 = np.einsum('ikj,ij->ik', np.cross(dntnormdp0.swapaxes(1, 2), (p2 - p1)[:, None, :]), p - p1)
    # db0part2p1 = np.einsum('ikj,ij->ik',np.cross((p2 - p1)[:, None, :], dntnormdp0), p - p1) + np.einsum('ikj,ij->ik', np.cross(-ident,nt_norm[:, None, :]), p - p1) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p2-p1),-ident)
    # db0part2p1 = np.einsum('ikj,ij->ik',np.cross((p2 - p1)[:, None, :], dntnormdp0.swapaxes(1,2)), p - p1) + np.einsum('ikj,ij->ik', np.cross(-ident, nt_norm[:, None, :]), p - p1) + np.einsum('ik,ikj->ik', np.cross(p2-p1,nt_norm[:, :]),-ident)
    db0part2p1 = np.einsum('ikj,ij->ik', np.cross(dntnormdp1.swapaxes(1, 2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], -ident), p - p1) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p2 - p1), -ident)
    db0part2p2 = np.einsum('ikj,ij->ik', np.cross(dntnormdp2.swapaxes(1, 2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], ident), p - p1)
    db0dp0wrtpart1 = dpart1p0 * b0
    db0dp1wrtpart1 = dpart1p1 * b0
    db0dp2wrtpart1 = dpart1p2 * b0
    db0dp0wrtpart2 = 1. / (nt_mag[:, None]) * db0part2p0
    db0dp1wrtpart2 = 1. / (nt_mag[:, None]) * db0part2p1
    db0dp2wrtpart2 = 1. / (nt_mag[:, None]) * db0part2p2
    db0dp0wrt = db0dp0wrtpart1 + db0dp0wrtpart2
    db0dp1wrt = db0dp1wrtpart1 + db0dp1wrtpart2
    db0dp2wrt = db0dp2wrtpart1 + db0dp2wrtpart2
    ######
    # Same structure for barycentric coordinate b1.
    b1 = np.sum(np.cross(nt_norm, p0 - p2) * (p - p2), axis=1)[:, None]
    db1part2p0 = np.einsum('ikj,ij->ik', np.cross(dntnormdp0.swapaxes(1, 2), (p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], ident), p - p2)
    db1part2p1 = np.einsum('ikj,ij->ik', np.cross(dntnormdp1.swapaxes(1, 2), (p0 - p2)[:, None, :]), p - p2)
    db1part2p2 = np.einsum('ikj,ij->ik', np.cross(dntnormdp2.swapaxes(1, 2), (p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], -ident), p - p2) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p0 - p2), -ident)
    db1dp0wrtpart1 = dpart1p0 * b1
    db1dp1wrtpart1 = dpart1p1 * b1
    db1dp2wrtpart1 = dpart1p2 * b1
    db1dp0wrtpart2 = 1. / (nt_mag[:, None]) * db1part2p0
    db1dp1wrtpart2 = 1. / (nt_mag[:, None]) * db1part2p1
    db1dp2wrtpart2 = 1. / (nt_mag[:, None]) * db1part2p2
    db1dp0wrt = db1dp0wrtpart1 + db1dp0wrtpart2
    db1dp1wrt = db1dp1wrtpart1 + db1dp1wrtpart2
    db1dp2wrt = db1dp2wrtpart1 + db1dp2wrtpart2
    ######
    # Same structure for barycentric coordinate b2.
    b2 = np.sum(np.cross(nt_norm, p1 - p0) * (p - p0), axis=1)[:, None]
    db2part2p0 = np.einsum('ikj,ij->ik', np.cross(dntnormdp0.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], -ident), p - p0) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p1 - p0), -ident)
    db2part2p1 = np.einsum('ikj,ij->ik', np.cross(dntnormdp1.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], ident), p - p0)
    db2part2p2 = np.einsum('ikj,ij->ik', np.cross(dntnormdp2.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0)
    db2dp0wrtpart1 = dpart1p0 * b2
    db2dp1wrtpart1 = dpart1p1 * b2
    db2dp2wrtpart1 = dpart1p2 * b2
    db2dp0wrtpart2 = 1. / (nt_mag[:, None]) * db2part2p0
    db2dp1wrtpart2 = 1. / (nt_mag[:, None]) * db2part2p1
    db2dp2wrtpart2 = 1. / (nt_mag[:, None]) * db2part2p2
    db2dp0wrt = db2dp0wrtpart1 + db2dp0wrtpart2
    db2dp1wrt = db2dp1wrtpart1 + db2dp1wrtpart2
    db2dp2wrt = db2dp2wrtpart1 + db2dp2wrtpart2
    # Stack d(b0,b1,b2)/d(corner) per corner, then over the three corners.
    dp0 = np.concatenate([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]], axis=1)
    dp1 = np.concatenate([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]], axis=1)
    dp2 = np.concatenate([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]], axis=1)
    #
    dp = np.concatenate([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)
    # If dealing with degenerate triangles, ignore that gradient.
    # dp[nt_mag <= 1e-15] = 0
    dp = dp[None, :]
    nFaces = len(faces)
    # visTriVC = self.vc.r[faces.ravel()].reshape([nFaces, 3, 3]).transpose([2, 0, 1])[:, :, :, None, None]
    # Per-face corner colors, clamped to [0, 1].
    vc = self.vc.r[faces.ravel()].reshape([nFaces, 3, 3]).transpose([2, 0, 1])[:, :, :, None, None]
    vc[vc > 1] = 1
    vc[vc < 0] = 0
    visTriVC = vc
    dxdp = np.concatenate([dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]], axis=1)
    dxdp = dxdp[None, :, None]
    # dbvc = np.sum(dp * visTriVC, 2)
    # dbvc = dp * visTriVC * t_area[None, :, None, None, None]
    # Chain rule: color-weighted barycentric gradients, then through the
    # 3D-corner -> image-(u,v) Jacobians.
    dbvc = dp * visTriVC
    didp = np.sum(dbvc[:, :, :, :, :, None] * dxdp, 4).sum(2)
    # output should be shape: VC x Ninput x Tri Points x UV
    return didp
def compute_image(self, visible, visibility, f):
    """Compose the final image (and, with a ground-truth image attached, the
    squared-residual image) with edge antialiasing.

    Interior pixels take their value from ``self.color_image``; boundary
    pixels are a distance-weighted blend of the multi-sample renders
    (``self.renders``).  Also caches several intermediates on ``self`` for
    the derivative routines (linedist, lnorm, v1/v2, d, nonIntersect, ...).

    NOTE(review): original docstring described a "sparse jacobian" — stale;
    the function returns ``(finalColor, finalResidual)`` where finalResidual
    is None when ``self.imageGT`` is None.
    """
    # Boundary pixels that are actually covered by geometry
    # (4294967295 == 0xFFFFFFFF is the background face id).
    boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
    zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
    edge_visibility = self.boundaryid_image
    nsamples = self.nsamples
    if np.any(boundaryImage):
        # Per-sample subpixel positions and colors restricted to boundary pixels.
        sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape(
            [nsamples, -1, 2])
        # sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:,(zerosIm*boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
        sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
        boundaryFaces = visibility[(boundaryImage) & (visibility != 4294967295)]
        nBndFaces = len(boundaryFaces)
        # Projected endpoints of the visible occluding edge at each boundary pixel.
        vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
        vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])
        sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
        # if self.debug:
        #     import pdb; pdb.set_trace()
        faces = f[sampleFaces].ravel()
        vertsPerFaceProjBnd = self.camera.r[faces].reshape([-1, 3, 2])
        nv = len(vertsPerFaceProjBnd)
        # Projected triangle area per sampled face (homogeneous determinant).
        p0_proj = np.c_[vertsPerFaceProjBnd[:, 0, :], np.ones([nv, 1])]
        p1_proj = np.c_[vertsPerFaceProjBnd[:, 1, :], np.ones([nv, 1])]
        p2_proj = np.c_[vertsPerFaceProjBnd[:, 2, :], np.ones([nv, 1])]
        t_area_bnd = np.abs(np.linalg.det(np.concatenate([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)) * 0.5)
        t_area_bnd[t_area_bnd > 1] = 1
        # Trick to cap to 1 while keeping gradients.
        # Distance from each sample point p to the occluding edge segment (p1, p2).
        p1 = vertsProjBndSamples.reshape([-1,2,2])[:, 0, :]
        p2 = vertsProjBndSamples.reshape([-1,2,2])[:, 1, :]
        p = sampleV.reshape([-1,2])
        l = (p2 - p1)
        linedist = np.sqrt((np.sum(l ** 2, axis=1)))[:, None]
        self.linedist = linedist
        lnorm = l / linedist
        self.lnorm = lnorm
        v1 = p - p1
        self.v1 = v1
        d = v1[:, 0] * lnorm[:, 0] + v1[:, 1] * lnorm[:, 1]
        self.d = d
        intersectPoint = p1 + d[:, None] * lnorm
        v2 = p - p2
        self.v2 = v2
        l12 = (p1 - p2)
        linedist12 = np.sqrt((np.sum(l12 ** 2, axis=1)))[:, None]
        lnorm12 = l12 / linedist12
        d2 = v2[:, 0] * lnorm12[:, 0] + v2[:, 1] * lnorm12[:, 1]
        # Projection falls outside the segment: clamp to the nearest endpoint.
        nonIntersect = (d2 < 0) | (d < 0)
        self.nonIntersect = nonIntersect
        argminDistNonIntersect = np.argmin(np.c_[d[nonIntersect], d2[nonIntersect]], 1)
        self.argminDistNonIntersect = argminDistNonIntersect
        intersectPoint[nonIntersect] = vertsProjBndSamples.reshape([-1,2,2])[nonIntersect][np.arange(nonIntersect.sum()), argminDistNonIntersect]
        lineToPoint = (p - intersectPoint)
        n = lineToPoint
        dist = np.sqrt((np.sum(lineToPoint ** 2, axis=1)))[:, None]
        n_norm = lineToPoint / dist
        self.n_norm = n_norm
        self.dist = dist
        d_final = dist.squeeze()
        # max_nx_ny = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))
        # d_final = d_final / max_nx_ny
        d_final = d_final
        # (A commented-out alternative that back-projected the intersection
        # point to 3D to handle non-intersecting edges was kept here; removed
        # for readability — see version control history.)
        d_finalNP = d_final.copy()
        self.d_final = d_finalNP
        self.t_area_bnd = t_area_bnd
        # NOTE(review): the np.zeros result is immediately overwritten below.
        areaWeights = np.zeros([nsamples, nBndFaces])
        areaWeights = t_area_bnd.reshape([nsamples, nBndFaces])
        areaWeightsTotal = areaWeights.sum(0)
        # areaWeightsTotal[areaWeightsTotal < 1] = 1
        self.areaWeights = areaWeights
        self.areaWeightsTotal = areaWeightsTotal
        # Distance-weighted average of the sample colors per boundary pixel.
        finalColorBnd = np.ones([self.nsamples, boundaryFaces.size, 3])
        self.d_final_total = d_finalNP.reshape([self.nsamples, -1,1]).sum(0)
        # if self.imageGT is not None:
        finalColorBnd = sampleColors * d_finalNP.reshape([self.nsamples, -1,1]) / (self.d_final_total.reshape([1, -1,1]))
        # finalColorBnd = areaWeights[:,:,None] * sampleColors * d_finalNP.reshape([self.nsamples, -1,1]) / (self.d_final_total.reshape([1, -1,1]) * areaWeightsTotal[None,:,None])
        self.finalColorBnd = finalColorBnd
        # else:
        #     finalColorBnd = sampleColors
        bndColorsImage = np.zeros_like(self.color_image)
        bndColorsImage[(zerosIm * boundaryImage), :] = np.sum(finalColorBnd, axis=0)
        finalColorImageBnd = bndColorsImage
        if self.imageGT is not None:
            # Same distance weighting applied to squared residuals vs. ground truth.
            bndColorsResiduals = np.zeros_like(self.color_image)
            self.sampleResiduals = (sampleColors - self.imageGT.r[(zerosIm * boundaryImage),:][None,:])
            self.sampleResidualsWeighted = self.sampleResiduals**2 * d_finalNP.reshape([self.nsamples, -1,1]) / self.d_final_total.reshape([1, -1,1])
            bndColorsResiduals[(zerosIm * boundaryImage), :] = np.sum(self.sampleResidualsWeighted,0)
    if np.any(boundaryImage):
        # Blend: interior pixels from color_image, boundary from the AA image.
        finalColor = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * finalColorImageBnd
        if self.imageGT is not None:
            self.residuals = (self.color_image - self.imageGT.r)
            errors = self.residuals**2
            finalResidual = (1 - boundaryImage)[:, :, None] * errors + boundaryImage[:, :, None] * bndColorsResiduals
    else:
        finalColor = self.color_image
        if self.imageGT is not None:
            finalResidual = (self.color_image - self.imageGT.r)**2
    if self.imageGT is None:
        finalResidual = None
    # Clamp the final image to the valid [0, 1] color range.
    finalColor[finalColor > 1] = 1
    finalColor[finalColor < 0] = 0
    return finalColor, finalResidual
def compute_derivatives_verts(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
    """Assemble the sparse Jacobian of the output image w.r.t. the 2D
    projected vertex positions.

    Three contributions are summed:
      * boundary pixels w.r.t. the occluding-edge endpoints (distance weights),
      * boundary pixels w.r.t. sampled-face corners (barycentric derivatives),
      * interior (non-boundary) pixels w.r.t. their face corners.

    Returns a scipy CSC matrix of shape
    (image_width * image_height * n_channels, num_verts * 2).
    Relies on intermediates cached on ``self`` by :meth:`compute_image`.
    """
    width = self.frustum['width']
    height = self.frustum['height']
    num_channels = 3
    n_channels = num_channels
    vc_size = self.vc.size
    # xdiff = dEdx
    # ydiff = dEdy
    nVisF = len(visibility.ravel()[visible])
    # projVertices = self.camera.r[f[visibility.ravel()[visible]].ravel()].reshape([nVisF,3, 2])
    # 4294967295 == 0xFFFFFFFF is the background face id.
    boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
    rangeIm = np.arange(self.boundarybool_image.size)
    zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
    edge_visibility = self.boundaryid_image
    vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
    nsamples = self.nsamples
    sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape(
        [nsamples, -1, 2])
    sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
    sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
    nonBoundaryFaces = visibility[zerosIm * (~boundaryImage) & (visibility != 4294967295)]
    if np.any(boundaryImage):
        # Intermediates cached by compute_image for the current render.
        n_norm = self.n_norm
        dist = self.dist
        linedist = self.linedist
        d = self.d
        v1 = self.v1
        lnorm = self.lnorm
        d_final = self.d_final
        boundaryFaces = visibility[boundaryImage]
        nBndFaces = len(boundaryFaces)
        # vertsProjBnd[None, :] - sampleV[:,None,:]
        vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])
        # Computing gradients:
        # A multisampled pixel color is given by: w R + (1-w) R' thus:
        # 1 derivatives samples outside wrt v 1: (dw * (svc) - dw (bar'*vc') )/ nsamples for face sample
        # 2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
        # 3 derivatives samples outside wrt v bar edge: (1-w) (dbar'*vc') )/ nsamples for faces edge (barv1', barv2', 0)
        # 4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
        # 5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
        # 6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
        # 7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
        # for every boundary pixel i,j we have list of sample faces. compute gradients at each and sum them according to face identity, options:
        # - Best: create sparse matrix for every matrix. sum them! same can be done with boundary.
        # Finally, stack data, and IJ of nonbnd with bnd on both dwrt_v and dwrt_vc.
        ######## 1 derivatives samples outside wrt v 1: (dw * (bar*vc) - dw (bar'*vc') )/ nsamples for face sample
        # (A commented-out chumpy autodiff verification of the edge-distance
        # derivatives was kept here; removed for readability — see version
        # control history.)
        nonIntersect = self.nonIntersect
        argminDistNonIntersect = self.argminDistNonIntersect
        # max_dx_dy = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))
        d_final_np = dist
        # d_final_np = dist / max_dx_dy
        ident = np.identity(2)
        ident = np.tile(ident[None, :], [len(d_final_np), 1, 1])
        # Jacobian of the segment-direction normalization, then chain rule
        # through the projection of the sample point onto the edge.
        dlnorm = (ident - np.einsum('ij,ik->ijk', lnorm, lnorm)) / linedist[:, None]
        dl_normdp1 = np.einsum('ijk,ikl->ijl', dlnorm, -ident)
        dl_normdp2 = np.einsum('ijk,ikl->ijl', dlnorm, ident)
        dv1dp1 = -ident
        dv1dp2 = 0
        dddp1 = np.einsum('ijk,ij->ik', dv1dp1, lnorm) + np.einsum('ij,ijl->il', v1, dl_normdp1)
        dddp2 = 0 + np.einsum('ij,ijl->il', v1, dl_normdp2)
        dipdp1 = ident + (dddp1[:, None, :] * lnorm[:, :, None]) + d[:, None, None] * dl_normdp1
        dipdp2 = (dddp2[:, None, :] * lnorm[:, :, None]) + d[:, None, None] * dl_normdp2
        #good up to here.
        dndp1 = -dipdp1
        dndp2 = -dipdp2
        dn_norm = (ident - np.einsum('ij,ik->ijk', n_norm, n_norm)) / dist[:, None]
        # dn_normdp1 = np.einsum('ijk,ikl->ijl', dn_norm, dndp1)
        # dn_normdp2 = np.einsum('ijk,ikl->ijl', dn_norm, dndp2)
        ddistdp1 = np.einsum('ij,ijl->il', n_norm, dndp1)
        ddistdp2 = np.einsum('ij,ijl->il', n_norm, dndp2)
        # argmax_nx_ny = np.argmax(np.abs(n_norm), axis=1)
        # dmax_nx_ny_p1 = np.sign(n_norm)[np.arange(len(n_norm)), argmax_nx_ny][:, None] * dn_normdp1[np.arange(len(dn_normdp1)), argmax_nx_ny]
        # dmax_nx_ny_p2 = np.sign(n_norm)[np.arange(len(n_norm)), argmax_nx_ny][:, None] * dn_normdp2[np.arange(len(dn_normdp2)), argmax_nx_ny]
        # dd_final_dp1 = -1. / max_dx_dy[:, None] ** 2 * dmax_nx_ny_p1 * dist + 1. / max_dx_dy[:, None] * ddistdp1
        # dd_final_dp2 = -1. / max_dx_dy[:, None] ** 2 * dmax_nx_ny_p2 * dist + 1. / max_dx_dy[:, None] * ddistdp2
        dd_final_dp1 = ddistdp1
        dd_final_dp2 = ddistdp2
        # For those non intersecting points straight to the edge:
        v1 = self.v1[nonIntersect][argminDistNonIntersect == 0]
        v1_norm = v1 / np.sqrt((np.sum(v1 ** 2, axis=1)))[:, None]
        dd_final_dp1_nonintersect = -v1_norm
        v2 = self.v2[nonIntersect][argminDistNonIntersect == 1]
        v2_norm = v2 / np.sqrt((np.sum(v2 ** 2, axis=1)))[:, None]
        dd_final_dp2_nonintersect = -v2_norm
        # NOTE(review): the four assignments below use chained fancy indexing
        # (arr[boolmask][mask2] = x). The first boolean index returns a COPY,
        # so these assignments do NOT modify dd_final_dp1/dd_final_dp2 — they
        # are silent no-ops. Likely intended:
        # idx = np.flatnonzero(nonIntersect)[argminDistNonIntersect == 0];
        # dd_final_dp1[idx] = ... — confirm intent before fixing.
        dd_final_dp1[nonIntersect][argminDistNonIntersect == 0] = dd_final_dp1_nonintersect
        dd_final_dp1[nonIntersect][argminDistNonIntersect == 1] = 0
        dd_final_dp2[nonIntersect][argminDistNonIntersect == 1] = dd_final_dp2_nonintersect
        dd_final_dp2[nonIntersect][argminDistNonIntersect == 0] = 0
        # Quotient rule for the normalized distance weights w_s = d_s / sum_s d_s.
        dd_final_dp1_weighted_part1 = -self.d_final[:,None]* np.tile(dd_final_dp1.reshape([self.nsamples, -1, 2]).sum(0)[None,:,:],[self.nsamples,1,1]).reshape([-1, 2])/(np.tile(self.d_final_total[None,:], [self.nsamples, 1,1]).reshape([-1,1])**2)
        dd_final_dp1_weighted_part2 = dd_final_dp1 / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1])
        dd_final_dp1_weighted = dd_final_dp1_weighted_part1 + dd_final_dp1_weighted_part2
        dd_final_dp2_weighted_part1 = -self.d_final[:,None]*np.tile(dd_final_dp2.reshape([self.nsamples, -1, 2]).sum(0)[None,:,:],[self.nsamples,1,1]).reshape([-1, 2])/(np.tile(self.d_final_total[None,:], [self.nsamples, 1,1]).reshape([-1,1])**2)
        dd_final_dp2_weighted_part2 = dd_final_dp2 / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1])
        dd_final_dp2_weighted = dd_final_dp2_weighted_part1 + dd_final_dp2_weighted_part2
        if self.imageGT is None:
            dImage_wrt_outside_v1 = sampleColors.reshape([-1,3,1]) * dd_final_dp1_weighted[:, None, :]
            dImage_wrt_outside_v2 = sampleColors.reshape([-1,3,1]) * dd_final_dp2_weighted[:, None, :]
        else:
            dImage_wrt_outside_v1 = self.sampleResiduals.reshape([-1,3,1])**2 * dd_final_dp1_weighted[:, None, :]
            dImage_wrt_outside_v2 = self.sampleResiduals.reshape([-1,3,1])**2 * dd_final_dp2_weighted[:, None, :]
        # sampleV
        # z = dd_final_dp1.reshape([8, -1, 2])
        # eq = np.array([np.all(np.sign(z[:, i, :]) == -1) or np.all(np.sign(z[:, i, :]) == 1) for i in range(z.shape[1])])
        # dist_ns = dist.reshape([8,-1])
        # rightV = sampleV[0, :, 0] > np.max(sampleV[0, :, :], 0)[0] - 1
        # dist_ns[0, rightV]
        # dImage_wrt_outside_v1.reshape([8, -1, 3, 2])[0, rightV,:]
        # d_final_ch_weights
        # self.finalColorBnd
        ### Derivatives wrt V:
        # Scatter boundary-pixel gradients into a sparse matrix indexed by
        # (pixel*channel, edge-vertex*2).
        pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])
        IS = np.tile(col(pixels), (1, 2 * 2)).ravel()
        faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
        faces = np.tile(faces.reshape([1, -1, 2]), [self.nsamples, 1, 1]).ravel()
        JS = col(faces)
        JS = np.hstack((JS * 2, JS * 2 + 1)).ravel()
        if n_channels > 1:
            IS = np.concatenate([IS * n_channels + i for i in range(n_channels)])
            JS = np.concatenate([JS for i in range(n_channels)])
        data1 = dImage_wrt_outside_v1.transpose([1, 0, 2])
        data2 = dImage_wrt_outside_v2.transpose([1, 0, 2])
        data = np.concatenate([data1[:, :, None, :], data2[:, :, None, :]], 2)
        data = data.ravel()
        ij = np.vstack((IS.ravel(), JS.ravel()))
        result_wrt_verts_bnd = sp.csc_matrix((data, ij), shape=(image_width * image_height * n_channels, num_verts * 2))
        ######## 2 derivatives samples wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
        verticesBnd = self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3])
        sampleBarycentricBar = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),
                               :].reshape([-1, 3, 1])
        verts = np.sum(self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3, 3]) * sampleBarycentricBar, axis=1)
        dImage_wrt_bar_v = self.barycentricDerivatives(verticesBnd, f[sampleFaces.ravel()], verts).swapaxes(0, 1)
        if self.imageGT is None:
            # dImage_wrt_bar_v = dImage_wrt_bar_v * d_final[:, None, None, None] * self.t_area_bnd[:, None, None, None] / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
            dImage_wrt_bar_v = dImage_wrt_bar_v * d_final[:, None, None, None] / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
            # areaTotal = np.tile(self.areaWeightsTotal[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
            # d_final_total = np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
            # dImage_wrt_bar_v = self.areaWeights.reshape([-1,1,1,1]) * dImage_wrt_bar_v * d_final[:, None, None, None] / (areaTotal*d_final_total)
        else:
            dImage_wrt_bar_v = 2*self.sampleResiduals.reshape([-1,3])[:,:,None,None] * dImage_wrt_bar_v * d_final[:, None, None, None] * self.t_area_bnd[:, None, None, None] / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
        ### Derivatives wrt V: 2 derivatives samples wrt v bar: (w * (dbar*vc) )/ nsamples for faces sample
        # IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
        pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])
        IS = np.tile(col(pixels), (1, 2 * f.shape[1])).ravel()
        faces = f[sampleFaces].ravel()
        JS = col(faces)
        JS = np.hstack((JS * 2, JS * 2 + 1)).ravel()
        if n_channels > 1:
            IS = np.concatenate([IS * n_channels + i for i in range(n_channels)])
            JS = np.concatenate([JS for i in range(n_channels)])
        data = np.transpose(dImage_wrt_bar_v, [1, 0, 2, 3]).ravel()
        ij = np.vstack((IS.ravel(), JS.ravel()))
        result_wrt_verts_bnd_bar = sp.csc_matrix((data, ij), shape=(image_width * image_height * n_channels, num_verts * 2))
    ########### Non boundary derivatives: ####################
    nNonBndFaces = nonBoundaryFaces.size
    verticesNonBnd = self.v.r[f[nonBoundaryFaces].ravel()]
    vertsPerFaceProjBnd = self.camera.r[f[nonBoundaryFaces].ravel()].reshape([-1, 3, 2])
    nv = len(vertsPerFaceProjBnd)
    # Projected triangle area (homogeneous determinant), capped at 1.
    p0_proj = np.c_[vertsPerFaceProjBnd[:, 0, :], np.ones([nv, 1])]
    p1_proj = np.c_[vertsPerFaceProjBnd[:, 1, :], np.ones([nv, 1])]
    p2_proj = np.c_[vertsPerFaceProjBnd[:, 2, :], np.ones([nv, 1])]
    t_area_nonbnd = np.abs(np.linalg.det(np.concatenate([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)) * 0.5)
    t_area_nonbnd[t_area_nonbnd > 1] = 1
    bc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
    verts = np.sum(self.v.r[f[nonBoundaryFaces.ravel()].ravel()].reshape([-1, 3, 3]) * bc[:, :, None], axis=1)
    didp = self.barycentricDerivatives(verticesNonBnd, f[nonBoundaryFaces.ravel()], verts)
    if self.imageGT is None:
        # didp = didp * t_area_nonbnd[None, :, None, None]
        didp = didp
    else:
        # Chain rule through the squared-residual objective.
        didp = 2 * self.residuals[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3)).T[:,:,None,None] * didp * t_area_nonbnd[None, :, None, None]
    n_channels = np.atleast_3d(observed).shape[2]
    ####### 2: Take the data and copy the corresponding dxs and dys to these new pixels.
    ### Derivatives wrt V:
    pixels = np.where(((~boundaryImage) & (visibility != 4294967295)).ravel())[0]
    IS = np.tile(col(pixels), (1, 2 * f.shape[1])).ravel()
    JS = col(f[nonBoundaryFaces].ravel())
    JS = np.hstack((JS * 2, JS * 2 + 1)).ravel()
    if n_channels > 1:
        IS = np.concatenate([IS * n_channels + i for i in range(n_channels)])
        JS = np.concatenate([JS for i in range(n_channels)])
    data = didp.ravel()
    ij = np.vstack((IS.ravel(), JS.ravel()))
    result_wrt_verts_nonbnd = sp.csc_matrix((data, ij), shape=(image_width * image_height * n_channels, num_verts * 2))
    if np.any(boundaryImage):
        result_wrt_verts = result_wrt_verts_bnd + result_wrt_verts_bnd_bar + result_wrt_verts_nonbnd
    else:
        result_wrt_verts = result_wrt_verts_nonbnd
    return result_wrt_verts
def compute_derivatives_vc(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
    """Build the sparse Jacobian of the rendered image wrt per-vertex colors.

    Boundary pixels are accumulated from the multisample renders (weighted by
    d_final / d_final_total); interior (non-boundary) pixels contribute their
    plain barycentric weights. When ``self.imageGT`` is set, both parts are
    chained through the squared-residual derivative (factor 2 * residual).

    Returns:
        scipy.sparse.csc_matrix of shape (width * height * 3, vc.size).
    """
    width = self.frustum['width']
    height = self.frustum['height']
    num_channels = 3
    n_channels = num_channels
    vc_size = self.vc.size
    d_final = self.d_final
    # FIX: np.bool was deprecated and removed in NumPy >= 1.24; plain bool is
    # the exact alias it stood for.
    boundaryImage = self.boundarybool_image.astype(bool) & (visibility != 4294967295)
    zerosIm = np.ones(self.boundarybool_image.shape).astype(bool)
    nsamples = self.nsamples
    # 4294967295 == 0xFFFFFFFF marks "no face visible" in the visibility map.
    sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(bool)].reshape([nsamples, -1]) - 1
    sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(bool), :].reshape([nsamples, -1, 3])
    nonBoundaryFaces = visibility[zerosIm * (~boundaryImage) & (visibility != 4294967295)]
    if np.any(boundaryImage):
        ########### Boundary derivatives wrt vc: sample weight * barycentric ####
        # FIX: both branches of the old imageGT check computed the same base
        # expression; compute it once and only add the residual chain rule.
        dImage_wrt_bnd_vc = d_final[:, None] * sampleBarycentric.reshape([-1, 3]) / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1])
        if self.imageGT is not None:
            # Residual renderer: d(residual^2)/dvc = 2 * residual * d(render)/dvc.
            dImage_wrt_bnd_vc = 2 * self.sampleResiduals.reshape([-1, 3]).T[:, :, None] * dImage_wrt_bnd_vc[None, :]
        # Each boundary pixel depends on the three vertices of each sample face.
        pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])
        IS = np.tile(col(pixels), (1, 3)).ravel()
        faces = f[sampleFaces].ravel()
        JS = col(faces)
        data = dImage_wrt_bnd_vc.ravel()
        IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
        JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
        if self.imageGT is None:
            # Same weight for every channel; replicate the data block.
            data = np.concatenate([data for i in range(num_channels)])
        ij = np.vstack((IS.ravel(), JS.ravel()))
        result_wrt_vc_bnd = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
    ########### Non boundary derivatives ####################
    pixels = np.where(((~boundaryImage) & (visibility != 4294967295)).ravel())[0]
    IS = np.tile(col(pixels), (1, 3)).ravel()
    JS = col(f[nonBoundaryFaces].ravel())
    # Interior pixels: the derivative wrt vc is just the barycentric weight.
    dImage_wrt_nonbnd_vc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
    if self.imageGT is not None:
        dImage_wrt_nonbnd_vc = 2 * self.residuals[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3)).T[:, :, None] * dImage_wrt_nonbnd_vc[None, :]
    data = np.asarray(dImage_wrt_nonbnd_vc, order='C').ravel()
    IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
    JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
    if self.imageGT is None:
        data = np.concatenate([data for i in range(num_channels)])
    ij = np.vstack((IS.ravel(), JS.ravel()))
    result_wrt_vc_nonbnd = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
    if np.any(boundaryImage):
        result_wrt_vc = result_wrt_vc_bnd + result_wrt_vc_nonbnd
    else:
        result_wrt_vc = result_wrt_vc_nonbnd
    return result_wrt_vc
def on_changed(self, which):
    """Push changed dependency data (vertices, colors, faces, textures) to GL.

    ``which`` is the set of dterm/term names that changed since the last call.
    """
    super().on_changed(which)
    # FIX: was `if 'v' or 'camera' in which:` — a non-empty string literal is
    # always truthy, so the branch ran unconditionally. Test each key.
    if 'v' in which or 'camera' in which:
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
                self.vbo_verts_mesh[mesh][polygons].set_array(verts_by_face.astype(np.float32))
                self.vbo_verts_mesh[mesh][polygons].bind()
    if 'vc' in which:
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
                self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
                self.vbo_colors_mesh[mesh][polygons].bind()
    if 'f' in which:
        self.vbo_indices.set_array(self.f.astype(np.uint32))
        self.vbo_indices.bind()
        self.vbo_indices_range.set_array(np.arange(self.f.size, dtype=np.uint32).ravel())
        self.vbo_indices_range.bind()
        # flen starts at 1 so that face id 0 can mean "no face" in the id buffer.
        flen = 1
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                # One id per face, replicated for its 3 vertices.
                fc = np.tile(np.arange(flen, flen + len(f))[:, None], [1, 3]).ravel()
                fc = np.asarray(fc, dtype=np.uint32)
                self.vbo_face_ids_list[mesh][polygons].set_array(fc)
                self.vbo_face_ids_list[mesh][polygons].bind()
                flen += len(f)
                self.vbo_indices_mesh_list[mesh][polygons].set_array(np.array(self.f_list[mesh][polygons]).astype(np.uint32))
                self.vbo_indices_mesh_list[mesh][polygons].bind()
    if 'texture_stack' in which:
        # Re-upload every texture from the flat texture_stack. (Inefficient as
        # many might not have changed.)
        if self.initialized:
            textureCoordIdx = 0
            for mesh in range(len(self.f_list)):
                for polygons in range(len(self.f_list[mesh])):
                    texture = None
                    if self.haveUVs_list[mesh][polygons]:
                        texture = self.textureID_mesh_list[mesh][polygons]
                        GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                        image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                        self.textures_list[mesh][polygons] = self.texture_stack[textureCoordIdx:image.size + textureCoordIdx].reshape(image.shape)
                        textureCoordIdx = textureCoordIdx + image.size
                        image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                        # NOTE(review): the reshape swaps width/height before
                        # flattening — looks intentional for the upload layout,
                        # but confirm against the texture allocation.
                        # FIX: ndarray.tostring() was removed in modern NumPy;
                        # tobytes() is the exact replacement.
                        GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                           image.reshape([image.shape[1], image.shape[0], -1]).ravel().tobytes())
    # FIX: was `if 'v' or 'f' or ... in which:` — always True for the same
    # reason as above; re-render only when a relevant dependency changed.
    if any(key in which for key in ('v', 'f', 'vc', 'ft', 'camera', 'texture_stack', 'imageGT')):
        self.render_image_buffers()
def release_textures(self):
    """Delete every GL texture this renderer allocated and reset the registry.

    Safe to call before initialization: if the texture list was never created,
    this is a no-op.
    """
    if not hasattr(self, 'textureID_mesh_list'):
        return
    for mesh_textures in self.textureID_mesh_list:
        for tex in mesh_textures:
            if tex is not None:
                GL.glDeleteTextures(1, [tex.value])
    self.textureID_mesh_list = []
@depends_on(dterms + terms)
def color_image(self):
    """Render and return the color image (filled polygons, single pass).

    Overdraw/boundary re-rendering is intentionally disabled in this renderer.
    """
    self._call_on_changed()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    return self.draw_color_image(with_vertex_colors=True, with_texture_on=True)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def barycentric_image(self):
    """Per-pixel barycentric coordinates; boundary mask only when overdrawing."""
    self._call_on_changed()
    boundary_mask = self.boundarybool_image if self.overdraw else None
    return self.draw_barycentric_image(boundary_mask)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def visibility_image(self):
    """Per-pixel visible-face ids; boundary mask only when overdrawing."""
    self._call_on_changed()
    boundary_mask = self.boundarybool_image if self.overdraw else None
    return self.draw_visibility_image(self.v.r, self.f, boundary_mask)
def image_mesh_bool(self, meshes):
    """Return a boolean mask that is True wherever any mesh in ``meshes`` renders.

    Each requested mesh is drawn with its index color into the FBO, then the
    red channel is read back; non-zero means covered.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    self._call_on_changed()
    GL.glClearColor(0., 0., 0., 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glUseProgram(self.colorProgram)
    for mesh in meshes:
        self.draw_index(mesh)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # FIX: reshape was (height, height, 3). glReadPixels returns height rows of
    # width pixels, so the second dimension must be the width; the old code only
    # worked for square frustums.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3).astype(np.uint32))[:, :, 0]
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    return result != 0
@depends_on(dterms + terms)
def indices_image(self):
    """Render every mesh with a flat color equal to its index and read it back.

    Returns the red channel as uint32, i.e. a per-pixel mesh-index map.
    """
    self._call_on_changed()
    self.makeCurrentContext()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    self._call_on_changed()
    GL.glClearColor(0., 0., 0., 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glUseProgram(self.colorProgram)
    for index in range(len(self.f_list)):
        self.draw_index(index)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # FIX: reshape was (height, height, 3); the pixel buffer is height rows of
    # width pixels, so use (height, width, 3). Old code broke for non-square frustums.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3).astype(np.uint32))[:, :, 0]
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    return result
def draw_index(self, index):
    # Draw mesh `index` with a constant color of index/255 in every channel,
    # so a readback of any channel recovers the mesh index per pixel.
    mesh = index
    # Model-view-projection: camera view matrix padded to 4x4, premultiplied by
    # the GL coordinate-flip matrix (openglMat), then the projection.
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    vc = self.vc_list[mesh]
    for polygons in np.arange(len(self.f_list[mesh])):
        vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
        GL.glBindVertexArray(vao_mesh)
        f = self.f_list[mesh][polygons]
        vbo_color = self.vbo_colors_mesh[mesh][polygons]
        colors_by_face = np.asarray(vc.reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
        # Constant color encoding the mesh index (shape taken from the real colors).
        colors = np.array(np.ones_like(colors_by_face) * (index) / 255.0, dtype=np.float32)
        # Pol: Make a static zero vbo_color to make it more efficient?
        vbo_color.set_array(colors)
        vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
        vbo_color.bind()
        # Two vertices per face means edges; otherwise triangles.
        if self.f.shape[1] == 2:
            primtype = GL.GL_LINES
        else:
            primtype = GL.GL_TRIANGLES
        GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, MVP)
        GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
def draw_texcoord_image(self, v, f, ft, boundarybool_image=None):
    """Render texture coordinates into the FBO and read them back as floats.

    The R,G channels carry the UVs; the B channel encodes which mesh's texture
    each pixel belongs to (mesh index / number of meshes). The V coordinate is
    flipped on readback to match image convention.
    """
    self.makeCurrentContext()
    shaders.glUseProgram(self.colorProgram)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # want vtc: texture-coordinates per vertex (not per element in vc)
    colors = ft
    # use the third channel to identify the corresponding textures.
    color3 = np.vstack([np.ones([self.ft_list[mesh].shape[0], 1]) * mesh for mesh in range(len(self.ft_list))]).astype(np.float32) / len(
        self.ft_list)
    colors = np.asarray(np.hstack((colors, color3)), np.float64, order='C')
    self.draw_colored_primitives(self.vao_dyn, v, f, colors)
    # Optional second wireframe pass over boundary pixels (overdraw support).
    if boundarybool_image is not None:
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        self.draw_colored_primitives(self.vao_dyn, v, f, colors)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # FIX: reshape was (height, height, 3); the buffer is height rows of width
    # pixels, so the second dimension must be the width.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3)[:, :, :3].astype(np.float64)) / 255.0
    result[:, :, 1] = 1. - result[:, :, 1]
    return result
@depends_on('ft', 'textures')
def mesh_tex_coords(self):
    # Per-face-vertex texture coordinates with the V channel flipped (1 - v).
    ftidxs = self.ft.ravel()  # NOTE(review): computed but never used here.
    data = self.ft
    # Pol: careful with this:
    # NOTE(review): `data` aliases self.ft, so the flip below mutates self.ft
    # in place — a second evaluation would flip it back. Confirm this is only
    # ever computed once per ft change (depends_on caching).
    data[:, 1] = 1.0 - 1.0 * data[:, 1]
    return data
# Depends on 'f' because vpe/fpe depend on f
# Pol: Check that depends on works on other attributes that depend_on x, if x changes.
@depends_on('ft', 'f')
def wireframe_tex_coords(self):
    """Texture coordinates for wireframe edges: one (u, v) per edge endpoint.

    Scatters the per-face-vertex tex coords into a per-vertex table, then
    gathers them by the edge-vertex indices in ``self.ma``.
    """
    # FIX: `self.v.r.size / 3` is a float under Python 3 and np.zeros rejects
    # non-integer dimensions; use integer division.
    vvt = np.zeros((self.v.r.size // 3, 2), dtype=np.float64, order='C')
    vvt[self.f.flatten()] = self.mesh_tex_coords
    # FIX: removed a dead np.zeros((self.vpe.size, 2)) pre-allocation that was
    # immediately overwritten, and a leftover debug print.
    return vvt[self.ma.ravel()]
# TODO: can this not be inherited from base? turning off texture mapping in that instead?
@depends_on(dterms + terms)
def boundaryid_image(self):
    # Render boundary-edge identities with the plain color program, then
    # restore the textured program so subsequent draws are unaffected.
    self._call_on_changed()
    # self.texture_mapping_of
    self.makeCurrentContext()
    GL.glUseProgram(self.colorProgram)
    result = self.draw_boundaryid_image(self.v.r, self.f, self.vpe, self.fpe, self.camera)
    GL.glUseProgram(self.colorTextureProgram)
    # self.texture_mapping_on(with_vertex_colors=True)
    return result
def draw_color_image(self, with_vertex_colors=True, with_texture_on=True):
    """Render the scene into the FBO and return it as a float image in [0, 1].

    Args:
        with_vertex_colors: if False, vertex colors are replaced by ones so
            only the texture contributes.
        with_texture_on: if True, meshes with UVs use the textured program.

    Pixels where nothing is visible are filled from ``self.background_image``
    when that attribute exists.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glEnable(GL.GL_MULTISAMPLE)
    if hasattr(self, 'bgcolor'):
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels], self.bgcolor.r[2 % self.num_channels], 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # Draw into the multisampled FBO when MSAA is on, else the plain one.
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            f = self.f_list[mesh][polygons]
            # FIX: removed unused `verts_by_face` local (computed, never used).
            vbo_color = self.vbo_colors_mesh[mesh][polygons]
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vc = colors_by_face
            if with_vertex_colors:
                colors = vc.astype(np.float32)
            else:
                # Only texture.
                colors = np.ones_like(vc).astype(np.float32)
            # Pol: Make a static zero vbo_color to make it more efficient?
            vbo_color.set_array(colors)
            vbo_color.bind()
            if self.f.shape[1] == 2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            if with_texture_on and self.haveUVs_list[mesh][polygons]:
                GL.glUseProgram(self.colorTextureProgram)
                texture = self.textureID_mesh_list[mesh][polygons]
                GL.glActiveTexture(GL.GL_TEXTURE0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                GL.glUniform1i(self.textureID, 0)
            else:
                GL.glUseProgram(self.colorProgram)
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
    # Resolve the (possibly multisampled) buffer into the readable FBO.
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'],
                         GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # FIX: reshape was (height, height, 3); the pixel buffer is height rows of
    # width pixels, so use (height, width, 3). Old code broke for non-square frustums.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3).astype(np.float64)) / 255.0
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glDisable(GL.GL_MULTISAMPLE)
    GL.glClearColor(0., 0., 0., 1.)
    if hasattr(self, 'background_image'):
        # 0xFFFFFFFF in the visibility image marks background pixels.
        bg_px = np.tile(np.atleast_3d(self.visibility_image) == 4294967295, (1, 1, 3))
        fg_px = 1 - bg_px
        result = bg_px * self.background_image + fg_px * result
    return result
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image_quantized(self):
    # Quantize the per-pixel UV image into flat texel indices plus a per-pixel
    # texture id, so colors can be gathered from the texture with fancy indexing.
    texcoord_image = self.texcoord_image[:, :, :2].copy()
    # Temporary (original note): caches the first mesh's first texture.
    # NOTE(review): this overwrites self.texture_image as a side effect and
    # scales all pixels by that single texture's shape — confirm it is correct
    # when multiple textures of different sizes exist.
    self.texture_image = self.textures_list[0][0].r.copy()
    # Scale normalized UVs to texel coordinates.
    texcoord_image[:, :, 0] *= self.texture_image.shape[1] - 1
    texcoord_image[:, :, 1] *= self.texture_image.shape[0] - 1
    # Third channel of texcoord_image encodes mesh index / len(ft_list).
    texture_idx = (self.texcoord_image[:, :, 2] * len(self.ft_list)).astype(np.uint32)
    texcoord_image = np.round(texcoord_image)
    # Flatten (x, y) into a single index: x + y * texture_width.
    texcoord_image = texcoord_image[:, :, 0] + texcoord_image[:, :, 1] * self.texture_image.shape[1]
    return texcoord_image, texture_idx
def checkBufferNum(self):
    # Sanity probe: requests one buffer name from GL; raises if no live context.
    GL.glGenBuffers(1)
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image(self):
    """Per-pixel texture-coordinate image; boundary mask only when overdrawing."""
    boundary_mask = self.boundarybool_image if self.overdraw else None
    return self.draw_texcoord_image(self.v.r, self.f, self.ft, boundary_mask)
class ResidualRendererOpenDR(ColoredRenderer):
terms = 'f', 'frustum', 'vt', 'ft', 'background_image', 'overdraw', 'ft_list', 'haveUVs_list', 'textures_list', 'vc_list', 'imageGT'
dterms = 'vc', 'camera', 'bgcolor', 'texture_stack', 'v'
def __init__(self):
    # All state setup is inherited from ColoredRenderer; this subclass only
    # differs in its GL programs and derivative computations.
    super().__init__()
def clear(self):
    """Flush GL and release every VBO, VAO, texture and program this renderer owns.

    Best-effort: if the renderer was never initialized, the failure is logged
    and swallowed so teardown can proceed.
    """
    try:
        GL.glFlush()
        GL.glFinish()
        # Empty, unbind and delete every VBO group.
        [vbo.set_array(np.array([])) for sublist in self.vbo_indices_mesh_list for vbo in sublist]
        [vbo.bind() for sublist in self.vbo_indices_mesh_list for vbo in sublist]
        [vbo.unbind() for sublist in self.vbo_indices_mesh_list for vbo in sublist]
        [vbo.delete() for sublist in self.vbo_indices_mesh_list for vbo in sublist]
        [vbo.set_array(np.array([])) for sublist in self.vbo_colors_mesh for vbo in sublist]
        [vbo.bind() for sublist in self.vbo_colors_mesh for vbo in sublist]
        [vbo.unbind() for sublist in self.vbo_colors_mesh for vbo in sublist]
        [vbo.delete() for sublist in self.vbo_colors_mesh for vbo in sublist]
        [vbo.set_array(np.array([])) for sublist in self.vbo_verts_mesh for vbo in sublist]
        [vbo.bind() for sublist in self.vbo_verts_mesh for vbo in sublist]
        [vbo.unbind() for sublist in self.vbo_verts_mesh for vbo in sublist]
        [vbo.delete() for sublist in self.vbo_verts_mesh for vbo in sublist]
        [vbo.set_array(np.array([])) for sublist in self.vbo_uvs_mesh for vbo in sublist]
        [vbo.bind() for sublist in self.vbo_uvs_mesh for vbo in sublist]
        [vbo.unbind() for sublist in self.vbo_uvs_mesh for vbo in sublist]
        [vbo.delete() for sublist in self.vbo_uvs_mesh for vbo in sublist]
        [vbo.set_array(np.array([])) for sublist in self.vbo_face_ids_list for vbo in sublist]
        [vbo.bind() for sublist in self.vbo_face_ids_list for vbo in sublist]
        [vbo.unbind() for sublist in self.vbo_face_ids_list for vbo in sublist]
        [vbo.delete() for sublist in self.vbo_face_ids_list for vbo in sublist]
        [GL.glDeleteVertexArrays(1, [vao.value]) for sublist in self.vao_tex_mesh_list for vao in sublist]
        self.release_textures()
        if self.glMode == 'glfw':
            import glfw
            glfw.make_context_current(self.win)
        GL.glDeleteProgram(self.colorTextureProgram)
        super().clear()
    except Exception:
        # FIX: was a bare `except:` that dropped into pdb.set_trace(), which
        # hangs any non-interactive run. Log the traceback instead.
        import traceback
        traceback.print_exc()
        print("Program had not been initialized")
def initGLTexture(self):
    # Build the textured-rendering GL program, then one VAO per (mesh, polygon
    # group) with vertex/color/UV/index VBOs and, where UVs exist, an uploaded
    # float RGB texture. Populates the self.vbo_* / vao / textureID registries.
    print("Initializing Texture OpenGL.")
    FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
// Interpolated values from the vertex shaders
//#extension GL_EXT_shader_image_load_store : enable
in vec3 theColor;
in vec2 UV;
uniform sampler2D myTextureSampler;
// Ouput data
out vec3 color;
void main(){
color = theColor * texture2D( myTextureSampler, UV).rgb;
}""", GL.GL_FRAGMENT_SHADER)
    VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout(location = 2) in vec2 vertexUV;
uniform mat4 MVP;
out vec3 theColor;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
theColor = color;
UV = vertexUV;
}""", GL.GL_VERTEX_SHADER)
    self.colorTextureProgram = shaders.compileProgram(VERTEX_SHADER, FRAGMENT_SHADER)
    # Look up attribute/uniform locations once; reused for every VAO below.
    position_location = GL.glGetAttribLocation(self.colorTextureProgram, 'position')
    color_location = GL.glGetAttribLocation(self.colorTextureProgram, 'color')
    uvs_location = GL.glGetAttribLocation(self.colorTextureProgram, 'vertexUV')
    # color_location_ub = GL.glGetAttribLocation(self.colorProgram, 'color')
    self.MVP_texture_location = GL.glGetUniformLocation(self.colorTextureProgram, 'MVP')
    self.vbo_indices_mesh_list = []
    self.vbo_colors_mesh = []
    self.vbo_verts_mesh = []
    self.vao_tex_mesh_list = []
    self.vbo_uvs_mesh = []
    self.textureID_mesh_list = []
    # GL.glEnable(GL.GL_LINE_SMOOTH)
    # GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)
    GL.glLineWidth(2.)
    self.line_width = 2.
    for mesh in range(len(self.f_list)):
        vaos_mesh = []
        vbo_indices_mesh = []
        vbo_face_ids_mesh = []  # NOTE(review): never filled or stored — looks vestigial.
        vbo_colors_mesh = []
        vbo_vertices_mesh = []
        vbo_uvs_mesh = []
        textureIDs_mesh = []
        for polygons in range(len(self.f_list[mesh])):
            vao = GL.GLuint(0)
            GL.glGenVertexArrays(1, vao)
            GL.glBindVertexArray(vao)
            f = self.f_list[mesh][polygons]
            # Expand per-vertex data into per-face-vertex arrays for the VBOs.
            verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_verts = vbo.VBO(np.array(verts_by_face).astype(np.float32))
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vbo_colors = vbo.VBO(np.array(colors_by_face).astype(np.float32))
            uvs_by_face = np.asarray(self.ft_list[mesh].reshape((-1, 2))[f.ravel()], dtype=np.float32, order='C')
            vbo_uvs = vbo.VBO(np.array(uvs_by_face).astype(np.float32))
            vbo_indices = vbo.VBO(np.array(self.f_list[mesh][polygons]).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
            vbo_indices.bind()
            vbo_verts.bind()
            GL.glEnableVertexAttribArray(position_location)  # from 'location = 0' in shader
            GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            vbo_colors.bind()
            GL.glEnableVertexAttribArray(color_location)  # from 'location = 1' in shader
            GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            if self.haveUVs_list[mesh][polygons]:
                vbo_uvs.bind()
                GL.glEnableVertexAttribArray(uvs_location)  # from 'location = 2' in shader
                GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
            # Textures: only allocated for polygon groups that have UVs;
            # the list still gets a None entry otherwise to keep indices aligned.
            texture = None
            if self.haveUVs_list[mesh][polygons]:
                texture = GL.GLuint(0)
                GL.glGenTextures(1, texture)
                GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
                GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
                GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                # Upload the texture flipped vertically (GL's origin is bottom-left).
                image = np.array(np.flipud((self.textures_list[mesh][polygons])), order='C', dtype=np.float32)
                GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
            textureIDs_mesh = textureIDs_mesh + [texture]
            vbo_indices_mesh = vbo_indices_mesh + [vbo_indices]
            vbo_colors_mesh = vbo_colors_mesh + [vbo_colors]
            vbo_vertices_mesh = vbo_vertices_mesh + [vbo_verts]
            vbo_uvs_mesh = vbo_uvs_mesh + [vbo_uvs]
            vaos_mesh = vaos_mesh + [vao]
        self.textureID_mesh_list = self.textureID_mesh_list + [textureIDs_mesh]
        self.vao_tex_mesh_list = self.vao_tex_mesh_list + [vaos_mesh]
        self.vbo_indices_mesh_list = self.vbo_indices_mesh_list + [vbo_indices_mesh]
        self.vbo_colors_mesh = self.vbo_colors_mesh + [vbo_colors_mesh]
        self.vbo_verts_mesh = self.vbo_verts_mesh + [vbo_vertices_mesh]
        self.vbo_uvs_mesh = self.vbo_uvs_mesh + [vbo_uvs_mesh]
    # Leave no texture/VAO bound.
    GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
    GL.glBindVertexArray(0)
    self.textureID = GL.glGetUniformLocation(self.colorTextureProgram, "myTextureSampler")
def initGL_AnalyticRenderer(self):
self.updateRender = True
self.updateDerivatives = True
GL.glEnable(GL.GL_MULTISAMPLE)
# GL.glHint(GL.GL_MULTISAMPLE_FILTER_HINT_NV, GL.GL_NICEST);
GL.glEnable(GL.GL_SAMPLE_SHADING)
GL.glMinSampleShading(1.0)
VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 colorIn;
layout(location = 2) in vec2 vertexUV;
layout(location = 3) in uint face_id;
layout(location = 4) in vec3 barycentric;
uniform mat4 MVP;
out vec3 theColor;
out vec4 pos;
flat out uint face_out;
out vec3 barycentric_vert_out;
out vec2 UV;
// Values that stay constant for the whole mesh.
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP* vec4(position,1);
pos = MVP * vec4(position,1);
//pos = pos4.xyz;
theColor = colorIn;
UV = vertexUV;
face_out = face_id;
barycentric_vert_out = barycentric;
}""", GL.GL_VERTEX_SHADER)
ERRORS_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
//layout(early_fragment_tests) in;
// Interpolated values from the vertex shaders
in vec3 theColor;
in vec2 UV;
flat in uint face_out;
in vec4 pos;
in vec3 barycentric_vert_out;
layout(location = 3) uniform sampler2D myTextureSampler;
uniform float ww;
uniform float wh;
// Ouput data
layout(location = 0) out vec3 color;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 barycentric1;
layout(location = 4) out vec2 barycentric2;
void main(){
vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;
color = finalColor.rgb;
sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);
sample_face = face_out;
barycentric1 = barycentric_vert_out.xy;
barycentric2 = vec2(barycentric_vert_out.z, 0.);
}""", GL.GL_FRAGMENT_SHADER)
self.errorTextureProgram = shaders.compileProgram(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)
FETCH_VERTEX_SHADER = shaders.compileShader("""#version 330 core
// Input vertex data, different for all executions of this shader.
void main() {}
""", GL.GL_VERTEX_SHADER)
FETCH_GEOMETRY_SHADER = shaders.compileShader("""#version 330 core
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
const vec2 data[4] = vec2[]
(
vec2(-1.0, 1.0),
vec2(-1.0, -1.0),
vec2( 1.0, 1.0),
vec2( 1.0, -1.0)
);
void main() {
for (int i = 0; i < 4; ++i) {
gl_Position = vec4( data[i], 0.0, 1.0 );
EmitVertex();
}
EndPrimitive();
}""", GL.GL_GEOMETRY_SHADER)
FETCH_FRAGMENT_SHADER = shaders.compileShader("""#version 330 core
#extension GL_ARB_explicit_uniform_location : enable
#extension GL_ARB_explicit_attrib_location : enable
layout(location = 2) uniform sampler2DMS colors;
layout(location = 3) uniform sampler2DMS sample_positions;
layout(location = 4) uniform usampler2DMS sample_faces;
layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;
layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;
//layout(location = 7) uniform sampler2D imageGT;
uniform float ww;
uniform float wh;
uniform int sample;
// Ouput data
layout(location = 0) out vec3 colorFetchOut;
layout(location = 1) out vec2 sample_pos;
layout(location = 2) out uint sample_face;
layout(location = 3) out vec2 sample_barycentric1;
layout(location = 4) out vec2 sample_barycentric2;
//layout(location = 5) out vec3 res;
//out int gl_SampleMask[];
const int all_sample_mask = 0xffff;
void main(){
ivec2 texcoord = ivec2(gl_FragCoord.xy);
colorFetchOut = texelFetch(colors, texcoord, sample).xyz;
sample_pos = texelFetch(sample_positions, texcoord, sample).xy;
sample_face = texelFetch(sample_faces, texcoord, sample).r;
sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;
sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;
//vec3 imgColor = texture2D(imageGT, gl_FragCoord.xy/vec2(ww,wh)).rgb;
//res = imgColor - colorFetchOut;
}""", GL.GL_FRAGMENT_SHADER)
GL.glClampColor(GL.GL_CLAMP_READ_COLOR, False)
# GL.glClampColor(GL.GL_CLAMP_VERTEX_COLOR, False)
# GL.glClampColor(GL.GL_CLAMP_FRAGMENT_COLOR, False)
self.fetchSamplesProgram = shaders.compileProgram(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER, FETCH_FRAGMENT_SHADER)
self.textureGT = GL.GLuint(0)
# GL.glActiveTexture(GL.GL_TEXTURE1)
# GL.glGenTextures(1, self.textureGT)
# GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureGT)
# self.textureGTLoc = GL.glGetUniformLocation(self.errorTextureProgram, "imageGT")
# GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
# #
# try:
# if self.imageGT.r is not None and self.imageGT.r.size != 0: #if GT image is defined.
# image = np.array(np.flipud((self.imageGT.r)), order='C', dtype=np.float32)
# GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
# GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
# except:
# pass
# GL.glGenTextures(1, self.textureEdges)
# GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureEdges)
# GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT,1)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
# texture = GL.GLuint(0)
# GL.glGenTextures(1, texture)
# GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
# image = np.array(np.flipud((self.textures_list[mesh][polygons])), order='C', dtype=np.float32)
# GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.shape[0])
# GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
GL.glActiveTexture(GL.GL_TEXTURE0)
whitePixel = np.ones([4, 4, 3])
self.whitePixelTextureID = GL.GLuint(0)
GL.glGenTextures(1, self.whitePixelTextureID)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.whitePixelTextureID)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)
GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)
GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_WRAP_S,GL.GL_CLAMP_TO_EDGE)
GL.glTexParameteri(GL.GL_TEXTURE_2D,GL.GL_TEXTURE_WRAP_T,GL.GL_CLAMP_TO_EDGE)
image = np.array(np.flipud((whitePixel)), order='C', dtype=np.float32)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB32F, image.shape[1], image.shape[0], 0, GL.GL_RGB, GL.GL_FLOAT, image)
# GL.glTexStorage2D(GL.GL_TEXTURE_2D, 1, GL.GL_RGBA8, image.shape[1], image.shape[0])
# GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_FLOAT, image)
self.fbo_ms_errors = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glEnable(GL.GL_MULTISAMPLE)
# GL.glHint(GL.GL_MULTISAMPLE_FILTER_HINT_NV, GL.GL_NICEST);
GL.glEnable(GL.GL_SAMPLE_SHADING)
GL.glMinSampleShading(1.0)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)
self.texture_errors_render = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RGB8, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)
self.texture_errors_sample_position = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)
self.texture_errors_sample_faces = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_R32UI, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)
#
self.texture_errors_sample_barycentric1 = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1,
0)
self.texture_errors_sample_barycentric2 = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_RG32F, self.frustum['width'], self.frustum['height'], False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2,
0)
self.z_buf_ms_errors = GL.glGenTextures(1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)
GL.glTexImage2DMultisample(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'],
False)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D_MULTISAMPLE, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
# GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
GL.glFramebufferTexture2D(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)
# self.z_buf_ms_errors = GL.glGenRenderbuffers(1)
# GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_ms_errors)
# GL.glRenderbufferStorageMultisample(GL.GL_RENDERBUFFER, self.nsamples, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
# GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_ms_errors)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
# GL.glDisable(GL.GL_CULL_FACE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
self.fbo_sample_fetch = GL.glGenFramebuffers(1)
GL.glDepthMask(GL.GL_TRUE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)
self.render_buffer_fetch_sample_render = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)
self.render_buffer_fetch_sample_position = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)
self.render_buffer_fetch_sample_face = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)
#
self.render_buffer_fetch_sample_barycentric1 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)
self.render_buffer_fetch_sample_barycentric2 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)
self.z_buf_samples_errors = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, self.z_buf_samples_errors)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glDisable(GL.GL_CULL_FACE)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
# FBO_f
self.fbo_errors_nonms = GL.glGenFramebuffers(1)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)
render_buf_errors_render = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_render)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, render_buf_errors_render)
render_buf_errors_sample_position = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_RENDERBUFFER, render_buf_errors_sample_position)
render_buf_errors_sample_face = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.GL_RENDERBUFFER, render_buf_errors_sample_face)
#
render_buf_errors_sample_barycentric1 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)
render_buf_errors_sample_barycentric2 = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)
#
z_buf_samples_errors = GL.glGenRenderbuffers(1)
GL.glBindRenderbuffer(GL.GL_RENDERBUFFER, z_buf_samples_errors)
GL.glRenderbufferStorage(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])
GL.glFramebufferRenderbuffer(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.GL_RENDERBUFFER, z_buf_samples_errors)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
GL.glClear(GL.GL_DEPTH_BUFFER_BIT)
print("FRAMEBUFFER ERR: " + str(GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER)))
assert (GL.glCheckFramebufferStatus(GL.GL_FRAMEBUFFER) == GL.GL_FRAMEBUFFER_COMPLETE)
GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
self.textureObjLoc = GL.glGetUniformLocation(self.errorTextureProgram, "myTextureSampler")
# Add background cube:
position_location = GL.glGetAttribLocation(self.errorTextureProgram, 'position')
color_location = GL.glGetAttribLocation(self.errorTextureProgram, 'colorIn')
uvs_location = GL.glGetAttribLocation(self.errorTextureProgram, 'vertexUV')
face_ids_location = GL.glGetAttribLocation(self.errorTextureProgram, 'face_id')
barycentric_location = GL.glGetAttribLocation(self.errorTextureProgram, 'barycentric')
# self.vbo_verts_cube= vbo.VBO(np.array(self.v_bgCube).astype(np.float32))
# self.vbo_colors_cube= vbo.VBO(np.array(self.vc_bgCube).astype(np.float32))
# self.vbo_uvs_cube = vbo.VBO(np.array(self.ft_bgCube).astype(np.float32))
# self.vao_bgCube = GL.GLuint(0)
# GL.glGenVertexArrays(1, self.vao_bgCube)
#
# GL.glBindVertexArray(self.vao_bgCube)
# self.vbo_f_bgCube = vbo.VBO(np.array(self.f_bgCube).astype(np.uint32), target=GL.GL_ELEMENT_ARRAY_BUFFER)
# self.vbo_f_bgCube.bind()
# self.vbo_verts_cube.bind()
# GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# self.vbo_colors_cube.bind()
# GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
# self.vbo_uvs_cube.bind()
# GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
#
# f = self.f_bgCube
# fc = np.tile(np.arange(len(self.f), len(self.f) + len(f))[:, None], [1, 3]).ravel()
# # fc[:, 0] = fc[:, 0] & 255
# # fc[:, 1] = (fc[:, 1] >> 8) & 255
# # fc[:, 2] = (fc[:, 2] >> 16) & 255
# fc = np.asarray(fc, dtype=np.uint32)
# vbo_face_ids_cube = vbo.VBO(fc)
# vbo_face_ids_cube.bind()
# GL.glEnableVertexAttribArray(face_ids_location) # from 'location = 0' in shader
# GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
#
# #Barycentric cube:
# f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
# vbo_barycentric_cube = vbo.VBO(f_barycentric)
# vbo_barycentric_cube.bind()
# GL.glEnableVertexAttribArray(barycentric_location) # from 'location = 0' in shader
# GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
GL.glBindVertexArray(0)
self.vao_quad = GL.GLuint(0)
GL.glGenVertexArrays(1, self.vao_quad)
GL.glBindVertexArray(self.vao_quad)
# Bind VAO
self.vbo_face_ids_list = []
self.vbo_barycentric_list = []
self.vao_errors_mesh_list = []
flen = 1
for mesh in range(len(self.f_list)):
vaos_mesh = []
vbo_face_ids_mesh = []
vbo_barycentric_mesh = []
for polygons in np.arange(len(self.f_list[mesh])):
vao = GL.GLuint(0)
GL.glGenVertexArrays(1, vao)
GL.glBindVertexArray(vao)
vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
vbo_f.bind()
vbo_verts = self.vbo_verts_mesh[mesh][polygons]
vbo_verts.bind()
GL.glEnableVertexAttribArray(position_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
vbo_colors = self.vbo_colors_mesh[mesh][polygons]
vbo_colors.bind()
GL.glEnableVertexAttribArray(color_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
vbo_uvs = self.vbo_uvs_mesh[mesh][polygons]
vbo_uvs.bind()
GL.glEnableVertexAttribArray(uvs_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
f = self.f_list[mesh][polygons]
fc = np.tile(np.arange(flen, flen + len(f))[:, None], [1, 3]).ravel()
# fc[:, 0] = fc[:, 0] & 255
# fc[:, 1] = (fc[:, 1] >> 8) & 255
# fc[:, 2] = (fc[:, 2] >> 16) & 255
fc = np.asarray(fc, dtype=np.uint32)
vbo_face_ids = vbo.VBO(fc)
vbo_face_ids.bind()
GL.glEnableVertexAttribArray(face_ids_location) # from 'location = 0' in shader
GL.glVertexAttribIPointer(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)
f_barycentric = np.asarray(np.tile(np.eye(3), (f.size // 3, 1)), dtype=np.float32, order='C')
vbo_barycentric = vbo.VBO(f_barycentric)
vbo_barycentric.bind()
GL.glEnableVertexAttribArray(barycentric_location) # from 'location = 0' in shader
GL.glVertexAttribPointer(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)
flen += len(f)
vaos_mesh += [vao]
vbo_face_ids_mesh += [vbo_face_ids]
vbo_barycentric_mesh += [vbo_face_ids]
GL.glBindVertexArray(0)
self.vbo_face_ids_list += [vbo_face_ids_mesh]
self.vbo_barycentric_list += [vbo_barycentric_mesh]
self.vao_errors_mesh_list += [vaos_mesh]
def render_image_buffers(self):
GL.glEnable(GL.GL_MULTISAMPLE)
GL.glEnable(GL.GL_SAMPLE_SHADING)
GL.glMinSampleShading(1.0)
self.makeCurrentContext()
if hasattr(self, 'bgcolor'):
GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels], self.bgcolor.r[2 % self.num_channels], 1.)
GL.glUseProgram(self.errorTextureProgram)
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3, GL.GL_COLOR_ATTACHMENT4]
GL.glDrawBuffers(5, drawingBuffers)
# GL.glClearBufferiv(GL.GL_COLOR, 0, 0)
GL.glClearColor(0., 0., 0., 0.)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
#ImageGT
GL.glActiveTexture(GL.GL_TEXTURE1)
# GL.glBindImageTexture(1,self.textureGT, 0, GL.GL_FALSE, 0, GL.GL_READ_ONLY, GL.GL_RGBA8)
GL.glBindTexture(GL.GL_TEXTURE_2D, self.textureGT)
self.textureGTLoc = GL.glGetUniformLocation(self.errorTextureProgram, "imageGT")
GL.glUniform1i(self.textureGTLoc, 1)
wwLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'ww')
whLoc = GL.glGetUniformLocation(self.errorTextureProgram, 'wh')
GL.glUniform1f(wwLoc, self.frustum['width'])
GL.glUniform1f(whLoc, self.frustum['height'])
view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
MVP = np.dot(self.projectionMatrix, view_mtx)
for mesh in range(len(self.f_list)):
for polygons in np.arange(len(self.f_list[mesh])):
vao_mesh = self.vao_errors_mesh_list[mesh][polygons]
vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
GL.glBindVertexArray(vao_mesh)
# vbo_color.bind()
f = self.f_list[mesh][polygons]
colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
self.vbo_colors_mesh[mesh][polygons].bind()
if self.f.shape[1] == 2:
primtype = GL.GL_LINES
else:
primtype = GL.GL_TRIANGLES
assert (primtype == GL.GL_TRIANGLES)
# GL.glUseProgram(self.errorTextureProgram)
if self.haveUVs_list[mesh][polygons]:
texture = self.textureID_mesh_list[mesh][polygons]
else:
texture = self.whitePixelTextureID
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
GL.glUniform1i(self.textureObjLoc, 0)
GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
# # #Background cube:
# GL.glBindVertexArray(self.vao_bgCube)
# self.vbo_f_bgCube.bind()
# texture = self.whitePixelTextureID
# self.vbo_uvs_cube.bind()
#
# GL.glActiveTexture(GL.GL_TEXTURE0)
# GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
# GL.glUniform1i(self.textureObjLoc, 0)
# GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
#
# GL.glDrawElements(primtype, len(self.vbo_f_bgCube)*self.vbo_f_bgCube.data.shape[1], GL.GL_UNSIGNED_INT, None)
# self.draw_visibility_image_ms(self.v, self.f)
# GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, 0)
#
# GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms_errors)
# GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)
# GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
# GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_errors_nonms)
# GL.glDrawBuffer(GL.GL_COLOR_ATTACHMENT0)
# GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'],GL.GL_COLOR_BUFFER_BIT, GL.GL_NEAREST)
# GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_errors_nonms)
# GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
# # result_blit = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
# result_blit2 = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
#
# GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms_errors)
# GL.glFramebufferTexture2D(GL.GL_READ_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)
# GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
# GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_errors_nonms)
# GL.glDrawBuffer(GL.GL_COLOR_ATTACHMENT1)
# GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'],GL.GL_COLOR_BUFFER_BIT, GL.GL_NEAREST)
# GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_errors_nonms)
# GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
# result_blit_pos = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
GL.glUseProgram(self.fetchSamplesProgram)
# GL.glDisable(GL.GL_MULTISAMPLE)
self.colorsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "colors")
self.sample_positionsLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_positions")
self.sample_facesLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_faces")
self.sample_barycentric1Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords1")
self.sample_barycentric2Loc = GL.glGetUniformLocation(self.fetchSamplesProgram, "sample_barycentric_coords2")
# GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# GL.glActiveTexture(GL.GL_TEXTURE2)
# GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_face)
# GL.glUniform1i(self.sample_facesLoc, 2)
wwLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'ww')
whLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'wh')
GL.glUniform1f(wwLoc, self.frustum['width'])
GL.glUniform1f(whLoc, self.frustum['height'])
self.renders = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 3])
self.renders_sample_pos = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 2])
self.renders_faces = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height']]).astype(np.uint32)
self.renders_sample_barycentric1 = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 2])
self.renders_sample_barycentric2 = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 1])
self.renders_sample_barycentric = np.zeros([self.nsamples, self.frustum['width'], self.frustum['height'], 3])
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)
drawingBuffers = [GL.GL_COLOR_ATTACHMENT0, GL.GL_COLOR_ATTACHMENT1, GL.GL_COLOR_ATTACHMENT2, GL.GL_COLOR_ATTACHMENT3,
GL.GL_COLOR_ATTACHMENT4]
GL.glDrawBuffers(5, drawingBuffers)
GL.glClearColor(0., 0., 0., 0.)
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
for sample in np.arange(self.nsamples):
sampleLoc = GL.glGetUniformLocation(self.fetchSamplesProgram, 'sample')
GL.glUniform1i(sampleLoc, sample)
GL.glActiveTexture(GL.GL_TEXTURE0)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)
GL.glUniform1i(self.colorsLoc, 0)
GL.glActiveTexture(GL.GL_TEXTURE1)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position)
GL.glUniform1i(self.sample_positionsLoc, 1)
GL.glActiveTexture(GL.GL_TEXTURE2)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces)
GL.glUniform1i(self.sample_facesLoc, 2)
GL.glActiveTexture(GL.GL_TEXTURE3)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1)
GL.glUniform1i(self.sample_barycentric1Loc, 3)
GL.glActiveTexture(GL.GL_TEXTURE4)
GL.glBindTexture(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2)
GL.glUniform1i(self.sample_barycentric2Loc, 4)
GL.glBindVertexArray(self.vao_quad)
GL.glDrawArrays(GL.GL_POINTS, 0, 1)
# GL.glBindVertexArray(self.vao_bgCube)
# # self.vbo_f_bgCube.bind()
# GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
#
# GL.glDrawElements(primtype, len(self.vbo_f_bgCube) * self.vbo_f_bgCube.data.shape[1], GL.GL_UNSIGNED_INT, None)
GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
result = np.flipud(
np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
self.frustum['height'], self.frustum['height'], 3)[:, :, 0:3].astype(np.float64))
self.renders[sample] = result
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT1)
result = np.flipud(
np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
self.frustum['height'], self.frustum['height'], 3)[:, :, 0:2].astype(np.float64))
self.renders_sample_pos[sample] = result
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
result = np.flipud(
np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RED_INTEGER, GL.GL_UNSIGNED_INT),
np.uint32).reshape(self.frustum['height'], self.frustum['height'])[:, :].astype(np.uint32))
self.renders_faces[sample] = result
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT3)
result = np.flipud(
np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
self.frustum['height'], self.frustum['height'], 3)[:, :, 0:2].astype(np.float64))
self.renders_sample_barycentric1[sample] = result
GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT4)
result = np.flipud(
np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(
self.frustum['height'], self.frustum['height'], 3)[:, :, 0:1].astype(np.float64))
self.renders_sample_barycentric2[sample] = result
self.renders_sample_barycentric[sample] = np.concatenate(
[self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2[sample][:, :, 0:1]], 2)
# GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
# GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT2)
# result = np.flipud(np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_FLOAT), np.float32).reshape(self.frustum['height'], self.frustum['height'], 3)[:,:,0:3].astype(np.float64))
# self.renders_faces[sample] = result
GL.glBindVertexArray(0)
GL.glClearColor(0., 0., 0., 1.)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glDisable(GL.GL_MULTISAMPLE)
##Finally return image and derivatives
self.render_resolved = np.mean(self.renders, 0)
self.updateRender = True
self.updateDerivatives_verts = True
self.updateDerivatives_vc = True
def draw_visibility_image_ms(self, v, f):
"""Assumes camera is set up correctly in"""
GL.glUseProgram(self.visibilityProgram_ms)
v = np.asarray(v)
self.draw_visibility_image_ms(v, f)
# Attach FBO
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
fc = np.arange(1, len(f) + 1)
fc = np.tile(fc.reshape((-1, 1)), (1, 3))
fc[:, 0] = fc[:, 0] & 255
fc[:, 1] = (fc[:, 1] >> 8) & 255
fc[:, 2] = (fc[:, 2] >> 16) & 255
fc = np.asarray(fc, dtype=np.uint8)
self.draw_colored_primitives_ms(self.vao_dyn_ub, v, f, fc)
# this assumes that fc is either "by faces" or "verts by face", not "by verts"
    def draw_colored_primitives_ms(self, vao, v, f, fc=None):
        """Draw triangles of *f* over vertices *v* into the multisampled error FBO.

        Vertices are expanded per face (each face gets its own copies) and
        uploaded to the dynamic VBOs, the MVP uniform is refreshed from the
        camera, and the geometry is drawn into GL_COLOR_ATTACHMENT2 with the
        depth test disabled (re-enabled afterwards).

        :param vao: vertex array object to bind for this draw.
        :param v: vertex positions; reshaped to (-1, 3).
        :param f: face index array (n_faces x verts_per_face).
        :param fc: optional uint8 colors — either one row per face or one row
            per face-vertex ("by faces" or "verts by face", not "by verts").
            When None, no color array is uploaded.
        :raises Exception: if fc matches neither of the accepted layouts.
        """
        # gl.EnableClientState(GL_VERTEX_ARRAY)
        # Expand vertices so each face owns private copies of its vertices.
        verts_by_face = np.asarray(v.reshape((-1, 3))[f.ravel()], dtype=np.float64, order='C')
        # gl.VertexPointer(verts_by_face)
        GL.glBindVertexArray(vao)
        self.vbo_verts_dyn.set_array(verts_by_face.astype(np.float32))
        self.vbo_verts_dyn.bind()
        if fc is not None:
            # gl.EnableClientState(GL_COLOR_ARRAY)
            if fc.size == verts_by_face.size:
                # Already one color per face-vertex.
                vc_by_face = fc
            else:
                # One color per face: replicate it for every vertex of the face.
                vc_by_face = np.repeat(fc, f.shape[1], axis=0)
                if vc_by_face.size != verts_by_face.size:
                    raise Exception('fc must have either rows=(#rows in faces) or rows=(# elements in faces)')
            vc_by_face = np.asarray(vc_by_face, dtype=np.uint8, order='C')
            self.vbo_colors_ub.set_array(vc_by_face)
            self.vbo_colors_ub.bind()
        primtype = GL.GL_TRIANGLES
        # Indices are trivial (0..f.size-1) because vertices were expanded per face.
        self.vbo_indices_dyn.set_array(np.arange(f.size, dtype=np.uint32).ravel())
        self.vbo_indices_dyn.bind()
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)
        drawingBuffers = [GL.GL_COLOR_ATTACHMENT2]
        GL.glDrawBuffers(1, drawingBuffers)
        # Rebuild the 4x4 view matrix from the camera's 3x4 view matrix.
        view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
        GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, np.dot(self.projectionMatrix, view_mtx))
        GL.glDisable(GL.GL_DEPTH_TEST)
        GL.glDrawElements(primtype, len(self.vbo_indices_dyn), GL.GL_UNSIGNED_INT, None)
        GL.glEnable(GL.GL_DEPTH_TEST)
def compute_dr_wrt(self, wrt):
visibility = self.visibility_image
if wrt is self.camera:
derivatives_verts = self.get_derivatives_verts()
return derivatives_verts
elif wrt is self.vc:
derivatives_vc = self.get_derivatives_vc()
return derivatives_vc
# Not working atm.:
elif wrt is self.bgcolor:
return 2. * (self.imageGT.r - self.render_image).ravel() * common.dr_wrt_bgcolor(visibility, self.frustum, num_channels=self.num_channels)
# Not working atm.:
elif wrt is self.texture_stack:
IS = np.nonzero(self.visibility_image.ravel() != 4294967295)[0]
texcoords, texidx = self.texcoord_image_quantized
vis_texidx = texidx.ravel()[IS]
vis_texcoords = texcoords.ravel()[IS]
JS = vis_texcoords * np.tile(col(vis_texidx), [1, 2]).ravel()
clr_im = -2. * (self.imageGT.r - self.render_image) * self.renderWithoutTexture
if False:
cv2.imshow('clr_im', clr_im)
# cv2.imshow('texmap', self.texture_image.r)
cv2.waitKey(1)
r = clr_im[:, :, 0].ravel()[IS]
g = clr_im[:, :, 1].ravel()[IS]
b = clr_im[:, :, 2].ravel()[IS]
data = np.concatenate((r, g, b))
IS = np.concatenate((IS * 3, IS * 3 + 1, IS * 3 + 2))
JS = np.concatenate((JS * 3, JS * 3 + 1, JS * 3 + 2))
return sp.csc_matrix((data, (IS, JS)), shape=(self.r.size, wrt.r.size))
return None
def compute_r(self):
    """Evaluate this node: delegate to render(), which returns either the
    rendered image or (when a ground-truth image is set) the residuals."""
    result = self.render()
    return result
@depends_on(dterms + terms)
def renderWithoutColor(self):
    """Cached color-less rendering; recomputed only when a dependency changes."""
    self._call_on_changed()
    result = self.render_nocolor
    return result
@depends_on(dterms + terms)
def renderWithoutTexture(self):
    """Cached texture-less rendering; recomputed only when a dependency changes."""
    self._call_on_changed()
    result = self.render_notexture
    return result
# @depends_on(dterms+terms)
def render(self):
    """Return the rendered image, or the residual image when a ground-truth
    image (self.imageGT) is set.

    Rendering is lazy: compute_image() runs only while self.updateRender is
    flagged; otherwise the cached results are returned.
    """
    self._call_on_changed()
    vis = self.visibility_image
    # Pixel indices covered by some face (background id is 4294967295).
    vis_idx = np.nonzero(vis.ravel() != 4294967295)[0]
    if self.updateRender:
        image, residuals = self.compute_image(vis_idx, vis, self.f)
        self.render_result = image
        self.residuals_result = residuals
        self.updateRender = False
    return self.render_result if self.imageGT is None else self.residuals_result
def get_derivatives_verts(self):
    """Jacobian of the output image w.r.t. the 2D projected vertex positions.

    Lazily recomputed: while self.updateDerivatives_verts is set, the
    jacobian is rebuilt (rendering first if needed), then cached.
    """
    self._call_on_changed()
    vis = self.visibility_image
    shaded = self.render_resolved
    vis_idx = np.nonzero(vis.ravel() != 4294967295)[0]
    bary = self.barycentric_image
    if self.updateDerivatives_verts:
        if self.updateRender:
            self.render()
        if self.overdraw:
            # Boundary-aware variant: also needs the boundary-id image mask.
            jac = common.dImage_wrt_2dVerts_bnd(shaded, vis_idx, vis, bary, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f, self.boundaryid_image != 4294967295)
        else:
            jac = common.dImage_wrt_2dVerts(shaded, vis_idx, vis, bary, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f)
        self.derivatives_verts = jac
        self.updateDerivatives_verts = False
    return self.derivatives_verts
def get_derivatives_vc(self):
    """Jacobian of the output image w.r.t. the per-vertex colors.

    Lazily recomputed: while self.updateDerivatives_vc is set, the jacobian
    is rebuilt (rendering first if needed), then cached.
    """
    self._call_on_changed()
    vis = self.visibility_image
    shaded = self.render_resolved
    vis_idx = np.nonzero(vis.ravel() != 4294967295)[0]
    bary = self.barycentric_image
    if self.updateDerivatives_vc:
        if self.updateRender:
            self.render()
        jac = self.compute_derivatives_vc(shaded, vis_idx, vis, bary,
                                          self.frustum['width'], self.frustum['height'],
                                          self.v.r.size / 3, self.f)
        self.derivatives_vc = jac
        self.updateDerivatives_vc = False
    return self.derivatives_vc
# # @depends_on(dterms+terms)
# def image_and_derivatives(self):
# # self._call_on_changed()
# visibility = self.visibility_image
#
# color = self.render_resolved
#
# visible = np.nonzero(visibility.ravel() != 4294967295)[0]
# num_visible = len(visible)
#
# barycentric = self.barycentric_image
#
# if self.updateRender:
# render, derivatives = self.compute_image_and_derivatives(color, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)
# self.render = render
# self.derivatives = derivatives
# self.updateRender = False
#
# return self.render, self.derivatives
#
def barycentricDerivatives(self, vertices, faces, verts):
    """Derivative of interpolated (clamped) face colors w.r.t. 2D projections.

    Chains three pieces per face:
      d(color)/d(barycentric coords)           -- the clamped vertex colors,
      d(barycentric)/d(camera-space p0,p1,p2)  -- analytic cross-product form,
      d(camera-space vertex)/d(projected u,v)  -- per-vertex back-projection.

    Parameters (assumptions marked; confirm against callers):
      vertices: flattened triangle vertices, 3 rows per face -- reshaped to
          [-1, 3, 3] after projection.  # assumes 3 verts/face -- TODO confirm
      faces: (nFaces, 3) integer indices into self.vc.
      verts: 3D evaluation points (one per face) for the barycentrics.

    Returns:
      didp: chained derivative tensor; per the original author's note the
      intended layout is VC x Ninput x TriPoints x UV.
    """
    import chumpy as ch  # only needed by the (removed) autodiff checks noted below
    # Homogenize the triangle vertices and map them into camera space.
    vertices = np.concatenate([vertices, np.ones([vertices.size // 3, 1])], axis=1)
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    # viewVerts = negYMat.dot(view_mtx.dot(verts_hom.T).T[:, :3].T).T.reshape([-1, 3])
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerticesNonBnd = camMtx[0:3, 0:3].dot(view_mtx.dot(vertices.T).T[:, :3].T).T.reshape([-1, 3, 3])
    # NOTE(review): a long commented-out chumpy autodiff block used to live
    # here, verifying the analytic derivatives below (building chb0/chb1/chb2
    # and differentiating w.r.t. p0/p1/p2); see version-control history.
    # The projection quantities are recomputed here (redundant with the block
    # above -- kept as-is to preserve behavior).
    view_mtx = np.r_[self.camera.view_mtx, np.array([[0, 0, 0, 1]])]
    camMtx = np.r_[np.c_[self.camera.camera_mtx, np.array([0, 0, 0])], np.array([[0, 0, 0, 1]])]
    verts_hom = np.concatenate([verts.reshape([-1, 3]), np.ones([verts.size // 3, 1])], axis=1)
    # viewVerts = negYMat.dot(view_mtx.dot(verts_hom.T).T[:, :3].T).T.reshape([-1, 3])
    projVerts = (camMtx.dot(view_mtx)).dot(verts_hom.T).T[:, :3].reshape([-1, 3])
    viewVerts = projVerts
    projVerts = projVerts[:, :2] / projVerts[:, 2:3]  # perspective divide -> (u, v)
    # viewVerticesNonBnd = negYMat.dot(view_mtx.dot(vertices.T).T[:, :3].T).T.reshape([-1, 3, 3])
    p0 = viewVerticesNonBnd[:, 0, :]
    p1 = viewVerticesNonBnd[:, 1, :]
    p2 = viewVerticesNonBnd[:, 2, :]
    p0_proj = p0[:, 0:2] / p0[:, 2:3]
    p1_proj = p1[:, 0:2] / p1[:, 2:3]
    p2_proj = p2[:, 0:2] / p2[:, 2:3]
    # D = np.linalg.det(np.concatenate([(p3 - p1).reshape([nNonBndFaces, 1, 3]), (p1 - p2).reshape([nNonBndFaces, 1, 3])], axis=1))
    nt = np.cross(p1 - p0, p2 - p0)  # unnormalized triangle normal
    nt_norm = nt / np.linalg.norm(nt, axis=1)[:, None]
    # a = -nt_norm[:, 0] / nt_norm[:, 2]
    # b = -nt_norm[:, 1] / nt_norm[:, 2]
    # c = np.sum(nt_norm * p0, 1) / nt_norm[:, 2]
    cam_f = 1
    # d(camera-space point)/d(projected u, v), computed per triangle vertex.
    u = p0[:, 0] / p0[:, 2]
    v = p0[:, 1] / p0[:, 2]
    # xudiv = (cam_f - a * u - b * v) ** 2
    # xu = np.c_[c * (cam_f - b * v) / xudiv, a * v * c / xudiv, a * cam_f * c / xudiv]
    # xv = np.c_[b * u * c / xudiv, c * (cam_f - a * u) / xudiv, b * cam_f * c / xudiv]
    xu = np.c_[p0[:, 2][:, None], np.zeros([len(p0), 1]), (-p0[:, 0] / u ** 2)[:, None]]
    xv = np.c_[np.zeros([len(p0), 1]), p0[:, 2][:, None], (-p0[:, 1] / v ** 2)[:, None]]
    dxdp_0 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p1[:, 0] / p1[:, 2]
    v = p1[:, 1] / p1[:, 2]
    # Same construction for p1.
    xu = np.c_[p1[:, 2][:, None], np.zeros([len(p1), 1]), (-p1[:, 0] / u ** 2)[:, None]]
    xv = np.c_[np.zeros([len(p1), 1]), p1[:, 2][:, None], (-p1[:, 1] / v ** 2)[:, None]]
    dxdp_1 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    u = p2[:, 0] / p2[:, 2]
    v = p2[:, 1] / p2[:, 2]
    # Same construction for p2.
    xu = np.c_[p2[:, 2][:, None], np.zeros([len(p2), 1]), (-p2[:, 0] / u ** 2)[:, None]]
    xv = np.c_[np.zeros([len(p2), 1]), p2[:, 2][:, None], (-p2[:, 1] / v ** 2)[:, None]]
    dxdp_2 = np.concatenate([xu[:, :, None], xv[:, :, None]], axis=2)
    # x = u * c / (cam_f - a * u - b * v)
    # y = v*c/(cam_f - a*u - b*v)
    # z = c*cam_f/(cam_f - a*u - b*v)
    A = 0.5 * np.linalg.norm(np.cross(p1 - p0, p2 - p0), axis=1)  # triangle area
    nt_mag = A * 2  # magnitude of the unnormalized normal
    # nt = nt / A
    # db1 = 0.5*np.cross(nt_norm, p2-p1)/A[:, None]
    # db2 = 0.5*np.cross(nt_norm, p0-p2)/A[:, None]
    # db3_2 = 0.5*np.cross(nt_norm, p1-p0)/A[:, None]
    # db3 = - db1 - db2
    p = viewVerts
    pre1 = -1 / (nt_mag[:, None] ** 2) * nt_norm  # prefactor for d(1/|nt|)
    ident = np.identity(3)
    ident = np.tile(ident[None, :], [len(p2), 1, 1])
    # d(nt)/d(p0), d(nt)/d(p1), d(nt)/d(p2) via derivative of the cross product.
    dntdp0 = np.cross((p2 - p0)[:, None, :], -ident) + np.cross(-ident, (p1 - p0)[:, None, :])
    dntdp1 = np.cross((p2 - p0)[:, None, :], ident)
    dntdp2 = np.cross(ident, (p1 - p0)[:, None, :])
    # Pol check this!:
    # Derivative of the unit normal: (I - n n^T) / |nt|, then chain onto dntdp*.
    dntnorm = (ident - np.einsum('ij,ik->ijk', nt_norm, nt_norm)) / nt_mag[:, None, None]
    # dntnorm = (ident - np.einsum('ij,ik->ijk',nt_norm,nt_norm))/nt_mag[:,None,None]
    dntnormdp0 = np.einsum('ijk,ikl->ijl', dntnorm, dntdp0)
    dntnormdp1 = np.einsum('ijk,ikl->ijl', dntnorm, dntdp1)
    dntnormdp2 = np.einsum('ijk,ikl->ijl', dntnorm, dntdp2)
    dpart1p0 = np.einsum('ij,ijk->ik', pre1, dntdp0)
    dpart1p1 = np.einsum('ij,ijk->ik', pre1, dntdp1)
    dpart1p2 = np.einsum('ij,ijk->ik', pre1, dntdp2)
    # Barycentric b0 ~ (nt_norm x (p2 - p1)) . (p - p1) / |nt| and derivatives
    # via product rule: part1 differentiates 1/|nt|, part2 the numerator.
    b0 = np.sum(np.cross(nt_norm, p2 - p1) * (p - p1), axis=1)[:, None]
    db0part2p0 = np.einsum('ikj,ij->ik', np.cross(dntnormdp0.swapaxes(1, 2), (p2 - p1)[:, None, :]), p - p1)
    db0part2p1 = np.einsum('ikj,ij->ik', np.cross(dntnormdp1.swapaxes(1, 2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], -ident), p - p1) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p2 - p1), -ident)
    db0part2p2 = np.einsum('ikj,ij->ik', np.cross(dntnormdp2.swapaxes(1, 2), (p2 - p1)[:, None, :]), p - p1) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], ident), p - p1)
    db0dp0wrtpart1 = dpart1p0 * b0
    db0dp1wrtpart1 = dpart1p1 * b0
    db0dp2wrtpart1 = dpart1p2 * b0
    db0dp0wrtpart2 = 1. / (nt_mag[:, None]) * db0part2p0
    db0dp1wrtpart2 = 1. / (nt_mag[:, None]) * db0part2p1
    db0dp2wrtpart2 = 1. / (nt_mag[:, None]) * db0part2p2
    db0dp0wrt = db0dp0wrtpart1 + db0dp0wrtpart2
    db0dp1wrt = db0dp1wrtpart1 + db0dp1wrtpart2
    db0dp2wrt = db0dp2wrtpart1 + db0dp2wrtpart2
    ###### Same pattern for barycentric b1 (edge p0 - p2, anchored at p2).
    b1 = np.sum(np.cross(nt_norm, p0 - p2) * (p - p2), axis=1)[:, None]
    db1part2p0 = np.einsum('ikj,ij->ik', np.cross(dntnormdp0.swapaxes(1, 2), (p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], ident), p - p2)
    db1part2p1 = np.einsum('ikj,ij->ik', np.cross(dntnormdp1.swapaxes(1, 2), (p0 - p2)[:, None, :]), p - p2)
    db1part2p2 = np.einsum('ikj,ij->ik', np.cross(dntnormdp2.swapaxes(1, 2), (p0 - p2)[:, None, :]), p - p2) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], -ident), p - p2) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p0 - p2), -ident)
    db1dp0wrtpart1 = dpart1p0 * b1
    db1dp1wrtpart1 = dpart1p1 * b1
    db1dp2wrtpart1 = dpart1p2 * b1
    db1dp0wrtpart2 = 1. / (nt_mag[:, None]) * db1part2p0
    db1dp1wrtpart2 = 1. / (nt_mag[:, None]) * db1part2p1
    db1dp2wrtpart2 = 1. / (nt_mag[:, None]) * db1part2p2
    db1dp0wrt = db1dp0wrtpart1 + db1dp0wrtpart2
    db1dp1wrt = db1dp1wrtpart1 + db1dp1wrtpart2
    db1dp2wrt = db1dp2wrtpart1 + db1dp2wrtpart2
    ###### Same pattern for barycentric b2 (edge p1 - p0, anchored at p0).
    b2 = np.sum(np.cross(nt_norm, p1 - p0) * (p - p0), axis=1)[:, None]
    db2part2p0 = np.einsum('ikj,ij->ik', np.cross(dntnormdp0.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], -ident), p - p0) + np.einsum('ik,ikj->ik', np.cross(nt_norm[:, :], p1 - p0), -ident)
    db2part2p1 = np.einsum('ikj,ij->ik', np.cross(dntnormdp1.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0) + np.einsum('ikj,ij->ik', np.cross(
        nt_norm[:, None, :], ident), p - p0)
    db2part2p2 = np.einsum('ikj,ij->ik', np.cross(dntnormdp2.swapaxes(1, 2), (p1 - p0)[:, None, :]), p - p0)
    db2dp0wrtpart1 = dpart1p0 * b2
    db2dp1wrtpart1 = dpart1p1 * b2
    db2dp2wrtpart1 = dpart1p2 * b2
    db2dp0wrtpart2 = 1. / (nt_mag[:, None]) * db2part2p0
    db2dp1wrtpart2 = 1. / (nt_mag[:, None]) * db2part2p1
    db2dp2wrtpart2 = 1. / (nt_mag[:, None]) * db2part2p2
    db2dp0wrt = db2dp0wrtpart1 + db2dp0wrtpart2
    db2dp1wrt = db2dp1wrtpart1 + db2dp1wrtpart2
    db2dp2wrt = db2dp2wrtpart1 + db2dp2wrtpart2
    # Stack the three barycentric derivatives per triangle vertex.
    dp0 = np.concatenate([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]], axis=1)
    dp1 = np.concatenate([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]], axis=1)
    dp2 = np.concatenate([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]], axis=1)
    #
    dp = np.concatenate([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)
    # If dealing with degenerate triangles, ignore that gradient.
    # dp[nt_mag <= 1e-15] = 0
    dp = dp[None, :]
    nFaces = len(faces)
    # visTriVC = self.vc.r[faces.ravel()].reshape([nFaces, 3, 3]).transpose([2, 0, 1])[:, :, :, None, None]
    # Per-face vertex colors, clamped to [0, 1], broadcastable against dp.
    vc = self.vc.r[faces.ravel()].reshape([nFaces, 3, 3]).transpose([2, 0, 1])[:, :, :, None, None]
    vc[vc > 1] = 1
    vc[vc < 0] = 0
    visTriVC = vc
    dxdp = np.concatenate([dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]], axis=1)
    dxdp = dxdp[None, :, None]
    # dbvc = np.sum(dp * visTriVC, 2)
    # dbvc = dp * visTriVC * t_area[None, :, None, None, None]
    # Chain rule: color-weighted barycentric derivatives times d(p)/d(u, v).
    dbvc = dp * visTriVC
    didp = np.sum(dbvc[:, :, :, :, :, None] * dxdp, 4).sum(2)
    # output should be shape: VC x Ninput x Tri Points x UV
    return didp
def compute_image(self, visible, visibility, f):
    """Compose the final color image and, when a ground-truth image is set,
    the per-pixel squared-residual image.

    Interior pixels come straight from self.color_image.  Pixels on the
    occlusion boundary are replaced by a blend of the multisample renders,
    weighted by each sample's distance to the boundary edge, which keeps the
    result differentiable across edges.  Many intermediates are stashed on
    self for reuse by compute_derivatives_verts.

    Returns (finalColor, finalResidual); finalResidual is None when
    self.imageGT is None.  finalColor is clamped to [0, 1].
    """
    # NOTE(review): np.bool is removed in NumPy >= 1.24; this code targets an
    # older NumPy where it aliases the builtin bool -- confirm before upgrading.
    boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
    zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
    edge_visibility = self.boundaryid_image
    nsamples = self.nsamples
    if np.any(boundaryImage):
        # Per-sample positions, colors and face ids at boundary pixels.
        sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape(
            [nsamples, -1, 2])
        # sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:,(zerosIm*boundaryImage).ravel().astype(np.bool),:].reshape([nsamples, -1, 3])
        sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
        boundaryFaces = visibility[(boundaryImage) & (visibility != 4294967295)]
        nBndFaces = len(boundaryFaces)
        # Projected endpoints of the boundary edge under each boundary pixel.
        vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
        vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])
        sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
        # NOTE(review): a commented-out per-sample triangle-area weighting
        # (t_area_bnd) used to live here; see version-control history.
        # Distance from each sample point p to the boundary segment (p1, p2):
        # project p onto the segment's supporting line, then clamp to the
        # nearest endpoint when the projection falls outside the segment.
        p1 = vertsProjBndSamples.reshape([-1,2,2])[:, 0, :]
        p2 = vertsProjBndSamples.reshape([-1,2,2])[:, 1, :]
        p = sampleV.reshape([-1,2])
        l = (p2 - p1)
        linedist = np.sqrt((np.sum(l ** 2, axis=1)))[:, None]
        self.linedist = linedist
        lnorm = l / linedist
        self.lnorm = lnorm
        v1 = p - p1
        self.v1 = v1
        # Scalar projection of (p - p1) onto the edge direction.
        d = v1[:, 0] * lnorm[:, 0] + v1[:, 1] * lnorm[:, 1]
        self.d = d
        intersectPoint = p1 + d[:, None] * lnorm
        v2 = p - p2
        self.v2 = v2
        l12 = (p1 - p2)
        linedist12 = np.sqrt((np.sum(l12 ** 2, axis=1)))[:, None]
        lnorm12 = l12 / linedist12
        d2 = v2[:, 0] * lnorm12[:, 0] + v2[:, 1] * lnorm12[:, 1]
        # Projection falls outside the segment on either side.
        nonIntersect = (d2 < 0) | (d < 0)
        self.nonIntersect = nonIntersect
        argminDistNonIntersect = np.argmin(np.c_[d[nonIntersect], d2[nonIntersect]], 1)
        self.argminDistNonIntersect = argminDistNonIntersect
        # Snap to the nearest endpoint for non-intersecting projections.
        intersectPoint[nonIntersect] = vertsProjBndSamples.reshape([-1,2,2])[nonIntersect][np.arange(nonIntersect.sum()), argminDistNonIntersect]
        lineToPoint = (p - intersectPoint)
        n = lineToPoint
        dist = np.sqrt((np.sum(lineToPoint ** 2, axis=1)))[:, None]
        n_norm = lineToPoint / dist
        self.n_norm = n_norm
        self.dist = dist
        d_final = dist.squeeze()
        # max_nx_ny = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))
        # d_final = d_final / max_nx_ny
        d_final = d_final  # no-op; kept from the disabled normalization above
        # NOTE(review): a commented-out 3D reprojection of the intersection
        # point (viewPointIntersect) used to live here; see history.
        d_finalNP = d_final.copy()
        self.d_final = d_finalNP
        # NOTE(review): commented-out area-weight bookkeeping removed here.
        finalColorBnd = np.ones([self.nsamples, boundaryFaces.size, 3])
        self.d_final_total = d_finalNP.reshape([self.nsamples, -1,1]).sum(0)
        # if self.imageGT is not None:
        # Distance-weighted average of the sample colors per boundary pixel.
        finalColorBnd = sampleColors * d_finalNP.reshape([self.nsamples, -1,1]) / (self.d_final_total.reshape([1, -1,1]))
        # finalColorBnd = areaWeights[:,:,None] * sampleColors * d_finalNP.reshape([self.nsamples, -1,1]) / (self.d_final_total.reshape([1, -1,1]) * areaWeightsTotal[None,:,None])
        self.finalColorBnd = finalColorBnd
        # else:
        # finalColorBnd = sampleColors
        bndColorsImage = np.zeros_like(self.color_image)
        bndColorsImage[(zerosIm * boundaryImage), :] = np.sum(finalColorBnd, axis=0)
        finalColorImageBnd = bndColorsImage
        if self.imageGT is not None:
            # Same distance weighting applied to squared per-sample residuals.
            bndColorsResiduals = np.zeros_like(self.color_image)
            self.sampleResiduals = (sampleColors - self.imageGT.r[(zerosIm * boundaryImage),:][None,:])
            self.sampleResidualsWeighted = self.sampleResiduals**2 * d_finalNP.reshape([self.nsamples, -1,1]) / self.d_final_total.reshape([1, -1,1])
            bndColorsResiduals[(zerosIm * boundaryImage), :] = np.sum(self.sampleResidualsWeighted,0)
    if np.any(boundaryImage):
        # Blend: interior pixels from color_image, boundary pixels from the blend.
        finalColor = (1 - boundaryImage)[:, :, None] * self.color_image + boundaryImage[:, :, None] * finalColorImageBnd
        if self.imageGT is not None:
            self.residuals = (self.color_image - self.imageGT.r)
            errors = self.residuals**2
            finalResidual = (1 - boundaryImage)[:, :, None] * errors + boundaryImage[:, :, None] * bndColorsResiduals
    else:
        finalColor = self.color_image
        if self.imageGT is not None:
            finalResidual = (self.color_image - self.imageGT.r)**2
    if self.imageGT is None:
        finalResidual = None
    # Clamp colors to the displayable range.
    finalColor[finalColor > 1] = 1
    finalColor[finalColor < 0] = 0
    return finalColor, finalResidual
def compute_derivatives_verts(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
width = self.frustum['width']
height = self.frustum['height']
num_channels = 3
n_channels = num_channels
vc_size = self.vc.size
# xdiff = dEdx
# ydiff = dEdy
nVisF = len(visibility.ravel()[visible])
# projVertices = self.camera.r[f[visibility.ravel()[visible]].ravel()].reshape([nVisF,3, 2])
boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
rangeIm = np.arange(self.boundarybool_image.size)
zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
edge_visibility = self.boundaryid_image
vertsProjBnd = self.camera.r[self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()].reshape([-1, 2, 2])
nsamples = self.nsamples
sampleV = self.renders_sample_pos.reshape([nsamples, -1, 2])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape(
[nsamples, -1, 2])
sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
if 4294967295 in sampleFaces:
sampleFaces[sampleFaces==4294967295] = 0 #Not correct but need to check further.
sampleColors = self.renders.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool), :].reshape([nsamples, -1, 3])
nonBoundaryFaces = visibility[zerosIm * (~boundaryImage) & (visibility != 4294967295)]
if np.any(boundaryImage):
n_norm = self.n_norm
dist = self.dist
linedist = self.linedist
d = self.d
v1 = self.v1
lnorm = self.lnorm
d_final = self.d_final
boundaryFaces = visibility[boundaryImage]
nBndFaces = len(boundaryFaces)
# vertsProjBnd[None, :] - sampleV[:,None,:]
vertsProjBndSamples = np.tile(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])
# Computing gradients:
# A multisampled pixel color is given by: w R + (1-w) R' thus:
# 1 derivatives samples outside wrt v 1: (dw * (svc) - dw (bar'*vc') )/ nsamples for face sample
# 2 derivatives samples outside wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
# 3 derivatives samples outside wrt v bar edge: (1-w) (dbar'*vc') )/ nsamples for faces edge (barv1', barv2', 0)
# 4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
# 5 derivatives samples outside wrt vc : (1-w) (bar')/ nsamples for faces edge
# 6 derivatives samples inside wrt v : (dbar'*vc')/ nsamples for faces sample
# 7 derivatives samples inside wrt vc : (bar)/ nsamples for faces sample
# for every boundary pixel i,j we have list of sample faces. compute gradients at each and sum them according to face identity, options:
# - Best: create sparse matrix for every matrix. sum them! same can be done with boundary.
# Finally, stack data, and IJ of nonbnd with bnd on both dwrt_v and dwrt_vc.
######## 1 derivatives samples outside wrt v 1: (dw * (bar*vc) - dw (bar'*vc') )/ nsamples for face sample
# # #Chumpy autodiff code to check derivatives here:
# chEdgeVerts = ch.Ch(vertsProjBndSamples.reshape([-1,2,2]))
#
# chEdgeVerts1 = chEdgeVerts[:,0,:]
# chEdgeVerts2 = chEdgeVerts[:,1,:]
#
# chSampleVerts = ch.Ch(sampleV.reshape([-1,2]))
# # c1 = (chEdgeVerts1 - chSampleVerts)
# # c2 = (chEdgeVerts2 - chSampleVerts)
# # n = (chEdgeVerts2 - chEdgeVerts1)
#
# #Code to check computation of distance below
# # d2 = ch.abs(c1[:,:,0]*c2[:,:,1] - c1[:,:,1]*c2[:,:,0]) / ch.sqrt((ch.sum(n**2,2)))
# # # np_mat = ch.dot(ch.array([[0,-1],[1,0]]), n)
# # np_mat2 = -ch.concatenate([-n[:,:,1][:,:,None], n[:,:,0][:,:,None]],2)
# # np_vec2 = np_mat2 / ch.sqrt((ch.sum(np_mat2**2,2)))[:,:,None]
# # d2 = d2 / ch.maximum(ch.abs(np_vec2[:,:,0]),ch.abs(np_vec2[:,:,1]))
#
# chl = (chEdgeVerts2 - chEdgeVerts1)
# chlinedist = ch.sqrt((ch.sum(chl**2,axis=1)))[:,None]
# chlnorm = chl/chlinedist
#
# chv1 = chSampleVerts - chEdgeVerts1
#
# chd = chv1[:,0]* chlnorm[:,0] + chv1[:,1]* chlnorm[:,1]
# chintersectPoint = chEdgeVerts1 + chd[:,None] * chlnorm
# # intersectPointDist1 = intersectPoint - chEdgeVerts1
# # intersectPointDist2 = intersectPoint - chEdgeVerts2
# # Code to check computation of distances below:
# # lengthIntersectToPoint1 = np.linalg.norm(intersectPointDist1.r,axis=1)
# # lengthIntersectToPoint2 = np.linalg.norm(intersectPointDist2.r,axis=1)
#
# chintersectPoint = chEdgeVerts1 + chd[:,None] * chlnorm
#
# chlineToPoint = (chSampleVerts - chintersectPoint)
# chn_norm = chlineToPoint / ch.sqrt((ch.sum(chlineToPoint ** 2, axis=1)))[:, None]
#
# chdist = chlineToPoint[:,0]*chn_norm[:,0] + chlineToPoint[:,1]*chn_norm[:,1]
#
# # d_final_ch = chdist / ch.maximum(ch.abs(chn_norm[:, 0]), ch.abs(chn_norm[:, 1]))
# d_final_ch = chdist
#
# d_final_ch_weights = sampleColors * (d_final_ch.reshape([self.nsamples, -1]) / ch.sum(d_final_ch.reshape([self.nsamples, -1]), 0))[:,:,None]
#
# d_final_outside = d_final_ch.ravel()
# dwdv = d_final_outside.dr_wrt(chEdgeVerts1)
# rows = np.tile(np.arange(d_final_outside.shape[0])[None, :], [2, 1]).T.ravel()
# cols = np.arange(d_final_outside.shape[0] * 2)
#
# dwdv_r_v1 = np.array(dwdv[rows, cols]).reshape([-1, 2])
#
# dwdv = d_final_outside.dr_wrt(chEdgeVerts2)
# rows = np.tile(np.arange(d_final_ch.shape[0])[None, :], [2, 1]).T.ravel()
# cols = np.arange(d_final_ch.shape[0] * 2)
#
# dwdv_r_v2 = np.array(dwdv[rows, cols]).reshape([-1, 2])
nonIntersect = self.nonIntersect
argminDistNonIntersect = self.argminDistNonIntersect
# max_dx_dy = np.maximum(np.abs(n_norm[:, 0]), np.abs(n_norm[:, 1]))
d_final_np = dist
# d_final_np = dist / max_dx_dy
ident = np.identity(2)
ident = np.tile(ident[None, :], [len(d_final_np), 1, 1])
dlnorm = (ident - np.einsum('ij,ik->ijk', lnorm, lnorm)) / linedist[:, None]
dl_normdp1 = np.einsum('ijk,ikl->ijl', dlnorm, -ident)
dl_normdp2 = np.einsum('ijk,ikl->ijl', dlnorm, ident)
dv1dp1 = -ident
dv1dp2 = 0
dddp1 = np.einsum('ijk,ij->ik', dv1dp1, lnorm) + np.einsum('ij,ijl->il', v1, dl_normdp1)
dddp2 = 0 + np.einsum('ij,ijl->il', v1, dl_normdp2)
dipdp1 = ident + (dddp1[:, None, :] * lnorm[:, :, None]) + d[:, None, None] * dl_normdp1
dipdp2 = (dddp2[:, None, :] * lnorm[:, :, None]) + d[:, None, None] * dl_normdp2
#good up to here.
dndp1 = -dipdp1
dndp2 = -dipdp2
dn_norm = (ident - np.einsum('ij,ik->ijk', n_norm, n_norm)) / dist[:, None]
# dn_normdp1 = np.einsum('ijk,ikl->ijl', dn_norm, dndp1)
# dn_normdp2 = np.einsum('ijk,ikl->ijl', dn_norm, dndp2)
ddistdp1 = np.einsum('ij,ijl->il', n_norm, dndp1)
ddistdp2 = np.einsum('ij,ijl->il', n_norm, dndp2)
# argmax_nx_ny = np.argmax(np.abs(n_norm), axis=1)
# dmax_nx_ny_p1 = np.sign(n_norm)[np.arange(len(n_norm)), argmax_nx_ny][:, None] * dn_normdp1[np.arange(len(dn_normdp1)), argmax_nx_ny]
# dmax_nx_ny_p2 = np.sign(n_norm)[np.arange(len(n_norm)), argmax_nx_ny][:, None] * dn_normdp2[np.arange(len(dn_normdp2)), argmax_nx_ny]
# dd_final_dp1 = -1. / max_dx_dy[:, None] ** 2 * dmax_nx_ny_p1 * dist + 1. / max_dx_dy[:, None] * ddistdp1
# dd_final_dp2 = -1. / max_dx_dy[:, None] ** 2 * dmax_nx_ny_p2 * dist + 1. / max_dx_dy[:, None] * ddistdp2
dd_final_dp1 = ddistdp1
dd_final_dp2 = ddistdp2
# For those non intersecting points straight to the edge:
v1 = self.v1[nonIntersect][argminDistNonIntersect == 0]
v1_norm = v1 / np.sqrt((np.sum(v1 ** 2, axis=1)))[:, None]
dd_final_dp1_nonintersect = -v1_norm
v2 = self.v2[nonIntersect][argminDistNonIntersect == 1]
v2_norm = v2 / np.sqrt((np.sum(v2 ** 2, axis=1)))[:, None]
dd_final_dp2_nonintersect = -v2_norm
dd_final_dp1[nonIntersect][argminDistNonIntersect == 0] = dd_final_dp1_nonintersect
dd_final_dp1[nonIntersect][argminDistNonIntersect == 1] = 0
dd_final_dp2[nonIntersect][argminDistNonIntersect == 1] = dd_final_dp2_nonintersect
dd_final_dp2[nonIntersect][argminDistNonIntersect == 0] = 0
dd_final_dp1_weighted_part1 = -self.d_final[:,None]* np.tile(dd_final_dp1.reshape([self.nsamples, -1, 2]).sum(0)[None,:,:],[self.nsamples,1,1]).reshape([-1, 2])/(np.tile(self.d_final_total[None,:], [self.nsamples, 1,1]).reshape([-1,1])**2)
dd_final_dp1_weighted_part2 = dd_final_dp1 / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1])
dd_final_dp1_weighted = dd_final_dp1_weighted_part1 + dd_final_dp1_weighted_part2
dd_final_dp2_weighted_part1 = -self.d_final[:,None]*np.tile(dd_final_dp2.reshape([self.nsamples, -1, 2]).sum(0)[None,:,:],[self.nsamples,1,1]).reshape([-1, 2])/(np.tile(self.d_final_total[None,:], [self.nsamples, 1,1]).reshape([-1,1])**2)
dd_final_dp2_weighted_part2 = dd_final_dp2 / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1])
dd_final_dp2_weighted = dd_final_dp2_weighted_part1 + dd_final_dp2_weighted_part2
if self.imageGT is None:
dImage_wrt_outside_v1 = sampleColors.reshape([-1,3,1]) * dd_final_dp1_weighted[:, None, :]
dImage_wrt_outside_v2 = sampleColors.reshape([-1,3,1]) * dd_final_dp2_weighted[:, None, :]
else:
dImage_wrt_outside_v1 = self.sampleResiduals.reshape([-1,3,1])**2 * dd_final_dp1_weighted[:, None, :]
dImage_wrt_outside_v2 = self.sampleResiduals.reshape([-1,3,1])**2 * dd_final_dp2_weighted[:, None, :]
# sampleV
# z = dd_final_dp1.reshape([8, -1, 2])
# eq = np.array([np.all(np.sign(z[:, i, :]) == -1) or np.all(np.sign(z[:, i, :]) == 1) for i in range(z.shape[1])])
# dist_ns = dist.reshape([8,-1])
# rightV = sampleV[0, :, 0] > np.max(sampleV[0, :, :], 0)[0] - 1
# dist_ns[0, rightV]
# dImage_wrt_outside_v1.reshape([8, -1, 3, 2])[0, rightV,:]
# d_final_ch_weights
# self.finalColorBnd
### Derivatives wrt V:
pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])
IS = np.tile(col(pixels), (1, 2 * 2)).ravel()
faces = self.vpe[edge_visibility.ravel()[(zerosIm * boundaryImage).ravel().astype(np.bool)]].ravel()
faces = np.tile(faces.reshape([1, -1, 2]), [self.nsamples, 1, 1]).ravel()
JS = col(faces)
JS = np.hstack((JS * 2, JS * 2 + 1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS * n_channels + i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
data1 = dImage_wrt_outside_v1.transpose([1, 0, 2])
data2 = dImage_wrt_outside_v2.transpose([1, 0, 2])
data = np.concatenate([data1[:, :, None, :], data2[:, :, None, :]], 2)
data = data.ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_bnd = sp.csc_matrix((data, ij), shape=(image_width * image_height * n_channels, num_verts * 2))
######## 2 derivatives samples wrt v bar outside: (w * (dbar*vc) )/ nsamples for faces sample
verticesBnd = self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3])
sampleBarycentricBar = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),
:].reshape([-1, 3, 1])
verts = np.sum(self.v.r[f[sampleFaces.ravel()].ravel()].reshape([-1, 3, 3]) * sampleBarycentricBar, axis=1)
dImage_wrt_bar_v = self.barycentricDerivatives(verticesBnd, f[sampleFaces.ravel()], verts).swapaxes(0, 1)
if self.imageGT is None:
# dImage_wrt_bar_v = dImage_wrt_bar_v * d_final[:, None, None, None] * self.t_area_bnd[:, None, None, None] / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
dImage_wrt_bar_v = dImage_wrt_bar_v * d_final[:, None, None, None] / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
# areaTotal = np.tile(self.areaWeightsTotal[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
# d_final_total = np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
# dImage_wrt_bar_v = self.areaWeights.reshape([-1,1,1,1]) * dImage_wrt_bar_v * d_final[:, None, None, None] / (areaTotal*d_final_total)
else:
dImage_wrt_bar_v = 2*self.sampleResiduals.reshape([-1,3])[:,:,None,None] * dImage_wrt_bar_v * d_final[:, None, None, None] * self.t_area_bnd[:, None, None, None] / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1, 1, 1, 1])
### Derivatives wrt V: 2 derivatives samples wrt v bar: (w * (dbar*vc) )/ nsamples for faces sample
# IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])
IS = np.tile(col(pixels), (1, 2 * f.shape[1])).ravel()
faces = f[sampleFaces].ravel()
JS = col(faces)
JS = np.hstack((JS * 2, JS * 2 + 1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS * n_channels + i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
data = np.transpose(dImage_wrt_bar_v, [1, 0, 2, 3]).ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_bnd_bar = sp.csc_matrix((data, ij), shape=(image_width * image_height * n_channels, num_verts * 2))
########### Non boundary derivatives: ####################
nNonBndFaces = nonBoundaryFaces.size
verticesNonBnd = self.v.r[f[nonBoundaryFaces].ravel()]
vertsPerFaceProjBnd = self.camera.r[f[nonBoundaryFaces].ravel()].reshape([-1, 3, 2])
nv = len(vertsPerFaceProjBnd)
p0_proj = np.c_[vertsPerFaceProjBnd[:, 0, :], np.ones([nv, 1])]
p1_proj = np.c_[vertsPerFaceProjBnd[:, 1, :], np.ones([nv, 1])]
p2_proj = np.c_[vertsPerFaceProjBnd[:, 2, :], np.ones([nv, 1])]
t_area_nonbnd = np.abs(np.linalg.det(np.concatenate([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)) * 0.5)
t_area_nonbnd[t_area_nonbnd > 1] = 1
bc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
verts = np.sum(self.v.r[f[nonBoundaryFaces.ravel()].ravel()].reshape([-1, 3, 3]) * bc[:, :, None], axis=1)
didp = self.barycentricDerivatives(verticesNonBnd, f[nonBoundaryFaces.ravel()], verts)
if self.imageGT is None:
# didp = didp * t_area_nonbnd[None, :, None, None]
didp = didp
else:
didp = 2 * self.residuals[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3)).T[:,:,None,None] * didp * t_area_nonbnd[None, :, None, None]
n_channels = np.atleast_3d(observed).shape[2]
####### 2: Take the data and copy the corresponding dxs and dys to these new pixels.
### Derivatives wrt V:
pixels = np.where(((~boundaryImage) & (visibility != 4294967295)).ravel())[0]
IS = np.tile(col(pixels), (1, 2 * f.shape[1])).ravel()
JS = col(f[nonBoundaryFaces].ravel())
JS = np.hstack((JS * 2, JS * 2 + 1)).ravel()
if n_channels > 1:
IS = np.concatenate([IS * n_channels + i for i in range(n_channels)])
JS = np.concatenate([JS for i in range(n_channels)])
data = didp.ravel()
ij = np.vstack((IS.ravel(), JS.ravel()))
result_wrt_verts_nonbnd = sp.csc_matrix((data, ij), shape=(image_width * image_height * n_channels, num_verts * 2))
if np.any(boundaryImage):
result_wrt_verts = result_wrt_verts_bnd + result_wrt_verts_bnd_bar + result_wrt_verts_nonbnd
else:
result_wrt_verts = result_wrt_verts_nonbnd
return result_wrt_verts
def compute_derivatives_vc(self, observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
    """Build the sparse Jacobian of the rendered image w.r.t. vertex colors (vc).

    Two contributions are assembled and summed:
      * boundary pixels: per-subsample barycentric weights scaled by the
        occlusion weights d_final / d_final_total;
      * interior pixels: plain barycentric interpolation weights.

    Returns a scipy CSC matrix of shape
    (width * height * num_channels, vc_size).

    NOTE(review): `observed`, `visible`, `image_width`, `image_height` and
    `num_verts` appear unused here — presumably kept for signature parity
    with the vertex-derivative routine; confirm before removing.
    """
    width = self.frustum['width']
    height = self.frustum['height']
    num_channels = 3
    n_channels = num_channels
    vc_size = self.vc.size
    d_final = self.d_final
    # 4294967295 (0xFFFFFFFF) marks pixels where no face is visible.
    boundaryImage = self.boundarybool_image.astype(np.bool) & (visibility != 4294967295)
    zerosIm = np.ones(self.boundarybool_image.shape).astype(np.bool)
    nsamples = self.nsamples
    # Per-subsample face ids / barycentrics at boundary pixels.
    # The render stores 1-based face ids, hence the trailing "- 1".
    sampleFaces = self.renders_faces.reshape([nsamples, -1])[:, (zerosIm * boundaryImage).ravel().astype(np.bool)].reshape([nsamples, -1]) - 1
    sampleBarycentric = self.renders_sample_barycentric.reshape([nsamples, -1, 3])[:, (zerosIm * boundaryImage).ravel().astype(np.bool),
                        :].reshape([nsamples, -1, 3])
    nonBoundaryFaces = visibility[zerosIm * (~boundaryImage) & (visibility != 4294967295)]
    if np.any(boundaryImage):
        boundaryFaces = visibility[boundaryImage]
        nBndFaces = len(boundaryFaces)
        # Computing gradients:
        # A multisampled pixel color is given by: w R + (1-w) R' thus:
        # 1 derivatives samples wrt v 1: (dw * (svc) - dw (bar'*vc') )/ nsamples for face sample
        # 2 derivatives samples wrt v bar: (w * (dbar*vc) )/ nsamples for faces sample
        # 4 derivatives samples wrt vc : (w * (bar) )/ nsamples for faces sample
        # For every boundary pixel i,j we have a list of sample faces; compute
        # gradients at each and sum them by face identity via a sparse matrix.
        ####### 4 derivatives samples outside wrt vc : (w * (bar) )/ nsamples for faces sample
        if self.imageGT is None:
            dImage_wrt_bnd_vc = d_final[:, None] * sampleBarycentric.reshape([-1,3]) / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1,1])
        else:
            # Same weighting, then scaled by 2*residual (gradient of a squared-error image term).
            dImage_wrt_bnd_vc = d_final[:, None] * sampleBarycentric.reshape([-1,3]) / np.tile(self.d_final_total[None, :], [self.nsamples, 1, 1]).reshape([-1,1])
            dImage_wrt_bnd_vc = 2 * self.sampleResiduals.reshape([-1,3]).T[:,:,None] * dImage_wrt_bnd_vc[None,:]
        ### Derivatives wrt VC:
        # Each pixel relies on three verts.
        pixels = np.tile(np.where(boundaryImage.ravel())[0][None, :], [self.nsamples, 1])
        IS = np.tile(col(pixels), (1, 3)).ravel()
        if 4294967295 in sampleFaces:
            sampleFaces[sampleFaces==4294967295] = 0 #Not correct but need to check further.
        faces = f[sampleFaces].ravel()
        JS = col(faces)
        data = dImage_wrt_bnd_vc.ravel()
        # Expand row/col indices to interleaved per-channel indices.
        IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
        JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
        if self.imageGT is None:
            # Without a GT image the weights are channel-independent: replicate.
            data = np.concatenate([data for i in range(num_channels)])
        ij = np.vstack((IS.ravel(), JS.ravel()))
        result = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
        result_wrt_vc_bnd = result
    ########### Non boundary derivatives: ####################
    nNonBndFaces = nonBoundaryFaces.size
    ### Derivatives wrt VC:
    # Each pixel relies on three verts.
    pixels = np.where(((~boundaryImage) & (visibility != 4294967295)).ravel())[0]
    IS = np.tile(col(pixels), (1, 3)).ravel()
    JS = col(f[nonBoundaryFaces].ravel())
    if self.imageGT is None:
        dImage_wrt_nonbnd_vc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
    else:
        dImage_wrt_nonbnd_vc = barycentric[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3))
        dImage_wrt_nonbnd_vc = 2* self.residuals[((~boundaryImage) & (visibility != 4294967295))].reshape((-1, 3)).T[:,:,None] * dImage_wrt_nonbnd_vc[None,:]
    data = np.asarray(dImage_wrt_nonbnd_vc, order='C').ravel()
    IS = np.concatenate([IS * num_channels + k for k in range(num_channels)])
    JS = np.concatenate([JS * num_channels + k for k in range(num_channels)])
    if self.imageGT is None:
        data = np.concatenate([data for i in range(num_channels)])
    ij = np.vstack((IS.ravel(), JS.ravel()))
    result = sp.csc_matrix((data, ij), shape=(width * height * num_channels, vc_size))
    result_wrt_vc_nonbnd = result
    # Boundary contribution exists only when boundary pixels were found.
    if np.any(boundaryImage):
        result_wrt_vc = result_wrt_vc_bnd + result_wrt_vc_nonbnd
    else:
        result_wrt_vc = result_wrt_vc_nonbnd
    return result_wrt_vc
def on_changed(self, which):
    """Re-upload GPU buffers for whichever dependent terms changed.

    `which` is the collection of changed attribute names supplied by the
    dependency framework.  Vertex, vertex-color, face-index and texture
    buffers are refreshed only when their source attribute changed, and the
    internal render buffers are regenerated if anything relevant changed.
    """
    super().on_changed(which)
    # BUGFIX: the original test was `if 'v' or 'camera' in which:`, which is
    # always true ('v' is a non-empty string) and re-uploaded vertices on
    # every change.  Test membership of each key explicitly.
    if 'v' in which or 'camera' in which:
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                verts_by_face = np.asarray(self.v_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
                self.vbo_verts_mesh[mesh][polygons].set_array(verts_by_face.astype(np.float32))
                self.vbo_verts_mesh[mesh][polygons].bind()
    if 'vc' in which:
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
                self.vbo_colors_mesh[mesh][polygons].set_array(colors_by_face.astype(np.float32))
                self.vbo_colors_mesh[mesh][polygons].bind()
    if 'f' in which:
        self.vbo_indices.set_array(self.f.astype(np.uint32))
        self.vbo_indices.bind()
        self.vbo_indices_range.set_array(np.arange(self.f.size, dtype=np.uint32).ravel())
        self.vbo_indices_range.bind()
        flen = 1
        for mesh in range(len(self.f_list)):
            for polygons in range(len(self.f_list[mesh])):
                f = self.f_list[mesh][polygons]
                # One id per face, replicated for each of its 3 vertices.
                fc = np.tile(np.arange(flen, flen + len(f))[:, None], [1, 3]).ravel()
                fc = np.asarray(fc, dtype=np.uint32)
                self.vbo_face_ids_list[mesh][polygons].set_array(fc)
                self.vbo_face_ids_list[mesh][polygons].bind()
                flen += len(f)
                self.vbo_indices_mesh_list[mesh][polygons].set_array(np.array(self.f_list[mesh][polygons]).astype(np.uint32))
                self.vbo_indices_mesh_list[mesh][polygons].bind()
    if 'texture_stack' in which:
        # Re-upload every texture from the flattened texture_stack.
        # (Inefficient: many textures may not have changed.)
        if self.initialized:
            textureCoordIdx = 0
            for mesh in range(len(self.f_list)):
                for polygons in range(len(self.f_list[mesh])):
                    texture = None
                    if self.haveUVs_list[mesh][polygons]:
                        texture = self.textureID_mesh_list[mesh][polygons]
                        GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                        # First conversion only establishes the byte size/shape
                        # used to slice this texture out of texture_stack.
                        image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                        self.textures_list[mesh][polygons] = self.texture_stack[textureCoordIdx:image.size + textureCoordIdx].reshape(image.shape)
                        textureCoordIdx = textureCoordIdx + image.size
                        image = np.array(np.flipud((self.textures_list[mesh][polygons] * 255.0)), order='C', dtype=np.uint8)
                        # reshape-then-ravel of a C-contiguous array does not
                        # reorder bytes; tobytes() replaces deprecated tostring().
                        GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0], GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                           image.reshape([image.shape[1], image.shape[0], -1]).ravel().tobytes())
    # BUGFIX: same always-true chained-`or` pattern as above.
    if any(key in which for key in ('v', 'f', 'vc', 'ft', 'camera', 'texture_stack', 'imageGT')):
        self.render_image_buffers()
def release_textures(self):
    """Delete every OpenGL texture tracked by this renderer and clear the list."""
    if not hasattr(self, 'textureID_mesh_list'):
        return
    for per_mesh_ids in self.textureID_mesh_list:
        if per_mesh_ids == []:
            continue
        for tex_id in per_mesh_ids:
            if tex_id is not None:
                GL.glDeleteTextures(1, [tex_id.value])
    self.textureID_mesh_list = []
@depends_on(dterms + terms)
def color_image(self):
    """Render the scene with vertex colors and textures (fill mode, no overdraw pass)."""
    self._call_on_changed()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    return self.draw_color_image(with_vertex_colors=True, with_texture_on=True)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def barycentric_image(self):
    """Per-pixel barycentric coordinates, honoring the overdraw setting."""
    self._call_on_changed()
    overdraw_mask = self.boundarybool_image if self.overdraw else None
    return self.draw_barycentric_image(overdraw_mask)
@depends_on('f', 'frustum', 'camera', 'overdraw')
def visibility_image(self):
    """Per-pixel visible-face-id image, honoring the overdraw setting."""
    self._call_on_changed()
    overdraw_mask = self.boundarybool_image if self.overdraw else None
    return self.draw_visibility_image(self.v.r, self.f, overdraw_mask)
def image_mesh_bool(self, meshes):
    """Return a boolean mask of pixels covered by any of the given mesh indices.

    Each mesh in `meshes` is drawn flat-colored into the FBO; a pixel is True
    where the read-back red channel is non-zero.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    self._call_on_changed()
    GL.glClearColor(0., 0., 0., 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glUseProgram(self.colorProgram)
    for mesh in meshes:
        self.draw_index(mesh)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: the read-back buffer was reshaped to (height, height, 3), which
    # only works for square frusta; a WxH read is H rows of W pixels.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3).astype(np.uint32))[:, :, 0]
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    return result != 0
@depends_on(dterms + terms)
def indices_image(self):
    """Render an image whose pixel values are mesh indices (0 = background).

    Each mesh is drawn flat-colored with its index via draw_index, then the
    red channel is read back.
    """
    self._call_on_changed()
    self.makeCurrentContext()
    GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    self._call_on_changed()
    GL.glClearColor(0., 0., 0., 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glUseProgram(self.colorProgram)
    for index in range(len(self.f_list)):
        self.draw_index(index)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: reshape used 'height' twice; a WxH read is H rows of W pixels.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3).astype(np.uint32))[:, :, 0]
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    return result
def draw_index(self, index):
    """Draw mesh `index` with every vertex colored index/255 (id-buffer pass)."""
    mesh_id = index
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    mvp = np.dot(self.projectionMatrix, view_mtx)
    vc = self.vc_list[mesh_id]
    for poly_idx in np.arange(len(self.f_list[mesh_id])):
        GL.glBindVertexArray(self.vao_tex_mesh_list[mesh_id][poly_idx])
        faces = self.f_list[mesh_id][poly_idx]
        color_vbo = self.vbo_colors_mesh[mesh_id][poly_idx]
        per_face_colors = np.asarray(vc.reshape((-1, 3))[faces.ravel()], dtype=np.float32, order='C')
        # Pol: Make a static zero vbo_color to make it more efficient?
        flat_id_colors = np.array(np.ones_like(per_face_colors) * (index) / 255.0, dtype=np.float32)
        color_vbo.set_array(flat_id_colors)
        index_vbo = self.vbo_indices_mesh_list[mesh_id][poly_idx]
        color_vbo.bind()
        primtype = GL.GL_LINES if self.f.shape[1] == 2 else GL.GL_TRIANGLES
        GL.glUniformMatrix4fv(self.MVP_location, 1, GL.GL_TRUE, mvp)
        GL.glDrawArrays(primtype, 0, len(index_vbo) * index_vbo.data.shape[1])
def draw_texcoord_image(self, v, f, ft, boundarybool_image=None):
    """Render per-pixel texture coordinates.

    Channels 0/1 hold (u, 1-v); channel 2 encodes which texture each element
    belongs to, normalized by the number of texture lists.  When
    `boundarybool_image` is given an extra wireframe pass is drawn on top.
    """
    self.makeCurrentContext()
    shaders.glUseProgram(self.colorProgram)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    # want vtc: texture-coordinates per vertex (not per element in vc)
    colors = ft
    # use the third channel to identify the corresponding textures.
    color3 = np.vstack([np.ones([self.ft_list[mesh].shape[0], 1]) * mesh for mesh in range(len(self.ft_list))]).astype(np.float32) / len(
        self.ft_list)
    colors = np.asarray(np.hstack((colors, color3)), np.float64, order='C')
    self.draw_colored_primitives(self.vao_dyn, v, f, colors)
    # Why do we need this?
    if boundarybool_image is not None:
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
        self.draw_colored_primitives(self.vao_dyn, v, f, colors)
        GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: the read-back buffer was reshaped to (height, height, 3); the
    # correct row-major layout of a WxH read is (height, width, 3).
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3)[:, :, :3].astype(np.float64)) / 255.0
    result[:, :, 1] = 1. - result[:, :, 1]
    return result
@depends_on('ft', 'textures')
def mesh_tex_coords(self):
    """Texture coordinates per face-vertex with the V axis flipped for OpenGL.

    BUGFIX: the original flipped `self.ft` in place (flagged by the old
    "careful with this" comment), so every recomputation toggled the V axis
    of the shared ft array.  Work on a copy instead.
    """
    data = self.ft.copy()
    data[:, 1] = 1.0 - 1.0 * data[:, 1]
    return data
# Depends on 'f' because vpe/fpe depend on f
# Pol: Check that depends on works on other attributes that depend_on x, if x changes.
@depends_on('ft', 'f')
def wireframe_tex_coords(self):
    """Texture coordinates for each wireframe edge endpoint.

    Scatters per-face-vertex tex coords into per-vertex slots, then gathers
    them by the edge-vertex index array `self.ma`.
    """
    print("wireframe_tex_coords is being computed!")
    # BUGFIX: size / 3 is a float in Python 3 and np.zeros rejects
    # non-integer shapes; use floor division.
    vvt = np.zeros((self.v.r.size // 3, 2), dtype=np.float64, order='C')
    vvt[self.f.flatten()] = self.mesh_tex_coords
    # (The original also pre-allocated `edata` with np.zeros and immediately
    # overwrote it — dead store removed.)
    edata = vvt[self.ma.ravel()]
    return edata
# TODO: can this not be inherited from base? turning off texture mapping in that instead?
@depends_on(dterms + terms)
def boundaryid_image(self):
    """Image of boundary-edge ids, drawn with the plain color program."""
    self._call_on_changed()
    self.makeCurrentContext()
    # Switch to the flat color program for the id pass, then restore the
    # textured program for subsequent rendering.
    GL.glUseProgram(self.colorProgram)
    edge_ids = self.draw_boundaryid_image(self.v.r, self.f, self.vpe, self.fpe, self.camera)
    GL.glUseProgram(self.colorTextureProgram)
    return edge_ids
@depends_on(dterms + terms)
def boundaryid_image_aa(self):
    """Anti-aliased variant of boundaryid_image."""
    self._call_on_changed()
    self.makeCurrentContext()
    # Flat color program for the id pass; restore textured program afterwards.
    GL.glUseProgram(self.colorProgram)
    edge_ids = self.draw_boundaryid_image_aa(self.v.r, self.f, self.vpe, self.fpe, self.camera)
    GL.glUseProgram(self.colorTextureProgram)
    return edge_ids
def draw_color_image(self, with_vertex_colors=True, with_texture_on=True):
    """Render the scene and return it as a float image in [0, 1].

    Draws every polygon group of every mesh with its vertex colors and
    (optionally) textures, resolves MSAA if enabled, reads the pixels back,
    and composites over `self.background_image` where nothing was visible.

    with_vertex_colors: if False, vertices are drawn white (texture only).
    with_texture_on:    if False, textures are skipped even where UVs exist.
    """
    self.makeCurrentContext()
    self._call_on_changed()
    GL.glEnable(GL.GL_MULTISAMPLE)
    if hasattr(self, 'bgcolor'):
        GL.glClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels], self.bgcolor.r[2 % self.num_channels], 1.)
    # use face colors if given
    # FIXME: this won't work for 2 channels
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    view_mtx = self.camera.openglMat.dot(np.asarray(np.vstack((self.camera.view_matrix, np.array([0, 0, 0, 1]))), np.float32))
    MVP = np.dot(self.projectionMatrix, view_mtx)
    for mesh in range(len(self.f_list)):
        for polygons in np.arange(len(self.f_list[mesh])):
            vao_mesh = self.vao_tex_mesh_list[mesh][polygons]
            vbo_f = self.vbo_indices_mesh_list[mesh][polygons]
            GL.glBindVertexArray(vao_mesh)
            f = self.f_list[mesh][polygons]
            vbo_color = self.vbo_colors_mesh[mesh][polygons]
            # (dead local `verts_by_face` removed — vertex VBOs are uploaded
            # in on_changed, not here)
            colors_by_face = np.asarray(self.vc_list[mesh].reshape((-1, 3))[f.ravel()], dtype=np.float32, order='C')
            vc = colors_by_face
            if with_vertex_colors:
                colors = vc.astype(np.float32)
            else:
                # Only texture.
                colors = np.ones_like(vc).astype(np.float32)
            # Pol: Make a static zero vbo_color to make it more efficient?
            vbo_color.set_array(colors)
            vbo_color.bind()
            if self.f.shape[1] == 2:
                primtype = GL.GL_LINES
            else:
                primtype = GL.GL_TRIANGLES
            if with_texture_on and self.haveUVs_list[mesh][polygons]:
                GL.glUseProgram(self.colorTextureProgram)
                texture = self.textureID_mesh_list[mesh][polygons]
                GL.glActiveTexture(GL.GL_TEXTURE0)
                GL.glBindTexture(GL.GL_TEXTURE_2D, texture)
                GL.glUniform1i(self.textureID, 0)
            else:
                GL.glUseProgram(self.colorProgram)
            GL.glUniformMatrix4fv(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)
            GL.glDrawArrays(primtype, 0, len(vbo_f) * vbo_f.data.shape[1])
    # Resolve multisampled buffer into the read framebuffer.
    if self.msaa:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)
    else:
        GL.glBindFramebuffer(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glBlitFramebuffer(0, 0, self.frustum['width'], self.frustum['height'], 0, 0, self.frustum['width'], self.frustum['height'],
                         GL.GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)
    GL.glBindFramebuffer(GL.GL_FRAMEBUFFER, self.fbo)
    GL.glReadBuffer(GL.GL_COLOR_ATTACHMENT0)
    # BUGFIX: the read-back buffer was reshaped to (height, height, 3), which
    # only works for square frusta; a WxH read is H rows of W pixels.
    result = np.flipud(
        np.frombuffer(GL.glReadPixels(0, 0, self.frustum['width'], self.frustum['height'], GL.GL_RGB, GL.GL_UNSIGNED_BYTE), np.uint8).reshape(
            self.frustum['height'], self.frustum['width'], 3).astype(np.float64)) / 255.0
    GL.glBindFramebuffer(GL.GL_DRAW_FRAMEBUFFER, self.fbo)
    GL.glDisable(GL.GL_MULTISAMPLE)
    GL.glClearColor(0., 0., 0., 1.)
    if hasattr(self, 'background_image'):
        # Background where no face was visible (0xFFFFFFFF in visibility).
        bg_px = np.tile(np.atleast_3d(self.visibility_image) == 4294967295, (1, 1, 3))
        fg_px = 1 - bg_px
        result = bg_px * self.background_image + fg_px * result
    return result
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image_quantized(self):
    # Quantize the continuous texcoord image into integer texel indices.
    # Returns (texcoord_image, texture_idx): texcoord_image holds a flattened
    # texel index (col + row * texture_width) per pixel; texture_idx is the
    # texture id each pixel samples from (recovered from channel 2).
    texcoord_image = self.texcoord_image[:, :, :2].copy()
    # Temporary: pins texture_image to the first mesh's first texture —
    # NOTE(review): assumes all textures share this resolution; verify.
    self.texture_image = self.textures_list[0][0].r.copy()
    # Scale normalized u/v into pixel coordinates of the texture.
    texcoord_image[:, :, 0] *= self.texture_image.shape[1] - 1
    texcoord_image[:, :, 1] *= self.texture_image.shape[0] - 1
    texture_idx = (self.texcoord_image[:, :, 2] * len(self.ft_list)).astype(np.uint32)
    texcoord_image = np.round(texcoord_image)
    texcoord_image = texcoord_image[:, :, 0] + texcoord_image[:, :, 1] * self.texture_image.shape[1]
    return texcoord_image, texture_idx
def checkBufferNum(self):
    # Sanity probe: requests one buffer name from GL, which raises if the
    # GL context is not current/alive. The name itself is discarded.
    GL.glGenBuffers(1)
@depends_on('ft', 'f', 'frustum', 'camera')
def texcoord_image(self):
    """Per-pixel texture coordinate image, honoring the overdraw setting."""
    overdraw_mask = self.boundarybool_image if self.overdraw else None
    return self.draw_texcoord_image(self.v.r, self.f, self.ft, overdraw_mask)
def main():
    """Placeholder script entry point; this module is used as a library."""
# Entry-point guard: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"OpenGL.GL.glTexImage2DMultisample",
"numpy.sum",
"glfw.make_context_current",
"numpy.einsum",
"numpy.ones",
"numpy.argmin",
"matplotlib.pyplot.figure",
"numpy.arange",
"OpenGL.arrays.vbo.unbind",
"OpenGL.GL.glCheckFramebufferStatus",
"OpenGL.GL.glRenderbufferStorage",
"numpy.identity",
"num... | [((5910, 5976), 'OpenGL.GL.glViewport', 'GL.glViewport', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']"], {}), "(0, 0, self.frustum['width'], self.frustum['height'])\n", (5923, 5976), True, 'import OpenGL.GL as GL\n'), ((6012, 6035), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (6032, 6035), True, 'import OpenGL.GL as GL\n'), ((6045, 6071), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (6059, 6071), True, 'import OpenGL.GL as GL\n'), ((6081, 6130), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (6101, 6130), True, 'import OpenGL.GL as GL\n'), ((6158, 6182), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (6179, 6182), True, 'import OpenGL.GL as GL\n'), ((6191, 6249), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buf'], {}), '(GL.GL_RENDERBUFFER, self.render_buf)\n', (6212, 6249), True, 'import OpenGL.GL as GL\n'), ((6257, 6365), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (6281, 6365), True, 'import OpenGL.GL as GL\n'), ((6369, 6488), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buf'], {}), '(GL.GL_DRAW_FRAMEBUFFER, GL.\n GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buf)\n', (6397, 6488), True, 'import OpenGL.GL as GL\n'), ((6507, 6531), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (6528, 6531), True, 'import OpenGL.GL as GL\n'), ((6540, 6593), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf'], {}), 
'(GL.GL_RENDERBUFFER, self.z_buf)\n', (6561, 6593), True, 'import OpenGL.GL as GL\n'), ((6602, 6721), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (6626, 6721), True, 'import OpenGL.GL as GL\n'), ((6726, 6834), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, self.z_buf)\n', (6754, 6834), True, 'import OpenGL.GL as GL\n'), ((8627, 8650), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (8647, 8650), True, 'import OpenGL.GL as GL\n'), ((8660, 8686), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (8674, 8686), True, 'import OpenGL.GL as GL\n'), ((8696, 8750), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_noms)\n', (8716, 8750), True, 'import OpenGL.GL as GL\n'), ((8784, 8808), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (8805, 8808), True, 'import OpenGL.GL as GL\n'), ((8817, 8880), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buf_noms'], {}), '(GL.GL_RENDERBUFFER, self.render_buf_noms)\n', (8838, 8880), True, 'import OpenGL.GL as GL\n'), ((8889, 9011), 'OpenGL.GL.glRenderbufferStorageMultisample', 'GL.glRenderbufferStorageMultisample', (['GL.GL_RENDERBUFFER', '(0)', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, 0, GL.GL_RGB8, self\n .frustum['width'], self.frustum['height'])\n", (8924, 9011), True, 'import OpenGL.GL as GL\n'), ((9014, 9138), 'OpenGL.GL.glFramebufferRenderbuffer', 
'GL.glFramebufferRenderbuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buf_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, GL.\n GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buf_noms)\n', (9042, 9138), True, 'import OpenGL.GL as GL\n'), ((9161, 9185), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (9182, 9185), True, 'import OpenGL.GL as GL\n'), ((9194, 9252), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf_noms'], {}), '(GL.GL_RENDERBUFFER, self.z_buf_noms)\n', (9215, 9252), True, 'import OpenGL.GL as GL\n'), ((9261, 9394), 'OpenGL.GL.glRenderbufferStorageMultisample', 'GL.glRenderbufferStorageMultisample', (['GL.GL_RENDERBUFFER', '(0)', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, 0, GL.\n GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])\n", (9296, 9394), True, 'import OpenGL.GL as GL\n'), ((9398, 9511), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf_noms'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, self.z_buf_noms)\n', (9426, 9511), True, 'import OpenGL.GL as GL\n'), ((9516, 9545), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (9527, 9545), True, 'import OpenGL.GL as GL\n'), ((9554, 9604), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (9570, 9604), True, 'import OpenGL.GL as GL\n'), ((9613, 9642), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (9625, 9642), True, 'import OpenGL.GL as GL\n'), ((9652, 9686), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (9662, 9686), True, 'import OpenGL.GL as GL\n'), 
((9695, 9729), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (9705, 9729), True, 'import OpenGL.GL as GL\n'), ((9924, 9966), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (9944, 9966), True, 'import OpenGL.GL as GL\n'), ((10146, 10413), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (10167, 10413), True, 'import OpenGL.GL.shaders as shaders\n'), ((10430, 10973), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n uniform mat4 MVP;\n out vec3 theColor;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n uniform mat4 MVP;\n out vec3 theColor;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n }"""\n , GL.GL_VERTEX_SHADER)\n', (10451, 10973), True, 'import OpenGL.GL.shaders as shaders\n'), ((10993, 11047), 'OpenGL.GL.shaders.compileProgram', 
'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (11015, 11047), True, 'import OpenGL.GL.shaders as shaders\n'), ((11056, 11095), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (11076, 11095), True, 'import OpenGL.GL.shaders as shaders\n'), ((11131, 11441), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n //noperspective in vec3 theColor;\n // Ouput data\n out vec3 color;\n void main(){\n color = color.xyz;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n //noperspective in vec3 theColor;\n // Ouput data\n out vec3 color;\n void main(){\n color = color.xyz;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (11152, 11441), True, 'import OpenGL.GL.shaders as shaders\n'), ((11465, 12051), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n uniform mat4 MVP;\n out vec3 theColor;\n //noperspective out vec3 theColor;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n uniform mat4 MVP;\n out vec3 theColor;\n //noperspective out vec3 theColor;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n }"""\n , 
GL.GL_VERTEX_SHADER)\n', (11486, 12051), True, 'import OpenGL.GL.shaders as shaders\n'), ((12085, 12155), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER_NOPERSP', 'FRAGMENT_SHADER_NOPERSP'], {}), '(VERTEX_SHADER_NOPERSP, FRAGMENT_SHADER_NOPERSP)\n', (12107, 12155), True, 'import OpenGL.GL.shaders as shaders\n'), ((12269, 12322), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorProgram', '"""position"""'], {}), "(self.colorProgram, 'position')\n", (12291, 12322), True, 'import OpenGL.GL as GL\n'), ((12348, 12398), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorProgram', '"""color"""'], {}), "(self.colorProgram, 'color')\n", (12370, 12398), True, 'import OpenGL.GL as GL\n'), ((12508, 12557), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorProgram', '"""MVP"""'], {}), "(self.colorProgram, 'MVP')\n", (12531, 12557), True, 'import OpenGL.GL as GL\n'), ((12576, 12610), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (12586, 12610), True, 'import OpenGL.GL as GL\n'), ((12619, 12653), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (12629, 12653), True, 'import OpenGL.GL as GL\n'), ((12673, 12706), 'numpy.array', 'np.array', (['self.f'], {'dtype': 'np.uint32'}), '(self.f, dtype=np.uint32)\n', (12681, 12706), True, 'import numpy as np\n'), ((12734, 12785), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['indices'], {'target': 'GL.GL_ELEMENT_ARRAY_BUFFER'}), '(indices, target=GL.GL_ELEMENT_ARRAY_BUFFER)\n', (12741, 12785), False, 'from OpenGL.arrays import vbo\n'), ((12942, 12993), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['indices'], {'target': 'GL.GL_ELEMENT_ARRAY_BUFFER'}), '(indices, target=GL.GL_ELEMENT_ARRAY_BUFFER)\n', (12949, 12993), False, 'from OpenGL.arrays import vbo\n'), ((13623, 13635), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (13632, 13635), True, 
'import OpenGL.GL as GL\n'), ((13645, 13685), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_static'], {}), '(1, self.vao_static)\n', (13665, 13685), True, 'import OpenGL.GL as GL\n'), ((13694, 13731), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_static'], {}), '(self.vao_static)\n', (13714, 13731), True, 'import OpenGL.GL as GL\n'), ((13804, 13851), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (13832, 13851), True, 'import OpenGL.GL as GL\n'), ((13892, 13977), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (13916, 13977), True, 'import OpenGL.GL as GL\n'), ((14014, 14058), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (14042, 14058), True, 'import OpenGL.GL as GL\n'), ((14099, 14177), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (14123, 14177), True, 'import OpenGL.GL as GL\n'), ((14187, 14210), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (14207, 14210), True, 'import OpenGL.GL as GL\n'), ((14243, 14255), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (14252, 14255), True, 'import OpenGL.GL as GL\n'), ((14264, 14309), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_static_face'], {}), '(1, self.vao_static_face)\n', (14284, 14309), True, 'import OpenGL.GL as GL\n'), ((14318, 14360), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_static_face'], {}), '(self.vao_static_face)\n', (14338, 14360), True, 'import OpenGL.GL as GL\n'), ((14476, 14523), 
'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (14504, 14523), True, 'import OpenGL.GL as GL\n'), ((14564, 14649), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (14588, 14649), True, 'import OpenGL.GL as GL\n'), ((14691, 14735), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (14719, 14735), True, 'import OpenGL.GL as GL\n'), ((14776, 14854), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (14800, 14854), True, 'import OpenGL.GL as GL\n'), ((14864, 14887), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (14884, 14887), True, 'import OpenGL.GL as GL\n'), ((14912, 14924), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (14921, 14924), True, 'import OpenGL.GL as GL\n'), ((14933, 14970), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_dyn'], {}), '(1, self.vao_dyn)\n', (14953, 14970), True, 'import OpenGL.GL as GL\n'), ((14979, 15013), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_dyn'], {}), '(self.vao_dyn)\n', (14999, 15013), True, 'import OpenGL.GL as GL\n'), ((15125, 15172), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (15153, 15172), True, 'import OpenGL.GL as GL\n'), ((15213, 15298), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (15237, 15298), True, 'import OpenGL.GL as GL\n'), 
((15335, 15379), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (15363, 15379), True, 'import OpenGL.GL as GL\n'), ((15420, 15498), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (15444, 15498), True, 'import OpenGL.GL as GL\n'), ((15508, 15531), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (15528, 15531), True, 'import OpenGL.GL as GL\n'), ((15559, 15571), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (15568, 15571), True, 'import OpenGL.GL as GL\n'), ((15580, 15620), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_dyn_ub'], {}), '(1, self.vao_dyn_ub)\n', (15600, 15620), True, 'import OpenGL.GL as GL\n'), ((15629, 15666), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_dyn_ub'], {}), '(self.vao_dyn_ub)\n', (15649, 15666), True, 'import OpenGL.GL as GL\n'), ((15747, 15794), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (15775, 15794), True, 'import OpenGL.GL as GL\n'), ((15835, 15920), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (15859, 15920), True, 'import OpenGL.GL as GL\n'), ((16043, 16087), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (16071, 16087), True, 'import OpenGL.GL as GL\n'), ((16128, 16217), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_UNSIGNED_BYTE', 'GL.GL_TRUE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_UNSIGNED_BYTE, GL.GL_TRUE,\n 0, None)\n', (16152, 16217), True, 
'import OpenGL.GL as GL\n'), ((16754, 16781), 'opendr.topology.get_vertices_per_edge', 'get_vertices_per_edge', (['v', 'f'], {}), '(v, f)\n', (16775, 16781), False, 'from opendr.topology import get_vertices_per_edge, get_faces_per_edge\n'), ((16796, 16825), 'opendr.topology.get_faces_per_edge', 'get_faces_per_edge', (['v', 'f', 'vpe'], {}), '(v, f, vpe)\n', (16814, 16825), False, 'from opendr.topology import get_vertices_per_edge, get_faces_per_edge\n'), ((18388, 18442), 'numpy.asarray', 'np.asarray', (['verts_by_face'], {'dtype': 'np.float64', 'order': '"""C"""'}), "(verts_by_face, dtype=np.float64, order='C')\n", (18398, 18442), True, 'import numpy as np\n'), ((19054, 19107), 'opendr.common.boundary_neighborhood', 'common.boundary_neighborhood', (['self.boundarybool_image'], {}), '(self.boundarybool_image)\n', (19082, 19107), False, 'from opendr import common\n'), ((19200, 19213), 'numpy.asarray', 'np.asarray', (['k'], {}), '(k)\n', (19210, 19213), True, 'import numpy as np\n'), ((19279, 19442), 'numpy.array', 'np.array', (['[[fx / cx, 0, 0, 0], [0, fy / cy, 0, 0], [0, 0, -(near + far) / (far - near\n ), -2 * near * far / (far - near)], [0, 0, -1, 0]]'], {'dtype': 'np.float32'}), '([[fx / cx, 0, 0, 0], [0, fy / cy, 0, 0], [0, 0, -(near + far) / (\n far - near), -2 * near * far / (far - near)], [0, 0, -1, 0]], dtype=np.\n float32)\n', (19287, 19442), True, 'import numpy as np\n'), ((19636, 19670), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (19651, 19670), True, 'import OpenGL.GL as GL\n'), ((19680, 19709), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (19692, 19709), True, 'import OpenGL.GL as GL\n'), ((19960, 19997), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_static'], {}), '(self.vao_static)\n', (19980, 19997), True, 'import OpenGL.GL as GL\n'), ((20428, 20457), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), 
'(GL.GL_CULL_FACE)\n', (20440, 20457), True, 'import OpenGL.GL as GL\n'), ((20760, 20799), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (20780, 20799), True, 'import OpenGL.GL.shaders as shaders\n'), ((20808, 20845), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_static'], {}), '(self.vao_static)\n', (20828, 20845), True, 'import OpenGL.GL as GL\n'), ((21220, 21259), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (21240, 21259), True, 'import OpenGL.GL.shaders as shaders\n'), ((21268, 21317), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (21288, 21317), True, 'import OpenGL.GL as GL\n'), ((21327, 21353), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (21341, 21353), True, 'import OpenGL.GL as GL\n'), ((21362, 21391), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (21373, 21391), True, 'import OpenGL.GL as GL\n'), ((21400, 21459), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (21410, 21459), True, 'import OpenGL.GL as GL\n'), ((21469, 21507), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_POLYGON_OFFSET_FILL'], {}), '(GL.GL_POLYGON_OFFSET_FILL)\n', (21480, 21507), True, 'import OpenGL.GL as GL\n'), ((21516, 21540), 'OpenGL.GL.glPolygonOffset', 'GL.glPolygonOffset', (['(1)', '(1)'], {}), '(1, 1)\n', (21534, 21540), True, 'import OpenGL.GL as GL\n'), ((21607, 21646), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_POLYGON_OFFSET_FILL'], {}), '(GL.GL_POLYGON_OFFSET_FILL)\n', (21619, 21646), True, 'import OpenGL.GL as GL\n'), ((21911, 21941), 'numpy.asarray', 'np.asarray', (['ec'], {'dtype': 'np.uint8'}), '(ec, dtype=np.uint8)\n', (21921, 21941), True, 
'import numpy as np\n'), ((22180, 22208), 'OpenGL.GL.glDepthFunc', 'GL.glDepthFunc', (['GL.GL_LEQUAL'], {}), '(GL.GL_LEQUAL)\n', (22194, 22208), True, 'import OpenGL.GL as GL\n'), ((22217, 22267), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (22233, 22267), True, 'import OpenGL.GL as GL\n'), ((22340, 22390), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (22356, 22390), True, 'import OpenGL.GL as GL\n'), ((22399, 22425), 'OpenGL.GL.glDepthFunc', 'GL.glDepthFunc', (['GL.GL_LESS'], {}), '(GL.GL_LESS)\n', (22413, 22425), True, 'import OpenGL.GL as GL\n'), ((23272, 23321), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (23292, 23321), True, 'import OpenGL.GL as GL\n'), ((23330, 23370), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (23345, 23370), True, 'import OpenGL.GL as GL\n'), ((23675, 23734), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (23685, 23734), True, 'import OpenGL.GL as GL\n'), ((23899, 23938), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (23919, 23938), True, 'import OpenGL.GL.shaders as shaders\n'), ((23947, 23996), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (23967, 23996), True, 'import OpenGL.GL as GL\n'), ((24006, 24032), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (24020, 24032), True, 'import OpenGL.GL as GL\n'), ((24041, 24070), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (24052, 24070), 
True, 'import OpenGL.GL as GL\n'), ((24079, 24138), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (24089, 24138), True, 'import OpenGL.GL as GL\n'), ((24148, 24186), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_POLYGON_OFFSET_FILL'], {}), '(GL.GL_POLYGON_OFFSET_FILL)\n', (24159, 24186), True, 'import OpenGL.GL as GL\n'), ((24195, 24219), 'OpenGL.GL.glPolygonOffset', 'GL.glPolygonOffset', (['(1)', '(1)'], {}), '(1, 1)\n', (24213, 24219), True, 'import OpenGL.GL as GL\n'), ((24286, 24325), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_POLYGON_OFFSET_FILL'], {}), '(GL.GL_POLYGON_OFFSET_FILL)\n', (24298, 24325), True, 'import OpenGL.GL as GL\n'), ((24590, 24620), 'numpy.asarray', 'np.asarray', (['ec'], {'dtype': 'np.uint8'}), '(ec, dtype=np.uint8)\n', (24600, 24620), True, 'import numpy as np\n'), ((24909, 24937), 'OpenGL.GL.glDepthFunc', 'GL.glDepthFunc', (['GL.GL_LEQUAL'], {}), '(GL.GL_LEQUAL)\n', (24923, 24937), True, 'import OpenGL.GL as GL\n'), ((24946, 24976), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (24957, 24976), True, 'import OpenGL.GL as GL\n'), ((24985, 25035), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (25001, 25035), True, 'import OpenGL.GL as GL\n'), ((25044, 25074), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_LINE_SMOOTH'], {}), '(GL.GL_LINE_SMOOTH)\n', (25055, 25074), True, 'import OpenGL.GL as GL\n'), ((25083, 25107), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_BLEND'], {}), '(GL.GL_BLEND)\n', (25094, 25107), True, 'import OpenGL.GL as GL\n'), ((25185, 25232), 'OpenGL.GL.glHint', 'GL.glHint', (['GL.GL_LINE_SMOOTH_HINT', 'GL.GL_NICEST'], {}), '(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n', (25194, 25232), True, 'import OpenGL.GL as GL\n'), ((25241, 25258), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['(1)'], 
{}), '(1)\n', (25255, 25258), True, 'import OpenGL.GL as GL\n'), ((25331, 25381), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (25347, 25381), True, 'import OpenGL.GL as GL\n'), ((25390, 25421), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['self.line_width'], {}), '(self.line_width)\n', (25404, 25421), True, 'import OpenGL.GL as GL\n'), ((25430, 25461), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (25442, 25461), True, 'import OpenGL.GL as GL\n'), ((25470, 25501), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_LINE_SMOOTH'], {}), '(GL.GL_LINE_SMOOTH)\n', (25482, 25501), True, 'import OpenGL.GL as GL\n'), ((25510, 25535), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_BLEND'], {}), '(GL.GL_BLEND)\n', (25522, 25535), True, 'import OpenGL.GL as GL\n'), ((25544, 25570), 'OpenGL.GL.glDepthFunc', 'GL.glDepthFunc', (['GL.GL_LESS'], {}), '(GL.GL_LESS)\n', (25558, 25570), True, 'import OpenGL.GL as GL\n'), ((26417, 26466), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (26437, 26466), True, 'import OpenGL.GL as GL\n'), ((26475, 26515), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (26490, 26515), True, 'import OpenGL.GL as GL\n'), ((26817, 26843), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""raw.png"""', 'raw'], {}), "('raw.png', raw)\n", (26827, 26843), True, 'import matplotlib.pyplot as plt\n'), ((26864, 26880), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (26878, 26880), False, 'import ipdb\n'), ((26890, 26949), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (26900, 26949), True, 'import OpenGL.GL as GL\n'), ((27122, 27156), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', 
(['self.colorProgram'], {}), '(self.colorProgram)\n', (27137, 27156), True, 'import OpenGL.GL as GL\n'), ((27350, 27375), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (27370, 27375), True, 'import OpenGL.GL as GL\n'), ((28826, 28880), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (28846, 28880), True, 'import OpenGL.GL as GL\n'), ((30101, 30151), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (30117, 30151), True, 'import OpenGL.GL as GL\n'), ((30699, 30738), 'numpy.sum', 'np.sum', (['(rays_to_faces * self.tn)'], {'axis': '(1)'}), '(rays_to_faces * self.tn, axis=1)\n', (30705, 30738), True, 'import numpy as np\n'), ((31380, 31414), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (31395, 31414), True, 'import OpenGL.GL as GL\n'), ((33495, 33529), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (33510, 33529), True, 'import OpenGL.GL as GL\n'), ((35620, 35633), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (35630, 35633), True, 'import numpy as np\n'), ((35731, 35770), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (35751, 35770), True, 'import OpenGL.GL.shaders as shaders\n'), ((35983, 36014), 'numpy.unique', 'np.unique', (['rr[rr != 4294967295]'], {}), '(rr[rr != 4294967295])\n', (35992, 36014), True, 'import numpy as np\n'), ((36190, 36240), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (36206, 36240), True, 'import OpenGL.GL as GL\n'), ((36311, 36361), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (36327, 
36361), True, 'import OpenGL.GL as GL\n'), ((37079, 37113), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (37094, 37113), True, 'import OpenGL.GL as GL\n'), ((37143, 37197), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (37163, 37197), True, 'import OpenGL.GL as GL\n'), ((37206, 37265), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (37216, 37265), True, 'import OpenGL.GL as GL\n'), ((37484, 37514), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint8'}), '(fc, dtype=np.uint8)\n', (37494, 37514), True, 'import numpy as np\n'), ((37611, 37660), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (37631, 37660), True, 'import OpenGL.GL as GL\n'), ((37670, 37710), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (37685, 37710), True, 'import OpenGL.GL as GL\n'), ((38147, 38176), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (38159, 38176), True, 'import OpenGL.GL as GL\n'), ((38363, 38413), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (38379, 38413), True, 'import OpenGL.GL as GL\n'), ((38481, 38531), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (38497, 38531), True, 'import OpenGL.GL as GL\n'), ((38547, 38580), 'numpy.atleast_3d', 'np.atleast_3d', (['boundarybool_image'], {}), '(boundarybool_image)\n', (38560, 38580), True, 'import numpy as np\n'), ((38700, 38734), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), 
'(self.colorProgram)\n', (38715, 38734), True, 'import OpenGL.GL as GL\n'), ((38980, 39022), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_static_face'], {}), '(self.vao_static_face)\n', (39000, 39022), True, 'import OpenGL.GL as GL\n'), ((39032, 39086), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (39052, 39086), True, 'import OpenGL.GL as GL\n'), ((39096, 39155), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (39106, 39155), True, 'import OpenGL.GL as GL\n'), ((39324, 39373), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (39344, 39373), True, 'import OpenGL.GL as GL\n'), ((39382, 39422), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (39397, 39422), True, 'import OpenGL.GL as GL\n'), ((39954, 40117), 'numpy.array', 'np.array', (['[[fx / cx, 0, 0, 0], [0, fy / cy, 0, 0], [0, 0, -(near + far) / (far - near\n ), -2 * near * far / (far - near)], [0, 0, -1, 0]]'], {'dtype': 'np.float32'}), '([[fx / cx, 0, 0, 0], [0, fy / cy, 0, 0], [0, 0, -(near + far) / (\n far - near), -2 * near * far / (far - near)], [0, 0, -1, 0]], dtype=np.\n float32)\n', (39962, 40117), True, 'import numpy as np\n'), ((44174, 44212), 'opendr.common.flow_to', 'common.flow_to', (['self', 'v_next', 'cam_next'], {}), '(self, v_next, cam_next)\n', (44188, 44212), False, 'from opendr import common\n'), ((49808, 49827), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['(1.0)'], {}), '(1.0)\n', (49822, 49827), True, 'import OpenGL.GL as GL\n'), ((49854, 50286), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : 
enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (49875, 50286), True, 'import OpenGL.GL.shaders as shaders\n'), ((50302, 50940), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""\n , GL.GL_VERTEX_SHADER)\n', (50323, 50940), True, 'import OpenGL.GL.shaders as shaders\n'), ((50967, 51021), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (50989, 51021), 
True, 'import OpenGL.GL.shaders as shaders\n'), ((51163, 51223), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""position"""'], {}), "(self.colorTextureProgram, 'position')\n", (51185, 51223), True, 'import OpenGL.GL as GL\n'), ((51249, 51306), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""color"""'], {}), "(self.colorTextureProgram, 'color')\n", (51271, 51306), True, 'import OpenGL.GL as GL\n'), ((51330, 51390), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""vertexUV"""'], {}), "(self.colorTextureProgram, 'vertexUV')\n", (51352, 51390), True, 'import OpenGL.GL as GL\n'), ((51508, 51564), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""MVP"""'], {}), "(self.colorTextureProgram, 'MVP')\n", (51531, 51564), True, 'import OpenGL.GL as GL\n'), ((55138, 55175), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (55154, 55175), True, 'import OpenGL.GL as GL\n'), ((55184, 55207), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (55204, 55207), True, 'import OpenGL.GL as GL\n'), ((55235, 55304), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""myTextureSampler"""'], {}), "(self.colorTextureProgram, 'myTextureSampler')\n", (55258, 55304), True, 'import OpenGL.GL as GL\n'), ((56222, 56272), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (56238, 56272), True, 'import OpenGL.GL as GL\n'), ((56449, 56499), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (56465, 56499), True, 'import OpenGL.GL as GL\n'), ((56552, 56602), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 
'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (56568, 56602), True, 'import OpenGL.GL as GL\n'), ((56842, 56939), 'numpy.asarray', 'np.asarray', (['(overdraw * boundarybool_image + no_overdraw * (1 - boundarybool_image))'], {'order': '"""C"""'}), "(overdraw * boundarybool_image + no_overdraw * (1 -\n boundarybool_image), order='C')\n", (56852, 56939), True, 'import numpy as np\n'), ((57047, 57097), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (57063, 57097), True, 'import OpenGL.GL as GL\n'), ((57139, 57174), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (57154, 57174), True, 'import OpenGL.GL as GL\n'), ((57261, 57315), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (57281, 57315), True, 'import OpenGL.GL as GL\n'), ((57324, 57383), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (57334, 57383), True, 'import OpenGL.GL as GL\n'), ((57393, 57427), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (57408, 57427), True, 'import OpenGL.GL as GL\n'), ((57499, 57548), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (57519, 57548), True, 'import OpenGL.GL as GL\n'), ((57557, 57597), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (57572, 57597), True, 'import OpenGL.GL as GL\n'), ((57843, 57897), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (57863, 57897), True, 'import OpenGL.GL as GL\n'), ((58058, 58108), 
'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (58074, 58108), True, 'import OpenGL.GL as GL\n'), ((58150, 58185), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (58165, 58185), True, 'import OpenGL.GL as GL\n'), ((58272, 58326), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (58292, 58326), True, 'import OpenGL.GL as GL\n'), ((58335, 58394), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (58345, 58394), True, 'import OpenGL.GL as GL\n'), ((58404, 58438), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (58419, 58438), True, 'import OpenGL.GL as GL\n'), ((58530, 58579), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (58550, 58579), True, 'import OpenGL.GL as GL\n'), ((58588, 58628), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (58603, 58628), True, 'import OpenGL.GL as GL\n'), ((58874, 58928), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (58894, 58928), True, 'import OpenGL.GL as GL\n'), ((59416, 59455), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (59422, 59455), True, 'import numpy as np\n'), ((60248, 60287), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (60268, 60287), True, 'import OpenGL.GL.shaders as shaders\n'), ((60297, 60351), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', 
(['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (60317, 60351), True, 'import OpenGL.GL as GL\n'), ((60360, 60419), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (60370, 60419), True, 'import OpenGL.GL as GL\n'), ((61164, 61213), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (61184, 61213), True, 'import OpenGL.GL as GL\n'), ((61223, 61263), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (61238, 61263), True, 'import OpenGL.GL as GL\n'), ((67393, 67454), 'numpy.zeros', 'np.zeros', (['(self.v.r.size / 3, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.v.r.size / 3, 2), dtype=np.float64, order='C')\n", (67401, 67454), True, 'import numpy as np\n'), ((67521, 67578), 'numpy.zeros', 'np.zeros', (['(self.vpe.size, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.vpe.size, 2), dtype=np.float64, order='C')\n", (67529, 67578), True, 'import numpy as np\n'), ((67902, 67936), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (67917, 67936), True, 'import OpenGL.GL as GL\n'), ((68042, 68083), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (68057, 68083), True, 'import OpenGL.GL as GL\n'), ((68321, 68351), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (68332, 68351), True, 'import OpenGL.GL as GL\n'), ((68607, 68661), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (68627, 68661), True, 'import OpenGL.GL as GL\n'), ((68670, 68729), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], 
{}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (68680, 68729), True, 'import OpenGL.GL as GL\n'), ((68917, 68976), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (68927, 68976), True, 'import OpenGL.GL as GL\n'), ((69121, 69160), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (69127, 69160), True, 'import numpy as np\n'), ((70930, 70984), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (70950, 70984), True, 'import OpenGL.GL as GL\n'), ((70993, 71166), 'OpenGL.GL.glBlitFramebuffer', 'GL.glBlitFramebuffer', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", '(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_COLOR_BUFFER_BIT', 'GL.GL_LINEAR'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], 0,\n 0, self.frustum['width'], self.frustum['height'], GL.\n GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)\n", (71013, 71166), True, 'import OpenGL.GL as GL\n'), ((71166, 71215), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (71186, 71215), True, 'import OpenGL.GL as GL\n'), ((71224, 71264), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (71239, 71264), True, 'import OpenGL.GL as GL\n'), ((71511, 71565), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (71531, 71565), True, 'import OpenGL.GL as GL\n'), ((71574, 71605), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (71586, 71605), True, 'import OpenGL.GL as GL\n'), ((71614, 71649), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', 
'(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (71629, 71649), True, 'import OpenGL.GL as GL\n'), ((72372, 72396), 'numpy.round', 'np.round', (['texcoord_image'], {}), '(texcoord_image)\n', (72380, 72396), True, 'import numpy as np\n'), ((72578, 72596), 'OpenGL.GL.glGenBuffers', 'GL.glGenBuffers', (['(1)'], {}), '(1)\n', (72593, 72596), True, 'import OpenGL.GL as GL\n'), ((75660, 76092), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (75681, 76092), True, 'import OpenGL.GL.shaders as shaders\n'), ((76108, 76746), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n 
layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""\n , GL.GL_VERTEX_SHADER)\n', (76129, 76746), True, 'import OpenGL.GL.shaders as shaders\n'), ((76773, 76827), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (76795, 76827), True, 'import OpenGL.GL.shaders as shaders\n'), ((76968, 77028), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""position"""'], {}), "(self.colorTextureProgram, 'position')\n", (76990, 77028), True, 'import OpenGL.GL as GL\n'), ((77054, 77111), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""color"""'], {}), "(self.colorTextureProgram, 'color')\n", (77076, 77111), True, 'import OpenGL.GL as GL\n'), ((77135, 77195), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""vertexUV"""'], {}), "(self.colorTextureProgram, 'vertexUV')\n", (77157, 77195), True, 'import OpenGL.GL as GL\n'), ((77314, 77370), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""MVP"""'], {}), "(self.colorTextureProgram, 'MVP')\n", (77337, 77370), True, 'import OpenGL.GL as GL\n'), ((77693, 77712), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['(2.0)'], {}), '(2.0)\n', (77707, 77712), True, 'import OpenGL.GL as GL\n'), ((81815, 81852), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (81831, 81852), True, 'import OpenGL.GL as GL\n'), ((81861, 81884), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (81881, 81884), True, 'import OpenGL.GL as GL\n'), ((81912, 81981), 
'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""myTextureSampler"""'], {}), "(self.colorTextureProgram, 'myTextureSampler')\n", (81935, 81981), True, 'import OpenGL.GL as GL\n'), ((82133, 82163), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (82144, 82163), True, 'import OpenGL.GL as GL\n'), ((82241, 82274), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (82252, 82274), True, 'import OpenGL.GL as GL\n'), ((82283, 82309), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (82304, 82309), True, 'import OpenGL.GL as GL\n'), ((82335, 83343), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n \n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n \n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 
barycentric_vert_out;\n out vec2 UV;\n \n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n \n }"""\n , GL.GL_VERTEX_SHADER)\n', (82356, 83343), True, 'import OpenGL.GL.shaders as shaders\n'), ((83368, 84754), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n \n layout(location = 3) uniform sampler2D myTextureSampler;\n\n uniform float ww;\n uniform float wh;\n \n // Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n \n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n \n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n \n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n \n layout(location = 3) uniform sampler2D myTextureSampler;\n\n uniform float ww;\n uniform float wh;\n \n // 
Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n \n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n \n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n \n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (83389, 84754), True, 'import OpenGL.GL.shaders as shaders\n'), ((84781, 84842), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'ERRORS_FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)\n', (84803, 84842), True, 'import OpenGL.GL.shaders as shaders\n'), ((84874, 85058), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """\n , GL.GL_VERTEX_SHADER)\n', (84895, 85058), True, 'import OpenGL.GL.shaders as shaders\n'), ((85082, 85619), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n \n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n \n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""', 'GL.GL_GEOMETRY_SHADER'], {}), '(\n """#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n \n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n \n void main() {\n 
for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""\n , GL.GL_GEOMETRY_SHADER)\n', (85103, 85619), True, 'import OpenGL.GL.shaders as shaders\n'), ((85652, 87261), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n \n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n\n\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n \n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) 
uniform sampler2DMS sample_barycentric_coords2;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n\n\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (85673, 87261), True, 'import OpenGL.GL.shaders as shaders\n'), ((87261, 87307), 'OpenGL.GL.glClampColor', 'GL.glClampColor', (['GL.GL_CLAMP_READ_COLOR', '(False)'], {}), '(GL.GL_CLAMP_READ_COLOR, False)\n', (87276, 87307), True, 'import OpenGL.GL as GL\n'), ((87465, 87558), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['FETCH_VERTEX_SHADER', 'FETCH_GEOMETRY_SHADER', 'FETCH_FRAGMENT_SHADER'], {}), '(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER,\n FETCH_FRAGMENT_SHADER)\n', (87487, 87558), True, 'import OpenGL.GL.shaders as shaders\n'), ((87581, 87593), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (87590, 87593), True, 'import OpenGL.GL as GL\n'), ((89278, 89296), 'numpy.ones', 'np.ones', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (89285, 89296), True, 'import numpy as np\n'), ((89330, 89342), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (89339, 89342), True, 'import OpenGL.GL as GL\n'), ((89351, 89396), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'self.whitePixelTextureID'], {}), '(1, self.whitePixelTextureID)\n', (89367, 89396), 
True, 'import OpenGL.GL as GL\n'), ((89407, 89467), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'self.whitePixelTextureID'], {}), '(GL.GL_TEXTURE_2D, self.whitePixelTextureID)\n', (89423, 89467), True, 'import OpenGL.GL as GL\n'), ((89555, 89644), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (89572, 89644), True, 'import OpenGL.GL as GL\n'), ((89648, 89761), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (89666, 89761), True, 'import OpenGL.GL as GL\n'), ((89787, 89810), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (89807, 89810), True, 'import OpenGL.GL as GL\n'), ((89820, 89846), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (89834, 89846), True, 'import OpenGL.GL as GL\n'), ((89856, 89886), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (89867, 89886), True, 'import OpenGL.GL as GL\n'), ((89964, 89997), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (89975, 89997), True, 'import OpenGL.GL as GL\n'), ((90006, 90032), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (90027, 90032), True, 'import OpenGL.GL as GL\n'), ((90042, 90101), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)\n', (90062, 90101), True, 'import OpenGL.GL as GL\n'), ((90140, 90159), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (90156, 90159), True, 'import OpenGL.GL 
as GL\n'), ((90168, 90242), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (90184, 90242), True, 'import OpenGL.GL as GL\n'), ((90251, 90393), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RGB8, self.frustum['width'], self.frustum['height'], False)\n", (90277, 90393), True, 'import OpenGL.GL as GL\n'), ((90684, 90819), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)\n', (90709, 90819), True, 'import OpenGL.GL as GL\n'), ((90862, 90881), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (90878, 90881), True, 'import OpenGL.GL as GL\n'), ((90890, 90978), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (90906, 90978), True, 'import OpenGL.GL as GL\n'), ((90982, 91125), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (91008, 91125), True, 'import OpenGL.GL as GL\n'), ((91416, 91560), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 
'self.texture_errors_sample_position', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)\n', (91441, 91560), True, 'import OpenGL.GL as GL\n'), ((91600, 91619), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (91616, 91619), True, 'import OpenGL.GL as GL\n'), ((91628, 91713), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (91644, 91713), True, 'import OpenGL.GL as GL\n'), ((91717, 91860), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_R32UI, self.frustum['width'], self.frustum['height'], False)\n", (91743, 91860), True, 'import OpenGL.GL as GL\n'), ((91951, 92092), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)\n', (91976, 92092), True, 'import OpenGL.GL as GL\n'), ((92149, 92168), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (92165, 92168), True, 'import OpenGL.GL as GL\n'), ((92177, 92269), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (92193, 92269), True, 'import OpenGL.GL as GL\n'), ((92273, 92416), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", 
"self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (92299, 92416), True, 'import OpenGL.GL as GL\n'), ((92707, 92855), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1, 0)\n', (92732, 92855), True, 'import OpenGL.GL as GL\n'), ((92902, 92921), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (92918, 92921), True, 'import OpenGL.GL as GL\n'), ((92930, 93022), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (92946, 93022), True, 'import OpenGL.GL as GL\n'), ((93026, 93169), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (93052, 93169), True, 'import OpenGL.GL as GL\n'), ((93460, 93608), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2, 0)\n', (93485, 93608), True, 'import OpenGL.GL as GL\n'), ((93637, 93656), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (93653, 93656), True, 'import OpenGL.GL as GL\n'), ((93665, 93733), 
'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)\n', (93681, 93733), True, 'import OpenGL.GL as GL\n'), ((93742, 93895), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'], False)\n", (93768, 93895), True, 'import OpenGL.GL as GL\n'), ((94186, 94314), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)\n', (94211, 94314), True, 'import OpenGL.GL as GL\n'), ((94727, 94756), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (94738, 94756), True, 'import OpenGL.GL as GL\n'), ((94765, 94815), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (94781, 94815), True, 'import OpenGL.GL as GL\n'), ((94865, 94899), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (94875, 94899), True, 'import OpenGL.GL as GL\n'), ((94908, 94942), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (94918, 94942), True, 'import OpenGL.GL as GL\n'), ((95136, 95178), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (95156, 95178), True, 'import OpenGL.GL as GL\n'), ((95212, 95235), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (95232, 95235), True, 'import 
OpenGL.GL as GL\n'), ((95245, 95271), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (95259, 95271), True, 'import OpenGL.GL as GL\n'), ((95281, 95343), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)\n', (95301, 95343), True, 'import OpenGL.GL as GL\n'), ((95394, 95418), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (95415, 95418), True, 'import OpenGL.GL as GL\n'), ((95427, 95513), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_render)\n', (95448, 95513), True, 'import OpenGL.GL as GL\n'), ((95517, 95625), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (95541, 95625), True, 'import OpenGL.GL as GL\n'), ((95629, 95766), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)\n', (95657, 95766), True, 'import OpenGL.GL as GL\n'), ((95814, 95838), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (95835, 95838), True, 'import OpenGL.GL as GL\n'), ((95847, 95935), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_position)\n', (95868, 95935), True, 'import OpenGL.GL as GL\n'), ((95939, 96048), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', 
(['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (95963, 96048), True, 'import OpenGL.GL as GL\n'), ((96052, 96191), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)\n', (96080, 96191), True, 'import OpenGL.GL as GL\n'), ((96235, 96259), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (96256, 96259), True, 'import OpenGL.GL as GL\n'), ((96268, 96347), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (96289, 96347), True, 'import OpenGL.GL as GL\n'), ((96356, 96465), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (96380, 96465), True, 'import OpenGL.GL as GL\n'), ((96469, 96604), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (96497, 96604), True, 'import OpenGL.GL as GL\n'), ((96665, 96689), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (96686, 96689), True, 'import OpenGL.GL as GL\n'), ((96698, 96790), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], 
{}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric1)\n', (96719, 96790), True, 'import OpenGL.GL as GL\n'), ((96794, 96903), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (96818, 96903), True, 'import OpenGL.GL as GL\n'), ((96907, 97050), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)\n', (96935, 97050), True, 'import OpenGL.GL as GL\n'), ((97102, 97126), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (97123, 97126), True, 'import OpenGL.GL as GL\n'), ((97135, 97227), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric2)\n', (97156, 97227), True, 'import OpenGL.GL as GL\n'), ((97231, 97340), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (97255, 97340), True, 'import OpenGL.GL as GL\n'), ((97344, 97487), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)\n', (97372, 97487), True, 'import OpenGL.GL as GL\n'), ((97520, 97544), 
'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (97541, 97544), True, 'import OpenGL.GL as GL\n'), ((97553, 97621), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (97574, 97621), True, 'import OpenGL.GL as GL\n'), ((97630, 97749), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (97654, 97749), True, 'import OpenGL.GL as GL\n'), ((97753, 97876), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (97781, 97876), True, 'import OpenGL.GL as GL\n'), ((97881, 97910), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (97892, 97910), True, 'import OpenGL.GL as GL\n'), ((97919, 97969), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (97935, 97969), True, 'import OpenGL.GL as GL\n'), ((97978, 98007), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (97990, 98007), True, 'import OpenGL.GL as GL\n'), ((98017, 98051), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (98027, 98051), True, 'import OpenGL.GL as GL\n'), ((98060, 98094), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (98070, 98094), True, 'import OpenGL.GL as GL\n'), ((98288, 98330), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 
'(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (98308, 98330), True, 'import OpenGL.GL as GL\n'), ((98379, 98402), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (98399, 98402), True, 'import OpenGL.GL as GL\n'), ((98412, 98474), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_errors_nonms'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)\n', (98432, 98474), True, 'import OpenGL.GL as GL\n'), ((98511, 98535), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (98532, 98535), True, 'import OpenGL.GL as GL\n'), ((98544, 98611), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_render)\n', (98565, 98611), True, 'import OpenGL.GL as GL\n'), ((98620, 98728), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (98644, 98728), True, 'import OpenGL.GL as GL\n'), ((98732, 98855), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, render_buf_errors_render)\n', (98760, 98855), True, 'import OpenGL.GL as GL\n'), ((98896, 98920), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (98917, 98920), True, 'import OpenGL.GL as GL\n'), ((98929, 99005), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (98950, 99005), True, 'import OpenGL.GL as GL\n'), ((99014, 99123), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', 
(['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (99038, 99123), True, 'import OpenGL.GL as GL\n'), ((99127, 99259), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (99155, 99259), True, 'import OpenGL.GL as GL\n'), ((99296, 99320), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (99317, 99320), True, 'import OpenGL.GL as GL\n'), ((99329, 99401), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (99350, 99401), True, 'import OpenGL.GL as GL\n'), ((99410, 99519), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (99434, 99519), True, 'import OpenGL.GL as GL\n'), ((99523, 99651), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (99551, 99651), True, 'import OpenGL.GL as GL\n'), ((99706, 99730), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (99727, 99730), True, 'import OpenGL.GL as GL\n'), ((99739, 99824), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, 
render_buf_errors_sample_barycentric1\n )\n', (99760, 99824), True, 'import OpenGL.GL as GL\n'), ((99828, 99937), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (99852, 99937), True, 'import OpenGL.GL as GL\n'), ((99941, 100077), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)\n', (99969, 100077), True, 'import OpenGL.GL as GL\n'), ((100122, 100146), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (100143, 100146), True, 'import OpenGL.GL as GL\n'), ((100155, 100240), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2\n )\n', (100176, 100240), True, 'import OpenGL.GL as GL\n'), ((100244, 100353), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (100268, 100353), True, 'import OpenGL.GL as GL\n'), ((100357, 100493), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)\n', (100385, 100493), True, 'import OpenGL.GL as GL\n'), ((100530, 100554), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', 
(100551, 100554), True, 'import OpenGL.GL as GL\n'), ((100563, 100626), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, z_buf_samples_errors)\n', (100584, 100626), True, 'import OpenGL.GL as GL\n'), ((100635, 100754), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (100659, 100754), True, 'import OpenGL.GL as GL\n'), ((100758, 100876), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, z_buf_samples_errors)\n', (100786, 100876), True, 'import OpenGL.GL as GL\n'), ((100881, 100915), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (100891, 100915), True, 'import OpenGL.GL as GL\n'), ((100924, 100958), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (100934, 100958), True, 'import OpenGL.GL as GL\n'), ((101152, 101194), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (101172, 101194), True, 'import OpenGL.GL as GL\n'), ((101226, 101295), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""myTextureSampler"""'], {}), "(self.errorTextureProgram, 'myTextureSampler')\n", (101249, 101295), True, 'import OpenGL.GL as GL\n'), ((101355, 101415), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""position"""'], {}), "(self.errorTextureProgram, 'position')\n", (101377, 101415), True, 'import OpenGL.GL as GL\n'), ((101441, 101500), 
'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""colorIn"""'], {}), "(self.errorTextureProgram, 'colorIn')\n", (101463, 101500), True, 'import OpenGL.GL as GL\n'), ((101524, 101584), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""vertexUV"""'], {}), "(self.errorTextureProgram, 'vertexUV')\n", (101546, 101584), True, 'import OpenGL.GL as GL\n'), ((101613, 101672), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""face_id"""'], {}), "(self.errorTextureProgram, 'face_id')\n", (101635, 101672), True, 'import OpenGL.GL as GL\n'), ((101704, 101767), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""barycentric"""'], {}), "(self.errorTextureProgram, 'barycentric')\n", (101726, 101767), True, 'import OpenGL.GL as GL\n'), ((103962, 103985), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (103982, 103985), True, 'import OpenGL.GL as GL\n'), ((104011, 104023), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (104020, 104023), True, 'import OpenGL.GL as GL\n'), ((104032, 104070), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_quad'], {}), '(1, self.vao_quad)\n', (104052, 104070), True, 'import OpenGL.GL as GL\n'), ((104079, 104114), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (104099, 104114), True, 'import OpenGL.GL as GL\n'), ((107005, 107035), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (107016, 107035), True, 'import OpenGL.GL as GL\n'), ((107044, 107077), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (107055, 107077), True, 'import OpenGL.GL as GL\n'), ((107086, 107112), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (107107, 107112), True, 
'import OpenGL.GL as GL\n'), ((107320, 107361), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.errorTextureProgram'], {}), '(self.errorTextureProgram)\n', (107335, 107361), True, 'import OpenGL.GL as GL\n'), ((107371, 107435), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (107391, 107435), True, 'import OpenGL.GL as GL\n'), ((107596, 107631), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (107612, 107631), True, 'import OpenGL.GL as GL\n'), ((107691, 107726), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (107706, 107726), True, 'import OpenGL.GL as GL\n'), ((107731, 107790), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (107741, 107790), True, 'import OpenGL.GL as GL\n'), ((107808, 107863), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""ww"""'], {}), "(self.errorTextureProgram, 'ww')\n", (107831, 107863), True, 'import OpenGL.GL as GL\n'), ((107880, 107935), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""wh"""'], {}), "(self.errorTextureProgram, 'wh')\n", (107903, 107935), True, 'import OpenGL.GL as GL\n'), ((107944, 107988), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (107958, 107988), True, 'import OpenGL.GL as GL\n'), ((107997, 108042), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (108011, 108042), True, 'import OpenGL.GL as GL\n'), ((108188, 108227), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (108194, 
108227), True, 'import numpy as np\n'), ((112544, 112585), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.fetchSamplesProgram'], {}), '(self.fetchSamplesProgram)\n', (112559, 112585), True, 'import OpenGL.GL as GL\n'), ((112654, 112713), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""colors"""'], {}), "(self.fetchSamplesProgram, 'colors')\n", (112677, 112713), True, 'import OpenGL.GL as GL\n'), ((112749, 112818), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_positions"""'], {}), "(self.fetchSamplesProgram, 'sample_positions')\n", (112772, 112818), True, 'import OpenGL.GL as GL\n'), ((112850, 112915), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_faces"""'], {}), "(self.fetchSamplesProgram, 'sample_faces')\n", (112873, 112915), True, 'import OpenGL.GL as GL\n'), ((112954, 113033), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords1"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords1')\n", (112977, 113033), True, 'import OpenGL.GL as GL\n'), ((113072, 113151), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords2"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords2')\n", (113095, 113151), True, 'import OpenGL.GL as GL\n'), ((113426, 113481), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""ww"""'], {}), "(self.fetchSamplesProgram, 'ww')\n", (113449, 113481), True, 'import OpenGL.GL as GL\n'), ((113498, 113553), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""wh"""'], {}), "(self.fetchSamplesProgram, 'wh')\n", (113521, 113553), True, 'import OpenGL.GL as GL\n'), ((113562, 113606), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', 
"self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (113576, 113606), True, 'import OpenGL.GL as GL\n'), ((113615, 113660), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (113629, 113660), True, 'import OpenGL.GL as GL\n'), ((113685, 113760), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (113693, 113760), True, 'import numpy as np\n'), ((113794, 113869), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (113802, 113869), True, 'import numpy as np\n'), ((114032, 114107), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (114040, 114107), True, 'import numpy as np\n'), ((114151, 114226), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 1]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 1])\n", (114159, 114226), True, 'import numpy as np\n'), ((114268, 114343), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (114276, 114343), True, 'import numpy as np\n'), ((114352, 114382), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (114364, 114382), True, 'import OpenGL.GL as GL\n'), ((114392, 114459), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)\n', (114412, 114459), True, 'import OpenGL.GL as GL\n'), ((114645, 114680), 'OpenGL.GL.glDrawBuffers', 
'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (114661, 114680), True, 'import OpenGL.GL as GL\n'), ((114690, 114725), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (114705, 114725), True, 'import OpenGL.GL as GL\n'), ((114730, 114789), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (114740, 114789), True, 'import OpenGL.GL as GL\n'), ((114813, 114837), 'numpy.arange', 'np.arange', (['self.nsamples'], {}), '(self.nsamples)\n', (114822, 114837), True, 'import numpy as np\n'), ((118791, 118814), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (118811, 118814), True, 'import OpenGL.GL as GL\n'), ((118824, 118859), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (118839, 118859), True, 'import OpenGL.GL as GL\n'), ((118863, 118892), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (118874, 118892), True, 'import OpenGL.GL as GL\n'), ((118901, 118932), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (118913, 118932), True, 'import OpenGL.GL as GL\n'), ((119013, 119037), 'numpy.mean', 'np.mean', (['self.renders', '(0)'], {}), '(self.renders, 0)\n', (119020, 119037), True, 'import numpy as np\n'), ((119265, 119307), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.visibilityProgram_ms'], {}), '(self.visibilityProgram_ms)\n', (119280, 119307), True, 'import OpenGL.GL as GL\n'), ((119321, 119334), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (119331, 119334), True, 'import numpy as np\n'), ((119409, 119468), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (119419, 119468), True, 
'import OpenGL.GL as GL\n'), ((119687, 119717), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint8'}), '(fc, dtype=np.uint8)\n', (119697, 119717), True, 'import numpy as np\n'), ((120127, 120152), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (120147, 120152), True, 'import OpenGL.GL as GL\n'), ((120991, 121055), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (121011, 121055), True, 'import OpenGL.GL as GL\n'), ((121116, 121151), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(1)', 'drawingBuffers'], {}), '(1, drawingBuffers)\n', (121132, 121151), True, 'import OpenGL.GL as GL\n'), ((121397, 121427), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (121409, 121427), True, 'import OpenGL.GL as GL\n'), ((121525, 121554), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (121536, 121554), True, 'import OpenGL.GL as GL\n'), ((131170, 131196), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (131178, 131196), True, 'import numpy as np\n'), ((131902, 131958), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (131916, 131958), True, 'import numpy as np\n'), ((132441, 132497), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (132455, 132497), True, 'import numpy as np\n'), ((132980, 133036), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (132994, 133036), True, 'import numpy as np\n'), ((133560, 133574), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (133571, 133574), True, 'import numpy as np\n'), ((133737, 
133775), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', 'ident'], {}), '((p2 - p0)[:, None, :], ident)\n', (133745, 133775), True, 'import numpy as np\n'), ((133788, 133826), 'numpy.cross', 'np.cross', (['ident', '(p1 - p0)[:, None, :]'], {}), '(ident, (p1 - p0)[:, None, :])\n', (133796, 133826), True, 'import numpy as np\n'), ((134049, 134091), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp0'], {}), "('ijk,ikl->ijl', dntnorm, dntdp0)\n", (134058, 134091), True, 'import numpy as np\n'), ((134112, 134154), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp1'], {}), "('ijk,ikl->ijl', dntnorm, dntdp1)\n", (134121, 134154), True, 'import numpy as np\n'), ((134175, 134217), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp2'], {}), "('ijk,ikl->ijl', dntnorm, dntdp2)\n", (134184, 134217), True, 'import numpy as np\n'), ((134237, 134274), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp0'], {}), "('ij,ijk->ik', pre1, dntdp0)\n", (134246, 134274), True, 'import numpy as np\n'), ((134294, 134331), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp1'], {}), "('ij,ijk->ik', pre1, dntdp1)\n", (134303, 134331), True, 'import numpy as np\n'), ((134351, 134388), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp2'], {}), "('ij,ijk->ik', pre1, dntdp2)\n", (134360, 134388), True, 'import numpy as np\n'), ((138109, 138206), 'numpy.concatenate', 'np.concatenate', (['[db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:,\n None, :]], axis=1)\n', (138123, 138206), True, 'import numpy as np\n'), ((138217, 138314), 'numpy.concatenate', 'np.concatenate', (['[db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:,\n None, :]], axis=1)\n', (138231, 138314), True, 'import numpy as 
np\n'), ((138325, 138422), 'numpy.concatenate', 'np.concatenate', (['[db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:,\n None, :]], axis=1)\n', (138339, 138422), True, 'import numpy as np\n'), ((138442, 138512), 'numpy.concatenate', 'np.concatenate', (['[dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]]', '(2)'], {}), '([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)\n', (138456, 138512), True, 'import numpy as np\n'), ((138972, 139060), 'numpy.concatenate', 'np.concatenate', (['[dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]]'], {'axis': '(1)'}), '([dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]],\n axis=1)\n', (138986, 139060), True, 'import numpy as np\n'), ((140313, 140352), 'numpy.arange', 'np.arange', (['self.boundarybool_image.size'], {}), '(self.boundarybool_image.size)\n', (140322, 140352), True, 'import numpy as np\n'), ((140522, 140543), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (140528, 140543), True, 'import numpy as np\n'), ((141441, 141462), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (141447, 141462), True, 'import numpy as np\n'), ((153852, 153873), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (153858, 153873), True, 'import numpy as np\n'), ((155560, 155599), 'numpy.arange', 'np.arange', (['self.boundarybool_image.size'], {}), '(self.boundarybool_image.size)\n', (155569, 155599), True, 'import numpy as np\n'), ((156647, 156668), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (156653, 156668), True, 'import numpy as np\n'), ((173471, 173565), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (173484, 173565), True, 'import scipy.sparse as sp\n'), 
((173618, 173639), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (173624, 173639), True, 'import numpy as np\n'), ((175226, 175247), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (175232, 175247), True, 'import numpy as np\n'), ((181670, 181743), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (181683, 181743), True, 'import scipy.sparse as sp\n'), ((181843, 181864), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (181849, 181864), True, 'import numpy as np\n'), ((188616, 188666), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (188632, 188666), True, 'import OpenGL.GL as GL\n'), ((190060, 190110), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (190076, 190110), True, 'import OpenGL.GL as GL\n'), ((190152, 190187), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (190167, 190187), True, 'import OpenGL.GL as GL\n'), ((190274, 190328), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (190294, 190328), True, 'import OpenGL.GL as GL\n'), ((190337, 190396), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (190347, 190396), True, 'import OpenGL.GL as GL\n'), ((190406, 190440), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (190421, 190440), True, 'import OpenGL.GL as GL\n'), ((190512, 190561), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), 
'(GL.GL_FRAMEBUFFER, self.fbo)\n', (190532, 190561), True, 'import OpenGL.GL as GL\n'), ((190570, 190610), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (190585, 190610), True, 'import OpenGL.GL as GL\n'), ((190856, 190910), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (190876, 190910), True, 'import OpenGL.GL as GL\n'), ((191071, 191121), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (191087, 191121), True, 'import OpenGL.GL as GL\n'), ((191163, 191198), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (191178, 191198), True, 'import OpenGL.GL as GL\n'), ((191285, 191339), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (191305, 191339), True, 'import OpenGL.GL as GL\n'), ((191348, 191407), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (191358, 191407), True, 'import OpenGL.GL as GL\n'), ((191417, 191451), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (191432, 191451), True, 'import OpenGL.GL as GL\n'), ((191543, 191592), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (191563, 191592), True, 'import OpenGL.GL as GL\n'), ((191601, 191641), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (191616, 191641), True, 'import OpenGL.GL as GL\n'), ((191887, 191941), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), 
'(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (191907, 191941), True, 'import OpenGL.GL as GL\n'), ((192166, 192205), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (192172, 192205), True, 'import numpy as np\n'), ((193425, 193464), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (193445, 193464), True, 'import OpenGL.GL.shaders as shaders\n'), ((193474, 193528), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (193494, 193528), True, 'import OpenGL.GL as GL\n'), ((193537, 193596), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (193547, 193596), True, 'import OpenGL.GL as GL\n'), ((194341, 194390), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (194361, 194390), True, 'import OpenGL.GL as GL\n'), ((194400, 194440), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (194415, 194440), True, 'import OpenGL.GL as GL\n'), ((195244, 195305), 'numpy.zeros', 'np.zeros', (['(self.v.r.size / 3, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.v.r.size / 3, 2), dtype=np.float64, order='C')\n", (195252, 195305), True, 'import numpy as np\n'), ((195372, 195429), 'numpy.zeros', 'np.zeros', (['(self.vpe.size, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.vpe.size, 2), dtype=np.float64, order='C')\n", (195380, 195429), True, 'import numpy as np\n'), ((195753, 195787), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (195768, 195787), True, 'import OpenGL.GL as GL\n'), ((195893, 195934), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', 
(['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (195908, 195934), True, 'import OpenGL.GL as GL\n'), ((196172, 196202), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (196183, 196202), True, 'import OpenGL.GL as GL\n'), ((196458, 196512), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (196478, 196512), True, 'import OpenGL.GL as GL\n'), ((196521, 196580), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (196531, 196580), True, 'import OpenGL.GL as GL\n'), ((196768, 196827), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (196778, 196827), True, 'import OpenGL.GL as GL\n'), ((196972, 197011), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (196978, 197011), True, 'import numpy as np\n'), ((199165, 199219), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (199185, 199219), True, 'import OpenGL.GL as GL\n'), ((199228, 199401), 'OpenGL.GL.glBlitFramebuffer', 'GL.glBlitFramebuffer', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", '(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_COLOR_BUFFER_BIT', 'GL.GL_LINEAR'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], 0,\n 0, self.frustum['width'], self.frustum['height'], GL.\n GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)\n", (199248, 199401), True, 'import OpenGL.GL as GL\n'), ((199401, 199450), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (199421, 199450), True, 'import OpenGL.GL as GL\n'), 
((199459, 199499), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (199474, 199499), True, 'import OpenGL.GL as GL\n'), ((199746, 199800), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (199766, 199800), True, 'import OpenGL.GL as GL\n'), ((199809, 199840), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (199821, 199840), True, 'import OpenGL.GL as GL\n'), ((199849, 199884), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (199864, 199884), True, 'import OpenGL.GL as GL\n'), ((200607, 200631), 'numpy.round', 'np.round', (['texcoord_image'], {}), '(texcoord_image)\n', (200615, 200631), True, 'import numpy as np\n'), ((200813, 200831), 'OpenGL.GL.glGenBuffers', 'GL.glGenBuffers', (['(1)'], {}), '(1)\n', (200828, 200831), True, 'import OpenGL.GL as GL\n'), ((203901, 204333), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (203922, 204333), True, 'import OpenGL.GL.shaders as shaders\n'), ((204349, 204987), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex 
data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""\n , GL.GL_VERTEX_SHADER)\n', (204370, 204987), True, 'import OpenGL.GL.shaders as shaders\n'), ((205014, 205068), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (205036, 205068), True, 'import OpenGL.GL.shaders as shaders\n'), ((205209, 205269), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""position"""'], {}), "(self.colorTextureProgram, 'position')\n", (205231, 205269), True, 'import OpenGL.GL as GL\n'), ((205295, 205352), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""color"""'], {}), "(self.colorTextureProgram, 'color')\n", (205317, 205352), True, 'import OpenGL.GL as GL\n'), ((205376, 205436), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""vertexUV"""'], {}), "(self.colorTextureProgram, 'vertexUV')\n", (205398, 205436), True, 'import OpenGL.GL as GL\n'), ((205555, 205611), 
'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""MVP"""'], {}), "(self.colorTextureProgram, 'MVP')\n", (205578, 205611), True, 'import OpenGL.GL as GL\n'), ((205934, 205953), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['(2.0)'], {}), '(2.0)\n', (205948, 205953), True, 'import OpenGL.GL as GL\n'), ((210056, 210093), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (210072, 210093), True, 'import OpenGL.GL as GL\n'), ((210102, 210125), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (210122, 210125), True, 'import OpenGL.GL as GL\n'), ((210153, 210222), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""myTextureSampler"""'], {}), "(self.colorTextureProgram, 'myTextureSampler')\n", (210176, 210222), True, 'import OpenGL.GL as GL\n'), ((210374, 210404), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (210385, 210404), True, 'import OpenGL.GL as GL\n'), ((210482, 210515), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (210493, 210515), True, 'import OpenGL.GL as GL\n'), ((210524, 210550), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (210545, 210550), True, 'import OpenGL.GL as GL\n'), ((210576, 211584), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n \n // Values that stay constant for the whole mesh.\n void main(){\n // 
Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n \n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n \n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n \n }"""\n , GL.GL_VERTEX_SHADER)\n', (210597, 211584), True, 'import OpenGL.GL.shaders as shaders\n'), ((211609, 212995), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n \n layout(location = 3) uniform sampler2D myTextureSampler;\n\n uniform float ww;\n uniform float wh;\n \n // Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n \n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, 
UV).rgb;\n color = finalColor.rgb;\n \n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n \n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n \n layout(location = 3) uniform sampler2D myTextureSampler;\n\n uniform float ww;\n uniform float wh;\n \n // Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n \n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n \n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n \n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (211630, 212995), True, 'import OpenGL.GL.shaders as shaders\n'), ((213022, 213083), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'ERRORS_FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)\n', (213044, 213083), True, 'import OpenGL.GL.shaders as shaders\n'), ((213115, 213299), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """\n , GL.GL_VERTEX_SHADER)\n', (213136, 213299), True, 'import OpenGL.GL.shaders as 
shaders\n'), ((213323, 213860), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n \n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n \n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""', 'GL.GL_GEOMETRY_SHADER'], {}), '(\n """#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n \n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n \n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""\n , GL.GL_GEOMETRY_SHADER)\n', (213344, 213860), True, 'import OpenGL.GL.shaders as shaders\n'), ((213885, 215494), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n \n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = 
texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n\n\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n \n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n\n\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (213906, 215494), True, 'import OpenGL.GL.shaders as shaders\n'), ((215494, 215540), 'OpenGL.GL.glClampColor', 'GL.glClampColor', (['GL.GL_CLAMP_READ_COLOR', '(False)'], {}), '(GL.GL_CLAMP_READ_COLOR, False)\n', (215509, 215540), True, 'import OpenGL.GL as GL\n'), ((215698, 215791), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', 
(['FETCH_VERTEX_SHADER', 'FETCH_GEOMETRY_SHADER', 'FETCH_FRAGMENT_SHADER'], {}), '(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER,\n FETCH_FRAGMENT_SHADER)\n', (215720, 215791), True, 'import OpenGL.GL.shaders as shaders\n'), ((215814, 215826), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (215823, 215826), True, 'import OpenGL.GL as GL\n'), ((217511, 217529), 'numpy.ones', 'np.ones', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (217518, 217529), True, 'import numpy as np\n'), ((217563, 217575), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (217572, 217575), True, 'import OpenGL.GL as GL\n'), ((217584, 217629), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'self.whitePixelTextureID'], {}), '(1, self.whitePixelTextureID)\n', (217600, 217629), True, 'import OpenGL.GL as GL\n'), ((217640, 217700), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'self.whitePixelTextureID'], {}), '(GL.GL_TEXTURE_2D, self.whitePixelTextureID)\n', (217656, 217700), True, 'import OpenGL.GL as GL\n'), ((217788, 217877), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (217805, 217877), True, 'import OpenGL.GL as GL\n'), ((217881, 217994), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (217899, 217994), True, 'import OpenGL.GL as GL\n'), ((218020, 218043), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (218040, 218043), True, 'import OpenGL.GL as GL\n'), ((218053, 218079), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (218067, 218079), True, 'import OpenGL.GL as GL\n'), ((218089, 218119), 
'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (218100, 218119), True, 'import OpenGL.GL as GL\n'), ((218197, 218230), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (218208, 218230), True, 'import OpenGL.GL as GL\n'), ((218239, 218265), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (218260, 218265), True, 'import OpenGL.GL as GL\n'), ((218275, 218334), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)\n', (218295, 218334), True, 'import OpenGL.GL as GL\n'), ((218373, 218392), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (218389, 218392), True, 'import OpenGL.GL as GL\n'), ((218401, 218475), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (218417, 218475), True, 'import OpenGL.GL as GL\n'), ((218484, 218626), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RGB8, self.frustum['width'], self.frustum['height'], False)\n", (218510, 218626), True, 'import OpenGL.GL as GL\n'), ((218917, 219052), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)\n', (218942, 219052), True, 'import OpenGL.GL as GL\n'), ((219095, 219114), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (219111, 219114), True, 'import OpenGL.GL as GL\n'), 
((219123, 219211), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (219139, 219211), True, 'import OpenGL.GL as GL\n'), ((219215, 219358), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (219241, 219358), True, 'import OpenGL.GL as GL\n'), ((219649, 219793), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)\n', (219674, 219793), True, 'import OpenGL.GL as GL\n'), ((219833, 219852), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (219849, 219852), True, 'import OpenGL.GL as GL\n'), ((219861, 219946), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (219877, 219946), True, 'import OpenGL.GL as GL\n'), ((219950, 220093), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_R32UI, self.frustum['width'], self.frustum['height'], False)\n", (219976, 220093), True, 'import OpenGL.GL as GL\n'), ((220184, 220325), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 
'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)\n', (220209, 220325), True, 'import OpenGL.GL as GL\n'), ((220382, 220401), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (220398, 220401), True, 'import OpenGL.GL as GL\n'), ((220410, 220502), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (220426, 220502), True, 'import OpenGL.GL as GL\n'), ((220506, 220649), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (220532, 220649), True, 'import OpenGL.GL as GL\n'), ((220940, 221088), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1, 0)\n', (220965, 221088), True, 'import OpenGL.GL as GL\n'), ((221135, 221154), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (221151, 221154), True, 'import OpenGL.GL as GL\n'), ((221163, 221255), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (221179, 221255), True, 'import OpenGL.GL as GL\n'), ((221259, 221402), 'OpenGL.GL.glTexImage2DMultisample', 
'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (221285, 221402), True, 'import OpenGL.GL as GL\n'), ((221693, 221841), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2, 0)\n', (221718, 221841), True, 'import OpenGL.GL as GL\n'), ((221870, 221889), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (221886, 221889), True, 'import OpenGL.GL as GL\n'), ((221898, 221966), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)\n', (221914, 221966), True, 'import OpenGL.GL as GL\n'), ((221975, 222128), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'], False)\n", (222001, 222128), True, 'import OpenGL.GL as GL\n'), ((222419, 222547), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)\n', (222444, 222547), True, 'import OpenGL.GL as GL\n'), ((222960, 222989), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), 
'(GL.GL_DEPTH_TEST)\n', (222971, 222989), True, 'import OpenGL.GL as GL\n'), ((222998, 223048), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (223014, 223048), True, 'import OpenGL.GL as GL\n'), ((223098, 223132), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (223108, 223132), True, 'import OpenGL.GL as GL\n'), ((223141, 223175), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (223151, 223175), True, 'import OpenGL.GL as GL\n'), ((223369, 223411), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (223389, 223411), True, 'import OpenGL.GL as GL\n'), ((223445, 223468), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (223465, 223468), True, 'import OpenGL.GL as GL\n'), ((223478, 223504), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (223492, 223504), True, 'import OpenGL.GL as GL\n'), ((223514, 223576), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)\n', (223534, 223576), True, 'import OpenGL.GL as GL\n'), ((223627, 223651), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (223648, 223651), True, 'import OpenGL.GL as GL\n'), ((223660, 223746), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_render)\n', (223681, 223746), True, 'import OpenGL.GL as GL\n'), ((223750, 223858), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 
'width'], self.frustum['height'])\n", (223774, 223858), True, 'import OpenGL.GL as GL\n'), ((223862, 223999), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)\n', (223890, 223999), True, 'import OpenGL.GL as GL\n'), ((224047, 224071), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (224068, 224071), True, 'import OpenGL.GL as GL\n'), ((224080, 224168), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_position)\n', (224101, 224168), True, 'import OpenGL.GL as GL\n'), ((224172, 224281), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (224196, 224281), True, 'import OpenGL.GL as GL\n'), ((224285, 224424), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)\n', (224313, 224424), True, 'import OpenGL.GL as GL\n'), ((224468, 224492), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (224489, 224492), True, 'import OpenGL.GL as GL\n'), ((224501, 224580), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (224522, 224580), True, 'import OpenGL.GL 
as GL\n'), ((224589, 224698), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (224613, 224698), True, 'import OpenGL.GL as GL\n'), ((224702, 224837), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (224730, 224837), True, 'import OpenGL.GL as GL\n'), ((224898, 224922), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (224919, 224922), True, 'import OpenGL.GL as GL\n'), ((224931, 225023), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric1)\n', (224952, 225023), True, 'import OpenGL.GL as GL\n'), ((225027, 225136), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (225051, 225136), True, 'import OpenGL.GL as GL\n'), ((225140, 225283), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)\n', (225168, 225283), True, 'import OpenGL.GL as GL\n'), ((225335, 225359), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (225356, 225359), True, 'import OpenGL.GL as GL\n'), 
((225368, 225460), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric2)\n', (225389, 225460), True, 'import OpenGL.GL as GL\n'), ((225464, 225573), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (225488, 225573), True, 'import OpenGL.GL as GL\n'), ((225577, 225720), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)\n', (225605, 225720), True, 'import OpenGL.GL as GL\n'), ((225753, 225777), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (225774, 225777), True, 'import OpenGL.GL as GL\n'), ((225786, 225854), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (225807, 225854), True, 'import OpenGL.GL as GL\n'), ((225863, 225982), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (225887, 225982), True, 'import OpenGL.GL as GL\n'), ((225986, 226109), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, 
self.z_buf_samples_errors)\n', (226014, 226109), True, 'import OpenGL.GL as GL\n'), ((226114, 226143), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (226125, 226143), True, 'import OpenGL.GL as GL\n'), ((226152, 226202), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (226168, 226202), True, 'import OpenGL.GL as GL\n'), ((226211, 226240), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (226223, 226240), True, 'import OpenGL.GL as GL\n'), ((226250, 226284), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (226260, 226284), True, 'import OpenGL.GL as GL\n'), ((226293, 226327), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (226303, 226327), True, 'import OpenGL.GL as GL\n'), ((226521, 226563), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (226541, 226563), True, 'import OpenGL.GL as GL\n'), ((226612, 226635), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (226632, 226635), True, 'import OpenGL.GL as GL\n'), ((226645, 226707), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_errors_nonms'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)\n', (226665, 226707), True, 'import OpenGL.GL as GL\n'), ((226744, 226768), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (226765, 226768), True, 'import OpenGL.GL as GL\n'), ((226777, 226844), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_render)\n', (226798, 226844), True, 'import OpenGL.GL as GL\n'), ((226853, 226961), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', 
(['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (226877, 226961), True, 'import OpenGL.GL as GL\n'), ((226965, 227088), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, render_buf_errors_render)\n', (226993, 227088), True, 'import OpenGL.GL as GL\n'), ((227129, 227153), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (227150, 227153), True, 'import OpenGL.GL as GL\n'), ((227162, 227238), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (227183, 227238), True, 'import OpenGL.GL as GL\n'), ((227247, 227356), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (227271, 227356), True, 'import OpenGL.GL as GL\n'), ((227360, 227492), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (227388, 227492), True, 'import OpenGL.GL as GL\n'), ((227529, 227553), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (227550, 227553), True, 'import OpenGL.GL as GL\n'), ((227562, 227634), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_RENDERBUFFER, 
render_buf_errors_sample_face)\n', (227583, 227634), True, 'import OpenGL.GL as GL\n'), ((227643, 227752), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (227667, 227752), True, 'import OpenGL.GL as GL\n'), ((227756, 227884), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (227784, 227884), True, 'import OpenGL.GL as GL\n'), ((227939, 227963), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (227960, 227963), True, 'import OpenGL.GL as GL\n'), ((227972, 228057), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1\n )\n', (227993, 228057), True, 'import OpenGL.GL as GL\n'), ((228061, 228170), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (228085, 228170), True, 'import OpenGL.GL as GL\n'), ((228174, 228310), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)\n', (228202, 228310), True, 'import OpenGL.GL as GL\n'), ((228355, 228379), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (228376, 228379), 
True, 'import OpenGL.GL as GL\n'), ((228388, 228473), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2\n )\n', (228409, 228473), True, 'import OpenGL.GL as GL\n'), ((228477, 228586), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (228501, 228586), True, 'import OpenGL.GL as GL\n'), ((228590, 228726), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)\n', (228618, 228726), True, 'import OpenGL.GL as GL\n'), ((228763, 228787), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (228784, 228787), True, 'import OpenGL.GL as GL\n'), ((228796, 228859), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, z_buf_samples_errors)\n', (228817, 228859), True, 'import OpenGL.GL as GL\n'), ((228868, 228987), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (228892, 228987), True, 'import OpenGL.GL as GL\n'), ((228991, 229109), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, 
z_buf_samples_errors)\n', (229019, 229109), True, 'import OpenGL.GL as GL\n'), ((229114, 229148), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (229124, 229148), True, 'import OpenGL.GL as GL\n'), ((229157, 229191), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (229167, 229191), True, 'import OpenGL.GL as GL\n'), ((229385, 229427), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (229405, 229427), True, 'import OpenGL.GL as GL\n'), ((229459, 229528), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""myTextureSampler"""'], {}), "(self.errorTextureProgram, 'myTextureSampler')\n", (229482, 229528), True, 'import OpenGL.GL as GL\n'), ((229588, 229648), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""position"""'], {}), "(self.errorTextureProgram, 'position')\n", (229610, 229648), True, 'import OpenGL.GL as GL\n'), ((229674, 229733), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""colorIn"""'], {}), "(self.errorTextureProgram, 'colorIn')\n", (229696, 229733), True, 'import OpenGL.GL as GL\n'), ((229757, 229817), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""vertexUV"""'], {}), "(self.errorTextureProgram, 'vertexUV')\n", (229779, 229817), True, 'import OpenGL.GL as GL\n'), ((229846, 229905), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""face_id"""'], {}), "(self.errorTextureProgram, 'face_id')\n", (229868, 229905), True, 'import OpenGL.GL as GL\n'), ((229937, 230000), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""barycentric"""'], {}), "(self.errorTextureProgram, 'barycentric')\n", (229959, 230000), True, 'import OpenGL.GL as 
GL\n'), ((232192, 232215), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (232212, 232215), True, 'import OpenGL.GL as GL\n'), ((232241, 232253), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (232250, 232253), True, 'import OpenGL.GL as GL\n'), ((232262, 232300), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_quad'], {}), '(1, self.vao_quad)\n', (232282, 232300), True, 'import OpenGL.GL as GL\n'), ((232309, 232344), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (232329, 232344), True, 'import OpenGL.GL as GL\n'), ((235235, 235265), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (235246, 235265), True, 'import OpenGL.GL as GL\n'), ((235274, 235307), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (235285, 235307), True, 'import OpenGL.GL as GL\n'), ((235316, 235342), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (235337, 235342), True, 'import OpenGL.GL as GL\n'), ((235550, 235591), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.errorTextureProgram'], {}), '(self.errorTextureProgram)\n', (235565, 235591), True, 'import OpenGL.GL as GL\n'), ((235601, 235665), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (235621, 235665), True, 'import OpenGL.GL as GL\n'), ((235826, 235861), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (235842, 235861), True, 'import OpenGL.GL as GL\n'), ((235921, 235956), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (235936, 235956), True, 'import OpenGL.GL as GL\n'), ((235961, 236020), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | 
GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (235971, 236020), True, 'import OpenGL.GL as GL\n'), ((236038, 236093), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""ww"""'], {}), "(self.errorTextureProgram, 'ww')\n", (236061, 236093), True, 'import OpenGL.GL as GL\n'), ((236110, 236165), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""wh"""'], {}), "(self.errorTextureProgram, 'wh')\n", (236133, 236165), True, 'import OpenGL.GL as GL\n'), ((236174, 236218), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (236188, 236218), True, 'import OpenGL.GL as GL\n'), ((236227, 236272), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (236241, 236272), True, 'import OpenGL.GL as GL\n'), ((236418, 236457), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (236424, 236457), True, 'import numpy as np\n'), ((240774, 240815), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.fetchSamplesProgram'], {}), '(self.fetchSamplesProgram)\n', (240789, 240815), True, 'import OpenGL.GL as GL\n'), ((240884, 240943), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""colors"""'], {}), "(self.fetchSamplesProgram, 'colors')\n", (240907, 240943), True, 'import OpenGL.GL as GL\n'), ((240979, 241048), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_positions"""'], {}), "(self.fetchSamplesProgram, 'sample_positions')\n", (241002, 241048), True, 'import OpenGL.GL as GL\n'), ((241080, 241145), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_faces"""'], {}), "(self.fetchSamplesProgram, 'sample_faces')\n", (241103, 
241145), True, 'import OpenGL.GL as GL\n'), ((241184, 241263), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords1"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords1')\n", (241207, 241263), True, 'import OpenGL.GL as GL\n'), ((241302, 241381), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords2"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords2')\n", (241325, 241381), True, 'import OpenGL.GL as GL\n'), ((241656, 241711), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""ww"""'], {}), "(self.fetchSamplesProgram, 'ww')\n", (241679, 241711), True, 'import OpenGL.GL as GL\n'), ((241728, 241783), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""wh"""'], {}), "(self.fetchSamplesProgram, 'wh')\n", (241751, 241783), True, 'import OpenGL.GL as GL\n'), ((241792, 241836), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (241806, 241836), True, 'import OpenGL.GL as GL\n'), ((241845, 241890), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (241859, 241890), True, 'import OpenGL.GL as GL\n'), ((241915, 241990), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (241923, 241990), True, 'import numpy as np\n'), ((242024, 242099), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (242032, 242099), True, 'import numpy as np\n'), ((242262, 242337), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], 
self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (242270, 242337), True, 'import numpy as np\n'), ((242381, 242456), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 1]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 1])\n", (242389, 242456), True, 'import numpy as np\n'), ((242498, 242573), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (242506, 242573), True, 'import numpy as np\n'), ((242582, 242612), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (242594, 242612), True, 'import OpenGL.GL as GL\n'), ((242622, 242689), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)\n', (242642, 242689), True, 'import OpenGL.GL as GL\n'), ((242875, 242910), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (242891, 242910), True, 'import OpenGL.GL as GL\n'), ((242920, 242955), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (242935, 242955), True, 'import OpenGL.GL as GL\n'), ((242960, 243019), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (242970, 243019), True, 'import OpenGL.GL as GL\n'), ((243043, 243067), 'numpy.arange', 'np.arange', (['self.nsamples'], {}), '(self.nsamples)\n', (243052, 243067), True, 'import numpy as np\n'), ((247021, 247044), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (247041, 247044), True, 'import OpenGL.GL as GL\n'), ((247054, 247089), 'OpenGL.GL.glClearColor', 'GL.glClearColor', 
(['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (247069, 247089), True, 'import OpenGL.GL as GL\n'), ((247093, 247122), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (247104, 247122), True, 'import OpenGL.GL as GL\n'), ((247131, 247162), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (247143, 247162), True, 'import OpenGL.GL as GL\n'), ((247243, 247267), 'numpy.mean', 'np.mean', (['self.renders', '(0)'], {}), '(self.renders, 0)\n', (247250, 247267), True, 'import numpy as np\n'), ((247495, 247537), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.visibilityProgram_ms'], {}), '(self.visibilityProgram_ms)\n', (247510, 247537), True, 'import OpenGL.GL as GL\n'), ((247551, 247564), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (247561, 247564), True, 'import numpy as np\n'), ((247639, 247698), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (247649, 247698), True, 'import OpenGL.GL as GL\n'), ((247917, 247947), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint8'}), '(fc, dtype=np.uint8)\n', (247927, 247947), True, 'import numpy as np\n'), ((248357, 248382), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (248377, 248382), True, 'import OpenGL.GL as GL\n'), ((249221, 249285), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (249241, 249285), True, 'import OpenGL.GL as GL\n'), ((249346, 249381), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(1)', 'drawingBuffers'], {}), '(1, drawingBuffers)\n', (249362, 249381), True, 'import OpenGL.GL as GL\n'), ((249627, 249657), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (249639, 249657), True, 'import OpenGL.GL 
as GL\n'), ((249755, 249784), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (249766, 249784), True, 'import OpenGL.GL as GL\n'), ((259867, 259893), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (259875, 259893), True, 'import numpy as np\n'), ((260599, 260655), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (260613, 260655), True, 'import numpy as np\n'), ((261138, 261194), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (261152, 261194), True, 'import numpy as np\n'), ((261677, 261733), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (261691, 261733), True, 'import numpy as np\n'), ((262257, 262271), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (262268, 262271), True, 'import numpy as np\n'), ((262434, 262472), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', 'ident'], {}), '((p2 - p0)[:, None, :], ident)\n', (262442, 262472), True, 'import numpy as np\n'), ((262485, 262523), 'numpy.cross', 'np.cross', (['ident', '(p1 - p0)[:, None, :]'], {}), '(ident, (p1 - p0)[:, None, :])\n', (262493, 262523), True, 'import numpy as np\n'), ((262746, 262788), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp0'], {}), "('ijk,ikl->ijl', dntnorm, dntdp0)\n", (262755, 262788), True, 'import numpy as np\n'), ((262809, 262851), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp1'], {}), "('ijk,ikl->ijl', dntnorm, dntdp1)\n", (262818, 262851), True, 'import numpy as np\n'), ((262872, 262914), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp2'], {}), "('ijk,ikl->ijl', dntnorm, dntdp2)\n", (262881, 262914), True, 'import numpy as np\n'), 
((262934, 262971), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp0'], {}), "('ij,ijk->ik', pre1, dntdp0)\n", (262943, 262971), True, 'import numpy as np\n'), ((262991, 263028), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp1'], {}), "('ij,ijk->ik', pre1, dntdp1)\n", (263000, 263028), True, 'import numpy as np\n'), ((263048, 263085), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp2'], {}), "('ij,ijk->ik', pre1, dntdp2)\n", (263057, 263085), True, 'import numpy as np\n'), ((266806, 266903), 'numpy.concatenate', 'np.concatenate', (['[db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:,\n None, :]], axis=1)\n', (266820, 266903), True, 'import numpy as np\n'), ((266914, 267011), 'numpy.concatenate', 'np.concatenate', (['[db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:,\n None, :]], axis=1)\n', (266928, 267011), True, 'import numpy as np\n'), ((267022, 267119), 'numpy.concatenate', 'np.concatenate', (['[db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:,\n None, :]], axis=1)\n', (267036, 267119), True, 'import numpy as np\n'), ((267139, 267209), 'numpy.concatenate', 'np.concatenate', (['[dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]]', '(2)'], {}), '([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)\n', (267153, 267209), True, 'import numpy as np\n'), ((267669, 267757), 'numpy.concatenate', 'np.concatenate', (['[dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]]'], {'axis': '(1)'}), '([dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]],\n axis=1)\n', (267683, 267757), True, 'import numpy as np\n'), ((269010, 269049), 'numpy.arange', 'np.arange', (['self.boundarybool_image.size'], {}), 
'(self.boundarybool_image.size)\n', (269019, 269049), True, 'import numpy as np\n'), ((269219, 269240), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (269225, 269240), True, 'import numpy as np\n'), ((270138, 270159), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (270144, 270159), True, 'import numpy as np\n'), ((282549, 282570), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (282555, 282570), True, 'import numpy as np\n'), ((284257, 284296), 'numpy.arange', 'np.arange', (['self.boundarybool_image.size'], {}), '(self.boundarybool_image.size)\n', (284266, 284296), True, 'import numpy as np\n'), ((285344, 285365), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (285350, 285365), True, 'import numpy as np\n'), ((302168, 302262), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (302181, 302262), True, 'import scipy.sparse as sp\n'), ((302315, 302336), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (302321, 302336), True, 'import numpy as np\n'), ((303923, 303944), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (303929, 303944), True, 'import numpy as np\n'), ((310367, 310440), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (310380, 310440), True, 'import scipy.sparse as sp\n'), ((310540, 310561), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (310546, 310561), True, 'import numpy as np\n'), ((317313, 317363), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (317329, 317363), True, 'import OpenGL.GL as GL\n'), ((318757, 318807), 
'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (318773, 318807), True, 'import OpenGL.GL as GL\n'), ((318849, 318884), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (318864, 318884), True, 'import OpenGL.GL as GL\n'), ((318971, 319025), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (318991, 319025), True, 'import OpenGL.GL as GL\n'), ((319034, 319093), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (319044, 319093), True, 'import OpenGL.GL as GL\n'), ((319103, 319137), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (319118, 319137), True, 'import OpenGL.GL as GL\n'), ((319209, 319258), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (319229, 319258), True, 'import OpenGL.GL as GL\n'), ((319267, 319307), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (319282, 319307), True, 'import OpenGL.GL as GL\n'), ((319553, 319607), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (319573, 319607), True, 'import OpenGL.GL as GL\n'), ((319768, 319818), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (319784, 319818), True, 'import OpenGL.GL as GL\n'), ((319860, 319895), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (319875, 319895), True, 'import OpenGL.GL as GL\n'), ((319982, 320036), 
'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (320002, 320036), True, 'import OpenGL.GL as GL\n'), ((320045, 320104), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (320055, 320104), True, 'import OpenGL.GL as GL\n'), ((320114, 320148), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (320129, 320148), True, 'import OpenGL.GL as GL\n'), ((320240, 320289), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (320260, 320289), True, 'import OpenGL.GL as GL\n'), ((320298, 320338), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (320313, 320338), True, 'import OpenGL.GL as GL\n'), ((320584, 320638), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (320604, 320638), True, 'import OpenGL.GL as GL\n'), ((320863, 320902), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (320869, 320902), True, 'import numpy as np\n'), ((322122, 322161), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (322142, 322161), True, 'import OpenGL.GL.shaders as shaders\n'), ((322171, 322225), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (322191, 322225), True, 'import OpenGL.GL as GL\n'), ((322234, 322293), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (322244, 322293), True, 'import OpenGL.GL as GL\n'), 
((323038, 323087), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (323058, 323087), True, 'import OpenGL.GL as GL\n'), ((323097, 323137), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (323112, 323137), True, 'import OpenGL.GL as GL\n'), ((323941, 324002), 'numpy.zeros', 'np.zeros', (['(self.v.r.size / 3, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.v.r.size / 3, 2), dtype=np.float64, order='C')\n", (323949, 324002), True, 'import numpy as np\n'), ((324069, 324126), 'numpy.zeros', 'np.zeros', (['(self.vpe.size, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.vpe.size, 2), dtype=np.float64, order='C')\n", (324077, 324126), True, 'import numpy as np\n'), ((324450, 324484), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (324465, 324484), True, 'import OpenGL.GL as GL\n'), ((324590, 324631), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (324605, 324631), True, 'import OpenGL.GL as GL\n'), ((324869, 324899), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (324880, 324899), True, 'import OpenGL.GL as GL\n'), ((325155, 325209), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (325175, 325209), True, 'import OpenGL.GL as GL\n'), ((325218, 325277), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (325228, 325277), True, 'import OpenGL.GL as GL\n'), ((325465, 325524), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (325475, 325524), True, 'import 
OpenGL.GL as GL\n'), ((325669, 325708), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (325675, 325708), True, 'import numpy as np\n'), ((327862, 327916), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (327882, 327916), True, 'import OpenGL.GL as GL\n'), ((327925, 328098), 'OpenGL.GL.glBlitFramebuffer', 'GL.glBlitFramebuffer', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", '(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_COLOR_BUFFER_BIT', 'GL.GL_LINEAR'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], 0,\n 0, self.frustum['width'], self.frustum['height'], GL.\n GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)\n", (327945, 328098), True, 'import OpenGL.GL as GL\n'), ((328098, 328147), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (328118, 328147), True, 'import OpenGL.GL as GL\n'), ((328156, 328196), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (328171, 328196), True, 'import OpenGL.GL as GL\n'), ((328443, 328497), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (328463, 328497), True, 'import OpenGL.GL as GL\n'), ((328506, 328537), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (328518, 328537), True, 'import OpenGL.GL as GL\n'), ((328546, 328581), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (328561, 328581), True, 'import OpenGL.GL as GL\n'), ((329304, 329328), 'numpy.round', 'np.round', (['texcoord_image'], {}), '(texcoord_image)\n', (329312, 329328), True, 'import numpy as np\n'), ((329510, 329528), 
'OpenGL.GL.glGenBuffers', 'GL.glGenBuffers', (['(1)'], {}), '(1)\n', (329525, 329528), True, 'import OpenGL.GL as GL\n'), ((332591, 333023), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (332612, 333023), True, 'import OpenGL.GL.shaders as shaders\n'), ((333039, 333677), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* 
vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""\n , GL.GL_VERTEX_SHADER)\n', (333060, 333677), True, 'import OpenGL.GL.shaders as shaders\n'), ((333704, 333758), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (333726, 333758), True, 'import OpenGL.GL.shaders as shaders\n'), ((333902, 333962), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""position"""'], {}), "(self.colorTextureProgram, 'position')\n", (333924, 333962), True, 'import OpenGL.GL as GL\n'), ((333988, 334045), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""color"""'], {}), "(self.colorTextureProgram, 'color')\n", (334010, 334045), True, 'import OpenGL.GL as GL\n'), ((334069, 334129), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""vertexUV"""'], {}), "(self.colorTextureProgram, 'vertexUV')\n", (334091, 334129), True, 'import OpenGL.GL as GL\n'), ((334248, 334304), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""MVP"""'], {}), "(self.colorTextureProgram, 'MVP')\n", (334271, 334304), True, 'import OpenGL.GL as GL\n'), ((334627, 334646), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['(2.0)'], {}), '(2.0)\n', (334641, 334646), True, 'import OpenGL.GL as GL\n'), ((338752, 338789), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (338768, 338789), True, 'import OpenGL.GL as GL\n'), ((338798, 338821), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (338818, 338821), True, 'import OpenGL.GL as GL\n'), ((338848, 338917), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""myTextureSampler"""'], {}), "(self.colorTextureProgram, 'myTextureSampler')\n", (338871, 338917), True, 'import OpenGL.GL 
as GL\n'), ((339068, 339098), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (339079, 339098), True, 'import OpenGL.GL as GL\n'), ((339176, 339209), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (339187, 339209), True, 'import OpenGL.GL as GL\n'), ((339218, 339244), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (339239, 339244), True, 'import OpenGL.GL as GL\n'), ((339270, 340258), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP 
* vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n\n }"""\n , GL.GL_VERTEX_SHADER)\n', (339291, 340258), True, 'import OpenGL.GL.shaders as shaders\n'), ((340283, 341593), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n\n layout(location = 3) uniform sampler2D myTextureSampler;\n \n uniform float ww;\n uniform float wh;\n\n // Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n\n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n\n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n\n layout(location = 3) uniform sampler2D myTextureSampler;\n \n uniform float ww;\n uniform float wh;\n\n // Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 
barycentric2;\n\n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n\n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (340304, 341593), True, 'import OpenGL.GL.shaders as shaders\n'), ((341620, 341681), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'ERRORS_FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)\n', (341642, 341681), True, 'import OpenGL.GL.shaders as shaders\n'), ((341713, 341897), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """\n , GL.GL_VERTEX_SHADER)\n', (341734, 341897), True, 'import OpenGL.GL.shaders as shaders\n'), ((341921, 342440), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n\n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n\n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""', 'GL.GL_GEOMETRY_SHADER'], {}), '(\n """#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n\n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n\n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""\n , GL.GL_GEOMETRY_SHADER)\n', (341942, 342440), True, 'import OpenGL.GL.shaders as shaders\n'), 
((342464, 344351), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;\n //layout(location = 7) uniform sampler2D imageGT;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n //layout(location = 5) out vec3 res;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n \n //vec3 imgColor = texture2D(imageGT, gl_FragCoord.xy/vec2(ww,wh)).rgb;\n //res = imgColor - colorFetchOut;\n \n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform 
sampler2DMS sample_barycentric_coords2;\n //layout(location = 7) uniform sampler2D imageGT;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n //layout(location = 5) out vec3 res;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n \n //vec3 imgColor = texture2D(imageGT, gl_FragCoord.xy/vec2(ww,wh)).rgb;\n //res = imgColor - colorFetchOut;\n \n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (342485, 344351), True, 'import OpenGL.GL.shaders as shaders\n'), ((344351, 344397), 'OpenGL.GL.glClampColor', 'GL.glClampColor', (['GL.GL_CLAMP_READ_COLOR', '(False)'], {}), '(GL.GL_CLAMP_READ_COLOR, False)\n', (344366, 344397), True, 'import OpenGL.GL as GL\n'), ((344555, 344648), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['FETCH_VERTEX_SHADER', 'FETCH_GEOMETRY_SHADER', 'FETCH_FRAGMENT_SHADER'], {}), '(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER,\n FETCH_FRAGMENT_SHADER)\n', (344577, 344648), True, 'import OpenGL.GL.shaders as shaders\n'), ((344671, 344683), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (344680, 344683), True, 'import OpenGL.GL as GL\n'), ((346308, 346342), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (346326, 346342), True, 'import OpenGL.GL as GL\n'), ((346365, 346383), 'numpy.ones', 
'np.ones', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (346372, 346383), True, 'import numpy as np\n'), ((346419, 346431), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (346428, 346431), True, 'import OpenGL.GL as GL\n'), ((346440, 346485), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'self.whitePixelTextureID'], {}), '(1, self.whitePixelTextureID)\n', (346456, 346485), True, 'import OpenGL.GL as GL\n'), ((346494, 346554), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'self.whitePixelTextureID'], {}), '(GL.GL_TEXTURE_2D, self.whitePixelTextureID)\n', (346510, 346554), True, 'import OpenGL.GL as GL\n'), ((346642, 346731), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (346659, 346731), True, 'import OpenGL.GL as GL\n'), ((346735, 346848), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (346753, 346848), True, 'import OpenGL.GL as GL\n'), ((346874, 346897), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (346894, 346897), True, 'import OpenGL.GL as GL\n'), ((346907, 346933), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (346921, 346933), True, 'import OpenGL.GL as GL\n'), ((346943, 346973), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (346954, 346973), True, 'import OpenGL.GL as GL\n'), ((347051, 347084), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (347062, 347084), True, 'import OpenGL.GL as GL\n'), ((347093, 347119), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', 
(['(1.0)'], {}), '(1.0)\n', (347114, 347119), True, 'import OpenGL.GL as GL\n'), ((347129, 347188), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)\n', (347149, 347188), True, 'import OpenGL.GL as GL\n'), ((347227, 347246), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (347243, 347246), True, 'import OpenGL.GL as GL\n'), ((347255, 347329), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (347271, 347329), True, 'import OpenGL.GL as GL\n'), ((347338, 347480), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RGB8, self.frustum['width'], self.frustum['height'], False)\n", (347364, 347480), True, 'import OpenGL.GL as GL\n'), ((347771, 347906), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)\n', (347796, 347906), True, 'import OpenGL.GL as GL\n'), ((347949, 347968), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (347965, 347968), True, 'import OpenGL.GL as GL\n'), ((347977, 348065), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (347993, 348065), True, 'import OpenGL.GL as GL\n'), ((348069, 348212), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', 
(['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (348095, 348212), True, 'import OpenGL.GL as GL\n'), ((348503, 348647), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)\n', (348528, 348647), True, 'import OpenGL.GL as GL\n'), ((348687, 348706), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (348703, 348706), True, 'import OpenGL.GL as GL\n'), ((348715, 348800), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (348731, 348800), True, 'import OpenGL.GL as GL\n'), ((348804, 348947), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_R32UI, self.frustum['width'], self.frustum['height'], False)\n", (348830, 348947), True, 'import OpenGL.GL as GL\n'), ((349038, 349179), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)\n', (349063, 349179), True, 'import OpenGL.GL as GL\n'), ((349236, 349255), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (349252, 
349255), True, 'import OpenGL.GL as GL\n'), ((349264, 349356), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (349280, 349356), True, 'import OpenGL.GL as GL\n'), ((349360, 349503), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (349386, 349503), True, 'import OpenGL.GL as GL\n'), ((349794, 349942), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1, 0)\n', (349819, 349942), True, 'import OpenGL.GL as GL\n'), ((350023, 350042), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (350039, 350042), True, 'import OpenGL.GL as GL\n'), ((350051, 350143), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (350067, 350143), True, 'import OpenGL.GL as GL\n'), ((350147, 350290), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (350173, 350290), True, 'import OpenGL.GL as GL\n'), ((350581, 350729), 'OpenGL.GL.glFramebufferTexture2D', 
'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2, 0)\n', (350606, 350729), True, 'import OpenGL.GL as GL\n'), ((350791, 350810), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (350807, 350810), True, 'import OpenGL.GL as GL\n'), ((350819, 350887), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)\n', (350835, 350887), True, 'import OpenGL.GL as GL\n'), ((350896, 351049), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'], False)\n", (350922, 351049), True, 'import OpenGL.GL as GL\n'), ((351375, 351503), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)\n', (351400, 351503), True, 'import OpenGL.GL as GL\n'), ((351916, 351945), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (351927, 351945), True, 'import OpenGL.GL as GL\n'), ((351954, 352004), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (351970, 352004), True, 'import OpenGL.GL as GL\n'), ((352054, 352088), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (352064, 
352088), True, 'import OpenGL.GL as GL\n'), ((352097, 352131), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (352107, 352131), True, 'import OpenGL.GL as GL\n'), ((352325, 352367), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (352345, 352367), True, 'import OpenGL.GL as GL\n'), ((352401, 352424), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (352421, 352424), True, 'import OpenGL.GL as GL\n'), ((352434, 352460), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (352448, 352460), True, 'import OpenGL.GL as GL\n'), ((352470, 352532), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)\n', (352490, 352532), True, 'import OpenGL.GL as GL\n'), ((352583, 352607), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (352604, 352607), True, 'import OpenGL.GL as GL\n'), ((352616, 352702), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_render)\n', (352637, 352702), True, 'import OpenGL.GL as GL\n'), ((352706, 352814), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (352730, 352814), True, 'import OpenGL.GL as GL\n'), ((352818, 352955), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, 
self.render_buffer_fetch_sample_render)\n', (352846, 352955), True, 'import OpenGL.GL as GL\n'), ((353003, 353027), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (353024, 353027), True, 'import OpenGL.GL as GL\n'), ((353036, 353124), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_position)\n', (353057, 353124), True, 'import OpenGL.GL as GL\n'), ((353128, 353237), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (353152, 353237), True, 'import OpenGL.GL as GL\n'), ((353241, 353380), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)\n', (353269, 353380), True, 'import OpenGL.GL as GL\n'), ((353424, 353448), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (353445, 353448), True, 'import OpenGL.GL as GL\n'), ((353457, 353536), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (353478, 353536), True, 'import OpenGL.GL as GL\n'), ((353545, 353654), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (353569, 353654), True, 'import OpenGL.GL as GL\n'), ((353658, 353793), 
'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (353686, 353793), True, 'import OpenGL.GL as GL\n'), ((353854, 353878), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (353875, 353878), True, 'import OpenGL.GL as GL\n'), ((353887, 353979), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric1)\n', (353908, 353979), True, 'import OpenGL.GL as GL\n'), ((353983, 354092), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (354007, 354092), True, 'import OpenGL.GL as GL\n'), ((354096, 354239), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)\n', (354124, 354239), True, 'import OpenGL.GL as GL\n'), ((354291, 354315), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (354312, 354315), True, 'import OpenGL.GL as GL\n'), ((354324, 354416), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric2)\n', (354345, 354416), True, 'import OpenGL.GL as GL\n'), ((354420, 354529), 'OpenGL.GL.glRenderbufferStorage', 
'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (354444, 354529), True, 'import OpenGL.GL as GL\n'), ((354533, 354676), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)\n', (354561, 354676), True, 'import OpenGL.GL as GL\n'), ((354709, 354733), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (354730, 354733), True, 'import OpenGL.GL as GL\n'), ((354742, 354810), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (354763, 354810), True, 'import OpenGL.GL as GL\n'), ((354819, 354938), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (354843, 354938), True, 'import OpenGL.GL as GL\n'), ((354942, 355065), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (354970, 355065), True, 'import OpenGL.GL as GL\n'), ((355070, 355099), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (355081, 355099), True, 'import OpenGL.GL as GL\n'), ((355108, 355158), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 
'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (355124, 355158), True, 'import OpenGL.GL as GL\n'), ((355167, 355196), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (355179, 355196), True, 'import OpenGL.GL as GL\n'), ((355206, 355240), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (355216, 355240), True, 'import OpenGL.GL as GL\n'), ((355249, 355283), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (355259, 355283), True, 'import OpenGL.GL as GL\n'), ((355477, 355519), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (355497, 355519), True, 'import OpenGL.GL as GL\n'), ((355569, 355592), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (355589, 355592), True, 'import OpenGL.GL as GL\n'), ((355602, 355664), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_errors_nonms'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)\n', (355622, 355664), True, 'import OpenGL.GL as GL\n'), ((355701, 355725), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (355722, 355725), True, 'import OpenGL.GL as GL\n'), ((355734, 355801), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_render)\n', (355755, 355801), True, 'import OpenGL.GL as GL\n'), ((355810, 355918), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (355834, 355918), True, 'import OpenGL.GL as GL\n'), ((355922, 356045), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', 
(['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, render_buf_errors_render)\n', (355950, 356045), True, 'import OpenGL.GL as GL\n'), ((356086, 356110), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (356107, 356110), True, 'import OpenGL.GL as GL\n'), ((356119, 356195), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (356140, 356195), True, 'import OpenGL.GL as GL\n'), ((356204, 356313), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (356228, 356313), True, 'import OpenGL.GL as GL\n'), ((356317, 356449), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (356345, 356449), True, 'import OpenGL.GL as GL\n'), ((356486, 356510), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (356507, 356510), True, 'import OpenGL.GL as GL\n'), ((356519, 356591), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (356540, 356591), True, 'import OpenGL.GL as GL\n'), ((356600, 356709), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], 
self.frustum['height'])\n", (356624, 356709), True, 'import OpenGL.GL as GL\n'), ((356713, 356841), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (356741, 356841), True, 'import OpenGL.GL as GL\n'), ((356896, 356920), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (356917, 356920), True, 'import OpenGL.GL as GL\n'), ((356929, 357014), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1\n )\n', (356950, 357014), True, 'import OpenGL.GL as GL\n'), ((357018, 357127), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (357042, 357127), True, 'import OpenGL.GL as GL\n'), ((357131, 357267), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)\n', (357159, 357267), True, 'import OpenGL.GL as GL\n'), ((357312, 357336), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (357333, 357336), True, 'import OpenGL.GL as GL\n'), ((357345, 357430), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2\n )\n', (357366, 357430), True, 'import OpenGL.GL as GL\n'), ((357434, 357543), 
'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (357458, 357543), True, 'import OpenGL.GL as GL\n'), ((357547, 357683), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)\n', (357575, 357683), True, 'import OpenGL.GL as GL\n'), ((357720, 357744), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (357741, 357744), True, 'import OpenGL.GL as GL\n'), ((357753, 357816), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, z_buf_samples_errors)\n', (357774, 357816), True, 'import OpenGL.GL as GL\n'), ((357825, 357944), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (357849, 357944), True, 'import OpenGL.GL as GL\n'), ((357948, 358066), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, z_buf_samples_errors)\n', (357976, 358066), True, 'import OpenGL.GL as GL\n'), ((358071, 358105), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (358081, 358105), True, 'import OpenGL.GL as GL\n'), ((358114, 358148), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), 
'(GL.GL_DEPTH_BUFFER_BIT)\n', (358124, 358148), True, 'import OpenGL.GL as GL\n'), ((358342, 358384), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (358362, 358384), True, 'import OpenGL.GL as GL\n'), ((358415, 358484), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""myTextureSampler"""'], {}), "(self.errorTextureProgram, 'myTextureSampler')\n", (358438, 358484), True, 'import OpenGL.GL as GL\n'), ((358545, 358605), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""position"""'], {}), "(self.errorTextureProgram, 'position')\n", (358567, 358605), True, 'import OpenGL.GL as GL\n'), ((358631, 358690), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""colorIn"""'], {}), "(self.errorTextureProgram, 'colorIn')\n", (358653, 358690), True, 'import OpenGL.GL as GL\n'), ((358714, 358774), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""vertexUV"""'], {}), "(self.errorTextureProgram, 'vertexUV')\n", (358736, 358774), True, 'import OpenGL.GL as GL\n'), ((358803, 358862), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""face_id"""'], {}), "(self.errorTextureProgram, 'face_id')\n", (358825, 358862), True, 'import OpenGL.GL as GL\n'), ((358894, 358957), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""barycentric"""'], {}), "(self.errorTextureProgram, 'barycentric')\n", (358916, 358957), True, 'import OpenGL.GL as GL\n'), ((361149, 361172), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (361169, 361172), True, 'import OpenGL.GL as GL\n'), ((361198, 361210), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (361207, 361210), True, 'import OpenGL.GL as GL\n'), ((361219, 361257), 'OpenGL.GL.glGenVertexArrays', 
'GL.glGenVertexArrays', (['(1)', 'self.vao_quad'], {}), '(1, self.vao_quad)\n', (361239, 361257), True, 'import OpenGL.GL as GL\n'), ((361266, 361301), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (361286, 361301), True, 'import OpenGL.GL as GL\n'), ((364193, 364223), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (364204, 364223), True, 'import OpenGL.GL as GL\n'), ((364232, 364265), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (364243, 364265), True, 'import OpenGL.GL as GL\n'), ((364274, 364300), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (364295, 364300), True, 'import OpenGL.GL as GL\n'), ((364512, 364553), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.errorTextureProgram'], {}), '(self.errorTextureProgram)\n', (364527, 364553), True, 'import OpenGL.GL as GL\n'), ((364563, 364627), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (364583, 364627), True, 'import OpenGL.GL as GL\n'), ((364788, 364823), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (364804, 364823), True, 'import OpenGL.GL as GL\n'), ((364883, 364918), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (364898, 364918), True, 'import OpenGL.GL as GL\n'), ((364923, 364982), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (364933, 364982), True, 'import OpenGL.GL as GL\n'), ((365009, 365043), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE1'], {}), '(GL.GL_TEXTURE1)\n', (365027, 365043), True, 'import OpenGL.GL as GL\n'), ((365151, 365201), 
'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'self.textureGT'], {}), '(GL.GL_TEXTURE_2D, self.textureGT)\n', (365167, 365201), True, 'import OpenGL.GL as GL\n'), ((365230, 365290), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""imageGT"""'], {}), "(self.errorTextureProgram, 'imageGT')\n", (365253, 365290), True, 'import OpenGL.GL as GL\n'), ((365299, 365335), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureGTLoc', '(1)'], {}), '(self.textureGTLoc, 1)\n', (365313, 365335), True, 'import OpenGL.GL as GL\n'), ((365354, 365409), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""ww"""'], {}), "(self.errorTextureProgram, 'ww')\n", (365377, 365409), True, 'import OpenGL.GL as GL\n'), ((365426, 365481), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""wh"""'], {}), "(self.errorTextureProgram, 'wh')\n", (365449, 365481), True, 'import OpenGL.GL as GL\n'), ((365490, 365534), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (365504, 365534), True, 'import OpenGL.GL as GL\n'), ((365543, 365588), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (365557, 365588), True, 'import OpenGL.GL as GL\n'), ((365735, 365774), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (365741, 365774), True, 'import numpy as np\n'), ((370095, 370136), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.fetchSamplesProgram'], {}), '(self.fetchSamplesProgram)\n', (370110, 370136), True, 'import OpenGL.GL as GL\n'), ((370205, 370264), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""colors"""'], {}), "(self.fetchSamplesProgram, 'colors')\n", (370228, 370264), True, 'import OpenGL.GL as 
GL\n'), ((370300, 370369), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_positions"""'], {}), "(self.fetchSamplesProgram, 'sample_positions')\n", (370323, 370369), True, 'import OpenGL.GL as GL\n'), ((370401, 370466), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_faces"""'], {}), "(self.fetchSamplesProgram, 'sample_faces')\n", (370424, 370466), True, 'import OpenGL.GL as GL\n'), ((370505, 370584), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords1"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords1')\n", (370528, 370584), True, 'import OpenGL.GL as GL\n'), ((370623, 370702), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords2"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords2')\n", (370646, 370702), True, 'import OpenGL.GL as GL\n'), ((370977, 371032), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""ww"""'], {}), "(self.fetchSamplesProgram, 'ww')\n", (371000, 371032), True, 'import OpenGL.GL as GL\n'), ((371049, 371104), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""wh"""'], {}), "(self.fetchSamplesProgram, 'wh')\n", (371072, 371104), True, 'import OpenGL.GL as GL\n'), ((371113, 371157), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (371127, 371157), True, 'import OpenGL.GL as GL\n'), ((371166, 371211), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (371180, 371211), True, 'import OpenGL.GL as GL\n'), ((371236, 371311), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, 
self.frustum['width'], self.frustum['height'], 3])\n", (371244, 371311), True, 'import numpy as np\n'), ((371346, 371421), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (371354, 371421), True, 'import numpy as np\n'), ((371585, 371660), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (371593, 371660), True, 'import numpy as np\n'), ((371704, 371779), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 1]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 1])\n", (371712, 371779), True, 'import numpy as np\n'), ((371822, 371897), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (371830, 371897), True, 'import numpy as np\n'), ((371907, 371937), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (371919, 371937), True, 'import OpenGL.GL as GL\n'), ((371947, 372014), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)\n', (371967, 372014), True, 'import OpenGL.GL as GL\n'), ((372200, 372235), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (372216, 372235), True, 'import OpenGL.GL as GL\n'), ((372245, 372280), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (372260, 372280), True, 'import OpenGL.GL as GL\n'), ((372285, 372344), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | 
GL.GL_DEPTH_BUFFER_BIT)\n', (372295, 372344), True, 'import OpenGL.GL as GL\n'), ((372368, 372392), 'numpy.arange', 'np.arange', (['self.nsamples'], {}), '(self.nsamples)\n', (372377, 372392), True, 'import numpy as np\n'), ((376571, 376594), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (376591, 376594), True, 'import OpenGL.GL as GL\n'), ((376604, 376639), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (376619, 376639), True, 'import OpenGL.GL as GL\n'), ((376645, 376674), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (376656, 376674), True, 'import OpenGL.GL as GL\n'), ((376683, 376714), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (376695, 376714), True, 'import OpenGL.GL as GL\n'), ((376795, 376819), 'numpy.mean', 'np.mean', (['self.renders', '(0)'], {}), '(self.renders, 0)\n', (376802, 376819), True, 'import numpy as np\n'), ((377046, 377088), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.visibilityProgram_ms'], {}), '(self.visibilityProgram_ms)\n', (377061, 377088), True, 'import OpenGL.GL as GL\n'), ((377102, 377115), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (377112, 377115), True, 'import numpy as np\n'), ((377191, 377250), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (377201, 377250), True, 'import OpenGL.GL as GL\n'), ((377470, 377500), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint8'}), '(fc, dtype=np.uint8)\n', (377480, 377500), True, 'import numpy as np\n'), ((377909, 377934), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (377929, 377934), True, 'import OpenGL.GL as GL\n'), ((378773, 378837), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 
'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (378793, 378837), True, 'import OpenGL.GL as GL\n'), ((378898, 378933), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(1)', 'drawingBuffers'], {}), '(1, drawingBuffers)\n', (378914, 378933), True, 'import OpenGL.GL as GL\n'), ((379180, 379210), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (379192, 379210), True, 'import OpenGL.GL as GL\n'), ((379308, 379337), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (379319, 379337), True, 'import OpenGL.GL as GL\n'), ((389226, 389252), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (389234, 389252), True, 'import numpy as np\n'), ((389978, 390034), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (389992, 390034), True, 'import numpy as np\n'), ((390537, 390593), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (390551, 390593), True, 'import numpy as np\n'), ((391096, 391152), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (391110, 391152), True, 'import numpy as np\n'), ((391686, 391700), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (391697, 391700), True, 'import numpy as np\n'), ((391875, 391913), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', 'ident'], {}), '((p2 - p0)[:, None, :], ident)\n', (391883, 391913), True, 'import numpy as np\n'), ((391931, 391969), 'numpy.cross', 'np.cross', (['ident', '(p1 - p0)[:, None, :]'], {}), '(ident, (p1 - p0)[:, None, :])\n', (391939, 391969), True, 'import numpy as np\n'), ((392204, 392246), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp0'], {}), 
"('ijk,ikl->ijl', dntnorm, dntdp0)\n", (392213, 392246), True, 'import numpy as np\n'), ((392268, 392310), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp1'], {}), "('ijk,ikl->ijl', dntnorm, dntdp1)\n", (392277, 392310), True, 'import numpy as np\n'), ((392332, 392374), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp2'], {}), "('ijk,ikl->ijl', dntnorm, dntdp2)\n", (392341, 392374), True, 'import numpy as np\n'), ((392395, 392432), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp0'], {}), "('ij,ijk->ik', pre1, dntdp0)\n", (392404, 392432), True, 'import numpy as np\n'), ((392452, 392489), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp1'], {}), "('ij,ijk->ik', pre1, dntdp1)\n", (392461, 392489), True, 'import numpy as np\n'), ((392509, 392546), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp2'], {}), "('ij,ijk->ik', pre1, dntdp2)\n", (392518, 392546), True, 'import numpy as np\n'), ((396423, 396520), 'numpy.concatenate', 'np.concatenate', (['[db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:,\n None, :]], axis=1)\n', (396437, 396520), True, 'import numpy as np\n'), ((396531, 396628), 'numpy.concatenate', 'np.concatenate', (['[db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:,\n None, :]], axis=1)\n', (396545, 396628), True, 'import numpy as np\n'), ((396639, 396736), 'numpy.concatenate', 'np.concatenate', (['[db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:,\n None, :]], axis=1)\n', (396653, 396736), True, 'import numpy as np\n'), ((396756, 396826), 'numpy.concatenate', 'np.concatenate', (['[dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]]', '(2)'], {}), '([dp0[:, :, 
None], dp1[:, :, None], dp2[:, :, None]], 2)\n', (396770, 396826), True, 'import numpy as np\n'), ((397289, 397377), 'numpy.concatenate', 'np.concatenate', (['[dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]]'], {'axis': '(1)'}), '([dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]],\n axis=1)\n', (397303, 397377), True, 'import numpy as np\n'), ((398461, 398482), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (398467, 398482), True, 'import numpy as np\n'), ((406583, 406604), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (406589, 406604), True, 'import numpy as np\n'), ((407970, 408009), 'numpy.arange', 'np.arange', (['self.boundarybool_image.size'], {}), '(self.boundarybool_image.size)\n', (407979, 408009), True, 'import numpy as np\n'), ((408888, 408909), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (408894, 408909), True, 'import numpy as np\n'), ((424331, 424425), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (424344, 424425), True, 'import scipy.sparse as sp\n'), ((424433, 424454), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (424439, 424454), True, 'import numpy as np\n'), ((425645, 425666), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (425651, 425666), True, 'import numpy as np\n'), ((429058, 429131), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (429071, 429131), True, 'import scipy.sparse as sp\n'), ((429183, 429204), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (429189, 429204), True, 'import numpy as np\n'), ((435966, 436016), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', 
(['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (435982, 436016), True, 'import OpenGL.GL as GL\n'), ((437410, 437460), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (437426, 437460), True, 'import OpenGL.GL as GL\n'), ((437502, 437537), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (437517, 437537), True, 'import OpenGL.GL as GL\n'), ((437626, 437680), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (437646, 437680), True, 'import OpenGL.GL as GL\n'), ((437689, 437748), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (437699, 437748), True, 'import OpenGL.GL as GL\n'), ((437758, 437792), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (437773, 437792), True, 'import OpenGL.GL as GL\n'), ((437864, 437913), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (437884, 437913), True, 'import OpenGL.GL as GL\n'), ((437922, 437962), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (437937, 437962), True, 'import OpenGL.GL as GL\n'), ((438242, 438296), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (438262, 438296), True, 'import OpenGL.GL as GL\n'), ((438461, 438511), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (438477, 438511), True, 'import OpenGL.GL as GL\n'), ((438553, 438588), 'OpenGL.GL.glClearColor', 
'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (438568, 438588), True, 'import OpenGL.GL as GL\n'), ((438677, 438731), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (438697, 438731), True, 'import OpenGL.GL as GL\n'), ((438740, 438799), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (438750, 438799), True, 'import OpenGL.GL as GL\n'), ((438809, 438843), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (438824, 438843), True, 'import OpenGL.GL as GL\n'), ((438935, 438984), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (438955, 438984), True, 'import OpenGL.GL as GL\n'), ((438993, 439033), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (439008, 439033), True, 'import OpenGL.GL as GL\n'), ((439313, 439367), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (439333, 439367), True, 'import OpenGL.GL as GL\n'), ((439593, 439632), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (439599, 439632), True, 'import numpy as np\n'), ((440853, 440892), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (440873, 440892), True, 'import OpenGL.GL.shaders as shaders\n'), ((440902, 440956), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (440922, 440956), True, 'import OpenGL.GL as GL\n'), ((440965, 441024), 'OpenGL.GL.glClear', 'GL.glClear', 
(['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (440975, 441024), True, 'import OpenGL.GL as GL\n'), ((441787, 441836), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (441807, 441836), True, 'import OpenGL.GL as GL\n'), ((441846, 441886), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (441861, 441886), True, 'import OpenGL.GL as GL\n'), ((442733, 442794), 'numpy.zeros', 'np.zeros', (['(self.v.r.size / 3, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.v.r.size / 3, 2), dtype=np.float64, order='C')\n", (442741, 442794), True, 'import numpy as np\n'), ((442864, 442921), 'numpy.zeros', 'np.zeros', (['(self.vpe.size, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.vpe.size, 2), dtype=np.float64, order='C')\n", (442872, 442921), True, 'import numpy as np\n'), ((443248, 443282), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (443263, 443282), True, 'import OpenGL.GL as GL\n'), ((443388, 443429), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (443403, 443429), True, 'import OpenGL.GL as GL\n'), ((443667, 443697), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (443678, 443697), True, 'import OpenGL.GL as GL\n'), ((443957, 444011), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (443977, 444011), True, 'import OpenGL.GL as GL\n'), ((444020, 444079), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (444030, 444079), True, 'import OpenGL.GL as GL\n'), ((444267, 444326), 'OpenGL.GL.glClear', 
'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (444277, 444326), True, 'import OpenGL.GL as GL\n'), ((444472, 444511), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (444478, 444511), True, 'import numpy as np\n'), ((446667, 446721), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (446687, 446721), True, 'import OpenGL.GL as GL\n'), ((446730, 446903), 'OpenGL.GL.glBlitFramebuffer', 'GL.glBlitFramebuffer', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", '(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_COLOR_BUFFER_BIT', 'GL.GL_LINEAR'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], 0,\n 0, self.frustum['width'], self.frustum['height'], GL.\n GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)\n", (446750, 446903), True, 'import OpenGL.GL as GL\n'), ((446932, 446981), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (446952, 446981), True, 'import OpenGL.GL as GL\n'), ((446990, 447030), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (447005, 447030), True, 'import OpenGL.GL as GL\n'), ((447311, 447365), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (447331, 447365), True, 'import OpenGL.GL as GL\n'), ((447374, 447405), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (447386, 447405), True, 'import OpenGL.GL as GL\n'), ((447414, 447449), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (447429, 447449), True, 'import OpenGL.GL as GL\n'), ((448190, 
448214), 'numpy.round', 'np.round', (['texcoord_image'], {}), '(texcoord_image)\n', (448198, 448214), True, 'import numpy as np\n'), ((448403, 448421), 'OpenGL.GL.glGenBuffers', 'GL.glGenBuffers', (['(1)'], {}), '(1)\n', (448418, 448421), True, 'import OpenGL.GL as GL\n'), ((451487, 451919), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core\n // Interpolated values from the vertex shaders\n //#extension GL_EXT_shader_image_load_store : enable \n in vec3 theColor;\n in vec2 UV;\n uniform sampler2D myTextureSampler;\n // Ouput data\n out vec3 color;\n void main(){\n color = theColor * texture2D( myTextureSampler, UV).rgb;\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (451508, 451919), True, 'import OpenGL.GL.shaders as shaders\n'), ((451935, 452573), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 color;\n layout(location = 2) in vec2 vertexUV;\n uniform mat4 MVP;\n out vec3 theColor;\n out vec2 UV;\n // Values 
that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n theColor = color;\n UV = vertexUV;\n }"""\n , GL.GL_VERTEX_SHADER)\n', (451956, 452573), True, 'import OpenGL.GL.shaders as shaders\n'), ((452600, 452654), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, FRAGMENT_SHADER)\n', (452622, 452654), True, 'import OpenGL.GL.shaders as shaders\n'), ((452798, 452858), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""position"""'], {}), "(self.colorTextureProgram, 'position')\n", (452820, 452858), True, 'import OpenGL.GL as GL\n'), ((452884, 452941), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""color"""'], {}), "(self.colorTextureProgram, 'color')\n", (452906, 452941), True, 'import OpenGL.GL as GL\n'), ((452965, 453025), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.colorTextureProgram', '"""vertexUV"""'], {}), "(self.colorTextureProgram, 'vertexUV')\n", (452987, 453025), True, 'import OpenGL.GL as GL\n'), ((453144, 453200), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.colorTextureProgram', '"""MVP"""'], {}), "(self.colorTextureProgram, 'MVP')\n", (453167, 453200), True, 'import OpenGL.GL as GL\n'), ((453523, 453542), 'OpenGL.GL.glLineWidth', 'GL.glLineWidth', (['(2.0)'], {}), '(2.0)\n', (453537, 453542), True, 'import OpenGL.GL as GL\n'), ((457677, 457714), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (457693, 457714), True, 'import OpenGL.GL as GL\n'), ((457723, 457746), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (457743, 457746), True, 'import OpenGL.GL as GL\n'), ((457773, 457842), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', 
(['self.colorTextureProgram', '"""myTextureSampler"""'], {}), "(self.colorTextureProgram, 'myTextureSampler')\n", (457796, 457842), True, 'import OpenGL.GL as GL\n'), ((457964, 457994), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (457975, 457994), True, 'import OpenGL.GL as GL\n'), ((458072, 458105), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (458083, 458105), True, 'import OpenGL.GL as GL\n'), ((458114, 458140), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (458135, 458140), True, 'import OpenGL.GL as GL\n'), ((458166, 459154), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n\n // Values that stay constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n\n }"""', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n layout (location = 0) in vec3 position;\n layout (location = 1) in vec3 colorIn;\n layout(location = 2) in vec2 vertexUV;\n layout(location = 3) in uint face_id;\n layout(location = 4) in vec3 barycentric;\n\n uniform mat4 MVP;\n out vec3 theColor;\n out vec4 pos;\n flat out uint face_out;\n out vec3 barycentric_vert_out;\n out vec2 UV;\n\n // Values that stay 
constant for the whole mesh.\n void main(){\n // Output position of the vertex, in clip space : MVP * position\n gl_Position = MVP* vec4(position,1);\n pos = MVP * vec4(position,1);\n //pos = pos4.xyz;\n theColor = colorIn;\n UV = vertexUV;\n face_out = face_id;\n barycentric_vert_out = barycentric;\n\n }"""\n , GL.GL_VERTEX_SHADER)\n', (458187, 459154), True, 'import OpenGL.GL.shaders as shaders\n'), ((459179, 460489), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n\n layout(location = 3) uniform sampler2D myTextureSampler;\n \n uniform float ww;\n uniform float wh;\n\n // Ouput data\n layout(location = 0) out vec3 color; \n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n\n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n\n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n\n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n\n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n //layout(early_fragment_tests) in;\n\n // Interpolated values from the vertex shaders\n in vec3 theColor;\n in vec2 UV;\n flat in uint face_out;\n in vec4 pos;\n in vec3 barycentric_vert_out;\n\n layout(location = 3) uniform sampler2D myTextureSampler;\n \n uniform float ww;\n uniform float wh;\n\n // Ouput data\n layout(location = 0) out vec3 color; \n 
layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 barycentric1;\n layout(location = 4) out vec2 barycentric2;\n\n void main(){\n vec3 finalColor = theColor * texture2D( myTextureSampler, UV).rgb;\n color = finalColor.rgb;\n\n sample_pos = ((0.5*pos.xy/pos.w) + 0.5)*vec2(ww,wh);\n sample_face = face_out;\n barycentric1 = barycentric_vert_out.xy;\n barycentric2 = vec2(barycentric_vert_out.z, 0.);\n\n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (459200, 460489), True, 'import OpenGL.GL.shaders as shaders\n'), ((460516, 460577), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['VERTEX_SHADER', 'ERRORS_FRAGMENT_SHADER'], {}), '(VERTEX_SHADER, ERRORS_FRAGMENT_SHADER)\n', (460538, 460577), True, 'import OpenGL.GL.shaders as shaders\n'), ((460609, 460793), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """', 'GL.GL_VERTEX_SHADER'], {}), '(\n """#version 330 core\n // Input vertex data, different for all executions of this shader.\n void main() {}\n """\n , GL.GL_VERTEX_SHADER)\n', (460630, 460793), True, 'import OpenGL.GL.shaders as shaders\n'), ((460817, 461336), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n\n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n\n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""', 'GL.GL_GEOMETRY_SHADER'], {}), '(\n """#version 330 core\n layout(points) in;\n layout(triangle_strip, max_vertices = 4) out;\n\n const vec2 data[4] = vec2[]\n (\n vec2(-1.0, 1.0),\n vec2(-1.0, -1.0),\n vec2( 1.0, 1.0),\n vec2( 1.0, -1.0)\n );\n\n void main() {\n for (int i = 0; i < 4; ++i) {\n gl_Position = 
vec4( data[i], 0.0, 1.0 );\n EmitVertex();\n }\n EndPrimitive();\n }"""\n , GL.GL_GEOMETRY_SHADER)\n', (460838, 461336), True, 'import OpenGL.GL.shaders as shaders\n'), ((461360, 463247), 'OpenGL.GL.shaders.compileShader', 'shaders.compileShader', (['"""#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;\n //layout(location = 7) uniform sampler2D imageGT;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n //layout(location = 5) out vec3 res;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n \n //vec3 imgColor = texture2D(imageGT, gl_FragCoord.xy/vec2(ww,wh)).rgb;\n //res = imgColor - colorFetchOut;\n \n }"""', 'GL.GL_FRAGMENT_SHADER'], {}), '(\n """#version 330 core \n #extension GL_ARB_explicit_uniform_location : enable\n #extension GL_ARB_explicit_attrib_location : enable\n\n layout(location = 2) uniform sampler2DMS colors;\n layout(location = 3) uniform sampler2DMS 
sample_positions;\n layout(location = 4) uniform usampler2DMS sample_faces;\n layout(location = 5) uniform sampler2DMS sample_barycentric_coords1;\n layout(location = 6) uniform sampler2DMS sample_barycentric_coords2;\n //layout(location = 7) uniform sampler2D imageGT;\n\n uniform float ww;\n uniform float wh;\n uniform int sample;\n\n // Ouput data\n layout(location = 0) out vec3 colorFetchOut;\n layout(location = 1) out vec2 sample_pos;\n layout(location = 2) out uint sample_face;\n layout(location = 3) out vec2 sample_barycentric1;\n layout(location = 4) out vec2 sample_barycentric2;\n //layout(location = 5) out vec3 res;\n\n //out int gl_SampleMask[];\n const int all_sample_mask = 0xffff;\n\n void main(){\n ivec2 texcoord = ivec2(gl_FragCoord.xy);\n colorFetchOut = texelFetch(colors, texcoord, sample).xyz;\n sample_pos = texelFetch(sample_positions, texcoord, sample).xy; \n sample_face = texelFetch(sample_faces, texcoord, sample).r;\n sample_barycentric1 = texelFetch(sample_barycentric_coords1, texcoord, sample).xy;\n sample_barycentric2 = texelFetch(sample_barycentric_coords2, texcoord, sample).xy;\n \n //vec3 imgColor = texture2D(imageGT, gl_FragCoord.xy/vec2(ww,wh)).rgb;\n //res = imgColor - colorFetchOut;\n \n }"""\n , GL.GL_FRAGMENT_SHADER)\n', (461381, 463247), True, 'import OpenGL.GL.shaders as shaders\n'), ((463247, 463293), 'OpenGL.GL.glClampColor', 'GL.glClampColor', (['GL.GL_CLAMP_READ_COLOR', '(False)'], {}), '(GL.GL_CLAMP_READ_COLOR, False)\n', (463262, 463293), True, 'import OpenGL.GL as GL\n'), ((463451, 463544), 'OpenGL.GL.shaders.compileProgram', 'shaders.compileProgram', (['FETCH_VERTEX_SHADER', 'FETCH_GEOMETRY_SHADER', 'FETCH_FRAGMENT_SHADER'], {}), '(FETCH_VERTEX_SHADER, FETCH_GEOMETRY_SHADER,\n FETCH_FRAGMENT_SHADER)\n', (463473, 463544), True, 'import OpenGL.GL.shaders as shaders\n'), ((463567, 463579), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (463576, 463579), True, 'import OpenGL.GL as GL\n'), ((465652, 465689), 
'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', '(0)'], {}), '(GL.GL_TEXTURE_2D, 0)\n', (465668, 465689), True, 'import OpenGL.GL as GL\n'), ((465699, 465733), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (465717, 465733), True, 'import OpenGL.GL as GL\n'), ((465756, 465774), 'numpy.ones', 'np.ones', (['[4, 4, 3]'], {}), '([4, 4, 3])\n', (465763, 465774), True, 'import numpy as np\n'), ((465810, 465822), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (465819, 465822), True, 'import OpenGL.GL as GL\n'), ((465831, 465876), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'self.whitePixelTextureID'], {}), '(1, self.whitePixelTextureID)\n', (465847, 465876), True, 'import OpenGL.GL as GL\n'), ((465886, 465946), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'self.whitePixelTextureID'], {}), '(GL.GL_TEXTURE_2D, self.whitePixelTextureID)\n', (465902, 465946), True, 'import OpenGL.GL as GL\n'), ((465956, 465999), 'OpenGL.GL.glPixelStorei', 'GL.glPixelStorei', (['GL.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(GL.GL_UNPACK_ALIGNMENT, 1)\n', (465972, 465999), True, 'import OpenGL.GL as GL\n'), ((466008, 466084), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (466026, 466084), True, 'import OpenGL.GL as GL\n'), ((466093, 466188), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 'GL.GL_LINEAR_MIPMAP_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.\n GL_LINEAR_MIPMAP_LINEAR)\n', (466111, 466188), True, 'import OpenGL.GL as GL\n'), ((466192, 466257), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_BASE_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)\n', (466210, 466257), True, 'import OpenGL.GL as GL\n'), 
((466266, 466330), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAX_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)\n', (466284, 466330), True, 'import OpenGL.GL as GL\n'), ((466339, 466418), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_WRAP_S', 'GL.GL_CLAMP_TO_EDGE'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)\n', (466357, 466418), True, 'import OpenGL.GL as GL\n'), ((466425, 466504), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_WRAP_T', 'GL.GL_CLAMP_TO_EDGE'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)\n', (466443, 466504), True, 'import OpenGL.GL as GL\n'), ((466601, 466722), 'OpenGL.GL.glTexImage2D', 'GL.glTexImage2D', (['GL.GL_TEXTURE_2D', '(0)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]', '(0)', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, GL.GL_RGB32F, image.shape[1], image.\n shape[0], 0, GL.GL_RGB, GL.GL_FLOAT, image)\n', (466616, 466722), True, 'import OpenGL.GL as GL\n'), ((466962, 466985), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (466982, 466985), True, 'import OpenGL.GL as GL\n'), ((466995, 467021), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (467009, 467021), True, 'import OpenGL.GL as GL\n'), ((467031, 467061), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (467042, 467061), True, 'import OpenGL.GL as GL\n'), ((467139, 467172), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (467150, 467172), True, 'import OpenGL.GL as GL\n'), ((467181, 467207), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (467202, 467207), True, 'import OpenGL.GL as GL\n'), ((467217, 467276), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', 
(['GL.GL_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_ms_errors)\n', (467237, 467276), True, 'import OpenGL.GL as GL\n'), ((467315, 467334), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (467331, 467334), True, 'import OpenGL.GL as GL\n'), ((467343, 467417), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (467359, 467417), True, 'import OpenGL.GL as GL\n'), ((467426, 467568), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RGB8, self.frustum['width'], self.frustum['height'], False)\n", (467452, 467568), True, 'import OpenGL.GL as GL\n'), ((467859, 467994), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render, 0)\n', (467884, 467994), True, 'import OpenGL.GL as GL\n'), ((468037, 468056), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (468053, 468056), True, 'import OpenGL.GL as GL\n'), ((468065, 468153), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (468081, 468153), True, 'import OpenGL.GL as GL\n'), ((468157, 468300), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, 
GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (468183, 468300), True, 'import OpenGL.GL as GL\n'), ((468591, 468735), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_position, 0)\n', (468616, 468735), True, 'import OpenGL.GL as GL\n'), ((468775, 468794), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (468791, 468794), True, 'import OpenGL.GL as GL\n'), ((468803, 468888), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (468819, 468888), True, 'import OpenGL.GL as GL\n'), ((468892, 469035), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_R32UI, self.frustum['width'], self.frustum['height'], False)\n", (468918, 469035), True, 'import OpenGL.GL as GL\n'), ((469126, 469267), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces, 0)\n', (469151, 469267), True, 'import OpenGL.GL as GL\n'), ((469324, 469343), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (469340, 469343), True, 'import OpenGL.GL as GL\n'), ((469352, 469444), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 
'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (469368, 469444), True, 'import OpenGL.GL as GL\n'), ((469448, 469591), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (469474, 469591), True, 'import OpenGL.GL as GL\n'), ((469882, 470030), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric1, 0)\n', (469907, 470030), True, 'import OpenGL.GL as GL\n'), ((470111, 470130), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (470127, 470130), True, 'import OpenGL.GL as GL\n'), ((470139, 470231), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (470155, 470231), True, 'import OpenGL.GL as GL\n'), ((470235, 470378), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_RG32F, self.frustum['width'], self.frustum['height'], False)\n", (470261, 470378), True, 'import OpenGL.GL as GL\n'), ((470669, 470817), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 
'self.texture_errors_sample_barycentric2', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_barycentric2, 0)\n', (470694, 470817), True, 'import OpenGL.GL as GL\n'), ((470879, 470898), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)'], {}), '(1)\n', (470895, 470898), True, 'import OpenGL.GL as GL\n'), ((470907, 470975), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors)\n', (470923, 470975), True, 'import OpenGL.GL as GL\n'), ((470984, 471137), 'OpenGL.GL.glTexImage2DMultisample', 'GL.glTexImage2DMultisample', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.nsamples', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']", '(False)'], {}), "(GL.GL_TEXTURE_2D_MULTISAMPLE, self.nsamples, GL.\n GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'], False)\n", (471010, 471137), True, 'import OpenGL.GL as GL\n'), ((471463, 471591), 'OpenGL.GL.glFramebufferTexture2D', 'GL.glFramebufferTexture2D', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.z_buf_ms_errors', '(0)'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_TEXTURE_2D_MULTISAMPLE, self.z_buf_ms_errors, 0)\n', (471488, 471591), True, 'import OpenGL.GL as GL\n'), ((472004, 472033), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (472015, 472033), True, 'import OpenGL.GL as GL\n'), ((472042, 472092), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (472058, 472092), True, 'import OpenGL.GL as GL\n'), ((472142, 472176), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (472152, 472176), True, 'import OpenGL.GL as GL\n'), ((472185, 472219), 'OpenGL.GL.glClear', 'GL.glClear', 
(['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (472195, 472219), True, 'import OpenGL.GL as GL\n'), ((472413, 472455), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (472433, 472455), True, 'import OpenGL.GL as GL\n'), ((472489, 472512), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (472509, 472512), True, 'import OpenGL.GL as GL\n'), ((472522, 472548), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (472536, 472548), True, 'import OpenGL.GL as GL\n'), ((472558, 472620), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_sample_fetch)\n', (472578, 472620), True, 'import OpenGL.GL as GL\n'), ((472671, 472695), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (472692, 472695), True, 'import OpenGL.GL as GL\n'), ((472704, 472790), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_render)\n', (472725, 472790), True, 'import OpenGL.GL as GL\n'), ((472794, 472902), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (472818, 472902), True, 'import OpenGL.GL as GL\n'), ((472906, 473043), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_render'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_render)\n', (472934, 473043), True, 'import OpenGL.GL as GL\n'), ((473091, 473115), 
'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (473112, 473115), True, 'import OpenGL.GL as GL\n'), ((473124, 473212), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_position)\n', (473145, 473212), True, 'import OpenGL.GL as GL\n'), ((473216, 473325), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (473240, 473325), True, 'import OpenGL.GL as GL\n'), ((473329, 473468), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_position)\n', (473357, 473468), True, 'import OpenGL.GL as GL\n'), ((473512, 473536), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (473533, 473536), True, 'import OpenGL.GL as GL\n'), ((473545, 473624), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (473566, 473624), True, 'import OpenGL.GL as GL\n'), ((473633, 473742), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (473657, 473742), True, 'import OpenGL.GL as GL\n'), ((473746, 473881), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 
'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_face)\n', (473774, 473881), True, 'import OpenGL.GL as GL\n'), ((473942, 473966), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (473963, 473966), True, 'import OpenGL.GL as GL\n'), ((473975, 474067), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric1)\n', (473996, 474067), True, 'import OpenGL.GL as GL\n'), ((474071, 474180), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (474095, 474180), True, 'import OpenGL.GL as GL\n'), ((474184, 474327), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric1)\n', (474212, 474327), True, 'import OpenGL.GL as GL\n'), ((474379, 474403), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (474400, 474403), True, 'import OpenGL.GL as GL\n'), ((474412, 474504), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, self.\n render_buffer_fetch_sample_barycentric2)\n', (474433, 474504), True, 'import OpenGL.GL as GL\n'), ((474508, 474617), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), 
"(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (474532, 474617), True, 'import OpenGL.GL as GL\n'), ((474621, 474764), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'self.render_buffer_fetch_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, self.render_buffer_fetch_sample_barycentric2)\n', (474649, 474764), True, 'import OpenGL.GL as GL\n'), ((474797, 474821), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (474818, 474821), True, 'import OpenGL.GL as GL\n'), ((474830, 474898), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (474851, 474898), True, 'import OpenGL.GL as GL\n'), ((474907, 475026), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (474931, 475026), True, 'import OpenGL.GL as GL\n'), ((475030, 475153), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, self.z_buf_samples_errors)\n', (475058, 475153), True, 'import OpenGL.GL as GL\n'), ((475158, 475187), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (475169, 475187), True, 'import OpenGL.GL as GL\n'), ((475196, 475246), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (475212, 475246), True, 'import OpenGL.GL as GL\n'), ((475255, 475284), 
'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (475267, 475284), True, 'import OpenGL.GL as GL\n'), ((475294, 475328), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (475304, 475328), True, 'import OpenGL.GL as GL\n'), ((475337, 475371), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (475347, 475371), True, 'import OpenGL.GL as GL\n'), ((475565, 475607), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (475585, 475607), True, 'import OpenGL.GL as GL\n'), ((475657, 475680), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (475677, 475680), True, 'import OpenGL.GL as GL\n'), ((475690, 475752), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_errors_nonms'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_errors_nonms)\n', (475710, 475752), True, 'import OpenGL.GL as GL\n'), ((475789, 475813), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (475810, 475813), True, 'import OpenGL.GL as GL\n'), ((475822, 475889), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_render)\n', (475843, 475889), True, 'import OpenGL.GL as GL\n'), ((475898, 476006), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RGB8, self.frustum[\n 'width'], self.frustum['height'])\n", (475922, 476006), True, 'import OpenGL.GL as GL\n'), ((476010, 476133), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'render_buf_errors_render'], {}), '(GL.GL_FRAMEBUFFER, 
GL.GL_COLOR_ATTACHMENT0, GL\n .GL_RENDERBUFFER, render_buf_errors_render)\n', (476038, 476133), True, 'import OpenGL.GL as GL\n'), ((476174, 476198), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (476195, 476198), True, 'import OpenGL.GL as GL\n'), ((476207, 476283), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (476228, 476283), True, 'import OpenGL.GL as GL\n'), ((476292, 476401), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (476316, 476401), True, 'import OpenGL.GL as GL\n'), ((476405, 476537), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT1', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_position'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT1, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_position)\n', (476433, 476537), True, 'import OpenGL.GL as GL\n'), ((476574, 476598), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (476595, 476598), True, 'import OpenGL.GL as GL\n'), ((476607, 476679), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (476628, 476679), True, 'import OpenGL.GL as GL\n'), ((476688, 476797), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_R32UI', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_R32UI, self.frustum[\n 'width'], self.frustum['height'])\n", (476712, 476797), True, 'import OpenGL.GL as GL\n'), ((476801, 476929), 
'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT2', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_face'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT2, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_face)\n', (476829, 476929), True, 'import OpenGL.GL as GL\n'), ((476984, 477008), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (477005, 477008), True, 'import OpenGL.GL as GL\n'), ((477017, 477102), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric1\n )\n', (477038, 477102), True, 'import OpenGL.GL as GL\n'), ((477106, 477215), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (477130, 477215), True, 'import OpenGL.GL as GL\n'), ((477219, 477355), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT3', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric1'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT3, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric1)\n', (477247, 477355), True, 'import OpenGL.GL as GL\n'), ((477400, 477424), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (477421, 477424), True, 'import OpenGL.GL as GL\n'), ((477433, 477518), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_RENDERBUFFER, render_buf_errors_sample_barycentric2\n )\n', (477454, 477518), True, 'import OpenGL.GL as GL\n'), ((477522, 477631), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_RG32F', 
"self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_RG32F, self.frustum[\n 'width'], self.frustum['height'])\n", (477546, 477631), True, 'import OpenGL.GL as GL\n'), ((477635, 477771), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT4', 'GL.GL_RENDERBUFFER', 'render_buf_errors_sample_barycentric2'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT4, GL\n .GL_RENDERBUFFER, render_buf_errors_sample_barycentric2)\n', (477663, 477771), True, 'import OpenGL.GL as GL\n'), ((477808, 477832), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (477829, 477832), True, 'import OpenGL.GL as GL\n'), ((477841, 477904), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_RENDERBUFFER, z_buf_samples_errors)\n', (477862, 477904), True, 'import OpenGL.GL as GL\n'), ((477913, 478032), 'OpenGL.GL.glRenderbufferStorage', 'GL.glRenderbufferStorage', (['GL.GL_RENDERBUFFER', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, GL.GL_DEPTH_COMPONENT, self.\n frustum['width'], self.frustum['height'])\n", (477937, 478032), True, 'import OpenGL.GL as GL\n'), ((478036, 478154), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'z_buf_samples_errors'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, z_buf_samples_errors)\n', (478064, 478154), True, 'import OpenGL.GL as GL\n'), ((478159, 478193), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (478169, 478193), True, 'import OpenGL.GL as GL\n'), ((478202, 478236), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (478212, 478236), True, 'import OpenGL.GL as GL\n'), ((478430, 478472), 
'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (478450, 478472), True, 'import OpenGL.GL as GL\n'), ((478503, 478572), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""myTextureSampler"""'], {}), "(self.errorTextureProgram, 'myTextureSampler')\n", (478526, 478572), True, 'import OpenGL.GL as GL\n'), ((478633, 478693), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""position"""'], {}), "(self.errorTextureProgram, 'position')\n", (478655, 478693), True, 'import OpenGL.GL as GL\n'), ((478719, 478778), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""colorIn"""'], {}), "(self.errorTextureProgram, 'colorIn')\n", (478741, 478778), True, 'import OpenGL.GL as GL\n'), ((478802, 478862), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""vertexUV"""'], {}), "(self.errorTextureProgram, 'vertexUV')\n", (478824, 478862), True, 'import OpenGL.GL as GL\n'), ((478891, 478950), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""face_id"""'], {}), "(self.errorTextureProgram, 'face_id')\n", (478913, 478950), True, 'import OpenGL.GL as GL\n'), ((478982, 479045), 'OpenGL.GL.glGetAttribLocation', 'GL.glGetAttribLocation', (['self.errorTextureProgram', '"""barycentric"""'], {}), "(self.errorTextureProgram, 'barycentric')\n", (479004, 479045), True, 'import OpenGL.GL as GL\n'), ((481237, 481260), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (481257, 481260), True, 'import OpenGL.GL as GL\n'), ((481286, 481298), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (481295, 481298), True, 'import OpenGL.GL as GL\n'), ((481307, 481345), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'self.vao_quad'], {}), '(1, self.vao_quad)\n', (481327, 481345), True, 
'import OpenGL.GL as GL\n'), ((481354, 481389), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (481374, 481389), True, 'import OpenGL.GL as GL\n'), ((484282, 484312), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (484293, 484312), True, 'import OpenGL.GL as GL\n'), ((484321, 484354), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_SAMPLE_SHADING'], {}), '(GL.GL_SAMPLE_SHADING)\n', (484332, 484354), True, 'import OpenGL.GL as GL\n'), ((484363, 484389), 'OpenGL.GL.glMinSampleShading', 'GL.glMinSampleShading', (['(1.0)'], {}), '(1.0)\n', (484384, 484389), True, 'import OpenGL.GL as GL\n'), ((484601, 484642), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.errorTextureProgram'], {}), '(self.errorTextureProgram)\n', (484616, 484642), True, 'import OpenGL.GL as GL\n'), ((484652, 484716), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (484672, 484716), True, 'import OpenGL.GL as GL\n'), ((484877, 484912), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (484893, 484912), True, 'import OpenGL.GL as GL\n'), ((484972, 485007), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (484987, 485007), True, 'import OpenGL.GL as GL\n'), ((485012, 485071), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (485022, 485071), True, 'import OpenGL.GL as GL\n'), ((485098, 485132), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE1'], {}), '(GL.GL_TEXTURE1)\n', (485116, 485132), True, 'import OpenGL.GL as GL\n'), ((485240, 485290), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'self.textureGT'], {}), '(GL.GL_TEXTURE_2D, 
self.textureGT)\n', (485256, 485290), True, 'import OpenGL.GL as GL\n'), ((485319, 485379), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""imageGT"""'], {}), "(self.errorTextureProgram, 'imageGT')\n", (485342, 485379), True, 'import OpenGL.GL as GL\n'), ((485388, 485424), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureGTLoc', '(1)'], {}), '(self.textureGTLoc, 1)\n', (485402, 485424), True, 'import OpenGL.GL as GL\n'), ((485443, 485498), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""ww"""'], {}), "(self.errorTextureProgram, 'ww')\n", (485466, 485498), True, 'import OpenGL.GL as GL\n'), ((485515, 485570), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.errorTextureProgram', '"""wh"""'], {}), "(self.errorTextureProgram, 'wh')\n", (485538, 485570), True, 'import OpenGL.GL as GL\n'), ((485579, 485623), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (485593, 485623), True, 'import OpenGL.GL as GL\n'), ((485632, 485677), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (485646, 485677), True, 'import OpenGL.GL as GL\n'), ((485824, 485863), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (485830, 485863), True, 'import numpy as np\n'), ((490184, 490225), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.fetchSamplesProgram'], {}), '(self.fetchSamplesProgram)\n', (490199, 490225), True, 'import OpenGL.GL as GL\n'), ((490294, 490353), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""colors"""'], {}), "(self.fetchSamplesProgram, 'colors')\n", (490317, 490353), True, 'import OpenGL.GL as GL\n'), ((490389, 490458), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', 
(['self.fetchSamplesProgram', '"""sample_positions"""'], {}), "(self.fetchSamplesProgram, 'sample_positions')\n", (490412, 490458), True, 'import OpenGL.GL as GL\n'), ((490490, 490555), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_faces"""'], {}), "(self.fetchSamplesProgram, 'sample_faces')\n", (490513, 490555), True, 'import OpenGL.GL as GL\n'), ((490594, 490673), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords1"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords1')\n", (490617, 490673), True, 'import OpenGL.GL as GL\n'), ((490712, 490791), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample_barycentric_coords2"""'], {}), "(self.fetchSamplesProgram, 'sample_barycentric_coords2')\n", (490735, 490791), True, 'import OpenGL.GL as GL\n'), ((491066, 491121), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""ww"""'], {}), "(self.fetchSamplesProgram, 'ww')\n", (491089, 491121), True, 'import OpenGL.GL as GL\n'), ((491138, 491193), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""wh"""'], {}), "(self.fetchSamplesProgram, 'wh')\n", (491161, 491193), True, 'import OpenGL.GL as GL\n'), ((491202, 491246), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['wwLoc', "self.frustum['width']"], {}), "(wwLoc, self.frustum['width'])\n", (491216, 491246), True, 'import OpenGL.GL as GL\n'), ((491255, 491300), 'OpenGL.GL.glUniform1f', 'GL.glUniform1f', (['whLoc', "self.frustum['height']"], {}), "(whLoc, self.frustum['height'])\n", (491269, 491300), True, 'import OpenGL.GL as GL\n'), ((491325, 491400), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (491333, 491400), True, 'import numpy 
as np\n'), ((491435, 491510), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (491443, 491510), True, 'import numpy as np\n'), ((491674, 491749), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 2]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 2])\n", (491682, 491749), True, 'import numpy as np\n'), ((491793, 491868), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 1]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 1])\n", (491801, 491868), True, 'import numpy as np\n'), ((491911, 491986), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height'], 3]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height'], 3])\n", (491919, 491986), True, 'import numpy as np\n'), ((491996, 492026), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (492008, 492026), True, 'import OpenGL.GL as GL\n'), ((492036, 492103), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_sample_fetch)\n', (492056, 492103), True, 'import OpenGL.GL as GL\n'), ((492289, 492324), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(5)', 'drawingBuffers'], {}), '(5, drawingBuffers)\n', (492305, 492324), True, 'import OpenGL.GL as GL\n'), ((492334, 492369), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0, 0.0)\n', (492349, 492369), True, 'import OpenGL.GL as GL\n'), ((492374, 492433), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (492384, 492433), True, 'import OpenGL.GL as GL\n'), ((492457, 492481), 
'numpy.arange', 'np.arange', (['self.nsamples'], {}), '(self.nsamples)\n', (492466, 492481), True, 'import numpy as np\n'), ((496660, 496683), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (496680, 496683), True, 'import OpenGL.GL as GL\n'), ((496693, 496728), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (496708, 496728), True, 'import OpenGL.GL as GL\n'), ((496734, 496763), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (496745, 496763), True, 'import OpenGL.GL as GL\n'), ((496772, 496803), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (496784, 496803), True, 'import OpenGL.GL as GL\n'), ((496884, 496908), 'numpy.mean', 'np.mean', (['self.renders', '(0)'], {}), '(self.renders, 0)\n', (496891, 496908), True, 'import numpy as np\n'), ((497135, 497177), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.visibilityProgram_ms'], {}), '(self.visibilityProgram_ms)\n', (497150, 497177), True, 'import OpenGL.GL as GL\n'), ((497191, 497204), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (497201, 497204), True, 'import numpy as np\n'), ((497280, 497339), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (497290, 497339), True, 'import OpenGL.GL as GL\n'), ((497559, 497589), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint8'}), '(fc, dtype=np.uint8)\n', (497569, 497589), True, 'import numpy as np\n'), ((497998, 498023), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (498018, 498023), True, 'import OpenGL.GL as GL\n'), ((498862, 498926), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms_errors'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms_errors)\n', (498882, 498926), True, 'import OpenGL.GL 
as GL\n'), ((498987, 499022), 'OpenGL.GL.glDrawBuffers', 'GL.glDrawBuffers', (['(1)', 'drawingBuffers'], {}), '(1, drawingBuffers)\n', (499003, 499022), True, 'import OpenGL.GL as GL\n'), ((499269, 499299), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (499281, 499299), True, 'import OpenGL.GL as GL\n'), ((499397, 499426), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (499408, 499426), True, 'import OpenGL.GL as GL\n'), ((509719, 509745), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (509727, 509745), True, 'import numpy as np\n'), ((510471, 510527), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (510485, 510527), True, 'import numpy as np\n'), ((511030, 511086), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (511044, 511086), True, 'import numpy as np\n'), ((511589, 511645), 'numpy.concatenate', 'np.concatenate', (['[xu[:, :, None], xv[:, :, None]]'], {'axis': '(2)'}), '([xu[:, :, None], xv[:, :, None]], axis=2)\n', (511603, 511645), True, 'import numpy as np\n'), ((512179, 512193), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (512190, 512193), True, 'import numpy as np\n'), ((512368, 512406), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', 'ident'], {}), '((p2 - p0)[:, None, :], ident)\n', (512376, 512406), True, 'import numpy as np\n'), ((512424, 512462), 'numpy.cross', 'np.cross', (['ident', '(p1 - p0)[:, None, :]'], {}), '(ident, (p1 - p0)[:, None, :])\n', (512432, 512462), True, 'import numpy as np\n'), ((512697, 512739), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp0'], {}), "('ijk,ikl->ijl', dntnorm, dntdp0)\n", (512706, 512739), True, 'import numpy as np\n'), ((512761, 512803), 'numpy.einsum', 
'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp1'], {}), "('ijk,ikl->ijl', dntnorm, dntdp1)\n", (512770, 512803), True, 'import numpy as np\n'), ((512825, 512867), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dntnorm', 'dntdp2'], {}), "('ijk,ikl->ijl', dntnorm, dntdp2)\n", (512834, 512867), True, 'import numpy as np\n'), ((512888, 512925), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp0'], {}), "('ij,ijk->ik', pre1, dntdp0)\n", (512897, 512925), True, 'import numpy as np\n'), ((512945, 512982), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp1'], {}), "('ij,ijk->ik', pre1, dntdp1)\n", (512954, 512982), True, 'import numpy as np\n'), ((513002, 513039), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->ik"""', 'pre1', 'dntdp2'], {}), "('ij,ijk->ik', pre1, dntdp2)\n", (513011, 513039), True, 'import numpy as np\n'), ((516916, 517013), 'numpy.concatenate', 'np.concatenate', (['[db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp0wrt[:, None, :], db1dp0wrt[:, None, :], db2dp0wrt[:,\n None, :]], axis=1)\n', (516930, 517013), True, 'import numpy as np\n'), ((517024, 517121), 'numpy.concatenate', 'np.concatenate', (['[db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp1wrt[:, None, :], db1dp1wrt[:, None, :], db2dp1wrt[:,\n None, :]], axis=1)\n', (517038, 517121), True, 'import numpy as np\n'), ((517132, 517229), 'numpy.concatenate', 'np.concatenate', (['[db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:, None, :]]'], {'axis': '(1)'}), '([db0dp2wrt[:, None, :], db1dp2wrt[:, None, :], db2dp2wrt[:,\n None, :]], axis=1)\n', (517146, 517229), True, 'import numpy as np\n'), ((517249, 517319), 'numpy.concatenate', 'np.concatenate', (['[dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]]', '(2)'], {}), '([dp0[:, :, None], dp1[:, :, None], dp2[:, :, None]], 2)\n', (517263, 517319), True, 'import numpy as np\n'), ((517782, 517870), 
'numpy.concatenate', 'np.concatenate', (['[dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]]'], {'axis': '(1)'}), '([dxdp_0[:, None, :], dxdp_1[:, None, :], dxdp_2[:, None, :]],\n axis=1)\n', (517796, 517870), True, 'import numpy as np\n'), ((518954, 518975), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (518960, 518975), True, 'import numpy as np\n'), ((527106, 527127), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (527112, 527127), True, 'import numpy as np\n'), ((528493, 528532), 'numpy.arange', 'np.arange', (['self.boundarybool_image.size'], {}), '(self.boundarybool_image.size)\n', (528502, 528532), True, 'import numpy as np\n'), ((529541, 529562), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (529547, 529562), True, 'import numpy as np\n'), ((544984, 545078), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (544997, 545078), True, 'import scipy.sparse as sp\n'), ((545086, 545107), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (545092, 545107), True, 'import numpy as np\n'), ((546298, 546319), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (546304, 546319), True, 'import numpy as np\n'), ((549851, 549924), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (549864, 549924), True, 'import scipy.sparse as sp\n'), ((549976, 549997), 'numpy.any', 'np.any', (['boundaryImage'], {}), '(boundaryImage)\n', (549982, 549997), True, 'import numpy as np\n'), ((556759, 556809), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (556775, 556809), True, 'import OpenGL.GL as 
GL\n'), ((558203, 558253), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (558219, 558253), True, 'import OpenGL.GL as GL\n'), ((558295, 558330), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (558310, 558330), True, 'import OpenGL.GL as GL\n'), ((558419, 558473), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (558439, 558473), True, 'import OpenGL.GL as GL\n'), ((558482, 558541), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (558492, 558541), True, 'import OpenGL.GL as GL\n'), ((558551, 558585), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (558566, 558585), True, 'import OpenGL.GL as GL\n'), ((558657, 558706), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (558677, 558706), True, 'import OpenGL.GL as GL\n'), ((558715, 558755), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (558730, 558755), True, 'import OpenGL.GL as GL\n'), ((559035, 559089), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (559055, 559089), True, 'import OpenGL.GL as GL\n'), ((559254, 559304), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (559270, 559304), True, 'import OpenGL.GL as GL\n'), ((559346, 559381), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (559361, 559381), True, 'import OpenGL.GL as GL\n'), 
((559470, 559524), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (559490, 559524), True, 'import OpenGL.GL as GL\n'), ((559533, 559592), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (559543, 559592), True, 'import OpenGL.GL as GL\n'), ((559602, 559636), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (559617, 559636), True, 'import OpenGL.GL as GL\n'), ((559728, 559777), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (559748, 559777), True, 'import OpenGL.GL as GL\n'), ((559786, 559826), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (559801, 559826), True, 'import OpenGL.GL as GL\n'), ((560106, 560160), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (560126, 560160), True, 'import OpenGL.GL as GL\n'), ((560386, 560425), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (560392, 560425), True, 'import numpy as np\n'), ((561646, 561685), 'OpenGL.GL.shaders.glUseProgram', 'shaders.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (561666, 561685), True, 'import OpenGL.GL.shaders as shaders\n'), ((561695, 561749), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (561715, 561749), True, 'import OpenGL.GL as GL\n'), ((561758, 561817), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (561768, 561817), True, 'import 
OpenGL.GL as GL\n'), ((562580, 562629), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (562600, 562629), True, 'import OpenGL.GL as GL\n'), ((562639, 562679), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (562654, 562679), True, 'import OpenGL.GL as GL\n'), ((563526, 563587), 'numpy.zeros', 'np.zeros', (['(self.v.r.size / 3, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.v.r.size / 3, 2), dtype=np.float64, order='C')\n", (563534, 563587), True, 'import numpy as np\n'), ((563657, 563714), 'numpy.zeros', 'np.zeros', (['(self.vpe.size, 2)'], {'dtype': 'np.float64', 'order': '"""C"""'}), "((self.vpe.size, 2), dtype=np.float64, order='C')\n", (563665, 563714), True, 'import numpy as np\n'), ((564041, 564075), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (564056, 564075), True, 'import OpenGL.GL as GL\n'), ((564181, 564222), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (564196, 564222), True, 'import OpenGL.GL as GL\n'), ((564483, 564517), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (564498, 564517), True, 'import OpenGL.GL as GL\n'), ((564626, 564667), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (564641, 564667), True, 'import OpenGL.GL as GL\n'), ((564907, 564937), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (564918, 564937), True, 'import OpenGL.GL as GL\n'), ((565197, 565251), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (565217, 565251), True, 'import OpenGL.GL as GL\n'), ((565260, 565319), 'OpenGL.GL.glClear', 'GL.glClear', 
(['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (565270, 565319), True, 'import OpenGL.GL as GL\n'), ((565507, 565566), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (565517, 565566), True, 'import OpenGL.GL as GL\n'), ((565712, 565751), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (565718, 565751), True, 'import numpy as np\n'), ((567907, 567961), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (567927, 567961), True, 'import OpenGL.GL as GL\n'), ((567970, 568143), 'OpenGL.GL.glBlitFramebuffer', 'GL.glBlitFramebuffer', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", '(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_COLOR_BUFFER_BIT', 'GL.GL_LINEAR'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], 0,\n 0, self.frustum['width'], self.frustum['height'], GL.\n GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)\n", (567990, 568143), True, 'import OpenGL.GL as GL\n'), ((568172, 568221), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (568192, 568221), True, 'import OpenGL.GL as GL\n'), ((568230, 568270), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (568245, 568270), True, 'import OpenGL.GL as GL\n'), ((568551, 568605), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (568571, 568605), True, 'import OpenGL.GL as GL\n'), ((568614, 568645), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (568626, 568645), True, 'import OpenGL.GL as 
GL\n'), ((568654, 568689), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (568669, 568689), True, 'import OpenGL.GL as GL\n'), ((569430, 569454), 'numpy.round', 'np.round', (['texcoord_image'], {}), '(texcoord_image)\n', (569438, 569454), True, 'import numpy as np\n'), ((569643, 569661), 'OpenGL.GL.glGenBuffers', 'GL.glGenBuffers', (['(1)'], {}), '(1)\n', (569658, 569661), True, 'import OpenGL.GL as GL\n'), ((1081, 1116), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', (1106, 1116), False, 'import glfw\n'), ((4639, 4650), 'glfw.init', 'glfw.init', ([], {}), '()\n', (4648, 4650), False, 'import glfw\n'), ((4704, 4751), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MAJOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MAJOR, 3)\n', (4720, 4751), False, 'import glfw\n'), ((4764, 4811), 'glfw.window_hint', 'glfw.window_hint', (['glfw.CONTEXT_VERSION_MINOR', '(3)'], {}), '(glfw.CONTEXT_VERSION_MINOR, 3)\n', (4780, 4811), False, 'import glfw\n'), ((4895, 4958), 'glfw.window_hint', 'glfw.window_hint', (['glfw.OPENGL_PROFILE', 'glfw.OPENGL_CORE_PROFILE'], {}), '(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n', (4911, 4958), False, 'import glfw\n'), ((4971, 5008), 'glfw.window_hint', 'glfw.window_hint', (['glfw.DEPTH_BITS', '(32)'], {}), '(glfw.DEPTH_BITS, 32)\n', (4987, 5008), False, 'import glfw\n'), ((5021, 5064), 'glfw.window_hint', 'glfw.window_hint', (['glfw.VISIBLE', 'GL.GL_FALSE'], {}), '(glfw.VISIBLE, GL.GL_FALSE)\n', (5037, 5064), False, 'import glfw\n'), ((5088, 5187), 'glfw.create_window', 'glfw.create_window', (["self.frustum['width']", "self.frustum['height']", '"""test"""', 'None', 'self.sharedWin'], {}), "(self.frustum['width'], self.frustum['height'], 'test',\n None, self.sharedWin)\n", (5106, 5187), False, 'import glfw\n'), ((5197, 5232), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', 
(5222, 5232), False, 'import glfw\n'), ((5469, 5521), 'OpenGL.raw.osmesa.mesa.OSMesaCreateContext', 'mesa.OSMesaCreateContext', (['GL.GL_RGBA', 'self.sharedWin'], {}), '(GL.GL_RGBA, self.sharedWin)\n', (5493, 5521), False, 'from OpenGL.raw.osmesa import mesa\n'), ((5577, 5654), 'OpenGL.arrays.GLubyteArray.zeros', 'arrays.GLubyteArray.zeros', (["(self.frustum['height'], self.frustum['width'], 3)"], {}), "((self.frustum['height'], self.frustum['width'], 3))\n", (5602, 5654), False, 'from OpenGL import arrays\n'), ((5680, 5722), 'OpenGL.arrays.ArrayDatatype.dataPointer', 'arrays.ArrayDatatype.dataPointer', (['self.buf'], {}), '(self.buf)\n', (5712, 5722), False, 'from OpenGL import arrays\n'), ((7180, 7203), 'OpenGL.GL.glGenFramebuffers', 'GL.glGenFramebuffers', (['(1)'], {}), '(1)\n', (7200, 7203), True, 'import OpenGL.GL as GL\n'), ((7217, 7243), 'OpenGL.GL.glDepthMask', 'GL.glDepthMask', (['GL.GL_TRUE'], {}), '(GL.GL_TRUE)\n', (7231, 7243), True, 'import OpenGL.GL as GL\n'), ((7257, 7309), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_FRAMEBUFFER, self.fbo_ms)\n', (7277, 7309), True, 'import OpenGL.GL as GL\n'), ((7345, 7369), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (7366, 7369), True, 'import OpenGL.GL as GL\n'), ((7382, 7443), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.render_buf_ms'], {}), '(GL.GL_RENDERBUFFER, self.render_buf_ms)\n', (7403, 7443), True, 'import OpenGL.GL as GL\n'), ((7456, 7590), 'OpenGL.GL.glRenderbufferStorageMultisample', 'GL.glRenderbufferStorageMultisample', (['GL.GL_RENDERBUFFER', 'self.nsamples', 'GL.GL_RGB8', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, self.nsamples, GL.\n GL_RGB8, self.frustum['width'], self.frustum['height'])\n", (7491, 7590), True, 'import OpenGL.GL as GL\n'), ((7598, 7720), 'OpenGL.GL.glFramebufferRenderbuffer', 
'GL.glFramebufferRenderbuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'GL.GL_COLOR_ATTACHMENT0', 'GL.GL_RENDERBUFFER', 'self.render_buf_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, GL.\n GL_COLOR_ATTACHMENT0, GL.GL_RENDERBUFFER, self.render_buf_ms)\n', (7626, 7720), True, 'import OpenGL.GL as GL\n'), ((7745, 7769), 'OpenGL.GL.glGenRenderbuffers', 'GL.glGenRenderbuffers', (['(1)'], {}), '(1)\n', (7766, 7769), True, 'import OpenGL.GL as GL\n'), ((7782, 7838), 'OpenGL.GL.glBindRenderbuffer', 'GL.glBindRenderbuffer', (['GL.GL_RENDERBUFFER', 'self.z_buf_ms'], {}), '(GL.GL_RENDERBUFFER, self.z_buf_ms)\n', (7803, 7838), True, 'import OpenGL.GL as GL\n'), ((7851, 7996), 'OpenGL.GL.glRenderbufferStorageMultisample', 'GL.glRenderbufferStorageMultisample', (['GL.GL_RENDERBUFFER', 'self.nsamples', 'GL.GL_DEPTH_COMPONENT', "self.frustum['width']", "self.frustum['height']"], {}), "(GL.GL_RENDERBUFFER, self.nsamples, GL.\n GL_DEPTH_COMPONENT, self.frustum['width'], self.frustum['height'])\n", (7886, 7996), True, 'import OpenGL.GL as GL\n'), ((8004, 8115), 'OpenGL.GL.glFramebufferRenderbuffer', 'GL.glFramebufferRenderbuffer', (['GL.GL_FRAMEBUFFER', 'GL.GL_DEPTH_ATTACHMENT', 'GL.GL_RENDERBUFFER', 'self.z_buf_ms'], {}), '(GL.GL_FRAMEBUFFER, GL.GL_DEPTH_ATTACHMENT, GL.\n GL_RENDERBUFFER, self.z_buf_ms)\n', (8032, 8115), True, 'import OpenGL.GL as GL\n'), ((8124, 8153), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_DEPTH_TEST'], {}), '(GL.GL_DEPTH_TEST)\n', (8135, 8153), True, 'import OpenGL.GL as GL\n'), ((8166, 8216), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (8182, 8216), True, 'import OpenGL.GL as GL\n'), ((8229, 8258), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_CULL_FACE'], {}), '(GL.GL_CULL_FACE)\n', (8241, 8258), True, 'import OpenGL.GL as GL\n'), ((8272, 8306), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_COLOR_BUFFER_BIT'], {}), '(GL.GL_COLOR_BUFFER_BIT)\n', (8282, 8306), True, 'import OpenGL.GL as 
GL\n'), ((8319, 8353), 'OpenGL.GL.glClear', 'GL.glClear', (['GL.GL_DEPTH_BUFFER_BIT'], {}), '(GL.GL_DEPTH_BUFFER_BIT)\n', (8329, 8353), True, 'import OpenGL.GL as GL\n'), ((8560, 8602), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', '(0)'], {}), '(GL.GL_FRAMEBUFFER, 0)\n', (8580, 8602), True, 'import OpenGL.GL as GL\n'), ((9837, 9883), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (9864, 9883), True, 'import OpenGL.GL as GL\n'), ((13028, 13062), 'numpy.array', 'np.array', (['self.v'], {'dtype': 'np.float32'}), '(self.v, dtype=np.float32)\n', (13036, 13062), True, 'import numpy as np\n'), ((13402, 13436), 'numpy.array', 'np.array', (['self.v'], {'dtype': 'np.float32'}), '(self.v, dtype=np.float32)\n', (13410, 13436), True, 'import numpy as np\n'), ((13474, 13509), 'numpy.array', 'np.array', (['self.vc'], {'dtype': 'np.float32'}), '(self.vc, dtype=np.float32)\n', (13482, 13509), True, 'import numpy as np\n'), ((13551, 13594), 'numpy.array', 'np.array', (['self.vc_by_face'], {'dtype': 'np.float32'}), '(self.vc_by_face, dtype=np.float32)\n', (13559, 13594), True, 'import numpy as np\n'), ((16525, 16540), 'OpenGL.GL.glGetError', 'GL.glGetError', ([], {}), '()\n', (16538, 16540), True, 'import OpenGL.GL as GL\n'), ((20283, 20322), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (20289, 20322), True, 'import numpy as np\n'), ((20537, 20594), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (20557, 20594), True, 'import OpenGL.GL as GL\n'), ((20621, 20680), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (20641, 20680), True, 'import OpenGL.GL as GL\n'), ((21573, 21597), 'numpy.zeros_like', 'np.zeros_like', 
(['self.vc.r'], {}), '(self.vc.r)\n', (21586, 21597), True, 'import numpy as np\n'), ((24252, 24276), 'numpy.zeros_like', 'np.zeros_like', (['self.vc.r'], {}), '(self.vc.r)\n', (24265, 24276), True, 'import numpy as np\n'), ((24634, 24666), 'numpy.ones_like', 'np.ones_like', (['ec'], {'dtype': 'np.uint8'}), '(ec, dtype=np.uint8)\n', (24646, 24666), True, 'import numpy as np\n'), ((29076, 29115), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (29082, 29115), True, 'import numpy as np\n'), ((29650, 29704), 'numpy.asarray', 'np.asarray', (['verts_by_edge'], {'dtype': 'np.float32', 'order': '"""C"""'}), "(verts_by_edge, dtype=np.float32, order='C')\n", (29660, 29704), True, 'import numpy as np\n'), ((30456, 30467), 'chumpy.utils.row', 'row', (['campos'], {}), '(campos)\n', (30459, 30467), False, 'from chumpy.utils import row, col\n'), ((31837, 31891), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (31857, 31891), True, 'import OpenGL.GL as GL\n'), ((31904, 31963), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (31914, 31963), True, 'import OpenGL.GL as GL\n'), ((33952, 34006), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (33972, 34006), True, 'import OpenGL.GL as GL\n'), ((34019, 34078), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (34029, 34078), True, 'import OpenGL.GL as GL\n'), ((36657, 36669), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (36667, 36669), True, 'import matplotlib.pyplot as plt\n'), ((36682, 36698), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (36693, 
36698), True, 'import matplotlib.pyplot as plt\n'), ((36752, 36768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (36763, 36768), True, 'import matplotlib.pyplot as plt\n'), ((36823, 36833), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36831, 36833), True, 'import matplotlib.pyplot as plt\n'), ((36846, 36861), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (36859, 36861), False, 'import pdb\n'), ((38930, 38969), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (38936, 38969), True, 'import numpy as np\n'), ((44872, 44902), 'OpenGL.GL.glEnable', 'GL.glEnable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (44883, 44902), True, 'import OpenGL.GL as GL\n'), ((45176, 45230), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (45196, 45230), True, 'import OpenGL.GL as GL\n'), ((45243, 45302), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (45253, 45302), True, 'import OpenGL.GL as GL\n'), ((45509, 45568), 'OpenGL.GL.glClear', 'GL.glClear', (['(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)'], {}), '(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n', (45519, 45568), True, 'import OpenGL.GL as GL\n'), ((45824, 45878), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (45844, 45878), True, 'import OpenGL.GL as GL\n'), ((45891, 46064), 'OpenGL.GL.glBlitFramebuffer', 'GL.glBlitFramebuffer', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", '(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_COLOR_BUFFER_BIT', 'GL.GL_LINEAR'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], 0,\n 0, self.frustum['width'], self.frustum['height'], GL.\n 
GL_COLOR_BUFFER_BIT, GL.GL_LINEAR)\n", (45911, 46064), True, 'import OpenGL.GL as GL\n'), ((46068, 46117), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_FRAMEBUFFER, self.fbo)\n', (46088, 46117), True, 'import OpenGL.GL as GL\n'), ((46130, 46170), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (46145, 46170), True, 'import OpenGL.GL as GL\n'), ((46488, 46542), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo)\n', (46508, 46542), True, 'import OpenGL.GL as GL\n'), ((46555, 46586), 'OpenGL.GL.glDisable', 'GL.glDisable', (['GL.GL_MULTISAMPLE'], {}), '(GL.GL_MULTISAMPLE)\n', (46567, 46586), True, 'import OpenGL.GL as GL\n'), ((46599, 46634), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['(0.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(0.0, 0.0, 0.0, 1.0)\n', (46614, 46634), True, 'import OpenGL.GL as GL\n'), ((48144, 48156), 'OpenGL.GL.glFlush', 'GL.glFlush', ([], {}), '()\n', (48154, 48156), True, 'import OpenGL.GL as GL\n'), ((48169, 48182), 'OpenGL.GL.glFinish', 'GL.glFinish', ([], {}), '()\n', (48180, 48182), True, 'import OpenGL.GL as GL\n'), ((49551, 49595), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (49569, 49595), True, 'import OpenGL.GL as GL\n'), ((56792, 56825), 'numpy.atleast_3d', 'np.atleast_3d', (['boundarybool_image'], {}), '(boundarybool_image)\n', (56805, 56825), True, 'import numpy as np\n'), ((59655, 59685), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (59675, 59685), True, 'import OpenGL.GL as GL\n'), ((59864, 59924), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_location, 1, GL.GL_TRUE, MVP)\n', (59885, 59924), True, 'import OpenGL.GL as GL\n'), 
((60769, 60796), 'numpy.hstack', 'np.hstack', (['(colors, color3)'], {}), '((colors, color3))\n', (60778, 60796), True, 'import numpy as np\n'), ((60972, 61022), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (60988, 61022), True, 'import OpenGL.GL as GL\n'), ((61104, 61154), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (61120, 61154), True, 'import OpenGL.GL as GL\n'), ((68402, 68523), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (68417, 68523), True, 'import OpenGL.GL as GL\n'), ((68764, 68821), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (68784, 68821), True, 'import OpenGL.GL as GL\n'), ((68848, 68907), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (68868, 68907), True, 'import OpenGL.GL as GL\n'), ((70777, 70834), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)\n', (70797, 70834), True, 'import OpenGL.GL as GL\n'), ((70861, 70920), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)\n', (70881, 70920), True, 'import OpenGL.GL as GL\n'), ((73138, 73150), 'OpenGL.GL.glFlush', 'GL.glFlush', ([], {}), '()\n', (73148, 73150), True, 'import OpenGL.GL as GL\n'), ((73163, 73176), 'OpenGL.GL.glFinish', 'GL.glFinish', ([], {}), '()\n', (73174, 73176), True, 'import OpenGL.GL as 
GL\n'), ((75362, 75406), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (75380, 75406), True, 'import OpenGL.GL as GL\n'), ((89493, 89514), 'numpy.flipud', 'np.flipud', (['whitePixel'], {}), '(whitePixel)\n', (89502, 89514), True, 'import numpy as np\n'), ((95049, 95095), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (95076, 95095), True, 'import OpenGL.GL as GL\n'), ((98201, 98247), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (98228, 98247), True, 'import OpenGL.GL as GL\n'), ((101065, 101111), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (101092, 101111), True, 'import OpenGL.GL as GL\n'), ((107198, 107319), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (107213, 107319), True, 'import OpenGL.GL as GL\n'), ((114864, 114923), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample"""'], {}), "(self.fetchSamplesProgram, 'sample')\n", (114887, 114923), True, 'import OpenGL.GL as GL\n'), ((114936, 114969), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['sampleLoc', 'sample'], {}), '(sampleLoc, sample)\n', (114950, 114969), True, 'import OpenGL.GL as GL\n'), ((114983, 115017), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (115001, 115017), True, 'import OpenGL.GL as GL\n'), ((115030, 115104), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, 
self.texture_errors_render)\n', (115046, 115104), True, 'import OpenGL.GL as GL\n'), ((115117, 115150), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.colorsLoc', '(0)'], {}), '(self.colorsLoc, 0)\n', (115131, 115150), True, 'import OpenGL.GL as GL\n'), ((115164, 115198), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE1'], {}), '(GL.GL_TEXTURE1)\n', (115182, 115198), True, 'import OpenGL.GL as GL\n'), ((115211, 115299), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (115227, 115299), True, 'import OpenGL.GL as GL\n'), ((115307, 115350), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_positionsLoc', '(1)'], {}), '(self.sample_positionsLoc, 1)\n', (115321, 115350), True, 'import OpenGL.GL as GL\n'), ((115364, 115398), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE2'], {}), '(GL.GL_TEXTURE2)\n', (115382, 115398), True, 'import OpenGL.GL as GL\n'), ((115411, 115496), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (115427, 115496), True, 'import OpenGL.GL as GL\n'), ((115504, 115543), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_facesLoc', '(2)'], {}), '(self.sample_facesLoc, 2)\n', (115518, 115543), True, 'import OpenGL.GL as GL\n'), ((115557, 115591), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE3'], {}), '(GL.GL_TEXTURE3)\n', (115575, 115591), True, 'import OpenGL.GL as GL\n'), ((115604, 115696), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (115620, 115696), True, 'import OpenGL.GL as GL\n'), ((115704, 115750), 
'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric1Loc', '(3)'], {}), '(self.sample_barycentric1Loc, 3)\n', (115718, 115750), True, 'import OpenGL.GL as GL\n'), ((115764, 115798), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE4'], {}), '(GL.GL_TEXTURE4)\n', (115782, 115798), True, 'import OpenGL.GL as GL\n'), ((115811, 115903), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (115827, 115903), True, 'import OpenGL.GL as GL\n'), ((115911, 115957), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric2Loc', '(4)'], {}), '(self.sample_barycentric2Loc, 4)\n', (115925, 115957), True, 'import OpenGL.GL as GL\n'), ((115971, 116006), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (115991, 116006), True, 'import OpenGL.GL as GL\n'), ((116019, 116054), 'OpenGL.GL.glDrawArrays', 'GL.glDrawArrays', (['GL.GL_POINTS', '(0)', '(1)'], {}), '(GL.GL_POINTS, 0, 1)\n', (116034, 116054), True, 'import OpenGL.GL as GL\n'), ((116385, 116452), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)\n', (116405, 116452), True, 'import OpenGL.GL as GL\n'), ((116466, 116506), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (116481, 116506), True, 'import OpenGL.GL as GL\n'), ((116802, 116842), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT1'], {}), '(GL.GL_COLOR_ATTACHMENT1)\n', (116817, 116842), True, 'import OpenGL.GL as GL\n'), ((117148, 117188), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT2'], {}), '(GL.GL_COLOR_ATTACHMENT2)\n', (117163, 117188), True, 'import OpenGL.GL as GL\n'), ((117495, 117535), 
'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT3'], {}), '(GL.GL_COLOR_ATTACHMENT3)\n', (117510, 117535), True, 'import OpenGL.GL as GL\n'), ((117850, 117890), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT4'], {}), '(GL.GL_COLOR_ATTACHMENT4)\n', (117865, 117890), True, 'import OpenGL.GL as GL\n'), ((118247, 118366), 'numpy.concatenate', 'np.concatenate', (['[self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2\n [sample][:, :, 0:1]]', '(2)'], {}), '([self.renders_sample_barycentric1[sample], self.\n renders_sample_barycentric2[sample][:, :, 0:1]], 2)\n', (118261, 118366), True, 'import numpy as np\n'), ((120685, 120734), 'numpy.asarray', 'np.asarray', (['vc_by_face'], {'dtype': 'np.uint8', 'order': '"""C"""'}), "(vc_by_face, dtype=np.uint8, order='C')\n", (120695, 120734), True, 'import numpy as np\n'), ((121347, 121386), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (121353, 121386), True, 'import numpy as np\n'), ((133646, 133685), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', '(-ident)'], {}), '((p2 - p0)[:, None, :], -ident)\n', (133654, 133685), True, 'import numpy as np\n'), ((133684, 133723), 'numpy.cross', 'np.cross', (['(-ident)', '(p1 - p0)[:, None, :]'], {}), '(-ident, (p1 - p0)[:, None, :])\n', (133692, 133723), True, 'import numpy as np\n'), ((140705, 140756), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (140712, 140756), True, 'import numpy as np\n'), ((141077, 141165), 'numpy.any', 'np.any', (['((edgeFaces[:, :, 0] == sampleFaces) | (edgeFaces[:, :, 1] == sampleFaces))', '(0)'], {}), '((edgeFaces[:, :, 0] == sampleFaces) | (edgeFaces[:, :, 1] ==\n sampleFaces), 0)\n', (141083, 141165), True, 'import numpy as np\n'), ((142106, 142157), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), 
'(boundaryFaces[None, :], [self.nsamples, 1])\n', (142113, 142157), True, 'import numpy as np\n'), ((142457, 142513), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (142464, 142513), True, 'import numpy as np\n'), ((142987, 143038), 'numpy.tile', 'np.tile', (['vertsPerFaceProjBnd', '[self.nsamples, 1, 1]'], {}), '(vertsPerFaceProjBnd, [self.nsamples, 1, 1])\n', (142994, 143038), True, 'import numpy as np\n'), ((145835, 145889), 'numpy.argmin', 'np.argmin', (['np.c_[d[nonIntersect], d2[nonIntersect]]', '(1)'], {}), '(np.c_[d[nonIntersect], d2[nonIntersect]], 1)\n', (145844, 145889), True, 'import numpy as np\n'), ((146726, 146784), 'numpy.tile', 'np.tile', (['verticesBnd[None, :, :]', '[self.nsamples, 1, 1, 1]'], {}), '(verticesBnd[None, :, :], [self.nsamples, 1, 1, 1])\n', (146733, 146784), True, 'import numpy as np\n'), ((147097, 147149), 'numpy.tile', 'np.tile', (['vcBnd[None, :, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vcBnd[None, :, :], [self.nsamples, 1, 1, 1])\n', (147104, 147149), True, 'import numpy as np\n'), ((149565, 149677), 'numpy.argmin', 'np.argmin', (['np.c_[barycentricVertsDistIntesect[nonIntersect],\n barycentricVertsDistIntesect2[nonIntersect]]', '(1)'], {}), '(np.c_[barycentricVertsDistIntesect[nonIntersect],\n barycentricVertsDistIntesect2[nonIntersect]], 1)\n', (149574, 149677), True, 'import numpy as np\n'), ((150899, 150930), 'numpy.zeros', 'np.zeros', (['[nsamples, nBndFaces]'], {}), '([nsamples, nBndFaces])\n', (150907, 150930), True, 'import numpy as np\n'), ((151292, 151340), 'numpy.zeros', 'np.zeros', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (151300, 151340), True, 'import numpy as np\n'), ((151381, 151429), 'numpy.zeros', 'np.zeros', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (151389, 151429), True, 'import numpy as np\n'), ((151464, 
151512), 'numpy.zeros', 'np.zeros', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (151472, 151512), True, 'import numpy as np\n'), ((153108, 153143), 'numpy.zeros_like', 'np.zeros_like', (['self.render_resolved'], {}), '(self.render_resolved)\n', (153121, 153143), True, 'import numpy as np\n'), ((153203, 153232), 'numpy.sum', 'np.sum', (['finalColorBnd'], {'axis': '(0)'}), '(finalColorBnd, axis=0)\n', (153209, 153232), True, 'import numpy as np\n'), ((156800, 156851), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (156807, 156851), True, 'import numpy as np\n'), ((157052, 157108), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (157059, 157108), True, 'import numpy as np\n'), ((161717, 161731), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (161728, 161731), True, 'import numpy as np\n'), ((161909, 161950), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', '(-ident)'], {}), "('ijk,ikl->ijl', dlnorm, -ident)\n", (161918, 161950), True, 'import numpy as np\n'), ((161976, 162016), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', 'ident'], {}), "('ijk,ikl->ijl', dlnorm, ident)\n", (161985, 162016), True, 'import numpy as np\n'), ((162581, 162622), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dn_norm', 'dndp1'], {}), "('ijk,ikl->ijl', dn_norm, dndp1)\n", (162590, 162622), True, 'import numpy as np\n'), ((162648, 162689), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dn_norm', 'dndp2'], {}), "('ijk,ikl->ijl', dn_norm, dndp2)\n", (162657, 162689), True, 'import numpy as np\n'), ((162714, 162752), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp1'], {}), "('ij,ijl->il', n_norm, dndp1)\n", (162723, 162752), True, 'import numpy as np\n'), ((162776, 162814), 'numpy.einsum', 
'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp2'], {}), "('ij,ijl->il', n_norm, dndp2)\n", (162785, 162814), True, 'import numpy as np\n'), ((165124, 165134), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (165127, 165134), False, 'from chumpy.utils import row, col\n'), ((165512, 165575), 'numpy.concatenate', 'np.concatenate', (['[data1[:, :, None, :], data2[:, :, None, :]]', '(2)'], {}), '([data1[:, :, None, :], data2[:, :, None, :]], 2)\n', (165526, 165575), True, 'import numpy as np\n'), ((165701, 165795), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (165714, 165795), True, 'import scipy.sparse as sp\n'), ((167429, 167439), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (167432, 167439), False, 'from chumpy.utils import row, col\n'), ((167964, 168058), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (167977, 168058), True, 'import scipy.sparse as sp\n'), ((168479, 168489), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (168482, 168489), False, 'from chumpy.utils import row, col\n'), ((168918, 169012), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (168931, 169012), True, 'import scipy.sparse as sp\n'), ((169510, 169553), 'numpy.tile', 'np.tile', (['verticesBnd', '[self.nsamples, 1, 1]'], {}), '(verticesBnd, [self.nsamples, 1, 1])\n', (169517, 169553), True, 'import numpy as np\n'), ((170753, 170763), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (170756, 170763), False, 'from chumpy.utils import row, col\n'), 
((171178, 171272), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (171191, 171272), True, 'import scipy.sparse as sp\n'), ((175379, 175430), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (175386, 175430), True, 'import numpy as np\n'), ((175631, 175687), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (175638, 175687), True, 'import numpy as np\n'), ((177369, 177379), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (177372, 177379), False, 'from chumpy.utils import row, col\n'), ((177757, 177830), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (177770, 177830), True, 'import scipy.sparse as sp\n'), ((178664, 178674), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (178667, 178674), False, 'from chumpy.utils import row, col\n'), ((179074, 179147), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (179087, 179147), True, 'import scipy.sparse as sp\n'), ((179720, 179730), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (179723, 179730), False, 'from chumpy.utils import row, col\n'), ((180125, 180198), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (180138, 180198), True, 'import scipy.sparse as sp\n'), ((192373, 192403), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), 
'(vao_mesh)\n', (192393, 192403), True, 'import OpenGL.GL as GL\n'), ((193065, 193125), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_location, 1, GL.GL_TRUE, MVP)\n', (193086, 193125), True, 'import OpenGL.GL as GL\n'), ((193946, 193973), 'numpy.hstack', 'np.hstack', (['(colors, color3)'], {}), '((colors, color3))\n', (193955, 193973), True, 'import numpy as np\n'), ((194149, 194199), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (194165, 194199), True, 'import OpenGL.GL as GL\n'), ((194281, 194331), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (194297, 194331), True, 'import OpenGL.GL as GL\n'), ((196253, 196374), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (196268, 196374), True, 'import OpenGL.GL as GL\n'), ((196615, 196672), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (196635, 196672), True, 'import OpenGL.GL as GL\n'), ((196699, 196758), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (196719, 196758), True, 'import OpenGL.GL as GL\n'), ((199012, 199069), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)\n', (199032, 199069), True, 'import OpenGL.GL as GL\n'), ((199096, 199155), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_noms'], {}), 
'(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)\n', (199116, 199155), True, 'import OpenGL.GL as GL\n'), ((201379, 201391), 'OpenGL.GL.glFlush', 'GL.glFlush', ([], {}), '()\n', (201389, 201391), True, 'import OpenGL.GL as GL\n'), ((201404, 201417), 'OpenGL.GL.glFinish', 'GL.glFinish', ([], {}), '()\n', (201415, 201417), True, 'import OpenGL.GL as GL\n'), ((203603, 203647), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (203621, 203647), True, 'import OpenGL.GL as GL\n'), ((217726, 217747), 'numpy.flipud', 'np.flipud', (['whitePixel'], {}), '(whitePixel)\n', (217735, 217747), True, 'import numpy as np\n'), ((223282, 223328), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (223309, 223328), True, 'import OpenGL.GL as GL\n'), ((226434, 226480), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (226461, 226480), True, 'import OpenGL.GL as GL\n'), ((229298, 229344), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (229325, 229344), True, 'import OpenGL.GL as GL\n'), ((235428, 235549), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (235443, 235549), True, 'import OpenGL.GL as GL\n'), ((243094, 243153), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample"""'], {}), "(self.fetchSamplesProgram, 'sample')\n", (243117, 243153), True, 'import OpenGL.GL as GL\n'), ((243166, 243199), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['sampleLoc', 'sample'], {}), '(sampleLoc, sample)\n', (243180, 243199), True, 'import OpenGL.GL as 
GL\n'), ((243213, 243247), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (243231, 243247), True, 'import OpenGL.GL as GL\n'), ((243260, 243334), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (243276, 243334), True, 'import OpenGL.GL as GL\n'), ((243347, 243380), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.colorsLoc', '(0)'], {}), '(self.colorsLoc, 0)\n', (243361, 243380), True, 'import OpenGL.GL as GL\n'), ((243394, 243428), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE1'], {}), '(GL.GL_TEXTURE1)\n', (243412, 243428), True, 'import OpenGL.GL as GL\n'), ((243441, 243529), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (243457, 243529), True, 'import OpenGL.GL as GL\n'), ((243537, 243580), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_positionsLoc', '(1)'], {}), '(self.sample_positionsLoc, 1)\n', (243551, 243580), True, 'import OpenGL.GL as GL\n'), ((243594, 243628), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE2'], {}), '(GL.GL_TEXTURE2)\n', (243612, 243628), True, 'import OpenGL.GL as GL\n'), ((243641, 243726), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (243657, 243726), True, 'import OpenGL.GL as GL\n'), ((243734, 243773), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_facesLoc', '(2)'], {}), '(self.sample_facesLoc, 2)\n', (243748, 243773), True, 'import OpenGL.GL as GL\n'), ((243787, 243821), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE3'], {}), '(GL.GL_TEXTURE3)\n', (243805, 
243821), True, 'import OpenGL.GL as GL\n'), ((243834, 243926), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (243850, 243926), True, 'import OpenGL.GL as GL\n'), ((243934, 243980), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric1Loc', '(3)'], {}), '(self.sample_barycentric1Loc, 3)\n', (243948, 243980), True, 'import OpenGL.GL as GL\n'), ((243994, 244028), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE4'], {}), '(GL.GL_TEXTURE4)\n', (244012, 244028), True, 'import OpenGL.GL as GL\n'), ((244041, 244133), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (244057, 244133), True, 'import OpenGL.GL as GL\n'), ((244141, 244187), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric2Loc', '(4)'], {}), '(self.sample_barycentric2Loc, 4)\n', (244155, 244187), True, 'import OpenGL.GL as GL\n'), ((244201, 244236), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (244221, 244236), True, 'import OpenGL.GL as GL\n'), ((244249, 244284), 'OpenGL.GL.glDrawArrays', 'GL.glDrawArrays', (['GL.GL_POINTS', '(0)', '(1)'], {}), '(GL.GL_POINTS, 0, 1)\n', (244264, 244284), True, 'import OpenGL.GL as GL\n'), ((244615, 244682), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)\n', (244635, 244682), True, 'import OpenGL.GL as GL\n'), ((244696, 244736), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (244711, 244736), True, 'import OpenGL.GL as GL\n'), ((245032, 245072), 'OpenGL.GL.glReadBuffer', 
'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT1'], {}), '(GL.GL_COLOR_ATTACHMENT1)\n', (245047, 245072), True, 'import OpenGL.GL as GL\n'), ((245378, 245418), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT2'], {}), '(GL.GL_COLOR_ATTACHMENT2)\n', (245393, 245418), True, 'import OpenGL.GL as GL\n'), ((245725, 245765), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT3'], {}), '(GL.GL_COLOR_ATTACHMENT3)\n', (245740, 245765), True, 'import OpenGL.GL as GL\n'), ((246080, 246120), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT4'], {}), '(GL.GL_COLOR_ATTACHMENT4)\n', (246095, 246120), True, 'import OpenGL.GL as GL\n'), ((246477, 246596), 'numpy.concatenate', 'np.concatenate', (['[self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2\n [sample][:, :, 0:1]]', '(2)'], {}), '([self.renders_sample_barycentric1[sample], self.\n renders_sample_barycentric2[sample][:, :, 0:1]], 2)\n', (246491, 246596), True, 'import numpy as np\n'), ((248915, 248964), 'numpy.asarray', 'np.asarray', (['vc_by_face'], {'dtype': 'np.uint8', 'order': '"""C"""'}), "(vc_by_face, dtype=np.uint8, order='C')\n", (248925, 248964), True, 'import numpy as np\n'), ((249577, 249616), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (249583, 249616), True, 'import numpy as np\n'), ((262343, 262382), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', '(-ident)'], {}), '((p2 - p0)[:, None, :], -ident)\n', (262351, 262382), True, 'import numpy as np\n'), ((262381, 262420), 'numpy.cross', 'np.cross', (['(-ident)', '(p1 - p0)[:, None, :]'], {}), '(-ident, (p1 - p0)[:, None, :])\n', (262389, 262420), True, 'import numpy as np\n'), ((269402, 269453), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (269409, 269453), True, 'import numpy as np\n'), ((269774, 269862), 'numpy.any', 'np.any', 
(['((edgeFaces[:, :, 0] == sampleFaces) | (edgeFaces[:, :, 1] == sampleFaces))', '(0)'], {}), '((edgeFaces[:, :, 0] == sampleFaces) | (edgeFaces[:, :, 1] ==\n sampleFaces), 0)\n', (269780, 269862), True, 'import numpy as np\n'), ((270803, 270854), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (270810, 270854), True, 'import numpy as np\n'), ((271154, 271210), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (271161, 271210), True, 'import numpy as np\n'), ((271684, 271735), 'numpy.tile', 'np.tile', (['vertsPerFaceProjBnd', '[self.nsamples, 1, 1]'], {}), '(vertsPerFaceProjBnd, [self.nsamples, 1, 1])\n', (271691, 271735), True, 'import numpy as np\n'), ((274532, 274586), 'numpy.argmin', 'np.argmin', (['np.c_[d[nonIntersect], d2[nonIntersect]]', '(1)'], {}), '(np.c_[d[nonIntersect], d2[nonIntersect]], 1)\n', (274541, 274586), True, 'import numpy as np\n'), ((275423, 275481), 'numpy.tile', 'np.tile', (['verticesBnd[None, :, :]', '[self.nsamples, 1, 1, 1]'], {}), '(verticesBnd[None, :, :], [self.nsamples, 1, 1, 1])\n', (275430, 275481), True, 'import numpy as np\n'), ((275794, 275846), 'numpy.tile', 'np.tile', (['vcBnd[None, :, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vcBnd[None, :, :], [self.nsamples, 1, 1, 1])\n', (275801, 275846), True, 'import numpy as np\n'), ((278262, 278374), 'numpy.argmin', 'np.argmin', (['np.c_[barycentricVertsDistIntesect[nonIntersect],\n barycentricVertsDistIntesect2[nonIntersect]]', '(1)'], {}), '(np.c_[barycentricVertsDistIntesect[nonIntersect],\n barycentricVertsDistIntesect2[nonIntersect]], 1)\n', (278271, 278374), True, 'import numpy as np\n'), ((279596, 279627), 'numpy.zeros', 'np.zeros', (['[nsamples, nBndFaces]'], {}), '([nsamples, nBndFaces])\n', (279604, 279627), True, 'import numpy as np\n'), ((279989, 280037), 'numpy.zeros', 'np.zeros', (['[self.nsamples, 
boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (279997, 280037), True, 'import numpy as np\n'), ((280078, 280126), 'numpy.zeros', 'np.zeros', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (280086, 280126), True, 'import numpy as np\n'), ((280161, 280209), 'numpy.zeros', 'np.zeros', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (280169, 280209), True, 'import numpy as np\n'), ((281805, 281840), 'numpy.zeros_like', 'np.zeros_like', (['self.render_resolved'], {}), '(self.render_resolved)\n', (281818, 281840), True, 'import numpy as np\n'), ((281900, 281929), 'numpy.sum', 'np.sum', (['finalColorBnd'], {'axis': '(0)'}), '(finalColorBnd, axis=0)\n', (281906, 281929), True, 'import numpy as np\n'), ((285497, 285548), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (285504, 285548), True, 'import numpy as np\n'), ((285749, 285805), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (285756, 285805), True, 'import numpy as np\n'), ((290414, 290428), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (290425, 290428), True, 'import numpy as np\n'), ((290606, 290647), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', '(-ident)'], {}), "('ijk,ikl->ijl', dlnorm, -ident)\n", (290615, 290647), True, 'import numpy as np\n'), ((290673, 290713), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', 'ident'], {}), "('ijk,ikl->ijl', dlnorm, ident)\n", (290682, 290713), True, 'import numpy as np\n'), ((291278, 291319), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dn_norm', 'dndp1'], {}), "('ijk,ikl->ijl', dn_norm, dndp1)\n", (291287, 291319), True, 'import numpy as np\n'), ((291345, 291386), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 
'dn_norm', 'dndp2'], {}), "('ijk,ikl->ijl', dn_norm, dndp2)\n", (291354, 291386), True, 'import numpy as np\n'), ((291411, 291449), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp1'], {}), "('ij,ijl->il', n_norm, dndp1)\n", (291420, 291449), True, 'import numpy as np\n'), ((291473, 291511), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp2'], {}), "('ij,ijl->il', n_norm, dndp2)\n", (291482, 291511), True, 'import numpy as np\n'), ((293821, 293831), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (293824, 293831), False, 'from chumpy.utils import row, col\n'), ((294209, 294272), 'numpy.concatenate', 'np.concatenate', (['[data1[:, :, None, :], data2[:, :, None, :]]', '(2)'], {}), '([data1[:, :, None, :], data2[:, :, None, :]], 2)\n', (294223, 294272), True, 'import numpy as np\n'), ((294398, 294492), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (294411, 294492), True, 'import scipy.sparse as sp\n'), ((296126, 296136), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (296129, 296136), False, 'from chumpy.utils import row, col\n'), ((296661, 296755), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (296674, 296755), True, 'import scipy.sparse as sp\n'), ((297176, 297186), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (297179, 297186), False, 'from chumpy.utils import row, col\n'), ((297615, 297709), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (297628, 297709), True, 'import scipy.sparse as sp\n'), 
((298207, 298250), 'numpy.tile', 'np.tile', (['verticesBnd', '[self.nsamples, 1, 1]'], {}), '(verticesBnd, [self.nsamples, 1, 1])\n', (298214, 298250), True, 'import numpy as np\n'), ((299450, 299460), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (299453, 299460), False, 'from chumpy.utils import row, col\n'), ((299875, 299969), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (299888, 299969), True, 'import scipy.sparse as sp\n'), ((304076, 304127), 'numpy.tile', 'np.tile', (['boundaryFaces[None, :]', '[self.nsamples, 1]'], {}), '(boundaryFaces[None, :], [self.nsamples, 1])\n', (304083, 304127), True, 'import numpy as np\n'), ((304328, 304384), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (304335, 304384), True, 'import numpy as np\n'), ((306066, 306076), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (306069, 306076), False, 'from chumpy.utils import row, col\n'), ((306454, 306527), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (306467, 306527), True, 'import scipy.sparse as sp\n'), ((307361, 307371), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (307364, 307371), False, 'from chumpy.utils import row, col\n'), ((307771, 307844), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (307784, 307844), True, 'import scipy.sparse as sp\n'), ((308417, 308427), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (308420, 308427), False, 'from chumpy.utils import row, col\n'), ((308822, 308895), 
'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (308835, 308895), True, 'import scipy.sparse as sp\n'), ((321070, 321100), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (321090, 321100), True, 'import OpenGL.GL as GL\n'), ((321762, 321822), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_location, 1, GL.GL_TRUE, MVP)\n', (321783, 321822), True, 'import OpenGL.GL as GL\n'), ((322643, 322670), 'numpy.hstack', 'np.hstack', (['(colors, color3)'], {}), '((colors, color3))\n', (322652, 322670), True, 'import numpy as np\n'), ((322846, 322896), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (322862, 322896), True, 'import OpenGL.GL as GL\n'), ((322978, 323028), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (322994, 323028), True, 'import OpenGL.GL as GL\n'), ((324950, 325071), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (324965, 325071), True, 'import OpenGL.GL as GL\n'), ((325312, 325369), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (325332, 325369), True, 'import OpenGL.GL as GL\n'), ((325396, 325455), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (325416, 325455), True, 'import OpenGL.GL as GL\n'), ((327709, 327766), 
'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)\n', (327729, 327766), True, 'import OpenGL.GL as GL\n'), ((327793, 327852), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)\n', (327813, 327852), True, 'import OpenGL.GL as GL\n'), ((330069, 330081), 'OpenGL.GL.glFlush', 'GL.glFlush', ([], {}), '()\n', (330079, 330081), True, 'import OpenGL.GL as GL\n'), ((330094, 330107), 'OpenGL.GL.glFinish', 'GL.glFinish', ([], {}), '()\n', (330105, 330107), True, 'import OpenGL.GL as GL\n'), ((332293, 332337), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (332311, 332337), True, 'import OpenGL.GL as GL\n'), ((346580, 346601), 'numpy.flipud', 'np.flipud', (['whitePixel'], {}), '(whitePixel)\n', (346589, 346601), True, 'import numpy as np\n'), ((352238, 352284), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (352265, 352284), True, 'import OpenGL.GL as GL\n'), ((355390, 355436), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (355417, 355436), True, 'import OpenGL.GL as GL\n'), ((358255, 358301), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (358282, 358301), True, 'import OpenGL.GL as GL\n'), ((364386, 364507), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (364401, 364507), True, 'import OpenGL.GL as GL\n'), ((372418, 372477), 'OpenGL.GL.glGetUniformLocation', 
'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample"""'], {}), "(self.fetchSamplesProgram, 'sample')\n", (372441, 372477), True, 'import OpenGL.GL as GL\n'), ((372490, 372523), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['sampleLoc', 'sample'], {}), '(sampleLoc, sample)\n', (372504, 372523), True, 'import OpenGL.GL as GL\n'), ((372537, 372571), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (372555, 372571), True, 'import OpenGL.GL as GL\n'), ((372584, 372658), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (372600, 372658), True, 'import OpenGL.GL as GL\n'), ((372671, 372704), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.colorsLoc', '(0)'], {}), '(self.colorsLoc, 0)\n', (372685, 372704), True, 'import OpenGL.GL as GL\n'), ((372718, 372752), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE1'], {}), '(GL.GL_TEXTURE1)\n', (372736, 372752), True, 'import OpenGL.GL as GL\n'), ((372765, 372853), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (372781, 372853), True, 'import OpenGL.GL as GL\n'), ((372861, 372904), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_positionsLoc', '(1)'], {}), '(self.sample_positionsLoc, 1)\n', (372875, 372904), True, 'import OpenGL.GL as GL\n'), ((372918, 372952), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE2'], {}), '(GL.GL_TEXTURE2)\n', (372936, 372952), True, 'import OpenGL.GL as GL\n'), ((372965, 373050), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (372981, 373050), True, 'import 
OpenGL.GL as GL\n'), ((373058, 373097), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_facesLoc', '(2)'], {}), '(self.sample_facesLoc, 2)\n', (373072, 373097), True, 'import OpenGL.GL as GL\n'), ((373111, 373145), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE3'], {}), '(GL.GL_TEXTURE3)\n', (373129, 373145), True, 'import OpenGL.GL as GL\n'), ((373158, 373250), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (373174, 373250), True, 'import OpenGL.GL as GL\n'), ((373258, 373304), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric1Loc', '(3)'], {}), '(self.sample_barycentric1Loc, 3)\n', (373272, 373304), True, 'import OpenGL.GL as GL\n'), ((373318, 373352), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE4'], {}), '(GL.GL_TEXTURE4)\n', (373336, 373352), True, 'import OpenGL.GL as GL\n'), ((373365, 373457), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (373381, 373457), True, 'import OpenGL.GL as GL\n'), ((373465, 373511), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric2Loc', '(4)'], {}), '(self.sample_barycentric2Loc, 4)\n', (373479, 373511), True, 'import OpenGL.GL as GL\n'), ((373525, 373560), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (373545, 373560), True, 'import OpenGL.GL as GL\n'), ((373573, 373608), 'OpenGL.GL.glDrawArrays', 'GL.glDrawArrays', (['GL.GL_POINTS', '(0)', '(1)'], {}), '(GL.GL_POINTS, 0, 1)\n', (373588, 373608), True, 'import OpenGL.GL as GL\n'), ((373939, 374006), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), 
'(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)\n', (373959, 374006), True, 'import OpenGL.GL as GL\n'), ((374020, 374060), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (374035, 374060), True, 'import OpenGL.GL as GL\n'), ((374396, 374436), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT1'], {}), '(GL.GL_COLOR_ATTACHMENT1)\n', (374411, 374436), True, 'import OpenGL.GL as GL\n'), ((374782, 374822), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT2'], {}), '(GL.GL_COLOR_ATTACHMENT2)\n', (374797, 374822), True, 'import OpenGL.GL as GL\n'), ((375177, 375217), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT3'], {}), '(GL.GL_COLOR_ATTACHMENT3)\n', (375192, 375217), True, 'import OpenGL.GL as GL\n'), ((375572, 375612), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT4'], {}), '(GL.GL_COLOR_ATTACHMENT4)\n', (375587, 375612), True, 'import OpenGL.GL as GL\n'), ((376009, 376128), 'numpy.concatenate', 'np.concatenate', (['[self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2\n [sample][:, :, 0:1]]', '(2)'], {}), '([self.renders_sample_barycentric1[sample], self.\n renders_sample_barycentric2[sample][:, :, 0:1]], 2)\n', (376023, 376128), True, 'import numpy as np\n'), ((378467, 378516), 'numpy.asarray', 'np.asarray', (['vc_by_face'], {'dtype': 'np.uint8', 'order': '"""C"""'}), "(vc_by_face, dtype=np.uint8, order='C')\n", (378477, 378516), True, 'import numpy as np\n'), ((379130, 379169), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (379136, 379169), True, 'import numpy as np\n'), ((391776, 391815), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', '(-ident)'], {}), '((p2 - p0)[:, None, :], -ident)\n', (391784, 391815), True, 'import numpy as np\n'), ((391818, 391857), 'numpy.cross', 'np.cross', (['(-ident)', '(p1 - p0)[:, None, :]'], 
{}), '(-ident, (p1 - p0)[:, None, :])\n', (391826, 391857), True, 'import numpy as np\n'), ((399303, 399359), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (399310, 399359), True, 'import numpy as np\n'), ((401151, 401205), 'numpy.argmin', 'np.argmin', (['np.c_[d[nonIntersect], d2[nonIntersect]]', '(1)'], {}), '(np.c_[d[nonIntersect], d2[nonIntersect]], 1)\n', (401160, 401205), True, 'import numpy as np\n'), ((404961, 404992), 'numpy.zeros', 'np.zeros', (['[nsamples, nBndFaces]'], {}), '([nsamples, nBndFaces])\n', (404969, 404992), True, 'import numpy as np\n'), ((405293, 405340), 'numpy.ones', 'np.ones', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (405300, 405340), True, 'import numpy as np\n'), ((405921, 405952), 'numpy.zeros_like', 'np.zeros_like', (['self.color_image'], {}), '(self.color_image)\n', (405934, 405952), True, 'import numpy as np\n'), ((406012, 406041), 'numpy.sum', 'np.sum', (['finalColorBnd'], {'axis': '(0)'}), '(finalColorBnd, axis=0)\n', (406018, 406041), True, 'import numpy as np\n'), ((409315, 409371), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (409322, 409371), True, 'import numpy as np\n'), ((414001, 414015), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (414012, 414015), True, 'import numpy as np\n'), ((414200, 414241), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', '(-ident)'], {}), "('ijk,ikl->ijl', dlnorm, -ident)\n", (414209, 414241), True, 'import numpy as np\n'), ((414267, 414307), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', 'ident'], {}), "('ijk,ikl->ijl', dlnorm, ident)\n", (414276, 414307), True, 'import numpy as np\n'), ((415061, 415099), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp1'], {}), "('ij,ijl->il', n_norm, 
dndp1)\n", (415070, 415099), True, 'import numpy as np\n'), ((415123, 415161), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp2'], {}), "('ij,ijl->il', n_norm, dndp2)\n", (415132, 415161), True, 'import numpy as np\n'), ((418990, 419000), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (418993, 419000), False, 'from chumpy.utils import row, col\n'), ((419392, 419455), 'numpy.concatenate', 'np.concatenate', (['[data1[:, :, None, :], data2[:, :, None, :]]', '(2)'], {}), '([data1[:, :, None, :], data2[:, :, None, :]], 2)\n', (419406, 419455), True, 'import numpy as np\n'), ((419579, 419673), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (419592, 419673), True, 'import scipy.sparse as sp\n'), ((421791, 421801), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (421794, 421801), False, 'from chumpy.utils import row, col\n'), ((422213, 422307), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (422226, 422307), True, 'import scipy.sparse as sp\n'), ((427309, 427319), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (427312, 427319), False, 'from chumpy.utils import row, col\n'), ((427727, 427800), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (427740, 427800), True, 'import scipy.sparse as sp\n'), ((439800, 439830), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (439820, 439830), True, 'import OpenGL.GL as GL\n'), ((440494, 440554), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_location', '(1)', 
'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_location, 1, GL.GL_TRUE, MVP)\n', (440515, 440554), True, 'import OpenGL.GL as GL\n'), ((441391, 441418), 'numpy.hstack', 'np.hstack', (['(colors, color3)'], {}), '((colors, color3))\n', (441400, 441418), True, 'import numpy as np\n'), ((441595, 441645), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (441611, 441645), True, 'import OpenGL.GL as GL\n'), ((441727, 441777), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n', (441743, 441777), True, 'import OpenGL.GL as GL\n'), ((443748, 443869), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (443763, 443869), True, 'import OpenGL.GL as GL\n'), ((444114, 444171), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (444134, 444171), True, 'import OpenGL.GL as GL\n'), ((444198, 444257), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (444218, 444257), True, 'import OpenGL.GL as GL\n'), ((446514, 446571), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)\n', (446534, 446571), True, 'import OpenGL.GL as GL\n'), ((446598, 446657), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)\n', (446618, 446657), True, 'import OpenGL.GL as GL\n'), ((448965, 448977), 'OpenGL.GL.glFlush', 'GL.glFlush', ([], {}), '()\n', (448975, 
448977), True, 'import OpenGL.GL as GL\n'), ((448990, 449003), 'OpenGL.GL.glFinish', 'GL.glFinish', ([], {}), '()\n', (449001, 449003), True, 'import OpenGL.GL as GL\n'), ((451189, 451233), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (451207, 451233), True, 'import OpenGL.GL as GL\n'), ((466529, 466550), 'numpy.flipud', 'np.flipud', (['whitePixel'], {}), '(whitePixel)\n', (466538, 466550), True, 'import numpy as np\n'), ((472326, 472372), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (472353, 472372), True, 'import OpenGL.GL as GL\n'), ((475478, 475524), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (475505, 475524), True, 'import OpenGL.GL as GL\n'), ((478343, 478389), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (478370, 478389), True, 'import OpenGL.GL as GL\n'), ((484475, 484596), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (484490, 484596), True, 'import OpenGL.GL as GL\n'), ((492507, 492566), 'OpenGL.GL.glGetUniformLocation', 'GL.glGetUniformLocation', (['self.fetchSamplesProgram', '"""sample"""'], {}), "(self.fetchSamplesProgram, 'sample')\n", (492530, 492566), True, 'import OpenGL.GL as GL\n'), ((492579, 492612), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['sampleLoc', 'sample'], {}), '(sampleLoc, sample)\n', (492593, 492612), True, 'import OpenGL.GL as GL\n'), ((492626, 492660), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (492644, 492660), True, 'import OpenGL.GL as GL\n'), 
((492673, 492747), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_render'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_render)\n', (492689, 492747), True, 'import OpenGL.GL as GL\n'), ((492760, 492793), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.colorsLoc', '(0)'], {}), '(self.colorsLoc, 0)\n', (492774, 492793), True, 'import OpenGL.GL as GL\n'), ((492807, 492841), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE1'], {}), '(GL.GL_TEXTURE1)\n', (492825, 492841), True, 'import OpenGL.GL as GL\n'), ((492854, 492942), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_position'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_position)\n', (492870, 492942), True, 'import OpenGL.GL as GL\n'), ((492950, 492993), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_positionsLoc', '(1)'], {}), '(self.sample_positionsLoc, 1)\n', (492964, 492993), True, 'import OpenGL.GL as GL\n'), ((493007, 493041), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE2'], {}), '(GL.GL_TEXTURE2)\n', (493025, 493041), True, 'import OpenGL.GL as GL\n'), ((493054, 493139), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_faces'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.texture_errors_sample_faces\n )\n', (493070, 493139), True, 'import OpenGL.GL as GL\n'), ((493147, 493186), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_facesLoc', '(2)'], {}), '(self.sample_facesLoc, 2)\n', (493161, 493186), True, 'import OpenGL.GL as GL\n'), ((493200, 493234), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE3'], {}), '(GL.GL_TEXTURE3)\n', (493218, 493234), True, 'import OpenGL.GL as GL\n'), ((493247, 493339), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 
'self.texture_errors_sample_barycentric1'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric1)\n', (493263, 493339), True, 'import OpenGL.GL as GL\n'), ((493347, 493393), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric1Loc', '(3)'], {}), '(self.sample_barycentric1Loc, 3)\n', (493361, 493393), True, 'import OpenGL.GL as GL\n'), ((493407, 493441), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE4'], {}), '(GL.GL_TEXTURE4)\n', (493425, 493441), True, 'import OpenGL.GL as GL\n'), ((493454, 493546), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D_MULTISAMPLE', 'self.texture_errors_sample_barycentric2'], {}), '(GL.GL_TEXTURE_2D_MULTISAMPLE, self.\n texture_errors_sample_barycentric2)\n', (493470, 493546), True, 'import OpenGL.GL as GL\n'), ((493554, 493600), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.sample_barycentric2Loc', '(4)'], {}), '(self.sample_barycentric2Loc, 4)\n', (493568, 493600), True, 'import OpenGL.GL as GL\n'), ((493614, 493649), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['self.vao_quad'], {}), '(self.vao_quad)\n', (493634, 493649), True, 'import OpenGL.GL as GL\n'), ((493662, 493697), 'OpenGL.GL.glDrawArrays', 'GL.glDrawArrays', (['GL.GL_POINTS', '(0)', '(1)'], {}), '(GL.GL_POINTS, 0, 1)\n', (493677, 493697), True, 'import OpenGL.GL as GL\n'), ((494028, 494095), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_sample_fetch'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_sample_fetch)\n', (494048, 494095), True, 'import OpenGL.GL as GL\n'), ((494109, 494149), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT0'], {}), '(GL.GL_COLOR_ATTACHMENT0)\n', (494124, 494149), True, 'import OpenGL.GL as GL\n'), ((494485, 494525), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT1'], {}), '(GL.GL_COLOR_ATTACHMENT1)\n', (494500, 494525), True, 'import OpenGL.GL as GL\n'), 
((494871, 494911), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT2'], {}), '(GL.GL_COLOR_ATTACHMENT2)\n', (494886, 494911), True, 'import OpenGL.GL as GL\n'), ((495266, 495306), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT3'], {}), '(GL.GL_COLOR_ATTACHMENT3)\n', (495281, 495306), True, 'import OpenGL.GL as GL\n'), ((495661, 495701), 'OpenGL.GL.glReadBuffer', 'GL.glReadBuffer', (['GL.GL_COLOR_ATTACHMENT4'], {}), '(GL.GL_COLOR_ATTACHMENT4)\n', (495676, 495701), True, 'import OpenGL.GL as GL\n'), ((496098, 496217), 'numpy.concatenate', 'np.concatenate', (['[self.renders_sample_barycentric1[sample], self.renders_sample_barycentric2\n [sample][:, :, 0:1]]', '(2)'], {}), '([self.renders_sample_barycentric1[sample], self.\n renders_sample_barycentric2[sample][:, :, 0:1]], 2)\n', (496112, 496217), True, 'import numpy as np\n'), ((498556, 498605), 'numpy.asarray', 'np.asarray', (['vc_by_face'], {'dtype': 'np.uint8', 'order': '"""C"""'}), "(vc_by_face, dtype=np.uint8, order='C')\n", (498566, 498605), True, 'import numpy as np\n'), ((499219, 499258), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (499225, 499258), True, 'import numpy as np\n'), ((512269, 512308), 'numpy.cross', 'np.cross', (['(p2 - p0)[:, None, :]', '(-ident)'], {}), '((p2 - p0)[:, None, :], -ident)\n', (512277, 512308), True, 'import numpy as np\n'), ((512311, 512350), 'numpy.cross', 'np.cross', (['(-ident)', '(p1 - p0)[:, None, :]'], {}), '(-ident, (p1 - p0)[:, None, :])\n', (512319, 512350), True, 'import numpy as np\n'), ((519796, 519852), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (519803, 519852), True, 'import numpy as np\n'), ((521661, 521715), 'numpy.argmin', 'np.argmin', (['np.c_[d[nonIntersect], d2[nonIntersect]]', '(1)'], {}), '(np.c_[d[nonIntersect], d2[nonIntersect]], 1)\n', (521670, 
521715), True, 'import numpy as np\n'), ((525816, 525863), 'numpy.ones', 'np.ones', (['[self.nsamples, boundaryFaces.size, 3]'], {}), '([self.nsamples, boundaryFaces.size, 3])\n', (525823, 525863), True, 'import numpy as np\n'), ((526444, 526475), 'numpy.zeros_like', 'np.zeros_like', (['self.color_image'], {}), '(self.color_image)\n', (526457, 526475), True, 'import numpy as np\n'), ((526535, 526564), 'numpy.sum', 'np.sum', (['finalColorBnd'], {'axis': '(0)'}), '(finalColorBnd, axis=0)\n', (526541, 526564), True, 'import numpy as np\n'), ((529968, 530024), 'numpy.tile', 'np.tile', (['vertsProjBnd[None, :]', '[self.nsamples, 1, 1, 1]'], {}), '(vertsProjBnd[None, :], [self.nsamples, 1, 1, 1])\n', (529975, 530024), True, 'import numpy as np\n'), ((534654, 534668), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (534665, 534668), True, 'import numpy as np\n'), ((534853, 534894), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', '(-ident)'], {}), "('ijk,ikl->ijl', dlnorm, -ident)\n", (534862, 534894), True, 'import numpy as np\n'), ((534920, 534960), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl->ijl"""', 'dlnorm', 'ident'], {}), "('ijk,ikl->ijl', dlnorm, ident)\n", (534929, 534960), True, 'import numpy as np\n'), ((535714, 535752), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp1'], {}), "('ij,ijl->il', n_norm, dndp1)\n", (535723, 535752), True, 'import numpy as np\n'), ((535776, 535814), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'n_norm', 'dndp2'], {}), "('ij,ijl->il', n_norm, dndp2)\n", (535785, 535814), True, 'import numpy as np\n'), ((539643, 539653), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (539646, 539653), False, 'from chumpy.utils import row, col\n'), ((540045, 540108), 'numpy.concatenate', 'np.concatenate', (['[data1[:, :, None, :], data2[:, :, None, :]]', '(2)'], {}), '([data1[:, :, None, :], data2[:, :, None, :]], 2)\n', (540059, 540108), True, 'import numpy as np\n'), ((540232, 540326), 
'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (540245, 540326), True, 'import scipy.sparse as sp\n'), ((542444, 542454), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (542447, 542454), False, 'from chumpy.utils import row, col\n'), ((542866, 542960), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(image_width * image_height * n_channels, num_verts * 2)'}), '((data, ij), shape=(image_width * image_height * n_channels, \n num_verts * 2))\n', (542879, 542960), True, 'import scipy.sparse as sp\n'), ((548102, 548112), 'chumpy.utils.col', 'col', (['faces'], {}), '(faces)\n', (548105, 548112), False, 'from chumpy.utils import row, col\n'), ((548520, 548593), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ij)'], {'shape': '(width * height * num_channels, vc_size)'}), '((data, ij), shape=(width * height * num_channels, vc_size))\n', (548533, 548593), True, 'import scipy.sparse as sp\n'), ((560593, 560623), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (560613, 560623), True, 'import OpenGL.GL as GL\n'), ((561287, 561347), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_location, 1, GL.GL_TRUE, MVP)\n', (561308, 561347), True, 'import OpenGL.GL as GL\n'), ((562184, 562211), 'numpy.hstack', 'np.hstack', (['(colors, color3)'], {}), '((colors, color3))\n', (562193, 562211), True, 'import numpy as np\n'), ((562388, 562438), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_LINE'], {}), '(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n', (562404, 562438), True, 'import OpenGL.GL as GL\n'), ((562520, 562570), 'OpenGL.GL.glPolygonMode', 'GL.glPolygonMode', (['GL.GL_FRONT_AND_BACK', 'GL.GL_FILL'], {}), '(GL.GL_FRONT_AND_BACK, 
GL.GL_FILL)\n', (562536, 562570), True, 'import OpenGL.GL as GL\n'), ((564988, 565109), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n self.bgcolor.r[2 % self.num_channels], 1.0)\n', (565003, 565109), True, 'import OpenGL.GL as GL\n'), ((565354, 565411), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (565374, 565411), True, 'import OpenGL.GL as GL\n'), ((565438, 565497), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (565458, 565497), True, 'import OpenGL.GL as GL\n'), ((567754, 567811), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)\n', (567774, 567811), True, 'import OpenGL.GL as GL\n'), ((567838, 567897), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)\n', (567858, 567897), True, 'import OpenGL.GL as GL\n'), ((1261, 1282), 'OpenGL.GL.GLuint', 'GL.GLuint', (['self.mesap'], {}), '(self.mesap)\n', (1270, 1282), True, 'import OpenGL.GL as GL\n'), ((1605, 1642), 'OpenGL.GL.glDeleteProgram', 'GL.glDeleteProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (1623, 1642), True, 'import OpenGL.GL as GL\n'), ((3404, 3455), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[self.vao_static.value]'], {}), '(1, [self.vao_static.value])\n', (3427, 3455), True, 'import OpenGL.GL as GL\n'), ((3472, 3528), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[self.vao_static_face.value]'], {}), '(1, [self.vao_static_face.value])\n', (3495, 3528), True, 'import 
OpenGL.GL as GL\n'), ((3545, 3593), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[self.vao_dyn.value]'], {}), '(1, [self.vao_dyn.value])\n', (3568, 3593), True, 'import OpenGL.GL as GL\n'), ((3610, 3661), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[self.vao_dyn_ub.value]'], {}), '(1, [self.vao_dyn_ub.value])\n', (3633, 3661), True, 'import OpenGL.GL as GL\n'), ((5775, 5796), 'OpenGL.GL.GLuint', 'GL.GLuint', (['self.mesap'], {}), '(self.mesap)\n', (5784, 5796), True, 'import OpenGL.GL as GL\n'), ((8469, 8515), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (8496, 8515), True, 'import OpenGL.GL as GL\n'), ((15964, 15997), 'numpy.array', 'np.array', (['self.vc'], {'dtype': 'np.uint8'}), '(self.vc, dtype=np.uint8)\n', (15972, 15997), True, 'import numpy as np\n'), ((17646, 17699), 'numpy.asarray', 'np.asarray', (['(boundaryid_image != 4294967295)', 'np.uint32'], {}), '(boundaryid_image != 4294967295, np.uint32)\n', (17656, 17699), True, 'import numpy as np\n'), ((17899, 17952), 'numpy.asarray', 'np.asarray', (['(boundaryid_image != 4294967295)', 'np.uint32'], {}), '(boundaryid_image != 4294967295, np.uint32)\n', (17909, 17952), True, 'import numpy as np\n'), ((27686, 27719), 'numpy.repeat', 'np.repeat', (['fc', 'f.shape[1]'], {'axis': '(0)'}), '(fc, f.shape[1], axis=0)\n', (27695, 27719), True, 'import numpy as np\n'), ((27995, 28046), 'numpy.asarray', 'np.asarray', (['vc_by_face'], {'dtype': 'np.float32', 'order': '"""C"""'}), "(vc_by_face, dtype=np.float32, order='C')\n", (28005, 28046), True, 'import numpy as np\n'), ((28499, 28545), 'numpy.zeros_like', 'np.zeros_like', (['verts_by_face'], {'dtype': 'np.float32'}), '(verts_by_face, dtype=np.float32)\n', (28512, 28545), True, 'import numpy as np\n'), ((31008, 31030), 'numpy.nonzero', 'np.nonzero', (['(dps <= 0.0)'], {}), '(dps <= 0.0)\n', (31018, 31030), True, 'import numpy as np\n'), 
((31783, 31822), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (31789, 31822), True, 'import numpy as np\n'), ((33898, 33937), 'numpy.dot', 'np.dot', (['self.projectionMatrix', 'view_mtx'], {}), '(self.projectionMatrix, view_mtx)\n', (33904, 33937), True, 'import numpy as np\n'), ((42145, 42336), 'opendr.common.dImage_wrt_2dVerts_bnd', 'common.dImage_wrt_2dVerts_bnd', (['color', 'visible', 'visibility', 'barycentric', "self.frustum['width']", "self.frustum['height']", '(self.v.r.size / 3)', 'self.f', '(self.boundaryid_image != 4294967295)'], {}), "(color, visible, visibility, barycentric, self\n .frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f, \n self.boundaryid_image != 4294967295)\n", (42174, 42336), False, 'from opendr import common\n'), ((42367, 42512), 'opendr.common.dImage_wrt_2dVerts', 'common.dImage_wrt_2dVerts', (['color', 'visible', 'visibility', 'barycentric', "self.frustum['width']", "self.frustum['height']", '(self.v.r.size / 3)', 'self.f'], {}), "(color, visible, visibility, barycentric, self.\n frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)\n", (42392, 42512), False, 'from opendr import common\n'), ((42555, 42677), 'opendr.common.dr_wrt_vc', 'common.dr_wrt_vc', (['visible', 'visibility', 'self.f', 'barycentric', 'self.frustum', 'self.vc.size'], {'num_channels': 'self.num_channels'}), '(visible, visibility, self.f, barycentric, self.frustum,\n self.vc.size, num_channels=self.num_channels)\n', (42571, 42677), False, 'from opendr import common\n'), ((43263, 43298), 'numpy.array', 'np.array', (['([0.5] * self.num_channels)'], {}), '([0.5] * self.num_channels)\n', (43271, 43298), True, 'import numpy as np\n'), ((44960, 45081), 'OpenGL.GL.glClearColor', 'GL.glClearColor', (['self.bgcolor.r[0]', 'self.bgcolor.r[1 % self.num_channels]', 'self.bgcolor.r[2 % self.num_channels]', '(1.0)'], {}), '(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],\n 
self.bgcolor.r[2 % self.num_channels], 1.0)\n', (44975, 45081), True, 'import OpenGL.GL as GL\n'), ((45345, 45402), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_ms)\n', (45365, 45402), True, 'import OpenGL.GL as GL\n'), ((45437, 45496), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_DRAW_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_DRAW_FRAMEBUFFER, self.fbo_noms)\n', (45457, 45496), True, 'import OpenGL.GL as GL\n'), ((45660, 45717), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_ms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_ms)\n', (45680, 45717), True, 'import OpenGL.GL as GL\n'), ((45752, 45811), 'OpenGL.GL.glBindFramebuffer', 'GL.glBindFramebuffer', (['GL.GL_READ_FRAMEBUFFER', 'self.fbo_noms'], {}), '(GL.GL_READ_FRAMEBUFFER, self.fbo_noms)\n', (45772, 45811), True, 'import OpenGL.GL as GL\n'), ((46972, 46987), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (46985, 46987), False, 'import pdb\n'), ((48478, 48488), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (48486, 48488), False, 'from OpenGL.arrays import vbo\n'), ((48564, 48576), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (48574, 48576), False, 'from OpenGL.arrays import vbo\n'), ((48652, 48664), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (48662, 48664), False, 'from OpenGL.arrays import vbo\n'), ((48814, 48824), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (48822, 48824), False, 'from OpenGL.arrays import vbo\n'), ((48871, 48883), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (48881, 48883), False, 'from OpenGL.arrays import vbo\n'), ((48930, 48942), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (48940, 48942), False, 'from OpenGL.arrays import vbo\n'), ((48989, 49001), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (48999, 49001), False, 'from 
OpenGL.arrays import vbo\n'), ((49118, 49128), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (49126, 49128), False, 'from OpenGL.arrays import vbo\n'), ((49172, 49184), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (49182, 49184), False, 'from OpenGL.arrays import vbo\n'), ((49228, 49240), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (49238, 49240), False, 'from OpenGL.arrays import vbo\n'), ((49284, 49323), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[vao.value]'], {}), '(1, [vao.value])\n', (49307, 49323), True, 'import OpenGL.GL as GL\n'), ((49502, 49537), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', (49527, 49537), False, 'import glfw\n'), ((49653, 49668), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (49666, 49668), False, 'import pdb\n'), ((52444, 52456), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (52453, 52456), True, 'import OpenGL.GL as GL\n'), ((52473, 52501), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (52493, 52501), True, 'import OpenGL.GL as GL\n'), ((52518, 52543), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (52538, 52543), True, 'import OpenGL.GL as GL\n'), ((52759, 52806), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (52787, 52806), True, 'import OpenGL.GL as GL\n'), ((52855, 52940), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (52879, 52940), True, 'import OpenGL.GL as GL\n'), ((52988, 53032), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (53016, 53032), True, 'import OpenGL.GL as GL\n'), ((53081, 53159), 
'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (53105, 53159), True, 'import OpenGL.GL as GL\n'), ((61787, 61795), 'chumpy.utils.row', 'row', (['cim'], {}), '(cim)\n', (61790, 61795), False, 'from chumpy.utils import row, col\n'), ((62603, 62628), 'numpy.concatenate', 'np.concatenate', (['(r, g, b)'], {}), '((r, g, b))\n', (62617, 62628), True, 'import numpy as np\n'), ((62645, 62693), 'numpy.concatenate', 'np.concatenate', (['(IS * 3, IS * 3 + 1, IS * 3 + 2)'], {}), '((IS * 3, IS * 3 + 1, IS * 3 + 2))\n', (62659, 62693), True, 'import numpy as np\n'), ((62701, 62749), 'numpy.concatenate', 'np.concatenate', (['(JS * 3, JS * 3 + 1, JS * 3 + 2)'], {}), '((JS * 3, JS * 3 + 1, JS * 3 + 2))\n', (62715, 62749), True, 'import numpy as np\n'), ((62760, 62824), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, (IS, JS))'], {'shape': '(self.r.size, wrt.r.size)'}), '((data, (IS, JS)), shape=(self.r.size, wrt.r.size))\n', (62773, 62824), True, 'import scipy.sparse as sp\n'), ((69846, 69876), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (69866, 69876), True, 'import OpenGL.GL as GL\n'), ((70570, 70638), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (70591, 70638), True, 'import OpenGL.GL as GL\n'), ((73472, 73482), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (73480, 73482), False, 'from OpenGL.arrays import vbo\n'), ((73558, 73570), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (73568, 73570), False, 'from OpenGL.arrays import vbo\n'), ((73646, 73658), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (73656, 73658), False, 'from OpenGL.arrays import vbo\n'), ((73832, 73842), 'OpenGL.arrays.vbo.bind', 'vbo.bind', 
([], {}), '()\n', (73840, 73842), False, 'from OpenGL.arrays import vbo\n'), ((73912, 73924), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (73922, 73924), False, 'from OpenGL.arrays import vbo\n'), ((73994, 74006), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (74004, 74006), False, 'from OpenGL.arrays import vbo\n'), ((74173, 74183), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (74181, 74183), False, 'from OpenGL.arrays import vbo\n'), ((74252, 74264), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (74262, 74264), False, 'from OpenGL.arrays import vbo\n'), ((74333, 74345), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (74343, 74345), False, 'from OpenGL.arrays import vbo\n'), ((74509, 74519), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (74517, 74519), False, 'from OpenGL.arrays import vbo\n'), ((74586, 74598), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (74596, 74598), False, 'from OpenGL.arrays import vbo\n'), ((74665, 74677), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (74675, 74677), False, 'from OpenGL.arrays import vbo\n'), ((74844, 74854), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (74852, 74854), False, 'from OpenGL.arrays import vbo\n'), ((74926, 74938), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (74936, 74938), False, 'from OpenGL.arrays import vbo\n'), ((75010, 75022), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (75020, 75022), False, 'from OpenGL.arrays import vbo\n'), ((75095, 75134), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[vao.value]'], {}), '(1, [vao.value])\n', (75118, 75134), True, 'import OpenGL.GL as GL\n'), ((75313, 75348), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', (75338, 75348), False, 'import glfw\n'), ((75487, 75502), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (75500, 75502), False, 'import 
pdb\n'), ((78067, 78079), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (78076, 78079), True, 'import OpenGL.GL as GL\n'), ((78096, 78124), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (78116, 78124), True, 'import OpenGL.GL as GL\n'), ((78141, 78166), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (78161, 78166), True, 'import OpenGL.GL as GL\n'), ((79027, 79074), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (79055, 79074), True, 'import OpenGL.GL as GL\n'), ((79123, 79208), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (79147, 79208), True, 'import OpenGL.GL as GL\n'), ((79256, 79300), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (79284, 79300), True, 'import OpenGL.GL as GL\n'), ((79349, 79427), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (79373, 79427), True, 'import OpenGL.GL as GL\n'), ((104500, 104512), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (104509, 104512), True, 'import OpenGL.GL as GL\n'), ((104529, 104557), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (104549, 104557), True, 'import OpenGL.GL as GL\n'), ((104574, 104599), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (104594, 104599), True, 'import OpenGL.GL as GL\n'), ((104810, 104857), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (104838, 104857), True, 'import OpenGL.GL 
as GL\n'), ((104907, 104992), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (104931, 104992), True, 'import OpenGL.GL as GL\n'), ((105106, 105150), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (105134, 105150), True, 'import OpenGL.GL as GL\n'), ((105200, 105278), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (105224, 105278), True, 'import OpenGL.GL as GL\n'), ((105386, 105428), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (105414, 105428), True, 'import OpenGL.GL as GL\n'), ((105478, 105554), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (105502, 105554), True, 'import OpenGL.GL as GL\n'), ((105857, 105888), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (105867, 105888), True, 'import numpy as np\n'), ((105920, 105931), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['fc'], {}), '(fc)\n', (105927, 105931), False, 'from OpenGL.arrays import vbo\n'), ((105984, 106031), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['face_ids_location'], {}), '(face_ids_location)\n', (106012, 106031), True, 'import OpenGL.GL as GL\n'), ((106081, 106157), 'OpenGL.GL.glVertexAttribIPointer', 'GL.glVertexAttribIPointer', (['face_ids_location', '(1)', 'GL.GL_UNSIGNED_INT', '(0)', 'None'], {}), '(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)\n', (106106, 106157), True, 'import OpenGL.GL as GL\n'), ((106303, 106325), 
'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['f_barycentric'], {}), '(f_barycentric)\n', (106310, 106325), False, 'from OpenGL.arrays import vbo\n'), ((106381, 106431), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['barycentric_location'], {}), '(barycentric_location)\n', (106409, 106431), True, 'import OpenGL.GL as GL\n'), ((106481, 106569), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['barycentric_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE,\n 0, None)\n', (106505, 106569), True, 'import OpenGL.GL as GL\n'), ((106759, 106782), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (106779, 106782), True, 'import OpenGL.GL as GL\n'), ((108493, 108523), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (108513, 108523), True, 'import OpenGL.GL as GL\n'), ((109373, 109407), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (109391, 109407), True, 'import OpenGL.GL as GL\n'), ((109424, 109467), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (109440, 109467), True, 'import OpenGL.GL as GL\n'), ((109484, 109521), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureObjLoc', '(0)'], {}), '(self.textureObjLoc, 0)\n', (109498, 109521), True, 'import OpenGL.GL as GL\n'), ((109539, 109607), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (109560, 109607), True, 'import OpenGL.GL as GL\n'), ((113898, 113970), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height']]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height']])\n", (113906, 113970), True, 'import numpy as np\n'), ((120463, 120496), 
'numpy.repeat', 'np.repeat', (['fc', 'f.shape[1]'], {'axis': '(0)'}), '(fc, f.shape[1], axis=0)\n', (120472, 120496), True, 'import numpy as np\n'), ((126197, 126229), 'numpy.ones', 'np.ones', (['[vertices.size // 3, 1]'], {}), '([vertices.size // 3, 1])\n', (126204, 126229), True, 'import numpy as np\n'), ((126287, 126311), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (126295, 126311), True, 'import numpy as np\n'), ((126388, 126412), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (126396, 126412), True, 'import numpy as np\n'), ((126474, 126503), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (126481, 126503), True, 'import numpy as np\n'), ((130188, 130212), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (130196, 130212), True, 'import numpy as np\n'), ((130289, 130313), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (130297, 130313), True, 'import numpy as np\n'), ((130375, 130404), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (130382, 130404), True, 'import numpy as np\n'), ((131220, 131246), 'numpy.linalg.norm', 'np.linalg.norm', (['nt'], {'axis': '(1)'}), '(nt, axis=1)\n', (131234, 131246), True, 'import numpy as np\n'), ((133196, 133222), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (133204, 133222), True, 'import numpy as np\n'), ((133876, 133917), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'nt_norm', 'nt_norm'], {}), "('ij,ik->ijk', nt_norm, nt_norm)\n", (133885, 133917), True, 'import numpy as np\n'), ((135272, 135304), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p2 - p1)'], {}), '(nt_norm[:, :], p2 - p1)\n', (135280, 135304), True, 'import numpy as np\n'), ((135449, 135485), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (135457, 135485), True, 'import numpy as 
np\n'), ((136166, 136202), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (136174, 136202), True, 'import numpy as np\n'), ((136533, 136565), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p0 - p2)'], {}), '(nt_norm[:, :], p0 - p2)\n', (136541, 136565), True, 'import numpy as np\n'), ((137316, 137348), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p1 - p0)'], {}), '(nt_norm[:, :], p1 - p0)\n', (137324, 137348), True, 'import numpy as np\n'), ((137495, 137531), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (137503, 137531), True, 'import numpy as np\n'), ((139240, 139283), 'numpy.sum', 'np.sum', (['(dbvc[:, :, :, :, :, None] * dxdp)', '(4)'], {}), '(dbvc[:, :, :, :, :, None] * dxdp, 4)\n', (139246, 139283), True, 'import numpy as np\n'), ((140371, 140409), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (140378, 140409), True, 'import numpy as np\n'), ((149425, 149524), 'numpy.abs', 'np.abs', (['(barycentricVertsDistIntesect + barycentricVertsDistIntesect2 -\n barycentricVertsDistEdge)'], {}), '(barycentricVertsDistIntesect + barycentricVertsDistIntesect2 -\n barycentricVertsDistEdge)\n', (149431, 149524), True, 'import numpy as np\n'), ((155618, 155656), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (155625, 155656), True, 'import numpy as np\n'), ((161578, 161598), 'numpy.abs', 'np.abs', (['n_norm[:, 0]'], {}), '(n_norm[:, 0])\n', (161584, 161598), True, 'import numpy as np\n'), ((161600, 161620), 'numpy.abs', 'np.abs', (['n_norm[:, 1]'], {}), '(n_norm[:, 1])\n', (161606, 161620), True, 'import numpy as np\n'), ((162090, 162128), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ik"""', 'dv1dp1', 'lnorm'], {}), "('ijk,ij->ik', dv1dp1, lnorm)\n", (162099, 162128), True, 'import numpy as np\n'), ((162131, 162170), 'numpy.einsum', 'np.einsum', 
(['"""ij,ijl->il"""', 'v1', 'dl_normdp1'], {}), "('ij,ijl->il', v1, dl_normdp1)\n", (162140, 162170), True, 'import numpy as np\n'), ((162195, 162234), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp2'], {}), "('ij,ijl->il', v1, dl_normdp2)\n", (162204, 162234), True, 'import numpy as np\n'), ((162853, 162867), 'numpy.abs', 'np.abs', (['n_norm'], {}), '(n_norm)\n', (162859, 162867), True, 'import numpy as np\n'), ((171625, 171641), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (171632, 171641), True, 'import numpy as np\n'), ((171697, 171713), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (171704, 171713), True, 'import numpy as np\n'), ((171769, 171785), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (171776, 171785), True, 'import numpy as np\n'), ((172343, 172366), 'numpy.atleast_3d', 'np.atleast_3d', (['observed'], {}), '(observed)\n', (172356, 172366), True, 'import numpy as np\n'), ((172803, 172834), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (172812, 172834), True, 'import numpy as np\n'), ((174511, 174549), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (174518, 174549), True, 'import numpy as np\n'), ((181177, 181202), 'numpy.asarray', 'np.asarray', (['bc'], {'order': '"""C"""'}), "(bc, order='C')\n", (181187, 181202), True, 'import numpy as np\n'), ((197273, 197303), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (197293, 197303), True, 'import OpenGL.GL as GL\n'), ((198724, 198792), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (198745, 198792), True, 'import OpenGL.GL as GL\n'), ((201713, 201723), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (201721, 201723), False, 'from OpenGL.arrays import vbo\n'), ((201799, 
201811), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (201809, 201811), False, 'from OpenGL.arrays import vbo\n'), ((201887, 201899), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (201897, 201899), False, 'from OpenGL.arrays import vbo\n'), ((202073, 202083), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (202081, 202083), False, 'from OpenGL.arrays import vbo\n'), ((202153, 202165), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (202163, 202165), False, 'from OpenGL.arrays import vbo\n'), ((202235, 202247), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (202245, 202247), False, 'from OpenGL.arrays import vbo\n'), ((202414, 202424), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (202422, 202424), False, 'from OpenGL.arrays import vbo\n'), ((202493, 202505), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (202503, 202505), False, 'from OpenGL.arrays import vbo\n'), ((202574, 202586), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (202584, 202586), False, 'from OpenGL.arrays import vbo\n'), ((202750, 202760), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (202758, 202760), False, 'from OpenGL.arrays import vbo\n'), ((202827, 202839), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (202837, 202839), False, 'from OpenGL.arrays import vbo\n'), ((202906, 202918), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (202916, 202918), False, 'from OpenGL.arrays import vbo\n'), ((203085, 203095), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (203093, 203095), False, 'from OpenGL.arrays import vbo\n'), ((203167, 203179), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (203177, 203179), False, 'from OpenGL.arrays import vbo\n'), ((203251, 203263), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (203261, 203263), False, 'from OpenGL.arrays import vbo\n'), ((203336, 203375), 'OpenGL.GL.glDeleteVertexArrays', 
'GL.glDeleteVertexArrays', (['(1)', '[vao.value]'], {}), '(1, [vao.value])\n', (203359, 203375), True, 'import OpenGL.GL as GL\n'), ((203554, 203589), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', (203579, 203589), False, 'import glfw\n'), ((203728, 203743), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (203741, 203743), False, 'import pdb\n'), ((206308, 206320), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (206317, 206320), True, 'import OpenGL.GL as GL\n'), ((206337, 206365), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (206357, 206365), True, 'import OpenGL.GL as GL\n'), ((206382, 206407), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (206402, 206407), True, 'import OpenGL.GL as GL\n'), ((207268, 207315), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (207296, 207315), True, 'import OpenGL.GL as GL\n'), ((207364, 207449), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (207388, 207449), True, 'import OpenGL.GL as GL\n'), ((207497, 207541), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (207525, 207541), True, 'import OpenGL.GL as GL\n'), ((207590, 207668), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (207614, 207668), True, 'import OpenGL.GL as GL\n'), ((232730, 232742), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (232739, 232742), True, 'import OpenGL.GL as GL\n'), ((232759, 232787), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 
'vao'], {}), '(1, vao)\n', (232779, 232787), True, 'import OpenGL.GL as GL\n'), ((232804, 232829), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (232824, 232829), True, 'import OpenGL.GL as GL\n'), ((233040, 233087), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (233068, 233087), True, 'import OpenGL.GL as GL\n'), ((233137, 233222), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (233161, 233222), True, 'import OpenGL.GL as GL\n'), ((233336, 233380), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (233364, 233380), True, 'import OpenGL.GL as GL\n'), ((233430, 233508), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (233454, 233508), True, 'import OpenGL.GL as GL\n'), ((233616, 233658), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (233644, 233658), True, 'import OpenGL.GL as GL\n'), ((233708, 233784), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (233732, 233784), True, 'import OpenGL.GL as GL\n'), ((234087, 234118), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (234097, 234118), True, 'import numpy as np\n'), ((234150, 234161), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['fc'], {}), '(fc)\n', (234157, 234161), False, 'from OpenGL.arrays import vbo\n'), ((234214, 234261), 'OpenGL.GL.glEnableVertexAttribArray', 
'GL.glEnableVertexAttribArray', (['face_ids_location'], {}), '(face_ids_location)\n', (234242, 234261), True, 'import OpenGL.GL as GL\n'), ((234311, 234387), 'OpenGL.GL.glVertexAttribIPointer', 'GL.glVertexAttribIPointer', (['face_ids_location', '(1)', 'GL.GL_UNSIGNED_INT', '(0)', 'None'], {}), '(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)\n', (234336, 234387), True, 'import OpenGL.GL as GL\n'), ((234533, 234555), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['f_barycentric'], {}), '(f_barycentric)\n', (234540, 234555), False, 'from OpenGL.arrays import vbo\n'), ((234611, 234661), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['barycentric_location'], {}), '(barycentric_location)\n', (234639, 234661), True, 'import OpenGL.GL as GL\n'), ((234711, 234799), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['barycentric_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE,\n 0, None)\n', (234735, 234799), True, 'import OpenGL.GL as GL\n'), ((234989, 235012), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (235009, 235012), True, 'import OpenGL.GL as GL\n'), ((236723, 236753), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (236743, 236753), True, 'import OpenGL.GL as GL\n'), ((237603, 237637), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (237621, 237637), True, 'import OpenGL.GL as GL\n'), ((237654, 237697), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (237670, 237697), True, 'import OpenGL.GL as GL\n'), ((237714, 237751), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureObjLoc', '(0)'], {}), '(self.textureObjLoc, 0)\n', (237728, 237751), True, 'import OpenGL.GL as GL\n'), ((237769, 237837), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', 
(['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (237790, 237837), True, 'import OpenGL.GL as GL\n'), ((242128, 242200), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height']]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height']])\n", (242136, 242200), True, 'import numpy as np\n'), ((248693, 248726), 'numpy.repeat', 'np.repeat', (['fc', 'f.shape[1]'], {'axis': '(0)'}), '(fc, f.shape[1], axis=0)\n', (248702, 248726), True, 'import numpy as np\n'), ((252808, 252999), 'opendr.common.dImage_wrt_2dVerts_bnd', 'common.dImage_wrt_2dVerts_bnd', (['color', 'visible', 'visibility', 'barycentric', "self.frustum['width']", "self.frustum['height']", '(self.v.r.size / 3)', 'self.f', '(self.boundaryid_image != 4294967295)'], {}), "(color, visible, visibility, barycentric, self\n .frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f, \n self.boundaryid_image != 4294967295)\n", (252837, 252999), False, 'from opendr import common\n'), ((253043, 253188), 'opendr.common.dImage_wrt_2dVerts', 'common.dImage_wrt_2dVerts', (['color', 'visible', 'visibility', 'barycentric', "self.frustum['width']", "self.frustum['height']", '(self.v.r.size / 3)', 'self.f'], {}), "(color, visible, visibility, barycentric, self.\n frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)\n", (253068, 253188), False, 'from opendr import common\n'), ((254894, 254926), 'numpy.ones', 'np.ones', (['[vertices.size // 3, 1]'], {}), '([vertices.size // 3, 1])\n', (254901, 254926), True, 'import numpy as np\n'), ((254984, 255008), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (254992, 255008), True, 'import numpy as np\n'), ((255085, 255109), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (255093, 255109), True, 'import numpy as np\n'), ((255171, 255200), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), 
'([verts.size // 3, 1])\n', (255178, 255200), True, 'import numpy as np\n'), ((258885, 258909), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (258893, 258909), True, 'import numpy as np\n'), ((258986, 259010), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (258994, 259010), True, 'import numpy as np\n'), ((259072, 259101), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (259079, 259101), True, 'import numpy as np\n'), ((259917, 259943), 'numpy.linalg.norm', 'np.linalg.norm', (['nt'], {'axis': '(1)'}), '(nt, axis=1)\n', (259931, 259943), True, 'import numpy as np\n'), ((261893, 261919), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (261901, 261919), True, 'import numpy as np\n'), ((262573, 262614), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'nt_norm', 'nt_norm'], {}), "('ij,ik->ijk', nt_norm, nt_norm)\n", (262582, 262614), True, 'import numpy as np\n'), ((263969, 264001), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p2 - p1)'], {}), '(nt_norm[:, :], p2 - p1)\n', (263977, 264001), True, 'import numpy as np\n'), ((264146, 264182), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (264154, 264182), True, 'import numpy as np\n'), ((264863, 264899), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (264871, 264899), True, 'import numpy as np\n'), ((265230, 265262), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p0 - p2)'], {}), '(nt_norm[:, :], p0 - p2)\n', (265238, 265262), True, 'import numpy as np\n'), ((266013, 266045), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p1 - p0)'], {}), '(nt_norm[:, :], p1 - p0)\n', (266021, 266045), True, 'import numpy as np\n'), ((266192, 266228), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (266200, 266228), True, 'import numpy as 
np\n'), ((267937, 267980), 'numpy.sum', 'np.sum', (['(dbvc[:, :, :, :, :, None] * dxdp)', '(4)'], {}), '(dbvc[:, :, :, :, :, None] * dxdp, 4)\n', (267943, 267980), True, 'import numpy as np\n'), ((269068, 269106), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (269075, 269106), True, 'import numpy as np\n'), ((278122, 278221), 'numpy.abs', 'np.abs', (['(barycentricVertsDistIntesect + barycentricVertsDistIntesect2 -\n barycentricVertsDistEdge)'], {}), '(barycentricVertsDistIntesect + barycentricVertsDistIntesect2 -\n barycentricVertsDistEdge)\n', (278128, 278221), True, 'import numpy as np\n'), ((284315, 284353), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (284322, 284353), True, 'import numpy as np\n'), ((290275, 290295), 'numpy.abs', 'np.abs', (['n_norm[:, 0]'], {}), '(n_norm[:, 0])\n', (290281, 290295), True, 'import numpy as np\n'), ((290297, 290317), 'numpy.abs', 'np.abs', (['n_norm[:, 1]'], {}), '(n_norm[:, 1])\n', (290303, 290317), True, 'import numpy as np\n'), ((290787, 290825), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ik"""', 'dv1dp1', 'lnorm'], {}), "('ijk,ij->ik', dv1dp1, lnorm)\n", (290796, 290825), True, 'import numpy as np\n'), ((290828, 290867), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp1'], {}), "('ij,ijl->il', v1, dl_normdp1)\n", (290837, 290867), True, 'import numpy as np\n'), ((290892, 290931), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp2'], {}), "('ij,ijl->il', v1, dl_normdp2)\n", (290901, 290931), True, 'import numpy as np\n'), ((291550, 291564), 'numpy.abs', 'np.abs', (['n_norm'], {}), '(n_norm)\n', (291556, 291564), True, 'import numpy as np\n'), ((300322, 300338), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (300329, 300338), True, 'import numpy as np\n'), ((300394, 300410), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (300401, 300410), True, 'import 
numpy as np\n'), ((300466, 300482), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (300473, 300482), True, 'import numpy as np\n'), ((301040, 301063), 'numpy.atleast_3d', 'np.atleast_3d', (['observed'], {}), '(observed)\n', (301053, 301063), True, 'import numpy as np\n'), ((301500, 301531), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (301509, 301531), True, 'import numpy as np\n'), ((303208, 303246), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (303215, 303246), True, 'import numpy as np\n'), ((309874, 309899), 'numpy.asarray', 'np.asarray', (['bc'], {'order': '"""C"""'}), "(bc, order='C')\n", (309884, 309899), True, 'import numpy as np\n'), ((325970, 326000), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (325990, 326000), True, 'import OpenGL.GL as GL\n'), ((327421, 327489), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (327442, 327489), True, 'import OpenGL.GL as GL\n'), ((330403, 330413), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (330411, 330413), False, 'from OpenGL.arrays import vbo\n'), ((330489, 330501), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (330499, 330501), False, 'from OpenGL.arrays import vbo\n'), ((330577, 330589), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (330587, 330589), False, 'from OpenGL.arrays import vbo\n'), ((330763, 330773), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (330771, 330773), False, 'from OpenGL.arrays import vbo\n'), ((330843, 330855), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (330853, 330855), False, 'from OpenGL.arrays import vbo\n'), ((330925, 330937), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (330935, 330937), False, 'from OpenGL.arrays import 
vbo\n'), ((331104, 331114), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (331112, 331114), False, 'from OpenGL.arrays import vbo\n'), ((331183, 331195), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (331193, 331195), False, 'from OpenGL.arrays import vbo\n'), ((331264, 331276), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (331274, 331276), False, 'from OpenGL.arrays import vbo\n'), ((331440, 331450), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (331448, 331450), False, 'from OpenGL.arrays import vbo\n'), ((331517, 331529), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (331527, 331529), False, 'from OpenGL.arrays import vbo\n'), ((331596, 331608), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (331606, 331608), False, 'from OpenGL.arrays import vbo\n'), ((331775, 331785), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (331783, 331785), False, 'from OpenGL.arrays import vbo\n'), ((331857, 331869), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (331867, 331869), False, 'from OpenGL.arrays import vbo\n'), ((331941, 331953), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (331951, 331953), False, 'from OpenGL.arrays import vbo\n'), ((332026, 332065), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[vao.value]'], {}), '(1, [vao.value])\n', (332049, 332065), True, 'import OpenGL.GL as GL\n'), ((332244, 332279), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', (332269, 332279), False, 'import glfw\n'), ((332418, 332433), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (332431, 332433), False, 'import pdb\n'), ((335001, 335013), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (335010, 335013), True, 'import OpenGL.GL as GL\n'), ((335030, 335058), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (335050, 335058), True, 'import OpenGL.GL as 
GL\n'), ((335075, 335100), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (335095, 335100), True, 'import OpenGL.GL as GL\n'), ((335961, 336008), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (335989, 336008), True, 'import OpenGL.GL as GL\n'), ((336058, 336143), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (336082, 336143), True, 'import OpenGL.GL as GL\n'), ((336191, 336235), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (336219, 336235), True, 'import OpenGL.GL as GL\n'), ((336285, 336363), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (336309, 336363), True, 'import OpenGL.GL as GL\n'), ((361686, 361698), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (361695, 361698), True, 'import OpenGL.GL as GL\n'), ((361715, 361743), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (361735, 361743), True, 'import OpenGL.GL as GL\n'), ((361760, 361785), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (361780, 361785), True, 'import OpenGL.GL as GL\n'), ((361996, 362043), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (362024, 362043), True, 'import OpenGL.GL as GL\n'), ((362093, 362178), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (362117, 362178), True, 
'import OpenGL.GL as GL\n'), ((362292, 362336), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (362320, 362336), True, 'import OpenGL.GL as GL\n'), ((362386, 362464), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (362410, 362464), True, 'import OpenGL.GL as GL\n'), ((362572, 362614), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (362600, 362614), True, 'import OpenGL.GL as GL\n'), ((362664, 362740), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (362688, 362740), True, 'import OpenGL.GL as GL\n'), ((363045, 363076), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (363055, 363076), True, 'import numpy as np\n'), ((363108, 363119), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['fc'], {}), '(fc)\n', (363115, 363119), False, 'from OpenGL.arrays import vbo\n'), ((363172, 363219), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['face_ids_location'], {}), '(face_ids_location)\n', (363200, 363219), True, 'import OpenGL.GL as GL\n'), ((363269, 363345), 'OpenGL.GL.glVertexAttribIPointer', 'GL.glVertexAttribIPointer', (['face_ids_location', '(1)', 'GL.GL_UNSIGNED_INT', '(0)', 'None'], {}), '(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)\n', (363294, 363345), True, 'import OpenGL.GL as GL\n'), ((363491, 363513), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['f_barycentric'], {}), '(f_barycentric)\n', (363498, 363513), False, 'from OpenGL.arrays import vbo\n'), ((363569, 363619), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['barycentric_location'], {}), 
'(barycentric_location)\n', (363597, 363619), True, 'import OpenGL.GL as GL\n'), ((363669, 363757), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['barycentric_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE,\n 0, None)\n', (363693, 363757), True, 'import OpenGL.GL as GL\n'), ((363947, 363970), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (363967, 363970), True, 'import OpenGL.GL as GL\n'), ((366040, 366070), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (366060, 366070), True, 'import OpenGL.GL as GL\n'), ((366922, 366956), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (366940, 366956), True, 'import OpenGL.GL as GL\n'), ((366973, 367016), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (366989, 367016), True, 'import OpenGL.GL as GL\n'), ((367033, 367070), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureObjLoc', '(0)'], {}), '(self.textureObjLoc, 0)\n', (367047, 367070), True, 'import OpenGL.GL as GL\n'), ((367088, 367156), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (367109, 367156), True, 'import OpenGL.GL as GL\n'), ((371451, 371523), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height']]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height']])\n", (371459, 371523), True, 'import numpy as np\n'), ((378245, 378278), 'numpy.repeat', 'np.repeat', (['fc', 'f.shape[1]'], {'axis': '(0)'}), '(fc, f.shape[1], axis=0)\n', (378254, 378278), True, 'import numpy as np\n'), ((384243, 384275), 'numpy.ones', 'np.ones', (['[vertices.size // 3, 1]'], {}), '([vertices.size // 3, 1])\n', (384250, 
384275), True, 'import numpy as np\n'), ((384333, 384357), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (384341, 384357), True, 'import numpy as np\n'), ((384434, 384458), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (384442, 384458), True, 'import numpy as np\n'), ((384520, 384549), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (384527, 384549), True, 'import numpy as np\n'), ((388232, 388256), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (388240, 388256), True, 'import numpy as np\n'), ((388333, 388357), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (388341, 388357), True, 'import numpy as np\n'), ((388419, 388448), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (388426, 388448), True, 'import numpy as np\n'), ((389276, 389302), 'numpy.linalg.norm', 'np.linalg.norm', (['nt'], {'axis': '(1)'}), '(nt, axis=1)\n', (389290, 389302), True, 'import numpy as np\n'), ((391314, 391340), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (391322, 391340), True, 'import numpy as np\n'), ((392025, 392066), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'nt_norm', 'nt_norm'], {}), "('ij,ik->ijk', nt_norm, nt_norm)\n", (392034, 392066), True, 'import numpy as np\n'), ((393449, 393481), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p2 - p1)'], {}), '(nt_norm[:, :], p2 - p1)\n', (393457, 393481), True, 'import numpy as np\n'), ((393630, 393666), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (393638, 393666), True, 'import numpy as np\n'), ((394380, 394416), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (394388, 394416), True, 'import numpy as np\n'), ((394777, 394809), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p0 - p2)'], {}), 
'(nt_norm[:, :], p0 - p2)\n', (394785, 394809), True, 'import numpy as np\n'), ((395596, 395628), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p1 - p0)'], {}), '(nt_norm[:, :], p1 - p0)\n', (395604, 395628), True, 'import numpy as np\n'), ((395777, 395813), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (395785, 395813), True, 'import numpy as np\n'), ((397565, 397608), 'numpy.sum', 'np.sum', (['(dbvc[:, :, :, :, :, None] * dxdp)', '(4)'], {}), '(dbvc[:, :, :, :, :, None] * dxdp, 4)\n', (397571, 397608), True, 'import numpy as np\n'), ((398311, 398349), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (398318, 398349), True, 'import numpy as np\n'), ((406170, 406201), 'numpy.zeros_like', 'np.zeros_like', (['self.color_image'], {}), '(self.color_image)\n', (406183, 406201), True, 'import numpy as np\n'), ((406532, 406571), 'numpy.sum', 'np.sum', (['self.sampleResidualsWeighted', '(0)'], {}), '(self.sampleResidualsWeighted, 0)\n', (406538, 406571), True, 'import numpy as np\n'), ((408028, 408066), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (408035, 408066), True, 'import numpy as np\n'), ((414381, 414419), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ik"""', 'dv1dp1', 'lnorm'], {}), "('ijk,ij->ik', dv1dp1, lnorm)\n", (414390, 414419), True, 'import numpy as np\n'), ((414422, 414461), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp1'], {}), "('ij,ijl->il', v1, dl_normdp1)\n", (414431, 414461), True, 'import numpy as np\n'), ((414486, 414525), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp2'], {}), "('ij,ijl->il', v1, dl_normdp2)\n", (414495, 414525), True, 'import numpy as np\n'), ((422669, 422685), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (422676, 422685), True, 'import numpy as np\n'), ((422741, 422757), 'numpy.ones', 'np.ones', (['[nv, 1]'], 
{}), '([nv, 1])\n', (422748, 422757), True, 'import numpy as np\n'), ((422813, 422829), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (422820, 422829), True, 'import numpy as np\n'), ((423635, 423658), 'numpy.atleast_3d', 'np.atleast_3d', (['observed'], {}), '(observed)\n', (423648, 423658), True, 'import numpy as np\n'), ((424002, 424033), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (424011, 424033), True, 'import numpy as np\n'), ((425100, 425138), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (425107, 425138), True, 'import numpy as np\n'), ((428669, 428712), 'numpy.asarray', 'np.asarray', (['dImage_wrt_nonbnd_vc'], {'order': '"""C"""'}), "(dImage_wrt_nonbnd_vc, order='C')\n", (428679, 428712), True, 'import numpy as np\n'), ((444773, 444803), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (444793, 444803), True, 'import OpenGL.GL as GL\n'), ((446226, 446294), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (446247, 446294), True, 'import OpenGL.GL as GL\n'), ((449299, 449309), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (449307, 449309), False, 'from OpenGL.arrays import vbo\n'), ((449385, 449397), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (449395, 449397), False, 'from OpenGL.arrays import vbo\n'), ((449473, 449485), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (449483, 449485), False, 'from OpenGL.arrays import vbo\n'), ((449659, 449669), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (449667, 449669), False, 'from OpenGL.arrays import vbo\n'), ((449739, 449751), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (449749, 449751), False, 'from OpenGL.arrays import vbo\n'), ((449821, 449833), 
'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (449831, 449833), False, 'from OpenGL.arrays import vbo\n'), ((450000, 450010), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (450008, 450010), False, 'from OpenGL.arrays import vbo\n'), ((450079, 450091), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (450089, 450091), False, 'from OpenGL.arrays import vbo\n'), ((450160, 450172), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (450170, 450172), False, 'from OpenGL.arrays import vbo\n'), ((450336, 450346), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (450344, 450346), False, 'from OpenGL.arrays import vbo\n'), ((450413, 450425), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (450423, 450425), False, 'from OpenGL.arrays import vbo\n'), ((450492, 450504), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (450502, 450504), False, 'from OpenGL.arrays import vbo\n'), ((450671, 450681), 'OpenGL.arrays.vbo.bind', 'vbo.bind', ([], {}), '()\n', (450679, 450681), False, 'from OpenGL.arrays import vbo\n'), ((450753, 450765), 'OpenGL.arrays.vbo.unbind', 'vbo.unbind', ([], {}), '()\n', (450763, 450765), False, 'from OpenGL.arrays import vbo\n'), ((450837, 450849), 'OpenGL.arrays.vbo.delete', 'vbo.delete', ([], {}), '()\n', (450847, 450849), False, 'from OpenGL.arrays import vbo\n'), ((450922, 450961), 'OpenGL.GL.glDeleteVertexArrays', 'GL.glDeleteVertexArrays', (['(1)', '[vao.value]'], {}), '(1, [vao.value])\n', (450945, 450961), True, 'import OpenGL.GL as GL\n'), ((451140, 451175), 'glfw.make_context_current', 'glfw.make_context_current', (['self.win'], {}), '(self.win)\n', (451165, 451175), False, 'import glfw\n'), ((451314, 451329), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (451327, 451329), False, 'import pdb\n'), ((453926, 453938), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (453935, 453938), True, 'import OpenGL.GL as GL\n'), ((453955, 453983), 
'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (453975, 453983), True, 'import OpenGL.GL as GL\n'), ((454000, 454025), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (454020, 454025), True, 'import OpenGL.GL as GL\n'), ((454886, 454933), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (454914, 454933), True, 'import OpenGL.GL as GL\n'), ((454983, 455068), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (455007, 455068), True, 'import OpenGL.GL as GL\n'), ((455116, 455160), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (455144, 455160), True, 'import OpenGL.GL as GL\n'), ((455210, 455288), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (455234, 455288), True, 'import OpenGL.GL as GL\n'), ((481774, 481786), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (481783, 481786), True, 'import OpenGL.GL as GL\n'), ((481803, 481831), 'OpenGL.GL.glGenVertexArrays', 'GL.glGenVertexArrays', (['(1)', 'vao'], {}), '(1, vao)\n', (481823, 481831), True, 'import OpenGL.GL as GL\n'), ((481848, 481873), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao'], {}), '(vao)\n', (481868, 481873), True, 'import OpenGL.GL as GL\n'), ((482084, 482131), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['position_location'], {}), '(position_location)\n', (482112, 482131), True, 'import OpenGL.GL as GL\n'), ((482181, 482266), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['position_location', '(3)', 
'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(position_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0,\n None)\n', (482205, 482266), True, 'import OpenGL.GL as GL\n'), ((482380, 482424), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['color_location'], {}), '(color_location)\n', (482408, 482424), True, 'import OpenGL.GL as GL\n'), ((482474, 482552), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['color_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(color_location, 3, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (482498, 482552), True, 'import OpenGL.GL as GL\n'), ((482660, 482702), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (482688, 482702), True, 'import OpenGL.GL as GL\n'), ((482752, 482828), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (482776, 482828), True, 'import OpenGL.GL as GL\n'), ((483134, 483165), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (483144, 483165), True, 'import numpy as np\n'), ((483197, 483208), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['fc'], {}), '(fc)\n', (483204, 483208), False, 'from OpenGL.arrays import vbo\n'), ((483261, 483308), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['face_ids_location'], {}), '(face_ids_location)\n', (483289, 483308), True, 'import OpenGL.GL as GL\n'), ((483358, 483434), 'OpenGL.GL.glVertexAttribIPointer', 'GL.glVertexAttribIPointer', (['face_ids_location', '(1)', 'GL.GL_UNSIGNED_INT', '(0)', 'None'], {}), '(face_ids_location, 1, GL.GL_UNSIGNED_INT, 0, None)\n', (483383, 483434), True, 'import OpenGL.GL as GL\n'), ((483580, 483602), 'OpenGL.arrays.vbo.VBO', 'vbo.VBO', (['f_barycentric'], {}), '(f_barycentric)\n', (483587, 483602), False, 'from OpenGL.arrays 
import vbo\n'), ((483658, 483708), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['barycentric_location'], {}), '(barycentric_location)\n', (483686, 483708), True, 'import OpenGL.GL as GL\n'), ((483758, 483846), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['barycentric_location', '(3)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(barycentric_location, 3, GL.GL_FLOAT, GL.GL_FALSE,\n 0, None)\n', (483782, 483846), True, 'import OpenGL.GL as GL\n'), ((484036, 484059), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['(0)'], {}), '(0)\n', (484056, 484059), True, 'import OpenGL.GL as GL\n'), ((486129, 486159), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), '(vao_mesh)\n', (486149, 486159), True, 'import OpenGL.GL as GL\n'), ((487011, 487045), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (487029, 487045), True, 'import OpenGL.GL as GL\n'), ((487062, 487105), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (487078, 487105), True, 'import OpenGL.GL as GL\n'), ((487122, 487159), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureObjLoc', '(0)'], {}), '(self.textureObjLoc, 0)\n', (487136, 487159), True, 'import OpenGL.GL as GL\n'), ((487177, 487245), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (487198, 487245), True, 'import OpenGL.GL as GL\n'), ((491540, 491612), 'numpy.zeros', 'np.zeros', (["[self.nsamples, self.frustum['width'], self.frustum['height']]"], {}), "([self.nsamples, self.frustum['width'], self.frustum['height']])\n", (491548, 491612), True, 'import numpy as np\n'), ((498334, 498367), 'numpy.repeat', 'np.repeat', (['fc', 'f.shape[1]'], {'axis': '(0)'}), '(fc, f.shape[1], axis=0)\n', (498343, 498367), True, 'import 
numpy as np\n'), ((502593, 502784), 'opendr.common.dImage_wrt_2dVerts_bnd', 'common.dImage_wrt_2dVerts_bnd', (['color', 'visible', 'visibility', 'barycentric', "self.frustum['width']", "self.frustum['height']", '(self.v.r.size / 3)', 'self.f', '(self.boundaryid_image != 4294967295)'], {}), "(color, visible, visibility, barycentric, self\n .frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f, \n self.boundaryid_image != 4294967295)\n", (502622, 502784), False, 'from opendr import common\n'), ((502828, 502973), 'opendr.common.dImage_wrt_2dVerts', 'common.dImage_wrt_2dVerts', (['color', 'visible', 'visibility', 'barycentric', "self.frustum['width']", "self.frustum['height']", '(self.v.r.size / 3)', 'self.f'], {}), "(color, visible, visibility, barycentric, self.\n frustum['width'], self.frustum['height'], self.v.r.size / 3, self.f)\n", (502853, 502973), False, 'from opendr import common\n'), ((504736, 504768), 'numpy.ones', 'np.ones', (['[vertices.size // 3, 1]'], {}), '([vertices.size // 3, 1])\n', (504743, 504768), True, 'import numpy as np\n'), ((504826, 504850), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (504834, 504850), True, 'import numpy as np\n'), ((504927, 504951), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (504935, 504951), True, 'import numpy as np\n'), ((505013, 505042), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (505020, 505042), True, 'import numpy as np\n'), ((508725, 508749), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (508733, 508749), True, 'import numpy as np\n'), ((508826, 508850), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (508834, 508850), True, 'import numpy as np\n'), ((508912, 508941), 'numpy.ones', 'np.ones', (['[verts.size // 3, 1]'], {}), '([verts.size // 3, 1])\n', (508919, 508941), True, 'import numpy as np\n'), ((509769, 509795), 'numpy.linalg.norm', 
'np.linalg.norm', (['nt'], {'axis': '(1)'}), '(nt, axis=1)\n', (509783, 509795), True, 'import numpy as np\n'), ((511807, 511833), 'numpy.cross', 'np.cross', (['(p1 - p0)', '(p2 - p0)'], {}), '(p1 - p0, p2 - p0)\n', (511815, 511833), True, 'import numpy as np\n'), ((512518, 512559), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'nt_norm', 'nt_norm'], {}), "('ij,ik->ijk', nt_norm, nt_norm)\n", (512527, 512559), True, 'import numpy as np\n'), ((513942, 513974), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p2 - p1)'], {}), '(nt_norm[:, :], p2 - p1)\n', (513950, 513974), True, 'import numpy as np\n'), ((514123, 514159), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (514131, 514159), True, 'import numpy as np\n'), ((514873, 514909), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (514881, 514909), True, 'import numpy as np\n'), ((515270, 515302), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p0 - p2)'], {}), '(nt_norm[:, :], p0 - p2)\n', (515278, 515302), True, 'import numpy as np\n'), ((516089, 516121), 'numpy.cross', 'np.cross', (['nt_norm[:, :]', '(p1 - p0)'], {}), '(nt_norm[:, :], p1 - p0)\n', (516097, 516121), True, 'import numpy as np\n'), ((516270, 516306), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', 'ident'], {}), '(nt_norm[:, None, :], ident)\n', (516278, 516306), True, 'import numpy as np\n'), ((518058, 518101), 'numpy.sum', 'np.sum', (['(dbvc[:, :, :, :, :, None] * dxdp)', '(4)'], {}), '(dbvc[:, :, :, :, :, None] * dxdp, 4)\n', (518064, 518101), True, 'import numpy as np\n'), ((518804, 518842), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (518811, 518842), True, 'import numpy as np\n'), ((526693, 526724), 'numpy.zeros_like', 'np.zeros_like', (['self.color_image'], {}), '(self.color_image)\n', (526706, 526724), True, 'import numpy as np\n'), ((527055, 527094), 'numpy.sum', 
'np.sum', (['self.sampleResidualsWeighted', '(0)'], {}), '(self.sampleResidualsWeighted, 0)\n', (527061, 527094), True, 'import numpy as np\n'), ((528551, 528589), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (528558, 528589), True, 'import numpy as np\n'), ((535034, 535072), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ik"""', 'dv1dp1', 'lnorm'], {}), "('ijk,ij->ik', dv1dp1, lnorm)\n", (535043, 535072), True, 'import numpy as np\n'), ((535075, 535114), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp1'], {}), "('ij,ijl->il', v1, dl_normdp1)\n", (535084, 535114), True, 'import numpy as np\n'), ((535139, 535178), 'numpy.einsum', 'np.einsum', (['"""ij,ijl->il"""', 'v1', 'dl_normdp2'], {}), "('ij,ijl->il', v1, dl_normdp2)\n", (535148, 535178), True, 'import numpy as np\n'), ((543322, 543338), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (543329, 543338), True, 'import numpy as np\n'), ((543394, 543410), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (543401, 543410), True, 'import numpy as np\n'), ((543466, 543482), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (543473, 543482), True, 'import numpy as np\n'), ((544288, 544311), 'numpy.atleast_3d', 'np.atleast_3d', (['observed'], {}), '(observed)\n', (544301, 544311), True, 'import numpy as np\n'), ((544655, 544686), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (544664, 544686), True, 'import numpy as np\n'), ((545753, 545791), 'numpy.ones', 'np.ones', (['self.boundarybool_image.shape'], {}), '(self.boundarybool_image.shape)\n', (545760, 545791), True, 'import numpy as np\n'), ((549462, 549505), 'numpy.asarray', 'np.asarray', (['dImage_wrt_nonbnd_vc'], {'order': '"""C"""'}), "(dImage_wrt_nonbnd_vc, order='C')\n", (549472, 549505), True, 'import numpy as np\n'), ((566013, 566043), 'OpenGL.GL.glBindVertexArray', 'GL.glBindVertexArray', (['vao_mesh'], {}), 
'(vao_mesh)\n', (566033, 566043), True, 'import OpenGL.GL as GL\n'), ((567466, 567534), 'OpenGL.GL.glUniformMatrix4fv', 'GL.glUniformMatrix4fv', (['self.MVP_texture_location', '(1)', 'GL.GL_TRUE', 'MVP'], {}), '(self.MVP_texture_location, 1, GL.GL_TRUE, MVP)\n', (567487, 567534), True, 'import OpenGL.GL as GL\n'), ((1729, 1741), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1737, 1741), True, 'import numpy as np\n'), ((1916, 1928), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1924, 1928), True, 'import numpy as np\n'), ((2119, 2131), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2127, 2131), True, 'import numpy as np\n'), ((2310, 2322), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2318, 2322), True, 'import numpy as np\n'), ((2488, 2500), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2496, 2500), True, 'import numpy as np\n'), ((2680, 2692), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2688, 2692), True, 'import numpy as np\n'), ((2869, 2881), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2877, 2881), True, 'import numpy as np\n'), ((3055, 3067), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3063, 3067), True, 'import numpy as np\n'), ((3237, 3249), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3245, 3249), True, 'import numpy as np\n'), ((4290, 4305), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4303, 4305), False, 'import pdb\n'), ((9772, 9818), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (9799, 9818), True, 'import OpenGL.GL as GL\n'), ((12827, 12866), 'numpy.arange', 'np.arange', (['self.f.size'], {'dtype': 'np.uint32'}), '(self.f.size, dtype=np.uint32)\n', (12836, 12866), True, 'import numpy as np\n'), ((16290, 16329), 'OpenGL.GL.glValidateProgram', 'GL.glValidateProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (16310, 16329), True, 'import OpenGL.GL as GL\n'), ((16375, 16416), 
'OpenGL.GL.glGetProgramInfoLog', 'GL.glGetProgramInfoLog', (['self.colorProgram'], {}), '(self.colorProgram)\n', (16397, 16416), True, 'import OpenGL.GL as GL\n'), ((16465, 16506), 'OpenGL.GL.glGetInteger', 'GL.glGetInteger', (['GL.GL_MAX_VERTEX_ATTRIBS'], {}), '(GL.GL_MAX_VERTEX_ATTRIBS)\n', (16480, 16506), True, 'import OpenGL.GL as GL\n'), ((18530, 18539), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (18536, 18539), True, 'import numpy as np\n'), ((28219, 28268), 'numpy.asarray', 'np.asarray', (['vc_by_face'], {'dtype': 'np.uint8', 'order': '"""C"""'}), "(vc_by_face, dtype=np.uint8, order='C')\n", (28229, 28268), True, 'import numpy as np\n'), ((28737, 28771), 'numpy.arange', 'np.arange', (['f.size'], {'dtype': 'np.uint32'}), '(f.size, dtype=np.uint32)\n', (28746, 28771), True, 'import numpy as np\n'), ((29545, 29557), 'numpy.fliplr', 'np.fliplr', (['f'], {}), '(f)\n', (29554, 29557), True, 'import numpy as np\n'), ((42728, 42807), 'opendr.common.dr_wrt_bgcolor', 'common.dr_wrt_bgcolor', (['visibility', 'self.frustum'], {'num_channels': 'self.num_channels'}), '(visibility, self.frustum, num_channels=self.num_channels)\n', (42749, 42807), False, 'from opendr import common\n'), ((44697, 44707), 'numpy.min', 'np.min', (['ys'], {}), '(ys)\n', (44703, 44707), True, 'import numpy as np\n'), ((44708, 44718), 'numpy.max', 'np.max', (['ys'], {}), '(ys)\n', (44714, 44718), True, 'import numpy as np\n'), ((44720, 44730), 'numpy.min', 'np.min', (['xs'], {}), '(xs)\n', (44726, 44730), True, 'import numpy as np\n'), ((44731, 44741), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (44737, 44741), True, 'import numpy as np\n'), ((48389, 48401), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (48397, 48401), True, 'import numpy as np\n'), ((48754, 48766), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (48762, 48766), True, 'import numpy as np\n'), ((49061, 49073), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (49069, 49073), True, 'import numpy as np\n'), ((53271, 
53313), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (53299, 53313), True, 'import OpenGL.GL as GL\n'), ((53366, 53442), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (53390, 53442), True, 'import OpenGL.GL as GL\n'), ((53586, 53598), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (53595, 53598), True, 'import OpenGL.GL as GL\n'), ((53620, 53648), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'texture'], {}), '(1, texture)\n', (53636, 53648), True, 'import OpenGL.GL as GL\n'), ((53671, 53714), 'OpenGL.GL.glPixelStorei', 'GL.glPixelStorei', (['GL.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(GL.GL_UNPACK_ALIGNMENT, 1)\n', (53687, 53714), True, 'import OpenGL.GL as GL\n'), ((53734, 53810), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (53752, 53810), True, 'import OpenGL.GL as GL\n'), ((53831, 53926), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 'GL.GL_LINEAR_MIPMAP_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.\n GL_LINEAR_MIPMAP_LINEAR)\n', (53849, 53926), True, 'import OpenGL.GL as GL\n'), ((53942, 54007), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_BASE_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)\n', (53960, 54007), True, 'import OpenGL.GL as GL\n'), ((54028, 54092), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAX_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)\n', (54046, 54092), True, 'import OpenGL.GL as GL\n'), ((54114, 54157), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', 
(['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (54130, 54157), True, 'import OpenGL.GL as GL\n'), ((54294, 54383), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (54311, 54383), True, 'import OpenGL.GL as GL\n'), ((54399, 54512), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (54417, 54512), True, 'import OpenGL.GL as GL\n'), ((59114, 59130), 'numpy.ones_like', 'np.ones_like', (['vc'], {}), '(vc)\n', (59126, 59130), True, 'import numpy as np\n'), ((62336, 62364), 'cv2.imshow', 'cv2.imshow', (['"""clr_im"""', 'clr_im'], {}), "('clr_im', clr_im)\n", (62346, 62364), False, 'import cv2\n'), ((62442, 62456), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (62453, 62456), False, 'import cv2\n'), ((71719, 71755), 'numpy.atleast_3d', 'np.atleast_3d', (['self.visibility_image'], {}), '(self.visibility_image)\n', (71732, 71755), True, 'import numpy as np\n'), ((73383, 73395), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (73391, 73395), True, 'import numpy as np\n'), ((73749, 73761), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (73757, 73761), True, 'import numpy as np\n'), ((74091, 74103), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (74099, 74103), True, 'import numpy as np\n'), ((74429, 74441), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (74437, 74441), True, 'import numpy as np\n'), ((74759, 74771), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (74767, 74771), True, 'import numpy as np\n'), ((79539, 79581), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (79567, 
79581), True, 'import OpenGL.GL as GL\n'), ((79634, 79710), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (79658, 79710), True, 'import OpenGL.GL as GL\n'), ((79854, 79866), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (79863, 79866), True, 'import OpenGL.GL as GL\n'), ((79888, 79916), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'texture'], {}), '(1, texture)\n', (79904, 79916), True, 'import OpenGL.GL as GL\n'), ((79939, 79982), 'OpenGL.GL.glPixelStorei', 'GL.glPixelStorei', (['GL.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(GL.GL_UNPACK_ALIGNMENT, 1)\n', (79955, 79982), True, 'import OpenGL.GL as GL\n'), ((80002, 80078), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (80020, 80078), True, 'import OpenGL.GL as GL\n'), ((80099, 80194), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 'GL.GL_LINEAR_MIPMAP_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.\n GL_LINEAR_MIPMAP_LINEAR)\n', (80117, 80194), True, 'import OpenGL.GL as GL\n'), ((80210, 80275), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_BASE_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)\n', (80228, 80275), True, 'import OpenGL.GL as GL\n'), ((80296, 80360), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAX_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)\n', (80314, 80360), True, 'import OpenGL.GL as GL\n'), ((80382, 80425), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (80398, 80425), True, 'import OpenGL.GL as GL\n'), ((80562, 80651), 
'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (80579, 80651), True, 'import OpenGL.GL as GL\n'), ((80667, 80780), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (80685, 80780), True, 'import OpenGL.GL as GL\n'), ((94984, 95030), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (95011, 95030), True, 'import OpenGL.GL as GL\n'), ((98136, 98182), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (98163, 98182), True, 'import OpenGL.GL as GL\n'), ((101000, 101046), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (101027, 101046), True, 'import OpenGL.GL as GL\n'), ((120902, 120936), 'numpy.arange', 'np.arange', (['f.size'], {'dtype': 'np.uint32'}), '(f.size, dtype=np.uint32)\n', (120911, 120936), True, 'import numpy as np\n'), ((134410, 134436), 'numpy.cross', 'np.cross', (['nt_norm', '(p2 - p1)'], {}), '(nt_norm, p2 - p1)\n', (134418, 134436), True, 'import numpy as np\n'), ((135200, 135237), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (135208, 135237), True, 'import numpy as np\n'), ((135972, 135998), 'numpy.cross', 'np.cross', (['nt_norm', '(p0 - p2)'], {}), '(nt_norm, p0 - p2)\n', (135980, 135998), True, 'import numpy as np\n'), ((136460, 136497), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (136468, 136497), True, 'import numpy as np\n'), 
((137049, 137075), 'numpy.cross', 'np.cross', (['nt_norm', '(p1 - p0)'], {}), '(nt_norm, p1 - p0)\n', (137057, 137075), True, 'import numpy as np\n'), ((137243, 137280), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (137251, 137280), True, 'import numpy as np\n'), ((143234, 143250), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (143241, 143250), True, 'import numpy as np\n'), ((143307, 143323), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (143314, 143323), True, 'import numpy as np\n'), ((143380, 143396), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (143387, 143396), True, 'import numpy as np\n'), ((143894, 143910), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (143901, 143910), True, 'import numpy as np\n'), ((143967, 143983), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (143974, 143983), True, 'import numpy as np\n'), ((144040, 144056), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (144047, 144056), True, 'import numpy as np\n'), ((144485, 144501), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (144492, 144501), True, 'import numpy as np\n'), ((144558, 144574), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (144565, 144574), True, 'import numpy as np\n'), ((144631, 144647), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (144638, 144647), True, 'import numpy as np\n'), ((145101, 145123), 'numpy.sum', 'np.sum', (['(l ** 2)'], {'axis': '(1)'}), '(l ** 2, axis=1)\n', (145107, 145123), True, 'import numpy as np\n'), ((145575, 145599), 'numpy.sum', 'np.sum', (['(l12 ** 2)'], {'axis': '(1)'}), '(l12 ** 2, axis=1)\n', (145581, 145599), True, 'import numpy as np\n'), ((146199, 146231), 'numpy.sum', 'np.sum', (['(lineToPoint ** 2)'], {'axis': '(1)'}), '(lineToPoint ** 2, axis=1)\n', (146205, 146231), True, 'import numpy as np\n'), ((147390, 147414), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], 
{}), '([[0, 0, 0, 1]])\n', (147398, 147414), True, 'import numpy as np\n'), ((147591, 147615), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (147599, 147615), True, 'import numpy as np\n'), ((147703, 147745), 'numpy.ones', 'np.ones', (['[verticesBndOutside.size // 3, 1]'], {}), '([verticesBndOutside.size // 3, 1])\n', (147710, 147745), True, 'import numpy as np\n'), ((148650, 148682), 'numpy.ones', 'np.ones', (['[projPoint.shape[0], 1]'], {}), '([projPoint.shape[0], 1])\n', (148657, 148682), True, 'import numpy as np\n'), ((149864, 149901), 'numpy.array', 'np.array', (['(argminDistNonIntersect == 0)'], {}), '(argminDistNonIntersect == 0)\n', (149872, 149901), True, 'import numpy as np\n'), ((161824, 161861), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'lnorm', 'lnorm'], {}), "('ij,ik->ijk', lnorm, lnorm)\n", (161833, 161861), True, 'import numpy as np\n'), ((162499, 162538), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'n_norm', 'n_norm'], {}), "('ij,ik->ijk', n_norm, n_norm)\n", (162508, 162538), True, 'import numpy as np\n'), ((165152, 165183), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (165161, 165183), True, 'import numpy as np\n'), ((167457, 167488), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (167466, 167488), True, 'import numpy as np\n'), ((168507, 168538), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (168516, 168538), True, 'import numpy as np\n'), ((170781, 170812), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (170790, 170812), True, 'import numpy as np\n'), ((171017, 171066), 'numpy.transpose', 'np.transpose', (['dImage_wrt_bar_v_edge', '[1, 0, 2, 3]'], {}), '(dImage_wrt_bar_v_edge, [1, 0, 2, 3])\n', (171029, 171066), True, 'import numpy as np\n'), ((171832, 171910), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, 
None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (171846, 171910), True, 'import numpy as np\n'), ((172704, 172715), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (172707, 172715), False, 'from chumpy.utils import row, col\n'), ((180951, 180962), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (180954, 180962), False, 'from chumpy.utils import row, col\n'), ((184313, 184344), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (184323, 184344), True, 'import numpy as np\n'), ((199954, 199990), 'numpy.atleast_3d', 'np.atleast_3d', (['self.visibility_image'], {}), '(self.visibility_image)\n', (199967, 199990), True, 'import numpy as np\n'), ((201624, 201636), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (201632, 201636), True, 'import numpy as np\n'), ((201990, 202002), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (201998, 202002), True, 'import numpy as np\n'), ((202332, 202344), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (202340, 202344), True, 'import numpy as np\n'), ((202670, 202682), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (202678, 202682), True, 'import numpy as np\n'), ((203000, 203012), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (203008, 203012), True, 'import numpy as np\n'), ((207780, 207822), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (207808, 207822), True, 'import OpenGL.GL as GL\n'), ((207875, 207951), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (207899, 207951), True, 'import OpenGL.GL as GL\n'), ((208095, 208107), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (208104, 208107), True, 'import OpenGL.GL as GL\n'), ((208129, 208157), 
'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'texture'], {}), '(1, texture)\n', (208145, 208157), True, 'import OpenGL.GL as GL\n'), ((208180, 208223), 'OpenGL.GL.glPixelStorei', 'GL.glPixelStorei', (['GL.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(GL.GL_UNPACK_ALIGNMENT, 1)\n', (208196, 208223), True, 'import OpenGL.GL as GL\n'), ((208243, 208319), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (208261, 208319), True, 'import OpenGL.GL as GL\n'), ((208340, 208435), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 'GL.GL_LINEAR_MIPMAP_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.\n GL_LINEAR_MIPMAP_LINEAR)\n', (208358, 208435), True, 'import OpenGL.GL as GL\n'), ((208451, 208516), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_BASE_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)\n', (208469, 208516), True, 'import OpenGL.GL as GL\n'), ((208537, 208601), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAX_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)\n', (208555, 208601), True, 'import OpenGL.GL as GL\n'), ((208623, 208666), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (208639, 208666), True, 'import OpenGL.GL as GL\n'), ((208803, 208892), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (208820, 208892), True, 'import OpenGL.GL as GL\n'), ((208908, 209021), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 
'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (208926, 209021), True, 'import OpenGL.GL as GL\n'), ((223217, 223263), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (223244, 223263), True, 'import OpenGL.GL as GL\n'), ((226369, 226415), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (226396, 226415), True, 'import OpenGL.GL as GL\n'), ((229233, 229279), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (229260, 229279), True, 'import OpenGL.GL as GL\n'), ((249132, 249166), 'numpy.arange', 'np.arange', (['f.size'], {'dtype': 'np.uint32'}), '(f.size, dtype=np.uint32)\n', (249141, 249166), True, 'import numpy as np\n'), ((263107, 263133), 'numpy.cross', 'np.cross', (['nt_norm', '(p2 - p1)'], {}), '(nt_norm, p2 - p1)\n', (263115, 263133), True, 'import numpy as np\n'), ((263897, 263934), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (263905, 263934), True, 'import numpy as np\n'), ((264669, 264695), 'numpy.cross', 'np.cross', (['nt_norm', '(p0 - p2)'], {}), '(nt_norm, p0 - p2)\n', (264677, 264695), True, 'import numpy as np\n'), ((265157, 265194), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (265165, 265194), True, 'import numpy as np\n'), ((265746, 265772), 'numpy.cross', 'np.cross', (['nt_norm', '(p1 - p0)'], {}), '(nt_norm, p1 - p0)\n', (265754, 265772), True, 'import numpy as np\n'), ((265940, 265977), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (265948, 265977), True, 'import numpy as np\n'), ((271931, 271947), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (271938, 
271947), True, 'import numpy as np\n'), ((272004, 272020), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (272011, 272020), True, 'import numpy as np\n'), ((272077, 272093), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (272084, 272093), True, 'import numpy as np\n'), ((272591, 272607), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (272598, 272607), True, 'import numpy as np\n'), ((272664, 272680), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (272671, 272680), True, 'import numpy as np\n'), ((272737, 272753), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (272744, 272753), True, 'import numpy as np\n'), ((273182, 273198), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (273189, 273198), True, 'import numpy as np\n'), ((273255, 273271), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (273262, 273271), True, 'import numpy as np\n'), ((273328, 273344), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (273335, 273344), True, 'import numpy as np\n'), ((273798, 273820), 'numpy.sum', 'np.sum', (['(l ** 2)'], {'axis': '(1)'}), '(l ** 2, axis=1)\n', (273804, 273820), True, 'import numpy as np\n'), ((274272, 274296), 'numpy.sum', 'np.sum', (['(l12 ** 2)'], {'axis': '(1)'}), '(l12 ** 2, axis=1)\n', (274278, 274296), True, 'import numpy as np\n'), ((274896, 274928), 'numpy.sum', 'np.sum', (['(lineToPoint ** 2)'], {'axis': '(1)'}), '(lineToPoint ** 2, axis=1)\n', (274902, 274928), True, 'import numpy as np\n'), ((276087, 276111), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (276095, 276111), True, 'import numpy as np\n'), ((276288, 276312), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (276296, 276312), True, 'import numpy as np\n'), ((276400, 276442), 'numpy.ones', 'np.ones', (['[verticesBndOutside.size // 3, 1]'], {}), '([verticesBndOutside.size // 3, 1])\n', (276407, 276442), True, 'import numpy as np\n'), ((277347, 
277379), 'numpy.ones', 'np.ones', (['[projPoint.shape[0], 1]'], {}), '([projPoint.shape[0], 1])\n', (277354, 277379), True, 'import numpy as np\n'), ((278561, 278598), 'numpy.array', 'np.array', (['(argminDistNonIntersect == 0)'], {}), '(argminDistNonIntersect == 0)\n', (278569, 278598), True, 'import numpy as np\n'), ((290521, 290558), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'lnorm', 'lnorm'], {}), "('ij,ik->ijk', lnorm, lnorm)\n", (290530, 290558), True, 'import numpy as np\n'), ((291196, 291235), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'n_norm', 'n_norm'], {}), "('ij,ik->ijk', n_norm, n_norm)\n", (291205, 291235), True, 'import numpy as np\n'), ((293849, 293880), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (293858, 293880), True, 'import numpy as np\n'), ((296154, 296185), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (296163, 296185), True, 'import numpy as np\n'), ((297204, 297235), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (297213, 297235), True, 'import numpy as np\n'), ((299478, 299509), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (299487, 299509), True, 'import numpy as np\n'), ((299714, 299763), 'numpy.transpose', 'np.transpose', (['dImage_wrt_bar_v_edge', '[1, 0, 2, 3]'], {}), '(dImage_wrt_bar_v_edge, [1, 0, 2, 3])\n', (299726, 299763), True, 'import numpy as np\n'), ((300529, 300607), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (300543, 300607), True, 'import numpy as np\n'), ((301401, 301412), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (301404, 301412), False, 'from chumpy.utils import row, col\n'), ((309648, 309659), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (309651, 309659), 
False, 'from chumpy.utils import row, col\n'), ((313010, 313041), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (313020, 313041), True, 'import numpy as np\n'), ((328651, 328687), 'numpy.atleast_3d', 'np.atleast_3d', (['self.visibility_image'], {}), '(self.visibility_image)\n', (328664, 328687), True, 'import numpy as np\n'), ((330314, 330326), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (330322, 330326), True, 'import numpy as np\n'), ((330680, 330692), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (330688, 330692), True, 'import numpy as np\n'), ((331022, 331034), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (331030, 331034), True, 'import numpy as np\n'), ((331360, 331372), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (331368, 331372), True, 'import numpy as np\n'), ((331690, 331702), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (331698, 331702), True, 'import numpy as np\n'), ((336475, 336517), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (336503, 336517), True, 'import OpenGL.GL as GL\n'), ((336571, 336647), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (336595, 336647), True, 'import OpenGL.GL as GL\n'), ((336792, 336804), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (336801, 336804), True, 'import OpenGL.GL as GL\n'), ((336826, 336854), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'texture'], {}), '(1, texture)\n', (336842, 336854), True, 'import OpenGL.GL as GL\n'), ((336875, 336918), 'OpenGL.GL.glPixelStorei', 'GL.glPixelStorei', (['GL.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(GL.GL_UNPACK_ALIGNMENT, 1)\n', (336891, 336918), True, 'import OpenGL.GL as GL\n'), ((336939, 337015), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', 
(['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (336957, 337015), True, 'import OpenGL.GL as GL\n'), ((337036, 337131), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 'GL.GL_LINEAR_MIPMAP_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.\n GL_LINEAR_MIPMAP_LINEAR)\n', (337054, 337131), True, 'import OpenGL.GL as GL\n'), ((337147, 337212), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_BASE_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)\n', (337165, 337212), True, 'import OpenGL.GL as GL\n'), ((337233, 337297), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAX_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)\n', (337251, 337297), True, 'import OpenGL.GL as GL\n'), ((337319, 337362), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (337335, 337362), True, 'import OpenGL.GL as GL\n'), ((337499, 337588), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (337516, 337588), True, 'import OpenGL.GL as GL\n'), ((337604, 337717), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (337622, 337717), True, 'import OpenGL.GL as GL\n'), ((352173, 352219), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (352200, 352219), True, 'import OpenGL.GL as GL\n'), ((355325, 355371), 
'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (355352, 355371), True, 'import OpenGL.GL as GL\n'), ((358190, 358236), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (358217, 358236), True, 'import OpenGL.GL as GL\n'), ((378684, 378718), 'numpy.arange', 'np.arange', (['f.size'], {'dtype': 'np.uint32'}), '(f.size, dtype=np.uint32)\n', (378693, 378718), True, 'import numpy as np\n'), ((392568, 392594), 'numpy.cross', 'np.cross', (['nt_norm', '(p2 - p1)'], {}), '(nt_norm, p2 - p1)\n', (392576, 392594), True, 'import numpy as np\n'), ((393363, 393400), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (393371, 393400), True, 'import numpy as np\n'), ((394184, 394210), 'numpy.cross', 'np.cross', (['nt_norm', '(p0 - p2)'], {}), '(nt_norm, p0 - p2)\n', (394192, 394210), True, 'import numpy as np\n'), ((394691, 394728), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (394699, 394728), True, 'import numpy as np\n'), ((395314, 395340), 'numpy.cross', 'np.cross', (['nt_norm', '(p1 - p0)'], {}), '(nt_norm, p1 - p0)\n', (395322, 395340), True, 'import numpy as np\n'), ((395510, 395547), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (395518, 395547), True, 'import numpy as np\n'), ((399806, 399822), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (399813, 399822), True, 'import numpy as np\n'), ((399882, 399898), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (399889, 399898), True, 'import numpy as np\n'), ((399958, 399974), 'numpy.ones', 'np.ones', (['[nv, 1]'], {}), '([nv, 1])\n', (399965, 399974), True, 'import numpy as np\n'), ((400439, 400461), 'numpy.sum', 'np.sum', (['(l ** 2)'], {'axis': '(1)'}), '(l ** 2, axis=1)\n', (400445, 
400461), True, 'import numpy as np\n'), ((400877, 400901), 'numpy.sum', 'np.sum', (['(l12 ** 2)'], {'axis': '(1)'}), '(l12 ** 2, axis=1)\n', (400883, 400901), True, 'import numpy as np\n'), ((401528, 401560), 'numpy.sum', 'np.sum', (['(lineToPoint ** 2)'], {'axis': '(1)'}), '(lineToPoint ** 2, axis=1)\n', (401534, 401560), True, 'import numpy as np\n'), ((414116, 414153), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'lnorm', 'lnorm'], {}), "('ij,ik->ijk', lnorm, lnorm)\n", (414125, 414153), True, 'import numpy as np\n'), ((414841, 414880), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'n_norm', 'n_norm'], {}), "('ij,ik->ijk', n_norm, n_norm)\n", (414850, 414880), True, 'import numpy as np\n'), ((419018, 419049), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (419027, 419049), True, 'import numpy as np\n'), ((421819, 421850), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (421828, 421850), True, 'import numpy as np\n'), ((422066, 422110), 'numpy.transpose', 'np.transpose', (['dImage_wrt_bar_v', '[1, 0, 2, 3]'], {}), '(dImage_wrt_bar_v, [1, 0, 2, 3])\n', (422078, 422110), True, 'import numpy as np\n'), ((422876, 422954), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (422890, 422954), True, 'import numpy as np\n'), ((423901, 423912), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (423904, 423912), False, 'from chumpy.utils import row, col\n'), ((428140, 428151), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (428143, 428151), False, 'from chumpy.utils import row, col\n'), ((431189, 431220), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (431199, 431220), True, 'import numpy as np\n'), ((447521, 447557), 'numpy.atleast_3d', 'np.atleast_3d', (['self.visibility_image'], 
{}), '(self.visibility_image)\n', (447534, 447557), True, 'import numpy as np\n'), ((449210, 449222), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (449218, 449222), True, 'import numpy as np\n'), ((449576, 449588), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (449584, 449588), True, 'import numpy as np\n'), ((449918, 449930), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (449926, 449930), True, 'import numpy as np\n'), ((450256, 450268), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (450264, 450268), True, 'import numpy as np\n'), ((450586, 450598), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (450594, 450598), True, 'import numpy as np\n'), ((455400, 455442), 'OpenGL.GL.glEnableVertexAttribArray', 'GL.glEnableVertexAttribArray', (['uvs_location'], {}), '(uvs_location)\n', (455428, 455442), True, 'import OpenGL.GL as GL\n'), ((455496, 455572), 'OpenGL.GL.glVertexAttribPointer', 'GL.glVertexAttribPointer', (['uvs_location', '(2)', 'GL.GL_FLOAT', 'GL.GL_FALSE', '(0)', 'None'], {}), '(uvs_location, 2, GL.GL_FLOAT, GL.GL_FALSE, 0, None)\n', (455520, 455572), True, 'import OpenGL.GL as GL\n'), ((455717, 455729), 'OpenGL.GL.GLuint', 'GL.GLuint', (['(0)'], {}), '(0)\n', (455726, 455729), True, 'import OpenGL.GL as GL\n'), ((455751, 455779), 'OpenGL.GL.glGenTextures', 'GL.glGenTextures', (['(1)', 'texture'], {}), '(1, texture)\n', (455767, 455779), True, 'import OpenGL.GL as GL\n'), ((455800, 455843), 'OpenGL.GL.glPixelStorei', 'GL.glPixelStorei', (['GL.GL_UNPACK_ALIGNMENT', '(1)'], {}), '(GL.GL_UNPACK_ALIGNMENT, 1)\n', (455816, 455843), True, 'import OpenGL.GL as GL\n'), ((455864, 455940), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAG_FILTER', 'GL.GL_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n', (455882, 455940), True, 'import OpenGL.GL as GL\n'), ((455961, 456056), 'OpenGL.GL.glTexParameterf', 'GL.glTexParameterf', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MIN_FILTER', 
'GL.GL_LINEAR_MIPMAP_LINEAR'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.\n GL_LINEAR_MIPMAP_LINEAR)\n', (455979, 456056), True, 'import OpenGL.GL as GL\n'), ((456072, 456137), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_BASE_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_BASE_LEVEL, 0)\n', (456090, 456137), True, 'import OpenGL.GL as GL\n'), ((456158, 456222), 'OpenGL.GL.glTexParameteri', 'GL.glTexParameteri', (['GL.GL_TEXTURE_2D', 'GL.GL_TEXTURE_MAX_LEVEL', '(0)'], {}), '(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAX_LEVEL, 0)\n', (456176, 456222), True, 'import OpenGL.GL as GL\n'), ((456244, 456287), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (456260, 456287), True, 'import OpenGL.GL as GL\n'), ((456424, 456513), 'OpenGL.GL.glTexStorage2D', 'GL.glTexStorage2D', (['GL.GL_TEXTURE_2D', '(1)', 'GL.GL_RGB32F', 'image.shape[1]', 'image.shape[0]'], {}), '(GL.GL_TEXTURE_2D, 1, GL.GL_RGB32F, image.shape[1], image.\n shape[0])\n', (456441, 456513), True, 'import OpenGL.GL as GL\n'), ((456529, 456642), 'OpenGL.GL.glTexSubImage2D', 'GL.glTexSubImage2D', (['GL.GL_TEXTURE_2D', '(0)', '(0)', '(0)', 'image.shape[1]', 'image.shape[0]', 'GL.GL_RGB', 'GL.GL_FLOAT', 'image'], {}), '(GL.GL_TEXTURE_2D, 0, 0, 0, image.shape[1], image.shape[0\n ], GL.GL_RGB, GL.GL_FLOAT, image)\n', (456547, 456642), True, 'import OpenGL.GL as GL\n'), ((472261, 472307), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (472288, 472307), True, 'import OpenGL.GL as GL\n'), ((475413, 475459), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (475440, 475459), True, 'import OpenGL.GL as GL\n'), ((478278, 478324), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', 
(478305, 478324), True, 'import OpenGL.GL as GL\n'), ((498773, 498807), 'numpy.arange', 'np.arange', (['f.size'], {'dtype': 'np.uint32'}), '(f.size, dtype=np.uint32)\n', (498782, 498807), True, 'import numpy as np\n'), ((513061, 513087), 'numpy.cross', 'np.cross', (['nt_norm', '(p2 - p1)'], {}), '(nt_norm, p2 - p1)\n', (513069, 513087), True, 'import numpy as np\n'), ((513856, 513893), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (513864, 513893), True, 'import numpy as np\n'), ((514677, 514703), 'numpy.cross', 'np.cross', (['nt_norm', '(p0 - p2)'], {}), '(nt_norm, p0 - p2)\n', (514685, 514703), True, 'import numpy as np\n'), ((515184, 515221), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (515192, 515221), True, 'import numpy as np\n'), ((515807, 515833), 'numpy.cross', 'np.cross', (['nt_norm', '(p1 - p0)'], {}), '(nt_norm, p1 - p0)\n', (515815, 515833), True, 'import numpy as np\n'), ((516003, 516040), 'numpy.cross', 'np.cross', (['nt_norm[:, None, :]', '(-ident)'], {}), '(nt_norm[:, None, :], -ident)\n', (516011, 516040), True, 'import numpy as np\n'), ((520949, 520971), 'numpy.sum', 'np.sum', (['(l ** 2)'], {'axis': '(1)'}), '(l ** 2, axis=1)\n', (520955, 520971), True, 'import numpy as np\n'), ((521387, 521411), 'numpy.sum', 'np.sum', (['(l12 ** 2)'], {'axis': '(1)'}), '(l12 ** 2, axis=1)\n', (521393, 521411), True, 'import numpy as np\n'), ((522038, 522070), 'numpy.sum', 'np.sum', (['(lineToPoint ** 2)'], {'axis': '(1)'}), '(lineToPoint ** 2, axis=1)\n', (522044, 522070), True, 'import numpy as np\n'), ((534769, 534806), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'lnorm', 'lnorm'], {}), "('ij,ik->ijk', lnorm, lnorm)\n", (534778, 534806), True, 'import numpy as np\n'), ((535494, 535533), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'n_norm', 'n_norm'], {}), "('ij,ik->ijk', n_norm, n_norm)\n", (535503, 535533), True, 'import numpy 
as np\n'), ((539671, 539702), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (539680, 539702), True, 'import numpy as np\n'), ((542472, 542503), 'numpy.hstack', 'np.hstack', (['(JS * 2, JS * 2 + 1)'], {}), '((JS * 2, JS * 2 + 1))\n', (542481, 542503), True, 'import numpy as np\n'), ((542719, 542763), 'numpy.transpose', 'np.transpose', (['dImage_wrt_bar_v', '[1, 0, 2, 3]'], {}), '(dImage_wrt_bar_v, [1, 0, 2, 3])\n', (542731, 542763), True, 'import numpy as np\n'), ((543529, 543607), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (543543, 543607), True, 'import numpy as np\n'), ((544554, 544565), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (544557, 544565), False, 'from chumpy.utils import row, col\n'), ((548933, 548944), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (548936, 548944), False, 'from chumpy.utils import row, col\n'), ((551982, 552013), 'numpy.asarray', 'np.asarray', (['fc'], {'dtype': 'np.uint32'}), '(fc, dtype=np.uint32)\n', (551992, 552013), True, 'import numpy as np\n'), ((568761, 568797), 'numpy.atleast_3d', 'np.atleast_3d', (['self.visibility_image'], {}), '(self.visibility_image)\n', (568774, 568797), True, 'import numpy as np\n'), ((8400, 8446), 'OpenGL.GL.glCheckFramebufferStatus', 'GL.glCheckFramebufferStatus', (['GL.GL_FRAMEBUFFER'], {}), '(GL.GL_FRAMEBUFFER)\n', (8427, 8446), True, 'import OpenGL.GL as GL\n'), ((19848, 19917), 'numpy.vstack', 'np.vstack', (['(vc[:, 0], vc[:, 1 % vc.shape[1]], vc[:, 2 % vc.shape[1]])'], {}), '((vc[:, 0], vc[:, 1 % vc.shape[1]], vc[:, 2 % vc.shape[1]]))\n', (19857, 19917), True, 'import numpy as np\n'), ((20180, 20202), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (20188, 20202), True, 'import numpy as np\n'), ((28974, 28996), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), 
'([0, 0, 0, 1])\n', (28982, 28996), True, 'import numpy as np\n'), ((29844, 29878), 'numpy.arange', 'np.arange', (['f.size'], {'dtype': 'np.uint32'}), '(f.size, dtype=np.uint32)\n', (29853, 29878), True, 'import numpy as np\n'), ((36070, 36126), 'numpy.ones', 'np.ones', (["(self.frustum['height'], self.frustum['width'])"], {}), "((self.frustum['height'], self.frustum['width']))\n", (36077, 36126), True, 'import numpy as np\n'), ((38828, 38850), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (38836, 38850), True, 'import numpy as np\n'), ((51857, 51884), 'numpy.array', 'np.array', (['self.v_list[mesh]'], {}), '(self.v_list[mesh])\n', (51865, 51884), True, 'import numpy as np\n'), ((51938, 51966), 'numpy.array', 'np.array', (['self.vc_list[mesh]'], {}), '(self.vc_list[mesh])\n', (51946, 51966), True, 'import numpy as np\n'), ((52017, 52045), 'numpy.array', 'np.array', (['self.ft_list[mesh]'], {}), '(self.ft_list[mesh])\n', (52025, 52045), True, 'import numpy as np\n'), ((54196, 54241), 'numpy.flipud', 'np.flipud', (['self.textures_list[mesh][polygons]'], {}), '(self.textures_list[mesh][polygons])\n', (54205, 54241), True, 'import numpy as np\n'), ((59364, 59386), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (59372, 59386), True, 'import numpy as np\n'), ((63578, 63617), 'numpy.arange', 'np.arange', (['self.f.size'], {'dtype': 'np.uint32'}), '(self.f.size, dtype=np.uint32)\n', (63587, 63617), True, 'import numpy as np\n'), ((69069, 69091), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (69077, 69091), True, 'import numpy as np\n'), ((69480, 69496), 'numpy.ones_like', 'np.ones_like', (['vc'], {}), '(vc)\n', (69492, 69496), True, 'import numpy as np\n'), ((70165, 70206), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (70180, 70206), True, 'import OpenGL.GL as GL\n'), ((70307, 70341), 'OpenGL.GL.glActiveTexture', 
'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (70325, 70341), True, 'import OpenGL.GL as GL\n'), ((70366, 70409), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (70382, 70409), True, 'import OpenGL.GL as GL\n'), ((70434, 70467), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureID', '(0)'], {}), '(self.textureID, 0)\n', (70448, 70467), True, 'import OpenGL.GL as GL\n'), ((70518, 70552), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (70533, 70552), True, 'import OpenGL.GL as GL\n'), ((80464, 80509), 'numpy.flipud', 'np.flipud', (['self.textures_list[mesh][polygons]'], {}), '(self.textures_list[mesh][polygons])\n', (80473, 80509), True, 'import numpy as np\n'), ((106210, 106219), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (106216, 106219), True, 'import numpy as np\n'), ((108136, 108158), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (108144, 108158), True, 'import numpy as np\n'), ((121245, 121267), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (121253, 121267), True, 'import numpy as np\n'), ((122022, 122101), 'opendr.common.dr_wrt_bgcolor', 'common.dr_wrt_bgcolor', (['visibility', 'self.frustum'], {'num_channels': 'self.num_channels'}), '(visibility, self.frustum, num_channels=self.num_channels)\n', (122043, 122101), False, 'from opendr import common\n'), ((122875, 122900), 'numpy.concatenate', 'np.concatenate', (['(r, g, b)'], {}), '((r, g, b))\n', (122889, 122900), True, 'import numpy as np\n'), ((122917, 122965), 'numpy.concatenate', 'np.concatenate', (['(IS * 3, IS * 3 + 1, IS * 3 + 2)'], {}), '((IS * 3, IS * 3 + 1, IS * 3 + 2))\n', (122931, 122965), True, 'import numpy as np\n'), ((122973, 123021), 'numpy.concatenate', 'np.concatenate', (['(JS * 3, JS * 3 + 1, JS * 3 + 2)'], {}), '((JS * 3, JS * 3 + 1, JS * 3 + 2))\n', (122987, 123021), True, 'import 
numpy as np\n'), ((123032, 123096), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, (IS, JS))'], {'shape': '(self.r.size, wrt.r.size)'}), '((data, (IS, JS)), shape=(self.r.size, wrt.r.size))\n', (123045, 123096), True, 'import scipy.sparse as sp\n'), ((126366, 126385), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (126374, 126385), True, 'import numpy as np\n'), ((130267, 130286), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (130275, 130286), True, 'import numpy as np\n'), ((143448, 143526), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (143462, 143526), True, 'import numpy as np\n'), ((144111, 144189), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (144125, 144189), True, 'import numpy as np\n'), ((144701, 144779), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (144715, 144779), True, 'import numpy as np\n'), ((147270, 147294), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (147278, 147294), True, 'import numpy as np\n'), ((148023, 148057), 'numpy.sum', 'np.sum', (['(projVerticesBndDir ** 2)', '(1)'], {}), '(projVerticesBndDir ** 2, 1)\n', (148029, 148057), True, 'import numpy as np\n'), ((162904, 162919), 'numpy.sign', 'np.sign', (['n_norm'], {}), '(n_norm)\n', (162911, 162919), True, 'import numpy as np\n'), ((163045, 163060), 'numpy.sign', 'np.sign', (['n_norm'], {}), '(n_norm)\n', (163052, 163060), True, 'import numpy as np\n'), ((163627, 163650), 'numpy.sum', 'np.sum', (['(v1 ** 2)'], {'axis': '(1)'}), '(v1 ** 2, axis=1)\n', (163633, 163650), True, 'import numpy as 
np\n'), ((163809, 163832), 'numpy.sum', 'np.sum', (['(v2 ** 2)'], {'axis': '(1)'}), '(v2 ** 2, axis=1)\n', (163815, 163832), True, 'import numpy as np\n'), ((164798, 164809), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (164801, 164809), False, 'from chumpy.utils import row, col\n'), ((167312, 167323), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (167315, 167323), False, 'from chumpy.utils import row, col\n'), ((168363, 168374), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (168366, 168374), False, 'from chumpy.utils import row, col\n'), ((169721, 169785), 'numpy.tile', 'np.tile', (['f[frontFacingEdgeFaces][None, :]', '[self.nsamples, 1, 1]'], {}), '(f[frontFacingEdgeFaces][None, :], [self.nsamples, 1, 1])\n', (169728, 169785), True, 'import numpy as np\n'), ((170441, 170452), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (170444, 170452), False, 'from chumpy.utils import row, col\n'), ((177262, 177273), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (177265, 177273), False, 'from chumpy.utils import row, col\n'), ((178405, 178416), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (178408, 178416), False, 'from chumpy.utils import row, col\n'), ((179615, 179626), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (179618, 179626), False, 'from chumpy.utils import row, col\n'), ((183703, 183742), 'numpy.arange', 'np.arange', (['self.f.size'], {'dtype': 'np.uint32'}), '(self.f.size, dtype=np.uint32)\n', (183712, 183742), True, 'import numpy as np\n'), ((192114, 192136), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (192122, 192136), True, 'import numpy as np\n'), ((192640, 192668), 'numpy.ones_like', 'np.ones_like', (['colors_by_face'], {}), '(colors_by_face)\n', (192652, 192668), True, 'import numpy as np\n'), ((196920, 196942), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (196928, 196942), True, 'import numpy as np\n'), ((198319, 
198360), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (198334, 198360), True, 'import OpenGL.GL as GL\n'), ((198461, 198495), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (198479, 198495), True, 'import OpenGL.GL as GL\n'), ((198520, 198563), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (198536, 198563), True, 'import OpenGL.GL as GL\n'), ((198588, 198621), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureID', '(0)'], {}), '(self.textureID, 0)\n', (198602, 198621), True, 'import OpenGL.GL as GL\n'), ((198672, 198706), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (198687, 198706), True, 'import OpenGL.GL as GL\n'), ((208705, 208750), 'numpy.flipud', 'np.flipud', (['self.textures_list[mesh][polygons]'], {}), '(self.textures_list[mesh][polygons])\n', (208714, 208750), True, 'import numpy as np\n'), ((234440, 234449), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (234446, 234449), True, 'import numpy as np\n'), ((236366, 236388), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (236374, 236388), True, 'import numpy as np\n'), ((249475, 249497), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (249483, 249497), True, 'import numpy as np\n'), ((250252, 250331), 'opendr.common.dr_wrt_bgcolor', 'common.dr_wrt_bgcolor', (['visibility', 'self.frustum'], {'num_channels': 'self.num_channels'}), '(visibility, self.frustum, num_channels=self.num_channels)\n', (250273, 250331), False, 'from opendr import common\n'), ((251105, 251130), 'numpy.concatenate', 'np.concatenate', (['(r, g, b)'], {}), '((r, g, b))\n', (251119, 251130), True, 'import numpy as np\n'), ((251147, 251195), 'numpy.concatenate', 'np.concatenate', (['(IS * 3, IS * 3 + 1, IS * 3 + 2)'], {}), '((IS * 3, IS * 3 + 1, 
IS * 3 + 2))\n', (251161, 251195), True, 'import numpy as np\n'), ((251203, 251251), 'numpy.concatenate', 'np.concatenate', (['(JS * 3, JS * 3 + 1, JS * 3 + 2)'], {}), '((JS * 3, JS * 3 + 1, JS * 3 + 2))\n', (251217, 251251), True, 'import numpy as np\n'), ((251262, 251326), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, (IS, JS))'], {'shape': '(self.r.size, wrt.r.size)'}), '((data, (IS, JS)), shape=(self.r.size, wrt.r.size))\n', (251275, 251326), True, 'import scipy.sparse as sp\n'), ((255063, 255082), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (255071, 255082), True, 'import numpy as np\n'), ((258964, 258983), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (258972, 258983), True, 'import numpy as np\n'), ((272145, 272223), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (272159, 272223), True, 'import numpy as np\n'), ((272808, 272886), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (272822, 272886), True, 'import numpy as np\n'), ((273398, 273476), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (273412, 273476), True, 'import numpy as np\n'), ((275967, 275991), 'numpy.array', 'np.array', (['[[0, 0, 0, 1]]'], {}), '([[0, 0, 0, 1]])\n', (275975, 275991), True, 'import numpy as np\n'), ((276720, 276754), 'numpy.sum', 'np.sum', (['(projVerticesBndDir ** 2)', '(1)'], {}), '(projVerticesBndDir ** 2, 1)\n', (276726, 276754), True, 'import numpy as np\n'), ((291601, 291616), 'numpy.sign', 'np.sign', (['n_norm'], {}), '(n_norm)\n', (291608, 291616), True, 'import numpy as np\n'), ((291742, 291757), 
'numpy.sign', 'np.sign', (['n_norm'], {}), '(n_norm)\n', (291749, 291757), True, 'import numpy as np\n'), ((292324, 292347), 'numpy.sum', 'np.sum', (['(v1 ** 2)'], {'axis': '(1)'}), '(v1 ** 2, axis=1)\n', (292330, 292347), True, 'import numpy as np\n'), ((292506, 292529), 'numpy.sum', 'np.sum', (['(v2 ** 2)'], {'axis': '(1)'}), '(v2 ** 2, axis=1)\n', (292512, 292529), True, 'import numpy as np\n'), ((293495, 293506), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (293498, 293506), False, 'from chumpy.utils import row, col\n'), ((296009, 296020), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (296012, 296020), False, 'from chumpy.utils import row, col\n'), ((297060, 297071), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (297063, 297071), False, 'from chumpy.utils import row, col\n'), ((298418, 298482), 'numpy.tile', 'np.tile', (['f[frontFacingEdgeFaces][None, :]', '[self.nsamples, 1, 1]'], {}), '(f[frontFacingEdgeFaces][None, :], [self.nsamples, 1, 1])\n', (298425, 298482), True, 'import numpy as np\n'), ((299138, 299149), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (299141, 299149), False, 'from chumpy.utils import row, col\n'), ((305959, 305970), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (305962, 305970), False, 'from chumpy.utils import row, col\n'), ((307102, 307113), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (307105, 307113), False, 'from chumpy.utils import row, col\n'), ((308312, 308323), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (308315, 308323), False, 'from chumpy.utils import row, col\n'), ((312400, 312439), 'numpy.arange', 'np.arange', (['self.f.size'], {'dtype': 'np.uint32'}), '(self.f.size, dtype=np.uint32)\n', (312409, 312439), True, 'import numpy as np\n'), ((320811, 320833), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (320819, 320833), True, 'import numpy as np\n'), ((321337, 321365), 'numpy.ones_like', 'np.ones_like', 
(['colors_by_face'], {}), '(colors_by_face)\n', (321349, 321365), True, 'import numpy as np\n'), ((325617, 325639), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (325625, 325639), True, 'import numpy as np\n'), ((327016, 327057), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (327031, 327057), True, 'import OpenGL.GL as GL\n'), ((327158, 327192), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (327176, 327192), True, 'import OpenGL.GL as GL\n'), ((327217, 327260), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (327233, 327260), True, 'import OpenGL.GL as GL\n'), ((327285, 327318), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureID', '(0)'], {}), '(self.textureID, 0)\n', (327299, 327318), True, 'import OpenGL.GL as GL\n'), ((327369, 327403), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (327384, 327403), True, 'import OpenGL.GL as GL\n'), ((337401, 337446), 'numpy.flipud', 'np.flipud', (['self.textures_list[mesh][polygons]'], {}), '(self.textures_list[mesh][polygons])\n', (337410, 337446), True, 'import numpy as np\n'), ((363398, 363407), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (363404, 363407), True, 'import numpy as np\n'), ((365682, 365704), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (365690, 365704), True, 'import numpy as np\n'), ((379027, 379049), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (379035, 379049), True, 'import numpy as np\n'), ((379804, 379883), 'opendr.common.dr_wrt_bgcolor', 'common.dr_wrt_bgcolor', (['visibility', 'self.frustum'], {'num_channels': 'self.num_channels'}), '(visibility, self.frustum, num_channels=self.num_channels)\n', (379825, 379883), False, 'from opendr import common\n'), ((380664, 380689), 
'numpy.concatenate', 'np.concatenate', (['(r, g, b)'], {}), '((r, g, b))\n', (380678, 380689), True, 'import numpy as np\n'), ((380708, 380756), 'numpy.concatenate', 'np.concatenate', (['(IS * 3, IS * 3 + 1, IS * 3 + 2)'], {}), '((IS * 3, IS * 3 + 1, IS * 3 + 2))\n', (380722, 380756), True, 'import numpy as np\n'), ((380774, 380822), 'numpy.concatenate', 'np.concatenate', (['(JS * 3, JS * 3 + 1, JS * 3 + 2)'], {}), '((JS * 3, JS * 3 + 1, JS * 3 + 2))\n', (380788, 380822), True, 'import numpy as np\n'), ((380843, 380907), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, (IS, JS))'], {'shape': '(self.r.size, wrt.r.size)'}), '((data, (IS, JS)), shape=(self.r.size, wrt.r.size))\n', (380856, 380907), True, 'import scipy.sparse as sp\n'), ((384412, 384431), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (384420, 384431), True, 'import numpy as np\n'), ((388311, 388330), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (388319, 388330), True, 'import numpy as np\n'), ((400022, 400100), 'numpy.concatenate', 'np.concatenate', (['[p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]]'], {'axis': '(1)'}), '([p0_proj[:, None], p1_proj[:, None], p2_proj[:, None]], axis=1)\n', (400036, 400100), True, 'import numpy as np\n'), ((416010, 416033), 'numpy.sum', 'np.sum', (['(v1 ** 2)'], {'axis': '(1)'}), '(v1 ** 2, axis=1)\n', (416016, 416033), True, 'import numpy as np\n'), ((416200, 416223), 'numpy.sum', 'np.sum', (['(v2 ** 2)'], {'axis': '(1)'}), '(v2 ** 2, axis=1)\n', (416206, 416223), True, 'import numpy as np\n'), ((416931, 416990), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (416938, 416990), True, 'import numpy as np\n'), ((417412, 417471), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (417419, 417471), True, 'import numpy as np\n'), 
((418740, 418751), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (418743, 418751), False, 'from chumpy.utils import row, col\n'), ((421689, 421700), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (421692, 421700), False, 'from chumpy.utils import row, col\n'), ((427219, 427230), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (427222, 427230), False, 'from chumpy.utils import row, col\n'), ((430579, 430618), 'numpy.arange', 'np.arange', (['self.f.size'], {'dtype': 'np.uint32'}), '(self.f.size, dtype=np.uint32)\n', (430588, 430618), True, 'import numpy as np\n'), ((439540, 439562), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (439548, 439562), True, 'import numpy as np\n'), ((440067, 440095), 'numpy.ones_like', 'np.ones_like', (['colors_by_face'], {}), '(colors_by_face)\n', (440079, 440095), True, 'import numpy as np\n'), ((444419, 444441), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (444427, 444441), True, 'import numpy as np\n'), ((445821, 445862), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (445836, 445862), True, 'import OpenGL.GL as GL\n'), ((445963, 445997), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (445981, 445997), True, 'import OpenGL.GL as GL\n'), ((446022, 446065), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (446038, 446065), True, 'import OpenGL.GL as GL\n'), ((446090, 446123), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureID', '(0)'], {}), '(self.textureID, 0)\n', (446104, 446123), True, 'import OpenGL.GL as GL\n'), ((446174, 446208), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (446189, 446208), True, 'import OpenGL.GL as GL\n'), ((456326, 456371), 'numpy.flipud', 'np.flipud', 
(['self.textures_list[mesh][polygons]'], {}), '(self.textures_list[mesh][polygons])\n', (456335, 456371), True, 'import numpy as np\n'), ((483487, 483496), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (483493, 483496), True, 'import numpy as np\n'), ((485771, 485793), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (485779, 485793), True, 'import numpy as np\n'), ((499116, 499138), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (499124, 499138), True, 'import numpy as np\n'), ((499893, 499972), 'opendr.common.dr_wrt_bgcolor', 'common.dr_wrt_bgcolor', (['visibility', 'self.frustum'], {'num_channels': 'self.num_channels'}), '(visibility, self.frustum, num_channels=self.num_channels)\n', (499914, 499972), False, 'from opendr import common\n'), ((500753, 500778), 'numpy.concatenate', 'np.concatenate', (['(r, g, b)'], {}), '((r, g, b))\n', (500767, 500778), True, 'import numpy as np\n'), ((500797, 500845), 'numpy.concatenate', 'np.concatenate', (['(IS * 3, IS * 3 + 1, IS * 3 + 2)'], {}), '((IS * 3, IS * 3 + 1, IS * 3 + 2))\n', (500811, 500845), True, 'import numpy as np\n'), ((500863, 500911), 'numpy.concatenate', 'np.concatenate', (['(JS * 3, JS * 3 + 1, JS * 3 + 2)'], {}), '((JS * 3, JS * 3 + 1, JS * 3 + 2))\n', (500877, 500911), True, 'import numpy as np\n'), ((500932, 500996), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, (IS, JS))'], {'shape': '(self.r.size, wrt.r.size)'}), '((data, (IS, JS)), shape=(self.r.size, wrt.r.size))\n', (500945, 500996), True, 'import scipy.sparse as sp\n'), ((504905, 504924), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (504913, 504924), True, 'import numpy as np\n'), ((508804, 508823), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (508812, 508823), True, 'import numpy as np\n'), ((536663, 536686), 'numpy.sum', 'np.sum', (['(v1 ** 2)'], {'axis': '(1)'}), '(v1 ** 2, axis=1)\n', (536669, 536686), True, 'import numpy as np\n'), ((536853, 536876), 
'numpy.sum', 'np.sum', (['(v2 ** 2)'], {'axis': '(1)'}), '(v2 ** 2, axis=1)\n', (536859, 536876), True, 'import numpy as np\n'), ((537584, 537643), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (537591, 537643), True, 'import numpy as np\n'), ((538065, 538124), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (538072, 538124), True, 'import numpy as np\n'), ((539393, 539404), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (539396, 539404), False, 'from chumpy.utils import row, col\n'), ((542342, 542353), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (542345, 542353), False, 'from chumpy.utils import row, col\n'), ((547872, 547883), 'chumpy.utils.col', 'col', (['pixels'], {}), '(pixels)\n', (547875, 547883), False, 'from chumpy.utils import row, col\n'), ((551372, 551411), 'numpy.arange', 'np.arange', (['self.f.size'], {'dtype': 'np.uint32'}), '(self.f.size, dtype=np.uint32)\n', (551381, 551411), True, 'import numpy as np\n'), ((560333, 560355), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (560341, 560355), True, 'import numpy as np\n'), ((560860, 560888), 'numpy.ones_like', 'np.ones_like', (['colors_by_face'], {}), '(colors_by_face)\n', (560872, 560888), True, 'import numpy as np\n'), ((565659, 565681), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (565667, 565681), True, 'import numpy as np\n'), ((567061, 567102), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorTextureProgram'], {}), '(self.colorTextureProgram)\n', (567076, 567102), True, 'import OpenGL.GL as GL\n'), ((567203, 567237), 'OpenGL.GL.glActiveTexture', 'GL.glActiveTexture', (['GL.GL_TEXTURE0'], {}), '(GL.GL_TEXTURE0)\n', (567221, 567237), True, 'import OpenGL.GL as GL\n'), ((567262, 567305), 'OpenGL.GL.glBindTexture', 
'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (567278, 567305), True, 'import OpenGL.GL as GL\n'), ((567330, 567363), 'OpenGL.GL.glUniform1i', 'GL.glUniform1i', (['self.textureID', '(0)'], {}), '(self.textureID, 0)\n', (567344, 567363), True, 'import OpenGL.GL as GL\n'), ((567414, 567448), 'OpenGL.GL.glUseProgram', 'GL.glUseProgram', (['self.colorProgram'], {}), '(self.colorProgram)\n', (567429, 567448), True, 'import OpenGL.GL as GL\n'), ((30364, 30390), 'cv2.Rodrigues', 'cv2.Rodrigues', (['camera.rt.r'], {}), '(camera.rt.r)\n', (30377, 30390), False, 'import cv2\n'), ((31677, 31699), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (31685, 31699), True, 'import numpy as np\n'), ((32343, 32399), 'numpy.ones', 'np.ones', (["(self.frustum['height'], self.frustum['width'])"], {}), "((self.frustum['height'], self.frustum['width']))\n", (32350, 32399), True, 'import numpy as np\n'), ((33064, 33129), 'numpy.zeros', 'np.zeros', (['[visibility_edge.shape[0], visibility_edge.shape[1], 2]'], {}), '([visibility_edge.shape[0], visibility_edge.shape[1], 2])\n', (33072, 33129), True, 'import numpy as np\n'), ((33792, 33814), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (33800, 33814), True, 'import numpy as np\n'), ((34458, 34514), 'numpy.ones', 'np.ones', (["(self.frustum['height'], self.frustum['width'])"], {}), "((self.frustum['height'], self.frustum['width']))\n", (34465, 34514), True, 'import numpy as np\n'), ((35182, 35247), 'numpy.zeros', 'np.zeros', (['[visibility_edge.shape[0], visibility_edge.shape[1], 2]'], {}), '([visibility_edge.shape[0], visibility_edge.shape[1], 2])\n', (35190, 35247), True, 'import numpy as np\n'), ((52583, 52620), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (52591, 52620), True, 'import numpy as np\n'), ((63156, 63183), 'numpy.array', 'np.array', (['self.v_list[mesh]'], {}), 
'(self.v_list[mesh])\n', (63164, 63183), True, 'import numpy as np\n'), ((63257, 63285), 'numpy.array', 'np.array', (['self.vc_list[mesh]'], {}), '(self.vc_list[mesh])\n', (63265, 63285), True, 'import numpy as np\n'), ((65986, 66029), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (66002, 66029), True, 'import OpenGL.GL as GL\n'), ((78371, 78394), 'numpy.array', 'np.array', (['verts_by_face'], {}), '(verts_by_face)\n', (78379, 78394), True, 'import numpy as np\n'), ((78573, 78597), 'numpy.array', 'np.array', (['colors_by_face'], {}), '(colors_by_face)\n', (78581, 78597), True, 'import numpy as np\n'), ((78770, 78791), 'numpy.array', 'np.array', (['uvs_by_face'], {}), '(uvs_by_face)\n', (78778, 78791), True, 'import numpy as np\n'), ((78851, 78888), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (78859, 78888), True, 'import numpy as np\n'), ((122608, 122636), 'cv2.imshow', 'cv2.imshow', (['"""clr_im"""', 'clr_im'], {}), "('clr_im', clr_im)\n", (122618, 122636), False, 'import cv2\n'), ((122714, 122728), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (122725, 122728), False, 'import cv2\n'), ((147368, 147387), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (147376, 147387), True, 'import numpy as np\n'), ((187037, 187080), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (187053, 187080), True, 'import OpenGL.GL as GL\n'), ((197871, 197887), 'numpy.ones_like', 'np.ones_like', (['vc'], {}), '(vc)\n', (197883, 197887), True, 'import numpy as np\n'), ((206612, 206635), 'numpy.array', 'np.array', (['verts_by_face'], {}), '(verts_by_face)\n', (206620, 206635), True, 'import numpy as np\n'), ((206814, 206838), 'numpy.array', 'np.array', (['colors_by_face'], {}), '(colors_by_face)\n', (206822, 206838), True, 'import numpy as np\n'), ((207011, 207032), 
'numpy.array', 'np.array', (['uvs_by_face'], {}), '(uvs_by_face)\n', (207019, 207032), True, 'import numpy as np\n'), ((207092, 207129), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (207100, 207129), True, 'import numpy as np\n'), ((250838, 250866), 'cv2.imshow', 'cv2.imshow', (['"""clr_im"""', 'clr_im'], {}), "('clr_im', clr_im)\n", (250848, 250866), False, 'import cv2\n'), ((250944, 250958), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (250955, 250958), False, 'import cv2\n'), ((276065, 276084), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (276073, 276084), True, 'import numpy as np\n'), ((315734, 315777), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (315750, 315777), True, 'import OpenGL.GL as GL\n'), ((326568, 326584), 'numpy.ones_like', 'np.ones_like', (['vc'], {}), '(vc)\n', (326580, 326584), True, 'import numpy as np\n'), ((335305, 335328), 'numpy.array', 'np.array', (['verts_by_face'], {}), '(verts_by_face)\n', (335313, 335328), True, 'import numpy as np\n'), ((335507, 335531), 'numpy.array', 'np.array', (['colors_by_face'], {}), '(colors_by_face)\n', (335515, 335531), True, 'import numpy as np\n'), ((335704, 335725), 'numpy.array', 'np.array', (['uvs_by_face'], {}), '(uvs_by_face)\n', (335712, 335725), True, 'import numpy as np\n'), ((335785, 335822), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (335793, 335822), True, 'import numpy as np\n'), ((380391, 380419), 'cv2.imshow', 'cv2.imshow', (['"""clr_im"""', 'clr_im'], {}), "('clr_im', clr_im)\n", (380401, 380419), False, 'import cv2\n'), ((380497, 380511), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (380508, 380511), False, 'import cv2\n'), ((416796, 416855), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], 
[self.nsamples, 1, 1])\n', (416803, 416855), True, 'import numpy as np\n'), ((417277, 417336), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (417284, 417336), True, 'import numpy as np\n'), ((420640, 420699), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (420647, 420699), True, 'import numpy as np\n'), ((421306, 421365), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (421313, 421365), True, 'import numpy as np\n'), ((426637, 426696), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (426644, 426696), True, 'import numpy as np\n'), ((426822, 426881), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (426829, 426881), True, 'import numpy as np\n'), ((433913, 433956), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (433929, 433956), True, 'import OpenGL.GL as GL\n'), ((445371, 445387), 'numpy.ones_like', 'np.ones_like', (['vc'], {}), '(vc)\n', (445383, 445387), True, 'import numpy as np\n'), ((454230, 454253), 'numpy.array', 'np.array', (['verts_by_face'], {}), '(verts_by_face)\n', (454238, 454253), True, 'import numpy as np\n'), ((454432, 454456), 'numpy.array', 'np.array', (['colors_by_face'], {}), '(colors_by_face)\n', (454440, 454456), True, 'import numpy as np\n'), ((454629, 454650), 'numpy.array', 'np.array', (['uvs_by_face'], {}), '(uvs_by_face)\n', (454637, 454650), True, 'import numpy as np\n'), ((454710, 454747), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), 
'(self.f_list[mesh][polygons])\n', (454718, 454747), True, 'import numpy as np\n'), ((500480, 500508), 'cv2.imshow', 'cv2.imshow', (['"""clr_im"""', 'clr_im'], {}), "('clr_im', clr_im)\n", (500490, 500508), False, 'import cv2\n'), ((500586, 500600), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (500597, 500600), False, 'import cv2\n'), ((537449, 537508), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (537456, 537508), True, 'import numpy as np\n'), ((537930, 537989), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (537937, 537989), True, 'import numpy as np\n'), ((541293, 541352), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (541300, 541352), True, 'import numpy as np\n'), ((541959, 542018), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (541966, 542018), True, 'import numpy as np\n'), ((547290, 547349), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (547297, 547349), True, 'import numpy as np\n'), ((547475, 547534), 'numpy.tile', 'np.tile', (['self.d_final_total[None, :]', '[self.nsamples, 1, 1]'], {}), '(self.d_final_total[None, :], [self.nsamples, 1, 1])\n', (547482, 547534), True, 'import numpy as np\n'), ((554706, 554749), 'OpenGL.GL.glBindTexture', 'GL.glBindTexture', (['GL.GL_TEXTURE_2D', 'texture'], {}), '(GL.GL_TEXTURE_2D, texture)\n', (554722, 554749), True, 'import OpenGL.GL as GL\n'), ((566611, 566627), 'numpy.ones_like', 'np.ones_like', (['vc'], {}), '(vc)\n', (566623, 566627), True, 'import numpy as np\n'), ((23410, 23515), 'OpenGL.GL.glReadPixels', 
'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (23425, 23515), True, 'import OpenGL.GL as GL\n'), ((26555, 26660), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (26570, 26660), True, 'import OpenGL.GL as GL\n'), ((37750, 37855), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (37765, 37855), True, 'import OpenGL.GL as GL\n'), ((46712, 46748), 'numpy.atleast_3d', 'np.atleast_3d', (['self.visibility_image'], {}), '(self.visibility_image)\n', (46725, 46748), True, 'import numpy as np\n'), ((55911, 55950), 'OpenGL.GL.glDeleteTextures', 'GL.glDeleteTextures', (['(1)', '[texture.value]'], {}), '(1, [texture.value])\n', (55930, 55950), True, 'import OpenGL.GL as GL\n'), ((60616, 60657), 'numpy.ones', 'np.ones', (['[self.ft_list[mesh].shape[0], 1]'], {}), '([self.ft_list[mesh].shape[0], 1])\n', (60623, 60657), True, 'import numpy as np\n'), ((62173, 62188), 'chumpy.utils.col', 'col', (['vis_texidx'], {}), '(vis_texidx)\n', (62176, 62188), False, 'from chumpy.utils import row, col\n'), ((66201, 66254), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (66210, 66254), True, 'import numpy as np\n'), ((66556, 66609), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (66565, 66609), True, 'import numpy as np\n'), ((184594, 184631), 
'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (184602, 184631), True, 'import numpy as np\n'), ((187252, 187305), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (187261, 187305), True, 'import numpy as np\n'), ((187606, 187659), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (187615, 187659), True, 'import numpy as np\n'), ((188438, 188477), 'OpenGL.GL.glDeleteTextures', 'GL.glDeleteTextures', (['(1)', '[texture.value]'], {}), '(1, [texture.value])\n', (188457, 188477), True, 'import OpenGL.GL as GL\n'), ((193793, 193834), 'numpy.ones', 'np.ones', (['[self.ft_list[mesh].shape[0], 1]'], {}), '([self.ft_list[mesh].shape[0], 1])\n', (193800, 193834), True, 'import numpy as np\n'), ((313291, 313328), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (313299, 313328), True, 'import numpy as np\n'), ((315949, 316002), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (315958, 316002), True, 'import numpy as np\n'), ((316303, 316356), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (316312, 316356), True, 'import numpy as np\n'), ((317135, 317174), 'OpenGL.GL.glDeleteTextures', 'GL.glDeleteTextures', (['(1)', '[texture.value]'], {}), '(1, [texture.value])\n', (317154, 317174), True, 'import OpenGL.GL as GL\n'), ((322490, 322531), 'numpy.ones', 'np.ones', (['[self.ft_list[mesh].shape[0], 1]'], {}), '([self.ft_list[mesh].shape[0], 1])\n', (322497, 322531), True, 'import numpy as np\n'), ((431470, 431507), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (431478, 431507), True, 
'import numpy as np\n'), ((434129, 434182), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (434138, 434182), True, 'import numpy as np\n'), ((434485, 434538), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (434494, 434538), True, 'import numpy as np\n'), ((435786, 435825), 'OpenGL.GL.glDeleteTextures', 'GL.glDeleteTextures', (['(1)', '[texture.value]'], {}), '(1, [texture.value])\n', (435805, 435825), True, 'import OpenGL.GL as GL\n'), ((441222, 441263), 'numpy.ones', 'np.ones', (['[self.ft_list[mesh].shape[0], 1]'], {}), '([self.ft_list[mesh].shape[0], 1])\n', (441229, 441263), True, 'import numpy as np\n'), ((552263, 552300), 'numpy.array', 'np.array', (['self.f_list[mesh][polygons]'], {}), '(self.f_list[mesh][polygons])\n', (552271, 552300), True, 'import numpy as np\n'), ((554922, 554975), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (554931, 554975), True, 'import numpy as np\n'), ((555278, 555331), 'numpy.flipud', 'np.flipud', (['(self.textures_list[mesh][polygons] * 255.0)'], {}), '(self.textures_list[mesh][polygons] * 255.0)\n', (555287, 555331), True, 'import numpy as np\n'), ((556579, 556618), 'OpenGL.GL.glDeleteTextures', 'GL.glDeleteTextures', (['(1)', '[texture.value]'], {}), '(1, [texture.value])\n', (556598, 556618), True, 'import OpenGL.GL as GL\n'), ((562015, 562056), 'numpy.ones', 'np.ones', (['[self.ft_list[mesh].shape[0], 1]'], {}), '([self.ft_list[mesh].shape[0], 1])\n', (562022, 562056), True, 'import numpy as np\n'), ((39544, 39649), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", 
(39559, 39649), True, 'import OpenGL.GL as GL\n'), ((57639, 57744), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (57654, 57744), True, 'import OpenGL.GL as GL\n'), ((58670, 58775), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (58685, 58775), True, 'import OpenGL.GL as GL\n'), ((71307, 71412), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (71322, 71412), True, 'import OpenGL.GL as GL\n'), ((190652, 190757), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (190667, 190757), True, 'import OpenGL.GL as GL\n'), ((191683, 191788), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (191698, 191788), True, 'import OpenGL.GL as GL\n'), ((199542, 199647), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (199557, 199647), True, 'import OpenGL.GL as GL\n'), ((319349, 319454), 
'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (319364, 319454), True, 'import OpenGL.GL as GL\n'), ((320380, 320485), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (320395, 320485), True, 'import OpenGL.GL as GL\n'), ((328239, 328344), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (328254, 328344), True, 'import OpenGL.GL as GL\n'), ((438017, 438122), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (438032, 438122), True, 'import OpenGL.GL as GL\n'), ((439088, 439193), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (439103, 439193), True, 'import OpenGL.GL as GL\n'), ((447086, 447191), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (447101, 447191), True, 'import OpenGL.GL as GL\n'), ((558810, 558915), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', 
"self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (558825, 558915), True, 'import OpenGL.GL as GL\n'), ((559881, 559986), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (559896, 559986), True, 'import OpenGL.GL as GL\n'), ((568326, 568431), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (568341, 568431), True, 'import OpenGL.GL as GL\n'), ((46217, 46322), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (46232, 46322), True, 'import OpenGL.GL as GL\n'), ((61306, 61411), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (61321, 61411), True, 'import OpenGL.GL as GL\n'), ((116552, 116649), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (116567, 116649), True, 'import OpenGL.GL as GL\n'), ((116888, 116985), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, 
self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (116903, 116985), True, 'import OpenGL.GL as GL\n'), ((117234, 117346), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RED_INTEGER', 'GL.GL_UNSIGNED_INT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RED_INTEGER, GL.GL_UNSIGNED_INT)\n", (117249, 117346), True, 'import OpenGL.GL as GL\n'), ((117581, 117678), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (117596, 117678), True, 'import OpenGL.GL as GL\n'), ((117936, 118033), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (117951, 118033), True, 'import OpenGL.GL as GL\n'), ((122444, 122459), 'chumpy.utils.col', 'col', (['vis_texidx'], {}), '(vis_texidx)\n', (122447, 122459), False, 'from chumpy.utils import row, col\n'), ((148741, 148762), 'numpy.linalg.inv', 'np.linalg.inv', (['camMtx'], {}), '(camMtx)\n', (148754, 148762), True, 'import numpy as np\n'), ((194483, 194588), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (194498, 194588), True, 'import OpenGL.GL as GL\n'), ((244782, 244879), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (244797, 244879), True, 'import OpenGL.GL as GL\n'), ((245118, 
245215), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (245133, 245215), True, 'import OpenGL.GL as GL\n'), ((245464, 245576), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RED_INTEGER', 'GL.GL_UNSIGNED_INT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RED_INTEGER, GL.GL_UNSIGNED_INT)\n", (245479, 245576), True, 'import OpenGL.GL as GL\n'), ((245811, 245908), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (245826, 245908), True, 'import OpenGL.GL as GL\n'), ((246166, 246263), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (246181, 246263), True, 'import OpenGL.GL as GL\n'), ((250674, 250689), 'chumpy.utils.col', 'col', (['vis_texidx'], {}), '(vis_texidx)\n', (250677, 250689), False, 'from chumpy.utils import row, col\n'), ((277438, 277459), 'numpy.linalg.inv', 'np.linalg.inv', (['camMtx'], {}), '(camMtx)\n', (277451, 277459), True, 'import numpy as np\n'), ((323180, 323285), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (323195, 323285), True, 'import OpenGL.GL as GL\n'), ((374123, 374220), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 
'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (374138, 374220), True, 'import OpenGL.GL as GL\n'), ((374499, 374596), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (374514, 374596), True, 'import OpenGL.GL as GL\n'), ((374885, 374997), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RED_INTEGER', 'GL.GL_UNSIGNED_INT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RED_INTEGER, GL.GL_UNSIGNED_INT)\n", (374900, 374997), True, 'import OpenGL.GL as GL\n'), ((375280, 375377), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (375295, 375377), True, 'import OpenGL.GL as GL\n'), ((375675, 375772), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (375690, 375772), True, 'import OpenGL.GL as GL\n'), ((380226, 380241), 'chumpy.utils.col', 'col', (['vis_texidx'], {}), '(vis_texidx)\n', (380229, 380241), False, 'from chumpy.utils import row, col\n'), ((441942, 442047), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (441957, 442047), True, 'import OpenGL.GL as GL\n'), ((494212, 494309), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", 
"self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (494227, 494309), True, 'import OpenGL.GL as GL\n'), ((494588, 494685), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (494603, 494685), True, 'import OpenGL.GL as GL\n'), ((494974, 495086), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RED_INTEGER', 'GL.GL_UNSIGNED_INT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RED_INTEGER, GL.GL_UNSIGNED_INT)\n", (494989, 495086), True, 'import OpenGL.GL as GL\n'), ((495369, 495466), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (495384, 495466), True, 'import OpenGL.GL as GL\n'), ((495764, 495861), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_FLOAT'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_FLOAT)\n", (495779, 495861), True, 'import OpenGL.GL as GL\n'), ((500315, 500330), 'chumpy.utils.col', 'col', (['vis_texidx'], {}), '(vis_texidx)\n', (500318, 500330), False, 'from chumpy.utils import row, col\n'), ((562735, 562840), 'OpenGL.GL.glReadPixels', 'GL.glReadPixels', (['(0)', '(0)', "self.frustum['width']", "self.frustum['height']", 'GL.GL_RGB', 'GL.GL_UNSIGNED_BYTE'], {}), "(0, 0, self.frustum['width'], self.frustum['height'], GL.\n GL_RGB, GL.GL_UNSIGNED_BYTE)\n", (562750, 562840), True, 'import OpenGL.GL as GL\n')] |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""Noisy-Sin Shard Descriptor."""
from typing import List
import numpy as np
from openfl.interface.interactive_api.shard_descriptor import ShardDescriptor
class LinRegSD(ShardDescriptor):
    """Shard descriptor serving randomly generated noisy-sin data."""

    def __init__(self, rank: int, n_samples: int = 10, noise: float = 0.15) -> None:
        """
        Initialize LinReg Shard Descriptor.

        This Shard Descriptor generates random data. Sample features are
        floats between pi/3 and 5*pi/3 (60-300 degrees converted to
        radians), and targets are calculated as sin(feature) + normal noise.

        Args:
            rank: shard rank; used as the RNG seed so each shard gets a
                different but reproducible dataset.
            n_samples: requested number of samples (clamped to at least 5).
            noise: standard deviation of the Gaussian noise added to targets.
        """
        np.random.seed(rank)  # Setting seed for reproducibility
        self.n_samples = max(n_samples, 5)
        self.interval = 240  # degrees spanned by the feature range
        self.x_start = 60    # degrees at which the feature range starts
        # Bug fix: generate exactly self.n_samples points. The original used
        # the raw n_samples argument, so for n_samples < 5 the generated data
        # was smaller than the clamped self.n_samples and the train/val split
        # in get_dataset disagreed with the actual data size.
        x = np.random.rand(self.n_samples, 1) * self.interval + self.x_start
        x *= np.pi / 180  # degrees -> radians
        y = np.sin(x) + np.random.normal(0, noise, size=(self.n_samples, 1))
        self.data = np.concatenate((x, y), axis=1)

    def get_dataset(self, dataset_type: str) -> np.ndarray:
        """
        Return a shard dataset by type.

        A simple list with elements (x, y) implements the Shard Dataset
        interface: the first half of the data is 'train', the second half
        is 'val'; any other type yields None.
        """
        if dataset_type == 'train':
            return self.data[:self.n_samples // 2]
        elif dataset_type == 'val':
            return self.data[self.n_samples // 2:]
        else:
            return None  # explicit: unknown dataset types yield nothing

    @property
    def sample_shape(self) -> List[str]:
        """Return the sample shape info."""
        (*x, _) = self.data[0]
        return [str(i) for i in np.array(x, ndmin=1).shape]

    @property
    def target_shape(self) -> List[str]:
        """Return the target shape info."""
        (*_, y) = self.data[0]
        return [str(i) for i in np.array(y, ndmin=1).shape]

    @property
    def dataset_description(self) -> str:
        """Return the dataset description."""
        return 'Allowed dataset types are `train` and `val`'
| [
"numpy.random.seed",
"numpy.sin",
"numpy.array",
"numpy.random.normal",
"numpy.random.rand",
"numpy.concatenate"
] | [((662, 682), 'numpy.random.seed', 'np.random.seed', (['rank'], {}), '(rank)\n', (676, 682), True, 'import numpy as np\n'), ((1005, 1035), 'numpy.concatenate', 'np.concatenate', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (1019, 1035), True, 'import numpy as np\n'), ((925, 934), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (931, 934), True, 'import numpy as np\n'), ((937, 984), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise'], {'size': '(n_samples, 1)'}), '(0, noise, size=(n_samples, 1))\n', (953, 984), True, 'import numpy as np\n'), ((828, 856), 'numpy.random.rand', 'np.random.rand', (['n_samples', '(1)'], {}), '(n_samples, 1)\n', (842, 856), True, 'import numpy as np\n'), ((1612, 1632), 'numpy.array', 'np.array', (['x'], {'ndmin': '(1)'}), '(x, ndmin=1)\n', (1620, 1632), True, 'import numpy as np\n'), ((1803, 1823), 'numpy.array', 'np.array', (['y'], {'ndmin': '(1)'}), '(y, ndmin=1)\n', (1811, 1823), True, 'import numpy as np\n')] |
import os
import numpy as np
import tensorflow as tf
import cv2
from PIL import Image
from tensorflow.keras.applications import VGG16
from tensorflow.keras import backend as K
from model import Vgg16
def tensor_load_rgbimage(filename, size=None, scale=None, keep_asp=False):
    """Load an image file as a CHW float64 tensor.

    If ``size`` is given, resize to size x size (or keep the aspect ratio
    when ``keep_asp``); otherwise, if ``scale`` is given, downscale both
    dimensions by that factor.
    """
    image = Image.open(filename).convert('RGB')
    if size is not None:
        if keep_asp:
            height = int(size * 1.0 / image.size[0] * image.size[1])
            image = image.resize((size, height), Image.ANTIALIAS)
        else:
            image = image.resize((size, size), Image.ANTIALIAS)
    elif scale is not None:
        new_w = int(image.size[0] / scale)
        new_h = int(image.size[1] / scale)
        image = image.resize((new_w, new_h), Image.ANTIALIAS)
    # HWC -> CHW before handing the array to TensorFlow
    chw = np.array(image).transpose(2, 0, 1)
    return tf.constant(chw, dtype=tf.float64)
def tensor_save_rgbimage(tensor, filename):
    """Save a CHW image tensor to ``filename`` as an RGB image.

    Bug fixes relative to the original:
      * ``tf.unit8`` was a misspelling of ``tf.uint8`` (AttributeError).
      * values are clamped to [0, 255] BEFORE the uint8 cast — casting
        first would make the clamp a no-op.
      * ``Image.fromarray`` needs a numpy array, not a tf tensor.
    """
    img = tf.clip_by_value(tensor, clip_value_min=0, clip_value_max=255)
    img = tf.cast(img, tf.uint8)
    img = tf.transpose(img, perm=[1, 2, 0])  # CHW -> HWC for PIL
    Image.fromarray(np.asarray(img)).save(filename)
def tensor_save_bgrimage(tensor, filename):
    """Save a BGR tensor to ``filename`` by reordering channels to RGB first.

    NOTE(review): the split/concat use axis=3 (a batched channels-last
    layout) while tensor_save_rgbimage transposes as if given CHW —
    confirm the intended tensor layout with the callers.
    """
    blue, green, red = tf.split(tensor, num_or_size_splits=3, axis=3)
    rgb = tf.concat((red, green, blue), axis=3)
    tensor_save_rgbimage(rgb, filename)
# def gram_matrix(y):
# (b, ch, h, w) = y.get_shape() # b - Batch_size, ch- Channels, h - Height, w - Width
# features = tf.reshape(y,[b, ch, w * h])
# features_t = tf.transpose(feature, perm = [1, 2])
# gram = tf.matmul(features, features_t) / (ch * h * w)
# return gram
def gram_matrix(x):
    """Return the Gram matrix (channel-to-channel inner products) of ``x``."""
    # Move channels first, then flatten each channel into one row.
    rows = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(rows, K.transpose(rows))
def subtract_imagenet_mean_batch(X):
    """Subtract the per-channel ImageNet mean pixel-wise from a BGR batch (in place)."""
    b, ch, w, c = X.shape  # unpacking implicitly asserts a 4-D batch
    for channel, mean in enumerate((103.939, 116.779, 123.680)):
        X[:, channel, :, :] -= mean
    return X
def add_imagenet_mean_batch(X):
    """Add the per-channel ImageNet mean pixel-wise to a BGR batch (in place)."""
    b, ch, w, c = X.shape  # unpacking implicitly asserts a 4-D batch
    for channel, mean in enumerate((103.939, 116.779, 123.680)):
        X[:, channel, :, :] += mean
    return X
def imagenet_clamp_batch(batch, low, high):
    """Clamp a mean-subtracted BGR batch, in place, so that adding the
    ImageNet mean back yields pixel values in [low, high].

    Bug fix: the original clamped every channel with the channel-0 mean
    (103.939); channels 1 and 2 must use their own means (116.779 and
    123.680) to mirror subtract/add_imagenet_mean_batch. np.clip is used
    so the in-place slice assignment works on the numpy batches this
    module manipulates.
    """
    batch[:, 0, :, :] = np.clip(batch[:, 0, :, :], low - 103.939, high - 103.939)
    batch[:, 1, :, :] = np.clip(batch[:, 1, :, :], low - 116.779, high - 116.779)
    batch[:, 2, :, :] = np.clip(batch[:, 2, :, :], low - 123.680, high - 123.680)
def preprocess_batch(batch):
    """Convert an RGB batch (channels along axis 3) to BGR channel order.

    Bug fix: the original wrapped the swap in two
    ``tf.transpose(img, perm=[0, 1])`` calls that referenced the undefined
    name ``img`` (NameError at runtime); only the channel swap is needed.
    """
    r, g, b = tf.split(batch, num_or_size_splits=3, axis=3)
    return tf.concat((b, g, r), axis=3)
# def init_vgg16(model_folder):
# """load the vgg16 model feature"""
# if not os.path.exists(os.path.join(model_folder, 'vgg16.weight')):
# if not os.path.exists(os.path.join(model_folder, 'vgg16.t7')):
# os.system(
# 'wget http://cs.stanford.edu/people/jcjohns/fast-neural-style/models/vgg16.t7 -O ' + os.path.join(model_folder, 'vgg16.t7'))
# vgglua = load_lua(os.path.join(model_folder, 'vgg16.t7'))
# vgg = Vgg16()
# for (src, dst) in zip(vgglua.parameters()[0], vgg.parameters()):
# dst.data[:] = src
# torch.save(vgg.state_dict(), os.path.join(model_folder, 'vgg16.weight'))
class StyleLoader():
    """Cycle through the style images in a folder, returning preprocessed
    tensors ready for the style-transfer network."""

    def __init__(self, style_folder, style_size, cuda=True):
        """
        Args:
            style_folder: directory containing the style images.
            style_size: target size passed to tensor_load_rgbimage.
            cuda: unused here; kept for call compatibility with the
                original PyTorch implementation.
        """
        self.folder = style_folder
        self.style_size = style_size
        self.files = os.listdir(style_folder)

    def get(self, i):
        """Return style image ``i`` (mod file count) as a tf.Variable."""
        idx = i % len(self.files)
        filepath = os.path.join(self.folder, self.files[idx])
        style = tensor_load_rgbimage(filepath, self.style_size)
        # Bug fix: tf tensors have no .unsqueeze() (a PyTorch method);
        # use tf.expand_dims to add the leading batch dimension.
        style = tf.expand_dims(style, 0)
        style = preprocess_batch(style)
        return tf.Variable(style)

    def size(self):
        """Number of style images available."""
        return len(self.files)
"tensorflow.clip_by_value",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.transpose",
"PIL.Image.open",
"tensorflow.Variable",
"tensorflow.keras.backend.permute_dimensions",
"numpy.array",
"PIL.Image.fromarray",
"tensorflow.split",
"os.path.join",
"os.listdir",
"tensorflow.keras.ba... | [((745, 779), 'tensorflow.constant', 'tf.constant', (['img'], {'dtype': 'tf.float64'}), '(img, dtype=tf.float64)\n', (756, 779), True, 'import tensorflow as tf\n'), ((853, 888), 'tensorflow.constant', 'tf.constant', (['tensor'], {'dtype': 'tf.unit8'}), '(tensor, dtype=tf.unit8)\n', (864, 888), True, 'import tensorflow as tf\n'), ((901, 960), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['img'], {'clip_value_min': '(0)', 'clip_value_max': '(255)'}), '(img, clip_value_min=0, clip_value_max=255)\n', (917, 960), True, 'import tensorflow as tf\n'), ((975, 1008), 'tensorflow.transpose', 'tf.transpose', (['img'], {'perm': '[1, 2, 0]'}), '(img, perm=[1, 2, 0])\n', (987, 1008), True, 'import tensorflow as tf\n'), ((1021, 1041), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (1036, 1041), False, 'from PIL import Image\n'), ((1151, 1197), 'tensorflow.split', 'tf.split', (['tensor'], {'num_or_size_splits': '(3)', 'axis': '(3)'}), '(tensor, num_or_size_splits=3, axis=3)\n', (1159, 1197), True, 'import tensorflow as tf\n'), ((1215, 1243), 'tensorflow.concat', 'tf.concat', (['(r, g, b)'], {'axis': '(3)'}), '((r, g, b), axis=3)\n', (1224, 1243), True, 'import tensorflow as tf\n'), ((2286, 2386), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['batch[:, 0, :, :]'], {'clip_value_min': '(low - 103.939)', 'clip_value_max': '(high - 103.939)'}), '(batch[:, 0, :, :], clip_value_min=low - 103.939,\n clip_value_max=high - 103.939)\n', (2302, 2386), True, 'import tensorflow as tf\n'), ((2401, 2501), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['batch[:, 1, :, :]'], {'clip_value_min': '(low - 103.939)', 'clip_value_max': '(high - 103.939)'}), '(batch[:, 1, :, :], clip_value_min=low - 103.939,\n clip_value_max=high - 103.939)\n', (2417, 2501), True, 'import tensorflow as tf\n'), ((2516, 2616), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['batch[:, 2, :, :]'], {'clip_value_min': '(low - 103.939)', 'clip_value_max': '(high - 
103.939)'}), '(batch[:, 2, :, :], clip_value_min=low - 103.939,\n clip_value_max=high - 103.939)\n', (2532, 2616), True, 'import tensorflow as tf\n'), ((2652, 2682), 'tensorflow.transpose', 'tf.transpose', (['img'], {'perm': '[0, 1]'}), '(img, perm=[0, 1])\n', (2664, 2682), True, 'import tensorflow as tf\n'), ((2701, 2746), 'tensorflow.split', 'tf.split', (['batch'], {'num_or_size_splits': '(3)', 'axis': '(3)'}), '(batch, num_or_size_splits=3, axis=3)\n', (2709, 2746), True, 'import tensorflow as tf\n'), ((2763, 2791), 'tensorflow.concat', 'tf.concat', (['(b, g, r)'], {'axis': '(3)'}), '((b, g, r), axis=3)\n', (2772, 2791), True, 'import tensorflow as tf\n'), ((2806, 2836), 'tensorflow.transpose', 'tf.transpose', (['img'], {'perm': '[0, 1]'}), '(img, perm=[0, 1])\n', (2818, 2836), True, 'import tensorflow as tf\n'), ((1674, 1708), 'tensorflow.keras.backend.permute_dimensions', 'K.permute_dimensions', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (1694, 1708), True, 'from tensorflow.keras import backend as K\n'), ((1737, 1758), 'tensorflow.keras.backend.transpose', 'K.transpose', (['features'], {}), '(features)\n', (1748, 1758), True, 'from tensorflow.keras import backend as K\n'), ((3704, 3728), 'os.listdir', 'os.listdir', (['style_folder'], {}), '(style_folder)\n', (3714, 3728), False, 'import os\n'), ((3807, 3849), 'os.path.join', 'os.path.join', (['self.folder', 'self.files[idx]'], {}), '(self.folder, self.files[idx])\n', (3819, 3849), False, 'import os\n'), ((4011, 4029), 'tensorflow.Variable', 'tf.Variable', (['style'], {}), '(style)\n', (4022, 4029), True, 'import tensorflow as tf\n'), ((286, 306), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (296, 306), False, 'from PIL import Image\n'), ((702, 715), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (710, 715), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
from sklearn.datasets.samples_generator import make_moons
from sklearn.datasets.samples_generator import make_circles
from sklearn.datasets.samples_generator import make_blobs
# generate 2d classification dataset
n = 10000
# Two concentric circles: a binary problem that is not linearly separable
X, y = make_circles(n_samples=n, noise=0.05)
# scatter plot, dots colored by class value
df = pd.DataFrame(dict(x=X[:,0], y=X[:,1], label=y))
colors = {0:'red', 1:'blue'}
fig, ax = plt.subplots()
grouped = df.groupby('label')
for key, group in grouped:
    group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])
plt.show()
# Round-trip the arrays through a DataFrame, then back to numpy
datadict = {'X1': X[:,0],'X2' : X[:,1], 'target': y}
data = pd.DataFrame(data=datadict)
X = data.iloc[:,[0, 1]].values
type(X)  # NOTE(review): no-op expression; the result is discarded
y = data.target.values
# TRAIN TEST SPLIT
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
N, D = X_train.shape
# Standardize using statistics from the training split only (no leakage)
from sklearn.preprocessing import StandardScaler
scaleObj = StandardScaler()
X_train = scaleObj.fit_transform(X_train)
X_test = scaleObj.transform(X_test)
from tensorflow.keras.layers import Input, Dense, Activation,Dropout
from tensorflow.keras.models import Model
input_layer = Input(shape=(D,))
dense_layer_1 = Dense(20, activation='relu')(input_layer)
# NOTE(review): dense_layer_1 is unused — the next layer is wired to
# input_layer again, so the network has a single hidden layer. Confirm
# whether this should read Dense(20, activation='relu')(dense_layer_1).
dense_layer_2 = Dense(20, activation='relu')(input_layer)
output = Dense(1, activation='sigmoid')(dense_layer_2)
model = Model(inputs=input_layer, outputs=output)
model.compile(
    optimizer = 'adam',
    loss = 'binary_crossentropy',
    metrics = ['accuracy']
)
def scheduler(epoch):
    """Learning-rate schedule: constant 0.001 for epochs 0-9, then
    exponential decay by a factor of exp(-0.1) per extra epoch."""
    base_lr = 0.001
    if epoch >= 10:
        return base_lr * tf.math.exp(0.1 * (10 - epoch))
    return base_lr
callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
# NOTE(review): `callback` is never passed to model.fit (callbacks=[callback]),
# so the learning-rate schedule defined above is not actually applied.
report = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=50)
plt.plot(report.history['loss'], label="loss = SGD")
# NOTE(review): the label says SGD, but the model was compiled with 'adam'.
print(X.shape)
print(y.shape)
y_predicted = model.predict(X)  # NOTE(review): unused below
# Visualising the results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
# Dense grid over the (scaled) feature plane for the decision surface
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, model.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('pink', 'cyan')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
# Overlay the training points, colored by class
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
    c = ListedColormap(('red', 'blue'))(i), label = j)
plt.title('Model')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.show()
# Visualising the results
# NOTE(review): this second plot repeats the previous one without the
# scatter overlay.
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, model.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('pink', 'cyan')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
plt.title('Model')
plt.xlabel('X')
plt.ylabel('y')
plt.legend()
plt.show()
print("Train eval: ", model.evaluate(X_train, y_train))
print("Test eval: ", model.evaluate(X_test, y_test)) | [
"pandas.DataFrame",
"matplotlib.pyplot.title",
"tensorflow.math.exp",
"matplotlib.pyplot.show",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.legend",
"tensorflow.keras.models.Model"... | [((349, 386), 'sklearn.datasets.samples_generator.make_circles', 'make_circles', ([], {'n_samples': 'n', 'noise': '(0.05)'}), '(n_samples=n, noise=0.05)\n', (361, 386), False, 'from sklearn.datasets.samples_generator import make_circles\n'), ((523, 537), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (535, 537), True, 'import matplotlib.pyplot as plt\n'), ((677, 687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (685, 687), True, 'import matplotlib.pyplot as plt\n'), ((748, 775), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'datadict'}), '(data=datadict)\n', (760, 775), True, 'import pandas as pd\n'), ((949, 986), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (965, 986), False, 'from sklearn.model_selection import train_test_split\n'), ((1073, 1089), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1087, 1089), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1295, 1312), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(D,)'}), '(shape=(D,))\n', (1300, 1312), False, 'from tensorflow.keras.layers import Input, Dense, Activation, Dropout\n'), ((1493, 1534), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'output'}), '(inputs=input_layer, outputs=output)\n', (1498, 1534), False, 'from tensorflow.keras.models import Model\n'), ((1853, 1904), 'tensorflow.keras.callbacks.LearningRateScheduler', 'tf.keras.callbacks.LearningRateScheduler', (['scheduler'], {}), '(scheduler)\n', (1893, 1904), True, 'import tensorflow as tf\n'), ((1989, 2041), 'matplotlib.pyplot.plot', 'plt.plot', (["report.history['loss']"], {'label': '"""loss = SGD"""'}), "(report.history['loss'], label='loss = SGD')\n", (1997, 2041), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2825), 'matplotlib.pyplot.title', 'plt.title', (['"""Model"""'], {}), "('Model')\n", 
(2816, 2825), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2841), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (2836, 2841), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2857), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2852, 2857), True, 'import matplotlib.pyplot as plt\n'), ((2858, 2870), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2868, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2879, 2881), True, 'import matplotlib.pyplot as plt\n'), ((3416, 3434), 'matplotlib.pyplot.title', 'plt.title', (['"""Model"""'], {}), "('Model')\n", (3425, 3434), True, 'import matplotlib.pyplot as plt\n'), ((3435, 3450), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (3445, 3450), True, 'import matplotlib.pyplot as plt\n'), ((3451, 3466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3461, 3466), True, 'import matplotlib.pyplot as plt\n'), ((3467, 3479), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3477, 3479), True, 'import matplotlib.pyplot as plt\n'), ((3480, 3490), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3488, 3490), True, 'import matplotlib.pyplot as plt\n'), ((1329, 1357), 'tensorflow.keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (1334, 1357), False, 'from tensorflow.keras.layers import Input, Dense, Activation, Dropout\n'), ((1387, 1415), 'tensorflow.keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (1392, 1415), False, 'from tensorflow.keras.layers import Input, Dense, Activation, Dropout\n'), ((1438, 1468), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1443, 1468), False, 'from tensorflow.keras.layers import Input, Dense, Activation, Dropout\n'), ((2661, 
2677), 'numpy.unique', 'np.unique', (['y_set'], {}), '(y_set)\n', (2670, 2677), True, 'import numpy as np\n'), ((2547, 2579), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('pink', 'cyan')"], {}), "(('pink', 'cyan'))\n", (2561, 2579), False, 'from matplotlib.colors import ListedColormap\n'), ((3324, 3356), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('pink', 'cyan')"], {}), "(('pink', 'cyan'))\n", (3338, 3356), False, 'from matplotlib.colors import ListedColormap\n'), ((1808, 1839), 'tensorflow.math.exp', 'tf.math.exp', (['(0.1 * (10 - epoch))'], {}), '(0.1 * (10 - epoch))\n', (1819, 1839), True, 'import tensorflow as tf\n'), ((2760, 2791), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["('red', 'blue')"], {}), "(('red', 'blue'))\n", (2774, 2791), False, 'from matplotlib.colors import ListedColormap\n')] |
'''
# This is an 80 character line #
Compute the length of the cluster edge:
-Use Freud to find the complete system neighborlist
-Grab the largest cluster
-Mesh the system
-Compute which bins have largest cluster particles
-If adjacent bins are empty, the reference bin is an edge
-Multiply by bin size to get length
'''
import sys
# Run locally
sys.path.append('/Users/kolbt/Desktop/compiled/hoomd-blue/build')
sys.path.append('/Users/kolbt/Desktop/compiled/gsd/build')
# Run on the cpu
sys.path.append('/nas/longleaf/home/kolbt/programs/cpu-hoomd/hoomd-blue/build')
# Run on the gpu
sys.path.append('/nas/longleaf/home/kolbt/programs/hoomd_2.2.1/hoomd-blue/build')
sys.path.append('/nas/longleaf/home/kolbt/programs/gsd/build')
import gsd
from gsd import hoomd
from gsd import pygsd
import freud
from freud import parallel
from freud import box
from freud import density
from freud import cluster
import numpy as np
import math
import random
from scipy import stats
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.collections
from matplotlib import colors
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
def computeTauPerTstep(epsilon, mindt=0.000001):
    '''Brownian time elapsed per simulation timestep for a given epsilon.'''
    kBT = 1.0
    # One Brownian tau corresponds to epsilon / (kBT * mindt) timesteps.
    timesteps_per_tau = epsilon / (kBT * mindt)
    return 1.0 / float(timesteps_per_tau)
def roundUp(n, decimals=0):
    '''Round n up at the given number of decimal places (guards against
    floating point inaccuracy when choosing bin sizes).'''
    scale = 10 ** decimals
    return math.ceil(n * scale) / scale
def getNBins(length, minSz=(2**(1./6.))):
    "Largest bin count for which each bin is wider than minSz."
    # Start one above the integer box length and shrink until bins fit.
    nBins = int(length) + 1
    while length / nBins <= minSz:
        nBins -= 1
    return nBins
def findBins(lookN, currentInd, maxInds):
    '''Indices of the bins within lookN of currentInd, wrapping at the top.

    NOTE(review): range(left, right) excludes the right endpoint, and the
    wrap subtracts (maxInds - 1) rather than taking a modulo. Behavior is
    preserved exactly as originally written — confirm against callers.
    '''
    top = maxInds - 1
    left = currentInd - lookN
    right = currentInd + lookN
    out = []
    for raw in range(left, right):
        idx = raw - top if raw > top else raw
        out.append(idx)
    return out
# Get infile and open
inFile = str(sys.argv[1])
# Prefix the output name when the input dump is itself a cluster file
if inFile[0:7] == "cluster":
    add = 'cluster_'
else:
    add = ''
f = hoomd.open(name=inFile, mode='rb')
# NOTE(review): this handle is only used for f.__len__() below; the dump is
# re-opened by the `with` block later and this handle is never closed.
# Inside and outside activity from command line
peA = float(sys.argv[2])
peB = float(sys.argv[3])
parFrac = float(sys.argv[4])
eps = float(sys.argv[5])
# Area fraction is optional; default to 60% when absent or unparsable
try:
    phi = float(sys.argv[6])
    intPhi = int(phi)
    phi /= 100.
except:  # NOTE(review): bare except also hides unrelated errors
    phi = 0.6
    intPhi = 60
# Outfile to write data to
outFile = add + 'edge_pa' + str(peA) +\
          '_pb' + str(peB) +\
          '_xa' + str(parFrac) +\
          '_phi' + str(intPhi) +\
          '_ep' + '{0:.3f}'.format(eps) +\
          '.txt'
g = open(outFile, 'w') # write file headings
g.write('Timestep'.center(10) + ' ' + 'Length'.center(10) + '\n')
g.close()
start = 0                   # first frame to process
dumps = int(f.__len__())    # get number of timesteps dumped
end = dumps                 # final frame to process
start = end - 1             # only the final frame is processed
box_data = np.zeros((1), dtype=np.ndarray)  # box dimension holder
r_cut = 2**(1./6.)                          # potential cutoff
tauPerDT = computeTauPerTstep(epsilon=eps)  # brownian time per timestep
# Process the dump: mesh each frame, flag edge bins, and record edge length
with hoomd.open(name=inFile, mode='rb') as t:
    snap = t[0]
    first_tstep = snap.configuration.step
    box_data = snap.configuration.box
    l_box = box_data[0]
    h_box = l_box / 2.
    typ = snap.particles.typeid
    partNum = len(typ)
    # Set up cluster computation using box
    f_box = box.Box(Lx=l_box, Ly=l_box, is2D=True)
    my_clust = cluster.Cluster()
    c_props = cluster.ClusterProperties()
    # Compute each mesh
    NBins = getNBins(l_box, r_cut)
    sizeBin = roundUp((l_box / NBins), 6)
    # Loop through each timestep
    for j in range(start, end):
        snap = t[j]
        # Easier accessors
        pos = snap.particles.position               # position
        pos[:,-1] = 0.0
        xy = np.delete(pos, 2, 1)                   # NOTE(review): xy is never used below
        typ = snap.particles.typeid                 # type
        tst = snap.configuration.step               # timestep
        tst -= first_tstep                          # normalize by first timestep
        tst *= tauPerDT                             # convert to Brownian time
        # Compute clusters for this timestep
        system = freud.AABBQuery(f_box, f_box.wrap(pos))
        # Compute neighbor list for only largest cluster
        my_clust.compute(system, neighbors={'r_max': 1.0})
        ids = my_clust.cluster_idx              # get id of each cluster
        c_props.compute(system, ids)            # find cluster properties
        clust_size = c_props.sizes              # find cluster sizes
        # We can also grab all clusters over a set size
        suffIDs = []
        for k in range(0, len(clust_size)):
            # Cluster must be >= 5% of all particles
            if clust_size[k] >= (partNum * 0.05):
                # Grab sufficiently large cluster IDs
                suffIDs.append(k)
        # # Try and grab edge of largest cluster
        # lClust = max(clust_size)
        # # Get the id of the largest cluster
        # for k in range(0, len(clust_size)):
        #     if clust_size[k] == lClust:
        #         lcID = ids[k]
        # Get the positions of all particles in LC
        binParts = [[[] for b in range(NBins)] for a in range(NBins)]
        occParts = [[0 for b in range(NBins)] for a in range(NBins)]
        edgeBin = [[0 for b in range(NBins)] for a in range(NBins)]
        lcPos = []
        for k in range(0, len(ids)):
            if ids[k] in suffIDs:
            # if ids[k] == lcID:
                lcPos.append(pos[k])
                # Convert position to be > 0 to place in list mesh
                tmp_posX = pos[k][0] + h_box
                tmp_posY = pos[k][1] + h_box
                x_ind = int(tmp_posX / sizeBin)
                y_ind = int(tmp_posY / sizeBin)
                # Append particle id to appropriate bin
                binParts[x_ind][y_ind].append(k)
                occParts[x_ind][y_ind] = 1
        # If sufficient neighbor bins are empty, we have an edge
        thresh = 1.5
        # Loop through x index of mesh
        for ix in range(0, len(occParts)):
            # If at right edge, wrap to left
            if (ix + 1) != NBins:
                lookx = [ix-1, ix, ix+1]
            else:
                lookx = [ix-1, ix, 0]
            # Loop through y index of mesh
            for iy in range(0, len(occParts[ix])):
                # Reset neighbor counter
                count = 0
                # If the bin is not occupied, skip it
                if occParts[ix][iy] == 0:
                    continue
                # If at top edge, wrap to bottom
                if (iy + 1) != NBins:
                    looky = [iy-1, iy, iy+1]
                else:
                    looky = [iy-1, iy, 0]
                # Loop through surrounding x-index
                for indx in lookx:
                    # Loop through surrounding y-index
                    for indy in looky:
                        # If neighbor bin is NOT occupied
                        if occParts[indx][indy] == 0:
                            # If neighbor bin shares a vertex
                            if indx != ix and indy != iy:
                                count += 0.5
                            # If neighbor bin shares a side
                            else:
                                count += 1
                # If sufficient neighbors are empty, we found an edge
                # NOTE(review): indx/indy are leftovers from the finished
                # neighbor loops, so this flags the last neighbor visited
                # rather than the occupied bin (ix, iy). The module
                # docstring says the reference bin is the edge — confirm.
                if count >= thresh:
                    edgeBin[indx][indy] = 1
        # Sum the resultant edge mesh
        Nedges = 0
        for ix in range(0, len(occParts)):
            for iy in range(0, len(occParts[ix])):
                Nedges += edgeBin[ix][iy]
        # The edge length of sufficiently large clusters
        lEdge = Nedges * sizeBin
        # Write this to a textfile with the timestep
        g = open(outFile, 'a')
        g.write('{0:.3f}'.format(tst).center(10) + ' ')
        g.write('{0:.1f}'.format(lEdge).center(10) + '\n')
        g.close()
        # # A sanity check on a perfect hcp circle
        # print(Nedges)
        # print(Nedges * sizeBin)
        # x = list(list(zip(*lcPos))[0])
        # y = list(list(zip(*lcPos))[1])
        # diam = max(x) - min(x)
        # circ = diam * np.pi
        # print(circ)
        # print(Nedges * sizeBin / circ)
        #
        # # Let's plot imshow to make sure we're good thus far
        # fig, ax = plt.subplots()
        # ax.imshow(edgeBin, extent=[0, l_box, 0, l_box], aspect='auto', origin='lower')
        # ax.set_aspect('equal')
        # plt.show()
| [
"sys.path.append",
"freud.cluster.ClusterProperties",
"numpy.delete",
"freud.cluster.Cluster",
"math.ceil",
"numpy.zeros",
"freud.box.Box",
"gsd.hoomd.open"
] | [((396, 461), 'sys.path.append', 'sys.path.append', (['"""/Users/kolbt/Desktop/compiled/hoomd-blue/build"""'], {}), "('/Users/kolbt/Desktop/compiled/hoomd-blue/build')\n", (411, 461), False, 'import sys\n'), ((462, 520), 'sys.path.append', 'sys.path.append', (['"""/Users/kolbt/Desktop/compiled/gsd/build"""'], {}), "('/Users/kolbt/Desktop/compiled/gsd/build')\n", (477, 520), False, 'import sys\n'), ((538, 617), 'sys.path.append', 'sys.path.append', (['"""/nas/longleaf/home/kolbt/programs/cpu-hoomd/hoomd-blue/build"""'], {}), "('/nas/longleaf/home/kolbt/programs/cpu-hoomd/hoomd-blue/build')\n", (553, 617), False, 'import sys\n'), ((635, 721), 'sys.path.append', 'sys.path.append', (['"""/nas/longleaf/home/kolbt/programs/hoomd_2.2.1/hoomd-blue/build"""'], {}), "(\n '/nas/longleaf/home/kolbt/programs/hoomd_2.2.1/hoomd-blue/build')\n", (650, 721), False, 'import sys\n'), ((717, 779), 'sys.path.append', 'sys.path.append', (['"""/nas/longleaf/home/kolbt/programs/gsd/build"""'], {}), "('/nas/longleaf/home/kolbt/programs/gsd/build')\n", (732, 779), False, 'import sys\n'), ((2422, 2456), 'gsd.hoomd.open', 'hoomd.open', ([], {'name': 'inFile', 'mode': '"""rb"""'}), "(name=inFile, mode='rb')\n", (2432, 2456), False, 'from gsd import hoomd\n'), ((3273, 3302), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'np.ndarray'}), '(1, dtype=np.ndarray)\n', (3281, 3302), True, 'import numpy as np\n'), ((3471, 3505), 'gsd.hoomd.open', 'hoomd.open', ([], {'name': 'inFile', 'mode': '"""rb"""'}), "(name=inFile, mode='rb')\n", (3481, 3505), False, 'from gsd import hoomd\n'), ((3765, 3803), 'freud.box.Box', 'box.Box', ([], {'Lx': 'l_box', 'Ly': 'l_box', 'is2D': '(True)'}), '(Lx=l_box, Ly=l_box, is2D=True)\n', (3772, 3803), False, 'from freud import box\n'), ((3819, 3836), 'freud.cluster.Cluster', 'cluster.Cluster', ([], {}), '()\n', (3834, 3836), False, 'from freud import cluster\n'), ((3851, 3878), 'freud.cluster.ClusterProperties', 'cluster.ClusterProperties', ([], {}), '()\n', (3876, 
3878), False, 'from freud import cluster\n'), ((1622, 1647), 'math.ceil', 'math.ceil', (['(n * multiplier)'], {}), '(n * multiplier)\n', (1631, 1647), False, 'import math\n'), ((4197, 4217), 'numpy.delete', 'np.delete', (['pos', '(2)', '(1)'], {}), '(pos, 2, 1)\n', (4206, 4217), True, 'import numpy as np\n')] |
#! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
nohup python2 stack_spectra_ELG_LineLF.py > stack_spectra_ELG_LineLF.log &
"""
import sys
import os
from os.path import join
import glob
import numpy as n
import SpectraStackingEBOSS as sse
# Directory the stacks (and selection lists) are written to
spec_dir = join(os.environ['HOME'],"SDSS/stacks/LineLF/")
# Per-object catalogue: plate/MJD/fiber identifiers, redshift (+error),
# [OII]/[OIII]/H-beta line luminosities and three per-object weights
PLATE, MJD, FIBREID, z, z_err, log_l_O2, log_l_O3, log_l_Hb, WEIGHT_SYSTOT, WEIGHT_CP, WEIGHT_NOZ = n.loadtxt(os.path.join(spec_dir, "ELGv5_11_0rrv2_all_lum_weight.txt"), unpack=True)
# Combined per-object weight (product of the three individual weights)
weight=WEIGHT_SYSTOT*WEIGHT_CP* WEIGHT_NOZ
# Maximum number of spectra written into a single stack list
N_per_stack=25000
zmins = n.array([0.5]) # n.arange(0.6,1.2,0.2)
zmaxs = n.array([1.3]) #zmins+0.2
"""
for zmin, zmax in zip(zmins, zmaxs):
z_sel = (z>=zmin)&(z<zmax)&(log_l_O2>0)&(weight>0)
idx_L = n.argsort(log_l_O2[z_sel])
#log_l_O2[z_sel][idx_L]
data_to_stack = n.transpose([PLATE, MJD, FIBREID, z, weight])[z_sel][idx_L]
# write in files of 1000
ids_bds = n.arange(0, len(data_to_stack), N_per_stack)
for ids_bd in ids_bds:
name = 'elg_'+str(n.min(log_l_O2[z_sel][idx_L][::-1][ids_bd:ids_bd+N_per_stack])).zfill(3)+'_O2_'+str(n.max(log_l_O2[z_sel][idx_L][::-1][ids_bd:ids_bd+N_per_stack])).zfill(3)+"-"+str(zmin)+'_z_'+str(zmax)+'_N_'+str(len(log_l_O2[z_sel][idx_L][ids_bd:ids_bd+N_per_stack]))+'.ascii'
print(name)
n.savetxt( os.path.join(spec_dir, name),data_to_stack[::-1][ids_bd:ids_bd+N_per_stack])
#zmins = n.arange(0.6,0.8,0.2)
#zmaxs = zmins+0.2
for zmin, zmax in zip(zmins, zmaxs):
z_sel = (z>=zmin)&(z<zmax)&(log_l_O3>0)&(weight>0)
idx_L = n.argsort(log_l_O3[z_sel])
#log_l_O3[z_sel][idx_L]
data_to_stack = n.transpose([PLATE, MJD, FIBREID, z, weight])[z_sel][idx_L]
# write in files of 1000
ids_bds = n.arange(0, len(data_to_stack), N_per_stack)
for ids_bd in ids_bds:
name = 'elg_'+str(n.min(log_l_O3[z_sel][idx_L][::-1][ids_bd:ids_bd+N_per_stack])).zfill(3)+'_O3_'+str(n.max(log_l_O3[z_sel][idx_L][::-1][ids_bd:ids_bd+N_per_stack])).zfill(3)+"-"+str(zmin)+'_z_'+str(zmax)+'_N_'+str(len(log_l_O3[z_sel][idx_L][ids_bd:ids_bd+N_per_stack]))+'.ascii'
print(name)
n.savetxt( os.path.join(spec_dir, name),data_to_stack[::-1][ids_bd:ids_bd+N_per_stack])
#zmins = n.arange(0.6,0.8,0.2)
#zmaxs = zmins+0.2
for zmin, zmax in zip(zmins, zmaxs):
z_sel = (z>=zmin)&(z<zmax)&(log_l_Hb>0)&(weight>0)
idx_L = n.argsort(log_l_Hb[z_sel])
#log_l_Hb[z_sel][idx_L]
data_to_stack = n.transpose([PLATE, MJD, FIBREID, z, weight])[z_sel][idx_L]
# write in files of 1000
ids_bds = n.arange(0, len(data_to_stack), N_per_stack)
for ids_bd in ids_bds:
name = 'elg_'+str(n.min(log_l_Hb[z_sel][idx_L][::-1][ids_bd:ids_bd+N_per_stack])).zfill(3)+'_Hb_'+str(n.max(log_l_Hb[z_sel][idx_L][::-1][ids_bd:ids_bd+N_per_stack])).zfill(3)+"-"+str(zmin)+'_z_'+str(zmax)+'_N_'+str(len(log_l_Hb[z_sel][idx_L][ids_bd:ids_bd+N_per_stack]))+'.ascii'
print(name)
n.savetxt( os.path.join(spec_dir, name),data_to_stack[::-1][ids_bd:ids_bd+N_per_stack])
"""
# Every previously written selection list found in the stacks directory
file_list = n.array(glob.glob(os.path.join(spec_dir, '*.ascii')))
def stack_it(specList):
	"""Stack the spectra listed in *specList*, writing <name>.stack beside it."""
	stem = os.path.basename(specList)[:-6]  # drop the '.ascii' extension
	outfile = join(spec_dir, stem + ".stack")
	stacker = sse.SpectraStackingEBOSS(specList, outfile, PBKT_input=True)
	stacker.createStackMatrix_Weighted()
	print(outfile)
	stacker.stackSpectra()
# Stack every selection list found above
for file_input in file_list:
	stack_it(file_input) | [
| [
"numpy.array",
"os.path.join",
"SpectraStackingEBOSS.SpectraStackingEBOSS",
"os.path.basename"
] | [((311, 358), 'os.path.join', 'join', (["os.environ['HOME']", '"""SDSS/stacks/LineLF/"""'], {}), "(os.environ['HOME'], 'SDSS/stacks/LineLF/')\n", (315, 358), False, 'from os.path import join\n'), ((615, 629), 'numpy.array', 'n.array', (['[0.5]'], {}), '([0.5])\n', (622, 629), True, 'import numpy as n\n'), ((662, 676), 'numpy.array', 'n.array', (['[1.3]'], {}), '([1.3])\n', (669, 676), True, 'import numpy as n\n'), ((469, 528), 'os.path.join', 'os.path.join', (['spec_dir', '"""ELGv5_11_0rrv2_all_lum_weight.txt"""'], {}), "(spec_dir, 'ELGv5_11_0rrv2_all_lum_weight.txt')\n", (481, 528), False, 'import os\n'), ((3166, 3226), 'SpectraStackingEBOSS.SpectraStackingEBOSS', 'sse.SpectraStackingEBOSS', (['specList', 'outfile'], {'PBKT_input': '(True)'}), '(specList, outfile, PBKT_input=True)\n', (3190, 3226), True, 'import SpectraStackingEBOSS as sse\n'), ((3029, 3062), 'os.path.join', 'os.path.join', (['spec_dir', '"""*.ascii"""'], {}), "(spec_dir, '*.ascii')\n", (3041, 3062), False, 'import os\n'), ((3117, 3143), 'os.path.basename', 'os.path.basename', (['specList'], {}), '(specList)\n', (3133, 3143), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import datetime
from optim.pretrain import *
import argparse
import torch
from utils.utils import get_config_from_json
def parse_option():
    """Build the CLI parser for self-supervised pre-training and return the parsed options."""
    p = argparse.ArgumentParser('argument for training')

    # -- training loop --
    p.add_argument('--save_freq', type=int, default=200,
                   help='save frequency')
    p.add_argument('--batch_size', type=int, default=128,
                   help='batch_size')
    p.add_argument('--K', type=int, default=16, help='Number of augmentation for each sample')  # Bigger is better.
    p.add_argument('--feature_size', type=int, default=64,
                   help='feature_size')
    p.add_argument('--num_workers', type=int, default=16,
                   help='num of workers to use')
    p.add_argument('--epochs', type=int, default=400,
                   help='number of training epochs')
    p.add_argument('--patience', type=int, default=400,
                   help='training patience')
    p.add_argument('--aug_type', type=str, default='none', help='Augmentation type')
    p.add_argument('--piece_size', type=float, default=0.2,
                   help='piece size for time series piece sampling')
    p.add_argument('--class_type', type=str, default='3C', help='Classification type')

    # -- optimization --
    p.add_argument('--learning_rate', type=float, default=0.01,
                   help='learning rate')

    # -- model / dataset --
    p.add_argument('--dataset_name', type=str, default='CricketX',
                   choices=['CricketX', 'UWaveGestureLibraryAll',
                            'InsectWingbeatSound', 'DodgerLoopDay',
                            'MFPT', 'XJTU'],
                   help='dataset')
    p.add_argument('--ucr_path', type=str, default='./datasets/',
                   help='Data root for dataset.')
    p.add_argument('--ckpt_dir', type=str, default='./ckpt/',
                   help='Data path for checkpoint.')

    # -- method --
    p.add_argument('--backbone', type=str, default='SimConv4')
    p.add_argument('--model_name', type=str, default='InterSample',
                   choices=['InterSample', 'IntraTemporal', 'SelfTime'], help='choose method')
    p.add_argument('--config_dir', type=str, default='./config', help='The Configuration Dir')

    return p.parse_args()
if __name__ == "__main__":
    import os
    # Pin the run to GPU 1; must be set before any CUDA context is created.
    os.environ['CUDA_VISIBLE_DEVICES']='1'
    import numpy as np
    opt = parse_option()
    exp = 'linear_eval'
    # The experiment is repeated over several seeds; one log row per seed.
    Seeds = [0, 1, 2, 3, 4]
    aug1 = ['magnitude_warp']
    aug2 = ['time_warp']
    # Per-dataset settings come from a JSON config named after the dataset.
    config_dict = get_config_from_json('{}/{}_config.json'.format(
        opt.config_dir, opt.dataset_name))
    opt.class_type = config_dict['class_type']
    opt.piece_size = config_dict['piece_size']
    # InterSample does not use piece_size/class_type in its tag, so it gets 'none'.
    if opt.model_name=='InterSample':
        model_paras = 'none'
    else:
        model_paras = '{}_{}'.format(opt.piece_size, opt.class_type)
    # Build the augmentation-name list used in directory names below.
    # NOTE(review): aug1/aug2 are lists here, so the `aug1 == aug2` branch would
    # nest a list inside a list and break '_'.join below; it is only reached
    # when the two lists are equal -- confirm before reusing that branch.
    if aug1 == aug2:
        opt.aug_type = [aug1]
    elif type(aug1) is list:
        opt.aug_type = aug1 + aug2
    else:
        opt.aug_type = [aug1, aug2]
    log_dir = './log/{}/{}/{}/{}/{}'.format(
        exp, opt.dataset_name, opt.model_name, '_'.join(opt.aug_type), model_paras)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # Append-mode training log: a timestamp, a header, then one row per seed.
    file2print_detail_train = open("{}/train_detail.log".format(log_dir), 'a+')
    print(datetime.datetime.now(), file=file2print_detail_train)
    print("Dataset\tTrain\tTest\tDimension\tClass\tSeed\tAcc_max\tEpoch_max", file=file2print_detail_train)
    file2print_detail_train.flush()
    # Result accumulators; unused in this excerpt -- presumably filled further
    # down in the file (confirm against the full source).
    ACCs = {}
    MAX_EPOCHs_seed = {}
    ACCs_seed = {}
    for seed in Seeds:
        # Seed both numpy and torch so each run is reproducible.
        np.random.seed(seed)
        torch.manual_seed(seed)
        opt.ckpt_dir = './ckpt/{}/{}/{}/{}/{}/{}'.format(
            exp, opt.model_name, opt.dataset_name, '_'.join(opt.aug_type),
            model_paras, str(seed))
        if not os.path.exists(opt.ckpt_dir):
            os.makedirs(opt.ckpt_dir)
        print('[INFO] Running at:', opt.dataset_name)
        x_train, y_train, x_val, y_val, x_test, y_test, nb_class, _ \
            = load_ucr2018(opt.ucr_path, opt.dataset_name)
        ################
        ## Train #######
        ################
        # Dispatch to the pre-training routine matching the chosen model; each
        # returns acc_max and epoch_max for this seed.
        if opt.model_name == 'InterSample':
            acc_max, epoch_max = pretrain_InterSampleRel(x_train, y_train, opt)
        elif 'IntraTemporal' in opt.model_name:
            acc_max, epoch_max = pretrain_IntraSampleRel(x_train, y_train, opt)
        elif 'SelfTime' in opt.model_name:
            acc_max, epoch_max = pretrain_SelfTime(x_train, y_train, opt)
        # Row: dataset, #train, #test, series length, #classes, seed, best acc, best epoch.
        print("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
            opt.dataset_name, x_train.shape[0], x_test.shape[0], x_train.shape[1], nb_class,
            seed, round(acc_max, 2), epoch_max),
            file=file2print_detail_train)
        file2print_detail_train.flush()
| [
"numpy.random.seed",
"os.makedirs",
"argparse.ArgumentParser",
"torch.manual_seed",
"os.path.exists",
"datetime.datetime.now"
] | [((179, 227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""argument for training"""'], {}), "('argument for training')\n", (202, 227), False, 'import argparse\n'), ((3352, 3375), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3366, 3375), False, 'import os\n'), ((3385, 3405), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3396, 3405), False, 'import os\n'), ((3497, 3520), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3518, 3520), False, 'import datetime\n'), ((3787, 3807), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3801, 3807), True, 'import numpy as np\n'), ((3816, 3839), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (3833, 3839), False, 'import torch\n'), ((4025, 4053), 'os.path.exists', 'os.path.exists', (['opt.ckpt_dir'], {}), '(opt.ckpt_dir)\n', (4039, 4053), False, 'import os\n'), ((4067, 4092), 'os.makedirs', 'os.makedirs', (['opt.ckpt_dir'], {}), '(opt.ckpt_dir)\n', (4078, 4092), False, 'import os\n')] |
import pyclesperanto_prototype as cle
import numpy as np
def _check_shear(shear_method_name, angle_kwarg, expected_index):
    """Apply one 45-degree shear to a single-voxel 5x5x5 volume and assert the voxel lands where expected.

    Parameters
    ----------
    shear_method_name : str
        Name of the AffineTransform3D shear method to call
        ('shear_in_x_plane', 'shear_in_y_plane' or 'shear_in_z_plane').
    angle_kwarg : str
        Keyword argument selecting the shear angle axis
        (e.g. 'angle_y_in_degrees').
    expected_index : tuple of int
        (z, y, x) index where the voxel originally at (1, 1, 1) must end up.
    """
    source = np.zeros((5, 5, 5))
    source[1, 1, 1] = 1
    reference = np.zeros((5, 5, 5))
    reference[expected_index] = 1

    transform = cle.AffineTransform3D()
    # Dispatch to the requested shear method with the requested angle axis.
    getattr(transform, shear_method_name)(**{angle_kwarg: 45})
    result = cle.affine_transform(source, transform=transform)

    a = cle.pull(result)
    b = cle.pull(reference)
    print(a)
    print(b)
    assert np.array_equal(a, b)


def test_affine_shear_y_in_x_plane():
    _check_shear('shear_in_x_plane', 'angle_y_in_degrees', (1, 2, 1))


def test_affine_shear_z_in_x_plane():
    _check_shear('shear_in_x_plane', 'angle_z_in_degrees', (2, 1, 1))


def test_affine_shear_x_in_y_plane():
    _check_shear('shear_in_y_plane', 'angle_x_in_degrees', (1, 1, 2))


def test_affine_shear_z_in_y_plane():
    _check_shear('shear_in_y_plane', 'angle_z_in_degrees', (2, 1, 1))


def test_affine_shear_x_in_z_plane():
    _check_shear('shear_in_z_plane', 'angle_x_in_degrees', (1, 1, 2))


def test_affine_shear_y_in_z_plane():
    _check_shear('shear_in_z_plane', 'angle_y_in_degrees', (1, 2, 1))
| [
"pyclesperanto_prototype.AffineTransform3D",
"numpy.zeros",
"pyclesperanto_prototype.affine_transform",
"pyclesperanto_prototype.pull",
"numpy.array_equal"
] | [((110, 129), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (118, 129), True, 'import numpy as np\n'), ((171, 190), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (179, 190), True, 'import numpy as np\n'), ((235, 258), 'pyclesperanto_prototype.AffineTransform3D', 'cle.AffineTransform3D', ([], {}), '()\n', (256, 258), True, 'import pyclesperanto_prototype as cle\n'), ((326, 375), 'pyclesperanto_prototype.affine_transform', 'cle.affine_transform', (['source'], {'transform': 'transform'}), '(source, transform=transform)\n', (346, 375), True, 'import pyclesperanto_prototype as cle\n'), ((385, 401), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (393, 401), True, 'import pyclesperanto_prototype as cle\n'), ((410, 429), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (418, 429), True, 'import pyclesperanto_prototype as cle\n'), ((470, 490), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (484, 490), True, 'import numpy as np\n'), ((545, 564), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (553, 564), True, 'import numpy as np\n'), ((606, 625), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (614, 625), True, 'import numpy as np\n'), ((670, 693), 'pyclesperanto_prototype.AffineTransform3D', 'cle.AffineTransform3D', ([], {}), '()\n', (691, 693), True, 'import pyclesperanto_prototype as cle\n'), ((761, 810), 'pyclesperanto_prototype.affine_transform', 'cle.affine_transform', (['source'], {'transform': 'transform'}), '(source, transform=transform)\n', (781, 810), True, 'import pyclesperanto_prototype as cle\n'), ((820, 836), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (828, 836), True, 'import pyclesperanto_prototype as cle\n'), ((845, 864), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (853, 864), True, 'import pyclesperanto_prototype as cle\n'), 
((905, 925), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (919, 925), True, 'import numpy as np\n'), ((980, 999), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (988, 999), True, 'import numpy as np\n'), ((1041, 1060), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1049, 1060), True, 'import numpy as np\n'), ((1105, 1128), 'pyclesperanto_prototype.AffineTransform3D', 'cle.AffineTransform3D', ([], {}), '()\n', (1126, 1128), True, 'import pyclesperanto_prototype as cle\n'), ((1197, 1246), 'pyclesperanto_prototype.affine_transform', 'cle.affine_transform', (['source'], {'transform': 'transform'}), '(source, transform=transform)\n', (1217, 1246), True, 'import pyclesperanto_prototype as cle\n'), ((1256, 1272), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (1264, 1272), True, 'import pyclesperanto_prototype as cle\n'), ((1281, 1300), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (1289, 1300), True, 'import pyclesperanto_prototype as cle\n'), ((1341, 1361), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (1355, 1361), True, 'import numpy as np\n'), ((1416, 1435), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1424, 1435), True, 'import numpy as np\n'), ((1477, 1496), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1485, 1496), True, 'import numpy as np\n'), ((1541, 1564), 'pyclesperanto_prototype.AffineTransform3D', 'cle.AffineTransform3D', ([], {}), '()\n', (1562, 1564), True, 'import pyclesperanto_prototype as cle\n'), ((1632, 1681), 'pyclesperanto_prototype.affine_transform', 'cle.affine_transform', (['source'], {'transform': 'transform'}), '(source, transform=transform)\n', (1652, 1681), True, 'import pyclesperanto_prototype as cle\n'), ((1691, 1707), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (1699, 1707), True, 'import pyclesperanto_prototype as 
cle\n'), ((1716, 1735), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (1724, 1735), True, 'import pyclesperanto_prototype as cle\n'), ((1776, 1796), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (1790, 1796), True, 'import numpy as np\n'), ((1851, 1870), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1859, 1870), True, 'import numpy as np\n'), ((1912, 1931), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (1920, 1931), True, 'import numpy as np\n'), ((1976, 1999), 'pyclesperanto_prototype.AffineTransform3D', 'cle.AffineTransform3D', ([], {}), '()\n', (1997, 1999), True, 'import pyclesperanto_prototype as cle\n'), ((2067, 2116), 'pyclesperanto_prototype.affine_transform', 'cle.affine_transform', (['source'], {'transform': 'transform'}), '(source, transform=transform)\n', (2087, 2116), True, 'import pyclesperanto_prototype as cle\n'), ((2126, 2142), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (2134, 2142), True, 'import pyclesperanto_prototype as cle\n'), ((2151, 2170), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (2159, 2170), True, 'import pyclesperanto_prototype as cle\n'), ((2211, 2231), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (2225, 2231), True, 'import numpy as np\n'), ((2286, 2305), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (2294, 2305), True, 'import numpy as np\n'), ((2347, 2366), 'numpy.zeros', 'np.zeros', (['(5, 5, 5)'], {}), '((5, 5, 5))\n', (2355, 2366), True, 'import numpy as np\n'), ((2411, 2434), 'pyclesperanto_prototype.AffineTransform3D', 'cle.AffineTransform3D', ([], {}), '()\n', (2432, 2434), True, 'import pyclesperanto_prototype as cle\n'), ((2502, 2551), 'pyclesperanto_prototype.affine_transform', 'cle.affine_transform', (['source'], {'transform': 'transform'}), '(source, transform=transform)\n', (2522, 2551), True, 'import 
pyclesperanto_prototype as cle\n'), ((2561, 2577), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (2569, 2577), True, 'import pyclesperanto_prototype as cle\n'), ((2586, 2605), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (2594, 2605), True, 'import pyclesperanto_prototype as cle\n'), ((2646, 2666), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (2660, 2666), True, 'import numpy as np\n')] |
import numpy as np
from perses.rjmc import geometry
from openmmtools import states, mcmc
from simtk import openmm, unit
from simtk.openmm import app
from openmmtools import cache
from perses.storage import NetCDFStorageView, NetCDFStorage
from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
from typing import List, Dict
# Keep at most one OpenMM context in the global openmmtools cache --
# presumably to bound GPU/host memory use across the many thermodynamic
# states visited during the calculation (confirm).
cache.global_context_cache.capacity = 1
class HydrationPersesRun(object):
    """
    This class encapsulates the necessary objects to run RJ calculations for relative hydration free energies
    """

    def __init__(self, molecules: List[str], output_filename: str, ncmc_switching_times: Dict[str, int], equilibrium_steps: Dict[str, int], timestep: unit.Quantity, initial_molecule: str=None, geometry_options: Dict=None):
        """Build topologies, systems, proposal engines and samplers for both environments.

        Parameters
        ----------
        molecules : List[str]
            SMILES strings of the molecules making up the design space.
        output_filename : str
            Name of the NetCDF file used for storage.
        ncmc_switching_times : Dict[str, int]
            NCMC switching lengths keyed by environment name ('explicit' / 'vacuum').
        equilibrium_steps : Dict[str, int]
            Equilibrium step counts keyed by environment name.
        timestep : unit.Quantity
            Integrator timestep (a quantity with time units).
        initial_molecule : str, optional
            SMILES of the starting molecule; when omitted one is drawn at
            random from `molecules`.
        geometry_options : Dict, optional
            Per-environment geometry-engine settings; expected keys are
            'n_torsion_divsions' and 'use_sterics' (note the spelling).
        """
        # Canonicalize SMILES so chemical-state keys agree with those produced
        # by the proposal engine.
        self._molecules = [SmallMoleculeSetProposalEngine.canonicalize_smiles(molecule) for molecule in molecules]
        environments = ['explicit', 'vacuum']
        # Ambient conditions used for the thermodynamic states below.
        temperature = 298.15 * unit.kelvin
        pressure = 1.0 * unit.atmospheres
        constraints = app.HBonds
        self._storage = NetCDFStorage(output_filename)
        self._ncmc_switching_times = ncmc_switching_times
        self._n_equilibrium_steps = equilibrium_steps
        self._geometry_options = geometry_options

        # Create a system generator for our desired forcefields.
        from perses.rjmc.topology_proposal import SystemGenerator
        system_generators = dict()
        from pkg_resources import resource_filename
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        # Explicit solvent runs under a Monte Carlo barostat (NPT); vacuum does not.
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators['explicit'] = SystemGenerator([gaff_xml_filename, 'tip3p.xml'],
                                                        forcefield_kwargs={'nonbondedMethod': app.PME,
                                                                           'nonbondedCutoff': 9.0 * unit.angstrom,
                                                                           'implicitSolvent': None,
                                                                           'constraints': constraints,
                                                                           'ewaldErrorTolerance': 1e-5,
                                                                           'hydrogenMass': 3.0*unit.amu},
                                                        barostat=barostat)
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename],
                                                      forcefield_kwargs={'nonbondedMethod': app.NoCutoff,
                                                                         'implicitSolvent': None,
                                                                         'constraints': constraints,
                                                                         'hydrogenMass': 3.0*unit.amu})

        #
        # Create topologies and positions
        #
        topologies = dict()
        positions = dict()

        from openmoltools import forcefield_generators
        forcefield = app.ForceField(gaff_xml_filename, 'tip3p.xml')
        forcefield.registerTemplateGenerator(forcefield_generators.gaffTemplateGenerator)

        # Create molecule in vacuum.
        from perses.utils.openeye import extractPositionsFromOEMol
        from openmoltools.openeye import smiles_to_oemol, generate_conformers
        if initial_molecule:
            smiles = initial_molecule
        else:
            smiles = np.random.choice(molecules)
        molecule = smiles_to_oemol(smiles)
        molecule = generate_conformers(molecule, max_confs=1)
        topologies['vacuum'] = forcefield_generators.generateTopologyFromOEMol(molecule)
        positions['vacuum'] = extractPositionsFromOEMol(molecule)

        # Create molecule in solvent.
        modeller = app.Modeller(topologies['vacuum'], positions['vacuum'])
        modeller.addSolvent(forcefield, model='tip3p', padding=9.0 * unit.angstrom)
        topologies['explicit'] = modeller.getTopology()
        positions['explicit'] = modeller.getPositions()

        # Set up the proposal engines.
        proposal_metadata = {}
        proposal_engines = dict()
        for environment in environments:
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(self._molecules,
                                                                           system_generators[environment])

        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])

        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        thermodynamic_states['explicit'] = states.ThermodynamicState(system=systems['explicit'],
                                                                     temperature=temperature, pressure=pressure)
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)

        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            # Each environment writes to its own view of the shared storage.
            storage = NetCDFStorageView(self._storage, envname=environment)

            if self._geometry_options:
                # Use the caller-supplied torsion discretization / sterics settings.
                n_torsion_divisions = self._geometry_options['n_torsion_divsions'][environment]
                use_sterics = self._geometry_options['use_sterics'][environment]
            else:
                n_torsion_divisions = 180
                use_sterics = False

            geometry_engine = geometry.FFAllAngleGeometryEngine(storage=storage, n_torsion_divisions=n_torsion_divisions, use_sterics=use_sterics)
            move = mcmc.LangevinSplittingDynamicsMove(timestep=timestep, splitting="V R O R V",
                                                      n_restart_attempts=10)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment == 'explicit':
                # The explicit-solvent sampler state needs periodic box vectors.
                sampler_state = states.SamplerState(positions=positions[environment],
                                                    box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])

            mcmc_samplers[environment] = mcmc.MCMCSampler(thermodynamic_states[environment], sampler_state, move)

            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment],
                                                                chemical_state_key, proposal_engines[environment],
                                                                geometry_engine,
                                                                options={'nsteps': self._ncmc_switching_times[environment]}, storage=storage, ncmc_write_interval=self._ncmc_switching_times[environment])
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True

        # Create test MultiTargetDesign sampler.
        # Explicit weighted +1 and vacuum -1, i.e. the design target is their
        # difference (the relative hydration free energy).
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = {sams_samplers['explicit']: 1.0, sams_samplers['vacuum']: -1.0}
        designer = MultiTargetDesign(target_samplers, storage=self._storage)

        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
if __name__=="__main__":
import yaml
import openeye.oechem as oechem
import sys
from perses.tests.utils import sanitizeSMILES
with open(sys.argv[1], 'r') as option_file:
options_dictionary = yaml.load(option_file)
#read the molecules into a list:
molecule_file = options_dictionary['molecule_file']
with open(molecule_file, 'r') as molecule_input_file:
molecule_string = molecule_input_file.read()
molecules = molecule_string.split("\n")
valid_molecules = []
for molecule in molecules:
mol = oechem.OEMol()
oechem.OESmilesToMol(mol, molecule)
if mol.NumAtoms() == 0:
continue
valid_molecules.append(molecule)
sanitized_molecules = sanitizeSMILES(valid_molecules)
hydration_run = HydrationPersesRun(sanitized_molecules, options_dictionary['output_filename'],
options_dictionary['ncmc_switching_times'],
options_dictionary['equilibrium_steps'],
options_dictionary['timestep'] * unit.femtoseconds)
for environment in ['explicit', 'vacuum']:
hydration_run.exen_samplers[environment].verbose = True
hydration_run.sams_samplers[environment].verbose = True
hydration_run.designer.verbose = True
n_iterations = options_dictionary['n_iterations']
phase = options_dictionary['phase']
if phase == "vacuum":
hydration_run.sams_samplers['vacuum'].run(niterations=n_iterations)
elif phase == "explicit":
hydration_run.sams_samplers['explicit'].run(niterations=n_iterations)
elif phase == "multitarget":
hydration_run.designer.run(niterations=n_iterations)
else:
raise ValueError("Phase needs to be either vacuum, solvent, or multitarget.")
| [
"yaml.load",
"pkg_resources.resource_filename",
"openeye.oechem.OESmilesToMol",
"openmmtools.states.SamplerState",
"openmoltools.forcefield_generators.generateTopologyFromOEMol",
"openmmtools.mcmc.MCMCSampler",
"perses.utils.openeye.extractPositionsFromOEMol",
"perses.rjmc.topology_proposal.SmallMolec... | [((8859, 8890), 'perses.tests.utils.sanitizeSMILES', 'sanitizeSMILES', (['valid_molecules'], {}), '(valid_molecules)\n', (8873, 8890), False, 'from perses.tests.utils import sanitizeSMILES\n'), ((1072, 1102), 'perses.storage.NetCDFStorage', 'NetCDFStorage', (['output_filename'], {}), '(output_filename)\n', (1085, 1102), False, 'from perses.storage import NetCDFStorageView, NetCDFStorage\n'), ((1512, 1556), 'pkg_resources.resource_filename', 'resource_filename', (['"""perses"""', '"""data/gaff.xml"""'], {}), "('perses', 'data/gaff.xml')\n", (1529, 1556), False, 'from pkg_resources import resource_filename\n'), ((1576, 1624), 'simtk.openmm.MonteCarloBarostat', 'openmm.MonteCarloBarostat', (['pressure', 'temperature'], {}), '(pressure, temperature)\n', (1601, 1624), False, 'from simtk import openmm, unit\n'), ((1665, 1954), 'perses.rjmc.topology_proposal.SystemGenerator', 'SystemGenerator', (["[gaff_xml_filename, 'tip3p.xml']"], {'forcefield_kwargs': "{'nonbondedMethod': app.PME, 'nonbondedCutoff': 9.0 * unit.angstrom,\n 'implicitSolvent': None, 'constraints': constraints,\n 'ewaldErrorTolerance': 1e-05, 'hydrogenMass': 3.0 * unit.amu}", 'barostat': 'barostat'}), "([gaff_xml_filename, 'tip3p.xml'], forcefield_kwargs={\n 'nonbondedMethod': app.PME, 'nonbondedCutoff': 9.0 * unit.angstrom,\n 'implicitSolvent': None, 'constraints': constraints,\n 'ewaldErrorTolerance': 1e-05, 'hydrogenMass': 3.0 * unit.amu}, barostat\n =barostat)\n", (1680, 1954), False, 'from perses.rjmc.topology_proposal import SystemGenerator\n'), ((2459, 2641), 'perses.rjmc.topology_proposal.SystemGenerator', 'SystemGenerator', (['[gaff_xml_filename]'], {'forcefield_kwargs': "{'nonbondedMethod': app.NoCutoff, 'implicitSolvent': None, 'constraints':\n constraints, 'hydrogenMass': 3.0 * unit.amu}"}), "([gaff_xml_filename], forcefield_kwargs={'nonbondedMethod':\n app.NoCutoff, 'implicitSolvent': None, 'constraints': constraints,\n 'hydrogenMass': 3.0 * 
unit.amu})\n", (2474, 2641), False, 'from perses.rjmc.topology_proposal import SystemGenerator\n'), ((3100, 3146), 'simtk.openmm.app.ForceField', 'app.ForceField', (['gaff_xml_filename', '"""tip3p.xml"""'], {}), "(gaff_xml_filename, 'tip3p.xml')\n", (3114, 3146), False, 'from simtk.openmm import app\n'), ((3569, 3592), 'openmoltools.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (3584, 3592), False, 'from openmoltools.openeye import smiles_to_oemol, generate_conformers\n'), ((3612, 3654), 'openmoltools.openeye.generate_conformers', 'generate_conformers', (['molecule'], {'max_confs': '(1)'}), '(molecule, max_confs=1)\n', (3631, 3654), False, 'from openmoltools.openeye import smiles_to_oemol, generate_conformers\n'), ((3686, 3743), 'openmoltools.forcefield_generators.generateTopologyFromOEMol', 'forcefield_generators.generateTopologyFromOEMol', (['molecule'], {}), '(molecule)\n', (3733, 3743), False, 'from openmoltools import forcefield_generators\n'), ((3774, 3809), 'perses.utils.openeye.extractPositionsFromOEMol', 'extractPositionsFromOEMol', (['molecule'], {}), '(molecule)\n', (3799, 3809), False, 'from perses.utils.openeye import extractPositionsFromOEMol\n'), ((3868, 3923), 'simtk.openmm.app.Modeller', 'app.Modeller', (["topologies['vacuum']", "positions['vacuum']"], {}), "(topologies['vacuum'], positions['vacuum'])\n", (3880, 3923), False, 'from simtk.openmm import app\n'), ((4801, 4903), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['explicit']", 'temperature': 'temperature', 'pressure': 'pressure'}), "(system=systems['explicit'], temperature=\n temperature, pressure=pressure)\n", (4826, 4903), False, 'from openmmtools import states, mcmc\n'), ((5009, 5085), 'openmmtools.states.ThermodynamicState', 'states.ThermodynamicState', ([], {'system': "systems['vacuum']", 'temperature': 'temperature'}), "(system=systems['vacuum'], temperature=temperature)\n", (5034, 5085), False, 'from 
openmmtools import states, mcmc\n'), ((7565, 7622), 'perses.samplers.samplers.MultiTargetDesign', 'MultiTargetDesign', (['target_samplers'], {'storage': 'self._storage'}), '(target_samplers, storage=self._storage)\n', (7582, 7622), False, 'from perses.samplers.samplers import MultiTargetDesign\n'), ((8336, 8358), 'yaml.load', 'yaml.load', (['option_file'], {}), '(option_file)\n', (8345, 8358), False, 'import yaml\n'), ((8679, 8693), 'openeye.oechem.OEMol', 'oechem.OEMol', ([], {}), '()\n', (8691, 8693), True, 'import openeye.oechem as oechem\n'), ((8702, 8737), 'openeye.oechem.OESmilesToMol', 'oechem.OESmilesToMol', (['mol', 'molecule'], {}), '(mol, molecule)\n', (8722, 8737), True, 'import openeye.oechem as oechem\n'), ((796, 856), 'perses.rjmc.topology_proposal.SmallMoleculeSetProposalEngine.canonicalize_smiles', 'SmallMoleculeSetProposalEngine.canonicalize_smiles', (['molecule'], {}), '(molecule)\n', (846, 856), False, 'from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine\n'), ((3522, 3549), 'numpy.random.choice', 'np.random.choice', (['molecules'], {}), '(molecules)\n', (3538, 3549), True, 'import numpy as np\n'), ((4311, 4390), 'perses.rjmc.topology_proposal.SmallMoleculeSetProposalEngine', 'SmallMoleculeSetProposalEngine', (['self._molecules', 'system_generators[environment]'], {}), '(self._molecules, system_generators[environment])\n', (4341, 4390), False, 'from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine\n'), ((5356, 5409), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['self._storage'], {'envname': 'environment'}), '(self._storage, envname=environment)\n', (5373, 5409), False, 'from perses.storage import NetCDFStorageView, NetCDFStorage\n'), ((5755, 5876), 'perses.rjmc.geometry.FFAllAngleGeometryEngine', 'geometry.FFAllAngleGeometryEngine', ([], {'storage': 'storage', 'n_torsion_divisions': 'n_torsion_divisions', 'use_sterics': 'use_sterics'}), '(storage=storage, n_torsion_divisions=\n 
n_torsion_divisions, use_sterics=use_sterics)\n', (5788, 5876), False, 'from perses.rjmc import geometry\n'), ((5891, 5994), 'openmmtools.mcmc.LangevinSplittingDynamicsMove', 'mcmc.LangevinSplittingDynamicsMove', ([], {'timestep': 'timestep', 'splitting': '"""V R O R V"""', 'n_restart_attempts': '(10)'}), "(timestep=timestep, splitting='V R O R V',\n n_restart_attempts=10)\n", (5925, 5994), False, 'from openmmtools import states, mcmc\n'), ((6542, 6614), 'openmmtools.mcmc.MCMCSampler', 'mcmc.MCMCSampler', (['thermodynamic_states[environment]', 'sampler_state', 'move'], {}), '(thermodynamic_states[environment], sampler_state, move)\n', (6558, 6614), False, 'from openmmtools import states, mcmc\n'), ((6658, 6954), 'perses.samplers.samplers.ExpandedEnsembleSampler', 'ExpandedEnsembleSampler', (['mcmc_samplers[environment]', 'topologies[environment]', 'chemical_state_key', 'proposal_engines[environment]', 'geometry_engine'], {'options': "{'nsteps': self._ncmc_switching_times[environment]}", 'storage': 'storage', 'ncmc_write_interval': 'self._ncmc_switching_times[environment]'}), "(mcmc_samplers[environment], topologies[environment],\n chemical_state_key, proposal_engines[environment], geometry_engine,\n options={'nsteps': self._ncmc_switching_times[environment]}, storage=\n storage, ncmc_write_interval=self._ncmc_switching_times[environment])\n", (6681, 6954), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((7232, 7288), 'perses.samplers.samplers.SAMSSampler', 'SAMSSampler', (['exen_samplers[environment]'], {'storage': 'storage'}), '(exen_samplers[environment], storage=storage)\n', (7243, 7288), False, 'from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler\n'), ((6447, 6500), 'openmmtools.states.SamplerState', 'states.SamplerState', ([], {'positions': 'positions[environment]'}), '(positions=positions[environment])\n', (6466, 6500), False, 'from openmmtools import states, mcmc\n')] |
import featureflow
import numpy as np
import unittest2
from .preprocess import \
UnitNorm, MeanStdNormalization, Binarize, PreprocessingPipeline, Log
from zounds.spectral import \
GeometricScale, FrequencyAdaptive
from zounds.timeseries import Seconds, TimeDimension
from zounds.util import simple_in_memory_settings
class TestPipeline(unittest2.TestCase):
    # Tests for PreprocessingPipeline: a pipeline built from invertible steps
    # should round-trip data through transform/inverse_transform, while a
    # pipeline containing a non-invertible step must refuse to invert.

    def test_cannot_invert_pipeline_if_any_steps_are_missing(self):
        # Binarize presumably has no inverse, so inverting the transform
        # result should raise NotImplementedError -- confirm against
        # preprocess.Binarize.
        class Settings(featureflow.PersistenceSettings):
            id_provider = featureflow.UuidProvider()
            key_builder = featureflow.StringDelimitedKeyBuilder()
            database = featureflow.InMemoryDatabase(key_builder=key_builder)

        class Model(featureflow.BaseModel, Settings):
            unitnorm = featureflow.PickleFeature(
                UnitNorm,
                store=False)

            binary = featureflow.PickleFeature(
                Binarize,
                needs=unitnorm,
                store=False)

            pipeline = featureflow.PickleFeature(
                PreprocessingPipeline,
                needs=(unitnorm, binary),
                store=True)

        # Fit the pipeline on random training data, then transform new data.
        data = np.random.random_sample((1000, 4))
        _id = Model.process(unitnorm=data)
        example = np.random.random_sample((10, 4))
        model = Model(_id)
        transformed = model.pipeline.transform(example)
        self.assertRaises(
            NotImplementedError, lambda: transformed.inverse_transform())

    def test_can_invert_pipeline(self):
        # UnitNorm -> MeanStd are both invertible, so inverse_transform must
        # recover the input to within floating-point tolerance.
        class Settings(featureflow.PersistenceSettings):
            id_provider = featureflow.UuidProvider()
            key_builder = featureflow.StringDelimitedKeyBuilder()
            database = featureflow.InMemoryDatabase(key_builder=key_builder)

        class Model(featureflow.BaseModel, Settings):
            unitnorm = featureflow.PickleFeature(
                UnitNorm,
                store=False)

            meanstd = featureflow.PickleFeature(
                MeanStdNormalization,
                needs=unitnorm,
                store=False)

            pipeline = featureflow.PickleFeature(
                PreprocessingPipeline,
                needs=(unitnorm, meanstd),
                store=True)

        data = np.random.random_sample((1000, 4))
        _id = Model.process(unitnorm=data)
        example = np.random.random_sample((10, 4))
        model = Model(_id)
        transformed = model.pipeline.transform(example)
        reconstructed = transformed.inverse_transform()
        # Round-trip error should be tiny (tolerance 1e-5).
        diff = np.abs(example - reconstructed)
        self.assertTrue(np.all(diff < .00001))

    def test_can_invert_pipeline_with_log(self):
        # Same round-trip check with a Log step in front of MeanStd.
        class Settings(featureflow.PersistenceSettings):
            id_provider = featureflow.UuidProvider()
            key_builder = featureflow.StringDelimitedKeyBuilder()
            database = featureflow.InMemoryDatabase(key_builder=key_builder)

        class Model(featureflow.BaseModel, Settings):
            log = featureflow.PickleFeature(
                Log,
                store=False)

            meanstd = featureflow.PickleFeature(
                MeanStdNormalization,
                needs=log,
                store=False)

            pipeline = featureflow.PickleFeature(
                PreprocessingPipeline,
                needs=(log, meanstd),
                store=True)

        data = np.random.random_sample((1000, 4))
        _id = Model.process(log=data)
        example = np.random.random_sample((10, 4))
        model = Model(_id)
        transformed = model.pipeline.transform(example)
        reconstructed = transformed.inverse_transform()
        diff = np.abs(example - reconstructed)
        self.assertTrue(np.all(diff < .00001))

    def test_can_invert_pipeline_that_takes_frequency_adaptive_transform(self):
        # Ragged FrequencyAdaptive input (one band per width 1..10) must keep
        # its type and shape through a transform/inverse_transform round-trip.
        td = TimeDimension(frequency=Seconds(1))
        scale = GeometricScale(20, 5000, 0.05, 10)
        arrs = [np.zeros((10, x)) for x in range(1, 11)]
        fa = FrequencyAdaptive(arrs, td, scale)

        @simple_in_memory_settings
        class Model(featureflow.BaseModel):
            log = featureflow.PickleFeature(
                Log,
                store=False)

            meanstd = featureflow.PickleFeature(
                MeanStdNormalization,
                needs=log,
                store=False)

            pipeline = featureflow.PickleFeature(
                PreprocessingPipeline,
                needs=(log, meanstd),
                store=True)

        _id = Model.process(log=fa)
        model = Model(_id)
        result = model.pipeline.transform(fa)
        recon = result.inverse_transform()
        self.assertIsInstance(recon, FrequencyAdaptive)
        self.assertEqual(fa.shape, recon.shape)
"numpy.abs",
"zounds.spectral.GeometricScale",
"numpy.random.random_sample",
"numpy.zeros",
"numpy.all",
"zounds.spectral.FrequencyAdaptive",
"featureflow.UuidProvider",
"featureflow.PickleFeature",
"featureflow.StringDelimitedKeyBuilder",
"zounds.timeseries.Seconds",
"featureflow.InMemoryDataba... | [((1161, 1195), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000, 4)'], {}), '((1000, 4))\n', (1184, 1195), True, 'import numpy as np\n'), ((1257, 1289), 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 4)'], {}), '((10, 4))\n', (1280, 1289), True, 'import numpy as np\n'), ((2254, 2288), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000, 4)'], {}), '((1000, 4))\n', (2277, 2288), True, 'import numpy as np\n'), ((2350, 2382), 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 4)'], {}), '((10, 4))\n', (2373, 2382), True, 'import numpy as np\n'), ((2537, 2568), 'numpy.abs', 'np.abs', (['(example - reconstructed)'], {}), '(example - reconstructed)\n', (2543, 2568), True, 'import numpy as np\n'), ((3385, 3419), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000, 4)'], {}), '((1000, 4))\n', (3408, 3419), True, 'import numpy as np\n'), ((3476, 3508), 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 4)'], {}), '((10, 4))\n', (3499, 3508), True, 'import numpy as np\n'), ((3663, 3694), 'numpy.abs', 'np.abs', (['(example - reconstructed)'], {}), '(example - reconstructed)\n', (3669, 3694), True, 'import numpy as np\n'), ((3888, 3922), 'zounds.spectral.GeometricScale', 'GeometricScale', (['(20)', '(5000)', '(0.05)', '(10)'], {}), '(20, 5000, 0.05, 10)\n', (3902, 3922), False, 'from zounds.spectral import GeometricScale, FrequencyAdaptive\n'), ((3993, 4027), 'zounds.spectral.FrequencyAdaptive', 'FrequencyAdaptive', (['arrs', 'td', 'scale'], {}), '(arrs, td, scale)\n', (4010, 4027), False, 'from zounds.spectral import GeometricScale, FrequencyAdaptive\n'), ((519, 545), 'featureflow.UuidProvider', 'featureflow.UuidProvider', ([], {}), '()\n', (543, 545), False, 'import featureflow\n'), ((572, 611), 'featureflow.StringDelimitedKeyBuilder', 'featureflow.StringDelimitedKeyBuilder', ([], {}), '()\n', (609, 611), False, 'import featureflow\n'), ((635, 688), 
'featureflow.InMemoryDatabase', 'featureflow.InMemoryDatabase', ([], {'key_builder': 'key_builder'}), '(key_builder=key_builder)\n', (663, 688), False, 'import featureflow\n'), ((767, 815), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['UnitNorm'], {'store': '(False)'}), '(UnitNorm, store=False)\n', (792, 815), False, 'import featureflow\n'), ((871, 935), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['Binarize'], {'needs': 'unitnorm', 'store': '(False)'}), '(Binarize, needs=unitnorm, store=False)\n', (896, 935), False, 'import featureflow\n'), ((1009, 1099), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['PreprocessingPipeline'], {'needs': '(unitnorm, binary)', 'store': '(True)'}), '(PreprocessingPipeline, needs=(unitnorm, binary),\n store=True)\n', (1034, 1099), False, 'import featureflow\n'), ((1598, 1624), 'featureflow.UuidProvider', 'featureflow.UuidProvider', ([], {}), '()\n', (1622, 1624), False, 'import featureflow\n'), ((1651, 1690), 'featureflow.StringDelimitedKeyBuilder', 'featureflow.StringDelimitedKeyBuilder', ([], {}), '()\n', (1688, 1690), False, 'import featureflow\n'), ((1714, 1767), 'featureflow.InMemoryDatabase', 'featureflow.InMemoryDatabase', ([], {'key_builder': 'key_builder'}), '(key_builder=key_builder)\n', (1742, 1767), False, 'import featureflow\n'), ((1846, 1894), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['UnitNorm'], {'store': '(False)'}), '(UnitNorm, store=False)\n', (1871, 1894), False, 'import featureflow\n'), ((1951, 2027), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['MeanStdNormalization'], {'needs': 'unitnorm', 'store': '(False)'}), '(MeanStdNormalization, needs=unitnorm, store=False)\n', (1976, 2027), False, 'import featureflow\n'), ((2101, 2192), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['PreprocessingPipeline'], {'needs': '(unitnorm, meanstd)', 'store': '(True)'}), '(PreprocessingPipeline, needs=(unitnorm, meanstd),\n store=True)\n', 
(2126, 2192), False, 'import featureflow\n'), ((2593, 2613), 'numpy.all', 'np.all', (['(diff < 1e-05)'], {}), '(diff < 1e-05)\n', (2599, 2613), True, 'import numpy as np\n'), ((2749, 2775), 'featureflow.UuidProvider', 'featureflow.UuidProvider', ([], {}), '()\n', (2773, 2775), False, 'import featureflow\n'), ((2802, 2841), 'featureflow.StringDelimitedKeyBuilder', 'featureflow.StringDelimitedKeyBuilder', ([], {}), '()\n', (2839, 2841), False, 'import featureflow\n'), ((2865, 2918), 'featureflow.InMemoryDatabase', 'featureflow.InMemoryDatabase', ([], {'key_builder': 'key_builder'}), '(key_builder=key_builder)\n', (2893, 2918), False, 'import featureflow\n'), ((2992, 3035), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['Log'], {'store': '(False)'}), '(Log, store=False)\n', (3017, 3035), False, 'import featureflow\n'), ((3092, 3163), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['MeanStdNormalization'], {'needs': 'log', 'store': '(False)'}), '(MeanStdNormalization, needs=log, store=False)\n', (3117, 3163), False, 'import featureflow\n'), ((3237, 3323), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['PreprocessingPipeline'], {'needs': '(log, meanstd)', 'store': '(True)'}), '(PreprocessingPipeline, needs=(log, meanstd),\n store=True)\n', (3262, 3323), False, 'import featureflow\n'), ((3719, 3739), 'numpy.all', 'np.all', (['(diff < 1e-05)'], {}), '(diff < 1e-05)\n', (3725, 3739), True, 'import numpy as np\n'), ((3939, 3956), 'numpy.zeros', 'np.zeros', (['(10, x)'], {}), '((10, x))\n', (3947, 3956), True, 'import numpy as np\n'), ((4126, 4169), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['Log'], {'store': '(False)'}), '(Log, store=False)\n', (4151, 4169), False, 'import featureflow\n'), ((4226, 4297), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['MeanStdNormalization'], {'needs': 'log', 'store': '(False)'}), '(MeanStdNormalization, needs=log, store=False)\n', (4251, 4297), False, 'import 
featureflow\n'), ((4371, 4457), 'featureflow.PickleFeature', 'featureflow.PickleFeature', (['PreprocessingPipeline'], {'needs': '(log, meanstd)', 'store': '(True)'}), '(PreprocessingPipeline, needs=(log, meanstd),\n store=True)\n', (4396, 4457), False, 'import featureflow\n'), ((3860, 3870), 'zounds.timeseries.Seconds', 'Seconds', (['(1)'], {}), '(1)\n', (3867, 3870), False, 'from zounds.timeseries import Seconds, TimeDimension\n')] |
import argparse
import logging
import os
import sys
from typing import Tuple
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from brc_pytorch.datasets import BRCDataset
from brc_pytorch.layers import (
BistableRecurrentCell, MultiLayerBase, NeuromodulatedBistableRecurrentCell
)
def permute_reshape(output, batch, num_directions, hidden_size):
    """Move the batch axis first and flatten direction/hidden axes.

    Takes a (num_directions, batch, hidden_size) tensor and returns a
    (batch, num_directions * hidden_size) tensor suitable for a linear layer.
    """
    swapped = output.permute(1, 0, 2)
    expected = torch.Size([batch, num_directions, hidden_size])
    assert swapped.size() == expected
    return swapped.reshape(batch, num_directions * hidden_size)
def generate_sample(n: int) -> Tuple:
    """Draw one example for the copy-first-input task.

    Args:
        n (int): Lag size, i.e. the length of the generated series.

    Returns:
        Tuple: The 1D time series and its first element -- the value the
        network must remember for n steps.
    """
    target = np.random.randn()
    noise = np.random.randn(n - 1)
    series = np.concatenate([[target], noise])
    return series, target
# Command-line interface: two required positional output directories.
parser = argparse.ArgumentParser()
parser.add_argument(
    'model_path', type=str, help='Path to save the best performing model.'
)
parser.add_argument(
    'results_path', type=str, help='Path to save the loss plots.'
)
# get device
# Prefer a CUDA device when available; models and batches are moved here.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(model_path: str, results_path: str) -> None:
    """Run the copy-first-input benchmark for every cell type and lag length.

    For each sequence length in ``zs`` and each recurrent cell
    (LSTM, GRU, nBRC, BRC), trains a 2-layer network to predict the first
    element of a random sequence, saves per-iteration/per-epoch losses as
    .npy files, checkpoints the best model, and plots training curves.

    Args:
        model_path (string): Path where the best model should be saved.
        results_path (string): Path where the results should be saved.
    """
    logging.basicConfig(
        handlers=[
            logging.
            FileHandler(os.path.join(results_path, "BRC_benchmark1.log")),
            logging.StreamHandler(sys.stdout)
        ]
    )
    logger = logging.getLogger('Benchmark1_BRC')
    logger.setLevel(logging.DEBUG)
    zs = [5, 100, 300]
    test_size = 5000
    lines = ['dotted', '-', '--']
    colours = sns.color_palette("husl", 5)
    line = -1
    for z in zs:
        line += 1
        c = -1
        fig, ax = plt.subplots(figsize=(10, 5), constrained_layout=True)
        ax.set_title(
            f'Copy-First-Input Training Loss of Various Recurrent Cells for Sequence Length {z}',
            fontsize=12,
            fontweight='medium'
        )
        ax.set_xlabel('Number of Gradient Iterations')
        ax.set_ylabel('MSE Loss')
        for name, cell in zip(
            ["LSTM", "GRU", "nBRC", "BRC"], [
                nn.LSTMCell, nn.GRUCell, NeuromodulatedBistableRecurrentCell,
                BistableRecurrentCell
            ]
        ):
            c += 1
            save_here = os.path.join(model_path, f'{name}_{z}')
            # Build 50k (sequence, first-element) pairs; the last test_size
            # samples become the held-out set.
            inputs = []
            outputs = []
            for i in range(50000):
                inp, out = generate_sample(z)
                inputs.append(inp)
                outputs.append(out)
            inputs = np.array(inputs)
            outputs = np.array(outputs)
            inputs_train = np.expand_dims(inputs, axis=2).astype(np.float32)[:-test_size]
            inputs_test = np.expand_dims(inputs, axis=2).astype(np.float32)[-test_size:]
            outputs_train = np.expand_dims(outputs, axis=1).astype(np.float32)[:-test_size]
            outputs_test = np.expand_dims(outputs, axis=1).astype(np.float32)[-test_size:]
            dataset_train = BRCDataset(inputs_train, outputs_train)
            dataset_test = BRCDataset(inputs_test, outputs_test)
            training_loader = DataLoader(
                dataset_train, batch_size=100, shuffle=True
            )
            test_loader = DataLoader(
                dataset_test, batch_size=100, shuffle=True
            )
            # Build the recurrent stack and the final regression head.
            logger.info(
                "Training network with cells of type {} with a lag of {} time-steps"
                .format(name, z)
            )
            logger.info("---------------------")
            input_size = inputs_train.shape[2]
            hidden_size = 100
            num_layers = 2
            bidirectional = False
            num_directions = 2 if bidirectional else 1
            inner_input_dimensions = num_directions * hidden_size
            recurrent_layers = [cell(input_size, hidden_size)]
            for _ in range(num_layers - 1):
                recurrent_layers.append(
                    cell(inner_input_dimensions, hidden_size)
                )
            rnn = MultiLayerBase(
                name,
                recurrent_layers,
                hidden_size,
                batch_first=True,
                bidirectional=bidirectional,
                return_sequences=False,
                device=device
            )
            fc = nn.Linear(hidden_size * num_directions, 1)
            # LSTMCell returns (h, c); itemgetter(0) extracts the hidden state.
            get_hidden = itemgetter(0)
            model = nn.ModuleDict({'rnn': rnn, 'fc': fc}).to(device)
            loss_fn = nn.MSELoss()
            optimiser = torch.optim.Adam(model.parameters())
            epochs = 60
            min_loss = np.inf
            train_allepochs_losses = []
            test_allepochs_losses = []
            train_epochs_avg_losses = []
            test_epochs_avg_losses = []
            with torch.autograd.set_detect_anomaly(True):
                grad_iterations = 0
                for e in range(epochs):
                    train_loss_epoch_avg = 0
                    model.train()
                    logger.info("=== Epoch [{}/{}]".format(e + 1, epochs))
                    for idx, (x_batch, y_batch) in enumerate(training_loader):
                        x_batch, y_batch = x_batch.to(device
                                                      ), y_batch.to(device)
                        pred_train = model['rnn'](x_batch)
                        if name == "LSTM":
                            pred_train = get_hidden(pred_train)
                        pred_train = permute_reshape(
                            pred_train, x_batch.size(0), num_directions,
                            hidden_size
                        )
                        pred_train = model['fc'](pred_train)
                        train_loss = loss_fn(pred_train, y_batch)
                        optimiser.zero_grad()
                        train_loss.backward()
                        optimiser.step()
                        train_allepochs_losses.append(
                            train_loss.data.cpu().numpy()
                        )
                        grad_iterations += 1
                        # Running mean of the per-batch losses for this epoch.
                        train_loss_epoch_avg = (
                            train_loss_epoch_avg * idx +
                            train_loss.data.cpu().numpy()
                        ) / (idx + 1)
                        logger.info(
                            "Train Loss = {}".format(
                                train_loss.data.cpu().numpy()
                            )
                        )
                    train_epochs_avg_losses.append(train_loss_epoch_avg)
                    model.eval()
                    test_loss_epoch_avg = 0
                    for idx, (x_test, y_test) in enumerate(test_loader):
                        x_test, y_test = x_test.to(device), y_test.to(device)
                        pred_test = model['rnn'](x_test)
                        if name == "LSTM":
                            pred_test = get_hidden(pred_test)
                        # BUGFIX: the reshaped tensor was previously assigned
                        # to pred_train and discarded, so the linear head saw
                        # the raw (num_directions, batch, hidden) output.
                        pred_test = permute_reshape(
                            pred_test, x_test.size(0), num_directions,
                            hidden_size
                        )
                        pred_test = model['fc'](pred_test)
                        test_loss = loss_fn(pred_test, y_test)
                        test_allepochs_losses.append(
                            test_loss.data.cpu().numpy()
                        )
                        test_loss_epoch_avg = (
                            test_loss_epoch_avg * idx +
                            test_loss.data.cpu().numpy()
                        ) / (idx + 1)
                    logger.info("Test Loss = {}".format(test_loss_epoch_avg))
                    # BUGFIX: the per-epoch average was previously appended to
                    # test_allepochs_losses, leaving test_epochs_avg_losses
                    # (saved below as ValidAvgLoss_*) permanently empty.
                    test_epochs_avg_losses.append(test_loss_epoch_avg)
                    if test_loss_epoch_avg < min_loss:
                        min_loss = test_loss_epoch_avg
                        torch.save(
                            {
                                'epoch': e,
                                'model_state_dict': model.state_dict(),
                                'optimizer_state_dict': optimiser.state_dict(),
                                'train_loss': train_loss,
                                'test_loss': test_loss
                            }, save_here
                        )
                    # Early stop once the task is effectively solved.
                    if test_loss_epoch_avg < 0.1:
                        break
            np.save(
                os.path.join(results_path, f'TrainLoss_AllE_{name}_{z}'),
                train_allepochs_losses
            )
            np.save(
                os.path.join(results_path, f'TrainAvgLoss_AllE_{name}_{z}'),
                train_epochs_avg_losses
            )
            np.save(
                os.path.join(results_path, f'ValidLoss_AllE_{name}_{z}'),
                test_allepochs_losses
            )
            np.save(
                os.path.join(results_path, f'ValidAvgLoss_AllE_{name}_{z}'),
                test_epochs_avg_losses
            )
            ax.plot(
                range(grad_iterations),
                train_allepochs_losses,
                ls=lines[line],
                color=colours[c]
            )
            fig.savefig(
                os.path.join(
                    results_path, f'Training_{name}{z}_benchmark1.png'
                )
            )
        lgd = ["LSTM", "GRU", "nBRC", "BRC"]
        lgd1 = fig.legend(lgd, bbox_to_anchor=(1.04, 0.5), loc="center left")
        fig.savefig(
            os.path.join(results_path, f'Training_{z}_benchmark1.png'),
            bbox_extra_artists=(lgd1, ),
            bbox_inches='tight'
        )
        fig.clf()
# Script entry point: parse the two output paths and run the benchmark.
if __name__ == '__main__':
    args = parser.parse_args()
    main(args.model_path, args.results_path)
| [
"argparse.ArgumentParser",
"brc_pytorch.datasets.BRCDataset",
"torch.nn.ModuleDict",
"torch.autograd.set_detect_anomaly",
"os.path.join",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"numpy.random.randn",
"torch.nn.Linear",
"operator.itemgetter",
"matplotlib.pyplot.subplots",
"logging.St... | [((1010, 1035), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1033, 1035), False, 'import argparse\n'), ((893, 910), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (908, 910), True, 'import numpy as np\n'), ((1820, 1855), 'logging.getLogger', 'logging.getLogger', (['"""Benchmark1_BRC"""'], {}), "('Benchmark1_BRC')\n", (1837, 1855), False, 'import logging\n'), ((1985, 2013), 'seaborn.color_palette', 'sns.color_palette', (['"""husl"""', '(5)'], {}), "('husl', 5)\n", (2002, 2013), True, 'import seaborn as sns\n'), ((548, 596), 'torch.Size', 'torch.Size', (['[batch, num_directions, hidden_size]'], {}), '([batch, num_directions, hidden_size])\n', (558, 596), False, 'import torch\n'), ((1269, 1294), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1292, 1294), False, 'import torch\n'), ((2097, 2151), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)', 'constrained_layout': '(True)'}), '(figsize=(10, 5), constrained_layout=True)\n', (2109, 2151), True, 'import matplotlib.pyplot as plt\n'), ((949, 971), 'numpy.random.randn', 'np.random.randn', (['(n - 1)'], {}), '(n - 1)\n', (964, 971), True, 'import numpy as np\n'), ((2690, 2729), 'os.path.join', 'os.path.join', (['model_path', 'f"""{name}_{z}"""'], {}), "(model_path, f'{name}_{z}')\n", (2702, 2729), False, 'import os\n'), ((3090, 3106), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (3098, 3106), True, 'import numpy as np\n'), ((3129, 3146), 'numpy.array', 'np.array', (['outputs'], {}), '(outputs)\n', (3137, 3146), True, 'import numpy as np\n'), ((3875, 3914), 'brc_pytorch.datasets.BRCDataset', 'BRCDataset', (['inputs_train', 'outputs_train'], {}), '(inputs_train, outputs_train)\n', (3885, 3914), False, 'from brc_pytorch.datasets import BRCDataset\n'), ((3942, 3979), 'brc_pytorch.datasets.BRCDataset', 'BRCDataset', (['inputs_test', 'outputs_test'], {}), '(inputs_test, outputs_test)\n', (3952, 3979), False, 
'from brc_pytorch.datasets import BRCDataset\n'), ((4011, 4066), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'batch_size': '(100)', 'shuffle': '(True)'}), '(dataset_train, batch_size=100, shuffle=True)\n', (4021, 4066), False, 'from torch.utils.data import DataLoader\n'), ((4123, 4177), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_test'], {'batch_size': '(100)', 'shuffle': '(True)'}), '(dataset_test, batch_size=100, shuffle=True)\n', (4133, 4177), False, 'from torch.utils.data import DataLoader\n'), ((4980, 5121), 'brc_pytorch.layers.MultiLayerBase', 'MultiLayerBase', (['name', 'recurrent_layers', 'hidden_size'], {'batch_first': '(True)', 'bidirectional': 'bidirectional', 'return_sequences': '(False)', 'device': 'device'}), '(name, recurrent_layers, hidden_size, batch_first=True,\n bidirectional=bidirectional, return_sequences=False, device=device)\n', (4994, 5121), False, 'from brc_pytorch.layers import BistableRecurrentCell, MultiLayerBase, NeuromodulatedBistableRecurrentCell\n'), ((5262, 5304), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * num_directions)', '(1)'], {}), '(hidden_size * num_directions, 1)\n', (5271, 5304), True, 'import torch.nn as nn\n'), ((5331, 5344), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (5341, 5344), False, 'from operator import itemgetter\n'), ((5438, 5450), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5448, 5450), True, 'import torch.nn as nn\n'), ((10486, 10544), 'os.path.join', 'os.path.join', (['results_path', 'f"""Training_{z}_benchmark1.png"""'], {}), "(results_path, f'Training_{z}_benchmark1.png')\n", (10498, 10544), False, 'import os\n'), ((1757, 1790), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1778, 1790), False, 'import logging\n'), ((5746, 5785), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (5779, 5785), False, 'import torch\n'), ((9427, 9483), 
'os.path.join', 'os.path.join', (['results_path', 'f"""TrainLoss_AllE_{name}_{z}"""'], {}), "(results_path, f'TrainLoss_AllE_{name}_{z}')\n", (9439, 9483), False, 'import os\n'), ((9575, 9634), 'os.path.join', 'os.path.join', (['results_path', 'f"""TrainAvgLoss_AllE_{name}_{z}"""'], {}), "(results_path, f'TrainAvgLoss_AllE_{name}_{z}')\n", (9587, 9634), False, 'import os\n'), ((9727, 9783), 'os.path.join', 'os.path.join', (['results_path', 'f"""ValidLoss_AllE_{name}_{z}"""'], {}), "(results_path, f'ValidLoss_AllE_{name}_{z}')\n", (9739, 9783), False, 'import os\n'), ((9875, 9934), 'os.path.join', 'os.path.join', (['results_path', 'f"""ValidAvgLoss_AllE_{name}_{z}"""'], {}), "(results_path, f'ValidAvgLoss_AllE_{name}_{z}')\n", (9887, 9934), False, 'import os\n'), ((10211, 10275), 'os.path.join', 'os.path.join', (['results_path', 'f"""Training_{name}{z}_benchmark1.png"""'], {}), "(results_path, f'Training_{name}{z}_benchmark1.png')\n", (10223, 10275), False, 'import os\n'), ((1694, 1742), 'os.path.join', 'os.path.join', (['results_path', '"""BRC_benchmark1.log"""'], {}), "(results_path, 'BRC_benchmark1.log')\n", (1706, 1742), False, 'import os\n'), ((5366, 5403), 'torch.nn.ModuleDict', 'nn.ModuleDict', (["{'rnn': rnn, 'fc': fc}"], {}), "({'rnn': rnn, 'fc': fc})\n", (5379, 5403), True, 'import torch.nn as nn\n'), ((3175, 3205), 'numpy.expand_dims', 'np.expand_dims', (['inputs'], {'axis': '(2)'}), '(inputs, axis=2)\n', (3189, 3205), True, 'import numpy as np\n'), ((3331, 3361), 'numpy.expand_dims', 'np.expand_dims', (['inputs'], {'axis': '(2)'}), '(inputs, axis=2)\n', (3345, 3361), True, 'import numpy as np\n'), ((3488, 3519), 'numpy.expand_dims', 'np.expand_dims', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (3502, 3519), True, 'import numpy as np\n'), ((3682, 3713), 'numpy.expand_dims', 'np.expand_dims', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (3696, 3713), True, 'import numpy as np\n')] |
import os
import platform
import numpy as np
import tensorflow as tf
from utils.infolog import log
from models import create_model
from utils import plot, audio
from utils.text_sequence import text_to_sequence
class Synthesizer:
  """Restores a trained Centaur model and synthesizes audio from text."""

  def load(self, checkpoint_path, hparams, freezer=False):
    """Build the inference graph and restore weights from a checkpoint.

    Args:
      checkpoint_path: Path to a checkpoint (or, with freezer=True, a
        checkpoint directory whose latest checkpoint is resolved).
      hparams: Hyperparameter object used to build the model.
      freezer: When True, resolve the latest checkpoint in the directory.

    Raises:
      RuntimeError: If the checkpoint state cannot be resolved.
    """
    log('Constructing model: Centaur')
    if freezer:
      try:
        checkpoint_path = tf.train.get_checkpoint_state(checkpoint_path).model_checkpoint_path
      except Exception as e:
        # BUGFIX: was a bare `except:`, which also trapped KeyboardInterrupt
        # and discarded the original cause; chain it instead.
        raise RuntimeError('Failed to load checkpoint at {}'.format(checkpoint_path)) from e
    # Force the batch size to be known in order to use attention masking in batch synthesis
    self.inputs = tf.placeholder(tf.int32, (None, None), name='inputs')
    self.input_lengths = tf.placeholder(tf.int32, (None,), name='input_lengths')
    with tf.variable_scope('model', reuse=tf.AUTO_REUSE):
      self.model = create_model(hparams)
      self.model.initialize(self.inputs, self.input_lengths, is_training=False,
                            is_validation=False, is_prediction=True)
      self.mel_outputs = self.model.decoder_predictions
      self.linear_outputs = self.model.mag_pred
      self.alignments = self.model.alignments
      self.wav_output = self.model.audio
      self.stop_token_prediction = self.model.stop_token_predictions
      self.audio_length = self.model.sequence_lengths
    self._hparams = hparams
    # pad input sequences with the <pad_token> 0 ( _ )
    self._pad = 0
    log('Loading checkpoint: %s' % checkpoint_path)
    # Memory allocation on the GPUs as needed
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    self.session = tf.Session(config=config)
    self.session.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(self.session, checkpoint_path)

  def calc_num_pad(self, r, target_length):
    """Return how many frames to append so target_length is a multiple of r.

    NOTE(review): when target_length is already a multiple of r this still
    adds a full extra block of r frames -- confirm callers intend that.
    """
    padded_target_length = (target_length // r + 1) * r
    num_pad = padded_target_length - target_length
    return num_pad

  def get_alignments(self, attention_mask):
    """Flatten attention masks into (spec, title) lists, one per layer/head."""
    alignments_name = ["align"]
    specs = []
    titles = []
    # zip limits iteration to the first len(alignments_name) masks.
    for name, alignment in zip(alignments_name, attention_mask):
      for layer in range(len(alignment)):
        for head in range(alignment.shape[1]):
          specs.append(alignment[layer][head])
          titles.append("{}_layer_{}_head_{}".format(name, layer, head))
    return specs, titles

  def synthesize(self, texts, basenames, log_dir, mel_filenames):
    """Synthesize audio for a batch of texts and save wavs/plots to log_dir.

    When basenames is None the first sample is played through the OS audio
    player instead of being saved.  NOTE(review): the padding loop below
    appends to `texts`/`basenames` in place (mutating the caller's lists),
    and would fail before the `basenames is None` branch is reached if
    basenames is None -- confirm intended call patterns.
    """
    hparams = self._hparams
    # Repeat last sample until number of samples is dividable by the number of GPUs (last run scenario)
    while len(texts) % hparams.synthesis_batch_size != 0:
      texts.append(texts[-1])
      basenames.append(basenames[-1])
      if mel_filenames is not None:
        mel_filenames.append(mel_filenames[-1])
    sequences = [np.asarray(text_to_sequence(text)) for text in texts]
    input_lengths = [len(seq) for seq in sequences]
    seqs, max_seq_len = self._prepare_inputs(sequences)
    feed_dict = {
      self.inputs: seqs,
      self.input_lengths: np.asarray(input_lengths, dtype=np.int32)
    }
    linears, mels, alignments, audio_length = self.session.run(
      [self.linear_outputs, self.mel_outputs, self.alignments[0], self.audio_length],
      feed_dict=feed_dict)
    # Natural batch synthesis
    # Get Mel/Linear lengths for the entire batch from stop_tokens predictions
    target_lengths = audio_length
    if basenames is None:
      # Generate wav and read it
      wav = audio.inv_mel_spectrogram(mels[0].T, hparams)
      audio.save_wav(wav, 'temp.wav', sr=hparams.sample_rate)  # Find a better way
      if platform.system() == 'Linux':
        # Linux wav reader
        os.system('aplay temp.wav')
      elif platform.system() == 'Windows':
        # windows wav reader
        os.system('start /min mplay32 /play /close temp.wav')
      else:
        raise RuntimeError(
          'Your OS type is not supported yet, please add it to "centaur/synthesizer.py, line-165" and feel free to make a Pull Request ;) Thanks!')
      return
    # The alignment specs depend only on the batch-level `alignments`, so
    # compute them once instead of once per mel (hoisted out of the loop).
    alignments_samples, alignment_titles = self.get_alignments(alignments)
    for i, mel in enumerate(mels):
      if log_dir is not None:
        # save wav (mel -> wav)
        wav = audio.inv_mel_spectrogram(mel.T, hparams)
        audio.save_wav(wav, os.path.join(log_dir, 'wavs/wav-{}-mel.wav'.format(basenames[i])),
                       sr=hparams.sample_rate)
        for idx in range(len(alignments_samples)):
          # save alignments
          plot.plot_alignment(alignments_samples[idx],
                              os.path.join(log_dir, 'plots/{}.png'.format(
                                alignment_titles[
                                  idx])),
                              title='{}'.format(texts[i]), split_title=True, max_len=target_lengths[i])
        # save mel spectrogram plot
        plot.plot_spectrogram(mel,
                              os.path.join(log_dir, 'plots/mel-{}.png'.format(basenames[i])),
                              title='{}'.format(texts[i]), split_title=True)
        # save wav (linear -> wav)
        wav = audio.inv_linear_spectrogram(linears[i].T, hparams)
        audio.save_wav(wav,
                       os.path.join(log_dir, 'wavs/wav-{}-linear.wav'.format(basenames[i])),
                       sr=hparams.sample_rate)
        # save linear spectrogram plot
        plot.plot_spectrogram(linears[i],
                              os.path.join(log_dir, 'plots/linear-{}.png'.format(basenames[i])),
                              title='{}'.format(texts[i]), split_title=True, auto_aspect=True)

  @staticmethod
  def _round_up(x, multiple):
    """Round x up to the nearest multiple of `multiple` (x if already one)."""
    remainder = x % multiple
    return x if remainder == 0 else x + multiple - remainder

  def _prepare_inputs(self, inputs):
    """Right-pad all sequences to the longest; return (batch, max_len)."""
    max_len = max([len(x) for x in inputs])
    return np.stack([self._pad_input(x, max_len) for x in inputs]), max_len

  def _pad_input(self, x, length):
    """Right-pad a single 1D sequence with the pad token up to `length`."""
    return np.pad(x, (0, length - x.shape[0]), mode='constant', constant_values=self._pad)
| [
"numpy.pad",
"utils.audio.save_wav",
"tensorflow.train.Saver",
"utils.infolog.log",
"models.create_model",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.variable_scope",
"os.system",
"utils.text_sequence.text_to_sequence",
"platform.system",
"t... | [((300, 334), 'utils.infolog.log', 'log', (['"""Constructing model: Centaur"""'], {}), "('Constructing model: Centaur')\n", (303, 334), False, 'from utils.infolog import log\n'), ((707, 760), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None, None)'], {'name': '"""inputs"""'}), "(tf.int32, (None, None), name='inputs')\n", (721, 760), True, 'import tensorflow as tf\n'), ((790, 845), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(None,)'], {'name': '"""input_lengths"""'}), "(tf.int32, (None,), name='input_lengths')\n", (804, 845), True, 'import tensorflow as tf\n'), ((1590, 1637), 'utils.infolog.log', 'log', (["('Loading checkpoint: %s' % checkpoint_path)"], {}), "('Loading checkpoint: %s' % checkpoint_path)\n", (1593, 1637), False, 'from utils.infolog import log\n'), ((1705, 1721), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1719, 1721), True, 'import tensorflow as tf\n'), ((1836, 1861), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1846, 1861), True, 'import tensorflow as tf\n'), ((1939, 1955), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1953, 1955), True, 'import tensorflow as tf\n'), ((6693, 6772), 'numpy.pad', 'np.pad', (['x', '(0, length - x.shape[0])'], {'mode': '"""constant"""', 'constant_values': 'self._pad'}), "(x, (0, length - x.shape[0]), mode='constant', constant_values=self._pad)\n", (6699, 6772), True, 'import numpy as np\n'), ((860, 907), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'tf.AUTO_REUSE'}), "('model', reuse=tf.AUTO_REUSE)\n", (877, 907), True, 'import tensorflow as tf\n'), ((934, 955), 'models.create_model', 'create_model', (['hparams'], {}), '(hparams)\n', (946, 955), False, 'from models import create_model\n'), ((1887, 1920), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1918, 1920), True, 'import tensorflow as tf\n'), ((3387, 3428), 
'numpy.asarray', 'np.asarray', (['input_lengths'], {'dtype': 'np.int32'}), '(input_lengths, dtype=np.int32)\n', (3397, 3428), True, 'import numpy as np\n'), ((3876, 3921), 'utils.audio.inv_mel_spectrogram', 'audio.inv_mel_spectrogram', (['mels[0].T', 'hparams'], {}), '(mels[0].T, hparams)\n', (3901, 3921), False, 'from utils import plot, audio\n'), ((3934, 3989), 'utils.audio.save_wav', 'audio.save_wav', (['wav', '"""temp.wav"""'], {'sr': 'hparams.sample_rate'}), "(wav, 'temp.wav', sr=hparams.sample_rate)\n", (3948, 3989), False, 'from utils import plot, audio\n'), ((3142, 3164), 'utils.text_sequence.text_to_sequence', 'text_to_sequence', (['text'], {}), '(text)\n', (3158, 3164), False, 'from utils.text_sequence import text_to_sequence\n'), ((4027, 4044), 'platform.system', 'platform.system', ([], {}), '()\n', (4042, 4044), False, 'import platform\n'), ((4108, 4135), 'os.system', 'os.system', (['"""aplay temp.wav"""'], {}), "('aplay temp.wav')\n", (4117, 4135), False, 'import os\n'), ((4665, 4706), 'utils.audio.inv_mel_spectrogram', 'audio.inv_mel_spectrogram', (['mel.T', 'hparams'], {}), '(mel.T, hparams)\n', (4690, 4706), False, 'from utils import plot, audio\n'), ((5773, 5824), 'utils.audio.inv_linear_spectrogram', 'audio.inv_linear_spectrogram', (['linears[i].T', 'hparams'], {}), '(linears[i].T, hparams)\n', (5801, 5824), False, 'from utils import plot, audio\n'), ((406, 452), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_path'], {}), '(checkpoint_path)\n', (435, 452), True, 'import tensorflow as tf\n'), ((4154, 4171), 'platform.system', 'platform.system', ([], {}), '()\n', (4169, 4171), False, 'import platform\n'), ((4239, 4292), 'os.system', 'os.system', (['"""start /min mplay32 /play /close temp.wav"""'], {}), "('start /min mplay32 /play /close temp.wav')\n", (4248, 4292), False, 'import os\n')] |
import os
from collections import deque
import numpy as np
class TemperatureDictionary:
    """Decade/season/location lookup table for ocean temperatures.

    Coordinates live on a 0.25-degree grid whose cell centres have decimal
    parts .125/.375/.625/.875.  A query snaps to the nearest valid cell and
    breadth-first searches neighbouring cells until a stored value is found.
    """

    DECADES = ['55-64', '65-74', '75-84', '85-94', '95-04', '05-12']
    SEASONS = ['winter', 'spring', 'summer', 'autumn']
    # One 0.25-degree step in each of the four compass directions.
    LAT_LON_NUDGE = [[0.25, 0], [-0.25, 0], [0, 0.25], [0, -0.25]]
    # Decimal parts of a valid grid-cell centre coordinate.
    VALID_DECIMALS = [0.125, 0.375, 0.625, 0.875]

    def __init__(self):
        # temp_data[decade][season] maps "lat&lon" keys to temperature values.
        self.temp_data = {d: {s: {} for s in self.SEASONS} for d in self.DECADES}

    def initialize(self):
        """Load every CSV file under data/ocean_temp into the lookup table.

        Filenames are expected to end in '<year>-...-<season>.csv'; the first
        two lines of each file are headers and each data row's first three
        fields are (lat, lon, temperature).
        """
        for filename in os.listdir('data/ocean_temp'):
            if filename[-3:] != 'csv':
                continue
            # Context manager ensures the file handle is closed promptly
            # (the original `list(open(...))` leaked it).
            with open('data/ocean_temp/' + filename, 'r', encoding="latin-1") as f:
                file = list(f)
            x = [line.strip().split(',')[:3] for line in file[2:]]
            x = np.array(x)
            # BUGFIX: np.where on a 2-D array returns (row_idx, col_idx);
            # passing the whole tuple to np.delete misused column indices as
            # row indices (and raises on modern numpy).  Only the row indices
            # identify rows with an empty field.
            x = np.delete(x, np.where(x == '')[0], 0)
            metadata = filename.split('.')[0].split('_')[-1].split('-')
            year = int(metadata[0])
            season = metadata[-1]
            for x_i in x:
                self.add(year, season, x_i)

    def add(self, year, season, data_row):
        """Store one (lat, lon, temperature) row under its decade and season."""
        decade = self._get_decade(year)
        lat_lon_key = self._make_key(data_row)
        self.temp_data[decade][season][lat_lon_key] = data_row[2]

    def get(self, data_row):
        """Return the temperature nearest (lat, lon) for the row's date.

        data_row is (lat, lon, year, month) with numeric lat/lon.  Returns
        0.0 when no data exists at all for the decade/season.
        """
        year, month = data_row[2], data_row[3]
        decade = self._get_decade(year)
        season = self._get_season(month)
        temp_dict = self.temp_data[decade][season]
        if not temp_dict:
            return 0.0  # Empty dictionary
        lat_lon_pair = (data_row[0], data_row[1])
        valid_lat_lon_pair = self._make_valid_lat_lon_pair(lat_lon_pair)
        entry_exists = lambda e: self._make_key(e) in temp_dict
        valid_lat_lon_pair = self._bfs_find(valid_lat_lon_pair, entry_exists)
        valid_key = self._make_key(valid_lat_lon_pair)
        return float(temp_dict[valid_key])

    def _get_decade(self, year):
        """Map a year to its decade bucket, clamping outside 1955-2012."""
        if year < 1955: return '55-64'  # Treat pre-1955 as 1955-1964
        if year < 1964: return '55-64'
        if year < 1974: return '65-74'
        if year < 1984: return '75-84'
        if year < 1994: return '85-94'
        if year < 2004: return '95-04'
        if year < 2013: return '05-12'
        else: return '05-12'  # Treat post-2012 as 2005-2012

    def _get_season(self, month):
        """Assumes northern hemisphere."""
        if month < 1 or month > 12:
            print('Month {0} is not valid! Adapting...'.format(month))
            # NOTE(review): this shifts the month by one (e.g. 13 -> 2, not
            # 1) -- confirm the intended remapping for invalid months.
            month = (month % 12) + 1
        if month in [4, 5, 6]:  # April - June
            return 'spring'
        if month in [7, 8, 9]:  # July - September
            return 'summer'
        if month in [10, 11, 12]:  # October - December
            return 'autumn'
        if month in [1, 2, 3]:  # January - March
            return 'winter'

    def _make_key(self, data_row):
        """Build the 'lat&lon' dictionary key from the first two fields."""
        return str(data_row[0]) + "&" + str(data_row[1])

    def _make_valid_lat_lon_pair(self, lat_lon_pair):
        """Snap a (lat, lon) pair to the nearest valid grid-cell centre."""
        def find_possible_value(value):
            return min([int(value) + dec for dec in self.VALID_DECIMALS],
                       key=lambda possible_value: abs(possible_value - value))
        lat, lon = lat_lon_pair[0], lat_lon_pair[1]
        return (find_possible_value(lat), find_possible_value(lon))

    def _get_neighbors(self, lat_lon_pair):
        """Yield the four grid neighbours one 0.25-degree step away."""
        lat, lon = lat_lon_pair[0], lat_lon_pair[1]
        for dlat, dlon in self.LAT_LON_NUDGE:
            yield (lat + dlat, lon + dlon)

    def _bfs_find(self, start, success_fn):
        """Breadth-first search outward from `start` for a matching cell.

        Returns the first pair for which success_fn is true.  Note: searches
        an unbounded grid, so it only terminates if such a cell is reachable.
        """
        visited = set()
        visited.add(start)
        q = deque()
        q.append(start)
        while q:
            u = q.popleft()
            if success_fn(u):
                return u
            for v in self._get_neighbors(u):
                if v not in visited:
                    q.append(v)
                    visited.add(v)
| [
"numpy.where",
"numpy.array",
"os.listdir",
"collections.deque"
] | [((490, 519), 'os.listdir', 'os.listdir', (['"""data/ocean_temp"""'], {}), "('data/ocean_temp')\n", (500, 519), False, 'import os\n'), ((3595, 3602), 'collections.deque', 'deque', ([], {}), '()\n', (3600, 3602), False, 'from collections import deque\n'), ((738, 749), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (746, 749), True, 'import numpy as np\n'), ((779, 796), 'numpy.where', 'np.where', (["(x == '')"], {}), "(x == '')\n", (787, 796), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import gmean
from collections import namedtuple
import threading
import multiprocessing
import configparser
import timestreamquery as tsquery
import os
from timeit import default_timer as timer
from query_execution_utils import executeQueryInstance, Query
import sys, traceback
import random, string
import time
# Dimension values used to parameterize the generated queries (one value per
# Timestream dimension of the benchmark schema).
Params = namedtuple('Params', 'dbname tablename region az cell silo microservicename instancetype osversion instancename processname jdkversion')
# Per-query settings: repetition count plus the values substituted into the
# query template.
QueryParams = namedtuple('QueryParams', 'repetitions paramlist')
# CSV header for the per-query latency summary emitted at the end of a run.
Header = 'Query type, Total Count, Successful Count, Avg. latency (in secs), Std dev latency (in secs), Median, 90th perc (in secs), 99th Perc (in secs), Geo Mean (in secs)'
### Create the query string using the list of parameters.
def createQueryStr(query):
    """Render a Query by substituting its parameter list into its template."""
    template = query.queryStr
    substitutions = query.params.paramlist
    return template.format(*substitutions)
def getQueryStrings(queries, params):
    """Render every Query in `queries` to its final SQL string.

    `params` is accepted for signature compatibility but is not used here;
    each Query already carries its own parameter list.
    """
    return {
        name: query.queryStr.format(*query.params.paramlist)
        for name, query in queries.items()
    }
## For each query, convert them into row-count variants where the actual query is enclosed within a sub-query
## where the outer query counts the number of rows returned by the sub-query (i.e., the original query).
def createQueryInstancesRowCount(queries, endTime = "now()"):
    """Wrap every query in SELECT COUNT(*) so only row counts are returned.

    Note: `endTime` is kept for signature compatibility but is unused here.
    """
    rowCountStr = """
    SELECT COUNT(*)
    FROM (
        {}
    )
    """
    return {
        name: Query(rowCountStr.format(q.queryStr), q.params)
        for name, q in queries.items()
    }
## Config constants. These define the strings used in the config files.
# Section and option names recognized in the experiment config file.
configDefaultSection = 'default'
configQueryDistributionSection = 'query_distribution'
configQueryMode = 'query_mode'
configRepetitions = 'repetitions'
configRetries = 'retries'
# Supported values for the 'query_mode' option.
configQueryModeRowCount = 'row_count'
configQueryModeRegular = 'regular'
class QueryStats:
    """Mutable accumulator for one query type's execution statistics."""
    def __init__(self):
        self.count = 0      # total attempts
        self.success = 0    # attempts that completed successfully
        self.timings = []   # latencies (seconds) of successful attempts
## The main execution thread the reads in the config file and executes the queries per the parameters
## defined in the config file.
class RandomizedExecutionThread(threading.Thread):
    """Executes a randomized mix of queries against the database and logs latencies.

    The config file supplies the query mode, repetition count and the
    per-query probability distribution; per-query logs and a summary CSV are
    written under `args.logDir`.
    """
    def __init__(self, threadId, args, startTime, queryProvider, tableName = None):
        threading.Thread.__init__(self)
        self.threadId = threadId
        self.args = args
        self.startTime = startTime
        self.config = configparser.ConfigParser()
        self.config.read(args.config)
        retries = int(self.config.get(configDefaultSection, configRetries, fallback = 0))
        self.client = tsquery.createQueryClient(region=args.region, profile=args.profile, retries=retries, endpoint=args.endpoint)
        self.output = ()
        self.tps = 0.0
        self.queryCount = 0
        self.tableName = tableName
        ## Query parameters
        if self.args.fixedParams:
            ## Use fixed query parameters
            self.params = Params(self.args.databaseName, self.args.tableName, 'eu-west-1', 'eu-west-1-1', 'eu-west-1-cell-8', 'eu-west-1-cell-8-silo-1', 'apollo', 'r5.4xlarge', 'AL2', 'i-ojtoEEXU-apollo-eu-west-1-cell-8-silo-1-00000033.amazonaws.com', 'host_manager', 'JDK_8')
        else:
            ## Read the query parameters from the database and table.
            self.params = getQueryParams(self.args)
        print(self.params)
        queryInstances = queryProvider(self.params, args.queryEndTime, args.wide)
        ## Initialize the query mode.
        self.queryMode = self.config.get(configDefaultSection, configQueryMode, fallback = configQueryModeRegular)
        if self.queryMode not in [configQueryModeRegular, configQueryModeRowCount]:
            raise Exception('Unknown query mode: {}'.format(self.queryMode))
        if self.queryMode == configQueryModeRegular:
            self.queries = getQueryStrings(queryInstances, self.params)
        else:
            # Row-count mode wraps each query in SELECT COUNT(*).
            queriesRowCount = createQueryInstancesRowCount(queryInstances, args.queryEndTime)
            self.queries = getQueryStrings(queriesRowCount, self.params)
        ## Initialize the query distributions.
        if configQueryDistributionSection not in self.config:
            raise Exception('Query distribution section missing')
        queryDistributions = self.config[configQueryDistributionSection]
        distributions = dict()
        for query in queryDistributions:
            if query not in self.queries:
                raise Exception('Unknown query: {}'.format(query))
            # Config values are percentages; convert to probabilities in [0, 1].
            if query in distributions:
                distributions[query] += float(queryDistributions[query]) / 100
            else:
                distributions[query] = float(queryDistributions[query]) / 100
        self.queriesSelected = list()
        self.queryWeights = list()
        # NOTE(review): `sum` shadows the builtin for the rest of this scope.
        sum = 0
        for query in distributions.keys():
            print('{} : {}'.format(query, distributions[query]))
            self.queriesSelected.append(query)
            self.queryWeights.append(distributions[query])
            sum += distributions[query]
        print("Sum of probabilities: ", sum)
    def run(self):
        """Execute the randomized query workload; write logs and a summary CSV."""
        logPrefix = "[{}]".format(self.threadId)
        databaseName = self.args.databaseName
        if self.tableName == None:
            tableName = self.args.tableName
        else:
            tableName = self.tableName
        repetitions = int(self.config.get(configDefaultSection, configRepetitions, fallback = 100))
        logDir = self.args.logDir
        runPrefix = self.args.runPrefix
        if not os.path.exists(logDir):
            os.makedirs(logDir)
        ## Create an experiment name for logging purposes.
        expName = "{}-{}-{}".format(runPrefix, self.startTime.strftime("%Y-%m-%d-%H-%M-%S"), self.threadId)
        expDirName = os.path.join(logDir, expName)
        if not os.path.exists(expDirName):
            os.makedirs(expDirName)
        print("Starting experiment {} at {}. Database: {}. Table: {}. Log files at: {}".format(
            expName, self.startTime, databaseName, tableName, expDirName))
        beginExperiment = timer()
        ## Start running the experiment
        self.logStats(expDirName, logPrefix, databaseName, tableName, "begin")
        queryLogFiles = dict()
        queryExecutionStats = dict()
        try:
            ## Generate the query strings and initalize other resources
            for query in self.queriesSelected:
                outFilePath = os.path.join(expDirName, "{0}.log".format(query))
                errFilePath = os.path.join(expDirName, "{0}.err".format(query))
                sqlFilePath = os.path.join(expDirName, "{0}.sql".format(query))
                outFile = open(outFilePath, "w")
                errFile = open(errFilePath, "w")
                queryLogFiles[query] = (outFile, errFile)
                queryExecutionStats[query] = QueryStats()
                with open(sqlFilePath, "w") as file:
                    file.write(self.queries[query])
            output = list()
            output.append('Query type, Total Count, Successful Count, Avg. latency (in secs), Std dev latency (in secs), Median, 90th perc (in secs), 99th Perc (in secs), Geo Mean (in secs)')
            queryCount = 0
            while queryCount < repetitions:
                try:
                    queryCount += 1
                    ## Randomly choose a query to execute
                    queryToExecute = np.random.choice(self.queriesSelected, p=self.queryWeights)
                    queryStr = self.queries[queryToExecute]
                    print("{} {}. {}".format(logPrefix, queryCount, queryToExecute))
                    result = executeQueryInstance(self.client, queryStr, queryCount, logPrefix=logPrefix,
                        thinkTimeMillis=self.args.thinkTimeMillis, randomizedThink = self.args.randomizedThink,
                        outFile=queryLogFiles[queryToExecute][0], errFile=queryLogFiles[queryToExecute][1])
                    queryStat = queryExecutionStats[queryToExecute]
                    queryStat.count += 1
                    queryStat.success += result.success
                    if result.success == 1:
                        queryStat.timings.append(result.timing)
                except:
                    # Keep going on individual query failures; log the traceback.
                    print("Error executing query: ", query)
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
            print('\nSummary Results\n')
            for query in self.queriesSelected:
                queryStat = queryExecutionStats[query]
                if queryStat.success > 0:
                    output.append('{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}'.format(query, queryStat.count, queryStat.success,
                        round(np.average(queryStat.timings), 3), round(np.std(queryStat.timings), 3),
                        round(np.percentile(queryStat.timings, 50), 3), round(np.percentile(queryStat.timings, 90), 3),
                        round(np.percentile(queryStat.timings, 99), 3), round(gmean(queryStat.timings), 3)))
                else:
                    # No successful run: emit the counts with empty latency columns.
                    output.append('{0}, {1}, {2}, , , , ,'.format(query, queryStat.count, queryStat.success))
            print(os.linesep.join("{}".format(x) for x in output))
            summaryFile = os.path.join(expDirName, "{}-summary.csv".format(expName))
            with open(summaryFile, "w") as summary:
                summary.write(os.linesep.join("{}".format(x) for x in output))
            ## Get a count at the end of the experiment.
            self.logStats(expDirName, logPrefix, databaseName, tableName, "end")
            endExperiment = timer()
            print("Experiment {} completed. Time (seconds): {}. Log directory: {}".format(expName,
                round(endExperiment - beginExperiment, 2), expDirName))
            self.queryCount = queryCount
            self.output = output
        finally:
            # Close every per-query log/err file handle.
            for key in queryLogFiles:
                val = queryLogFiles[key]
                val[0].close()
                val[1].close()
    def getOutput(self):
        return self.output
    def getQueryCount(self):
        return self.queryCount
    def getTps(self):
        return self.tps
    ## Log a few summary statistics from the table.
    def logStats(self, expDirName, logPrefix, databaseName, tableName, stage):
        """Record the table's last-15-minute ingest count to numDatapoints-<stage> files."""
        # Number of data points ingested.
        numDatapoints = """
            SELECT COUNT(*) AS num_data_points
            FROM "{0}"."{1}"
            WHERE time BETWEEN {2} AND {3}
        """
        datapoints = Query(numDatapoints, QueryParams(1, (databaseName, tableName, '{} - 15m'.format(self.args.queryEndTime), '{}'.format(self.args.queryEndTime))))
        queryStr = createQueryStr(datapoints)
        outputFilePath = os.path.join(expDirName, "numDatapoints-{}.log".format(stage))
        outFile = open(outputFilePath, "w")
        errorFilePath = os.path.join(expDirName, "numDatapoints-{}.err".format(stage))
        errFile = open(errorFilePath, "w")
        try:
            print("Obtaining the data points ingested in the last 15 mins")
            result = executeQueryInstance(self.client, queryStr, 1, logPrefix=logPrefix, outFile=outFile, errFile=errFile)
            if result != None:
                df = tsquery.flatModelToDataframe(result.result)
                print(df)
                resultFilePath = os.path.join(expDirName, "numDatapoints-{}-result.txt".format(stage))
                with open(resultFilePath, "w") as resultFile:
                    resultFile.write(str(df))
        finally:
            outFile.close()
            errFile.close()
## A multi-process executer that uses the RandomizedExecutionThread instances to execute queries
## using multiple processes.
class MultiProcessQueryWorker(multiprocessing.Process):
    """Runs `args.concurrency` RandomizedExecutionThread workers in one process.

    Results are reported back to the parent through the multiprocessing
    connection `conn` as a dict with keys "Outputs" and "Count".
    """
    def __init__(self, processId, args, startTime, queryProvider, conn):
        super(MultiProcessQueryWorker, self).__init__()
        self.processId = processId
        self.args = args
        self.conn = conn
        self.startTime = startTime
        self.queryInstanceProvider = queryProvider
    def run(self):
        """Start the worker threads, wait for them, and send back a summary."""
        threads = []
        overallSummary = dict()
        try:
            for threadId in range(1, self.args.concurrency + 1):
                threadIdStr = "{}-{}".format(self.processId, threadId)
                thread = RandomizedExecutionThread(threadIdStr, self.args, self.startTime, self.queryInstanceProvider, self.args.tableName)
                thread.start()
                # Optionally stagger thread start-up.
                if self.args.thinkTimeMillis > 0:
                    thinkTimeMillis = self.args.thinkTimeMillis
                    if self.args.randomizedThink:
                        thinkTimeMillis = random.randint(0, thinkTimeMillis)
                    time.sleep(thinkTimeMillis / 1000.0)
                threads.append(thread)
            queryCount = 0
            outputs = dict()
            for t in threads:
                t.join()
                outputs[t.threadId] = t.getOutput()
                queryCount += t.getQueryCount()
            overallSummary["Outputs"] = outputs
            overallSummary["Count"] = queryCount
            print("Experiment {} complete for process ID: {}".format(self.args.runPrefix, self.processId))
            print(os.linesep.join([os.linesep.join([x for x in z]) for z in outputs.values()]))
        finally:
            # Always report back to the parent, even on failure (may be empty).
            self.conn.send(overallSummary)
## Obtain the query parameters by issuing a query to the database and table.
def getQueryParams(args):
    """Sample one pair of series from the table and build a Params tuple from it."""
    print("Obtaining query params from the database.")
    client = tsquery.createQueryClient(region=args.region, profile=args.profile, endpoint=args.endpoint)
    ## Use the following query to get the query parameters from the data
    ## by picking attributes each type of time series to initialize the dimensions.
    ## Introduces some randomization to enable multiple executions to pick different parameters.
    if args.wide:
        queryStr = """
        WITH selectedTaskCompleted AS (
            SELECT *
            FROM "{0}"."{1}"
            WHERE measure_name = 'events'
            AND time BETWEEN {2} - 12h AND {2}
            LIMIT 1
        ), selectedCpu AS (
            SELECT t.*
            FROM "{0}"."{1}" t INNER JOIN selectedTaskCompleted c ON c.instance_name = t.instance_name
            WHERE t.measure_name = 'metrics'
            AND t.time BETWEEN {2} - 12h AND {2}
            LIMIT 1
        )
        SELECT * FROM(
            (
                SELECT * FROM selectedCpu
            )
            UNION
            (
                SELECT * FROM selectedTaskCompleted
            )
        )
        ORDER BY measure_name DESC
        """.format(args.databaseName, args.tableName, args.queryEndTime)
    else:
        queryStr = """
        WITH selectedTaskCompleted AS (
            SELECT *
            FROM "{0}"."{1}"
            WHERE measure_name = 'task_completed'
            AND time BETWEEN {2} - 12h AND {2}
            LIMIT 1
        ), selectedCpu AS (
            SELECT t.*
            FROM "{0}"."{1}" t INNER JOIN selectedTaskCompleted c ON c.instance_name = t.instance_name
            WHERE t.measure_name = 'cpu_user'
            AND t.time BETWEEN {2} - 12h AND {2}
            LIMIT 1
        )
        SELECT * FROM(
            (
                SELECT * FROM selectedCpu
            )
            UNION
            (
                SELECT * FROM selectedTaskCompleted
            )
        )
        ORDER BY measure_name
        """.format(args.databaseName, args.tableName, args.queryEndTime)
    print(queryStr)
    result = tsquery.executeQueryAndReturnAsDataframe(client, queryStr, True)
    # Rows 0 and 1 correspond to the two measures after the ORDER BY above.
    params = Params(args.databaseName, args.tableName, result['region'][0], result['availability_zone'][0], result['cell'][0],
        result['silo'][0], result['microservice_name'][0], result['instance_type'][0], result['os_version'][0],
        result['instance_name'][0], result['process_name'][1], result['jdk_version'][1])
return params | [
"scipy.stats.gmean",
"sys.exc_info",
"os.path.join",
"timestreamquery.flatModelToDataframe",
"threading.Thread.__init__",
"random.randint",
"numpy.std",
"os.path.exists",
"traceback.print_exception",
"numpy.random.choice",
"configparser.ConfigParser",
"numpy.average",
"time.sleep",
"numpy.... | [((358, 503), 'collections.namedtuple', 'namedtuple', (['"""Params"""', '"""dbname tablename region az cell silo microservicename instancetype osversion instancename processname jdkversion"""'], {}), "('Params',\n 'dbname tablename region az cell silo microservicename instancetype osversion instancename processname jdkversion'\n )\n", (368, 503), False, 'from collections import namedtuple\n'), ((509, 559), 'collections.namedtuple', 'namedtuple', (['"""QueryParams"""', '"""repetitions paramlist"""'], {}), "('QueryParams', 'repetitions paramlist')\n", (519, 559), False, 'from collections import namedtuple\n'), ((13834, 13929), 'timestreamquery.createQueryClient', 'tsquery.createQueryClient', ([], {'region': 'args.region', 'profile': 'args.profile', 'endpoint': 'args.endpoint'}), '(region=args.region, profile=args.profile,\n endpoint=args.endpoint)\n', (13859, 13929), True, 'import timestreamquery as tsquery\n'), ((15863, 15927), 'timestreamquery.executeQueryAndReturnAsDataframe', 'tsquery.executeQueryAndReturnAsDataframe', (['client', 'queryStr', '(True)'], {}), '(client, queryStr, True)\n', (15903, 15927), True, 'import timestreamquery as tsquery\n'), ((2349, 2380), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (2374, 2380), False, 'import threading\n'), ((2496, 2523), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2521, 2523), False, 'import configparser\n'), ((2674, 2787), 'timestreamquery.createQueryClient', 'tsquery.createQueryClient', ([], {'region': 'args.region', 'profile': 'args.profile', 'retries': 'retries', 'endpoint': 'args.endpoint'}), '(region=args.region, profile=args.profile, retries\n =retries, endpoint=args.endpoint)\n', (2699, 2787), True, 'import timestreamquery as tsquery\n'), ((5861, 5890), 'os.path.join', 'os.path.join', (['logDir', 'expName'], {}), '(logDir, expName)\n', (5873, 5890), False, 'import os\n'), ((6168, 6175), 'timeit.default_timer', 'timer', ([], 
{}), '()\n', (6173, 6175), True, 'from timeit import default_timer as timer\n'), ((5616, 5638), 'os.path.exists', 'os.path.exists', (['logDir'], {}), '(logDir)\n', (5630, 5638), False, 'import os\n'), ((5652, 5671), 'os.makedirs', 'os.makedirs', (['logDir'], {}), '(logDir)\n', (5663, 5671), False, 'import os\n'), ((5906, 5932), 'os.path.exists', 'os.path.exists', (['expDirName'], {}), '(expDirName)\n', (5920, 5932), False, 'import os\n'), ((5946, 5969), 'os.makedirs', 'os.makedirs', (['expDirName'], {}), '(expDirName)\n', (5957, 5969), False, 'import os\n'), ((9882, 9889), 'timeit.default_timer', 'timer', ([], {}), '()\n', (9887, 9889), True, 'from timeit import default_timer as timer\n'), ((11367, 11473), 'query_execution_utils.executeQueryInstance', 'executeQueryInstance', (['self.client', 'queryStr', '(1)'], {'logPrefix': 'logPrefix', 'outFile': 'outFile', 'errFile': 'errFile'}), '(self.client, queryStr, 1, logPrefix=logPrefix, outFile\n =outFile, errFile=errFile)\n', (11387, 11473), False, 'from query_execution_utils import executeQueryInstance, Query\n'), ((11521, 11564), 'timestreamquery.flatModelToDataframe', 'tsquery.flatModelToDataframe', (['result.result'], {}), '(result.result)\n', (11549, 11564), True, 'import timestreamquery as tsquery\n'), ((7502, 7561), 'numpy.random.choice', 'np.random.choice', (['self.queriesSelected'], {'p': 'self.queryWeights'}), '(self.queriesSelected, p=self.queryWeights)\n', (7518, 7561), True, 'import numpy as np\n'), ((7736, 7996), 'query_execution_utils.executeQueryInstance', 'executeQueryInstance', (['self.client', 'queryStr', 'queryCount'], {'logPrefix': 'logPrefix', 'thinkTimeMillis': 'self.args.thinkTimeMillis', 'randomizedThink': 'self.args.randomizedThink', 'outFile': 'queryLogFiles[queryToExecute][0]', 'errFile': 'queryLogFiles[queryToExecute][1]'}), '(self.client, queryStr, queryCount, logPrefix=logPrefix,\n thinkTimeMillis=self.args.thinkTimeMillis, randomizedThink=self.args.\n randomizedThink, 
outFile=queryLogFiles[queryToExecute][0], errFile=\n queryLogFiles[queryToExecute][1])\n', (7756, 7996), False, 'from query_execution_utils import executeQueryInstance, Query\n'), ((13013, 13049), 'time.sleep', 'time.sleep', (['(thinkTimeMillis / 1000.0)'], {}), '(thinkTimeMillis / 1000.0)\n', (13023, 13049), False, 'import time\n'), ((8479, 8493), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8491, 8493), False, 'import sys, traceback\n'), ((8514, 8606), 'traceback.print_exception', 'traceback.print_exception', (['exc_type', 'exc_value', 'exc_traceback'], {'limit': '(2)', 'file': 'sys.stdout'}), '(exc_type, exc_value, exc_traceback, limit=2, file\n =sys.stdout)\n', (8539, 8606), False, 'import sys, traceback\n'), ((12958, 12992), 'random.randint', 'random.randint', (['(0)', 'thinkTimeMillis'], {}), '(0, thinkTimeMillis)\n', (12972, 12992), False, 'import random, string\n'), ((13541, 13572), 'os.linesep.join', 'os.linesep.join', (['[x for x in z]'], {}), '([x for x in z])\n', (13556, 13572), False, 'import os\n'), ((8964, 8993), 'numpy.average', 'np.average', (['queryStat.timings'], {}), '(queryStat.timings)\n', (8974, 8993), True, 'import numpy as np\n'), ((9005, 9030), 'numpy.std', 'np.std', (['queryStat.timings'], {}), '(queryStat.timings)\n', (9011, 9030), True, 'import numpy as np\n'), ((9082, 9118), 'numpy.percentile', 'np.percentile', (['queryStat.timings', '(50)'], {}), '(queryStat.timings, 50)\n', (9095, 9118), True, 'import numpy as np\n'), ((9130, 9166), 'numpy.percentile', 'np.percentile', (['queryStat.timings', '(90)'], {}), '(queryStat.timings, 90)\n', (9143, 9166), True, 'import numpy as np\n'), ((9218, 9254), 'numpy.percentile', 'np.percentile', (['queryStat.timings', '(99)'], {}), '(queryStat.timings, 99)\n', (9231, 9254), True, 'import numpy as np\n'), ((9266, 9290), 'scipy.stats.gmean', 'gmean', (['queryStat.timings'], {}), '(queryStat.timings)\n', (9271, 9290), False, 'from scipy.stats import gmean\n')] |
import pandas as pd
import os
import pickle
def save_file(data, fname, dname):
    """Save a datafile (data) to a specific location (dname) and filename (fname).

    Currently valid formats are limited to CSV or PKL.  If the target file
    already exists the user is prompted (Y/N) before overwriting.

    Args:
        data: object to persist (a pandas DataFrame for .csv targets).
        fname: file name, must end in '.csv' or '.pkl'.
        dname: directory; created (including parents) if missing.
    """
    if not os.path.exists(dname):
        # makedirs creates intermediate directories too (os.mkdir would fail
        # on a nested path like 'out/run1').
        os.makedirs(dname)
        print(f'Directory {dname} was created.')
    fpath = os.path.join(dname, fname)
    if not os.path.exists(fpath):
        # Path does not exist, ok to save the file.
        print(f'Writing file. "{fpath}"')
        _save_file(data, fpath)
        return
    print("A file already exists with this name.\n")
    while True:
        # Guard against empty input: the original `input(...).strip()[0]`
        # raised IndexError when the user just pressed Enter.
        response = input('Do you want to overwrite? (Y/N)').strip()
        yesno = response[:1].capitalize()
        if yesno == "Y":
            print(f'Writing file. "{fpath}"')
            _save_file(data, fpath)
            return
        if yesno == "N":
            print('\nPlease re-run this cell with a new filename.')
            return
        print('\nUnknown input, please enter "Y" or "N".')
def _save_file(data, fpath):
    """Write `data` to `fpath`, choosing the format from the file extension.

    '.csv' targets call `data.to_csv` (so `data` must be a DataFrame);
    '.pkl' targets pickle the object.

    Raises:
        ValueError: if the extension is neither '.csv' nor '.pkl'.
            (Previously an `assert`, which is stripped under `python -O`.)
    """
    valid_ftypes = ['.csv', '.pkl']
    ext = fpath[-4:]
    if ext not in valid_ftypes:
        raise ValueError("Invalid file type. Use '.csv' or '.pkl'")
    # Figure out what kind of file we're dealing with by name.
    if ext == '.csv':
        data.to_csv(fpath, index=False)
    else:  # '.pkl'
        with open(fpath, 'wb') as f:
            pickle.dump(data, f)
def create_ranges(player1, player2, params, df):
    """Build radar-chart axis ranges and the two players' stat values.

    Each axis range is the column min/max padded outward by 15%.  The first
    two columns of `df` (name and club) are stripped from the value rows.

    Returns:
        (values, ranges): values is [player1_stats, player2_stats]; ranges is
        a list of (low, high) tuples, one per entry of `params`.
    """
    stat_block = df[params]
    ranges = []
    for col in params:
        lo = min(stat_block[col])
        lo = lo - (lo * .15)
        hi = max(stat_block[col])
        hi = hi + (hi * .15)
        ranges.append((lo, hi,))
    a_values = []
    b_values = []
    # Scan every row; a later duplicate name overwrites an earlier match,
    # matching the original behavior.
    for idx in range(len(df['player_name'])):
        name = df['player_name'].iloc[idx]
        if name == player1:
            a_values = df.iloc[idx].values.tolist()
        if name == player2:
            b_values = df.iloc[idx].values.tolist()
    # Drop the name and club columns from the value rows.
    a_values = a_values[2:]
    b_values = b_values[2:]
    return [a_values, b_values], ranges
def radar_compare(player1,player2,params, df):
    """Render an overlaid radar chart comparing two players on `params`.

    Args:
        player1, player2: names matched against df['player_name'].
        params: stat columns to plot, one spoke each.
        df: dataframe with player_name and club first, then the stat columns.
    """
    from soccerplots.radar_chart import Radar
    values, ranges = create_ranges(player1,player2, params, df)
    # Chart titles/subtitles; the *_2 entries style the second player.
    title = dict(
        title_name=player1,
        title_color = 'royalblue',
        subtitle_name = 'defensive stats',
        subtitle_color = 'orange',
        title_name_2=player2,
        title_color_2 = 'red',
        subtitle_name_2 = 'defensive stats',
        subtitle_color_2 = 'purple',
        title_fontsize = 18,
        subtitle_fontsize=8
    )
    ## endnote
    endnote = "Visualization made by: <NAME> \nAll units are in per90 and are possesion adjusted"
    radar = Radar()
    fig,ax = radar.plot_radar(ranges=ranges,params=params,values=values,
                       radar_color=['orange','purple'],
                       alphas=[0.3,0.3],
                       title=title,
                       figsize = (20, 20),
                       compare=True,
                       endnote = endnote
                       )
# function to viz skill groups and players most associated with skill group
def display_features(H, W, feature_names, X_matrix, no_top_features, no_top_players):
    """Print each NMF skill group and return its top players as a DataFrame.

    Args:
        H: topic-by-feature matrix (the NMF components).
        W: player-by-topic matrix (the NMF transform of the data).
        feature_names: column labels, aligned with H's feature axis.
        X_matrix: dataframe whose index holds the player names.
        no_top_features: number of features printed per topic; the column
            label concatenates `no_top_features + 3` of them.
        no_top_players: number of top-ranked players kept per topic.

    Returns:
        DataFrame with one column per topic (labelled by its top features),
        each cell a (row_index, player_name) tuple.
    """
    import numpy as np
    topics = {}
    # H rows are topics; iterate each topic's feature weights.
    for topic_idx, topic in enumerate(H):
        print("Topic %d:" % (topic_idx))
        print(" ".join([(feature_names[i] + " (" + str(topic[i].round(2)) + ")")
                        for i in topic.argsort()[:-no_top_features - 1:-1]]))
        # Column label: the (no_top_features + 3) highest-weighted features.
        features = [(feature_names[i] + " (" + str(topic[i].round(2)) + ")")
                    for i in topic.argsort()[:-(no_top_features + 3) - 1:-1]]
        fs = ''.join(features)  # join instead of repeated string +=
        # Players with the largest weight for this topic, highest first.
        top_players = []
        top_player_indicies = np.argsort(W[:, topic_idx])[::-1][0:no_top_players]
        for p_index in top_player_indicies:
            print(p_index, " ", X_matrix.index[p_index])
            top_players.append((p_index, X_matrix.index[p_index]))
        topics[fs] = top_players
    return pd.DataFrame(topics)
def add_skill_group(X,W):
    import numpy as np
    """ add skill group to player feature to use for classification"""
    # Work on a copy so the caller's dataframe is untouched.
    df_new = X.copy()
    # Get the top predicted topic and add to df copy
    # (argsort descending, take index 0 = highest-weight topic; on exact ties
    # this picks the highest topic index, unlike np.argmax).
    df_new['pred_topic_num']= [np.argsort(each)[::-1][0] for each in W]
return df_new | [
"pandas.DataFrame",
"os.mkdir",
"pickle.dump",
"os.path.exists",
"numpy.argsort",
"soccerplots.radar_chart.Radar",
"os.path.join"
] | [((344, 370), 'os.path.join', 'os.path.join', (['dname', 'fname'], {}), '(dname, fname)\n', (356, 370), False, 'import os\n'), ((380, 401), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (394, 401), False, 'import os\n'), ((2856, 2863), 'soccerplots.radar_chart.Radar', 'Radar', ([], {}), '()\n', (2861, 2863), False, 'from soccerplots.radar_chart import Radar\n'), ((4647, 4667), 'pandas.DataFrame', 'pd.DataFrame', (['topics'], {}), '(topics)\n', (4659, 4667), True, 'import pandas as pd\n'), ((235, 256), 'os.path.exists', 'os.path.exists', (['dname'], {}), '(dname)\n', (249, 256), False, 'import os\n'), ((266, 281), 'os.mkdir', 'os.mkdir', (['dname'], {}), '(dname)\n', (274, 281), False, 'import os\n'), ((1510, 1530), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (1521, 1530), False, 'import pickle\n'), ((4343, 4370), 'numpy.argsort', 'np.argsort', (['W[:, topic_idx]'], {}), '(W[:, topic_idx])\n', (4353, 4370), True, 'import numpy as np\n'), ((4920, 4936), 'numpy.argsort', 'np.argsort', (['each'], {}), '(each)\n', (4930, 4936), True, 'import numpy as np\n')] |
import dataclasses
import numpy as np
import tensorflow as tf
import gym
import pickle
import sys
from stable_baselines import SAC
from imitation.data import types
from imitation.data.rollout import unwrap_traj
import deep_rlsp
def convert_trajs(filename, traj_len, *unused_args):
    """Load pickled skill rollouts and truncate the first one to `traj_len` steps.

    Args:
        filename: path to a pickle with 'observations' and 'actions' lists.
        traj_len: number of transitions to keep; must be strictly shorter
            than the stored rollout.
        *unused_args: ignored.  Absorbs the extra CLI arguments that
            recode_and_save_trajectories forwards unconditionally (previously
            a TypeError whenever any were present).

    Returns:
        A one-element list holding a TrajectoryWithRew with all-zero rewards.
    """
    # NOTE(review): pickle.load executes arbitrary code from the file — only
    # use with trusted rollout files.
    with open(filename, "rb") as f:
        data = pickle.load(f)
    assert traj_len < len(data["observations"][0])
    obs = np.array(data["observations"][0][: traj_len + 1])
    acts = np.array(data["actions"][0][:traj_len])
    rews = np.array([0 for _ in range(traj_len)])
    infos = [{} for _ in range(traj_len)]
    traj = types.TrajectoryWithRew(obs=obs, acts=acts, infos=infos, rews=rews)
    return [traj]
def rollout_policy(filename, traj_len, seed, env_name, n_trajs=1):
    """Roll out a saved SAC policy deterministically and collect trajectories.

    Each trajectory has exactly `traj_len` transitions; episode termination
    flags from the environment are deliberately ignored.
    """
    model = SAC.load(filename)
    env = gym.make(env_name)
    env.seed(seed)
    trajectories = []
    for _ in range(int(n_trajs)):
        observations = [env.reset()]
        actions = []
        rewards = []
        for _ in range(traj_len):
            action = model.predict(observations[-1], deterministic=True)[0]
            next_obs, reward, done, _ = env.step(action)
            # `done` is intentionally unused so every rollout is full length.
            actions.append(action)
            observations.append(next_obs)
            rewards.append(reward)
        trajectories.append(
            types.TrajectoryWithRew(
                obs=np.array(observations),
                acts=np.array(actions),
                infos=[{} for _ in range(traj_len)],
                rews=np.array(rewards),
            )
        )
    return trajectories
def recode_and_save_trajectories(traj_or_policy_file, save_loc, traj_len, seed, args):
    """Build trajectories from a skills pickle or a saved policy and persist them."""
    if "skills" in traj_or_policy_file:
        trajs = convert_trajs(traj_or_policy_file, traj_len, *args)
    else:
        trajs = rollout_policy(traj_or_policy_file, traj_len, seed, *args)
    # Sanity-check the lengths before serializing.
    for traj in trajs:
        assert traj.obs.shape[0] == traj_len + 1
        assert traj.acts.shape[0] == traj_len
    # Strip the (empty) info dicts prior to saving.
    stripped = [dataclasses.replace(traj, infos=None) for traj in trajs]
    types.save(save_loc, stripped)
if __name__ == "__main__":
    # CLI: <script> <traj_or_policy_file> <save_loc> <traj_len> <seed> [extra args...]
    _, traj_or_policy_file, save_loc, traj_len, seed = sys.argv[:5]
    if seed == "generate_seed":
        # Draw a fresh seed when the caller does not supply one.
        seed = np.random.randint(0, 1e9)
    else:
        seed = int(seed)
    # The save path template may embed the trajectory length and seed.
    save_loc = save_loc.format(traj_len, seed)
    np.random.seed(seed)
    tf.random.set_random_seed(seed)
    traj_len = int(traj_len)
    recode_and_save_trajectories(
        traj_or_policy_file, save_loc, traj_len, seed, sys.argv[5:]
    )
| [
"imitation.data.types.TrajectoryWithRew",
"numpy.random.seed",
"gym.make",
"dataclasses.replace",
"stable_baselines.SAC.load",
"imitation.data.types.save",
"pickle.load",
"numpy.array",
"numpy.random.randint",
"tensorflow.random.set_random_seed"
] | [((399, 447), 'numpy.array', 'np.array', (["data['observations'][0][:traj_len + 1]"], {}), "(data['observations'][0][:traj_len + 1])\n", (407, 447), True, 'import numpy as np\n'), ((460, 499), 'numpy.array', 'np.array', (["data['actions'][0][:traj_len]"], {}), "(data['actions'][0][:traj_len])\n", (468, 499), True, 'import numpy as np\n'), ((603, 670), 'imitation.data.types.TrajectoryWithRew', 'types.TrajectoryWithRew', ([], {'obs': 'obs', 'acts': 'acts', 'infos': 'infos', 'rews': 'rews'}), '(obs=obs, acts=acts, infos=infos, rews=rews)\n', (626, 670), False, 'from imitation.data import types\n'), ((770, 788), 'stable_baselines.SAC.load', 'SAC.load', (['filename'], {}), '(filename)\n', (778, 788), False, 'from stable_baselines import SAC\n'), ((799, 817), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (807, 817), False, 'import gym\n'), ((2044, 2071), 'imitation.data.types.save', 'types.save', (['save_loc', 'trajs'], {}), '(save_loc, trajs)\n', (2054, 2071), False, 'from imitation.data import types\n'), ((2328, 2348), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2342, 2348), True, 'import numpy as np\n'), ((2353, 2384), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['seed'], {}), '(seed)\n', (2378, 2384), True, 'import tensorflow as tf\n'), ((322, 336), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (333, 336), False, 'import pickle\n'), ((1983, 2020), 'dataclasses.replace', 'dataclasses.replace', (['traj'], {'infos': 'None'}), '(traj, infos=None)\n', (2002, 2020), False, 'import dataclasses\n'), ((2216, 2250), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000000000.0)'], {}), '(0, 1000000000.0)\n', (2233, 2250), True, 'import numpy as np\n'), ((1364, 1382), 'numpy.array', 'np.array', (['obs_list'], {}), '(obs_list)\n', (1372, 1382), True, 'import numpy as np\n'), ((1401, 1420), 'numpy.array', 'np.array', (['acts_list'], {}), '(acts_list)\n', (1409, 1420), True, 'import numpy as np\n'), 
((1464, 1483), 'numpy.array', 'np.array', (['rews_list'], {}), '(rews_list)\n', (1472, 1483), True, 'import numpy as np\n')] |
# Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module contains a registry of dataset classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from magenta.models.gansynth.lib import spectral_ops
from magenta.models.gansynth.lib import util
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import data as contrib_data
from tensorflow.contrib import lookup as contrib_lookup
Counter = collections.Counter
class BaseDataset(object):
  """Abstract interface for datasets read from disk."""

  def __init__(self, config):
    self._train_data_path = util.expand_path(config['train_data_path'])

  def provide_one_hot_labels(self, batch_size):
    """Returns a batch of one-hot pitch labels."""
    raise NotImplementedError

  def provide_dataset(self):
    """Returns the audio dataset."""
    raise NotImplementedError

  def get_pitch_counts(self):
    """Returns a dictionary {pitch value (int): count (int)}."""
    raise NotImplementedError

  def get_pitches(self, num_samples):
    """Samples `num_samples` pitches in proportion to their dataset counts.

    Args:
      num_samples: Number of pitches to draw.

    Returns:
      A collections.Counter over the sampled pitch values.
    """
    counts = self.get_pitch_counts()
    # Expand the histogram into one entry per occurrence, then sample
    # uniformly from the pool (equivalent to count-weighted sampling).
    pool = [pitch for pitch, count in counts.items() for _ in range(count)]
    samples = np.random.choice(pool, num_samples)
    return Counter(samples)
class NSynthTFRecordDataset(BaseDataset):
  """A dataset for reading NSynth from a TFRecord file."""

  def _get_dataset_from_path(self):
    # List shard files matching the train path, shuffle/repeat the file
    # names, then interleave record reads across 20 files at once
    # (sloppy=True trades deterministic ordering for throughput).
    dataset = tf.data.Dataset.list_files(self._train_data_path)
    dataset = dataset.apply(contrib_data.shuffle_and_repeat(buffer_size=1000))
    dataset = dataset.apply(
        contrib_data.parallel_interleave(
            tf.data.TFRecordDataset, cycle_length=20, sloppy=True))
    return dataset

  def provide_one_hot_labels(self, batch_size):
    """Provides one hot labels.

    Pitches are sampled in proportion to their frequency in the dataset,
    via tf.multinomial over the log of the raw counts.

    Args:
      batch_size: Number of labels to draw.

    Returns:
      A [batch_size, n_pitches] one-hot label tensor.
    """
    pitch_counts = self.get_pitch_counts()
    pitches = sorted(pitch_counts.keys())
    counts = [pitch_counts[p] for p in pitches]
    indices = tf.reshape(
        tf.multinomial(tf.log([tf.to_float(counts)]), batch_size), [batch_size])
    one_hot_labels = tf.one_hot(indices, depth=len(pitches))
    return one_hot_labels

  def provide_dataset(self, length=64000):
    """Provides dataset (audio, labels) of nsynth.

    Args:
      length: Number of audio samples each waveform is cropped/padded to.

    Returns:
      A tf.data.Dataset yielding (wave, one_hot_label) pairs.
    """
    channels = 1
    pitch_counts = self.get_pitch_counts()
    pitches = sorted(pitch_counts.keys())
    # Maps a raw pitch value to its index in the sorted pitch list.
    label_index_table = contrib_lookup.index_table_from_tensor(
        sorted(pitches), dtype=tf.int64)

    def _parse_nsynth(record):
      """Parsing function for NSynth dataset."""
      features = {
          'pitch': tf.FixedLenFeature([1], dtype=tf.int64),
          'audio': tf.FixedLenFeature([length], dtype=tf.float32),
          'qualities': tf.FixedLenFeature([10], dtype=tf.int64),
          'instrument_source': tf.FixedLenFeature([1], dtype=tf.int64),
          'instrument_family': tf.FixedLenFeature([1], dtype=tf.int64),
      }
      example = tf.parse_single_example(record, features)
      wave, label = example['audio'], example['pitch']
      # crop_or_pad expects a [batch, time, channels] tensor; add and then
      # strip the singleton batch dimension.
      wave = spectral_ops.crop_or_pad(wave[tf.newaxis, :, tf.newaxis],
                                     length,
                                     channels)[0]
      one_hot_label = tf.one_hot(
          label_index_table.lookup(label), depth=len(pitches))[0]
      return wave, one_hot_label, label, example['instrument_source']

    dataset = self._get_dataset_from_path()
    dataset = dataset.map(_parse_nsynth, num_parallel_calls=4)
    # Filter just acoustic instruments (as in the paper)
    # (0=acoustic, 1=electronic, 2=synthetic)
    # dataset = dataset.filter(lambda w, l, p, s: tf.equal(s, 1)[0])  # Not filtering.
    # Filter just pitches 24-84 (inclusive).
    dataset = dataset.filter(lambda w, l, p, s: tf.greater_equal(p, 24)[0])
    dataset = dataset.filter(lambda w, l, p, s: tf.less_equal(p, 84)[0])
    # Drop the raw pitch and instrument-source fields; keep (wave, label).
    dataset = dataset.map(lambda w, l, p, s: (w, l))
    return dataset

  # ToDo: make dynamic
  def get_pitch_counts(self):
    """Returns hard-coded per-pitch example counts for pitches 24-84."""
    pitch_counts = {
        24: 711,
        25: 720,
        26: 715,
        27: 725,
        28: 726,
        29: 723,
        30: 738,
        31: 829,
        32: 839,
        33: 840,
        34: 860,
        35: 870,
        36: 999,
        37: 1007,
        38: 1063,
        39: 1070,
        40: 1084,
        41: 1121,
        42: 1134,
        43: 1129,
        44: 1155,
        45: 1149,
        46: 1169,
        47: 1154,
        48: 1432,
        49: 1406,
        50: 1454,
        51: 1432,
        52: 1593,
        53: 1613,
        54: 1578,
        55: 1784,
        56: 1738,
        57: 1756,
        58: 1718,
        59: 1738,
        60: 1789,
        61: 1746,
        62: 1765,
        63: 1748,
        64: 1764,
        65: 1744,
        66: 1677,
        67: 1746,
        68: 1682,
        69: 1705,
        70: 1694,
        71: 1667,
        72: 1695,
        73: 1580,
        74: 1608,
        75: 1546,
        76: 1576,
        77: 1485,
        78: 1408,
        79: 1438,
        80: 1333,
        81: 1369,
        82: 1331,
        83: 1295,
        84: 1291
    }
    return pitch_counts
# Maps a dataset name (as referenced in configs) to its implementing class.
registry = {
    'nsynth_tfrecord': NSynthTFRecordDataset,
}
| [
"tensorflow.contrib.data.shuffle_and_repeat",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.less_equal",
"tensorflow.compat.v1.to_float",
"tensorflow.compat.v1.greater_equal",
"tensorflow.contrib.data.parallel_interleave",
"numpy.random.choice",
"magenta.models.gansynth.lib.util.e... | [((1195, 1238), 'magenta.models.gansynth.lib.util.expand_path', 'util.expand_path', (["config['train_data_path']"], {}), "(config['train_data_path'])\n", (1211, 1238), False, 'from magenta.models.gansynth.lib import util\n'), ((1834, 1876), 'numpy.random.choice', 'np.random.choice', (['all_pitches', 'num_samples'], {}), '(all_pitches, num_samples)\n', (1850, 1876), True, 'import numpy as np\n'), ((2100, 2149), 'tensorflow.compat.v1.data.Dataset.list_files', 'tf.data.Dataset.list_files', (['self._train_data_path'], {}), '(self._train_data_path)\n', (2126, 2149), True, 'import tensorflow.compat.v1 as tf\n'), ((2178, 2227), 'tensorflow.contrib.data.shuffle_and_repeat', 'contrib_data.shuffle_and_repeat', ([], {'buffer_size': '(1000)'}), '(buffer_size=1000)\n', (2209, 2227), True, 'from tensorflow.contrib import data as contrib_data\n'), ((2266, 2357), 'tensorflow.contrib.data.parallel_interleave', 'contrib_data.parallel_interleave', (['tf.data.TFRecordDataset'], {'cycle_length': '(20)', 'sloppy': '(True)'}), '(tf.data.TFRecordDataset, cycle_length=20,\n sloppy=True)\n', (2298, 2357), True, 'from tensorflow.contrib import data as contrib_data\n'), ((3565, 3606), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['record', 'features'], {}), '(record, features)\n', (3588, 3606), True, 'import tensorflow.compat.v1 as tf\n'), ((3223, 3262), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[1]'], {'dtype': 'tf.int64'}), '([1], dtype=tf.int64)\n', (3241, 3262), True, 'import tensorflow.compat.v1 as tf\n'), ((3283, 3329), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[length]'], {'dtype': 'tf.float32'}), '([length], dtype=tf.float32)\n', (3301, 3329), True, 'import tensorflow.compat.v1 as tf\n'), ((3354, 3394), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[10]'], {'dtype': 'tf.int64'}), '([10], dtype=tf.int64)\n', (3372, 3394), True, 'import 
tensorflow.compat.v1 as tf\n'), ((3427, 3466), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[1]'], {'dtype': 'tf.int64'}), '([1], dtype=tf.int64)\n', (3445, 3466), True, 'import tensorflow.compat.v1 as tf\n'), ((3499, 3538), 'tensorflow.compat.v1.FixedLenFeature', 'tf.FixedLenFeature', (['[1]'], {'dtype': 'tf.int64'}), '([1], dtype=tf.int64)\n', (3517, 3538), True, 'import tensorflow.compat.v1 as tf\n'), ((3675, 3750), 'magenta.models.gansynth.lib.spectral_ops.crop_or_pad', 'spectral_ops.crop_or_pad', (['wave[tf.newaxis, :, tf.newaxis]', 'length', 'channels'], {}), '(wave[tf.newaxis, :, tf.newaxis], length, channels)\n', (3699, 3750), False, 'from magenta.models.gansynth.lib import spectral_ops\n'), ((4378, 4401), 'tensorflow.compat.v1.greater_equal', 'tf.greater_equal', (['p', '(24)'], {}), '(p, 24)\n', (4394, 4401), True, 'import tensorflow.compat.v1 as tf\n'), ((4454, 4474), 'tensorflow.compat.v1.less_equal', 'tf.less_equal', (['p', '(84)'], {}), '(p, 84)\n', (4467, 4474), True, 'import tensorflow.compat.v1 as tf\n'), ((2661, 2680), 'tensorflow.compat.v1.to_float', 'tf.to_float', (['counts'], {}), '(counts)\n', (2672, 2680), True, 'import tensorflow.compat.v1 as tf\n')] |
import copy
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from scipy.spatial.distance import pdist, squareform
from yass.evaluate.util import *
def align_template(template, temp_len=40, mode='all'):
    """Aligns every channel of a template to its main (largest-energy) channel.

    For each channel, finds the shift minimizing the l2 distance between that
    channel's trace and the main channel's trace, then extracts a window of
    `temp_len` samples centered at that shift.

    Parameters
    ----------
    template : np.ndarray
        Shape (n_times, n_chan).
    temp_len : int
        Length of the aligned output window.
    mode : str
        'all' matches the full waveform; 'neg' zeroes positive samples
        before matching (aligns on the negative peaks only).

    Returns
    -------
    np.ndarray of shape (temp_len, n_chan).
    """
    window = np.arange(0, temp_len) - temp_len // 2
    n_chan = template.shape[1]
    main_chan = main_channels(template)[-1]
    base_trace = np.zeros(template.shape[0])
    base_trace[:] = template[:, main_chan]
    temp_norm = np.sum(template * template, axis=0)
    base_norm = temp_norm[main_chan]
    aligned_temp = np.zeros([temp_len, n_chan])
    if mode == 'neg':
        base_trace[base_trace > 0] = 0
    for c in range(n_chan):
        orig_filt = template[:, c]
        filt = np.zeros(orig_filt.shape)
        filt[:] = orig_filt
        if mode == 'neg':
            filt[filt > 0] = 0
        filt_norm = temp_norm[c]
        # Squared l2 distance between base_trace and each shift of filt:
        # ||a||^2 - 2<a, b> + ||b||^2, with the cross term via convolution.
        conv_dist = -2 * np.convolve(
            filt, np.flip(base_trace, axis=0), mode='same') + base_norm + filt_norm
        center = np.argmin(conv_dist)
        try:
            aligned_temp[:, c] = orig_filt[center + window]
        except IndexError:
            # BUG FIX: was a bare `except:` that swallowed every exception.
            # Only an out-of-range window should trigger the fallback to the
            # unshifted leading window.
            aligned_temp[:, c] = orig_filt[np.arange(0, temp_len)]
    return aligned_temp
def recon(template, rank=3):
    """Reconstructs a template from its rank-`rank` truncated SVD."""
    left, sing, right = np.linalg.svd(template)
    truncated = left[:, :rank] * sing[:rank]
    return np.matmul(truncated, right[:rank, :])
def recon_error(template, rank=3):
    """Frobenius-norm error of the rank-`rank` SVD reconstruction."""
    residual = template - recon(template, rank=rank)
    return np.linalg.norm(residual)
class Geometry(object):
    """Electrode geometry helper for finding the closest channels."""

    def __init__(self, geometry):
        # Precompute the full pairwise channel-distance matrix once.
        self.geom = geometry
        pairwise = pdist(geometry)
        self.pdist = squareform(pairwise)

    def neighbors(self, channel, size):
        """Returns the `size` channels nearest to `channel` (itself first)."""
        order = np.argsort(self.pdist[channel])
        return order[:size]
def vis_chan(template, min_peak_to_peak=1):
    """Boolean mask of channels whose peak-to-peak exceeds the threshold."""
    peak_to_peak = template.max(axis=0) - template.min(axis=0)
    return peak_to_peak > min_peak_to_peak
def conv_dist(ref, temp):
    """Squared l2 distance of temp against every sliding window of ref."""
    # ||window||^2 - 2 <window, temp> + ||temp||^2, all via convolutions.
    window_energy = np.convolve(ref * ref, np.ones(len(temp)), mode='valid')
    cross_term = np.convolve(ref, np.flip(temp, axis=0), mode='valid')
    return window_energy - 2 * cross_term + np.sum(temp * temp)
def align_temp_to_temp(ref, temp):
    """Per-channel shifts aligning temp (longer window) to ref (shorter)."""
    n_chan = ref.shape[1]
    shifts = np.zeros(n_chan)
    for chan in range(n_chan):
        # Best shift = window of temp with minimal l2 distance to ref.
        shifts[chan] = np.argmin(conv_dist(temp[:, chan], ref[:, chan]))
    return shifts
def optimal_aligned_compress(template, upsample=5, rank=3, max_shift=6):
    """Greedy local search of channel alignments for best SVD compression.

    Channels are visited from strongest to weakest (skipping the main
    channel, which anchors the alignment); for each, the jitter minimizing
    the rank-`rank` SVD reconstruction error is kept.

    Parameters
    ----------
    template : np.ndarray
        Shape (n_times, n_chan).
    upsample : int
        Temporal upsampling factor applied before searching shifts.
    rank : int
        SVD rank used for the compression objective.
    max_shift : int
        Maximum jitter (in original samples) allowed per channel.

    Returns
    -------
    (snip_temp, obj_list) : the aligned, upsampled, trimmed template and the
    objective value after each channel's alignment.
    """
    # BUG FIX: `upsample` was previously reset to 5 unconditionally here,
    # silently ignoring the caller's argument. The default is unchanged.
    max_shift = max_shift * upsample
    half_max_shift = max_shift // 2
    n_chan = template.shape[1]
    n_times = template.shape[0]
    template = sp.signal.resample(template, n_times * upsample)
    new_times = upsample * n_times
    snip_win = (half_max_shift, -half_max_shift)
    snip_temp = copy.copy(template[snip_win[0]:snip_win[1], :])
    shifts = np.zeros(n_chan, dtype='int')
    obj_list = []
    for i, k in enumerate(reversed(main_channels(template))):
        if i == 0:
            # The main channel anchors the alignment; leave it untouched.
            continue
        cand_chan = k
        # Objective for each candidate jitter in [-half, +half].
        # NOTE(review): new_obj has max_shift + 1 slots, which matches the
        # jitter loop only when max_shift * upsample is even — confirm.
        new_obj = np.zeros(max_shift + 1)
        for j, jitter in enumerate(range(-half_max_shift, half_max_shift + 1)):
            snip_from, snip_to = snip_win[0] + jitter, snip_win[1] + jitter
            if snip_to == 0:
                snip_to = new_times
            snip_temp[:, cand_chan] = template[snip_from:snip_to, cand_chan]
            new_obj[j] = recon_error(snip_temp, rank=rank)
        # Keep the locally optimal jitter for this channel.
        opt_shift = np.argmin(new_obj) - half_max_shift
        shifts[cand_chan] = opt_shift
        snip_from, snip_to = snip_win[0] + opt_shift, snip_win[1] + opt_shift
        if snip_to == 0:
            snip_to = new_times
        snip_temp[:, cand_chan] = template[snip_from:snip_to, cand_chan]
        obj_list.append(min(new_obj))
    return snip_temp, obj_list
def optimal_svd_align(template, geometry, rank=3, upsample=5, chunk=7, max_shift=10):
    """Iterative svd then align approach to alignment.

    Alternates between (a) building a rank-`rank` SVD reference from the
    currently aligned channels and (b) re-aligning each of those channels to
    that reference, growing the channel set in chunks of spatial neighbors
    around the main channel until all channels are included.

    Parameters
    ----------
    template: np.ndarray
        Shape (n_times, n_chan).
    geometry: Geometry
        Used to pick the `chunk * i` channels nearest the main channel.
    rank: int
        SVD rank of the reference reconstruction.
    upsample: int
        Temporal upsampling factor applied before alignment.
    chunk: int
        Number of channels added per outer iteration.
    max_shift: int
        Maximum shift (in original samples) allowed per channel.

    Returns
    -------
    np.ndarray of shape (n_times * upsample - max_shift * upsample, n_chan).
    """
    max_shift = upsample * max_shift
    n_times = template.shape[0]
    n_chan = template.shape[1]
    # Channels ordered from strongest to weakest.
    main_chan = np.flip(main_channels(template), axis=0)
    win_len = n_times * upsample - max_shift
    # Upsample
    temp = sp.signal.resample(template, n_times * upsample)
    # Start every channel centered in its allowed shift range.
    shifts = np.zeros(n_chan, dtype=int) + max_shift // 2
    chunk_set = 0
    i = 1
    terminate = False
    while not terminate:
        if i * chunk > n_chan:
            # Final pass over all channels (strongest-first order).
            cum_chan = main_chan
            terminate = True
        else:
            #cum_chan = main_chan[:i * chunk]
            cum_chan = geometry.neighbors(main_chan[0], size=chunk * i)
        # A few fixed-point iterations of (reconstruct reference, re-align).
        for iteration in range(4):
            temp_ref = []
            for c in cum_chan:
                temp_ref.append(temp[shifts[c]:shifts[c] + win_len, c])
            temp_ref = np.array(temp_ref).T
            temp_ref_rec = recon(temp_ref, rank=rank)
            shifts[cum_chan] = align_temp_to_temp(temp_ref_rec, temp[:, cum_chan])
        i += 1
    # Apply the final shifts to every channel.
    aligned_temp = []
    for c in range(n_chan):
        aligned_temp.append(temp[shifts[c]:shifts[c] + win_len, c])
    return np.array(aligned_temp).T
def plot_spatial(geom, temp, ax, color='C0', alpha=0.7, scale=10., squeeze=8.):
    """Draws each channel's trace at its spatial location on `ax`."""
    n_times, n_chans = temp.shape
    # The time axis is identical for every channel; compute it once.
    time_axis = np.arange(0, n_times, 1) / squeeze
    for chan in range(n_chans):
        ax.plot(
            time_axis + geom[chan, 0],
            temp[:, chan] * scale + geom[chan, 1],
            alpha=alpha, color=color, lw=2)
def plot_spatial_fill(geom, temp, ax, color='C0', scale=10., squeeze=8.):
    """Shades a flat band of height `scale` at each channel's location."""
    n_times, n_chans = temp.shape
    # Zeroed copy: the band is centered on the channel's y position.
    flat = temp * 0
    time_axis = np.arange(0, n_times, 1) / squeeze
    for chan in range(n_chans):
        ax.fill_between(
            time_axis + geom[chan, 0],
            flat[:, chan] - scale / 2 + geom[chan, 1],
            flat[:, chan] + scale / 2 + geom[chan, 1],
            color=color, alpha=0.3)
def plot_chan_numbers(geom, ax, offset=10):
    """Writes each channel index next to its location on the geometry plot."""
    # NOTE: draws via the global pyplot state; the `ax` argument is unused.
    for chan in range(geom.shape[0]):
        plt.text(geom[chan, 0] + offset, geom[chan, 1], str(chan), size='large')
def fake_data(spt, temps, length, noise=True):
    """Synthesizes a recording by injecting templates at spike times.

    spt is an (N, 2) array of (time, unit id); temps has shape
    (n_time, n_chan, n_unit). Returns a (length, n_chan) array of unit
    Gaussian noise (or zeros if noise=False) with templates added in.
    """
    n_time, n_chan, n_unit = temps.shape
    if noise:
        data = np.random.normal(0, 1, [length, n_chan])
    else:
        data = np.zeros([length, n_chan])
    # Column of within-template sample offsets, broadcast over spike times.
    time_window = np.arange(0, n_time)[:, np.newaxis]
    for unit in range(n_unit):
        times = spt[spt[:, 1] == unit, 0]
        # Drop spikes whose template would run past the end of the data.
        times = times[times < length - n_time]
        idx = times + time_window
        data[idx, :] += temps[:, :, unit][:, np.newaxis, :]
    return data
def count_matches(array1, array2, admissible_proximity=40):
    """Finds the matches between two count process.

    Walks both sorted time sequences in lockstep; events within
    `admissible_proximity` samples of each other count as a match.

    Returns
    -------
    tuple of lists
        (M, U) where M holds the indices of array2 that matched an event
        in array1, and U the indices of array2 left behind unmatched.
    """
    m, n = len(array1), len(array2)
    matched_idx, unmatched_idx = [], []
    i = j = 0
    count = 0
    while i < m and j < n:
        gap = array1[i] - array2[j]
        if abs(gap) < admissible_proximity:
            # Close enough: pair them and advance both streams.
            matched_idx.append(j)
            i += 1
            j += 1
            count += 1
        elif gap < 0:
            # array1's event is too early; it can never match array2[j].
            i += 1
        else:
            # array2's event is too early; record it as unmatched.
            unmatched_idx.append(j)
            j += 1
    return matched_idx, unmatched_idx
def compute_snr(temps):
    """Peak-to-peak SNR per unit: max over channels of (peak - trough)."""
    peak_to_peak = temps.max(axis=0) - temps.min(axis=0)
    return peak_to_peak.max(axis=0)
def enforce_refractory_period(spike_train, refractory_period):
    """Removes spike times that violate refractory period.

    Parameters:
    -----------
    spike_train: numpy.ndarray
        Shape (N, 2) where first column indicates spike times
        and second column unit identities. Should be sorted
        by times across all units.
    refractory_period: int
        Minimum allowed gap (in samples) between consecutive spikes
        of the same unit.

    Returns:
    --------
    np.ndarray of shape (M, 2) with M <= N.
    """
    # BUG FIX: previously iterated `range(np.max(spike_train[:, 1]))`,
    # which skipped the unit with the largest id entirely (and processed
    # nothing at all when only unit 0 was present).
    delete_idx = []
    for u in np.unique(spike_train[:, 1]):
        sp_idx = np.where(spike_train[:, 1] == u)[0]
        sp = spike_train[sp_idx, 0]
        diffs = np.diff(sp)
        idx = diffs < refractory_period
        while np.sum(idx) > 0:
            # Remove violating spike times (the later spike of each pair).
            delete_idx += list(sp_idx[np.where(idx)[0] + 1])
            sp_idx = np.delete(sp_idx, np.where(idx)[0] + 1, axis=0)
            # Recompute gaps after the deletion.
            sp = spike_train[sp_idx, 0]
            diffs = np.diff(sp)
            idx = diffs < refractory_period
    # Remove all the violating spike times from the original spike train.
    return np.delete(spike_train, delete_idx, axis=0)
"numpy.sum",
"numpy.argmin",
"numpy.argsort",
"numpy.linalg.svd",
"numpy.linalg.norm",
"numpy.arange",
"numpy.random.normal",
"scipy.spatial.distance.pdist",
"numpy.max",
"numpy.min",
"numpy.delete",
"numpy.flip",
"scipy.signal.resample",
"numpy.zeros",
"copy.copy",
"numpy.diff",
"nu... | [((370, 397), 'numpy.zeros', 'np.zeros', (['template.shape[0]'], {}), '(template.shape[0])\n', (378, 397), True, 'import numpy as np\n'), ((457, 492), 'numpy.sum', 'np.sum', (['(template * template)'], {'axis': '(0)'}), '(template * template, axis=0)\n', (463, 492), True, 'import numpy as np\n'), ((549, 577), 'numpy.zeros', 'np.zeros', (['[temp_len, n_chan]'], {}), '([temp_len, n_chan])\n', (557, 577), True, 'import numpy as np\n'), ((1284, 1307), 'numpy.linalg.svd', 'np.linalg.svd', (['template'], {}), '(template)\n', (1297, 1307), True, 'import numpy as np\n'), ((1319, 1366), 'numpy.matmul', 'np.matmul', (['(u[:, :rank] * s[:rank])', 'vh[:rank, :]'], {}), '(u[:, :rank] * s[:rank], vh[:rank, :])\n', (1328, 1366), True, 'import numpy as np\n'), ((1511, 1546), 'numpy.linalg.norm', 'np.linalg.norm', (['(template - temp_rec)'], {}), '(template - temp_rec)\n', (1525, 1546), True, 'import numpy as np\n'), ((2423, 2439), 'numpy.zeros', 'np.zeros', (['n_chan'], {}), '(n_chan)\n', (2431, 2439), True, 'import numpy as np\n'), ((2926, 2974), 'scipy.signal.resample', 'sp.signal.resample', (['template', '(n_times * upsample)'], {}), '(template, n_times * upsample)\n', (2944, 2974), True, 'import scipy as sp\n'), ((3076, 3123), 'copy.copy', 'copy.copy', (['template[snip_win[0]:snip_win[1], :]'], {}), '(template[snip_win[0]:snip_win[1], :])\n', (3085, 3123), False, 'import copy\n'), ((3137, 3166), 'numpy.zeros', 'np.zeros', (['n_chan'], {'dtype': '"""int"""'}), "(n_chan, dtype='int')\n", (3145, 3166), True, 'import numpy as np\n'), ((4767, 4815), 'scipy.signal.resample', 'sp.signal.resample', (['template', '(n_times * upsample)'], {}), '(template, n_times * upsample)\n', (4785, 4815), True, 'import scipy as sp\n'), ((8127, 8148), 'numpy.max', 'np.max', (['temps'], {'axis': '(0)'}), '(temps, axis=0)\n', (8133, 8148), True, 'import numpy as np\n'), ((8165, 8186), 'numpy.min', 'np.min', (['temps'], {'axis': '(0)'}), '(temps, axis=0)\n', (8171, 8186), True, 'import numpy as 
np\n'), ((8240, 8268), 'numpy.max', 'np.max', (['peak_to_peak'], {'axis': '(0)'}), '(peak_to_peak, axis=0)\n', (8246, 8268), True, 'import numpy as np\n'), ((8731, 8756), 'numpy.max', 'np.max', (['spike_train[:, 1]'], {}), '(spike_train[:, 1])\n', (8737, 8756), True, 'import numpy as np\n'), ((9389, 9431), 'numpy.delete', 'np.delete', (['spike_train', 'delete_idx'], {'axis': '(0)'}), '(spike_train, delete_idx, axis=0)\n', (9398, 9431), True, 'import numpy as np\n'), ((239, 261), 'numpy.arange', 'np.arange', (['(0)', 'temp_len'], {}), '(0, temp_len)\n', (248, 261), True, 'import numpy as np\n'), ((723, 748), 'numpy.zeros', 'np.zeros', (['orig_filt.shape'], {}), '(orig_filt.shape)\n', (731, 748), True, 'import numpy as np\n'), ((993, 1013), 'numpy.argmin', 'np.argmin', (['conv_dist'], {}), '(conv_dist)\n', (1002, 1013), True, 'import numpy as np\n'), ((2259, 2278), 'numpy.sum', 'np.sum', (['(temp * temp)'], {}), '(temp * temp)\n', (2265, 2278), True, 'import numpy as np\n'), ((3512, 3535), 'numpy.zeros', 'np.zeros', (['(max_shift + 1)'], {}), '(max_shift + 1)\n', (3520, 3535), True, 'import numpy as np\n'), ((4829, 4856), 'numpy.zeros', 'np.zeros', (['n_chan'], {'dtype': 'int'}), '(n_chan, dtype=int)\n', (4837, 4856), True, 'import numpy as np\n'), ((5673, 5695), 'numpy.array', 'np.array', (['aligned_temp'], {}), '(aligned_temp)\n', (5681, 5695), True, 'import numpy as np\n'), ((6844, 6884), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '[length, n_chan]'], {}), '(0, 1, [length, n_chan])\n', (6860, 6884), True, 'import numpy as np\n'), ((6910, 6936), 'numpy.zeros', 'np.zeros', (['[length, n_chan]'], {}), '([length, n_chan])\n', (6918, 6936), True, 'import numpy as np\n'), ((8919, 8930), 'numpy.diff', 'np.diff', (['sp'], {}), '(sp)\n', (8926, 8930), True, 'import numpy as np\n'), ((1725, 1740), 'scipy.spatial.distance.pdist', 'pdist', (['geometry'], {}), '(geometry)\n', (1730, 1740), False, 'from scipy.spatial.distance import pdist, squareform\n'), 
((1798, 1832), 'numpy.argsort', 'np.argsort', (['self.pdist[channel, :]'], {}), '(self.pdist[channel, :])\n', (1808, 1832), True, 'import numpy as np\n'), ((1973, 1997), 'numpy.max', 'np.max', (['template'], {'axis': '(0)'}), '(template, axis=0)\n', (1979, 1997), True, 'import numpy as np\n'), ((2000, 2024), 'numpy.min', 'np.min', (['template'], {'axis': '(0)'}), '(template, axis=0)\n', (2006, 2024), True, 'import numpy as np\n'), ((4021, 4039), 'numpy.argmin', 'np.argmin', (['new_obj'], {}), '(new_obj)\n', (4030, 4039), True, 'import numpy as np\n'), ((8822, 8854), 'numpy.where', 'np.where', (['(spike_train[:, 1] == u)'], {}), '(spike_train[:, 1] == u)\n', (8830, 8854), True, 'import numpy as np\n'), ((8985, 8996), 'numpy.sum', 'np.sum', (['idx'], {}), '(idx)\n', (8991, 8996), True, 'import numpy as np\n'), ((9259, 9270), 'numpy.diff', 'np.diff', (['sp'], {}), '(sp)\n', (9266, 9270), True, 'import numpy as np\n'), ((5367, 5385), 'numpy.array', 'np.array', (['temp_ref'], {}), '(temp_ref)\n', (5375, 5385), True, 'import numpy as np\n'), ((7073, 7093), 'numpy.arange', 'np.arange', (['(0)', 'n_time'], {}), '(0, n_time)\n', (7082, 7093), True, 'import numpy as np\n'), ((1146, 1168), 'numpy.arange', 'np.arange', (['(0)', 'temp_len'], {}), '(0, temp_len)\n', (1155, 1168), True, 'import numpy as np\n'), ((2220, 2241), 'numpy.flip', 'np.flip', (['temp'], {'axis': '(0)'}), '(temp, axis=0)\n', (2227, 2241), True, 'import numpy as np\n'), ((5905, 5926), 'numpy.arange', 'np.arange', (['(0)', 'leng', '(1)'], {}), '(0, leng, 1)\n', (5914, 5926), True, 'import numpy as np\n'), ((6281, 6302), 'numpy.arange', 'np.arange', (['(0)', 'leng', '(1)'], {}), '(0, leng, 1)\n', (6290, 6302), True, 'import numpy as np\n'), ((910, 937), 'numpy.flip', 'np.flip', (['base_trace'], {'axis': '(0)'}), '(base_trace, axis=0)\n', (917, 937), True, 'import numpy as np\n'), ((9145, 9158), 'numpy.where', 'np.where', (['idx'], {}), '(idx)\n', (9153, 9158), True, 'import numpy as np\n'), ((9083, 9096), 
'numpy.where', 'np.where', (['idx'], {}), '(idx)\n', (9091, 9096), True, 'import numpy as np\n')] |
# This file is used to simulate the Flick The Cap game as part of Korean Drinking Games
# Import Files
import random
import time
import numpy as np
# Auxiliary Functions #################################################################
# Generate strength of bottle cap
def BottleCapStrength():
    """Draws a random bottle cap strength in [1, 25)."""
    lower, upper = 1, 25
    # Re-seed numpy's RNG from Python's `random` module on every call.
    np.random.seed(random.randint(1, 1000))
    strength = np.random.uniform(lower, upper)
    return abs(strength)
# Generate strength of flick
def FlickStrength():
    """Draws a random flick strength in [0, 1)."""
    lower, upper = 0, 1
    # Re-seed numpy's RNG from its own current stream before drawing.
    np.random.seed(int(np.random.uniform(1, 100)))
    strength = np.random.uniform(lower, upper)
    return abs(strength)
# Generate graphic of bottle cap
def DrawBottleCap(bc_stren, bc_init, num_flicks, lean_left, flick_success):
    """Prints an ASCII picture of the bottle cap's current state.

    Parameters
    ----------
    bc_stren: float
        Remaining strength of the cap; the tail is drawn while it's >= 0.
    bc_init: float
        Initial strength, used to classify how much damage has been done.
    num_flicks: int
        Number of successful flicks so far (shown under the drawing).
    lean_left: bool
        Which side the cap's tail currently leans toward.
    flick_success: bool
        Whether the latest flick connected; a miss flips the lean early.

    Returns
    -------
    bool: the lean direction for the next turn (flipped at the end).
    """
    # Removed unused locals `width` and `num_del` and dead commented-out code.
    ll = lean_left
    height = 4
    num_tab = 3
    char_per_tab = 8
    num_char = char_per_tab*num_tab
    # simulating flick miss here
    if not(flick_success):
        print("WOOPS, YA MISSED\n")
        ll = not(ll)
    if bc_stren >= 0:
        # draw the tail bit, leaning left or right
        if ll:
            print("*")
            print("  *")
            print("   *")
            print("     *")
            print("      *")
        else:
            print(" " + num_tab*"\t" + "     *")
            print(" " + num_tab*"\t" + "    *")
            print(" " + num_tab*"\t" + "   *")
            print(" " + num_tab*"\t" + " *")
            print(" " + num_tab*"\t" + "*")
    # draw bottle cap with no tail. ie Game End
    for i in range(0, height):
        print("|" + num_tab*"\t" + "|")
    print("|" + (num_char-1)*"_" + "|")
    print("\n Number of Flicks = " + str(num_flicks) + "\n")
    # now let them know how much damage has been done
    low_damage = [0.67*bc_init, 1.0*bc_init]
    mod_damage = [0.34*bc_init, 0.66*bc_init]
    high_damage = [0*bc_init, 0.33*bc_init]
    if low_damage[0] <= bc_stren <= low_damage[1]:
        print("The cap's barely moved! Surely you can do more damage!\n")
    elif mod_damage[0] <= bc_stren <= mod_damage[1]:
        print("Hmmm, the cap is breaking but I think you can put more oomf in it...\n")
    elif high_damage[0] <= bc_stren <= high_damage[1]:
        print("You almost got it! Just a few more taps now!\n")
    # switch lean for the next player's turn
    ll = not(ll)
    return ll
# Main function #######################################################################
def FlickTheCap(ListOfNames):
    """Runs the Flick The Cap drinking game for the given players.

    Players take turns pressing ENTER to flick a bottle cap; a flick with
    strength >= 0.33 lowers the cap's remaining strength by one. Whoever
    lands the final flick wins, and the two players seated next to the
    winner (wrapping around the table) must drink. Blocks on input() each
    turn.

    Parameters
    ----------
    ListOfNames: list of str
        Player names, in seating order.

    Returns
    -------
    dict with keys "WINNER", "LOSER1", "LOSER2" and "LOSERS", whose values
    are indices into ListOfNames.
    """
    game_fin = False
    max_players = len(ListOfNames)
    player = 0
    winner = player
    ret = dict()
    ret["WINNER"] = "UNKOWN ERROR"
    wait_time = 0.65  # seconds between the "..." progress dots
    # Get the bottle cap strength (number of successful flicks to win)
    bc_strength = BottleCapStrength()
    bc_init = bc_strength
    # number of flicks occured
    num_flicks = 0
    # lean of the bottle cap
    lean_left = True
    print()
    print()
    print("########## FLICK THE BOTTLE CAP ##########")
    print()
    # main game loop
    while game_fin == False:
        # get player to play
        print("Your turn, " + ListOfNames[player] + "!")
        print("Press ENTER to flick!")
        input()
        print("Simulating Flick")
        for i in range (0, 3):
            time.sleep(wait_time)
            print("...")
        # Extra lines to separate drawing
        print()
        print()
        # get flick strength; only flicks of strength >= 0.33 connect
        flick_success = False
        flick_strength = FlickStrength()
        if flick_strength >= 0.33:
            # successfull flick
            bc_strength = bc_strength - 1
            flick_success = True
            num_flicks = num_flicks + 1 # working with random number of flicks
        # draw bottle cap state
        lean_left = DrawBottleCap(bc_stren=bc_strength, bc_init=bc_init, num_flicks=num_flicks, lean_left=lean_left, flick_success=flick_success)
        # check for winner
        if bc_strength <= 0:
            print("Congratulations, " + ListOfNames[player] + "!")
            print("Following players need to DRINK:")
            winner = player
            ret["WINNER"] = winner
            # get losers: the two players adjacent to the winner, wrapping
            loser1 = winner - 1
            if loser1 < 0:
                loser1 = max_players - 1
            loser2 = winner + 1
            if loser2 > (max_players - 1):
                loser2 = 0
            ret["LOSER1"] = loser1
            ret["LOSER2"] = loser2
            ret["LOSERS"] = [loser1, loser2]
            print(ListOfNames[loser1])
            print(ListOfNames[loser2])
            game_fin = True
        # otherwise, go to next player (wrap around the table)
        player = player + 1
        if player > (max_players - 1):
            player = 0
    return ret
"numpy.random.uniform",
"numpy.random.seed",
"random.randint",
"time.sleep"
] | [((339, 362), 'random.randint', 'random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (353, 362), False, 'import random\n'), ((367, 391), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (381, 391), True, 'import numpy as np\n'), ((407, 434), 'numpy.random.uniform', 'np.random.uniform', (['min', 'max'], {}), '(min, max)\n', (424, 434), True, 'import numpy as np\n'), ((588, 612), 'numpy.random.seed', 'np.random.seed', (['seed_val'], {}), '(seed_val)\n', (602, 612), True, 'import numpy as np\n'), ((631, 658), 'numpy.random.uniform', 'np.random.uniform', (['min', 'max'], {}), '(min, max)\n', (648, 658), True, 'import numpy as np\n'), ((557, 582), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(100)'], {}), '(1, 100)\n', (574, 582), True, 'import numpy as np\n'), ((3501, 3522), 'time.sleep', 'time.sleep', (['wait_time'], {}), '(wait_time)\n', (3511, 3522), False, 'import time\n')] |
"""
Contains the core building blocks of the framework.
"""
from __future__ import division
import math
from copy import deepcopy
import cython as cy
import numpy as np
import pandas as pd
from future.utils import iteritems
PAR = 100.0
TOL = 1e-16
@cy.locals(x=cy.double)
def is_zero(x):
"""
Test for zero that is robust against floating point precision errors
"""
return abs(x) < TOL
class Node(object):
"""
The Node is the main building block in bt's tree structure design.
Both StrategyBase and SecurityBase inherit Node. It contains the
core functionality of a tree node.
Args:
* name (str): The Node name
* parent (Node): The parent Node
* children (dict, list): A collection of children. If dict,
the format is {name: child}, if list then list of children.
Children can be any type of Node or str.
String values correspond to children which will be lazily created
with that name when needed.
Attributes:
* name (str): Node name
* parent (Node): Node parent
* root (Node): Root node of the tree (topmost node)
* children (dict): Node's children
* now (datetime): Used when backtesting to store current date
* stale (bool): Flag used to determine if Node is stale and need
updating
* prices (TimeSeries): Prices of the Node. Prices for a security will
be the security's price, for a strategy it will be an index that
reflects the value of the strategy over time.
* price (float): last price
* value (float): last value
* notional_value (float): last notional value. Notional value is used
when fixed_income=True. It is always positive for strategies, but
is signed for securities (and typically set to either market value,
position, or zero).
* weight (float): weight in parent
* full_name (str): Name including parents' names
* members (list): Current Node + node's children
* fixed_income (bool): Whether the node corresponds to a fixed income
component, which would use notional-weighting instead of market
value weighing. See also FixedIncomeStrategy for more details.
"""
_capital = cy.declare(cy.double)
_price = cy.declare(cy.double)
_value = cy.declare(cy.double)
_notl_value = cy.declare(cy.double)
_weight = cy.declare(cy.double)
_issec = cy.declare(cy.bint)
_has_strat_children = cy.declare(cy.bint)
_fixed_income = cy.declare(cy.bint)
_bidoffer_set = cy.declare(cy.bint)
_bidoffer_paid = cy.declare(cy.double)
def __init__(self, name, parent=None, children=None):
self.name = name
# children helpers
self.children = {}
self._lazy_children = {}
self._universe_tickers = []
self._childrenv = [] # Shortcut to self.children.values()
# strategy children helpers
self._has_strat_children = False
self._strat_children = []
if parent is None:
self.parent = self
self.root = self
# by default all positions are integer
self.integer_positions = True
else:
self.parent = parent
parent._add_children([self], dc=False)
self._add_children(children, dc=True)
# set default value for now
self.now = 0
# make sure root has stale flag
# used to avoid unnecessary update
# sometimes we change values in the tree and we know that we will need
# to update if another node tries to access a given value (say weight).
# This avoid calling the update until it is actually needed.
self.root.stale = False
# helper vars
self._price = 0
self._value = 0
self._notl_value = 0
self._weight = 0
self._capital = 0
# is security flag - used to avoid updating 0 pos securities
self._issec = False
# fixed income flag - used to turn on notional weighing
self._fixed_income = False
# flag for whether to do bid/offer accounting
self._bidoffer_set = False
self._bidoffer_paid = 0
def __getitem__(self, key):
return self.children[key]
def _add_children(self, children, dc):
"""
Add the collection of children to the current node, where
children is either an iterable of children objects/strings, or
a dictionary
Args:
dc (bool): Whether or not to deepcopy nodes before adding them.
"""
if children is not None:
if isinstance(children, dict):
# Preserve the names from the dictionary by renaming the nodes
tmp = []
for name, c in iteritems(children):
if isinstance(c, str):
tmp.append(name)
else:
if dc:
c = deepcopy(c)
c.name = name
tmp.append(c)
children = tmp
for c in children:
if dc: # deepcopy object for possible later reuse
c = deepcopy(c)
if type(c) == str:
if c in self._universe_tickers:
raise ValueError("Child %s already exists" % c)
# Create default security with lazy_add
c = Security(c, lazy_add=True)
if getattr(c, "lazy_add", False):
self._lazy_children[c.name] = c
else:
if c.name in self.children:
raise ValueError("Child %s already exists" % c)
c.parent = self
c._set_root(self.root)
c.use_integer_positions(self.integer_positions)
self.children[c.name] = c
self._childrenv.append(c)
# if strategy, turn on flag and add name to list
# strategy children have special treatment
if isinstance(c, StrategyBase):
self._has_strat_children = True
self._strat_children.append(c.name)
# if not strategy, then we will want to add this to
# universe_tickers to filter on setup
elif c.name not in self._universe_tickers:
self._universe_tickers.append(c.name)
def _set_root(self, root):
self.root = root
for c in self._childrenv:
c._set_root(root)
def use_integer_positions(self, integer_positions):
"""
Set indicator to use (or not) integer positions for a given strategy or
security.
By default all positions in number of stocks should be integer.
However this may lead to unexpected results when working with adjusted
prices of stocks. Because of series of reverse splits of stocks, the
adjusted prices back in time might be high. Thus rounding of desired
amount of stocks to buy may lead to having 0, and thus ignoring this
stock from backtesting.
"""
self.integer_positions = integer_positions
for c in self._childrenv:
c.use_integer_positions(integer_positions)
    @property
    def fixed_income(self):
        """
        Whether the node is a fixed income node (using notional weighting).

        Defaults to False (set in the constructor); fixed income subclasses
        flip the underlying flag.
        """
        return self._fixed_income
    @property
    def prices(self):
        """
        A TimeSeries of the Node's price.

        Abstract: concrete subclasses (strategies/securities) must override.
        """
        # can optimize depending on type -
        # securities don't need to check stale to
        # return latest prices, whereas strategies do...
        raise NotImplementedError()
    @property
    def price(self):
        """
        Current price of the Node.

        Abstract: concrete subclasses (strategies/securities) must override.
        """
        # can optimize depending on type -
        # securities don't need to check stale to
        # return latest prices, whereas strategies do...
        raise NotImplementedError()
@property
def value(self):
"""
Current value of the Node
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._value
@property
def notional_value(self):
"""
Current notional value of the Node
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._notl_value
@property
def weight(self):
"""
Current weight of the Node (with respect to the parent).
"""
if self.root.stale:
self.root.update(self.root.now, None)
return self._weight
    def setup(self, universe, **kwargs):
        """
        Setup method used to initialize a Node with a universe, and potentially other information.

        Abstract: implemented by StrategyBase and SecurityBase.
        """
        raise NotImplementedError()
    def update(self, date, data=None, inow=None):
        """
        Update Node with latest date, and optionally some data.

        Abstract: implemented by StrategyBase and SecurityBase.
        """
        raise NotImplementedError()
    def adjust(self, amount, update=True, flow=True):
        """
        Adjust Node value by amount.

        Abstract: implemented by subclasses.
        """
        raise NotImplementedError()
    def allocate(self, amount, update=True):
        """
        Allocate capital to Node.

        Abstract: implemented by subclasses.
        """
        raise NotImplementedError()
@property
def members(self):
"""
Node members. Members include current node as well as Node's
children.
"""
res = [self]
for c in list(self.children.values()):
res.extend(c.members)
return res
@property
def full_name(self):
if self.parent == self:
return self.name
else:
return "%s>%s" % (self.parent.full_name, self.name)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.full_name)
def to_dot(self, root=True):
"""
Represent the node structure in DOT format.
"""
name = lambda x: x.name or repr(self) # noqa: E731
edges = "\n".join(
'\t"%s" -> "%s"' % (name(self), name(c)) for c in self.children.values()
)
below = "\n".join(c.to_dot(False) for c in self.children.values())
body = "\n".join([edges, below]).rstrip()
if root:
return "\n".join(["digraph {", body, "}"])
return body
class StrategyBase(Node):
    """
    Strategy Node. Used to define strategy logic within a tree.
    A Strategy's role is to allocate capital to it's children
    based on a function.

    Args:
        * name (str): Strategy name
        * children (dict, list): A collection of children. If dict,
            the format is {name: child}, if list then list of children.
            Children can be any type of Node or str.
            String values correspond to children which will be lazily created
            with that name when needed.
        * parent (Node): The parent Node

    Attributes:
        * name (str): Strategy name
        * parent (Strategy): Strategy parent
        * root (Strategy): Root node of the tree (topmost node)
        * children (dict): Strategy's children
        * now (datetime): Used when backtesting to store current date
        * stale (bool): Flag used to determine if Strategy is stale and needs
            updating
        * prices (TimeSeries): Prices of the Strategy - basically an index that
            reflects the value of the strategy over time.
        * outlays (DataFrame): Outlays for each SecurityBase child
        * price (float): last price
        * value (float): last value
        * notional_value (float): last notional value
        * weight (float): weight in parent
        * full_name (str): Name including parents' names
        * members (list): Current Strategy + strategy's children
        * securities (list): List of strategy children that are of type
            SecurityBase
        * commission_fn (fn(quantity, price)): A function used to determine the
            commission (transaction fee) amount. Could be used to model
            slippage (implementation shortfall). Note that often fees are
            symmetric for buy and sell and absolute value of quantity should
            be used for calculation.
        * capital (float): Capital amount in Strategy - cash
        * universe (DataFrame): Data universe available at the current time.
            Universe contains the data passed in when creating a Backtest. Use
            this data to determine strategy logic.
    """

    # Cython pure-python-mode typed attribute declarations (no-ops when the
    # module is not compiled)
    _net_flows = cy.declare(cy.double)
    _last_value = cy.declare(cy.double)
    _last_notl_value = cy.declare(cy.double)
    _last_price = cy.declare(cy.double)
    _last_fee = cy.declare(cy.double)
    _paper_trade = cy.declare(cy.bint)
    bankrupt = cy.declare(cy.bint)

    def __init__(self, name, children=None, parent=None):
        """Initialize the strategy node and its per-period helper state."""
        Node.__init__(self, name, children=children, parent=parent)
        self._weight = 1
        self._value = 0
        self._notl_value = 0
        # strategy price is an index; it starts at PAR
        self._price = PAR
        # helper vars
        self._net_flows = 0
        self._last_value = 0
        self._last_notl_value = 0
        self._last_price = PAR
        self._last_fee = 0
        # default commission function (no fees)
        self.commission_fn = self._dflt_comm_fn
        self._paper_trade = False
        self._positions = None
        self.bankrupt = False

    @property
    def price(self):
        """
        Current price.
        """
        if self.root.stale:
            self.root.update(self.now, None)
        return self._price

    @property
    def prices(self):
        """
        TimeSeries of prices.
        """
        if self.root.stale:
            self.root.update(self.now, None)
        return self._prices.loc[: self.now]

    @property
    def values(self):
        """
        TimeSeries of values.
        """
        if self.root.stale:
            self.root.update(self.now, None)
        return self._values.loc[: self.now]

    @property
    def notional_values(self):
        """
        TimeSeries of notional values.
        """
        if self.root.stale:
            self.root.update(self.now, None)
        return self._notl_values.loc[: self.now]

    @property
    def capital(self):
        """
        Current capital - amount of unallocated capital left in strategy.
        """
        # no stale check needed
        return self._capital

    @property
    def cash(self):
        """
        TimeSeries of unallocated capital.
        """
        # no stale check needed
        return self._cash

    @property
    def fees(self):
        """
        TimeSeries of fees.
        """
        if self.root.stale:
            self.root.update(self.now, None)
        return self._fees.loc[: self.now]

    @property
    def flows(self):
        """
        TimeSeries of flows.
        """
        if self.root.stale:
            self.root.update(self.now, None)
        return self._all_flows.loc[: self.now]

    @property
    def bidoffer_paid(self):
        """
        Bid/offer spread paid on transactions in the current step
        """
        if self._bidoffer_set:
            if self.root.stale:
                self.root.update(self.now, None)
            return self._bidoffer_paid
        else:
            raise Exception(
                "Bid/offer accounting not turned on: "
                '"bidoffer" argument not provided during setup'
            )

    @property
    def bidoffers_paid(self):
        """
        TimeSeries of bid/offer spread paid on transactions in each step
        """
        if self._bidoffer_set:
            if self.root.stale:
                self.root.update(self.now, None)
            return self._bidoffers_paid.loc[: self.now]
        else:
            raise Exception(
                "Bid/offer accounting not turned on: "
                '"bidoffer" argument not provided during setup'
            )

    @property
    def universe(self):
        """
        Data universe available at the current time.
        Universe contains the data passed in when creating a Backtest.
        Use this data to determine strategy logic.
        """
        # avoid windowing every time
        # if calling and on same date return
        # cached value
        if self.now == self._last_chk:
            return self._funiverse
        else:
            self._last_chk = self.now
            self._funiverse = self._universe.loc[: self.now]
            return self._funiverse

    @property
    def securities(self):
        """
        Returns a list of children that are of type SecurityBase
        """
        return [x for x in self.members if isinstance(x, SecurityBase)]

    @property
    def outlays(self):
        """
        Returns a DataFrame of outlays for each child SecurityBase
        """
        if self.root.stale:
            self.root.update(self.root.now, None)
        return pd.DataFrame({x.name: x.outlays for x in self.securities})

    @property
    def positions(self):
        """
        TimeSeries of positions.
        """
        # if accessing and stale - update first
        if self.root.stale:
            self.root.update(self.root.now, None)
        vals = pd.DataFrame(
            {x.name: x.positions for x in self.members if isinstance(x, SecurityBase)}
        )
        self._positions = vals
        return vals

    def setup(self, universe, **kwargs):
        """
        Setup strategy with universe. This will speed up future calculations
        and updates.
        """
        # save full universe in case we need it
        self._original_data = universe
        self._setup_kwargs = kwargs

        # Guard against fixed income children of regular
        # strategies as the "price" is just a reference
        # value and should not be used for capital allocation
        if self.fixed_income and not self.parent.fixed_income:
            raise ValueError(
                "Cannot have fixed income "
                "strategy child (%s) of non-"
                "fixed income strategy (%s)" % (self.name, self.parent.name)
            )

        # determine if needs paper trading
        # and setup if so
        if self is not self.parent:
            self._paper_trade = True
            self._paper_amount = 1000000

            paper = deepcopy(self)
            paper.parent = paper
            paper.root = paper
            paper._paper_trade = False
            paper.setup(self._original_data, **kwargs)
            paper.adjust(self._paper_amount)
            self._paper = paper

        # setup universe
        funiverse = universe

        if self._universe_tickers:
            # if we have universe_tickers defined, limit universe to
            # those tickers
            valid_filter = list(
                set(universe.columns).intersection(self._universe_tickers)
            )

            funiverse = universe[valid_filter].copy()

            # if we have strat children, we will need to create their columns
            # in the new universe
            if self._has_strat_children:
                for c in self._strat_children:
                    funiverse[c] = np.nan

            # must create to avoid pandas warning
            funiverse = pd.DataFrame(funiverse)

        self._universe = funiverse
        # holds filtered universe
        self._funiverse = funiverse
        self._last_chk = None

        # We're not bankrupt yet
        self.bankrupt = False

        # setup internal data
        self.data = pd.DataFrame(
            index=funiverse.index,
            columns=["price", "value", "notional_value", "cash", "fees", "flows"],
            data=0.0,
        )

        # column views into self.data - updated in place during update()
        self._prices = self.data["price"]
        self._values = self.data["value"]
        self._notl_values = self.data["notional_value"]
        self._cash = self.data["cash"]
        self._fees = self.data["fees"]
        self._all_flows = self.data["flows"]

        if "bidoffer" in kwargs:
            self._bidoffer_set = True
            self.data["bidoffer_paid"] = 0.0
            self._bidoffers_paid = self.data["bidoffer_paid"]

        # setup children as well - use original universe here - don't want to
        # pollute with potential strategy children in funiverse
        if self.children is not None:
            [c.setup(universe, **kwargs) for c in self._childrenv]

    def setup_from_parent(self, **kwargs):
        """
        Setup a strategy from the parent. Used when dynamically creating
        child strategies.

        Args:
            * kwargs: additional arguments that will be passed to setup
              (potentially overriding those from the parent)
        """
        all_kwargs = self.parent._setup_kwargs.copy()
        all_kwargs.update(kwargs)
        self.setup(self.parent._original_data, **all_kwargs)
        if self.name not in self.parent._universe:
            self.parent._universe[self.name] = np.nan

    def get_data(self, key):
        """
        Returns additional data that was passed to the setup function via kwargs,
        for use in the algos. This allows algos to reference data sources "by name",
        where the binding of the data to the name happens at Backtest creation
        time rather than at Strategy definition time, allowing the same strategies
        to be run against different data sets more easily.
        """
        return self._setup_kwargs[key]

    @cy.locals(
        newpt=cy.bint,
        val=cy.double,
        ret=cy.double,
        coupons=cy.double,
        notl_val=cy.double,
        bidoffer_paid=cy.double,
    )
    def update(self, date, data=None, inow=None):
        """
        Update strategy. Updates prices, values, weight, etc.
        """
        # resolve stale state
        self.root.stale = False

        # update helpers on date change
        # also set newpt flag
        newpt = False
        if self.now == 0:
            newpt = True
        elif date != self.now:
            self._net_flows = 0
            self._last_price = self._price
            self._last_value = self._value
            self._last_notl_value = self._notl_value
            self._last_fee = 0.0
            newpt = True

        # update now
        self.now = date
        if inow is None:
            if self.now == 0:
                inow = 0
            else:
                inow = self.data.index.get_loc(date)

        # update children if any and calculate value
        val = self._capital  # default if no children
        notl_val = 0.0  # Capital doesn't count towards notional value
        bidoffer_paid = 0.0
        coupons = 0

        if self.children:
            for c in self._childrenv:
                # Sweep up cash from the security nodes (from coupon payments, etc)
                if c._issec and newpt:
                    coupons += c._capital
                    c._capital = 0
                # avoid useless update call
                if c._issec and not c._needupdate:
                    continue
                c.update(date, data, inow)
                val += c.value
                # Strategies always have positive notional value
                notl_val += abs(c.notional_value)
                if self._bidoffer_set:
                    bidoffer_paid += c.bidoffer_paid

        self._capital += coupons
        val += coupons

        if self.root == self:
            if (
                (val < 0)
                and not self.bankrupt
                and not self.fixed_income
                and not is_zero(val)
            ):
                # Declare a bankruptcy
                self.bankrupt = True
                self.flatten()

        # update data if this value is different or
        # if now has changed - avoid all this if not since it
        # won't change
        if (
            newpt
            or not is_zero(self._value - val)
            or not is_zero(self._notl_value - notl_val)
        ):
            self._value = val
            self._values.values[inow] = val
            self._notl_value = notl_val
            self._notl_values.values[inow] = notl_val

            if self._bidoffer_set:
                self._bidoffer_paid = bidoffer_paid
                self._bidoffers_paid.values[inow] = bidoffer_paid

            if self.fixed_income:
                # For notional weights, we compute additive return
                pnl = self._value - (self._last_value + self._net_flows)
                if not is_zero(self._last_notl_value):
                    ret = pnl / self._last_notl_value * PAR
                elif not is_zero(self._notl_value):
                    # This case happens when paying bid/offer or fees when building an initial position
                    ret = pnl / self._notl_value * PAR
                else:
                    if is_zero(pnl):
                        ret = 0
                    else:
                        raise ZeroDivisionError(
                            "Could not update %s on %s. Last notional value "
                            "was %s and pnl was %s. Therefore, "
                            "we are dividing by zero to obtain the pnl "
                            "per unit notional for the period."
                            % (self.name, self.now, self._last_notl_value, pnl)
                        )
                self._price = self._last_price + ret
                self._prices.values[inow] = self._price
            else:
                bottom = self._last_value + self._net_flows
                if not is_zero(bottom):
                    ret = self._value / (self._last_value + self._net_flows) - 1
                else:
                    if is_zero(self._value):
                        ret = 0
                    else:
                        raise ZeroDivisionError(
                            "Could not update %s on %s. Last value "
                            "was %s and net flows were %s. Current"
                            "value is %s. Therefore, "
                            "we are dividing by zero to obtain the return "
                            "for the period."
                            % (
                                self.name,
                                self.now,
                                self._last_value,
                                self._net_flows,
                                self._value,
                            )
                        )
                self._price = self._last_price * (1 + ret)
                self._prices.values[inow] = self._price

        # update children weights
        if self.children:
            for c in self._childrenv:
                # avoid useless update call
                if c._issec and not c._needupdate:
                    continue
                if self.fixed_income:
                    if not is_zero(notl_val):
                        c._weight = c.notional_value / notl_val
                    else:
                        c._weight = 0.0
                else:
                    if not is_zero(val):
                        c._weight = c.value / val
                    else:
                        c._weight = 0.0

        # if we have strategy children, we will need to update them in universe
        if self._has_strat_children:
            for c in self._strat_children:
                # TODO: optimize ".loc" here as well
                self._universe.loc[date, c] = self.children[c].price

        # Cash should track the unallocated capital at the end of the day, so
        # we should update it every time we call "update".
        # Same for fees and flows
        self._cash.values[inow] = self._capital
        self._fees.values[inow] = self._last_fee
        self._all_flows.values[inow] = self._net_flows

        # update paper trade if necessary
        if self._paper_trade:
            if newpt:
                self._paper.update(date)
                self._paper.run()
                self._paper.update(date)
                # update price
                self._price = self._paper.price
                self._prices.values[inow] = self._price

    # FIX: declaration previously read ``fees=cy.double`` which does not match
    # any local/parameter name - the parameter is ``fee``
    @cy.locals(amount=cy.double, update=cy.bint, flow=cy.bint, fee=cy.double)
    def adjust(self, amount, update=True, flow=True, fee=0.0):
        """
        Adjust capital - used to inject capital to a Strategy. This injection
        of capital will have no effect on the children.

        Args:
            * amount (float): Amount to adjust by.
            * update (bool): Force update?
            * flow (bool): Is this adjustment a flow? A flow will not have an
              impact on the performance (price index). Example of flows are
              simply capital injections (say a monthly contribution to a
              portfolio). This should not be reflected in the returns. A
              non-flow (flow=False) does impact performance. A good example
              of this is a commission, or a dividend.
            * fee (float): Fee associated with the adjustment. Tracked in the
              fees TimeSeries; fees are not flows.
        """
        # adjust capital
        self._capital += amount
        self._last_fee += fee
        # if flow - increment net_flows - this will not affect
        # performance. Commissions and other fees are not flows since
        # they have a performance impact
        if flow:
            self._net_flows += amount
        if update:
            # indicates that data is now stale and must
            # be updated before access
            self.root.stale = True

    @cy.locals(amount=cy.double, update=cy.bint)
    def allocate(self, amount, child=None, update=True):
        """
        Allocate capital to Strategy. By default, capital is allocated
        recursively down the children, proportionally to the children's
        weights. If a child is specified, capital will be allocated
        to that specific child.

        Allocations also have a side-effect. They will deduct the same amount
        from the parent's "account" to offset the allocation. If there is
        remaining capital after allocation, it will remain in Strategy.

        Args:
            * amount (float): Amount to allocate.
            * child (str): If specified, allocation will be directed to child
              only. Specified by name.
            * update (bool): Force update.
        """
        # allocate to child
        if child is not None:
            self._create_child_if_needed(child)
            # allocate to child
            self.children[child].allocate(amount)
        # allocate to self
        else:
            # adjust parent's capital
            # no need to update now - avoids repetition
            if self.parent == self:
                self.parent.adjust(-amount, update=False, flow=True)
            else:
                # do NOT set as flow - parent will be another strategy
                # and therefore should not incur flow
                self.parent.adjust(-amount, update=False, flow=False)

            # adjust self's capital
            self.adjust(amount, update=False, flow=True)

            # push allocation down to children if any
            # use _weight to avoid triggering an update
            if self.children is not None:
                [c.allocate(amount * c._weight, update=False) for c in self._childrenv]

            # mark as stale if update requested
            if update:
                self.root.stale = True

    @cy.locals(q=cy.double, update=cy.bint)
    def transact(self, q, child=None, update=True):
        """
        Transact a notional amount q in the Strategy. By default, it is allocated
        recursively down the children, proportionally to the children's
        weights. Recursive allocation only works for fixed income strategies.
        If a child is specified, notional will be allocated
        to that specific child.

        Args:
            * q (float): Notional quantity to allocate.
            * child (str): If specified, allocation will be directed to child
              only. Specified by name.
            * update (bool): Force update.
        """
        # allocate to child
        if child is not None:
            self._create_child_if_needed(child)
            # allocate to child
            self.children[child].transact(q)
        # allocate to self
        else:
            # push allocation down to children if any
            # use _weight to avoid triggering an update
            if self.children is not None:
                [c.transact(q * c._weight, update=False) for c in self._childrenv]

            # mark as stale if update requested
            if update:
                self.root.stale = True

    @cy.locals(delta=cy.double, weight=cy.double, base=cy.double, update=cy.bint)
    def rebalance(self, weight, child, base=np.nan, update=True):
        """
        Rebalance a child to a given weight.

        This is a helper method to simplify code logic. This method is used
        when we want to set the weight of a particular child to a target amount.
        It is similar to allocate, but it calculates the appropriate allocation
        based on the current weight. For fixed income strategies, it uses
        transact to rebalance based on notional value instead of capital.

        Args:
            * weight (float): The target weight. Usually between -1.0 and 1.0.
            * child (str): child to allocate to - specified by name.
            * base (float): If specified, this is the base amount all weight
              delta calculations will be based off of. This is useful when we
              determine a set of weights and want to rebalance each child
              given these new weights. However, as we iterate through each
              child and call this method, the base (which is by default the
              current value) will change. Therefore, we can set this base to
              the original value before the iteration to ensure the proper
              allocations are made.
            * update (bool): Force update?
        """
        # if weight is 0 - we want to close child
        if is_zero(weight):
            if child in self.children:
                return self.close(child, update=update)
            else:
                return

        # if no base specified use self's value
        if np.isnan(base):
            if self.fixed_income:
                base = self.notional_value
            else:
                base = self.value

        # else make sure we have child
        self._create_child_if_needed(child)

        # allocate to child
        # figure out weight delta
        c = self.children[child]
        if self.fixed_income:
            # In fixed income strategies, the provided "base" value can be used
            # to upscale/downscale the notional_value of the strategy, whereas
            # in normal strategies the total capital is fixed. Thus, when
            # rebalancing, we must take care to account for differences between
            # previous notional value and passed base value. Note that for
            # updating many weights in sequence, one must pass update=False so
            # that the existing weights and notional_value are not recalculated
            # before finishing.
            if c.fixed_income:
                delta = weight * base - c.weight * self.notional_value
                c.transact(delta, update=update)
            else:
                delta = weight * base - c.weight * self.notional_value
                c.allocate(delta, update=update)
        else:
            delta = weight - c.weight
            c.allocate(delta * base, update=update)

    @cy.locals(update=cy.bint)
    def close(self, child, update=True):
        """
        Close a child position - alias for rebalance(0, child). This will also
        flatten (close out all) the child's children.

        Args:
            * child (str): Child, specified by name.
        """
        c = self.children[child]
        # flatten if children not None
        if c.children is not None and len(c.children) != 0:
            c.flatten()

        if self.fixed_income:
            if c.position != 0.0:
                c.transact(-c.position, update=update)
        else:
            if c.value != 0.0 and not np.isnan(c.value):
                c.allocate(-c.value, update=update)

    def flatten(self):
        """
        Close all child positions.
        """
        # go right to base alloc
        if self.fixed_income:
            [
                c.transact(-c.position, update=False)
                for c in self._childrenv
                if c.position != 0
            ]
        else:
            [
                c.allocate(-c.value, update=False)
                for c in self._childrenv
                if c.value != 0
            ]

        self.root.stale = True

    def run(self):
        """
        This is the main logic method. Override this method to provide some
        algorithm to execute on each date change. This method is called by
        backtester.
        """
        pass

    def set_commissions(self, fn):
        """
        Set commission (transaction fee) function.

        Args:
            fn (fn(quantity, price)): Function used to determine commission
            amount.
        """
        self.commission_fn = fn

        for c in self._childrenv:
            if isinstance(c, StrategyBase):
                c.set_commissions(fn)

    def get_transactions(self):
        """
        Helper function that returns the transactions in the following format:

            Date, Security | quantity, price

        The result is a MultiIndex DataFrame.
        """
        # get prices for each security in the strategy & create unstacked
        # series
        prc = pd.DataFrame({x.name: x.prices for x in self.securities}).unstack()

        # get security positions
        positions = pd.DataFrame({x.name: x.positions for x in self.securities})
        # trades are diff
        trades = positions.diff()
        # must adjust first row
        trades.iloc[0] = positions.iloc[0]
        # now convert to unstacked series, dropping nans along the way
        trades = trades[trades != 0].unstack().dropna()

        # Adjust prices for bid/offer paid if needed
        if self._bidoffer_set:
            bidoffer = pd.DataFrame(
                {x.name: x.bidoffers_paid for x in self.securities}
            ).unstack()
            prc += bidoffer / trades

        res = pd.DataFrame({"price": prc, "quantity": trades}).dropna(
            subset=["quantity"]
        )
        # set names
        res.index.names = ["Security", "Date"]
        # swap levels so that we have (date, security) as index and sort
        res = res.swaplevel().sort_index()
        return res

    @cy.locals(q=cy.double, p=cy.double)
    def _dflt_comm_fn(self, q, p):
        # default commission model: free trading
        return 0.0

    def _create_child_if_needed(self, child):
        # Materialize a lazily-declared child (or a brand new default
        # Security) the first time it is transacted on
        if child not in self.children:
            # Look up name in lazy children, or create a default security
            c = self._lazy_children.pop(child, Security(child))
            c.lazy_add = False

            # add child to tree
            self._add_children([c], dc=False)
            c.setup(self._universe, **self._setup_kwargs)

            # update to bring up to speed
            c.update(self.now)
class SecurityBase(Node):
    """
    Security Node. Used to define a security within a tree.
    A Security's has no children. It simply models an asset that can be bought
    or sold.

    Args:
        * name (str): Security name
        * multiplier (float): security multiplier - typically used for
          derivatives.
        * lazy_add (bool): Flag to control whether instrument should be added
          to strategy children lazily, i.e. only when there is a transaction
          on the instrument. This improves performance of strategies which
          transact on a sparse set of children.

    Attributes:
        * name (str): Security name
        * parent (Security): Security parent
        * root (Security): Root node of the tree (topmost node)
        * now (datetime): Used when backtesting to store current date
        * stale (bool): Flag used to determine if Security is stale and need
          updating
        * prices (TimeSeries): Security prices.
        * price (float): last price
        * outlays (TimeSeries): Series of outlays. Positive outlays mean
          capital was allocated to security and security consumed that
          amount. Negative outlays are the opposite. This can be useful for
          calculating turnover at the strategy level.
        * value (float): last value - basically position * price * multiplier
        * weight (float): weight in parent
        * full_name (str): Name including parents' names
        * members (list): Current Security + strategy's children
        * position (float): Current position (quantity).
        * bidoffer (float): Current bid/offer spread
        * bidoffers (TimeSeries): Series of bid/offer spreads
        * bidoffer_paid (TimeSeries): Series of bid/offer paid on transactions
    """

    # Cython pure-python-mode typed attribute declarations (no-ops when the
    # module is not compiled)
    _last_pos = cy.declare(cy.double)
    _position = cy.declare(cy.double)
    multiplier = cy.declare(cy.double)
    _prices_set = cy.declare(cy.bint)
    _needupdate = cy.declare(cy.bint)
    _outlay = cy.declare(cy.double)
    _bidoffer = cy.declare(cy.double)
    @cy.locals(multiplier=cy.double)
    def __init__(self, name, multiplier=1, lazy_add=False):
        """
        Initialize the security node.

        Args:
            * name (str): Security name/ticker.
            * multiplier (float): Contract multiplier applied to
              position * price when computing value.
            * lazy_add (bool): If True, the security is only wired into its
              parent strategy on first transaction.
        """
        Node.__init__(self, name, parent=None, children=None)
        self._value = 0
        self._price = 0
        self._weight = 0
        self._position = 0
        self.multiplier = multiplier
        self.lazy_add = lazy_add

        # opt - internal flags used to skip redundant updates
        self._last_pos = 0
        self._issec = True
        self._needupdate = True
        self._outlay = 0
        self._bidoffer = 0
@property
def price(self):
"""
Current price.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
return self._price
@property
def prices(self):
"""
TimeSeries of prices.
"""
# if accessing and stale - update first
if self._needupdate or self.now != self.parent.now:
self.update(self.root.now)
return self._prices.loc[: self.now]
    @property
    def values(self):
        """
        TimeSeries of values up to the current date.
        """
        # if accessing and stale - update first
        # (refresh self, then the whole tree if the root is stale)
        if self._needupdate or self.now != self.parent.now:
            self.update(self.root.now)
        if self.root.stale:
            self.root.update(self.root.now, None)
        return self._values.loc[: self.now]
    @property
    def notional_values(self):
        """
        TimeSeries of notional values up to the current date.
        """
        # if accessing and stale - update first
        # (refresh self, then the whole tree if the root is stale)
        if self._needupdate or self.now != self.parent.now:
            self.update(self.root.now)
        if self.root.stale:
            self.root.update(self.root.now, None)
        return self._notl_values.loc[: self.now]
    @property
    def position(self):
        """
        Current position (quantity held).
        """
        # no stale check needed - position is maintained eagerly
        return self._position
    @property
    def positions(self):
        """
        TimeSeries of positions up to the current date.
        """
        # if accessing and stale - update first
        # NOTE(review): unlike the other accessors on this class, this one
        # does not also check ``self.now != self.parent.now`` - presumably
        # intentional since positions are maintained eagerly; confirm.
        if self._needupdate:
            self.update(self.root.now)
        if self.root.stale:
            self.root.update(self.root.now, None)
        return self._positions.loc[: self.now]
    @property
    def outlays(self):
        """
        TimeSeries of outlays. Positive outlays (buys) mean this security
        received and consumed capital (capital was allocated to it). Negative
        outlays are the opposite (the security close/sold, and returned capital
        to parent).
        """
        # if accessing and stale - update first
        # (refresh self, then the whole tree if the root is stale)
        if self._needupdate or self.now != self.parent.now:
            self.update(self.root.now)
        if self.root.stale:
            self.root.update(self.root.now, None)
        return self._outlays.loc[: self.now]
    @property
    def bidoffer(self):
        """
        Current bid/offer spread.
        """
        # if accessing and stale - update first
        if self._needupdate or self.now != self.parent.now:
            self.update(self.root.now)
        return self._bidoffer
    @property
    def bidoffers(self):
        """
        TimeSeries of bid/offer spread.

        Raises:
            Exception: If bid/offer accounting was not enabled at setup.
        """
        if self._bidoffer_set:
            # if accessing and stale - update first
            if self._needupdate or self.now != self.parent.now:
                self.update(self.root.now)
            return self._bidoffers.loc[: self.now]
        else:
            raise Exception(
                "Bid/offer accounting not turned on: "
                '"bidoffer" argument not provided during setup'
            )
    @property
    def bidoffer_paid(self):
        """
        Bid/offer spread paid on transactions in the current step.
        """
        # if accessing and stale - update first
        if self._needupdate or self.now != self.parent.now:
            self.update(self.root.now)
        return self._bidoffer_paid
    @property
    def bidoffers_paid(self):
        """
        TimeSeries of bid/offer spread paid on transactions in each step.

        Raises:
            Exception: If bid/offer accounting was not enabled at setup.
        """
        if self._bidoffer_set:
            # if accessing and stale - update first
            if self._needupdate or self.now != self.parent.now:
                self.update(self.root.now)
            if self.root.stale:
                self.root.update(self.root.now, None)
            return self._bidoffers_paid.loc[: self.now]
        else:
            raise Exception(
                "Bid/offer accounting not turned on: "
                '"bidoffer" argument not provided during setup'
            )
    def setup(self, universe, **kwargs):
        """
        Setup Security with universe. Speeds up future runs.

        Args:
            * universe (DataFrame): DataFrame of prices with security's name as
              one of the columns.
            * kwargs (DataFrames): DataFrames of additional security level
              information (i.e. bid/ask spread, risk, etc).

        Raises:
            ValueError: If a provided per-security bidoffer series does not
                share the universe's index.
        """
        # if we already have all the prices, we will store them to speed up
        # future updates
        try:
            prices = universe[self.name]
        except KeyError:
            # security not in universe - prices will arrive via update(data)
            prices = None

        # setup internal data
        if prices is not None:
            self._prices = prices
            self.data = pd.DataFrame(
                index=universe.index,
                columns=["value", "position", "notional_value"],
                data=0.0,
            )
            self._prices_set = True
        else:
            self.data = pd.DataFrame(
                index=universe.index,
                columns=["price", "value", "position", "notional_value"],
            )
            self._prices = self.data["price"]
            self._prices_set = False

        # column views into self.data - written in place during update()
        self._values = self.data["value"]
        self._notl_values = self.data["notional_value"]
        self._positions = self.data["position"]

        # add _outlay
        self.data["outlay"] = 0.0
        self._outlays = self.data["outlay"]

        # save bidoffer, if provided
        if "bidoffer" in kwargs:
            self._bidoffer_set = True
            self._bidoffers = kwargs["bidoffer"]
            try:
                bidoffers = self._bidoffers[self.name]
            except KeyError:
                bidoffers = None
            if bidoffers is not None:
                if bidoffers.index.equals(universe.index):
                    self._bidoffers = bidoffers
                else:
                    raise ValueError("Index of bidoffer must match universe data")
            else:
                # no per-security spread provided: default to zero spread
                self.data["bidoffer"] = 0.0
                self._bidoffers = self.data["bidoffer"]
            self.data["bidoffer_paid"] = 0.0
            self._bidoffers_paid = self.data["bidoffer_paid"]
    @cy.locals(prc=cy.double)
    def update(self, date, data=None, inow=None):
        """
        Update security with a given date and optionally, some data.
        This will update price, value, weight, etc.

        Raises:
            Exception: If a position is open while the latest price is NaN,
                making the node value undefined.
        """
        # filter for internal calls when position has not changed - nothing to
        # do. Internal calls (stale root calls) have None data. Also want to
        # make sure date has not changed, because then we do indeed want to
        # update.
        if date == self.now and self._last_pos == self._position:
            return

        if inow is None:
            if date == 0:
                inow = 0
            else:
                inow = self.data.index.get_loc(date)

        # date change - update price
        if date != self.now:
            # update now
            self.now = date

            if self._prices_set:
                # prices were captured at setup - just read the row
                self._price = self._prices.values[inow]
            # traditional data update
            elif data is not None:
                prc = data[self.name]
                self._price = prc
                self._prices.values[inow] = prc

            # update bid/offer
            if self._bidoffer_set:
                self._bidoffer = self._bidoffers.values[inow]
                self._bidoffer_paid = 0.0

        self._positions.values[inow] = self._position
        self._last_pos = self._position

        if np.isnan(self._price):
            if is_zero(self._position):
                # flat position with no price: value is unambiguously zero
                self._value = 0
            else:
                raise Exception(
                    "Position is open (non-zero: %s) and latest price is NaN "
                    "for security %s on %s. Cannot update node value."
                    % (self._position, self.name, date)
                )
        else:
            self._value = self._position * self._price * self.multiplier

        # securities' notional value equals their value
        self._notl_value = self._value

        self._values.values[inow] = self._value
        self._notl_values.values[inow] = self._notl_value

        if is_zero(self._weight) and is_zero(self._position):
            # nothing held and no weight: skip this node on future sweeps
            self._needupdate = False

        # save outlay to outlays
        if self._outlay != 0:
            self._outlays.values[inow] += self._outlay
            # reset outlay back to 0
            self._outlay = 0

        if self._bidoffer_set:
            self._bidoffers_paid.values[inow] = self._bidoffer_paid
    @cy.locals(
        amount=cy.double, update=cy.bint, q=cy.double, outlay=cy.double, i=cy.int
    )
    def allocate(self, amount, update=True):
        """
        This allocates capital to the Security. This is the method used to
        buy/sell the security.
        A given amount of shares will be determined based on the current
        price, a commission will be calculated based on the parent's
        commission fn, and any remaining capital will be passed back up to
        parent as an adjustment.

        Args:
            * amount (float): Amount of adjustment.
            * update (bool): Force update?

        Raises:
            Exception: if the security has no parent, if the current price is
                zero/NaN, or if the share-quantity search fails to converge
                (see the loop safeguards below).
        """
        # will need to update if this has been idle for a while...
        # update if needupdate or if now is stale
        # fetch parent's now since our now is stale
        if self._needupdate or self.now != self.parent.now:
            self.update(self.parent.now)
        # ignore 0 alloc
        # Note that if the price of security has dropped to zero, then it
        # should never be selected by SelectAll, SelectN etc. I.e. we should
        # not open the position at zero price. At the same time, we are able
        # to close it at zero price, because at that point amount=0.
        # Note also that we don't erase the position in an asset which price
        # has dropped to zero (though the weight will indeed be = 0)
        if is_zero(amount):
            return
        if self.parent is self or self.parent is None:
            raise Exception("Cannot allocate capital to a parentless security")
        if is_zero(self._price) or np.isnan(self._price):
            raise Exception(
                "Cannot allocate capital to "
                "%s because price is %s as of %s"
                % (self.name, self._price, self.parent.now)
            )
        # buy/sell
        # determine quantity - must also factor in commission
        # closing out?
        if is_zero(amount + self._value):
            q = -self._position
        else:
            q = amount / (self._price * self.multiplier)
            if self.integer_positions:
                if (self._position > 0) or (is_zero(self._position) and (amount > 0)):
                    # if we're going long or changing long position
                    q = math.floor(q)
                else:
                    # if we're going short or changing short position
                    q = math.ceil(q)
        # if q is 0 nothing to do
        if is_zero(q) or np.isnan(q):
            return
        # unless we are closing out a position (q == -position)
        # we want to ensure that
        #
        # - In the event of a positive amount, this indicates the maximum
        # amount a given security can use up for a purchase. Therefore, if
        # commissions push us above this amount, we cannot buy `q`, and must
        # decrease its value
        #
        # - In the event of a negative amount, we want to 'raise' at least the
        # amount indicated, no less. Therefore, if we have commission, we must
        # sell additional units to fund this requirement. As such, q must once
        # again decrease.
        #
        if not q == -self._position:
            full_outlay, _, _, _ = self.outlay(q)
            # if full outlay > amount, we must decrease the magnitude of `q`
            # this can potentially lead to an infinite loop if the commission
            # per share > price per share. However, we cannot really detect
            # that in advance since the function can be non-linear (say a fn
            # like max(1, abs(q) * 0.01). Nevertheless, we want to avoid these
            # situations.
            # cap the maximum number of iterations to 1e4 and raise exception
            # if we get there
            # if integer positions then we know we are stuck if q doesn't change
            # if integer positions is false then we want full_outlay == amount
            # if integer positions is true then we want to be at the q where
            #   if we bought 1 more then we wouldn't have enough cash
            i = 0
            last_q = q
            last_amount_short = full_outlay - amount
            # Newton-like iteration: shrink q until the total outlay
            # (including commission and bid/offer) matches the target amount
            while not np.isclose(full_outlay, amount, rtol=0.0) and q != 0:
                dq_wout_considering_tx_costs = (full_outlay - amount) / (
                    self._price * self.multiplier
                )
                q = q - dq_wout_considering_tx_costs
                if self.integer_positions:
                    q = math.floor(q)
                full_outlay, _, _, _ = self.outlay(q)
                # if our q is too low and we have integer positions
                # then we know that the correct quantity is the one where
                # the outlay of q + 1 < amount. i.e. if we bought one more
                # position then we wouldn't have enough cash
                if self.integer_positions:
                    full_outlay_of_1_more, _, _, _ = self.outlay(q + 1)
                    if full_outlay < amount and full_outlay_of_1_more > amount:
                        break
                # if not integer positions then we should keep going until
                # full_outlay == amount or is close enough
                i = i + 1
                if i > 1e4:
                    raise Exception(
                        "Potentially infinite loop detected. This occurred "
                        "while trying to reduce the amount of shares purchased"
                        " to respect the outlay <= amount rule. This is most "
                        "likely due to a commission function that outputs a "
                        "commission that is greater than the amount of cash "
                        "a short sale can raise."
                    )
                if self.integer_positions and last_q == q:
                    raise Exception(
                        "Newton Method like root search for quantity is stuck!"
                        " q did not change in iterations so it is probably a bug"
                        " but we are not entirely sure it is wrong! Consider "
                        " changing to warning."
                    )
                last_q = q
                # divergence guard: the shortfall must shrink every iteration
                if np.abs(full_outlay - amount) > np.abs(last_amount_short):
                    raise Exception(
                        "The difference between what we have raised with q and"
                        " the amount we are trying to raise has gotten bigger since"
                        " last iteration! full_outlay should always be approaching"
                        " amount! There may be a case where the commission fn is"
                        " not smooth"
                    )
                last_amount_short = full_outlay - amount
        # finally execute the computed quantity (no self re-update needed)
        self.transact(q, update, False)
    @cy.locals(
        q=cy.double,
        update=cy.bint,
        update_self=cy.bint,
        outlay=cy.double,
        bidoffer=cy.double,
    )
    def transact(self, q, update=True, update_self=True, price=None):
        """
        This transacts the Security. This is the method used to
        buy/sell the security for a given quantity.
        The amount of shares is explicitly provided, a
        commission will be calculated based on the parent's commission fn, and
        any remaining capital will be passed back up to parent as an
        adjustment.

        Args:
            * q (float): Signed quantity of shares to transact.
            * update (bool): Force update on parent due to transaction proceeds
            * update_self (bool): Check for update on self
            * price (float): Optional price if the transaction happens at a bespoke level

        Raises:
            ValueError: if a custom price is supplied but bid/offer tracking
                was not enabled during setup.
        """
        # will need to update if this has been idle for a while...
        # update if needupdate or if now is stale
        # fetch parent's now since our now is stale
        if update_self and (self._needupdate or self.now != self.parent.now):
            self.update(self.parent.now)
        # if q is 0 nothing to do
        if is_zero(q) or np.isnan(q):
            return
        if price is not None and not self._bidoffer_set:
            raise ValueError(
                'Cannot transact at custom prices when "bidoffer" has '
                "not been passed during setup to enable bid-offer tracking."
            )
        # this security will need an update, even if pos is 0 (for example if
        # we close the positions, value and pos is 0, but still need to do that
        # last update)
        self._needupdate = True
        # adjust position & value
        self._position += q
        # calculate proper adjustment for parent
        # parent passed down amount so we want to pass
        # -outlay back up to parent to adjust for capital
        # used
        full_outlay, outlay, fee, bidoffer = self.outlay(q, p=price)
        # store outlay for future reference
        self._outlay += outlay
        self._bidoffer_paid += bidoffer
        # call parent
        self.parent.adjust(-full_outlay, update=update, flow=False, fee=fee)
@cy.locals(q=cy.double, p=cy.double)
def commission(self, q, p):
"""
Calculates the commission (transaction fee) based on quantity and
price. Uses the parent's commission_fn.
Args:
* q (float): quantity
* p (float): price
"""
return self.parent.commission_fn(q, p)
@cy.locals(q=cy.double)
def outlay(self, q, p=None):
"""
Determines the complete cash outlay (including commission) necessary
given a quantity q.
Second returning parameter is a commission itself.
Args:
* q (float): quantity
* p (float): price override
"""
if p is None:
fee = self.commission(q, self._price * self.multiplier)
bidoffer = abs(q) * 0.5 * self._bidoffer * self.multiplier
else:
# price override provided: custom transaction
fee = self.commission(q, p * self.multiplier)
bidoffer = q * (p - self._price) * self.multiplier
outlay = q * self._price * self.multiplier + bidoffer
return outlay + fee, outlay, fee, bidoffer
def run(self):
"""
Does nothing - securities have nothing to do on run.
"""
pass
class Security(SecurityBase):
    """
    Plain-vanilla security with no special behavior, whose notional value
    is measured as market value (position times price).

    This class exists mainly so that standard securities can be told apart
    from specialized ones via ``isinstance``: ``isinstance(sec, Security)``
    is True only for a vanilla security.
    """

    pass
class FixedIncomeSecurity(SecurityBase):
    """
    Security whose notional value is measured purely by quantity
    (par amount) rather than by market value.
    """

    @cy.locals(coupon=cy.double)
    def update(self, date, data=None, inow=None):
        """
        Update security with a given date and optionally, some data.
        This will update price, value, weight, etc.
        """
        if inow is None:
            # resolve the positional index of this date (0 on the seed date)
            inow = 0 if date == 0 else self.data.index.get_loc(date)
        super(FixedIncomeSecurity, self).update(date, data, inow)
        # For fixed income securities (bonds, swaps), notional value is the
        # position size itself - not the market value.
        self._notl_value = self._position
        self._notl_values.values[inow] = self._notl_value
class CouponPayingSecurity(FixedIncomeSecurity):
    """
    CouponPayingSecurity expands on SecurityBase to handle securities which
    pay (possibly irregular) coupons (or other forms of cash disbursement).
    More generally, this can include instruments with any sort of carry,
    including (potentially asymmetric) holding costs.
    Args:
        * name (str): Security name
        * multiplier (float): security multiplier - typically used for
          derivatives.
        * fixed_income (bool): Flag to control whether notional_value is based
          only on quantity, or on market value (like an equity).
          Defaults to notional weighting for coupon paying instruments.
        * lazy_add (bool): Flag to control whether instrument should be added
          to strategy children lazily, i.e. only when there is a transaction
          on the instrument. This improves performance of strategies which
          transact on a sparse set of children.
    Attributes:
        * SecurityBase attributes
        * coupon (float): Current coupon payment (quantity).
        * holding_cost (float): Current holding cost (quantity).
    Represents a coupon-paying security, where coupon payments adjust
    the capital of the parent. Coupons and costs must be passed in during setup.
    """

    # cython typed fields for the latest coupon / holding cost amounts
    _coupon = cy.declare(cy.double)
    _holding_cost = cy.declare(cy.double)

    @cy.locals(multiplier=cy.double)
    def __init__(self, name, multiplier=1, fixed_income=True, lazy_add=False):
        super(CouponPayingSecurity, self).__init__(name, multiplier)
        self._coupon = 0
        self._holding_cost = 0
        self._fixed_income = fixed_income
        self.lazy_add = lazy_add

    def setup(self, universe, **kwargs):
        """
        Setup Security with universe and coupon data. Speeds up future runs.
        Args:
            * universe (DataFrame): DataFrame of prices with security's name as
              one of the columns.
            ** kwargs (DataFrames): DataFrames of additional security level
               information (i.e. bid/ask spread, risk, etc).
               Must include "coupons"; may include "cost_long"/"cost_short".
        Raises:
            Exception: if "coupons" is not passed in kwargs.
            ValueError: if the coupon series is missing for this security or
                its index does not match the universe index.
        """
        super(CouponPayingSecurity, self).setup(universe, **kwargs)
        # Handle coupons
        if "coupons" not in kwargs:
            raise Exception(
                '"coupons" must be passed to setup for a CouponPayingSecurity'
            )
        try:
            self._coupons = kwargs["coupons"][self.name]
        except KeyError:
            self._coupons = None
        if self._coupons is None or not self._coupons.index.equals(universe.index):
            raise ValueError("Index of coupons must match universe data")
        # Handle holding costs (both optional; None means "no cost on this side")
        try:
            self._cost_long = kwargs["cost_long"][self.name]
        except KeyError:
            self._cost_long = None
        try:
            self._cost_short = kwargs["cost_short"][self.name]
        except KeyError:
            self._cost_short = None
        # pre-allocate time series for realized coupons and holding costs
        self.data["coupon"] = 0.0
        self.data["holding_cost"] = 0.0
        self._coupon_income = self.data["coupon"]
        self._holding_costs = self.data["holding_cost"]

    @cy.locals(coupon=cy.double, cost=cy.double)
    def update(self, date, data=None, inow=None):
        """
        Update security with a given date and optionally, some data.
        This will update price, value, weight, etc.
        Raises:
            Exception: if coupons were never set up, or if the position is
                open while the latest coupon is NaN.
        """
        if inow is None:
            if date == 0:
                inow = 0
            else:
                inow = self.data.index.get_loc(date)
        if self._coupons is None:
            raise Exception("coupons have not been set for security %s" % self.name)
        # Standard update
        super(CouponPayingSecurity, self).update(date, data, inow)
        coupon = self._coupons.values[inow]
        # If we were to call self.parent.adjust, then all the child weights would
        # need to be updated. If each security pays a coupon, then this happens for
        # each child. Instead, we store the coupon on self._capital, and it gets
        # swept up as part of the strategy update
        if np.isnan(coupon):
            if is_zero(self._position):
                self._coupon = 0.0
            else:
                raise Exception(
                    "Position is open (non-zero) and latest coupon is NaN "
                    "for security %s on %s. Cannot update node value."
                    % (self.name, date)
                )
        else:
            self._coupon = self._position * coupon
        # holding cost depends on the side of the position; note the sign
        # flip for shorts so a positive cost series always costs money
        if self._position > 0 and self._cost_long is not None:
            cost = self._cost_long.values[inow]
            self._holding_cost = self._position * cost
        elif self._position < 0 and self._cost_short is not None:
            cost = self._cost_short.values[inow]
            self._holding_cost = -self._position * cost
        else:
            self._holding_cost = 0.0
        # net carry is swept into parent capital during the strategy update
        self._capital = self._coupon - self._holding_cost
        self._coupon_income.values[inow] = self._coupon
        self._holding_costs.values[inow] = self._holding_cost

    @property
    def coupon(self):
        """
        Current coupon payment (scaled by position)
        """
        if (
            self.root.stale
        ):  # Stale check needed because coupon paid depends on position
            self.root.update(self.root.now, None)
        return self._coupon

    @property
    def coupons(self):
        """
        TimeSeries of coupons paid (scaled by position)
        """
        if (
            self.root.stale
        ):  # Stale check needed because coupon paid depends on position
            self.root.update(self.root.now, None)
        return self._coupon_income.loc[: self.now]

    @property
    def holding_cost(self):
        """
        Current holding cost (scaled by position)
        """
        if (
            self.root.stale
        ):  # Stale check needed because coupon paid depends on position
            self.root.update(self.root.now, None)
        return self._holding_cost

    @property
    def holding_costs(self):
        """
        TimeSeries of holding costs paid (scaled by position)
        """
        if (
            self.root.stale
        ):  # Stale check needed because coupon paid depends on position
            self.root.update(self.root.now, None)
        return self._holding_costs.loc[: self.now]
class HedgeSecurity(SecurityBase):
    """
    SecurityBase variant whose notional value is forced to zero, so it never
    counts towards the strategy's notional value. Intended for fixed income
    strategies: e.g. in a corporate bond portfolio the notional may refer to
    the corporate bonds only, excluding treasury bonds or interest rate swaps
    held purely as hedges.
    """

    def update(self, date, data=None, inow=None):
        """
        Run the standard security update (price, value, weight, etc.),
        then zero out the notional value.
        """
        super(HedgeSecurity, self).update(date, data, inow)
        # hedges contribute no notional - wipe the full history and the
        # current value
        self._notl_values.values.fill(0.0)
        self._notl_value = 0.0
class CouponPayingHedgeSecurity(CouponPayingSecurity):
    """
    CouponPayingSecurity variant whose notional value is forced to zero, so
    it never counts towards the strategy's notional value. Intended for fixed
    income strategies: e.g. in a corporate bond portfolio the notional may
    refer to the corporate bonds only, excluding treasury bonds or interest
    rate swaps held purely as hedges.
    """

    def update(self, date, data=None, inow=None):
        """
        Run the standard coupon-paying update (price, value, coupon, etc.),
        then zero out the notional value.
        """
        super(CouponPayingHedgeSecurity, self).update(date, data, inow)
        # hedges contribute no notional - wipe the full history and the
        # current value
        self._notl_values.values.fill(0.0)
        self._notl_value = 0.0
class Algo(object):
    """
    Base building block for strategy logic.

    An Algo modularizes strategy logic so it stays composable, testable and
    less error prone - in the spirit of "do one thing well". Concretely, an
    algo is a callable taking one argument, the Strategy (the "target"), and
    returning a bool. Use a plain function when no state is needed; subclass
    this and implement ``__call__`` when state must persist between calls.

    Args:
        * name (str): Algo name
    """

    def __init__(self, name=None):
        self._name = name

    @property
    def name(self):
        """
        Algo name (defaults to the class name, resolved lazily on first use).
        """
        if self._name is None:
            self._name = type(self).__name__
        return self._name

    def __call__(self, target):
        raise NotImplementedError("%s not implemented!" % self.name)
class AlgoStack(Algo):
    """
    Runs a sequence of Algos, stopping at the first failure.

    An AlgoStack groups a logical set of Algos. Each algo in the stack is
    invoked in order; execution stops as soon as one returns False (except
    for algos flagged with ``run_always``, which keep running).

    Args:
        * algos (list): List of algos.
    """

    def __init__(self, *algos):
        super(AlgoStack, self).__init__()
        self.algos = algos
        # the slow path is only needed when at least one algo opts in
        self.check_run_always = any(hasattr(x, "run_always") for x in self.algos)

    def __call__(self, target):
        if not self.check_run_always:
            # fast path: short-circuit on the first failing algo
            return all(algo(target) for algo in self.algos)
        # slow path: once an algo has failed, keep invoking only those
        # algos that carry a truthy run_always attribute
        res = True
        for algo in self.algos:
            if res:
                res = algo(target)
            elif getattr(algo, "run_always", False):
                algo(target)
        return res
class Strategy(StrategyBase):
    """
    StrategyBase augmented with Algos.

    A Strategy is built from a set of algos which are wrapped in an
    AlgoStack; ``run`` executes that stack. Two dict attributes let algos
    share data: ``perm`` for permanent data and ``temp`` for temporary data
    (cleared on every run).

    Args:
        * name (str): Strategy name
        * algos (list): List of Algos to be placed in an AlgoStack
        * children (dict, list): Children - useful for strategies of
          strategies. Children can be any type of Node or str; string values
          correspond to children lazily created with that name when needed.
        * parent (Node): The parent Node

    Attributes:
        * stack (AlgoStack): The stack
        * temp (dict): Temporary data shared between algos - cleared on each
          call to run.
        * perm (dict): Permanent data shared between algos - never cleared.
    """

    def __init__(self, name, algos=None, children=None, parent=None):
        super(Strategy, self).__init__(name, children=children, parent=parent)
        # an absent algo list yields a no-op stack
        self.stack = AlgoStack(*(algos if algos is not None else []))
        self.temp = {}
        self.perm = {}

    def run(self):
        # wipe temporary shared state before this pass
        self.temp = {}
        # execute the algo stack against this strategy
        self.stack(self)
        # then cascade the run down to every child
        for child in self._childrenv:
            child.run()
class FixedIncomeStrategy(Strategy):
    """
    Alias of :class:`Strategy` with the ``fixed_income`` flag enabled.

    In fixed income mode:
      - capital allocations are not necessary and initial capital is unused
      - bankruptcy is disabled
      - weights are based off notional_value rather than value
      - strategy price is computed from additive PNL returns per unit of
        notional_value, with a reference price of PAR
      - "transact" assumes the role of "allocate", buying/selling children
        on a weighted notional basis
      - "rebalance" adjusts notionals rather than capital allocations based
        on weights
    """

    def __init__(self, name, algos=None, children=None):
        super(FixedIncomeStrategy, self).__init__(name, algos=algos, children=children)
        self._fixed_income = True
| [
"pandas.DataFrame",
"copy.deepcopy",
"cython.locals",
"numpy.abs",
"math.ceil",
"math.floor",
"numpy.isnan",
"numpy.isclose",
"cython.declare",
"future.utils.iteritems"
] | [((254, 276), 'cython.locals', 'cy.locals', ([], {'x': 'cy.double'}), '(x=cy.double)\n', (263, 276), True, 'import cython as cy\n'), ((2331, 2352), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (2341, 2352), True, 'import cython as cy\n'), ((2366, 2387), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (2376, 2387), True, 'import cython as cy\n'), ((2401, 2422), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (2411, 2422), True, 'import cython as cy\n'), ((2441, 2462), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (2451, 2462), True, 'import cython as cy\n'), ((2477, 2498), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (2487, 2498), True, 'import cython as cy\n'), ((2512, 2531), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (2522, 2531), True, 'import cython as cy\n'), ((2558, 2577), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (2568, 2577), True, 'import cython as cy\n'), ((2598, 2617), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (2608, 2617), True, 'import cython as cy\n'), ((2638, 2657), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (2648, 2657), True, 'import cython as cy\n'), ((2679, 2700), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (2689, 2700), True, 'import cython as cy\n'), ((12774, 12795), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (12784, 12795), True, 'import cython as cy\n'), ((12814, 12835), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (12824, 12835), True, 'import cython as cy\n'), ((12859, 12880), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (12869, 12880), True, 'import cython as cy\n'), ((12899, 12920), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (12909, 12920), True, 'import cython as cy\n'), ((12937, 12958), 
'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (12947, 12958), True, 'import cython as cy\n'), ((12978, 12997), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (12988, 12997), True, 'import cython as cy\n'), ((13013, 13032), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (13023, 13032), True, 'import cython as cy\n'), ((21676, 21798), 'cython.locals', 'cy.locals', ([], {'newpt': 'cy.bint', 'val': 'cy.double', 'ret': 'cy.double', 'coupons': 'cy.double', 'notl_val': 'cy.double', 'bidoffer_paid': 'cy.double'}), '(newpt=cy.bint, val=cy.double, ret=cy.double, coupons=cy.double,\n notl_val=cy.double, bidoffer_paid=cy.double)\n', (21685, 21798), True, 'import cython as cy\n'), ((28371, 28444), 'cython.locals', 'cy.locals', ([], {'amount': 'cy.double', 'update': 'cy.bint', 'flow': 'cy.bint', 'fees': 'cy.double'}), '(amount=cy.double, update=cy.bint, flow=cy.bint, fees=cy.double)\n', (28380, 28444), True, 'import cython as cy\n'), ((29689, 29732), 'cython.locals', 'cy.locals', ([], {'amount': 'cy.double', 'update': 'cy.bint'}), '(amount=cy.double, update=cy.bint)\n', (29698, 29732), True, 'import cython as cy\n'), ((31604, 31642), 'cython.locals', 'cy.locals', ([], {'q': 'cy.double', 'update': 'cy.bint'}), '(q=cy.double, update=cy.bint)\n', (31613, 31642), True, 'import cython as cy\n'), ((32854, 32930), 'cython.locals', 'cy.locals', ([], {'delta': 'cy.double', 'weight': 'cy.double', 'base': 'cy.double', 'update': 'cy.bint'}), '(delta=cy.double, weight=cy.double, base=cy.double, update=cy.bint)\n', (32863, 32930), True, 'import cython as cy\n'), ((35845, 35870), 'cython.locals', 'cy.locals', ([], {'update': 'cy.bint'}), '(update=cy.bint)\n', (35854, 35870), True, 'import cython as cy\n'), ((38993, 39028), 'cython.locals', 'cy.locals', ([], {'q': 'cy.double', 'p': 'cy.double'}), '(q=cy.double, p=cy.double)\n', (39002, 39028), True, 'import cython as cy\n'), ((41379, 41400), 'cython.declare', 'cy.declare', 
(['cy.double'], {}), '(cy.double)\n', (41389, 41400), True, 'import cython as cy\n'), ((41417, 41438), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (41427, 41438), True, 'import cython as cy\n'), ((41456, 41477), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (41466, 41477), True, 'import cython as cy\n'), ((41496, 41515), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (41506, 41515), True, 'import cython as cy\n'), ((41534, 41553), 'cython.declare', 'cy.declare', (['cy.bint'], {}), '(cy.bint)\n', (41544, 41553), True, 'import cython as cy\n'), ((41568, 41589), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (41578, 41589), True, 'import cython as cy\n'), ((41606, 41627), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (41616, 41627), True, 'import cython as cy\n'), ((41634, 41665), 'cython.locals', 'cy.locals', ([], {'multiplier': 'cy.double'}), '(multiplier=cy.double)\n', (41643, 41665), True, 'import cython as cy\n'), ((48445, 48469), 'cython.locals', 'cy.locals', ([], {'prc': 'cy.double'}), '(prc=cy.double)\n', (48454, 48469), True, 'import cython as cy\n'), ((50824, 50912), 'cython.locals', 'cy.locals', ([], {'amount': 'cy.double', 'update': 'cy.bint', 'q': 'cy.double', 'outlay': 'cy.double', 'i': 'cy.int'}), '(amount=cy.double, update=cy.bint, q=cy.double, outlay=cy.double,\n i=cy.int)\n', (50833, 50912), True, 'import cython as cy\n'), ((57648, 57750), 'cython.locals', 'cy.locals', ([], {'q': 'cy.double', 'update': 'cy.bint', 'update_self': 'cy.bint', 'outlay': 'cy.double', 'bidoffer': 'cy.double'}), '(q=cy.double, update=cy.bint, update_self=cy.bint, outlay=cy.\n double, bidoffer=cy.double)\n', (57657, 57750), True, 'import cython as cy\n'), ((59901, 59936), 'cython.locals', 'cy.locals', ([], {'q': 'cy.double', 'p': 'cy.double'}), '(q=cy.double, p=cy.double)\n', (59910, 59936), True, 'import cython as cy\n'), ((60250, 60272), 'cython.locals', 
'cy.locals', ([], {'q': 'cy.double'}), '(q=cy.double)\n', (60259, 60272), True, 'import cython as cy\n'), ((61747, 61774), 'cython.locals', 'cy.locals', ([], {'coupon': 'cy.double'}), '(coupon=cy.double)\n', (61756, 61774), True, 'import cython as cy\n'), ((63722, 63743), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (63732, 63743), True, 'import cython as cy\n'), ((63764, 63785), 'cython.declare', 'cy.declare', (['cy.double'], {}), '(cy.double)\n', (63774, 63785), True, 'import cython as cy\n'), ((63792, 63823), 'cython.locals', 'cy.locals', ([], {'multiplier': 'cy.double'}), '(multiplier=cy.double)\n', (63801, 63823), True, 'import cython as cy\n'), ((65545, 65588), 'cython.locals', 'cy.locals', ([], {'coupon': 'cy.double', 'cost': 'cy.double'}), '(coupon=cy.double, cost=cy.double)\n', (65554, 65588), True, 'import cython as cy\n'), ((17153, 17211), 'pandas.DataFrame', 'pd.DataFrame', (['{x.name: x.outlays for x in self.securities}'], {}), '({x.name: x.outlays for x in self.securities})\n', (17165, 17211), True, 'import pandas as pd\n'), ((19763, 19883), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'funiverse.index', 'columns': "['price', 'value', 'notional_value', 'cash', 'fees', 'flows']", 'data': '(0.0)'}), "(index=funiverse.index, columns=['price', 'value',\n 'notional_value', 'cash', 'fees', 'flows'], data=0.0)\n", (19775, 19883), True, 'import pandas as pd\n'), ((34512, 34526), 'numpy.isnan', 'np.isnan', (['base'], {}), '(base)\n', (34520, 34526), True, 'import numpy as np\n'), ((38094, 38154), 'pandas.DataFrame', 'pd.DataFrame', (['{x.name: x.positions for x in self.securities}'], {}), '({x.name: x.positions for x in self.securities})\n', (38106, 38154), True, 'import pandas as pd\n'), ((49829, 49850), 'numpy.isnan', 'np.isnan', (['self._price'], {}), '(self._price)\n', (49837, 49850), True, 'import numpy as np\n'), ((66499, 66515), 'numpy.isnan', 'np.isnan', (['coupon'], {}), '(coupon)\n', (66507, 66515), True, 'import numpy 
as np\n'), ((18555, 18569), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (18563, 18569), False, 'from copy import deepcopy\n'), ((19488, 19511), 'pandas.DataFrame', 'pd.DataFrame', (['funiverse'], {}), '(funiverse)\n', (19500, 19511), True, 'import pandas as pd\n'), ((46968, 47065), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'universe.index', 'columns': "['value', 'position', 'notional_value']", 'data': '(0.0)'}), "(index=universe.index, columns=['value', 'position',\n 'notional_value'], data=0.0)\n", (46980, 47065), True, 'import pandas as pd\n'), ((47199, 47295), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'universe.index', 'columns': "['price', 'value', 'position', 'notional_value']"}), "(index=universe.index, columns=['price', 'value', 'position',\n 'notional_value'])\n", (47211, 47295), True, 'import pandas as pd\n'), ((52414, 52435), 'numpy.isnan', 'np.isnan', (['self._price'], {}), '(self._price)\n', (52422, 52435), True, 'import numpy as np\n'), ((53307, 53318), 'numpy.isnan', 'np.isnan', (['q'], {}), '(q)\n', (53315, 53318), True, 'import numpy as np\n'), ((58872, 58883), 'numpy.isnan', 'np.isnan', (['q'], {}), '(q)\n', (58880, 58883), True, 'import numpy as np\n'), ((4881, 4900), 'future.utils.iteritems', 'iteritems', (['children'], {}), '(children)\n', (4890, 4900), False, 'from future.utils import iteritems\n'), ((37972, 38029), 'pandas.DataFrame', 'pd.DataFrame', (['{x.name: x.prices for x in self.securities}'], {}), '({x.name: x.prices for x in self.securities})\n', (37984, 38029), True, 'import pandas as pd\n'), ((38683, 38731), 'pandas.DataFrame', 'pd.DataFrame', (["{'price': prc, 'quantity': trades}"], {}), "({'price': prc, 'quantity': trades})\n", (38695, 38731), True, 'import pandas as pd\n'), ((5318, 5329), 'copy.deepcopy', 'deepcopy', (['c'], {}), '(c)\n', (5326, 5329), False, 'from copy import deepcopy\n'), ((36465, 36482), 'numpy.isnan', 'np.isnan', (['c.value'], {}), '(c.value)\n', (36473, 36482), True, 'import 
numpy as np\n'), ((38525, 38590), 'pandas.DataFrame', 'pd.DataFrame', (['{x.name: x.bidoffers_paid for x in self.securities}'], {}), '({x.name: x.bidoffers_paid for x in self.securities})\n', (38537, 38590), True, 'import pandas as pd\n'), ((53104, 53117), 'math.floor', 'math.floor', (['q'], {}), '(q)\n', (53114, 53117), False, 'import math\n'), ((53234, 53246), 'math.ceil', 'math.ceil', (['q'], {}), '(q)\n', (53243, 53246), False, 'import math\n'), ((55018, 55059), 'numpy.isclose', 'np.isclose', (['full_outlay', 'amount'], {'rtol': '(0.0)'}), '(full_outlay, amount, rtol=0.0)\n', (55028, 55059), True, 'import numpy as np\n'), ((55336, 55349), 'math.floor', 'math.floor', (['q'], {}), '(q)\n', (55346, 55349), False, 'import math\n'), ((57058, 57086), 'numpy.abs', 'np.abs', (['(full_outlay - amount)'], {}), '(full_outlay - amount)\n', (57064, 57086), True, 'import numpy as np\n'), ((57089, 57114), 'numpy.abs', 'np.abs', (['last_amount_short'], {}), '(last_amount_short)\n', (57095, 57114), True, 'import numpy as np\n'), ((5075, 5086), 'copy.deepcopy', 'deepcopy', (['c'], {}), '(c)\n', (5083, 5086), False, 'from copy import deepcopy\n')] |
"""
Preprocessing: Tests for ._tokenfuncs submodule.
"""
import string
from hypothesis import given, strategies as st
import pytest
import numpy as np
from tmtoolkit.preprocess._tokenfuncs import (
str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches,
token_match_subsequent, token_glue_subsequent, token_match
)
def test_str_multisplit():
    """Spot-check str_multisplit against hand-computed expectations."""
    punct = list(string.punctuation)

    cases = [
        ('Te;s,t', {';', ','}, ['Te', 's', 't']),
        ('US-Student', punct, ['US', 'Student']),
        ('-main_file.exe,', punct, ['', 'main', 'file', 'exe', '']),
    ]

    for s, split_chars, expected in cases:
        assert str_multisplit(s, split_chars) == expected
@given(s=st.text(), split_chars=st.lists(st.characters()))
def test_str_multisplit_hypothesis(s, split_chars):
    """Property test: parts never contain split chars and counts add up."""
    parts = str_multisplit(s, split_chars)

    assert type(parts) is list

    if not s:
        assert parts == ['']
    if not split_chars:
        assert parts == [s]

    for part in parts:
        assert not any(c in part for c in split_chars)

    # every occurrence of any split character adds exactly one part
    n_splits = sum(s.count(c) for c in set(split_chars))
    assert len(parts) == n_splits + 1
def test_str_shape():
    """Spot-check str_shape, including custom markers and string output."""
    cases = [
        (('',), {}, []),
        (('xxx',), {}, [0, 0, 0]),
        (('Xxx',), {}, [1, 0, 0]),
        (('xxX',), {}, [0, 0, 1]),
        (('Xxx',), dict(lower=1, upper=0), [0, 1, 1]),
        (('Xxx',), dict(lower=1, upper=0, as_str=True), '011'),
        (('Foo',), dict(lower='x', upper='X', as_str=True), 'Xxx'),
    ]

    for args, kwargs, expected in cases:
        assert str_shape(*args, **kwargs) == expected
@given(s=st.text(), lower_int=st.integers(min_value=0, max_value=9), upper_int=st.integers(min_value=0, max_value=9),
       lower=st.characters(), upper=st.characters(),
       as_str=st.booleans(), use_ints=st.booleans())
def test_str_shape_hypothesis(s, lower_int, upper_int, lower, upper, as_str, use_ints):
    """Property test: str_shape output has same length as input and only uses the two markers."""
    lower_marker, upper_marker = (lower_int, upper_int) if use_ints else (lower, upper)

    res = str_shape(s, lower_marker, upper_marker, as_str)

    if as_str:
        assert isinstance(res, str)
        allowed = {str(lower_marker), str(upper_marker)}
    else:
        assert isinstance(res, list)
        allowed = {lower_marker, upper_marker}

    assert all(x in allowed for x in res)
    assert len(res) == len(s)
def test_str_shapesplit():
    """Check str_shapesplit on known compound and non-compound tokens."""
    expected_by_input = {
        '': [''],
        'NewYork': ['New', 'York'],
        'newYork': ['new', 'York'],
        'newyork': ['newyork'],
        'USflag': ['US', 'flag'],
        'eMail': ['eMail'],
        'foobaR': ['foobaR'],
    }

    for s, expected in expected_by_input.items():
        assert str_shapesplit(s) == expected
@given(s=st.text(string.printable), precalc_shape=st.booleans(), min_len=st.integers(min_value=1, max_value=5))
def test_str_shapesplit_hypothesis(s, precalc_shape, min_len):
    """Property test: parts are strings that respect the min. length and rejoin to the input."""
    shape = str_shape(s) if precalc_shape else None

    parts = str_shapesplit(s, shape, min_part_length=min_len)

    assert parts   # at least one part, even for empty input
    assert all(isinstance(p, str) for p in parts)

    if len(s) >= min_len:
        for p in parts:
            assert min_len <= len(p) <= len(s)

    # splitting must be lossless
    assert ''.join(parts) == s
def test_expand_compound_token():
    """Check expand_compound_token with default and custom split options."""
    # default options (split on '-' with length-based recombination)
    default_cases = [
        ('US-Student', ['US', 'Student']),
        ('US-Student-X', ['US', 'StudentX']),
        ('Camel-CamelCase', ['Camel', 'CamelCase']),
        ('Camel-camelCase', ['Camel', 'camelCase']),
        ('Student-X', ['StudentX']),
        ('Do-Not-Disturb', ['Do', 'Not', 'Disturb']),
        ('E-Mobility-Strategy', ['EMobility', 'Strategy']),
    ]
    for inp, expected in default_cases:
        assert expand_compound_token(inp) == expected

    # additionally splitting on case changes
    assert expand_compound_token('Camel-CamelCase', split_on_casechange=True) == ['Camel', 'Camel', 'Case']
    assert expand_compound_token('Camel-camelCase', split_on_casechange=True) == ['Camel', 'camel', 'Case']

    # case-change splitting only (no length-based splitting)
    casechange_only = zip(['US-Student', 'Do-Not-Disturb', 'E-Mobility-Strategy'],
                          [['USStudent'], ['Do', 'Not', 'Disturb'], ['EMobility', 'Strategy']])
    for inp, expected in casechange_only:
        assert expand_compound_token(inp, split_on_len=None, split_on_casechange=True) == expected

    # both length-based and case-change splitting
    len_and_casechange = zip(['US-Student', 'Do-Not-Disturb', 'E-Mobility-Strategy'],
                             [['US', 'Student'], ['Do', 'Not', 'Disturb'], ['EMobility', 'Strategy']])
    for inp, expected in len_and_casechange:
        assert expand_compound_token(inp, split_on_len=2, split_on_casechange=True) == expected

    assert expand_compound_token('E-Mobility-Strategy', split_on_len=1) == ['E', 'Mobility', 'Strategy']
    assert expand_compound_token('') == ['']

    # custom split characters behave like the default dash
    res_custom = expand_compound_token('Te;s,t', split_chars=[';', ','], split_on_len=1, split_on_casechange=False)
    res_dash = expand_compound_token('Te-s-t', split_chars=['-'], split_on_len=1, split_on_casechange=False)
    assert res_custom == res_dash == ['Te', 's', 't']
@given(s=st.text(string.printable), split_chars=st.lists(st.characters(min_codepoint=32)),
       split_on_len=st.integers(1),
       split_on_casechange=st.booleans())
def test_expand_compound_token_hypothesis(s, split_chars, split_on_len, split_on_casechange):
    """Property test: expand_compound_token yields a non-empty list of clean parts."""
    res = expand_compound_token(s, split_chars, split_on_len=split_on_len, split_on_casechange=split_on_casechange)

    assert isinstance(res, list)
    assert res   # never empty

    has_split_char = any(c in s for c in split_chars)
    only_split_chars = all(c in split_chars for c in s)

    if not has_split_char:   # nothing to split on
        assert res == [s]

    if s:
        assert all(res)   # no empty parts
        if not only_split_chars:
            for part in res:
                assert not any(c in part for c in split_chars)
@given(matches=st.lists(st.booleans()),
       left=st.integers(min_value=0, max_value=10),
       right=st.integers(min_value=0, max_value=10),
       remove_overlaps=st.booleans())
def test_make_index_window_around_matches_flatten(matches, left, right, remove_overlaps):
    """Property test for make_index_window_around_matches with flatten=True.

    Checks that the flat index array covers every position within [i-left, i+right]
    around each match position i (clipped to valid indices).
    """
    # use builtin `bool` here: the alias `np.bool` was deprecated in NumPy 1.20
    # and removed in NumPy 1.24, so it raised AttributeError on modern NumPy
    matches = np.array(matches, dtype=bool)
    matches_ind = np.where(matches)[0]

    n_true = matches.sum()

    res = make_index_window_around_matches(matches, left, right, flatten=True, remove_overlaps=remove_overlaps)
    assert isinstance(res, np.ndarray)
    assert res.dtype.kind in {'u', 'i'}   # signed or unsigned integer indices

    assert len(res) >= n_true

    if len(res) > 0:
        # all indices must lie inside the input array
        assert np.min(res) >= 0
        assert np.max(res) < len(matches)

    if left == 0 and right == 0:
        # zero-width window reproduces the match positions exactly
        assert np.array_equal(matches_ind, res)

    if remove_overlaps:
        # result must then be sorted and free of duplicates
        assert np.array_equal(res, np.sort(np.unique(res)))

    # every in-bounds position inside each window must be covered
    for i in matches_ind:
        for x in range(i-left, i+right+1):
            if 0 <= x < len(matches):
                assert x in res
@given(matches=st.lists(st.booleans()),
       left=st.integers(min_value=0, max_value=10),
       right=st.integers(min_value=0, max_value=10))
def test_make_index_window_around_matches_not_flattened(matches, left, right):
    """Property test for make_index_window_around_matches with flatten=False.

    Checks that one integer index window is returned per match, covering exactly
    the in-bounds positions in [i-left, i+right] around match position i.
    """
    # use builtin `bool` here: the alias `np.bool` was deprecated in NumPy 1.20
    # and removed in NumPy 1.24, so it raised AttributeError on modern NumPy
    matches = np.array(matches, dtype=bool)
    matches_ind = np.where(matches)[0]

    n_true = matches.sum()

    res = make_index_window_around_matches(matches, left, right, flatten=False)
    assert isinstance(res, list)
    assert len(res) == n_true == len(matches_ind)

    for win, i in zip(res, matches_ind):
        assert win.dtype.kind in {'u', 'i'}   # integer indices
        assert len(win) > 0
        assert np.min(win) >= 0
        assert np.max(win) < len(matches)

        # the window must list the clipped range positions in order
        i_in_win = 0
        for x in range(i-left, i+right+1):
            if 0 <= x < len(matches):
                assert x == win[i_in_win]
                i_in_win += 1
@pytest.mark.parametrize('pattern, tokens, match_type, ignore_case, glob_method, expected', [
    ('a', [], 'exact', False, 'match', []),
    ('', [], 'exact', False, 'match', []),
    ('', ['a', ''], 'exact', False, 'match', [False, True]),
    ('a', ['a', 'b', 'c'], 'exact', False, 'match', [True, False, False]),
    ('a', np.array(['a', 'b', 'c']), 'exact', False, 'match', [True, False, False]),
    ('A', ['a', 'b', 'c'], 'exact', False, 'match', [False, False, False]),
    ('A', ['a', 'b', 'c'], 'exact', True, 'match', [True, False, False]),
    (r'foo$', ['a', 'bfoo', 'c'], 'regex', False, 'match', [False, True, False]),
    (r'foo$', ['a', 'bFOO', 'c'], 'regex', False, 'match', [False, False, False]),
    (r'foo$', ['a', 'bFOO', 'c'], 'regex', True, 'match', [False, True, False]),
    (r'foo*', ['a', 'food', 'c'], 'glob', False, 'match', [False, True, False]),
    (r'foo*', ['a', 'FOOd', 'c'], 'glob', False, 'match', [False, False, False]),
    (r'foo*', ['a', 'FOOd', 'c'], 'glob', True, 'match', [False, True, False]),
    (r'foo*', ['a', 'FOOd', 'c'], 'glob', True, 'search', [False, True, False]),
])
def test_token_match(pattern, tokens, match_type, ignore_case, glob_method, expected):
    """Check token_match across exact, regex, and glob matching modes."""
    res = token_match(pattern, tokens, match_type, ignore_case, glob_method)
    assert np.array_equal(res, np.array(expected))
def test_token_match_subsequent():
    """Check input validation and glob matching of token_match_subsequent."""
    tok = ['green', 'test', 'emob', 'test', 'greener', 'tests', 'test', 'test']

    # a bare string or a single-element pattern list is rejected
    with pytest.raises(ValueError):
        token_match_subsequent('pattern', tok)
    with pytest.raises(ValueError):
        token_match_subsequent(['pattern'], tok)

    # no tokens / no matching tokens -> empty result
    assert token_match_subsequent(['a', 'b'], []) == []
    assert token_match_subsequent(['foo', 'bar'], tok) == []

    res = token_match_subsequent(['green*', 'test*'], tok, match_type='glob')
    assert len(res) == 2
    for ind, expected in zip(res, (np.array([0, 1]), np.array([4, 5]))):
        assert np.array_equal(ind, expected)

    res = token_match_subsequent(['green*', 'test*', '*'], tok, match_type='glob')
    assert len(res) == 2
    for ind, expected in zip(res, (np.array([0, 1, 2]), np.array([4, 5, 6]))):
        assert np.array_equal(ind, expected)
@given(tokens=st.lists(st.text()), n_patterns=st.integers(0, 4))
def test_token_match_subsequent_hypothesis(tokens, n_patterns):
    """Property test: matched index runs are consecutive and reproduce the patterns."""
    tokens = np.array(tokens)
    n_patterns = min(len(tokens), n_patterns)

    # pick n_patterns tokens in random order as the pattern sequence
    pat_ind = np.arange(n_patterns)
    np.random.shuffle(pat_ind)
    patterns = list(tokens[pat_ind])

    if len(patterns) < 2:
        # fewer than two patterns is invalid input
        with pytest.raises(ValueError):
            token_match_subsequent(patterns, tokens)
        return

    res = token_match_subsequent(patterns, tokens)
    assert isinstance(res, list)

    if len(tokens) == 0:
        assert res == []
        return

    n_pat = len(patterns)
    n_tok = len(tokens)
    for ind in res:
        assert len(ind) == n_pat
        assert np.all(ind >= 0)
        assert np.all(ind < n_tok)
        assert np.all(np.diff(ind) == 1)   # indices form a consecutive run
        assert np.array_equal(tokens[ind], patterns)
def test_token_glue_subsequent():
    """Check that token_glue_subsequent joins matched subsequent tokens with '_'."""
    tok = ['green', 'test', 'emob', 'test', 'greener', 'tests', 'test', 'test']

    # matches argument must be a list of index arrays
    with pytest.raises(ValueError):
        token_glue_subsequent(tok, 'invalid')

    # no matches -> tokens unchanged
    assert token_glue_subsequent(tok, []) == tok

    matches = token_match_subsequent(['green*', 'test*'], tok, match_type='glob')
    glued = token_glue_subsequent(tok, matches)
    assert glued == ['green_test', 'emob', 'test', 'greener_tests', 'test', 'test']

    matches = token_match_subsequent(['green*', 'test*', '*'], tok, match_type='glob')
    glued = token_glue_subsequent(tok, matches)
    assert glued == ['green_test_emob', 'test', 'greener_tests_test', 'test']
@given(tokens=st.lists(st.text(string.printable)), n_patterns=st.integers(0, 4))
def test_token_glue_subsequent_hypothesis(tokens, n_patterns):
    """Property test: each matched run appears '_'-joined in the glued output."""
    tokens_arr = np.array(tokens)
    n_patterns = min(len(tokens), n_patterns)

    # pick n_patterns tokens in random order as the pattern sequence
    pat_ind = np.arange(n_patterns)
    np.random.shuffle(pat_ind)
    patterns = list(tokens_arr[pat_ind])

    if len(patterns) <= 1:
        return   # token_match_subsequent requires at least two patterns

    matches = token_match_subsequent(patterns, tokens)
    assert token_glue_subsequent(tokens, []) == tokens

    if len(tokens) == 0:
        assert token_glue_subsequent(tokens, matches) == []
    elif len(matches) == 0:
        assert token_glue_subsequent(tokens, matches) == tokens
    else:
        glued = token_glue_subsequent(tokens, matches)
        assert isinstance(glued, list)
        assert 0 < len(glued) < len(tokens)
        for ind in matches:
            assert '_'.join(tokens_arr[ind]) in glued
| [
"tmtoolkit.preprocess._tokenfuncs.expand_compound_token",
"numpy.arange",
"tmtoolkit.preprocess._tokenfuncs.str_multisplit",
"tmtoolkit.preprocess._tokenfuncs.token_match_subsequent",
"numpy.unique",
"hypothesis.strategies.booleans",
"tmtoolkit.preprocess._tokenfuncs.token_match",
"pytest.raises",
"... | [((783, 813), 'tmtoolkit.preprocess._tokenfuncs.str_multisplit', 'str_multisplit', (['s', 'split_chars'], {}), '(s, split_chars)\n', (797, 813), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1969, 1995), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['s', 'l', 'u', 'as_str'], {}), '(s, l, u, as_str)\n', (1978, 1995), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2883, 2932), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['s', 'shape'], {'min_part_length': 'min_len'}), '(s, shape, min_part_length=min_len)\n', (2897, 2932), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((5154, 5263), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['s', 'split_chars'], {'split_on_len': 'split_on_len', 'split_on_casechange': 'split_on_casechange'}), '(s, split_chars, split_on_len=split_on_len,\n split_on_casechange=split_on_casechange)\n', (5175, 5263), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((5975, 6007), 'numpy.array', 'np.array', (['matches'], {'dtype': 'np.bool'}), '(matches, dtype=np.bool)\n', (5983, 6007), True, 'import numpy as np\n'), ((6085, 6190), 'tmtoolkit.preprocess._tokenfuncs.make_index_window_around_matches', 'make_index_window_around_matches', (['matches', 'left', 'right'], {'flatten': '(True)', 
'remove_overlaps': 'remove_overlaps'}), '(matches, left, right, flatten=True,\n remove_overlaps=remove_overlaps)\n', (6117, 6190), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((6940, 6972), 'numpy.array', 'np.array', (['matches'], {'dtype': 'np.bool'}), '(matches, dtype=np.bool)\n', (6948, 6972), True, 'import numpy as np\n'), ((7050, 7119), 'tmtoolkit.preprocess._tokenfuncs.make_index_window_around_matches', 'make_index_window_around_matches', (['matches', 'left', 'right'], {'flatten': '(False)'}), '(matches, left, right, flatten=False)\n', (7082, 7119), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9311, 9378), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['green*', 'test*']", 'tok'], {'match_type': '"""glob"""'}), "(['green*', 'test*'], tok, match_type='glob')\n", (9333, 9378), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9519, 9591), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['green*', 'test*', '*']", 'tok'], {'match_type': '"""glob"""'}), "(['green*', 'test*', '*'], tok, match_type='glob')\n", (9541, 9591), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9871, 9887), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (9879, 9887), True, 'import numpy as np\n'), ((9950, 9971), 
'numpy.arange', 'np.arange', (['n_patterns'], {}), '(n_patterns)\n', (9959, 9971), True, 'import numpy as np\n'), ((9976, 10002), 'numpy.random.shuffle', 'np.random.shuffle', (['pat_ind'], {}), '(pat_ind)\n', (9993, 10002), True, 'import numpy as np\n'), ((10896, 10963), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['green*', 'test*']", 'tok'], {'match_type': '"""glob"""'}), "(['green*', 'test*'], tok, match_type='glob')\n", (10918, 10963), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11093, 11165), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['green*', 'test*', '*']", 'tok'], {'match_type': '"""glob"""'}), "(['green*', 'test*', '*'], tok, match_type='glob')\n", (11115, 11165), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11437, 11453), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (11445, 11453), True, 'import numpy as np\n'), ((11516, 11537), 'numpy.arange', 'np.arange', (['n_patterns'], {}), '(n_patterns)\n', (11525, 11537), True, 'import numpy as np\n'), ((11542, 11568), 'numpy.random.shuffle', 'np.random.shuffle', (['pat_ind'], {}), '(pat_ind)\n', (11559, 11568), True, 'import numpy as np\n'), ((448, 484), 'tmtoolkit.preprocess._tokenfuncs.str_multisplit', 'str_multisplit', (['"""Te;s,t"""', "{';', ','}"], {}), "('Te;s,t', {';', ','})\n", (462, 484), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((516, 551), 
'tmtoolkit.preprocess._tokenfuncs.str_multisplit', 'str_multisplit', (['"""US-Student"""', 'punct'], {}), "('US-Student', punct)\n", (530, 551), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((584, 624), 'tmtoolkit.preprocess._tokenfuncs.str_multisplit', 'str_multisplit', (['"""-main_file.exe,"""', 'punct'], {}), "('-main_file.exe,', punct)\n", (598, 624), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((671, 680), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (678, 680), True, 'from hypothesis import given, strategies as st\n'), ((1195, 1208), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['""""""'], {}), "('')\n", (1204, 1208), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1226, 1242), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['"""xxx"""'], {}), "('xxx')\n", (1235, 1242), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1267, 1283), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['"""Xxx"""'], {}), "('Xxx')\n", (1276, 1283), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1308, 1324), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['"""xxX"""'], {}), 
"('xxX')\n", (1317, 1324), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1349, 1383), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['"""Xxx"""'], {'lower': '(1)', 'upper': '(0)'}), "('Xxx', lower=1, upper=0)\n", (1358, 1383), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1408, 1455), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['"""Xxx"""'], {'lower': '(1)', 'upper': '(0)', 'as_str': '(True)'}), "('Xxx', lower=1, upper=0, as_str=True)\n", (1417, 1455), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1476, 1527), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['"""Foo"""'], {'lower': '"""x"""', 'upper': '"""X"""', 'as_str': '(True)'}), "('Foo', lower='x', upper='X', as_str=True)\n", (1485, 1527), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((1548, 1557), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (1555, 1557), True, 'from hypothesis import given, strategies as st\n'), ((1569, 1606), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(9)'}), '(min_value=0, max_value=9)\n', (1580, 1606), True, 'from hypothesis import given, strategies as st\n'), ((1618, 1655), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(9)'}), '(min_value=0, max_value=9)\n', (1629, 
1655), True, 'from hypothesis import given, strategies as st\n'), ((1670, 1685), 'hypothesis.strategies.characters', 'st.characters', ([], {}), '()\n', (1683, 1685), True, 'from hypothesis import given, strategies as st\n'), ((1693, 1708), 'hypothesis.strategies.characters', 'st.characters', ([], {}), '()\n', (1706, 1708), True, 'from hypothesis import given, strategies as st\n'), ((1724, 1737), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1735, 1737), True, 'from hypothesis import given, strategies as st\n'), ((1748, 1761), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1759, 1761), True, 'from hypothesis import given, strategies as st\n'), ((2270, 2288), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['""""""'], {}), "('')\n", (2284, 2288), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2308, 2333), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['"""NewYork"""'], {}), "('NewYork')\n", (2322, 2333), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2364, 2389), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['"""newYork"""'], {}), "('newYork')\n", (2378, 2389), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2420, 2445), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['"""newyork"""'], {}), "('newyork')\n", (2434, 2445), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, 
expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2472, 2496), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['"""USflag"""'], {}), "('USflag')\n", (2486, 2496), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2526, 2549), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['"""eMail"""'], {}), "('eMail')\n", (2540, 2549), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2574, 2598), 'tmtoolkit.preprocess._tokenfuncs.str_shapesplit', 'str_shapesplit', (['"""foobaR"""'], {}), "('foobaR')\n", (2588, 2598), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2828, 2840), 'tmtoolkit.preprocess._tokenfuncs.str_shape', 'str_shape', (['s'], {}), '(s)\n', (2837, 2840), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((2624, 2649), 'hypothesis.strategies.text', 'st.text', (['string.printable'], {}), '(string.printable)\n', (2631, 2649), True, 'from hypothesis import given, strategies as st\n'), ((2665, 2678), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (2676, 2678), True, 'from hypothesis import given, strategies as st\n'), ((2688, 2725), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(5)'}), '(min_value=1, max_value=5)\n', (2699, 
2725), True, 'from hypothesis import given, strategies as st\n'), ((3174, 3209), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""US-Student"""'], {}), "('US-Student')\n", (3195, 3209), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3242, 3279), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""US-Student-X"""'], {}), "('US-Student-X')\n", (3263, 3279), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3313, 3353), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Camel-CamelCase"""'], {}), "('Camel-CamelCase')\n", (3334, 3353), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3391, 3457), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Camel-CamelCase"""'], {'split_on_casechange': '(True)'}), "('Camel-CamelCase', split_on_casechange=True)\n", (3412, 3457), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3499, 3539), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Camel-camelCase"""'], {}), "('Camel-camelCase')\n", (3520, 3539), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, 
token_match_subsequent, token_glue_subsequent, token_match\n'), ((3577, 3643), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Camel-camelCase"""'], {'split_on_casechange': '(True)'}), "('Camel-camelCase', split_on_casechange=True)\n", (3598, 3643), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3685, 3719), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Student-X"""'], {}), "('Student-X')\n", (3706, 3719), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3747, 3786), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Do-Not-Disturb"""'], {}), "('Do-Not-Disturb')\n", (3768, 3786), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((3826, 3870), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""E-Mobility-Strategy"""'], {}), "('E-Mobility-Strategy')\n", (3847, 3870), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4485, 4545), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""E-Mobility-Strategy"""'], {'split_on_len': '(1)'}), "('E-Mobility-Strategy', split_on_len=1)\n", (4506, 4545), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, 
make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4591, 4616), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['""""""'], {}), "('')\n", (4612, 4616), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4637, 4739), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Te;s,t"""'], {'split_chars': "[';', ',']", 'split_on_len': '(1)', 'split_on_casechange': '(False)'}), "('Te;s,t', split_chars=[';', ','], split_on_len=1,\n split_on_casechange=False)\n", (4658, 4739), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4752, 4849), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['"""Te-s-t"""'], {'split_chars': "['-']", 'split_on_len': '(1)', 'split_on_casechange': '(False)'}), "('Te-s-t', split_chars=['-'], split_on_len=1,\n split_on_casechange=False)\n", (4773, 4849), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4890, 4915), 'hypothesis.strategies.text', 'st.text', (['string.printable'], {}), '(string.printable)\n', (4897, 4915), True, 'from hypothesis import given, strategies as st\n'), ((4992, 5006), 'hypothesis.strategies.integers', 'st.integers', (['(1)'], {}), '(1)\n', (5003, 5006), True, 'from hypothesis import given, strategies as st\n'), ((5035, 5048), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (5046, 5048), True, 'from hypothesis import given, strategies as st\n'), ((6026, 
6043), 'numpy.where', 'np.where', (['matches'], {}), '(matches)\n', (6034, 6043), True, 'import numpy as np\n'), ((6442, 6474), 'numpy.array_equal', 'np.array_equal', (['matches_ind', 'res'], {}), '(matches_ind, res)\n', (6456, 6474), True, 'import numpy as np\n'), ((5740, 5778), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(10)'}), '(min_value=0, max_value=10)\n', (5751, 5778), True, 'from hypothesis import given, strategies as st\n'), ((5793, 5831), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(10)'}), '(min_value=0, max_value=10)\n', (5804, 5831), True, 'from hypothesis import given, strategies as st\n'), ((5856, 5869), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (5867, 5869), True, 'from hypothesis import given, strategies as st\n'), ((6991, 7008), 'numpy.where', 'np.where', (['matches'], {}), '(matches)\n', (6999, 7008), True, 'import numpy as np\n'), ((6754, 6792), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(10)'}), '(min_value=0, max_value=10)\n', (6765, 6792), True, 'from hypothesis import given, strategies as st\n'), ((6807, 6845), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(10)'}), '(min_value=0, max_value=10)\n', (6818, 6845), True, 'from hypothesis import given, strategies as st\n'), ((8806, 8872), 'tmtoolkit.preprocess._tokenfuncs.token_match', 'token_match', (['pattern', 'tokens', 'match_type', 'ignore_case', 'glob_method'], {}), '(pattern, tokens, match_type, ignore_case, glob_method)\n', (8817, 8872), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((8874, 8892), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (8882, 8892), True, 'import numpy as np\n'), ((9021, 9046), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9034, 9046), False, 'import pytest\n'), ((9056, 9094), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (['"""pattern"""', 'tok'], {}), "('pattern', tok)\n", (9078, 9094), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9105, 9130), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9118, 9130), False, 'import pytest\n'), ((9140, 9180), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['pattern']", 'tok'], {}), "(['pattern'], tok)\n", (9162, 9180), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9193, 9231), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['a', 'b']", '[]'], {}), "(['a', 'b'], [])\n", (9215, 9231), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9250, 9293), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (["['foo', 'bar']", 'tok'], {}), "(['foo', 'bar'], tok)\n", (9272, 9293), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9438, 9454), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (9446, 9454), True, 'import numpy as np\n'), ((9490, 9506), 'numpy.array', 'np.array', (['[4, 5]'], {}), '([4, 5])\n', (9498, 9506), True, 
'import numpy as np\n'), ((9651, 9670), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (9659, 9670), True, 'import numpy as np\n'), ((9706, 9725), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (9714, 9725), True, 'import numpy as np\n'), ((10184, 10224), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (['patterns', 'tokens'], {}), '(patterns, tokens)\n', (10206, 10224), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9775, 9792), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(4)'], {}), '(0, 4)\n', (9786, 9792), True, 'from hypothesis import given, strategies as st\n'), ((10758, 10783), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10771, 10783), False, 'import pytest\n'), ((10793, 10830), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tok', '"""invalid"""'], {}), "(tok, 'invalid')\n", (10814, 10830), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((10843, 10873), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tok', '[]'], {}), '(tok, [])\n', (10864, 10873), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((10975, 11010), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tok', 'matches'], {}), '(tok, matches)\n', (10996, 11010), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, 
expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11177, 11212), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tok', 'matches'], {}), '(tok, matches)\n', (11198, 11212), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11655, 11695), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (['patterns', 'tokens'], {}), '(patterns, tokens)\n', (11677, 11695), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11338, 11355), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(4)'], {}), '(0, 4)\n', (11349, 11355), True, 'from hypothesis import given, strategies as st\n'), ((703, 718), 'hypothesis.strategies.characters', 'st.characters', ([], {}), '()\n', (716, 718), True, 'from hypothesis import given, strategies as st\n'), ((4102, 4173), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['inp'], {'split_on_len': 'None', 'split_on_casechange': '(True)'}), '(inp, split_on_len=None, split_on_casechange=True)\n', (4123, 4173), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4392, 4460), 'tmtoolkit.preprocess._tokenfuncs.expand_compound_token', 'expand_compound_token', (['inp'], {'split_on_len': '(2)', 'split_on_casechange': '(True)'}), '(inp, split_on_len=2, split_on_casechange=True)\n', (4413, 4460), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, 
str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((4938, 4969), 'hypothesis.strategies.characters', 'st.characters', ([], {'min_codepoint': '(32)'}), '(min_codepoint=32)\n', (4951, 4969), True, 'from hypothesis import given, strategies as st\n'), ((6334, 6345), 'numpy.min', 'np.min', (['res'], {}), '(res)\n', (6340, 6345), True, 'import numpy as np\n'), ((6366, 6377), 'numpy.max', 'np.max', (['res'], {}), '(res)\n', (6372, 6377), True, 'import numpy as np\n'), ((5712, 5725), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (5723, 5725), True, 'from hypothesis import given, strategies as st\n'), ((7332, 7343), 'numpy.min', 'np.min', (['win'], {}), '(win)\n', (7338, 7343), True, 'import numpy as np\n'), ((7364, 7375), 'numpy.max', 'np.max', (['win'], {}), '(win)\n', (7370, 7375), True, 'import numpy as np\n'), ((6726, 6739), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (6737, 6739), True, 'from hypothesis import given, strategies as st\n'), ((7895, 7920), 'numpy.array', 'np.array', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (7903, 7920), True, 'import numpy as np\n'), ((10080, 10105), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10093, 10105), False, 'import pytest\n'), ((10119, 10159), 'tmtoolkit.preprocess._tokenfuncs.token_match_subsequent', 'token_match_subsequent', (['patterns', 'tokens'], {}), '(patterns, tokens)\n', (10141, 10159), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((9752, 9761), 'hypothesis.strategies.text', 'st.text', ([], {}), '()\n', (9759, 9761), True, 'from hypothesis import given, strategies as st\n'), ((11711, 11744), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tokens', 
'[]'], {}), '(tokens, [])\n', (11732, 11744), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11299, 11324), 'hypothesis.strategies.text', 'st.text', (['string.printable'], {}), '(string.printable)\n', (11306, 11324), True, 'from hypothesis import given, strategies as st\n'), ((6543, 6557), 'numpy.unique', 'np.unique', (['res'], {}), '(res)\n', (6552, 6557), True, 'import numpy as np\n'), ((10435, 10451), 'numpy.all', 'np.all', (['(ind >= 0)'], {}), '(ind >= 0)\n', (10441, 10451), True, 'import numpy as np\n'), ((10594, 10631), 'numpy.array_equal', 'np.array_equal', (['tokens[ind]', 'patterns'], {}), '(tokens[ind], patterns)\n', (10608, 10631), True, 'import numpy as np\n'), ((11804, 11842), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tokens', 'matches'], {}), '(tokens, matches)\n', (11825, 11842), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11981, 12019), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tokens', 'matches'], {}), '(tokens, matches)\n', (12002, 12019), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, token_match\n'), ((11900, 11938), 'tmtoolkit.preprocess._tokenfuncs.token_glue_subsequent', 'token_glue_subsequent', (['tokens', 'matches'], {}), '(tokens, matches)\n', (11921, 11938), False, 'from tmtoolkit.preprocess._tokenfuncs import str_multisplit, str_shape, str_shapesplit, expand_compound_token, make_index_window_around_matches, token_match_subsequent, token_glue_subsequent, 
token_match\n'), ((10531, 10543), 'numpy.diff', 'np.diff', (['ind'], {}), '(ind)\n', (10538, 10543), True, 'import numpy as np\n')] |
#!pip install timm
import torch
import numpy as np
import pandas
import os
import gc
from tqdm import tqdm
import wandb
from datetime import datetime
import pprint
from utils import set_random_seed, get_scheduler, get_criterion, train_epoch, val_epoch
from config import Paths, Training
from augmentations import get_augmentations_train, get_augmentations_val
from model import Model
from dataset import PawpularDataset
# Debug mode shrinks the run (2 epochs, 2 folds) for quick smoke testing.
DEBUG = False
if DEBUG:
    Training.epochs = 2
    Training.n_folds = 2
# Fix RNG seeds for reproducibility (implementation lives in utils.set_random_seed).
set_random_seed()
# All training below assumes a CUDA-capable GPU; there is no CPU fallback.
device = torch.device('cuda')
# def get_trans(img, I):
# if I >= 4:
# img = img.transpose(2,3)
# if I % 4 == 0:
# return img
# elif I % 4 == 1:
# return img.flip(2)
# elif I % 4 == 2:
# return img.flip(3)
# elif I % 4 == 3:
# return img.flip(2).flip(3)
# model = timm.create_model('efficientnetv2_m')
# print(model.__str__()[-1000:])
# model
# Reading data
df = pandas.read_csv(Paths.train_csv)
# Meta features
if Training.use_meta:
    # Adding image sizes
    # Use the on-disk JPEG file size (log-scaled) as an extra tabular feature.
    image_names = df['Id'].values
    image_sizes = np.zeros(image_names.shape[0])
    for i, img_name in enumerate(tqdm(image_names)):
        image_sizes[i] = os.path.getsize(os.path.join(Paths.data, 'train', f'{img_name}.jpg'))
    df['Image_size'] = np.log(image_sizes)
    # Every column except the id, fold and target is treated as metadata.
    meta_features = list(filter(lambda e: e not in ('Id', 'fold', 'Pawpularity'), df.columns))
    n_meta_features = len(meta_features)
else:
    meta_features = None
    n_meta_features = 0
# Adding bins to train.csv
# NOTE(review): folds are assigned uniformly at random, not stratified by
# the Pawpularity target — confirm this is intentional.
df['fold'] = np.random.randint(low=0, high=Training.n_folds, size=len(df))
criterion = get_criterion(Training)
def run(notes='Baseline'):
    """Train one model per fold, logging to wandb and keeping each fold's
    best checkpoint (lowest validation RMSE).

    notes: free-form text attached to the wandb run for bookkeeping.

    Relies on module-level state: ``df``, ``criterion``, ``meta_features``,
    ``n_meta_features``, ``DEBUG``, ``device`` and the Training/Paths
    config classes.
    """
    # Timestamped group name so repeated runs never collide in wandb.
    datetime_suffix = datetime.now().strftime("%d-%m-%Y-%H:%M:%S")
    group = Training.kernel_type + '_' + datetime_suffix
    pprint.pprint(Training.get_class_attributes())
    print(group)
    for fold in range(Training.n_folds):
        model_name = group + f'_fold_{fold}'
        if not DEBUG:
            wandb.init(
                project="petfinder",
                entity='vladislav',
                group=group,
                name=model_name,
                notes=notes,
                config=Training.get_class_attributes(),
            )
        # Hold out the current fold for validation; train on the rest.
        train_df = df[df['fold'] != fold]
        val_df = df[df['fold'] == fold]
        train_dataset = PawpularDataset(csv=train_df, data_path=Paths.data,
                                        augmentations=get_augmentations_train(Training), meta_features=meta_features)
        val_dataset = PawpularDataset(csv=val_df, data_path=Paths.data,
                                      augmentations=get_augmentations_val(Training), meta_features=meta_features)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                    batch_size=Training.batch_size,
                                                    sampler=torch.utils.data.sampler.RandomSampler(train_dataset),
                                                    num_workers=Training.num_workers,
                                                    drop_last=True)
        val_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=Training.batch_size,
                                                  num_workers=Training.num_workers)
        model = Model(kernel_type=Training.kernel_type, n_meta_features=n_meta_features) # ,drop_rate=Training.drop_rate, drop_path_rate=Training.drop_path_rate)
        model = model.to(device)
        patience_counter = 0
        val_rmse_min = float('inf')
        model_file = os.path.join(Paths.weights, f'{model_name}_best.pth')
        optimizer = torch.optim.Adam(model.parameters(), lr=Training.lr)
        scheduler = get_scheduler(Training, optimizer, Training.epochs)
        for epoch in range(1, Training.epochs + 1):
            print('Epoch:', epoch)
            # After the warm-up phase, unfreeze the backbone for fine-tuning.
            if (epoch - 1) == Training.warm_up_epochs:
                model.unfreeze()
            train_loss, train_rmse = train_epoch(model=model, loader=train_loader, optimizer=optimizer,
                                                 criterion=criterion, use_meta=Training.use_meta, device=device,
                                                 DEBUG=DEBUG)
            val_loss, val_rmse = val_epoch(model=model, loader=val_loader, criterion=criterion,
                                           use_meta=Training.use_meta, device=device, DEBUG=DEBUG)
            if not DEBUG:
                wandb.log({'train_loss': np.mean(train_loss), 'train_rmse': train_rmse,
                           'val_loss': val_loss, 'val_rmse': val_rmse})
            content = f'Fold {fold}, Epoch {epoch}, lr: {optimizer.param_groups[0]["lr"]:.7f}, \
\ntrain loss: {np.mean(train_loss):.5f}, train rmse: {train_rmse:.4f}, \
            valid loss: {val_loss:.5f}, val_rmse: {val_rmse:.4f}.'
            print(content)
            # Checkpoint whenever validation RMSE improves; in DEBUG the
            # checkpoint file is deleted right away to avoid clutter.
            if val_rmse < val_rmse_min:
                print('val_rmse_min ({:.6f} --> {:.6f}). Saving model ...'.format(val_rmse_min, val_rmse))
                torch.save(model.state_dict(), model_file)
                if DEBUG:
                    os.remove(model_file)
                else:
                    wandb.run.summary["val_rmse_min"] = val_rmse
                val_rmse_min = val_rmse
                patience_counter = 0
            # Early stopping: the counter is reset on improvement above and
            # incremented below, so the check fires after Training.patience
            # consecutive non-improving epochs.
            if patience_counter >= Training.patience:
                print(f"Early stopping at epoch # {epoch}")
                break
            patience_counter += 1
            if scheduler:
                scheduler.step()
            if Training.scheduler == 'melanoma' and epoch == 2: scheduler.step() # bug workaround
        # Memory cleaning
        model = None
        optimizer = None
        scheduler = None
        gc.collect()
        torch.cuda.empty_cache()
        if not DEBUG:
            wandb.finish()
if __name__ == '__main__':
    run()
| [
"utils.get_scheduler",
"os.remove",
"wandb.finish",
"pandas.read_csv",
"utils.val_epoch",
"utils.get_criterion",
"gc.collect",
"numpy.mean",
"torch.device",
"augmentations.get_augmentations_train",
"os.path.join",
"torch.utils.data.DataLoader",
"utils.set_random_seed",
"config.Training.get... | [((497, 514), 'utils.set_random_seed', 'set_random_seed', ([], {}), '()\n', (512, 514), False, 'from utils import set_random_seed, get_scheduler, get_criterion, train_epoch, val_epoch\n'), ((524, 544), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (536, 544), False, 'import torch\n'), ((943, 975), 'pandas.read_csv', 'pandas.read_csv', (['Paths.train_csv'], {}), '(Paths.train_csv)\n', (958, 975), False, 'import pandas\n'), ((1632, 1655), 'utils.get_criterion', 'get_criterion', (['Training'], {}), '(Training)\n', (1645, 1655), False, 'from utils import set_random_seed, get_scheduler, get_criterion, train_epoch, val_epoch\n'), ((1097, 1127), 'numpy.zeros', 'np.zeros', (['image_names.shape[0]'], {}), '(image_names.shape[0])\n', (1105, 1127), True, 'import numpy as np\n'), ((1299, 1318), 'numpy.log', 'np.log', (['image_sizes'], {}), '(image_sizes)\n', (1305, 1318), True, 'import numpy as np\n'), ((1161, 1178), 'tqdm.tqdm', 'tqdm', (['image_names'], {}), '(image_names)\n', (1165, 1178), False, 'from tqdm import tqdm\n'), ((1837, 1868), 'config.Training.get_class_attributes', 'Training.get_class_attributes', ([], {}), '()\n', (1866, 1868), False, 'from config import Paths, Training\n'), ((3163, 3273), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'Training.batch_size', 'num_workers': 'Training.num_workers'}), '(val_dataset, batch_size=Training.batch_size,\n num_workers=Training.num_workers)\n', (3190, 3273), False, 'import torch\n'), ((3393, 3465), 'model.Model', 'Model', ([], {'kernel_type': 'Training.kernel_type', 'n_meta_features': 'n_meta_features'}), '(kernel_type=Training.kernel_type, n_meta_features=n_meta_features)\n', (3398, 3465), False, 'from model import Model\n'), ((3659, 3712), 'os.path.join', 'os.path.join', (['Paths.weights', 'f"""{model_name}_best.pth"""'], {}), "(Paths.weights, f'{model_name}_best.pth')\n", (3671, 3712), False, 'import os\n'), ((3807, 3858), 
'utils.get_scheduler', 'get_scheduler', (['Training', 'optimizer', 'Training.epochs'], {}), '(Training, optimizer, Training.epochs)\n', (3820, 3858), False, 'from utils import set_random_seed, get_scheduler, get_criterion, train_epoch, val_epoch\n'), ((5887, 5899), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5897, 5899), False, 'import gc\n'), ((5908, 5932), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5930, 5932), False, 'import torch\n'), ((1222, 1274), 'os.path.join', 'os.path.join', (['Paths.data', '"""train"""', 'f"""{img_name}.jpg"""'], {}), "(Paths.data, 'train', f'{img_name}.jpg')\n", (1234, 1274), False, 'import os\n'), ((1712, 1726), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1724, 1726), False, 'from datetime import datetime\n'), ((4072, 4224), 'utils.train_epoch', 'train_epoch', ([], {'model': 'model', 'loader': 'train_loader', 'optimizer': 'optimizer', 'criterion': 'criterion', 'use_meta': 'Training.use_meta', 'device': 'device', 'DEBUG': 'DEBUG'}), '(model=model, loader=train_loader, optimizer=optimizer,\n criterion=criterion, use_meta=Training.use_meta, device=device, DEBUG=DEBUG\n )\n', (4083, 4224), False, 'from utils import set_random_seed, get_scheduler, get_criterion, train_epoch, val_epoch\n'), ((4347, 4470), 'utils.val_epoch', 'val_epoch', ([], {'model': 'model', 'loader': 'val_loader', 'criterion': 'criterion', 'use_meta': 'Training.use_meta', 'device': 'device', 'DEBUG': 'DEBUG'}), '(model=model, loader=val_loader, criterion=criterion, use_meta=\n Training.use_meta, device=device, DEBUG=DEBUG)\n', (4356, 4470), False, 'from utils import set_random_seed, get_scheduler, get_criterion, train_epoch, val_epoch\n'), ((5972, 5986), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (5984, 5986), False, 'import wandb\n'), ((2476, 2509), 'augmentations.get_augmentations_train', 'get_augmentations_train', (['Training'], {}), '(Training)\n', (2499, 2509), False, 'from augmentations import 
get_augmentations_train, get_augmentations_val\n'), ((2664, 2695), 'augmentations.get_augmentations_val', 'get_augmentations_val', (['Training'], {}), '(Training)\n', (2685, 2695), False, 'from augmentations import get_augmentations_train, get_augmentations_val\n'), ((2934, 2987), 'torch.utils.data.sampler.RandomSampler', 'torch.utils.data.sampler.RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (2972, 2987), False, 'import torch\n'), ((2216, 2247), 'config.Training.get_class_attributes', 'Training.get_class_attributes', ([], {}), '()\n', (2245, 2247), False, 'from config import Paths, Training\n'), ((4863, 4882), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4870, 4882), True, 'import numpy as np\n'), ((5253, 5274), 'os.remove', 'os.remove', (['model_file'], {}), '(model_file)\n', (5262, 5274), False, 'import os\n'), ((4589, 4608), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4596, 4608), True, 'import numpy as np\n')] |
import pickle
import random
from functools import reduce, partial
import dill
import numpy as np
from pathlib import Path
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import Subset, ConcatDataset
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, GridSearchCV
from sklearn.metrics import hamming_loss, f1_score, classification_report
from skai.dataset import TokenDataset, SimpleDataset
from skai.sanitizer import sample_cleaner, small_class, small_class_multi
from skai.utils import get_classification_type, weights_init, multilabel_prediction
from skai.vectorizer import OneHotVectorizer
from skai.metrics import hamming_loss_skai
# Cache directory where cleaned data and fitted datasets are pickled between runs.
SKAI_COMMON = Path('skai/common/')
class TextRunner:
    """Orchestrates training/evaluation of several model wrappers on one
    text dataset, caching cleaned data and vectorized datasets on disk.

    NOTE(review): the names ``fd``, ``fs``, ``fl``, ``fm`` used below are
    not imported in this file (presumably fastai's dataloader/dataset/
    learner/metrics modules) — confirm where they come from.
    """
    def __init__(self, mwrappers, rdata, labels, dataset_name,
                 make_pyt_data=True, verbose=True):
        """Takes different mwrappers and runs training and evaluation
        over them.
        mwrappers: A list of mwrappers.
        rdata: Raw data, list of strings.
        labels: Labels, list of strings.
        """
        self.verbose = verbose
        # dill must serialize closures recursively for the cached datasets.
        dill.settings['recurse'] = True
        try:
            # Prefer the cached (cleaned) copy of the raw data if present.
            rdata, labels = pickle.load((SKAI_COMMON/f'{dataset_name}.pkl').open('rb'))
        except FileNotFoundError:
            # rdata, labels = sample_cleaner([small_class], rdata, labels)
            pickle.dump((rdata, labels), (SKAI_COMMON/f'{dataset_name}.pkl').open('wb'))
            if verbose:
                print('Checkpoint reached: raw data cleaned.')
        self.rdata = rdata
        self.labels = labels
        # 'multilabel' vs. single-label, inferred from the label structure.
        self.classification_type = get_classification_type(labels)
        if verbose:
            print(f'{self.classification_type} classification.')
        try:
            # Cached tokenized+vectorized dataset over ALL samples.
            self.alldata = dill.load((SKAI_COMMON/f'{dataset_name}_alldata.pkl').open('rb'))
        except (FileNotFoundError, EOFError):
            self.alldata = TokenDataset(rdata, labels, onehot=True)
            dill.dump(self.alldata, (SKAI_COMMON/f'{dataset_name}_alldata.pkl').open('wb'))
        self.mwrappers = mwrappers
        if make_pyt_data:
            self.data_setup_pyt(dataset_name)
    def data_setup_pyt(self, dataset_name):
        """Load the cached train/val/test splits and build self.dataset
        (one big [X, y] pair) from their concatenation.

        NOTE(review): trainsets_pyt / valsets_pyt are initialized empty and
        never populated here, yet run()/loaded_run() zip over them — confirm
        they are filled elsewhere.
        """
        self.trainsets_pyt = []
        self.valsets_pyt = []
        # The splits must reuse the vectorizers fitted on alldata so token
        # and label indices stay consistent across train/val/test.
        trainset = self.get_dataset(f'{dataset_name}_trainset0', None, None,
                                    TokenDataset, onehot=True,
                                    tvectorizer=self.alldata.tvectorizer,
                                    ovectorizer=self.alldata.ovectorizer)
        valset = self.get_dataset(f'{dataset_name}_valset0', None, None,
                                  TokenDataset, onehot=True,
                                  tvectorizer=self.alldata.tvectorizer,
                                  ovectorizer=self.alldata.ovectorizer)
        testset = self.get_dataset(f'{dataset_name}_testset4', None, None,
                                   TokenDataset, onehot=True,
                                   tvectorizer=self.alldata.tvectorizer,
                                   ovectorizer=self.alldata.ovectorizer)
        Xall, yall = [], []
        allset = ConcatDataset([trainset, valset, testset])
        for x, y in allset:
            Xall.append(x)
            yall.append(y)
        self.dataset = [np.array(Xall), np.array(yall)]
    def get_clf_sk(self, mwrapper, X, y, scoring='accuracy'):
        """Grid-search the wrapper's sklearn pipeline on (X, y); return the
        best estimator and its CV score.
        """
        pipeline = mwrapper.pipeline
        parameters = mwrapper.parameters
        if self.classification_type == 'multilabel':
            # ohv = OneHotVectorizer()
            print(set(reduce(lambda acc, x: acc + x, y, [])))
            print(len(set(reduce(lambda acc, x: acc + x, y, []))))
            # print(y[1])
            # Multilabel targets must be binarized before grid search.
            y = self.alldata.ovectorizer.transform(y)
            # y = ohv.fit_transform(y)
            # print(y[1])
        grid_search_tune = GridSearchCV(pipeline, parameters,
                                        n_jobs=4, verbose=1, scoring=scoring)
        grid_search_tune.fit(X, y)
        return grid_search_tune.best_estimator_, grid_search_tune.best_score_
    def get_learner_pyt(self, mwrapper, trainset, valset, ce_loss=False):
        """Wrap the mwrapper's model and the given splits into a learner
        with metrics matching the classification type.

        ce_loss: if True, use a weighted cross-entropy criterion with the
        hard-coded class weights below; otherwise the learner's default.
        """
        dl_train = fd.DataLoader(trainset, batch_size=32,
                                num_workers=1, pad_idx=1,
                                transpose=False)
        dl_val = fd.DataLoader(valset, batch_size=32,
                              num_workers=1, pad_idx=1,
                              transpose=False)
        # dl_test = fd.DataLoader(self.testset, batch_size=4,
        #                        num_workers=2, pad_idx=1,
        #                        transpose=False)
        modeldata = fs.ModelData(f'skai/models/{mwrapper.name}', dl_train, dl_val)
        # print(modeldata.trn_y)
        # Re-initialize weights so each learner starts from scratch.
        mwrapper.model.apply(weights_init)
        crit = None
        if ce_loss:
            # Hard-coded per-class weights — presumably inverse-frequency
            # for a specific 10-class dataset; verify against the data.
            w = torch.Tensor([0.08148148148148147, 0.22916666666666666,
                              0.25, 0.2727272727272727, 0.3, 0.5076923076923077,
                              0.515625, 0.673469387755102, 0.9166666666666666, 1.0])
            crit = CE_lambda(w)
        learner = fl.Learner.from_model_data(mwrapper.model, modeldata,
                                           opt_fn=Adam_lambda(), crit=crit)
        if self.classification_type == 'multilabel':
            learner.metrics = [fm.accuracy_multi, hamming_loss_skai]
        else:
            learner.metrics = [fm.accuracy, fm.recall]
        return learner
    def load_learner_pyt(self, mwrapper, trainset, valset, ce_loss=False):
        """Build a learner and restore its 'best' checkpoint."""
        learner = self.get_learner_pyt(mwrapper, trainset, valset, ce_loss)
        learner.load('best')
        return learner
    def vis_lr_pyt(self, learner):
        # Plot the learning-rate finder results recorded by the scheduler.
        learner.sched.plot()
    def fit_pyt(self, learner, lrs, epochs):
        # Thin wrapper over the learner's fit loop.
        learner.fit(lrs, epochs)
    def get_dataset(self, name, X, y, Dataset=TokenDataset, **kwargs):
        """Return the dataset cached under *name*, building and caching it
        from (X, y) on a cache miss.
        """
        try:
            # raise FileNotFoundError
            dataset = dill.load((SKAI_COMMON/f'{name}.pkl').open('rb'))
        except (FileNotFoundError, EOFError):
            dataset = Dataset(X, y, **kwargs)
            dill.dump(dataset, (SKAI_COMMON/f'{name}.pkl').open('wb'))
        return dataset
    def run(self, lrs=1e-4, epochs=5):
        """Train every wrapper; for pytorch wrappers keep the checkpoint
        with the best test metric and return the last (preds, targs).

        NOTE(review): the sklearn branch calls self.get_clf_sk(mwrapper)
        without the X/y arguments its signature requires, and uses
        self.X_test / self.y_test / self.testset which are not set in this
        file — confirm against the intended callers.
        """
        for mwrapper in self.mwrappers:
            if mwrapper.type == 'sklearn':
                clf = self.get_clf_sk(mwrapper)
                predictions = clf.predict(self.X_test)
                print(classification_report(self.y_test, predictions))
            else:
                best_acc = 0
                best_f1_mic = 0
                for trs, vals in zip(self.trainsets_pyt, self.valsets_pyt):
                    # Set True when use cross entropy loss
                    # print(trs[0])
                    learner = self.get_learner_pyt(mwrapper, trs, vals, False)
                    self.fit_pyt(learner, lrs, epochs)
                    dl_test = fd.DataLoader(self.testset, batch_size=32,
                                            num_workers=2, pad_idx=1,
                                            transpose=False)
                    preds, targs = learner.predict_dl(dl_test)
                    if self.verbose:
                        print(targs[:1], preds[:1])
                    if self.classification_type == 'multilabel':
                        # Threshold the sigmoid outputs and track micro-F1.
                        preds = multilabel_prediction(preds, threshold=0.5)
                        # hl = hamming_loss(targs, preds)
                        micro_f1 = f1_score(targs, preds, average='micro')
                        # macro_f1 = f1_score(targs, preds, average='macro')
                        print(micro_f1)
                        if micro_f1 > best_f1_mic:
                            best_f1_mic = micro_f1
                            learner.save(f'best')
                    else:
                        acc = float(fm.accuracy(torch.from_numpy(preds), torch.from_numpy(targs)))
                        print(acc)
                        if acc > best_acc:
                            best_acc = acc
                            learner.save('best')
                    # print(fm.precision(torch.from_numpy(preds), torch.from_numpy(targs)))
                    # print(fm.recall(torch.from_numpy(preds), torch.from_numpy(targs)))
                    # print(fm.f1(torch.from_numpy(preds), torch.from_numpy(targs)))
        return (preds, targs)
    def loaded_run(self, model='test'):
        """Restore the first wrapper's 'best' checkpoint and predict on the
        test set (or the first validation split when model='val').
        """
        mwrapper = self.mwrappers[0]
        for trs, vals in zip(self.trainsets_pyt, self.valsets_pyt):
            learner = self.get_learner_pyt(mwrapper, trs, vals, False)
            learner.load('best')
        testset = self.testset
        if model == 'val':
            testset = self.valsets_pyt[0]
        dl_test = fd.DataLoader(testset, batch_size=32,
                                num_workers=2, pad_idx=1,
                                transpose=False)
        preds, targs = learner.predict_dl(dl_test)
        return (preds, targs)
def Adam_lambda(lr=0.001):
    """Return an Adam optimizer factory with the learning rate pre-bound.

    The returned callable forwards all positional and keyword arguments to
    ``torch.optim.Adam`` and injects ``lr``.
    """
    # BUG FIX: the original unpacked kwargs positionally (``*kwargs``),
    # which passed the dict's *keys* as extra positional arguments and
    # raised TypeError whenever any keyword argument was supplied.
    return lambda *args, **kwargs: Adam(*args, lr=lr, **kwargs)
def SGD_Momentum(momentum):
    """Return an SGD optimizer factory with the given momentum pre-bound.

    The returned callable forwards all arguments to ``torch.optim.SGD``
    and injects ``momentum``.
    """
    # BUG FIX: the original referenced ``optim``, a name never imported in
    # this module (would NameError at call time); use the already-imported
    # ``torch`` package explicitly.
    return lambda *args, **kwargs: torch.optim.SGD(*args, momentum=momentum, **kwargs)
def CE_lambda(w):
    """Build a weighted cross-entropy criterion for one-hot targets.

    NOTE(review): ``fc`` is not imported in this file (presumably a fastai
    core module providing ``to_gpu``) — confirm where it comes from.
    """
    # Pre-bind the class-weight tensor so the returned function has the
    # two-argument signature the learner expects: loss(y_pred, y_true).
    crit = partial(F.cross_entropy, weight=fc.to_gpu(w))
    def loss(y_pred, y_true):
        # Targets arrive one-hot encoded; F.cross_entropy wants class indices.
        y_true = torch.argmax(y_true, -1)
        return crit(y_pred, y_true)
return loss | [
"sklearn.model_selection.GridSearchCV",
"torch.utils.data.ConcatDataset",
"skai.dataset.TokenDataset",
"torch.argmax",
"skai.utils.multilabel_prediction",
"sklearn.metrics.classification_report",
"skai.utils.get_classification_type",
"pathlib.Path",
"torch.optim.Adam",
"numpy.array",
"torch.Tens... | [((734, 754), 'pathlib.Path', 'Path', (['"""skai/common/"""'], {}), "('skai/common/')\n", (738, 754), False, 'from pathlib import Path\n'), ((1673, 1704), 'skai.utils.get_classification_type', 'get_classification_type', (['labels'], {}), '(labels)\n', (1696, 1704), False, 'from skai.utils import get_classification_type, weights_init, multilabel_prediction\n'), ((3246, 3288), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['[trainset, valset, testset]'], {}), '([trainset, valset, testset])\n', (3259, 3288), False, 'from torch.utils.data import Subset, ConcatDataset\n'), ((3974, 4046), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipeline', 'parameters'], {'n_jobs': '(4)', 'verbose': '(1)', 'scoring': 'scoring'}), '(pipeline, parameters, n_jobs=4, verbose=1, scoring=scoring)\n', (3986, 4046), False, 'from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, GridSearchCV\n'), ((9241, 9268), 'torch.optim.Adam', 'Adam', (['*args', '*kwargs'], {'lr': 'lr'}), '(*args, *kwargs, lr=lr)\n', (9245, 9268), False, 'from torch.optim import Adam\n'), ((9502, 9526), 'torch.argmax', 'torch.argmax', (['y_true', '(-1)'], {}), '(y_true, -1)\n', (9514, 9526), False, 'import torch\n'), ((3404, 3418), 'numpy.array', 'np.array', (['Xall'], {}), '(Xall)\n', (3412, 3418), True, 'import numpy as np\n'), ((3420, 3434), 'numpy.array', 'np.array', (['yall'], {}), '(yall)\n', (3428, 3434), True, 'import numpy as np\n'), ((4999, 5170), 'torch.Tensor', 'torch.Tensor', (['[0.08148148148148147, 0.22916666666666666, 0.25, 0.2727272727272727, 0.3, \n 0.5076923076923077, 0.515625, 0.673469387755102, 0.9166666666666666, 1.0]'], {}), '([0.08148148148148147, 0.22916666666666666, 0.25, \n 0.2727272727272727, 0.3, 0.5076923076923077, 0.515625, \n 0.673469387755102, 0.9166666666666666, 1.0])\n', (5011, 5170), False, 'import torch\n'), ((1970, 2010), 'skai.dataset.TokenDataset', 'TokenDataset', (['rdata', 'labels'], {'onehot': '(True)'}), '(rdata, 
labels, onehot=True)\n', (1982, 2010), False, 'from skai.dataset import TokenDataset, SimpleDataset\n'), ((3695, 3732), 'functools.reduce', 'reduce', (['(lambda acc, x: acc + x)', 'y', '[]'], {}), '(lambda acc, x: acc + x, y, [])\n', (3701, 3732), False, 'from functools import reduce, partial\n'), ((6615, 6662), 'sklearn.metrics.classification_report', 'classification_report', (['self.y_test', 'predictions'], {}), '(self.y_test, predictions)\n', (6636, 6662), False, 'from sklearn.metrics import hamming_loss, f1_score, classification_report\n'), ((3761, 3798), 'functools.reduce', 'reduce', (['(lambda acc, x: acc + x)', 'y', '[]'], {}), '(lambda acc, x: acc + x, y, [])\n', (3767, 3798), False, 'from functools import reduce, partial\n'), ((7501, 7544), 'skai.utils.multilabel_prediction', 'multilabel_prediction', (['preds'], {'threshold': '(0.5)'}), '(preds, threshold=0.5)\n', (7522, 7544), False, 'from skai.utils import get_classification_type, weights_init, multilabel_prediction\n'), ((7638, 7677), 'sklearn.metrics.f1_score', 'f1_score', (['targs', 'preds'], {'average': '"""micro"""'}), "(targs, preds, average='micro')\n", (7646, 7677), False, 'from sklearn.metrics import hamming_loss, f1_score, classification_report\n'), ((8022, 8045), 'torch.from_numpy', 'torch.from_numpy', (['preds'], {}), '(preds)\n', (8038, 8045), False, 'import torch\n'), ((8047, 8070), 'torch.from_numpy', 'torch.from_numpy', (['targs'], {}), '(targs)\n', (8063, 8070), False, 'import torch\n')] |
# Read the flat digit stream describing the layered image (Space Image
# Format: consecutive width*height digit layers).
with open('input.txt', 'r') as f:
    image_data = f.read()
# print(image_data)
# Fixed image dimensions: each layer holds width * height digits.
width = 25
height = 6
elements_per_im = width * height
class Layer:
    """One layer of a Space Image Format image.

    Pixels are stored row-major in ``layer``; ``counts`` caches how many
    0, 1 and 2 digits the layer contains.
    """

    def __init__(self, data, layer_no, width=25):
        """Parse *data* (a string of digit characters, newlines ignored)
        into a flat pixel list.

        width: row length in pixels; defaults to 25 to match the script's
        global image width, so existing two-argument callers are unchanged.
        """
        self.layer = [int(ch) for ch in data if ch != '\n']
        self.layer_no = layer_no
        # FIX: store the width instead of relying on the module-level
        # ``width`` global, which made pixel() unusable in isolation.
        self.width = width
        # Digit frequencies for the 0/1/2 values the puzzle cares about
        # (one pass per digit instead of three throwaway filtered lists).
        self.counts = {digit: self.layer.count(digit) for digit in (0, 1, 2)}

    def pixel(self, r, c):
        """Return the pixel at row *r*, column *c*.

        Row-major indexing; equivalent to the original opaque expression
        ``r + c + (width - 1) * r``.
        """
        return self.layer[r * self.width + c]
layers = []
# Slice the flat digit stream into consecutive fixed-size layers.
# NOTE(review): len(image_data)-1 presumably skips a trailing newline in
# the input file — confirm.
for layer_no, i in enumerate(range(0, len(image_data)-1, elements_per_im)):
    layer = image_data[i:i+elements_per_im]
    layers.append(Layer(layer, layer_no))
# Part 1: on the layer with the fewest zeros, report (#ones * #twos).
zeros = [l.counts[0] for l in layers]
layer_with_least_zeros = zeros.index(min(zeros))
layer = layers[layer_with_least_zeros]
print(layer.counts[1] * layer.counts[2])
# Part 2: composite the layers front-to-back; 2 is transparent, 0/1 opaque.
final_im = []
for r in range(height):
    for c in range(width):
        pixels = [l.pixel(r, c) for l in layers]
        # The first non-transparent (non-2) pixel in the stack wins.
        # NOTE(review): a pixel that is 2 in every layer appends nothing,
        # which would make final_im too short for the reshape below —
        # presumably cannot happen with valid puzzle input.
        for p in pixels:
            if p == 0:
                final_im.append(p)
                break
            elif p == 1:
                final_im.append(p)
                break
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# Scale the 0/1 pixels to 0/255 grayscale and display the decoded message.
im = np.array(final_im).reshape(height, width)*255
plt.imshow(im)
plt.show()
print(im)
# im = Image.fromarray(im).show()
| [
"numpy.array",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
] | [((1369, 1383), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (1379, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1392, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1341), 'numpy.array', 'np.array', (['final_im'], {}), '(final_im)\n', (1331, 1341), True, 'import numpy as np\n')] |
import math
import numbers
from typing import Optional
import numpy as np
from fairseq.data.audio.feature_transforms import (
AudioFeatureTransform,
register_audio_feature_transform,
)
@register_audio_feature_transform("specaugment")
class SpecAugmentTransform(AudioFeatureTransform):
    """SpecAugment (https://arxiv.org/abs/1904.08779)

    Applies (optional) time warping, frequency masking and time masking to
    a 2-D spectrogram of shape (num_frames, num_freqs).
    """
    @classmethod
    def from_config_dict(cls, config=None):
        # Build a transform from a (possibly missing) config dict; every
        # field falls back to a value that disables the corresponding step.
        _config = {} if config is None else config
        return SpecAugmentTransform(
            _config.get("time_warp_W", 0),
            _config.get("freq_mask_N", 0),
            _config.get("freq_mask_F", 0),
            _config.get("time_mask_N", 0),
            _config.get("time_mask_T", 0),
            _config.get("time_mask_p", 0.0),
            _config.get("mask_value", None),
        )
    def __init__(
        self,
        time_warp_w: int = 0,
        freq_mask_n: int = 0,
        freq_mask_f: int = 0,
        time_mask_n: int = 0,
        time_mask_t: int = 0,
        time_mask_p: float = 0.0,
        mask_value: Optional[float] = 0.0,
    ):
        """Store SpecAugment hyper-parameters.

        time_warp_w: warp distance W; 0 disables time warping.
        freq_mask_n / freq_mask_f: number and max width of frequency masks.
        time_mask_n / time_mask_t: number and max width of time masks.
        time_mask_p: caps time-mask width at this fraction of the utterance.
        mask_value: fill value for masked cells; None means "use the
            spectrogram's mean", computed per call.
        """
        # Sanity checks
        assert mask_value is None or isinstance(
            mask_value, numbers.Number
        ), f"mask_value (type: {type(mask_value)}) must be None or a number"
        if freq_mask_n > 0:
            assert freq_mask_f > 0, (
                f"freq_mask_F ({freq_mask_f}) "
                f"must be larger than 0 when doing freq masking."
            )
        if time_mask_n > 0:
            assert time_mask_t > 0, (
                f"time_mask_T ({time_mask_t}) must be larger than 0 when "
                f"doing time masking."
            )
        self.time_warp_w = time_warp_w
        self.freq_mask_n = freq_mask_n
        self.freq_mask_f = freq_mask_f
        self.time_mask_n = time_mask_n
        self.time_mask_t = time_mask_t
        self.time_mask_p = time_mask_p
        self.mask_value = mask_value
    def __repr__(self):
        # Human-readable parameter dump, e.g. for logged transform configs.
        return (
            self.__class__.__name__
            + "("
            + ", ".join(
                [
                    f"time_warp_w={self.time_warp_w}",
                    f"freq_mask_n={self.freq_mask_n}",
                    f"freq_mask_f={self.freq_mask_f}",
                    f"time_mask_n={self.time_mask_n}",
                    f"time_mask_t={self.time_mask_t}",
                    f"time_mask_p={self.time_mask_p}",
                ]
            )
            + ")"
        )
    def __call__(self, spectrogram):
        """Return an augmented copy of *spectrogram* (frames x freqs).

        The input array is never modified; degenerate inputs are returned
        unchanged. Uses the global numpy RNG, so the draw order below is
        part of reproducibility — do not reorder.
        """
        assert len(spectrogram.shape) == 2, "spectrogram must be a 2-D tensor."
        distorted = spectrogram.copy() # make a copy of input spectrogram.
        num_frames = spectrogram.shape[0] # or 'tau' in the paper.
        num_freqs = spectrogram.shape[1] # or 'miu' in the paper.
        mask_value = self.mask_value
        if mask_value is None: # if no value was specified, use local mean.
            mask_value = spectrogram.mean()
        if num_frames == 0:
            return spectrogram
        if num_freqs < self.freq_mask_f:
            # NOTE(review): this early return also skips time warping and
            # time masking whenever the frequency axis is narrower than
            # freq_mask_F — confirm that is intended rather than skipping
            # only the frequency masks.
            return spectrogram
        if self.time_warp_w > 0:
            if 2 * self.time_warp_w < num_frames:
                import cv2
                # Pick a warp center w0 and a shift w, then stretch the
                # segment before w0 to length w0+w and squeeze the rest so
                # the total frame count is preserved.
                w0 = np.random.randint(self.time_warp_w, num_frames - self.time_warp_w)
                w = np.random.randint(0, self.time_warp_w)
                upper, lower = distorted[:w0, :], distorted[w0:, :]
                upper = cv2.resize(
                    upper, dsize=(num_freqs, w0 + w), interpolation=cv2.INTER_LINEAR
                )
                lower = cv2.resize(
                    lower,
                    dsize=(num_freqs, num_frames - w0 - w),
                    interpolation=cv2.INTER_LINEAR,
                )
                distorted = np.concatenate((upper, lower), axis=0)
        # Frequency masking: freq_mask_n masks of width in [0, freq_mask_f).
        for _i in range(self.freq_mask_n):
            f = np.random.randint(0, self.freq_mask_f)
            f0 = np.random.randint(0, num_freqs - f)
            if f != 0:
                distorted[:, f0 : f0 + f] = mask_value
        # Time masking: width additionally capped at p * num_frames.
        max_time_mask_t = min(
            self.time_mask_t, math.floor(num_frames * self.time_mask_p)
        )
        if max_time_mask_t < 1:
            return distorted
        for _i in range(self.time_mask_n):
            t = np.random.randint(0, max_time_mask_t)
            t0 = np.random.randint(0, num_frames - t)
            if t != 0:
                distorted[t0 : t0 + t, :] = mask_value
        return distorted
| [
"cv2.resize",
"fairseq.data.audio.feature_transforms.register_audio_feature_transform",
"math.floor",
"numpy.random.randint",
"numpy.concatenate"
] | [((208, 255), 'fairseq.data.audio.feature_transforms.register_audio_feature_transform', 'register_audio_feature_transform', (['"""specaugment"""'], {}), "('specaugment')\n", (240, 255), False, 'from fairseq.data.audio.feature_transforms import AudioFeatureTransform, register_audio_feature_transform\n'), ((3984, 4022), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.freq_mask_f'], {}), '(0, self.freq_mask_f)\n', (4001, 4022), True, 'import numpy as np\n'), ((4041, 4076), 'numpy.random.randint', 'np.random.randint', (['(0)', '(num_freqs - f)'], {}), '(0, num_freqs - f)\n', (4058, 4076), True, 'import numpy as np\n'), ((4222, 4263), 'math.floor', 'math.floor', (['(num_frames * self.time_mask_p)'], {}), '(num_frames * self.time_mask_p)\n', (4232, 4263), False, 'import math\n'), ((4401, 4438), 'numpy.random.randint', 'np.random.randint', (['(0)', 'max_time_mask_t'], {}), '(0, max_time_mask_t)\n', (4418, 4438), True, 'import numpy as np\n'), ((4457, 4493), 'numpy.random.randint', 'np.random.randint', (['(0)', '(num_frames - t)'], {}), '(0, num_frames - t)\n', (4474, 4493), True, 'import numpy as np\n'), ((3317, 3383), 'numpy.random.randint', 'np.random.randint', (['self.time_warp_w', '(num_frames - self.time_warp_w)'], {}), '(self.time_warp_w, num_frames - self.time_warp_w)\n', (3334, 3383), True, 'import numpy as np\n'), ((3405, 3443), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.time_warp_w'], {}), '(0, self.time_warp_w)\n', (3422, 3443), True, 'import numpy as np\n'), ((3538, 3614), 'cv2.resize', 'cv2.resize', (['upper'], {'dsize': '(num_freqs, w0 + w)', 'interpolation': 'cv2.INTER_LINEAR'}), '(upper, dsize=(num_freqs, w0 + w), interpolation=cv2.INTER_LINEAR)\n', (3548, 3614), False, 'import cv2\n'), ((3680, 3774), 'cv2.resize', 'cv2.resize', (['lower'], {'dsize': '(num_freqs, num_frames - w0 - w)', 'interpolation': 'cv2.INTER_LINEAR'}), '(lower, dsize=(num_freqs, num_frames - w0 - w), interpolation=cv2\n .INTER_LINEAR)\n', (3690, 3774), 
False, 'import cv2\n'), ((3882, 3920), 'numpy.concatenate', 'np.concatenate', (['(upper, lower)'], {'axis': '(0)'}), '((upper, lower), axis=0)\n', (3896, 3920), True, 'import numpy as np\n')] |
"""Class definition for the SMOS Soil Mositure data type.
.. module:: smos
:synopsis: Definition of the SMOS class
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from soilmoist import Soilmoist
import dbio
import os
import netCDF4 as netcdf
from scipy.spatial import KDTree
import numpy as np
from datetime import datetime, timedelta
import datasets
import logging
table = "soilmoist.smos"
def dates(dbname):
    """Return the dates available in the SMOS soil-moisture table of *dbname*."""
    return datasets.dates(dbname, table)
def regridNearestNeighbor(lat, lon, res):
    """Generate a grid of nearest-neighbor source indices from *lat*, *lon*
    arrays for the specified resolution *res*.

    Returns:
        tuple ``(pos, grid_lat, grid_lon)`` where *pos* maps each cell of the
        regular ``grid_lat`` x ``grid_lon`` grid (raveled row-major) to the
        index of its nearest (lon, lat) source point.
    """
    x, y = np.meshgrid(lon, lat)
    # KDTree needs an (n, 2) array of points. The previous ``zip(...)``
    # argument only worked under Python 2, where zip returned a list; in
    # Python 3 it is an iterator and scipy rejects it.
    tree = KDTree(np.column_stack((x.ravel(), y.ravel())))
    # Negative step: assumes lat is ordered with lat[0] > lat[-1]
    # (north-to-south) — TODO confirm against the caller's orientation.
    grid_lat = np.arange(round(lat[0], 1), round(lat[-1], 1) - res, -res)
    grid_lon = np.arange(round(lon[0], 1), round(lon[-1], 1) + res, res)
    grid_x, grid_y = np.meshgrid(grid_lon, grid_lat)
    _, pos = tree.query(np.column_stack((grid_x.ravel(), grid_y.ravel())))
    return pos, grid_lat, grid_lon
def download(dbname, dt, bbox=None):
    """Download SMOS soil moisture data for a set of dates *dt*
    and import them into the PostGIS database *dbname*. Optionally
    uses a bounding box to limit the region with [minlon, minlat, maxlon, maxlat]."""
    log = logging.getLogger(__name__)
    res = 0.25
    # OPeNDAP endpoint of the BEC near-real-time SMOS product
    # (credentials are embedded in the URL).
    url = "http://rheas:rheasjpl@cp34-bec.cmima.csic.es/thredds/dodsC/NRTSM001D025A_ALL"
    f = netcdf.Dataset(url)
    lat = f.variables['lat'][::-1]  # swap latitude orientation to northwards
    lon = f.variables['lon'][:]
    i1, i2, j1, j2 = datasets.spatialSubset(lat, lon, res, bbox)
    # Row indices into the *unflipped* SM array that correspond to the
    # flipped-latitude subset [i1:i2].
    smi1 = len(lat) - i2 - 1
    smi2 = len(lat) - i1 - 1
    lat = lat[i1:i2]
    lon = lon[j1:j2]
    t0 = datetime(2010, 1, 12)  # initial date of SMOS data
    # Clamp the requested date range to the available record [t0, t0 + nt).
    t1 = (dt[0] - t0).days
    if t1 < 0:
        log.warning("Reseting start date to {0}".format(t0.strftime("%Y-%m-%d")))
        t1 = 0
    t2 = (dt[-1] - t0).days + 1
    nt, _, _ = f.variables['SM'].shape
    if t2 > nt:
        t2 = nt
        log.warning("Reseting end date to {0}".format((t0 + timedelta(t2)).strftime("%Y-%m-%d")))
    ti = range(t1, t2)
    sm = f.variables['SM'][ti, smi1:smi2, j1:j2]
    sm = sm[:, ::-1, :]  # flip latitude dimension in data array
    # FIXME: Use spatially variable observation error
    # smv = f.variables['VARIANCE_SM'][ti, i1:i2, j1:j2][:, ::-1, :]
    pos, smlat, smlon = regridNearestNeighbor(lat, lon, res)
    # Resample each daily slice onto the regular grid, write it out as a
    # GeoTIFF, ingest it into the database, and delete the temporary file.
    for tj in range(sm.shape[0]):
        smdata = sm[tj, :, :].ravel()[pos].reshape((len(smlat), len(smlon)))
        filename = dbio.writeGeotif(smlat, smlon, res, smdata)
        t = t0 + timedelta(ti[tj])
        dbio.ingest(dbname, filename, t, table, False)
        os.remove(filename)
class Smos(Soilmoist):
    """SMOS satellite soil-moisture data source."""

    def __init__(self, uncert=None):
        """Initialize the SMOS object, optionally with uncertainty model *uncert*."""
        super(Smos, self).__init__(uncert)
        # Product specifics: PostGIS target table, fixed observation noise,
        # and the 0.25-degree product resolution.
        self.tablename = "soilmoist.smos"
        self.stddev = 0.01
        self.res = 0.25
| [
"datasets.dates",
"netCDF4.Dataset",
"os.remove",
"numpy.meshgrid",
"datasets.spatialSubset",
"dbio.writeGeotif",
"datetime.datetime",
"dbio.ingest",
"datetime.timedelta",
"logging.getLogger"
] | [((422, 451), 'datasets.dates', 'datasets.dates', (['dbname', 'table'], {}), '(dbname, table)\n', (436, 451), False, 'import datasets\n'), ((637, 658), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (648, 658), True, 'import numpy as np\n'), ((868, 899), 'numpy.meshgrid', 'np.meshgrid', (['grid_lon', 'grid_lat'], {}), '(grid_lon, grid_lat)\n', (879, 899), True, 'import numpy as np\n'), ((1264, 1291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1281, 1291), False, 'import logging\n'), ((1404, 1423), 'netCDF4.Dataset', 'netcdf.Dataset', (['url'], {}), '(url)\n', (1418, 1423), True, 'import netCDF4 as netcdf\n'), ((1555, 1598), 'datasets.spatialSubset', 'datasets.spatialSubset', (['lat', 'lon', 'res', 'bbox'], {}), '(lat, lon, res, bbox)\n', (1577, 1598), False, 'import datasets\n'), ((1708, 1729), 'datetime.datetime', 'datetime', (['(2010)', '(1)', '(12)'], {}), '(2010, 1, 12)\n', (1716, 1729), False, 'from datetime import datetime, timedelta\n'), ((2550, 2593), 'dbio.writeGeotif', 'dbio.writeGeotif', (['smlat', 'smlon', 'res', 'smdata'], {}), '(smlat, smlon, res, smdata)\n', (2566, 2593), False, 'import dbio\n'), ((2637, 2683), 'dbio.ingest', 'dbio.ingest', (['dbname', 'filename', 't', 'table', '(False)'], {}), '(dbname, filename, t, table, False)\n', (2648, 2683), False, 'import dbio\n'), ((2692, 2711), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2701, 2711), False, 'import os\n'), ((2611, 2628), 'datetime.timedelta', 'timedelta', (['ti[tj]'], {}), '(ti[tj])\n', (2620, 2628), False, 'from datetime import datetime, timedelta\n'), ((2061, 2074), 'datetime.timedelta', 'timedelta', (['t2'], {}), '(t2)\n', (2070, 2074), False, 'from datetime import datetime, timedelta\n')] |
import os
import cv2
import argparse
import numpy as np
import scipy.signal
from PIL import Image
from skimage import exposure
import matplotlib.pyplot as plt
def config_parse():
    """Parse the SPD-MEF command-line options.

    Returns:
        argparse.Namespace holding the exposure-fusion hyper-parameters.
    """
    parser = argparse.ArgumentParser(description='Parameters Parser')
    # The input directory is the only mandatory argument.
    parser.add_argument('--input_path', type=str, required=True,
                        help='Specify the dir where the img seq are stored.')
    # (flag, type, default, help) for every optional hyper-parameter.
    optional_args = [
        ('--p', int, 4, 'Specify the exponent parameter.'),
        ('--gsig', float, 0.2, 'Specify the spread of the global Gaussian.'),
        ('--lsig', float, 0.5, 'Specify the spread of the local Gaussian.'),
        ('--patch_size', int, 21, 'Specify the patch size.'),
        ('--step_size', int, 2, 'Specify the stride size.'),
        ('--exp_thres', float, 0.01,
         'Specify the exposure threshold to determine under- and over-exposed patches.'),
        ('--cons_thres', float, 0.1, 'Specify the IMF threshold.'),
        ('--strt_thres', float, 0.8,
         'Specify the structure consistency threshold.'),
    ]
    for flag, kind, default, text in optional_args:
        parser.add_argument(flag, type=kind, default=default, help=text)
    return parser.parse_args()
def reorder_by_lum(seq_imgs):
    """Return the exposure stack ordered from darkest to brightest.

    The last axis of the 4-D input indexes the exposures; they are permuted
    (on a float64 view of the input) by ascending total luminance.
    """
    imgs = np.double(seq_imgs)
    # Total intensity of each exposure: collapse height, width and channels.
    totals = imgs.sum(axis=(0, 1, 2))
    order = np.argsort(totals)
    snapshot = np.copy(imgs)
    for dst, src in enumerate(order):
        imgs[:, :, :, dst] = snapshot[:, :, :, src]
    return imgs
def down_sample(seq_imgs, max_size=None):
    """Downscale an image sequence so its longer side is at most *max_size*.

    Args:
        seq_imgs: 4-D array (H, W, C, N) holding N images.
        max_size: maximum length of the longer image side; defaults to 512.

    Returns:
        The sequence as a float64 array, resized with bilinear interpolation
        when the longer side exceeds *max_size*, otherwise unchanged.
    """
    if max_size is None:  # `is None`, not `== None` (PEP 8)
        max_size = 512
    seq_imgs = np.double(seq_imgs)
    img_height, img_width = seq_imgs.shape[:2]
    # cv2.resize takes dsize as (width, height).
    if img_height >= img_width and img_height > max_size:
        sample_factor = img_height / max_size
        target = (int(np.floor(img_width / sample_factor)), max_size)
    elif img_height < img_width and img_width > max_size:
        sample_factor = img_width / max_size
        target = (max_size, int(np.floor(img_height / sample_factor)))
    else:
        return seq_imgs
    down_spl_imgs = np.zeros((target[1], target[0], seq_imgs.shape[2], seq_imgs.shape[3]))
    for i in range(seq_imgs.shape[-1]):
        # The original passed cv2.INTER_LINEAR positionally, which lands in
        # cv2.resize's `dst` slot; it must be the `interpolation` keyword.
        down_spl_imgs[:, :, :, i] = cv2.resize(
            seq_imgs[:, :, :, i], target, interpolation=cv2.INTER_LINEAR
        )
    return down_spl_imgs
def select_ref_idx(seq_imgs, win_size=None, expos_thres=None):
    """Pick the reference exposure: the one with the fewest badly exposed patches.

    Args:
        seq_imgs: 4-D array (H, W, 3, N) of N exposures, values in [0, 1].
        win_size: side of the box filter for local averaging (default 3).
        expos_thres: local means below this (or above ``1 - expos_thres``)
            count as under-/over-exposed (default 0.01).

    Returns:
        Index of the selected reference exposure. With exactly three
        exposures the middle one (index 1) is used directly, since the
        stack is pre-sorted by luminance.
    """
    if win_size is None:  # `is None`, not `== None` (PEP 8)
        win_size = 3
    if expos_thres is None:
        expos_thres = 0.01
    seq_imgs = np.double(seq_imgs)
    seq_imgs = reorder_by_lum(seq_imgs)
    size_4 = seq_imgs.shape[3]
    if size_4 == 3:
        return 1
    # Local mean via a normalized box filter; count poorly exposed responses
    # per exposure and pick the one with the fewest.
    window = np.ones((win_size, win_size, 3))
    window = window / window.sum()
    positive = np.zeros((size_4, 1))
    for i in range(size_4):
        conved_img = scipy.signal.convolve(seq_imgs[:, :, :, i], window, 'valid')
        # A single np.sum over the boolean array replaces the redundant
        # nested np.sum(np.sum(...)) of the original.
        positive[i] = np.sum((conved_img < expos_thres) | (conved_img > 1 - expos_thres))
    return np.argmin(positive)
def imf_consistency(mean_intens, ref_img_idx, consistency_thres):
    """Flag patches whose intensity-mapped mean agrees with the reference.

    Each exposure's local-mean map is histogram-matched to the reference
    exposure's map; entries whose matched value deviates from the reference
    by more than *consistency_thres* are marked inconsistent (0).  The
    reference exposure itself is marked fully consistent (all ones).
    """
    consistency = np.zeros_like(mean_intens)
    consistency[:, :, ref_img_idx] = np.ones(mean_intens.shape[:2])
    ref_map = mean_intens[:, :, ref_img_idx]
    for idx in range(mean_intens.shape[-1]):
        if idx == ref_img_idx:
            continue
        matched = exposure.match_histograms(mean_intens[:, :, idx], ref_map)
        consistency[:, :, idx] = np.abs(matched - ref_map) <= consistency_thres
    return consistency
def spd_mef(args, seq_imgs):
    """Fuse a multi-exposure stack into one image with SPD-MEF.

    Structural-patch-decomposition multi-exposure fusion: each patch is
    decomposed into signal strength, signal structure and mean intensity,
    the components are fused with consistency-aware weights, and the
    patches are re-assembled with overlap averaging.

    Args:
        args: parsed namespace from ``config_parse`` (p, gsig, lsig,
            patch_size, step_size, exp_thres, cons_thres, strt_thres).
        seq_imgs: 4-D array (H, W, 3, N) of exposures in [0, 1].

    Returns:
        Fused image of shape (H, W, 3), clipped to [0, 1].
    """
    exp_param = args.p
    glb_gauss = args.gsig
    lcl_gauss = args.lsig
    patch_size = args.patch_size
    step_size = args.step_size
    exp_thres = args.exp_thres
    cons_thres = args.cons_thres
    strt_thres = args.strt_thres
    C = 0.03 ** 2 / 2 # From Structural Similarity (MEF-SSIM)
    window = np.ones((patch_size, patch_size))
    window_3d = np.repeat(np.expand_dims(window, axis=2), 3, axis=2)
    window = window / window.sum()
    window_3d = window_3d / window_3d.sum()
    seq_imgs = np.double(seq_imgs)
    [size_1, size_2, size_3, size_4] = seq_imgs.shape
    # Number of valid top-left patch positions along each axis.
    x_idx_max = size_1 - patch_size + 1
    y_idx_max = size_2 - patch_size + 1
    ref_img_idx = select_ref_idx(seq_imgs)
    # Generating Pseudo Exposures: for every non-reference exposure, add a
    # histogram-matched copy of the reference, giving 2N-1 maps in total.
    exp_img_num = 2 * size_4 - 1
    seq_exp_imgs = np.zeros((size_1, size_2, size_3, exp_img_num))
    # NOTE(review): the ':3' slice hard-codes three input exposures even
    # though size_4 is taken from the data — confirm for N != 3 stacks.
    seq_exp_imgs[:, :, :, :3] = seq_imgs
    count = 0
    for i in range(size_4):
        if i != ref_img_idx:
            exp_img_tmp = exposure.match_histograms(seq_exp_imgs[:, :, :, ref_img_idx], seq_exp_imgs[:, :, :, i])
            exp_img_tmp = np.maximum(np.minimum(exp_img_tmp, 1), 0)
            seq_exp_imgs[:, :, :, count + size_4] = exp_img_tmp
            count += 1
    # Computing Statistics
    glb_mean_intens = np.zeros((x_idx_max, y_idx_max, exp_img_num)) # Global Mean Intensity
    for i in range(exp_img_num):
        exp_img = seq_exp_imgs[:, :, :, i]
        glb_mean_intens[:, :, i] = np.ones((x_idx_max, y_idx_max)) * exp_img.mean()
    temp = np.zeros((x_idx_max, y_idx_max, size_3))
    lcl_mean_intens = np.zeros((x_idx_max, y_idx_max, exp_img_num)) # Local Mean Intensity
    lcl_intens_square = np.zeros((x_idx_max, y_idx_max, exp_img_num))
    # Per-patch means via 2-D correlation with the normalized box window.
    for i in range(exp_img_num):
        for j in range(size_3):
            temp[:, :, j] = scipy.signal.correlate2d(seq_exp_imgs[:, :, j, i], window, 'valid')
        lcl_mean_intens[:, :, i] = temp.mean(axis=2)
        lcl_intens_square[:, :, i] = lcl_mean_intens[:, :, i] ** 2
    sig_strg_square = np.zeros((x_idx_max, y_idx_max, exp_img_num)) # Signal Strength from Variance
    for i in range(exp_img_num):
        for j in range(size_3):
            temp[:, :, j] = scipy.signal.correlate2d(seq_exp_imgs[:, :, j, i] ** 2, window, 'valid') - lcl_intens_square[:, :, i]
        sig_strg_square[:, :, i] = temp.mean(axis=2)
    sig_strength = np.sqrt(np.maximum(sig_strg_square, 0))
    sig_strength = sig_strength * np.sqrt(patch_size ** 2 * size_3) + 0.001 # Signal Strength
    # Computing Structural Consistency Map (pairwise, upper triangle only).
    stru_consist_map = np.zeros((x_idx_max, y_idx_max, size_4, size_4))
    for i in range(size_4):
        for j in range(i+1, size_4):
            cross_intens = lcl_mean_intens[:, :, i] * lcl_mean_intens[:, :, j]
            cross_strg = scipy.signal.convolve(seq_exp_imgs[:, :, :, i] * seq_exp_imgs[:, :, :, j], window_3d, 'valid').squeeze() - cross_intens
            stru_consist_map[:, :, i, j] = (cross_strg + C) / (sig_strength[:, :, i] * sig_strength[:, :, j] + C) # The third term in MEF-SSIM
    stru_consist_map = np.maximum(stru_consist_map, 0)
    stru_ref_map = stru_consist_map[:, :, ref_img_idx, :].squeeze() + stru_consist_map[:, :, :, ref_img_idx]
    stru_ref_map[:, :, ref_img_idx] = np.ones((x_idx_max, y_idx_max)) # Add Reference
    # Binarize the consistency map at strt_thres.
    stru_ref_map[stru_ref_map <= strt_thres] = 0
    stru_ref_map[stru_ref_map > strt_thres] = 1
    # Where the reference patch is itself badly exposed, trust all exposures.
    intens_idx_map = (lcl_mean_intens[:, :, ref_img_idx] < exp_thres) | (lcl_mean_intens[:, :, ref_img_idx] > 1 - exp_thres)
    intens_idx_map = np.repeat(np.expand_dims(intens_idx_map, axis=2), size_4, axis=2)
    stru_ref_map[intens_idx_map] = 1
    # Build a diamond-like 41x41 structuring element for morphological
    # opening of the binary consistency maps.
    struct_elem = np.zeros((41, 41))
    n = 11
    for i in range(struct_elem.shape[0]):
        n = n - 1 if np.abs(i - int(struct_elem.shape[1] / 2)) > 10 else 0
        for j in range(np.abs(n), struct_elem.shape[1] - np.abs(n)):
            struct_elem[i, j] = 1
    for i in range(size_4):
        stru_ref_map[:, :, i] = cv2.morphologyEx(stru_ref_map[:, :, i], cv2.MORPH_OPEN, struct_elem.astype(np.uint8))
    imf_ref_map = imf_consistency(lcl_mean_intens[:, :, :size_4], ref_img_idx, cons_thres)
    ref_map = stru_ref_map * imf_ref_map
    # For inconsistent exposures, fall back to their pseudo-exposure copy.
    exp_ref_map = np.zeros((x_idx_max, y_idx_max, 2 * size_4 - 1))
    exp_ref_map[:, :, :size_4] = ref_map
    count = 0
    for i in range(size_4):
        if i != ref_img_idx:
            exp_ref_map[:, :, count + size_4] = 1 - exp_ref_map[:, :, i]
            count += 1
    # Computing Weighing Map
    mean_intens_map = np.exp(-0.5 * ((glb_mean_intens - 0.5) ** 2 / glb_gauss ** 2 + (lcl_mean_intens - 0.5) ** 2 / lcl_gauss ** 2)) # Mean Intensity Weighing Map
    mean_intens_map = mean_intens_map * exp_ref_map
    normalizer = np.sum(mean_intens_map, axis=2)
    mean_intens_map = mean_intens_map / np.repeat(np.expand_dims(normalizer, axis=2), exp_img_num, axis=2)
    stru_consist_map = sig_strength ** exp_param # Signal Structure Weighting Map
    stru_consist_map = stru_consist_map * exp_ref_map + 0.001
    normalizer = np.sum(stru_consist_map, axis=2)
    stru_consist_map = stru_consist_map / np.repeat(np.expand_dims(normalizer, axis=2), exp_img_num, axis=2)
    max_exp = sig_strength * exp_ref_map # Desired Signal Strength
    max_exp = np.max(max_exp, axis=2)
    # Computing Index Matrix for Main Loop: per patch, either the original
    # exposure index or its pseudo-exposure replacement.
    idx_matrix = np.zeros((x_idx_max, y_idx_max, size_4))
    idx_matrix[:, :, ref_img_idx] = ref_img_idx
    for i in range(size_4):
        if i < ref_img_idx:
            idx_matrix[:, :, i] = exp_ref_map[:, :, i] * i + exp_ref_map[:, :, i + size_4] * (i + size_4)
        elif i > ref_img_idx:
            idx_matrix[:, :, i] = exp_ref_map[:, :, i] * i + exp_ref_map[:, :, i + size_4 - 1] * (i + size_4 - 1)
    # Main Loop for SPD-MEF: fuse each patch, accumulate with overlap counts.
    final_img = np.zeros((size_1, size_2, size_3))
    count_map = np.zeros((size_1, size_2, size_3))
    count_window = np.ones((patch_size, patch_size, size_3))
    # Patch positions with stride step_size, plus one final position so the
    # last row/column of patches is always covered.
    x_idx_tmp = [x for x in range(x_idx_max)]
    x_idx = x_idx_tmp[:x_idx_max:step_size]
    x_idx.append(x_idx_tmp[x_idx[-1] + 1:x_idx_max][0])
    y_idx_tmp = [y for y in range(y_idx_max)]
    y_idx = y_idx_tmp[:y_idx_max:step_size]
    y_idx.append(y_idx_tmp[y_idx[-1] + 1:y_idx_max][0])
    offset = patch_size
    for row in range(len(x_idx)):
        for col in range(len(y_idx)):
            i = x_idx[row]
            j = y_idx[col]
            blocks = seq_exp_imgs[i:i + offset, j:j + offset, :, list(idx_matrix[i, j, :].astype(np.uint8))]
            r_block = np.zeros((patch_size, patch_size, size_3))
            # Weighted sum of mean-removed, strength-normalized structures.
            for k in range(size_4):
                r_block = r_block + stru_consist_map[i, j, k] * (blocks[:, :, :, k] - lcl_mean_intens[i, j, k]) / sig_strength[i, j, k]
            if np.linalg.norm(r_block.flatten()) > 0:
                r_block = r_block / np.linalg.norm(r_block.flatten()) * max_exp[i, j]
            r_block = r_block + np.sum(mean_intens_map[i, j, :] * lcl_mean_intens[i, j, :])
            final_img[i:i + offset, j:j + offset, :] = final_img[i:i + offset, j:j + offset, :] + r_block
            count_map[i:i + offset, j:j + offset, :] = count_map[i:i + offset, j:j + offset, :] + count_window
    final_img = final_img / count_map
    final_img = np.maximum(np.minimum(final_img, 1), 0)
    return final_img
def main():
    """Load an exposure sequence from disk, fuse it with SPD-MEF, save and show."""
    args = config_parse()
    seq_img_path = args.input_path
    # List the directory once and sort: os.listdir was previously called
    # repeatedly and its unspecified order made the exposure ordering (and
    # therefore the fused result) filesystem-dependent.
    img_names = sorted(os.listdir(seq_img_path))
    first = np.array(Image.open(os.path.join(seq_img_path, img_names[0])))
    seq_img_RGB = np.zeros((first.shape[0], first.shape[1], first.shape[2], len(img_names)))
    for i, name in enumerate(img_names):
        # Normalize 8-bit images to [0, 1].
        seq_img_RGB[:, :, :, i] = np.double(
            np.array(Image.open(os.path.join(seq_img_path, name)))) / 255.0
    seq_img_RGB = reorder_by_lum(seq_img_RGB)
    seq_img_RGB = down_sample(seq_img_RGB, 1024)
    enhanced_img = spd_mef(args, seq_img_RGB)
    plt.imsave('fused_img.png', enhanced_img)
    plt.title('Fused IMG')
    plt.imshow(enhanced_img)
    plt.show()
if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.title",
"numpy.maximum",
"argparse.ArgumentParser",
"numpy.double",
"numpy.sum",
"numpy.abs",
"numpy.floor",
"numpy.ones",
"numpy.argmin",
"numpy.argsort",
"numpy.exp",
"matplotlib.pyplot.imsave",
"numpy.zeros_like",
"numpy.copy",
"matplotlib.pyplot.imshow",
"numpy.m... | [((194, 250), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parameters Parser"""'}), "(description='Parameters Parser')\n", (217, 250), False, 'import argparse\n'), ((1331, 1350), 'numpy.double', 'np.double', (['seq_imgs'], {}), '(seq_imgs)\n', (1340, 1350), True, 'import numpy as np\n'), ((1497, 1520), 'numpy.argsort', 'np.argsort', (['seq_sum_lum'], {}), '(seq_sum_lum)\n', (1507, 1520), True, 'import numpy as np\n'), ((1537, 1554), 'numpy.copy', 'np.copy', (['seq_imgs'], {}), '(seq_imgs)\n', (1544, 1554), True, 'import numpy as np\n'), ((1784, 1803), 'numpy.double', 'np.double', (['seq_imgs'], {}), '(seq_imgs)\n', (1793, 1803), True, 'import numpy as np\n'), ((2946, 2965), 'numpy.double', 'np.double', (['seq_imgs'], {}), '(seq_imgs)\n', (2955, 2965), True, 'import numpy as np\n'), ((3591, 3617), 'numpy.zeros_like', 'np.zeros_like', (['mean_intens'], {}), '(mean_intens)\n', (3604, 3617), True, 'import numpy as np\n'), ((3651, 3681), 'numpy.ones', 'np.ones', (['mean_intens.shape[:2]'], {}), '(mean_intens.shape[:2])\n', (3658, 3681), True, 'import numpy as np\n'), ((4388, 4421), 'numpy.ones', 'np.ones', (['(patch_size, patch_size)'], {}), '((patch_size, patch_size))\n', (4395, 4421), True, 'import numpy as np\n'), ((4586, 4605), 'numpy.double', 'np.double', (['seq_imgs'], {}), '(seq_imgs)\n', (4595, 4605), True, 'import numpy as np\n'), ((4871, 4918), 'numpy.zeros', 'np.zeros', (['(size_1, size_2, size_3, exp_img_num)'], {}), '((size_1, size_2, size_3, exp_img_num))\n', (4879, 4918), True, 'import numpy as np\n'), ((5351, 5396), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, exp_img_num)'], {}), '((x_idx_max, y_idx_max, exp_img_num))\n', (5359, 5396), True, 'import numpy as np\n'), ((5594, 5634), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, size_3)'], {}), '((x_idx_max, y_idx_max, size_3))\n', (5602, 5634), True, 'import numpy as np\n'), ((5657, 5702), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, 
exp_img_num)'], {}), '((x_idx_max, y_idx_max, exp_img_num))\n', (5665, 5702), True, 'import numpy as np\n'), ((5751, 5796), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, exp_img_num)'], {}), '((x_idx_max, y_idx_max, exp_img_num))\n', (5759, 5796), True, 'import numpy as np\n'), ((6102, 6147), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, exp_img_num)'], {}), '((x_idx_max, y_idx_max, exp_img_num))\n', (6110, 6147), True, 'import numpy as np\n'), ((6650, 6698), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, size_4, size_4)'], {}), '((x_idx_max, y_idx_max, size_4, size_4))\n', (6658, 6698), True, 'import numpy as np\n'), ((7155, 7186), 'numpy.maximum', 'np.maximum', (['stru_consist_map', '(0)'], {}), '(stru_consist_map, 0)\n', (7165, 7186), True, 'import numpy as np\n'), ((7335, 7366), 'numpy.ones', 'np.ones', (['(x_idx_max, y_idx_max)'], {}), '((x_idx_max, y_idx_max))\n', (7342, 7366), True, 'import numpy as np\n'), ((7748, 7766), 'numpy.zeros', 'np.zeros', (['(41, 41)'], {}), '((41, 41))\n', (7756, 7766), True, 'import numpy as np\n'), ((8297, 8345), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, 2 * size_4 - 1)'], {}), '((x_idx_max, y_idx_max, 2 * size_4 - 1))\n', (8305, 8345), True, 'import numpy as np\n'), ((8607, 8722), 'numpy.exp', 'np.exp', (['(-0.5 * ((glb_mean_intens - 0.5) ** 2 / glb_gauss ** 2 + (lcl_mean_intens -\n 0.5) ** 2 / lcl_gauss ** 2))'], {}), '(-0.5 * ((glb_mean_intens - 0.5) ** 2 / glb_gauss ** 2 + (\n lcl_mean_intens - 0.5) ** 2 / lcl_gauss ** 2))\n', (8613, 8722), True, 'import numpy as np\n'), ((8818, 8849), 'numpy.sum', 'np.sum', (['mean_intens_map'], {'axis': '(2)'}), '(mean_intens_map, axis=2)\n', (8824, 8849), True, 'import numpy as np\n'), ((9120, 9152), 'numpy.sum', 'np.sum', (['stru_consist_map'], {'axis': '(2)'}), '(stru_consist_map, axis=2)\n', (9126, 9152), True, 'import numpy as np\n'), ((9345, 9368), 'numpy.max', 'np.max', (['max_exp'], {'axis': '(2)'}), '(max_exp, axis=2)\n', (9351, 9368), True, 'import 
numpy as np\n'), ((9430, 9470), 'numpy.zeros', 'np.zeros', (['(x_idx_max, y_idx_max, size_4)'], {}), '((x_idx_max, y_idx_max, size_4))\n', (9438, 9470), True, 'import numpy as np\n'), ((9871, 9905), 'numpy.zeros', 'np.zeros', (['(size_1, size_2, size_3)'], {}), '((size_1, size_2, size_3))\n', (9879, 9905), True, 'import numpy as np\n'), ((9922, 9956), 'numpy.zeros', 'np.zeros', (['(size_1, size_2, size_3)'], {}), '((size_1, size_2, size_3))\n', (9930, 9956), True, 'import numpy as np\n'), ((9976, 10017), 'numpy.ones', 'np.ones', (['(patch_size, patch_size, size_3)'], {}), '((patch_size, patch_size, size_3))\n', (9983, 10017), True, 'import numpy as np\n'), ((11996, 12037), 'matplotlib.pyplot.imsave', 'plt.imsave', (['"""fused_img.png"""', 'enhanced_img'], {}), "('fused_img.png', enhanced_img)\n", (12006, 12037), True, 'import matplotlib.pyplot as plt\n'), ((12042, 12064), 'matplotlib.pyplot.title', 'plt.title', (['"""Fused IMG"""'], {}), "('Fused IMG')\n", (12051, 12064), True, 'import matplotlib.pyplot as plt\n'), ((12069, 12093), 'matplotlib.pyplot.imshow', 'plt.imshow', (['enhanced_img'], {}), '(enhanced_img)\n', (12079, 12093), True, 'import matplotlib.pyplot as plt\n'), ((12098, 12108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12106, 12108), True, 'import matplotlib.pyplot as plt\n'), ((3118, 3150), 'numpy.ones', 'np.ones', (['(win_size, win_size, 3)'], {}), '((win_size, win_size, 3))\n', (3125, 3150), True, 'import numpy as np\n'), ((3209, 3230), 'numpy.zeros', 'np.zeros', (['(size_4, 1)'], {}), '((size_4, 1))\n', (3217, 3230), True, 'import numpy as np\n'), ((3469, 3488), 'numpy.argmin', 'np.argmin', (['positive'], {}), '(positive)\n', (3478, 3488), True, 'import numpy as np\n'), ((4448, 4478), 'numpy.expand_dims', 'np.expand_dims', (['window'], {'axis': '(2)'}), '(window, axis=2)\n', (4462, 4478), True, 'import numpy as np\n'), ((6456, 6486), 'numpy.maximum', 'np.maximum', (['sig_strg_square', '(0)'], {}), '(sig_strg_square, 0)\n', (6466, 
6486), True, 'import numpy as np\n'), ((7637, 7675), 'numpy.expand_dims', 'np.expand_dims', (['intens_idx_map'], {'axis': '(2)'}), '(intens_idx_map, axis=2)\n', (7651, 7675), True, 'import numpy as np\n'), ((11322, 11346), 'numpy.minimum', 'np.minimum', (['final_img', '(1)'], {}), '(final_img, 1)\n', (11332, 11346), True, 'import numpy as np\n'), ((3839, 3903), 'skimage.exposure.match_histograms', 'exposure.match_histograms', (['mean_intens[:, :, i]', 'ref_mean_intens'], {}), '(mean_intens[:, :, i], ref_mean_intens)\n', (3864, 3903), False, 'from skimage import exposure\n'), ((3923, 3965), 'numpy.abs', 'np.abs', (['(temp_mean_intens - ref_mean_intens)'], {}), '(temp_mean_intens - ref_mean_intens)\n', (3929, 3965), True, 'import numpy as np\n'), ((5058, 5150), 'skimage.exposure.match_histograms', 'exposure.match_histograms', (['seq_exp_imgs[:, :, :, ref_img_idx]', 'seq_exp_imgs[:, :, :, i]'], {}), '(seq_exp_imgs[:, :, :, ref_img_idx], seq_exp_imgs[\n :, :, :, i])\n', (5083, 5150), False, 'from skimage import exposure\n'), ((5533, 5564), 'numpy.ones', 'np.ones', (['(x_idx_max, y_idx_max)'], {}), '((x_idx_max, y_idx_max))\n', (5540, 5564), True, 'import numpy as np\n'), ((6522, 6555), 'numpy.sqrt', 'np.sqrt', (['(patch_size ** 2 * size_3)'], {}), '(patch_size ** 2 * size_3)\n', (6529, 6555), True, 'import numpy as np\n'), ((7918, 7927), 'numpy.abs', 'np.abs', (['n'], {}), '(n)\n', (7924, 7927), True, 'import numpy as np\n'), ((8900, 8934), 'numpy.expand_dims', 'np.expand_dims', (['normalizer'], {'axis': '(2)'}), '(normalizer, axis=2)\n', (8914, 8934), True, 'import numpy as np\n'), ((9205, 9239), 'numpy.expand_dims', 'np.expand_dims', (['normalizer'], {'axis': '(2)'}), '(normalizer, axis=2)\n', (9219, 9239), True, 'import numpy as np\n'), ((10592, 10634), 'numpy.zeros', 'np.zeros', (['(patch_size, patch_size, size_3)'], {}), '((patch_size, patch_size, size_3))\n', (10600, 10634), True, 'import numpy as np\n'), ((11677, 11701), 'os.listdir', 'os.listdir', 
(['seq_img_path'], {}), '(seq_img_path)\n', (11687, 11701), False, 'import os\n'), ((3382, 3449), 'numpy.sum', 'np.sum', (['((conved_img < expos_thres) | (conved_img > 1 - expos_thres))'], {}), '((conved_img < expos_thres) | (conved_img > 1 - expos_thres))\n', (3388, 3449), True, 'import numpy as np\n'), ((5183, 5209), 'numpy.minimum', 'np.minimum', (['exp_img_tmp', '(1)'], {}), '(exp_img_tmp, 1)\n', (5193, 5209), True, 'import numpy as np\n'), ((7952, 7961), 'numpy.abs', 'np.abs', (['n'], {}), '(n)\n', (7958, 7961), True, 'import numpy as np\n'), ((10979, 11038), 'numpy.sum', 'np.sum', (['(mean_intens_map[i, j, :] * lcl_mean_intens[i, j, :])'], {}), '(mean_intens_map[i, j, :] * lcl_mean_intens[i, j, :])\n', (10985, 11038), True, 'import numpy as np\n'), ((11626, 11650), 'os.listdir', 'os.listdir', (['seq_img_path'], {}), '(seq_img_path)\n', (11636, 11650), False, 'import os\n'), ((1383, 1422), 'numpy.sum', 'np.sum', (['seq_imgs'], {'axis': '(0)', 'keepdims': '(True)'}), '(seq_imgs, axis=0, keepdims=True)\n', (1389, 1422), True, 'import numpy as np\n'), ((2006, 2041), 'numpy.floor', 'np.floor', (['(img_width / sample_factor)'], {}), '(img_width / sample_factor)\n', (2014, 2041), True, 'import numpy as np\n'), ((11509, 11533), 'os.listdir', 'os.listdir', (['seq_img_path'], {}), '(seq_img_path)\n', (11519, 11533), False, 'import os\n'), ((2207, 2242), 'numpy.floor', 'np.floor', (['(img_width / sample_factor)'], {}), '(img_width / sample_factor)\n', (2215, 2242), True, 'import numpy as np\n'), ((2415, 2451), 'numpy.floor', 'np.floor', (['(img_height / sample_factor)'], {}), '(img_height / sample_factor)\n', (2423, 2451), True, 'import numpy as np\n'), ((2636, 2672), 'numpy.floor', 'np.floor', (['(img_height / sample_factor)'], {}), '(img_height / sample_factor)\n', (2644, 2672), True, 'import numpy as np\n'), ((11809, 11833), 'os.listdir', 'os.listdir', (['seq_img_path'], {}), '(seq_img_path)\n', (11819, 11833), False, 'import os\n')] |
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
import glob
import os.path as osp
import numpy as np
# import json_tricks as json
import json
import pickle
import logging
import os
import copy
from torch.utils.data import Dataset
import cv2
import torch
import torchvision.transforms as transforms
import glob
import scipy.io as scio
from tqdm import tqdm
# train the campus
# NOTE(review): hard-coded absolute path, machine-specific; also `file`
# shadows a builtin name — consider renaming when the code is next touched.
file = '/Extra/panzhiyu/CampusSeq1/calibration_campus.json'
# kpdata = scio.loadmat(file)
with open(file,'rb') as f:
    test_data = json.load(f)
# CMU Panoptic scene names used for training and validation splits.
TRAIN_LIST = [
    '160422_ultimatum1',
    '160224_haggling1',
    '160226_haggling1',
    '161202_haggling1',
    '160906_ian1',
    '160906_ian2',
    '160906_ian3',
    '160906_band1',
    '160906_band2',
    '160906_band3',
]
VAL_LIST = ['160906_pizza1', '160422_haggling1', '160906_ian5', '160906_band4']
CAMERA_NUMBER = (0,1,2)
# Skeleton edges (joint-index pairs) over the 15-keypoint body model.
CONNS = [[0, 1],
        [0, 2],
        [0, 3],
        [3, 4],
        [4, 5],
        [0, 9],
        [9, 10],
        [10, 11],
        [2, 6],
        [2, 12],
        [6, 7],
        [7, 8],
        [12, 13],
        [13, 14]] # only 15 keypoints
# Presumably a joint re-ordering map between keypoint conventions — TODO confirm.
Related = [14,13,12,6,7,8,11,10,9,3,4,5,0,1]
# '/Extra/panzhiyu/CMU_data/'
class Campus_Depth:
    """Meta-file generator for a multi-view pose dataset.

    Loads ground-truth 3D poses from ``actorsGT.mat`` and per-camera
    calibration from ``calibration_shelf.json``, projects every person's
    15-joint skeleton into each camera view, and pickles the per-frame,
    per-view annotation records to ``shelf_meta_multi.pkl``.

    NOTE(review): despite the class name, the active configuration targets
    the *Shelf* dataset (image size 1032x776, 5 views, 4 people); the
    Campus-specific lines are kept commented out.
    """
    def __init__(self, image_folder, view_set, istrain = True): # TODO add keypoint foldercfg,
        """
        Args:
            image_folder: dataset root containing actorsGT.mat,
                calibration_shelf.json and the CameraN image folders
            view_set: iterable of camera ids to process (e.g. [0,1,2,3,4])
            istrain: accepted but currently unused (train/val split code
                is commented out below)
        """
        self.view_set = view_set
        # self.cam_list = [(0,x) for x in self.view_set] # get the HD images
        self.view_num = len(self.view_set)
        self.image_folder = image_folder
        # self.transform = transform
        self.conns = CONNS
        # self.image_size = np.array([360,288]) # Campus
        self.image_size = np.array([1032,776]) # Shelf
        self.num_joints = 15 #cfg.NETWORK.NUM_JOINTS
        self.paf_num = len(CONNS)
        # self.sigma = 4 #cfg.NETWORK.SIGMA
        # self.single_size = 512*424
        # self.istrain = is_train
        # if is_train:
        #     self.scene_list = TRAIN_LIST
        # else:
        #     self.scene_list = VAL_LIST
        # read k_calibration / k_sync; aligning once against depth is enough
        # self.scene_num = len(self.scene_list)
        # self.calib_data = []
        # for scene in self.scene_list:
        #     with open(os.path.join(image_folder,scene,f'calibration_{scene}.json'),'rb') as dfile:
        #         self.calib_data.append(json.load(dfile))
        # calculate the total frame idx for the specific idx
        # read in the keypoint file as the order
        # self.kp3d_list = [osp.join(keypoint_folder, x, 'hdPose3d_stage1_coco19') for x in self.scene_list]
        self.kpfile = osp.join(image_folder, 'actorsGT.mat')
        self.kpdata = scio.loadmat(self.kpfile)
        self.kp3d = self.kpdata['actor3D'][0] # it has max people 3
        self.dataset_len = len(self.kp3d[0]) # totally 2000 frames
        tris_list = list(range(self.dataset_len))
        # not used
        test_list = list(range(350,471))
        test_list.extend(list(range(650,751)))
        # get the camera parameters
        self.camera_parameters = (self._get_cam())
        self.max_people = 4 # campus 3 shelf 4
    def _get_cam(self):
        """Load per-camera calibration (R, t, K, distCoef) for every camera
        in ``self.view_set`` from calibration_shelf.json.

        Returns:
            dict mapping camera id -> {'R','t','K','distCoef'} with numpy
            arrays; distCoef is all-zero (Shelf calibration has no
            distortion coefficients in the file).
        """
        # cam_file = osp.join(self.image_folder, 'calibration_campus.json') # campus
        cam_file = osp.join(self.image_folder, 'calibration_shelf.json') # shelf
        with open(cam_file) as cfile:
            calib = json.load(cfile)
        cameras = {}
        # for cam in calib['cameras']:
        #     if (cam['panel'], cam['node']) in self.view_set: # camera selection by (panel, node); the view is determined by the node
        #         sel_cam = {}
        #         sel_cam['K'] = np.array(cam['K'])
        #         sel_cam['distCoef'] = np.array(cam['distCoef'])
        #         sel_cam['R'] = np.array(cam['R']) #.dot(M) # the rotation matrix may need extra processing (coordinate convention vs. projection matrix mismatch?)
        #         sel_cam['t'] = np.array(cam['t']).reshape((3, 1))
        #         cameras[(cam['panel'], cam['node'])] = sel_cam
        for cam in self.view_set:
            sel_cam = {}
            sel_cam['R'] = np.array(calib[f'{cam}']['R'])
            sel_cam['t'] = np.array(calib[f'{cam}']['t'])
            sel_cam['distCoef'] = np.zeros(5)
            # intrinsics assembled from scalar fx/fy/cx/cy entries
            sel_cam['K'] = np.array([[calib[f'{cam}']['fx'],0,calib[f'{cam}']['cx']],[0,calib[f'{cam}']['fy'], calib[f'{cam}']['cy']],[0,0,1]])
            # sel_cam['fx'] = np.array(calib[f'{cam}']['fx'])
            # sel_cam['fy'] = np.array(calib[f'{cam}']['fy'])
            # sel_cam['cx'] = np.array(calib[f'{cam}']['cx'])
            # sel_cam['cy'] = np.array(calib[f'{cam}']['cy'])
            cameras[cam] = sel_cam
        return cameras
    def __len__(self):
        # NOTE(review): self.num_pers is never assigned anywhere in this
        # class, so calling len() would raise AttributeError — dead code,
        # as the trailing comment admits.
        return int(np.sum(self.num_pers)) # useless
    def __generate_meta__(self):
        """Build and pickle the annotation meta file.

        For every frame and every camera view, projects each person's 3D
        skeleton into the image, keeps people with >=3 joints inside the
        frame, and stores per-view records (2D pose, depth, visibility,
        camera-space 3D, intrinsics, image path, camera params). Frames
        missing any of the 5 views are dropped. Output:
        <image_folder>/shelf_meta_multi.pkl
        """
        meta = dict()
        meta['root'] = list()
        for index in tqdm(range(self.dataset_len)):
            # findtable = self.until_sum - (index + 1)
            # scene_index = np.min(np.where(findtable>=0)[0])
            # until_num = np.sum(self.num_pers[:scene_index])
            # kp_idx = index - until_num
            # kp_file = self.anno_kp_files[scene_index][kp_idx]
            # with open(kp_file,'rb') as kp:
            #     try:
            #         kp_row_data = json.load(kp)
            #     except:
            #         continue
            # kp3d_body_data = kp_row_data['bodies']
            # nposes = len(kp3d_body_data)
            # if nposes == 0:
            #     continue
            # number_people = torch.tensor(nposes)
            # camera_paras = self.camera_parameters[scene_index]
            per_info_media = dict()
            for cam in self.view_set:
                # generate meta file
                # different views
                per_info = dict()
                per_info['img_height'] = int(self.image_size[1])
                per_info['img_width'] = int(self.image_size[0])
                per_info['dataset'] = 'SHELF' # for campus
                per_info['isValidation'] = 1
                # judge the 2D valid?
                view_para = self.camera_parameters[cam]
                K = view_para['K']
                R = view_para['R']
                T = view_para['t']
                Kd = view_para['distCoef']
                # D = np.concatenate([R,T],axis=-1)
                # P = K @ D
                pose_info = []
                pose_3d = []
                for n in range(self.max_people):
                    # extract the kp_info
                    extracted_pose = self.kp3d[n][index][0] #[ped][frame][0]
                    # fewer than 14 joints means the person is absent in
                    # this frame's annotation
                    if len(extracted_pose) < 14:
                        continue
                    # process the 3d pose into 15 joints and cmu form
                    pose3d = np.zeros((15,3),np.float32)
                    # mid-hip (slot 2) = average of source joints 2 and 3
                    mid_hip = np.mean(np.concatenate([extracted_pose[2:3,:],extracted_pose[3:4,:]],axis=0),axis=0)
                    pose3d[2,:] = mid_hip
                    pose3d[Related,:] = extracted_pose
                    pose2d, depth_val, points_3d_cam, fx, fy, cx, cy = self.__projectjointsPoints__(pose3d.transpose(),K, R, T, Kd)
                    x_check = np.bitwise_and(pose2d[:, 0] >= 0,
                                             pose2d[:, 0] <= self.image_size[0] - 1) #(15,) bool
                    y_check = np.bitwise_and(pose2d[:, 1] >= 0,
                                             pose2d[:, 1] <= self.image_size[1] - 1)
                    check = np.bitwise_and(x_check, y_check)
                    valid_flag = np.sum(check)
                    if valid_flag < 3: # consider it do not has person in this view
                        continue
                    # visibility flags: 2 = inside image, 0 = outside
                    anno_vis = 2 * np.ones(15)
                    anno_vis[np.logical_not(check)] = 0
                    anno_vis = np.expand_dims(anno_vis,axis = -1)
                    # per-joint record: [u, v, depth, vis, X, Y, Z, fx, fy, cx, cy]
                    joints_info = np.concatenate([pose2d, depth_val, anno_vis, points_3d_cam, fx, fy, cx, cy], axis=-1)
                    pose_info.append(joints_info[None,...])
                    pose_3d.append(pose3d[None,...])
                # skip this view entirely if nobody was visible
                if len(pose_info) == 0:
                    continue
                pose_info = np.concatenate(pose_info,axis=0)
                pose_3d = np.concatenate(pose_3d, axis=0)
                per_info['bodys_3d'] = pose_3d
                per_info['bodys'] = pose_info
                # per_info['img_paths'] = osp.join(self.image_folder, f'Camera{cam}',f'campus4-c{cam}-{index:0>5d}.png')
                per_info['img_paths'] = osp.join(self.image_folder, f'Camera{cam}',f'img_{index:0>6d}.png')
                # meta['root'].append(per_info)
                per_info['cam'] = view_para
                per_info_media[cam] = per_info
            # keep only frames where every camera produced a record
            if len(per_info_media) < 5: # 3 views for campus and 5 views for shelf
                continue
            meta['root'].append(per_info_media)
            # prefix = '{:02d}_{:02d}'.format(cam_node[0], cam_node[1])
            # postfix = osp.basename(kp_file).replace('body3DScene', '')
            # file_name = osp.join(self.image_folder,self.scene_list[scene_index],'hdImgs',prefix,
            #                         prefix+postfix)
            # file_name = file_name.replace('json','jpg')
            # per_info['img_paths'] = file_name
            # pose_info = []
            # for n in range(nposes):
            #     pose3d = np.array(kp3d_body_data[n]['joints19']).reshape((-1, 4))
            #     # process the joint into 15 keypoints
            #     pose3d = pose3d[:15,:].copy() # only consider 15 keypoints
            #     anno_vis = 2 * (pose3d[:, -1] > 0.1)
            #     pose3d_proc = pose3d[...,:3].copy()
            #     pose2d, depth_val, points_3d_cam, fx, fy, cx, cy = self.__projectjointsPoints__(pose3d_proc.transpose(),K, R, T, Kd) # get the corresponding joint depth value
            #     x_check = np.bitwise_and(pose2d[:, 0] >= 0,
            #                             pose2d[:, 0] <= self.image_size[0] - 1) #(15,) bool
            #     y_check = np.bitwise_and(pose2d[:, 1] >= 0,
            #                             pose2d[:, 1] <= self.image_size[1] - 1)
            #     check = np.bitwise_and(x_check, y_check) # check bool se
            #     anno_vis[np.logical_not(check)] = 0
            #     anno_vis = np.expand_dims(anno_vis,axis = -1)
            #     joints_info = np.concatenate([pose2d, depth_val, anno_vis, points_3d_cam, fx, fy, cx, cy], axis=-1)
            #     pose_info.append(joints_info[None,...])
            # pose_info = np.concatenate(pose_info,axis=0)
            # per_info['bodys'] = pose_info
            # meta['root'].append(per_info)
        # if self.istrain:
        #     writen_file = osp.join(self.image_folder,'cmu_data_train.pkl')
        # else:
        writen_file = osp.join(self.image_folder,'shelf_meta_multi.pkl')
        with open(writen_file,'wb') as f:
            pickle.dump(meta, f)
    def __projectjointsPoints__(self, X, K, R, t, Kd):
        """
        Projects points X (3xN) using camera intrinsics K (3x3),
        extrinsics (R,t) and distortion parameters Kd=[k1,k2,p1,p2,k3].
        Roughly, x = K*(R*X + t) + distortion
        See http://docs.opencv.org/2.4/doc/tutorials/calib3d/camera_calibration/camera_calibration.html
        or cv2.projectPoints

        Returns:
            pose_2d: (N,2) pixel coordinates
            depth_val: (N,1) camera-space z of each joint
            points_3d_cam: (N,3) points in camera coordinates
            fx, fy, cx, cy: (N,1) intrinsics broadcast per joint
        """
        x = (np.dot(R, X) + t) # panoptic to kinect color scaling cm to m
        points_3d_cam = x.copy().transpose() #[J,3]
        depth_val = x[2:3,:].transpose() # the depth value of current joint, metrix is meter [N,1]
        # perspective divide (epsilon guards z == 0)
        x[0:2, :] = x[0:2, :] / (x[2, :] + 1e-5)
        r = x[0, :] * x[0, :] + x[1, :] * x[1, :]
        # apply the radial/tangential lens distortion model
        x[0, :] = x[0, :] * (1 + Kd[0] * r + Kd[1] * r * r + Kd[4] * r * r * r
                             ) + 2 * Kd[2] * x[0, :] * x[1, :] + Kd[3] * (
                                 r + 2 * x[0, :] * x[0, :])
        x[1, :] = x[1, :] * (1 + Kd[0] * r + Kd[1] * r * r + Kd[4] * r * r * r
                             ) + 2 * Kd[3] * x[0, :] * x[1, :] + Kd[2] * (
                                 r + 2 * x[1, :] * x[1, :])
        # NOTE(review): the second line reads x[0, :] AFTER it was overwritten
        # above; harmless when K[1, 0] == 0 (as built in _get_cam) but wrong
        # for a general K — confirm before reusing with skewed intrinsics.
        x[0, :] = K[0, 0] * x[0, :] + K[0, 1] * x[1, :] + K[0, 2]
        x[1, :] = K[1, 0] * x[0, :] + K[1, 1] * x[1, :] + K[1, 2]
        # depth_val_norm = depth_val * W / f # absolute depth sensing
        pose_2d = x[:2,:].copy().transpose()
        fx = K[0,0] * np.ones((pose_2d.shape[0],1))
        fy = K[1,1] * np.ones((pose_2d.shape[0],1))
        cx = K[0,2] * np.ones((pose_2d.shape[0],1))
        cy = K[1,2] * np.ones((pose_2d.shape[0],1))
        return pose_2d, depth_val, points_3d_cam, fx, fy, cx, cy
if __name__ == '__main__':
    # Entry point: generate the Shelf annotation pickle from a hard-coded
    # local dataset path using all 5 camera views.
    # img_path = '/Extra/panzhiyu/CampusSeq1/'
    img_path = '/Extra/panzhiyu/Shelf/'
    view_set = [0,1,2,3,4]
    depth_data_train = Campus_Depth(img_path, view_set)
    # depth_data_test = Campus_Depth(img_path,view_set)
    depth_data_train.__generate_meta__()
    # depth_data_test.__generate_meta__()
| [
"pickle.dump",
"json.load",
"numpy.sum",
"scipy.io.loadmat",
"numpy.logical_not",
"numpy.zeros",
"numpy.ones",
"numpy.expand_dims",
"numpy.array",
"numpy.bitwise_and",
"numpy.dot",
"os.path.join",
"numpy.concatenate"
] | [((580, 592), 'json.load', 'json.load', (['f'], {}), '(f)\n', (589, 592), False, 'import json\n'), ((1733, 1754), 'numpy.array', 'np.array', (['[1032, 776]'], {}), '([1032, 776])\n', (1741, 1754), True, 'import numpy as np\n'), ((2680, 2718), 'os.path.join', 'osp.join', (['image_folder', '"""actorsGT.mat"""'], {}), "(image_folder, 'actorsGT.mat')\n", (2688, 2718), True, 'import os.path as osp\n'), ((2742, 2767), 'scipy.io.loadmat', 'scio.loadmat', (['self.kpfile'], {}), '(self.kpfile)\n', (2754, 2767), True, 'import scipy.io as scio\n'), ((3336, 3389), 'os.path.join', 'osp.join', (['self.image_folder', '"""calibration_shelf.json"""'], {}), "(self.image_folder, 'calibration_shelf.json')\n", (3344, 3389), True, 'import os.path as osp\n'), ((11021, 11072), 'os.path.join', 'osp.join', (['self.image_folder', '"""shelf_meta_multi.pkl"""'], {}), "(self.image_folder, 'shelf_meta_multi.pkl')\n", (11029, 11072), True, 'import os.path as osp\n'), ((3456, 3472), 'json.load', 'json.load', (['cfile'], {}), '(cfile)\n', (3465, 3472), False, 'import json\n'), ((4100, 4130), 'numpy.array', 'np.array', (["calib[f'{cam}']['R']"], {}), "(calib[f'{cam}']['R'])\n", (4108, 4130), True, 'import numpy as np\n'), ((4158, 4188), 'numpy.array', 'np.array', (["calib[f'{cam}']['t']"], {}), "(calib[f'{cam}']['t'])\n", (4166, 4188), True, 'import numpy as np\n'), ((4224, 4235), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (4232, 4235), True, 'import numpy as np\n'), ((4263, 4391), 'numpy.array', 'np.array', (["[[calib[f'{cam}']['fx'], 0, calib[f'{cam}']['cx']], [0, calib[f'{cam}'][\n 'fy'], calib[f'{cam}']['cy']], [0, 0, 1]]"], {}), "([[calib[f'{cam}']['fx'], 0, calib[f'{cam}']['cx']], [0, calib[\n f'{cam}']['fy'], calib[f'{cam}']['cy']], [0, 0, 1]])\n", (4271, 4391), True, 'import numpy as np\n'), ((4730, 4751), 'numpy.sum', 'np.sum', (['self.num_pers'], {}), '(self.num_pers)\n', (4736, 4751), True, 'import numpy as np\n'), ((11135, 11155), 'pickle.dump', 'pickle.dump', (['meta', 'f'], 
{}), '(meta, f)\n', (11146, 11155), False, 'import pickle\n'), ((11566, 11578), 'numpy.dot', 'np.dot', (['R', 'X'], {}), '(R, X)\n', (11572, 11578), True, 'import numpy as np\n'), ((12593, 12623), 'numpy.ones', 'np.ones', (['(pose_2d.shape[0], 1)'], {}), '((pose_2d.shape[0], 1))\n', (12600, 12623), True, 'import numpy as np\n'), ((12645, 12675), 'numpy.ones', 'np.ones', (['(pose_2d.shape[0], 1)'], {}), '((pose_2d.shape[0], 1))\n', (12652, 12675), True, 'import numpy as np\n'), ((12697, 12727), 'numpy.ones', 'np.ones', (['(pose_2d.shape[0], 1)'], {}), '((pose_2d.shape[0], 1))\n', (12704, 12727), True, 'import numpy as np\n'), ((12749, 12779), 'numpy.ones', 'np.ones', (['(pose_2d.shape[0], 1)'], {}), '((pose_2d.shape[0], 1))\n', (12756, 12779), True, 'import numpy as np\n'), ((8234, 8267), 'numpy.concatenate', 'np.concatenate', (['pose_info'], {'axis': '(0)'}), '(pose_info, axis=0)\n', (8248, 8267), True, 'import numpy as np\n'), ((8293, 8324), 'numpy.concatenate', 'np.concatenate', (['pose_3d'], {'axis': '(0)'}), '(pose_3d, axis=0)\n', (8307, 8324), True, 'import numpy as np\n'), ((8579, 8647), 'os.path.join', 'osp.join', (['self.image_folder', 'f"""Camera{cam}"""', 'f"""img_{index:0>6d}.png"""'], {}), "(self.image_folder, f'Camera{cam}', f'img_{index:0>6d}.png')\n", (8587, 8647), True, 'import os.path as osp\n'), ((6819, 6848), 'numpy.zeros', 'np.zeros', (['(15, 3)', 'np.float32'], {}), '((15, 3), np.float32)\n', (6827, 6848), True, 'import numpy as np\n'), ((7222, 7295), 'numpy.bitwise_and', 'np.bitwise_and', (['(pose2d[:, 0] >= 0)', '(pose2d[:, 0] <= self.image_size[0] - 1)'], {}), '(pose2d[:, 0] >= 0, pose2d[:, 0] <= self.image_size[0] - 1)\n', (7236, 7295), True, 'import numpy as np\n'), ((7387, 7460), 'numpy.bitwise_and', 'np.bitwise_and', (['(pose2d[:, 1] >= 0)', '(pose2d[:, 1] <= self.image_size[1] - 1)'], {}), '(pose2d[:, 1] >= 0, pose2d[:, 1] <= self.image_size[1] - 1)\n', (7401, 7460), True, 'import numpy as np\n'), ((7537, 7569), 'numpy.bitwise_and', 
'np.bitwise_and', (['x_check', 'y_check'], {}), '(x_check, y_check)\n', (7551, 7569), True, 'import numpy as np\n'), ((7603, 7616), 'numpy.sum', 'np.sum', (['check'], {}), '(check)\n', (7609, 7616), True, 'import numpy as np\n'), ((7868, 7901), 'numpy.expand_dims', 'np.expand_dims', (['anno_vis'], {'axis': '(-1)'}), '(anno_vis, axis=-1)\n', (7882, 7901), True, 'import numpy as np\n'), ((7937, 8026), 'numpy.concatenate', 'np.concatenate', (['[pose2d, depth_val, anno_vis, points_3d_cam, fx, fy, cx, cy]'], {'axis': '(-1)'}), '([pose2d, depth_val, anno_vis, points_3d_cam, fx, fy, cx, cy],\n axis=-1)\n', (7951, 8026), True, 'import numpy as np\n'), ((6885, 6957), 'numpy.concatenate', 'np.concatenate', (['[extracted_pose[2:3, :], extracted_pose[3:4, :]]'], {'axis': '(0)'}), '([extracted_pose[2:3, :], extracted_pose[3:4, :]], axis=0)\n', (6899, 6957), True, 'import numpy as np\n'), ((7769, 7780), 'numpy.ones', 'np.ones', (['(15)'], {}), '(15)\n', (7776, 7780), True, 'import numpy as np\n'), ((7810, 7831), 'numpy.logical_not', 'np.logical_not', (['check'], {}), '(check)\n', (7824, 7831), True, 'import numpy as np\n')] |
"""Integrity checks and tests for specific features used"""
import copy
import logging
import numbers
import re
from typing import Optional, Iterable, Any
from collections import Counter
import libsbml
import numpy as np
import pandas as pd
import sympy as sp
import petab_MS
from . import (core, parameters, sbml, measurements)
from .C import * # noqa: F403
logger = logging.getLogger(__name__)
def _check_df(df: pd.DataFrame, req_cols: Iterable, name: str) -> None:
"""Check if given columns are present in DataFrame
Arguments:
df: Dataframe to check
req_cols: Column names which have to be present
name: Name of the DataFrame to be included in error message
Raises:
AssertionError: if a column is missing
"""
cols_set = df.columns.values
missing_cols = set(req_cols) - set(cols_set)
if missing_cols:
raise AssertionError(
f"DataFrame {name} requires the columns {missing_cols}.")
def assert_no_leading_trailing_whitespace(
        names_list: Iterable[str], name: str) -> None:
    """Check that no element of the iterable has surrounding whitespace.

    Non-string elements are ignored.

    Arguments:
        names_list: strings to check for whitespace
        name: name of `names_list` for error messages

    Raises:
        AssertionError: if there is leading or trailing whitespace
    """
    edge_ws = re.compile(r'(?:^\s)|(?:\s$)')
    for idx, entry in enumerate(names_list):
        if isinstance(entry, str) and edge_ws.search(entry):
            raise AssertionError(
                f"Whitespace around {name}[{idx}] = '{entry}'.")
def check_condition_df(
        df: pd.DataFrame, sbml_model: Optional[libsbml.Model] = None) -> None:
    """Run sanity checks on PEtab condition table

    Arguments:
        df: PEtab condition DataFrame
        sbml_model: SBML Model for additional checking of parameter IDs

    Raises:
        AssertionError: in case of problems
    """
    # No columns are strictly required beyond the index
    required_cols = []
    _check_df(df, required_cols, "condition")

    # The table must be indexed by conditionId
    if df.index.name != CONDITION_ID:
        raise AssertionError(
            f"Condition table has wrong index {df.index.name}."
            f"expected {CONDITION_ID}.")

    check_ids(df.index.values, kind='condition')

    for col in required_cols:
        if not np.issubdtype(df[col].dtype, np.number):
            assert_no_leading_trailing_whitespace(df[col].values, col)

    if sbml_model is None:
        return
    # Every column (except conditionName) must name an SBML entity
    for col in df.columns:
        if col == CONDITION_NAME:
            continue
        if sbml_model.getParameter(col) is not None \
                or sbml_model.getSpecies(col) is not None \
                or sbml_model.getCompartment(col) is not None:
            continue
        raise AssertionError(
            "Condition table contains column for unknown entity '"
            f"{col}'. Column names must match parameter, "
            "species or compartment IDs specified in the SBML model.")
def check_measurement_df(df: pd.DataFrame,
                         observable_df: Optional[pd.DataFrame] = None) -> None:
    """Run sanity checks on PEtab measurement table

    Checks required/optional columns for whitespace, and, if an observable
    table is given, that all referenced observables exist, that overrides
    match placeholder counts, and that log-scaled observables only have
    positive measurements.

    Arguments:
        df: PEtab measurement DataFrame
        observable_df: PEtab observable DataFrame for checking if measurements
            are compatible with observable transformations.

    Raises:
        AssertionError, ValueError: in case of problems
    """
    _check_df(df, MEASUREMENT_DF_REQUIRED_COLS, "measurement")
    # whitespace checks only make sense for non-numeric columns
    for column_name in MEASUREMENT_DF_REQUIRED_COLS:
        if not np.issubdtype(df[column_name].dtype, np.number):
            assert_no_leading_trailing_whitespace(
                df[column_name].values, column_name)
    for column_name in MEASUREMENT_DF_OPTIONAL_COLS:
        if column_name in df \
                and not np.issubdtype(df[column_name].dtype, np.number):
            assert_no_leading_trailing_whitespace(
                df[column_name].values, column_name)
    if observable_df is not None:
        # Check all observables are defined
        observables_defined = set(observable_df.index.values)
        observables_used = set(df[OBSERVABLE_ID])
        observables_undefined = observables_used - observables_defined
        if observables_undefined:
            raise ValueError(f"Observables {observables_undefined} used in "
                             "measurement table but not defined in "
                             "observables table.")
        if OBSERVABLE_TRANSFORMATION in observable_df:
            # Check for positivity of measurements in case of
            # log-transformation
            assert_unique_observable_ids(observable_df)
            # If the above is not checked, in the following loop
            # trafo may become a pandas Series
            for measurement, obs_id in zip(df[MEASUREMENT], df[OBSERVABLE_ID]):
                trafo = observable_df.loc[obs_id, OBSERVABLE_TRANSFORMATION]
                if measurement <= 0.0 and trafo in [LOG, LOG10]:
                    raise ValueError('Measurements with observable '
                                     f'transformation {trafo} must be '
                                     f'positive, but {measurement} <= 0.')
    if observable_df is not None:
        assert_measured_observables_defined(df, observable_df)
        measurements.assert_overrides_match_parameter_count(
            df, observable_df)
def check_parameter_df(
        df: pd.DataFrame,
        sbml_model: Optional[libsbml.Model] = None,
        observable_df: Optional[pd.DataFrame] = None,
        measurement_df: Optional[pd.DataFrame] = None,
        condition_df: Optional[pd.DataFrame] = None) -> None:
    """Run sanity checks on PEtab parameter table

    Arguments:
        df: PEtab condition DataFrame
        sbml_model: SBML Model for additional checking of parameter IDs
        observable_df: PEtab observable table for additional checks
        measurement_df: PEtab measurement table for additional checks
        condition_df: PEtab condition table for additional checks

    Raises:
        AssertionError: in case of problems
    """
    _check_df(df, PARAMETER_DF_REQUIRED_COLS[1:], "parameter")
    # the table must be indexed by parameterId
    if not df.index.name == PARAMETER_ID:
        raise AssertionError(
            f"Parameter table has wrong index {df.index.name}."
            f"expected {PARAMETER_ID}.")
    check_ids(df.index.values, kind='parameter')
    for column_name in PARAMETER_DF_REQUIRED_COLS[1:]:  # 0 is PARAMETER_ID
        if not np.issubdtype(df[column_name].dtype, np.number):
            assert_no_leading_trailing_whitespace(
                df[column_name].values, column_name)
    # nominal value is generally optional, but required if any for any
    # parameter estimate != 1
    # (the `and` below is a plain Python short-circuit: if the estimate
    # column is not of string dtype the right operand collapses to False
    # and only the numeric `!= 1` comparison is used)
    non_estimated_par_ids = list(
        df.index[(df[ESTIMATE] != 1) | (
            pd.api.types.is_string_dtype(df[ESTIMATE])
            and df[ESTIMATE] != '1')])
    if non_estimated_par_ids:
        if NOMINAL_VALUE not in df:
            raise AssertionError("Parameter table contains parameters "
                                 f"{non_estimated_par_ids} that are not "
                                 "specified to be estimated, "
                                 f"but column {NOMINAL_VALUE} is missing.")
        try:
            # float() raises for non-numeric nominal values
            df.loc[non_estimated_par_ids, NOMINAL_VALUE].apply(float)
        except ValueError:
            raise AssertionError("Expected numeric values for "
                                 f"`{NOMINAL_VALUE}` in parameter table for "
                                 "all non-estimated parameters.")
    assert_parameter_id_is_string(df)
    assert_parameter_scale_is_valid(df)
    assert_parameter_bounds_are_numeric(df)
    assert_parameter_estimate_is_boolean(df)
    assert_unique_parameter_ids(df)
    check_parameter_bounds(df)
    assert_parameter_prior_type_is_valid(df)
    # cross-table consistency needs model + measurements + conditions
    if sbml_model and measurement_df is not None \
            and condition_df is not None:
        assert_all_parameters_present_in_parameter_df(
            df, sbml_model, observable_df, measurement_df, condition_df)
def check_observable_df(observable_df: pd.DataFrame) -> None:
    """Check validity of observable table

    Verifies required/optional columns, ID validity and uniqueness, noise
    distributions, and that observable and noise formulas parse with sympy.

    Arguments:
        observable_df: PEtab observable DataFrame

    Raises:
        AssertionError: in case of problems
    """
    _check_df(observable_df, OBSERVABLE_DF_REQUIRED_COLS[1:], "observable")
    check_ids(observable_df.index.values, kind='observable')
    # whitespace checks only apply to non-numeric columns
    for column_name in OBSERVABLE_DF_REQUIRED_COLS[1:]:
        if not np.issubdtype(observable_df[column_name].dtype, np.number):
            assert_no_leading_trailing_whitespace(
                observable_df[column_name].values, column_name)
    for column_name in OBSERVABLE_DF_OPTIONAL_COLS:
        if column_name in observable_df \
                and not np.issubdtype(observable_df[column_name].dtype,
                                      np.number):
            assert_no_leading_trailing_whitespace(
                observable_df[column_name].values, column_name)
    assert_noise_distributions_valid(observable_df)
    assert_unique_observable_ids(observable_df)
    # Check that formulas are parsable
    for row in observable_df.itertuples():
        obs = getattr(row, OBSERVABLE_FORMULA)
        try:
            sp.sympify(obs)
        except sp.SympifyError as e:
            raise AssertionError(f"Cannot parse expression '{obs}' "
                                 f"for observable {row.Index}: {e}")
        noise = getattr(row, NOISE_FORMULA)
        try:
            sp.sympify(noise)
        except sp.SympifyError as e:
            raise AssertionError(f"Cannot parse expression '{noise}' "
                                 f"for noise model for observable "
                                 f"{row.Index}: {e}")
def assert_all_parameters_present_in_parameter_df(
        parameter_df: pd.DataFrame,
        sbml_model: libsbml.Model,
        observable_df: pd.DataFrame,
        measurement_df: pd.DataFrame,
        condition_df: pd.DataFrame) -> None:
    """Ensure all required parameters are contained in the parameter table
    with no additional ones

    Arguments:
        parameter_df: PEtab parameter DataFrame
        sbml_model: PEtab SBML Model
        observable_df: PEtab observable table
        measurement_df: PEtab measurement table
        condition_df: PEtab condition table

    Raises:
        AssertionError: in case of problems
    """
    tables = dict(sbml_model=sbml_model, condition_df=condition_df,
                  observable_df=observable_df, measurement_df=measurement_df)
    required = parameters.get_required_parameters_for_parameter_table(**tables)
    allowed = parameters.get_valid_parameters_for_parameter_table(**tables)
    present = set(parameter_df.index)

    missing = required - present
    if missing:
        raise AssertionError('Missing parameter(s) in parameter table: '
                             + str(missing))

    extraneous = present - allowed
    if extraneous:
        raise AssertionError('Extraneous parameter(s) in parameter table: '
                             + str(extraneous))
def assert_measured_observables_defined(
        measurement_df: pd.DataFrame,
        observable_df: pd.DataFrame) -> None:
    """Check if all observables in the measurement table have been defined in
    the observable table

    Arguments:
        measurement_df: PEtab measurement table
        observable_df: PEtab observable table

    Raises:
        AssertionError: in case of problems
    """
    known = set(observable_df.index.values)
    referenced = set(measurement_df[OBSERVABLE_ID].values)
    unknown = referenced - known
    if unknown:
        raise AssertionError(
            "Undefined observables in measurement file: "
            f"{unknown}.")
def condition_table_is_parameter_free(condition_df: pd.DataFrame) -> bool:
    """Check if all entries in the condition table are numeric
    (no parameter IDs)

    Arguments:
        condition_df: PEtab condition table

    Returns:
        True if there are no parameter overrides in the condition table,
        False otherwise.
    """
    overrides = petab_MS.get_parametric_overrides(condition_df)
    return not overrides
def assert_parameter_id_is_string(parameter_df: pd.DataFrame) -> None:
    """
    Check if all entries in the parameterId column of the parameter table
    are string and not empty.

    Arguments:
        parameter_df: PEtab parameter DataFrame

    Raises:
        AssertionError: in case of problems
    """
    # The parameterId column is the DataFrame *index* (see check_parameter_df,
    # which enforces df.index.name == PARAMETER_ID). Iterating the frame
    # itself — as the previous implementation did — yields the remaining
    # column names instead of the parameter IDs, so invalid IDs were never
    # actually checked.
    for parameter_id in parameter_df.index:
        if isinstance(parameter_id, str) and parameter_id:
            if parameter_id[0].isdigit():
                raise AssertionError(
                    f"{PARAMETER_ID} {parameter_id} starts with integer.")
        else:
            # non-string or empty-string ID (empty strings previously
            # crashed on parameter_id[0])
            raise AssertionError(f"Empty {PARAMETER_ID} found.")
def assert_unique_parameter_ids(parameter_df: pd.DataFrame) -> None:
    """
    Check if the parameterId column of the parameter table is unique.

    Arguments:
        parameter_df: PEtab parameter DataFrame

    Raises:
        AssertionError: in case of problems
    """
    duplicated = get_non_unique(parameter_df.index)
    if duplicated:
        raise AssertionError(
            f"Non-unique values found in the {PARAMETER_ID} column"
            " of the parameter table: " + str(duplicated))
def assert_parameter_scale_is_valid(parameter_df: pd.DataFrame) -> None:
    """
    Check if all entries in the parameterScale column of the parameter table
    are 'lin' for linear, 'log' for natural logarithm or 'log10' for base 10
    logarithm.

    Arguments:
        parameter_df: PEtab parameter DataFrame

    Raises:
        AssertionError: in case of problems
    """
    for scale in parameter_df[PARAMETER_SCALE]:
        if scale in (LIN, LOG, LOG10):
            continue
        raise AssertionError(f"Expected {LIN}, {LOG}, or {LOG10}, but "
                             f"got {scale}.")
def assert_parameter_bounds_are_numeric(parameter_df: pd.DataFrame) -> None:
    """
    Check if all entries in the lowerBound and upperBound columns of the
    parameter table are numeric.

    Arguments:
        parameter_df: PEtab parameter DataFrame

    Raises:
        AssertionError: in case of problems
    """
    # float() raises on non-numeric entries; the boolean returned by
    # .all() is intentionally ignored.
    for bound_col in (LOWER_BOUND, UPPER_BOUND):
        parameter_df[bound_col].apply(float).all()
def check_parameter_bounds(parameter_df: pd.DataFrame) -> None:
    """
    Check if all entries in the lowerBound are smaller than upperBound column
    in the parameter table and that bounds are positive for parameterScale
    log|log10.

    Arguments:
        parameter_df: PEtab parameter DataFrame

    Raises:
        AssertionError: in case of problems
    """
    for _, row in parameter_df.iterrows():
        # only estimated parameters need valid bounds
        if not int(row[ESTIMATE]):
            continue
        lower = row[LOWER_BOUND]
        upper = row[UPPER_BOUND]
        if not lower <= upper:
            raise AssertionError(
                f"{LOWER_BOUND} greater than {UPPER_BOUND} for "
                f"{PARAMETER_ID} {row.name}.")
        log_scaled = row[PARAMETER_SCALE] in [LOG, LOG10]
        if log_scaled and (lower <= 0.0 or upper < 0.0):
            raise AssertionError(
                f"Bounds for {row[PARAMETER_SCALE]} scaled parameter "
                f"{ row.name} must be positive.")
def assert_parameter_prior_type_is_valid(
        parameter_df: pd.DataFrame) -> None:
    """Check that valid prior types have been selected

    Arguments:
        parameter_df: PEtab parameter table

    Raises:
        AssertionError: in case of invalid prior
    """
    for prior_col in (INITIALIZATION_PRIOR_TYPE, OBJECTIVE_PRIOR_TYPE):
        if prior_col not in parameter_df.columns:
            continue
        for _, row in parameter_df.iterrows():
            entry = row[prior_col]
            # empty entries default to a valid prior elsewhere
            if entry in PRIOR_TYPES or core.is_empty(entry):
                continue
            raise AssertionError(
                f"{prior_col} must be one of {PRIOR_TYPES} but is "
                f"'{entry}'.")
def assert_parameter_prior_parameters_are_valid(
        parameter_df: pd.DataFrame) -> None:
    """Check that the prior parameters are valid.

    For each of the initialization and objective priors, verifies that the
    prior-parameter field is either empty (only allowed for prior types
    with defaults) or a semicolon-separated pair of numbers.

    Arguments:
        parameter_df: PEtab parameter table

    Raises:
        AssertionError: in case of invalid prior parameters
    """
    prior_type_cols = [INITIALIZATION_PRIOR_TYPE,
                       OBJECTIVE_PRIOR_TYPE]
    prior_par_cols = [INITIALIZATION_PRIOR_PARAMETERS,
                      OBJECTIVE_PRIOR_PARAMETERS]
    # perform test for both priors
    for type_col, par_col in zip(prior_type_cols, prior_par_cols):
        # iterate over rows
        for _, row in parameter_df.iterrows():
            # get type (missing/empty cells fall back to the default prior)
            if type_col not in row or core.is_empty(row[type_col]):
                type_ = PARAMETER_SCALE_UNIFORM
            else:
                type_ = row[type_col]
            # get parameters
            pars_str = row.get(par_col, '')
            with_default_parameters = [PARAMETER_SCALE_UNIFORM]
            # check if parameters are empty
            if core.is_empty(pars_str):
                if type_ not in with_default_parameters:
                    raise AssertionError(
                        f"An empty {par_col} is only permitted with "
                        f"{type_col} in {with_default_parameters}.")
                # empty parameters fine
                continue
            # parse parameters ('par1;par2' format)
            try:
                pars = tuple([float(val) for val in pars_str.split(';')])
            except ValueError:
                raise AssertionError(
                    f"Could not parse prior parameters '{pars_str}'.")
            # all distributions take 2 parameters
            if len(pars) != 2:
                raise AssertionError(
                    f"The prior parameters '{pars}' do not contain the "
                    "expected number of entries (currently 'par1;par2' "
                    "for all prior types).")
def assert_parameter_estimate_is_boolean(parameter_df: pd.DataFrame) -> None:
    """
    Check if all entries in the estimate column of the parameter table are
    0 or 1.

    Arguments:
        parameter_df: PEtab parameter DataFrame

    Raises:
        AssertionError: in case of problems
    """
    for estimate in parameter_df[ESTIMATE]:
        # int(True) == 1 and int(False) == 0, so exactly 0/1 are accepted
        if int(estimate) in (0, 1):
            continue
        raise AssertionError(
            f"Expected 0 or 1 but got {estimate} in {ESTIMATE} column.")
def is_scalar_float(x: Any):
    """
    Checks whether input is a number or can be transformed into a number
    via float

    :param x:
        input
    :return:
        ``True`` if is or can be converted to number, ``False`` otherwise.
    """
    if isinstance(x, numbers.Number):
        return True
    # EAFP: anything float() accepts counts as a scalar
    try:
        float(x)
    except (ValueError, TypeError):
        return False
    return True
def measurement_table_has_timepoint_specific_mappings(
        measurement_df: Optional[pd.DataFrame],
        allow_scalar_numeric_noise_parameters: bool = False,
        allow_scalar_numeric_observable_parameters: bool = False,
) -> bool:
    """
    Are there time-point or replicate specific parameter assignments in the
    measurement table.

    Arguments:
        measurement_df:
            PEtab measurement table

        allow_scalar_numeric_noise_parameters:
            ignore scalar numeric assignments to noiseParameter placeholders

        allow_scalar_numeric_observable_parameters:
            ignore scalar numeric assignments to observableParameter
            placeholders

    Returns:
        True if there are time-point or replicate specific (non-numeric)
        parameter assignments in the measurement table, False otherwise.
    """
    if measurement_df is None:
        return False
    # since we edit it, copy it first
    measurement_df = copy.deepcopy(measurement_df)
    # mask numeric values: scalar numeric overrides (if allowed) are
    # replaced by NaN so they do not create extra groups below
    for col, allow_scalar_numeric in [
        (OBSERVABLE_PARAMETERS, allow_scalar_numeric_observable_parameters),
        (NOISE_PARAMETERS, allow_scalar_numeric_noise_parameters)
    ]:
        if col not in measurement_df:
            continue
        measurement_df[col] = measurement_df[col].apply(str)
        if allow_scalar_numeric:
            measurement_df.loc[
                measurement_df[col].apply(is_scalar_float), col
            ] = np.nan
    # dropna=False keeps NaN override groups so masked rows still group
    # together with rows that never had an override
    grouping_cols = core.get_notnull_columns(
        measurement_df,
        [OBSERVABLE_ID,
         SIMULATION_CONDITION_ID,
         PREEQUILIBRATION_CONDITION_ID,
         OBSERVABLE_PARAMETERS,
         NOISE_PARAMETERS])
    grouped_df = measurement_df.groupby(grouping_cols, dropna=False)
    grouping_cols = core.get_notnull_columns(
        measurement_df,
        [OBSERVABLE_ID,
         SIMULATION_CONDITION_ID,
         PREEQUILIBRATION_CONDITION_ID])
    grouped_df2 = measurement_df.groupby(grouping_cols)
    # data frame has timepoint specific overrides if grouping by noise
    # parameters and observable parameters in addition to observable,
    # condition and preeq id yields more groups
    return len(grouped_df) != len(grouped_df2)
def observable_table_has_nontrivial_noise_formula(
        observable_df: Optional[pd.DataFrame]) -> bool:
    """
    Does any observable have a noise formula that is not just a single
    parameter?

    Arguments:
        observable_df: PEtab observable table

    Returns:
        True if any noise formula does not consist of a single identifier,
        False otherwise.
    """
    if observable_df is None:
        return False

    def _is_trivial(formula) -> bool:
        # trivial = a plain number or a single bare identifier
        return is_scalar_float(formula) \
            or re.match(r'^[\w]+$', str(formula)) is not None

    return not observable_df[NOISE_FORMULA].apply(_is_trivial).all()
def measurement_table_has_observable_parameter_numeric_overrides(
        measurement_df: pd.DataFrame) -> bool:
    """Are there any numbers to override observable parameters?

    Arguments:
        measurement_df: PEtab measurement table

    Returns:
        True if there are any numbers to override observable/noise parameters,
        False otherwise.
    """
    if OBSERVABLE_PARAMETERS not in measurement_df:
        return False

    for _, row in measurement_df.iterrows():
        overrides = measurements.split_parameter_replacement_list(
            row.get(OBSERVABLE_PARAMETERS, None))
        if any(isinstance(override, numbers.Number)
               for override in overrides):
            return True
    return False
def assert_noise_distributions_valid(observable_df: pd.DataFrame) -> None:
    """
    Ensure that noise distributions and transformations for observables are
    valid.

    Arguments:
        observable_df: PEtab observable table

    Raises:
        AssertionError: in case of problems
    """
    def _is_nan(value) -> bool:
        # empty table cells show up as float NaN
        return isinstance(value, numbers.Number) and np.isnan(value)

    if OBSERVABLE_TRANSFORMATION in observable_df:
        # check for valid values
        for trafo in observable_df[OBSERVABLE_TRANSFORMATION]:
            if trafo in ['', *OBSERVABLE_TRANSFORMATIONS] or _is_nan(trafo):
                continue
            raise ValueError(
                f"Unrecognized observable transformation in observable "
                f"table: {trafo}.")

    if NOISE_DISTRIBUTION in observable_df:
        for distr in observable_df[NOISE_DISTRIBUTION]:
            if distr in ['', *NOISE_MODELS] or _is_nan(distr):
                continue
            raise ValueError(
                f"Unrecognized noise distribution in observable "
                f"table: {distr}.")
def assert_unique_observable_ids(observable_df: pd.DataFrame) -> None:
    """
    Check if the observableId column of the observable table is unique.

    Arguments:
        observable_df: PEtab observable DataFrame

    Raises:
        AssertionError: in case of problems
    """
    duplicates = get_non_unique(observable_df.index)
    if duplicates:
        raise AssertionError(
            f"Non-unique values found in the {OBSERVABLE_ID} column"
            " of the observable table: " + str(duplicates))
def get_non_unique(values):
    """Return the values that occur more than once in ``values``.

    Arguments:
        values: iterable of hashable values to check

    Returns:
        List of duplicated values, each reported once, in first-seen order.
    """
    # Counter preserves first-insertion order (Python 3.7+), so the output
    # ordering matches the order in which duplicates first appear.
    return [value for value, count in Counter(values).items() if count > 1]
def lint_problem(problem: 'petab.Problem') -> bool:
    """Run PEtab validation on problem

    Checks each available table of the problem individually, then runs the
    cross-table consistency checks. Errors are logged and accumulated so
    that all problems are reported in a single pass instead of stopping at
    the first failure.

    Arguments:
        problem: PEtab problem to check

    Returns:
        True if errors occurred, False otherwise
    """
    # pylint: disable=too-many-statements
    errors_occurred = False
    # Run checks on individual files
    if problem.sbml_model is not None:
        logger.info("Checking SBML model...")
        # |= accumulates: a later successful check must not clear the flag
        errors_occurred |= not sbml.is_sbml_consistent(
            problem.sbml_model.getSBMLDocument())
        sbml.log_sbml_errors(problem.sbml_model.getSBMLDocument())
    else:
        logger.warning("SBML model not available. Skipping.")
    if problem.measurement_df is not None:
        logger.info("Checking measurement table...")
        try:
            check_measurement_df(problem.measurement_df, problem.observable_df)
            # condition references can only be validated when the condition
            # table is present as well
            if problem.condition_df is not None:
                assert_measurement_conditions_present_in_condition_table(
                    problem.measurement_df, problem.condition_df
                )
        except AssertionError as e:
            # record the failure but keep checking the remaining tables
            logger.error(e)
            errors_occurred = True
    else:
        logger.warning("Measurement table not available. Skipping.")
    if problem.condition_df is not None:
        logger.info("Checking condition table...")
        try:
            check_condition_df(problem.condition_df, problem.sbml_model)
        except AssertionError as e:
            logger.error(e)
            errors_occurred = True
    else:
        logger.warning("Condition table not available. Skipping.")
    if problem.observable_df is not None:
        logger.info("Checking observable table...")
        try:
            check_observable_df(problem.observable_df)
        except AssertionError as e:
            logger.error(e)
            errors_occurred = True
        if problem.sbml_model is not None:
            # observable IDs must not collide with any SBML model entity
            for obs_id in problem.observable_df.index:
                if problem.sbml_model.getElementBySId(obs_id):
                    logger.error(f"Observable ID {obs_id} shadows model "
                                 "entity.")
                    errors_occurred = True
    else:
        logger.warning("Observable table not available. Skipping.")
    if problem.parameter_df is not None:
        logger.info("Checking parameter table...")
        try:
            check_parameter_df(problem.parameter_df, problem.sbml_model,
                               problem.observable_df,
                               problem.measurement_df, problem.condition_df)
        except AssertionError as e:
            logger.error(e)
            errors_occurred = True
    else:
        logger.warning("Parameter table not available. Skipping.")
    # cross-table check: needs model, condition and parameter table together
    if problem.sbml_model is not None and problem.condition_df is not None \
            and problem.parameter_df is not None:
        try:
            assert_model_parameters_in_condition_or_parameter_table(
                problem.sbml_model,
                problem.condition_df,
                problem.parameter_df
            )
        except AssertionError as e:
            logger.error(e)
            errors_occurred = True
    if errors_occurred:
        logger.error('Not OK')
    elif problem.measurement_df is None or problem.condition_df is None \
            or problem.sbml_model is None or problem.parameter_df is None \
            or problem.observable_df is None:
        # no errors found, but some files were missing, so the check
        # was necessarily incomplete
        logger.warning('Not all files of the PEtab problem definition could '
                       'be checked.')
    else:
        logger.info('PEtab format check completed successfully.')
    return errors_occurred
def assert_model_parameters_in_condition_or_parameter_table(
        sbml_model: libsbml.Model,
        condition_df: pd.DataFrame,
        parameter_df: pd.DataFrame) -> None:
    """Model parameters that are targets of AssignmentRule must not be present
    in parameter table or in condition table columns. Other parameters must
    only be present in either in parameter table or condition table columns.
    Check that.

    Arguments:
        parameter_df: PEtab parameter DataFrame
        sbml_model: PEtab SBML Model
        condition_df: PEtab condition table

    Raises:
        AssertionError: in case of problems
    """
    for parameter in sbml_model.getListOfParameters():
        parameter_id = parameter.getId()

        # placeholder parameters are filled in via the measurement table
        # and are exempt from this check
        if parameter_id.startswith(('observableParameter', 'noiseParameter')):
            continue

        is_assignee = \
            sbml_model.getAssignmentRuleByVariable(parameter_id) is not None
        in_parameter_df = parameter_id in parameter_df.index
        in_condition_df = parameter_id in condition_df.columns

        if is_assignee and (in_parameter_df or in_condition_df):
            raise AssertionError(f"Model parameter '{parameter_id}' is target "
                                 "of AssignmentRule, and thus, must not be "
                                 "present in condition table or in parameter "
                                 "table.")

        if in_parameter_df and in_condition_df:
            raise AssertionError(f"Model parameter '{parameter_id}' present "
                                 "in both condition table and parameter "
                                 "table.")
def assert_measurement_conditions_present_in_condition_table(
        measurement_df: pd.DataFrame, condition_df: pd.DataFrame) -> None:
    """Ensure that all entries from measurement_df.simulationConditionId and
    measurement_df.preequilibrationConditionId are present in
    condition_df.index.

    Arguments:
        measurement_df: PEtab measurement table
        condition_df: PEtab condition table

    Raises:
        AssertionError: in case of problems
    """
    referenced = set(measurement_df[SIMULATION_CONDITION_ID].values)
    if PREEQUILIBRATION_CONDITION_ID in measurement_df:
        # the preequilibration column is optional and may contain NaNs
        preeq_ids = measurement_df[PREEQUILIBRATION_CONDITION_ID].dropna()
        referenced.update(preeq_ids.values)

    missing_conditions = referenced.difference(condition_df.index.values)
    if missing_conditions:
        raise AssertionError("Measurement table references conditions that "
                             "are not specified in the condition table: "
                             + str(missing_conditions))
def is_valid_identifier(x: str) -> bool:
    """Check whether `x` is a valid identifier

    Check whether `x` is a valid identifier for conditions, parameters,
    observables... . Identifiers may contain upper and lower case letters,
    digits and underscores, but must not start with a digit.

    Arguments:
        x: string to check

    Returns:
        ``True`` if valid, ``False`` otherwise
    """
    # Missing values (NaN / None) can never be valid identifiers.
    if pd.isna(x):
        return False

    return bool(re.match(r'^[a-zA-Z_]\w*$', x))
def check_ids(ids: Iterable[str], kind: str = '') -> None:
    """Check IDs are valid

    Arguments:
        ids: Iterable of IDs to check
        kind: Kind of IDs, for more informative error message

    Raises:
        ValueError - in case of invalid IDs
    """
    bad_entries = [(position, _id)
                   for position, _id in enumerate(ids)
                   if not is_valid_identifier(_id)]

    if not bad_entries:
        return

    # The first row is the header row, and Python lists are zero-indexed,
    # hence need to add 2 for the correct line number.
    offset = 2
    report_lines = []
    for position, _id in bad_entries:
        description = 'Missing ID' if pd.isna(_id) else _id
        report_lines.append(f'Line {position+offset}: ' + description)
    error_output = '\n'.join(report_lines)
    raise ValueError(f"Invalid {kind} ID(s):\n{error_output}")
| [
"copy.deepcopy",
"sympy.sympify",
"re.match",
"numpy.issubdtype",
"numpy.isnan",
"pandas.api.types.is_string_dtype",
"collections.Counter",
"pandas.isna",
"petab_MS.get_parametric_overrides",
"logging.getLogger",
"re.compile"
] | [((373, 400), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (390, 400), False, 'import logging\n'), ((1355, 1386), 're.compile', 're.compile', (['"""(?:^\\\\s)|(?:\\\\s$)"""'], {}), "('(?:^\\\\s)|(?:\\\\s$)')\n", (1365, 1386), False, 'import re\n'), ((20190, 20219), 'copy.deepcopy', 'copy.deepcopy', (['measurement_df'], {}), '(measurement_df)\n', (20203, 20219), False, 'import copy\n'), ((24538, 24553), 'collections.Counter', 'Counter', (['values'], {}), '(values)\n', (24545, 24553), False, 'from collections import Counter\n'), ((31460, 31470), 'pandas.isna', 'pd.isna', (['x'], {}), '(x)\n', (31467, 31470), True, 'import pandas as pd\n'), ((31505, 31535), 're.match', 're.match', (['"""^[a-zA-Z_]\\\\w*$"""', 'x'], {}), "('^[a-zA-Z_]\\\\w*$', x)\n", (31513, 31535), False, 'import re\n'), ((2302, 2349), 'numpy.issubdtype', 'np.issubdtype', (['df[column_name].dtype', 'np.number'], {}), '(df[column_name].dtype, np.number)\n', (2315, 2349), True, 'import numpy as np\n'), ((3634, 3681), 'numpy.issubdtype', 'np.issubdtype', (['df[column_name].dtype', 'np.number'], {}), '(df[column_name].dtype, np.number)\n', (3647, 3681), True, 'import numpy as np\n'), ((6594, 6641), 'numpy.issubdtype', 'np.issubdtype', (['df[column_name].dtype', 'np.number'], {}), '(df[column_name].dtype, np.number)\n', (6607, 6641), True, 'import numpy as np\n'), ((8637, 8695), 'numpy.issubdtype', 'np.issubdtype', (['observable_df[column_name].dtype', 'np.number'], {}), '(observable_df[column_name].dtype, np.number)\n', (8650, 8695), True, 'import numpy as np\n'), ((9400, 9415), 'sympy.sympify', 'sp.sympify', (['obs'], {}), '(obs)\n', (9410, 9415), True, 'import sympy as sp\n'), ((9661, 9678), 'sympy.sympify', 'sp.sympify', (['noise'], {}), '(noise)\n', (9671, 9678), True, 'import sympy as sp\n'), ((12451, 12498), 'petab_MS.get_parametric_overrides', 'petab_MS.get_parametric_overrides', (['condition_df'], {}), '(condition_df)\n', (12484, 12498), False, 'import 
petab_MS\n'), ((3896, 3943), 'numpy.issubdtype', 'np.issubdtype', (['df[column_name].dtype', 'np.number'], {}), '(df[column_name].dtype, np.number)\n', (3909, 3943), True, 'import numpy as np\n'), ((8931, 8989), 'numpy.issubdtype', 'np.issubdtype', (['observable_df[column_name].dtype', 'np.number'], {}), '(observable_df[column_name].dtype, np.number)\n', (8944, 8989), True, 'import numpy as np\n'), ((6937, 6979), 'pandas.api.types.is_string_dtype', 'pd.api.types.is_string_dtype', (['df[ESTIMATE]'], {}), '(df[ESTIMATE])\n', (6965, 6979), True, 'import pandas as pd\n'), ((23380, 23395), 'numpy.isnan', 'np.isnan', (['trafo'], {}), '(trafo)\n', (23388, 23395), True, 'import numpy as np\n'), ((23796, 23811), 'numpy.isnan', 'np.isnan', (['distr'], {}), '(distr)\n', (23804, 23811), True, 'import numpy as np\n'), ((32221, 32233), 'pandas.isna', 'pd.isna', (['_id'], {}), '(_id)\n', (32228, 32233), True, 'import pandas as pd\n')] |
"""
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import pickle as cPickle
import numpy as np
from collections import defaultdict
def configure(constraint):
    """Load per-constraint feature resources and per-conversation user features.

    Arguments:
        constraint: name of the dataset constraint/split; used to locate
            the bag-of-words and question-feature files on disk.

    Returns:
        Tuple ``(user_features, ARGS)`` where ``user_features`` maps each
        conversation id to its user-feature record and ``ARGS`` bundles
        the status map, aspect name lists, lexicons, question clusters and
        n-gram vocabularies used by downstream feature extraction.
    """
    UNIGRAMS_FILENAME = "data/bow_features/%s/unigram100.pkl"%(constraint)
    BIGRAMS_FILENAME = "data/bow_features/%s/bigram200.pkl"%(constraint)
    # use context managers so the pickle file handles are closed promptly
    # (the original code leaked the handles returned by open())
    with open(UNIGRAMS_FILENAME, "rb") as f:
        UNIGRAMS_LIST = cPickle.load(f)
    with open(BIGRAMS_FILENAME, "rb") as f:
        BIGRAMS_LIST = cPickle.load(f)
    # Wikipedia user-group tiers, highest privilege level first
    STATUS = {4: ['founder', 'sysop'],
       3: ['accountcreator', 'bureaucrat', 'checkuser'], \
       2: [ 'abusefilter', 'abusefilter-helper', 'autoreviewer', 'extendedmover', \
           'filemover', 'import', 'oversight', 'patroller', \
          'reviewer','rollbacker','templateeditor','epadmin', 'epcampus', 'epcoordinator',\
           'epinstructor', 'eponline'],\
       1: ['massmessage-sender', 'ipblock-exempt', 'extendedconfirmed',\
           'autoconfirmed', 'researcher', 'user']}
    ASPECTS = ['age', 'status', 'comments_on_same_talk_page', 'comments_on_all_talk_pages',\
             'edits_on_subjectpage', 'edits_on_wikipedia_articles', 'history_toxicity']
    attacker_profile_ASPECTS =['proportion_of_being_replied',\
               'total_reply_time_gap', 'reply_latency',\
               'age', 'status', 'number_of_questions_asked', \
               'edits_on_wikipedia_articles']
    with open('feature_extraction/utils/lexicons') as f:
        LEXICONS = json.load(f)
    with open("feature_extraction/question_features/%s.json"%(constraint)) as f:
        q = json.load(f)
    # map "...-<conversation>-<action>-..." keys to per-conversation question info
    QUESTIONS = defaultdict(list)
    for key, val in q.items():
        action = key.split('-')[2]
        new_key = key.split('-')[1]
        QUESTIONS[new_key].append(
            {'action_id': action,
             'question_type': np.argmin(val['normy_cluster_dist_vector'])})
    with open("data/user_features.json") as f:
        inp = json.load(f)
    user_features = {}
    # NOTE(review): this assumes the JSON top level is a list of
    # (conversation, users) pairs; if it were an object, iterating it would
    # unpack its *keys* — confirm against the file format.
    for conv, users in inp:
        user_features[conv] = users
    ARGS = [STATUS, ASPECTS, attacker_profile_ASPECTS, LEXICONS, QUESTIONS,
            UNIGRAMS_LIST, BIGRAMS_LIST]
    return user_features, ARGS
| [
"collections.defaultdict",
"json.load",
"numpy.argmin"
] | [((2096, 2113), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2107, 2113), False, 'from collections import defaultdict\n'), ((1960, 1972), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1969, 1972), False, 'import json\n'), ((2067, 2079), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2076, 2079), False, 'import json\n'), ((2408, 2420), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2417, 2420), False, 'import json\n'), ((2299, 2342), 'numpy.argmin', 'np.argmin', (["val['normy_cluster_dist_vector']"], {}), "(val['normy_cluster_dist_vector'])\n", (2308, 2342), True, 'import numpy as np\n')] |
from DAEpy.solvers.ocp import ocp_solver
import numpy as np
import matplotlib.pyplot as plt
def f(x, y, t):
    """Pendulum state derivatives; ``y[0]`` is the algebraic multiplier."""
    multiplier = y[0]
    return np.array([x[2],
                     x[3],
                     -multiplier * x[0],
                     -multiplier * x[1] - 9.81])
def g(x, t):
    """Scaled unit-circle constraint residual (length-1 array)."""
    residual = x[0] ** 2 + x[1] ** 2 - 1.0
    return np.array([residual * 1000.0])
def fx(x, y, t):
    """Jacobian of ``f`` with respect to the state, shape (4, 4, len(t))."""
    jac = np.zeros((4, 4, len(t)))
    jac[0, 2, :] = 1.0
    jac[1, 3, :] = 1.0
    jac[2, 0, :] = -y[0]
    jac[3, 1, :] = -y[0]
    return jac
def fy(x, y, t):
    """Jacobian of ``f`` with respect to the algebraic variable, shape (4, 1, len(t))."""
    jac = np.zeros((4, 1, len(t)))
    jac[2, 0, :] = -x[0]
    jac[3, 0, :] = -x[1]
    return jac
def gx(x, t):
    """Jacobian of the constraint ``g`` with respect to the state, shape (1, 4, len(t))."""
    jac = np.zeros((1, 4, len(t)))
    jac[0, 0, :] = 2 * x[0]
    jac[0, 1, :] = 2 * x[1]
    return jac
def L(x, u, t):
    """Running cost: squared constraint residual at each time point."""
    residual = g(x, t)
    return np.einsum('ij...,ij...->j...', residual, residual)
def Lx(x, u, t):
    """Gradient of the running cost w.r.t. the state via the chain rule."""
    residual = g(x, t)
    residual_jac = gx(x, t)
    return 2 * np.einsum('i...,i...->...', residual, residual_jac)
def Lu(x, u, t):
    """Gradient of the running cost w.r.t. the control; the cost has no u-dependence."""
    return np.zeros((u.shape[0], t.shape[0]))
if __name__ == "__main__":
    # problem dimensions: number of time points, states and algebraic vars
    numt = 500
    numx = 4
    numy = 1
    xresult = np.empty((numx, numt), dtype=np.float64)
    yresult = np.empty((numy, numt), dtype=np.float64)
    t = np.linspace(0,1.0,numt, dtype=np.float64)
    # flat initial guesses for the state and algebraic trajectories
    x = np.ones((numx, numt), dtype=np.float64)
    y = np.ones((numy, numt), dtype=np.float64)
    #x = x_actual(t)
    #y = y_actual(t)
    # initial condition: unit displacement, zero velocity
    x0 = np.array([1.0,0.0,0.0,0.0], dtype=np.float64)
    w = 1.0
    m = 1.0e3
    # NOTE(review): `fu=fy` passes the df/dy Jacobian as the control
    # Jacobian argument — confirm this matches the ocp_solver signature.
    sol = ocp_solver(L, f, x, y, t, x0, m, Lx=Lx, Lu=Lu, fx=fx, fu=fy, verbose = 2, tol = 1e-4, max_nodes = 2000)
    # NOTE: `f, ax` rebinds the module-level function `f`; harmless here
    # because the solver has already run.
    f, ax = plt.subplots(2)
    ax[0].set_xlabel("Times (Arbitrary Units)")
    ax[0].set_ylabel("Concentration \n (Arbitrary Units)")
    ax[0].plot(sol.t, sol.x.T)
    ax[1].set_xlabel("Times (Arbitrary Units)")
    ax[1].set_ylabel("RMS Residuals")
    ax[1].plot(sol.t[1:], sol.rms_residuals)
    """
    f, ax = plt.subplots(2)
    ax[0].set_xlabel("Times (Arbitrary Units)")
    ax[0].set_ylabel("Absolute Error (Arbitrary Units)")
    ax[0].plot(sol.t, sol.x.T - c_exact(sol.t).T)
    ax[1].set_xlabel("Times (Arbitrary Units)")
    ax[1].set_ylabel("Relative Error")
    ax[1].plot(sol.t[1:], (sol.x.T - c_exact(sol.t).T)[1:]/c_exact(sol.t).T[1:])
    f, ax = plt.subplots(2)
    ax[0].set_xlabel("Times (Arbitrary Units)")
    ax[0].set_ylabel("Hamiltonian (Arbitrary Units)")
    ax[0].plot(sol.t, sol.h.T)
    ax[1].set_xlabel("Times (Arbitrary Units)")
    ax[1].set_ylabel("Hamiltonian Infinity Norm")
    ax[1].plot(sol.t, sol.hu_norm)
    """
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.empty",
"numpy.zeros",
"numpy.einsum",
"numpy.ones",
"DAEpy.solvers.ocp.ocp_solver",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((120, 177), 'numpy.array', 'np.array', (['[x[2], x[3], -y[0] * x[0], -y[0] * x[1] - 9.81]'], {}), '([x[2], x[3], -y[0] * x[0], -y[0] * x[1] - 9.81])\n', (128, 177), True, 'import numpy as np\n'), ((197, 247), 'numpy.array', 'np.array', (['[(x[0] ** 2 + x[1] ** 2 - 1.0) * 1000.0]'], {}), '([(x[0] ** 2 + x[1] ** 2 - 1.0) * 1000.0])\n', (205, 247), True, 'import numpy as np\n'), ((663, 699), 'numpy.einsum', 'np.einsum', (['"""ij...,ij...->j..."""', 'G', 'G'], {}), "('ij...,ij...->j...', G, G)\n", (672, 699), True, 'import numpy as np\n'), ((867, 885), 'numpy.zeros', 'np.zeros', (['(nu, nt)'], {}), '((nu, nt))\n', (875, 885), True, 'import numpy as np\n'), ((971, 1011), 'numpy.empty', 'np.empty', (['(numx, numt)'], {'dtype': 'np.float64'}), '((numx, numt), dtype=np.float64)\n', (979, 1011), True, 'import numpy as np\n'), ((1026, 1066), 'numpy.empty', 'np.empty', (['(numy, numt)'], {'dtype': 'np.float64'}), '((numy, numt), dtype=np.float64)\n', (1034, 1066), True, 'import numpy as np\n'), ((1076, 1119), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'numt'], {'dtype': 'np.float64'}), '(0, 1.0, numt, dtype=np.float64)\n', (1087, 1119), True, 'import numpy as np\n'), ((1126, 1165), 'numpy.ones', 'np.ones', (['(numx, numt)'], {'dtype': 'np.float64'}), '((numx, numt), dtype=np.float64)\n', (1133, 1165), True, 'import numpy as np\n'), ((1174, 1213), 'numpy.ones', 'np.ones', (['(numy, numt)'], {'dtype': 'np.float64'}), '((numy, numt), dtype=np.float64)\n', (1181, 1213), True, 'import numpy as np\n'), ((1267, 1315), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {'dtype': 'np.float64'}), '([1.0, 0.0, 0.0, 0.0], dtype=np.float64)\n', (1275, 1315), True, 'import numpy as np\n'), ((1351, 1455), 'DAEpy.solvers.ocp.ocp_solver', 'ocp_solver', (['L', 'f', 'x', 'y', 't', 'x0', 'm'], {'Lx': 'Lx', 'Lu': 'Lu', 'fx': 'fx', 'fu': 'fy', 'verbose': '(2)', 'tol': '(0.0001)', 'max_nodes': '(2000)'}), '(L, f, x, y, t, x0, m, Lx=Lx, Lu=Lu, fx=fx, fu=fy, verbose=2, tol\n =0.0001, 
max_nodes=2000)\n', (1361, 1455), False, 'from DAEpy.solvers.ocp import ocp_solver\n'), ((1468, 1483), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1480, 1483), True, 'import matplotlib.pyplot as plt\n'), ((2427, 2437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2435, 2437), True, 'import matplotlib.pyplot as plt\n'), ((763, 797), 'numpy.einsum', 'np.einsum', (['"""i...,i...->..."""', 'G', 'Gx'], {}), "('i...,i...->...', G, Gx)\n", (772, 797), True, 'import numpy as np\n')] |
#
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import traceback
import joblib
import glob
import json
import time
import xgboost
import numpy
import flask
from flask import Flask, Response
import logging
from functools import lru_cache
try:
    """ check for GPU via library imports """
    # if cupy/cuml import cleanly, GPU inference (FIL) is available
    import cupy
    from cuml import ForestInference
    GPU_INFERENCE_FLAG = True
except ImportError as gpu_import_error:
    # fall back to the CPU code path (native XGBoost / joblib models)
    GPU_INFERENCE_FLAG = False
    print(f'\n!GPU import error: {gpu_import_error}\n')
# set to true to print incoming request headers and data
DEBUG_FLAG = False
def serve(xgboost_threshold=0.5):
    """ Flask Inference Server for SageMaker hosting of RAPIDS Models

    Registers the SageMaker-required `/ping` and `/invocations` routes,
    loads the trained model from /opt/ml/model (cached across requests),
    and blocks serving HTTP on port 8080.

    Arguments:
        xgboost_threshold: probability cutoff used to binarize XGBoost
            predictions (score > threshold -> 1.0, else 0.0)
    """
    app = Flask(__name__)
    logging.basicConfig(level=logging.DEBUG)
    # report which execution path (GPU vs CPU) this container will use
    if GPU_INFERENCE_FLAG:
        app.logger.info('GPU Model Serving Workflow')
        app.logger.info(f'> {cupy.cuda.runtime.getDeviceCount()}'
                        f' GPUs detected \n')
    else:
        app.logger.info('CPU Model Serving Workflow')
        app.logger.info(f'> {os.cpu_count()} CPUs detected \n')

    @app.route("/ping", methods=["GET"])
    def ping():
        """ SageMaker required method, ping heartbeat """
        return Response(response="\n", status=200)

    # lru_cache memoizes the (argument-less) loader so the model is
    # deserialized from disk only once per process
    @lru_cache()
    def load_trained_model():
        """
        Cached loading of trained [ XGBoost or RandomForest ] model into memory
        Note: Models selected via filename parsing, edit if necessary
        """
        xgb_models = glob.glob('/opt/ml/model/*_xgb')
        rf_models = glob.glob('/opt/ml/model/*_rf')
        app.logger.info(f'detected xgboost models : {xgb_models}')
        app.logger.info(f'detected randomforest models : {rf_models}\n\n')
        model_type = None
        start_time = time.perf_counter()
        if len(xgb_models):
            model_type = 'XGBoost'
            model_filename = xgb_models[0]
            if GPU_INFERENCE_FLAG:
                # FIL
                reloaded_model = ForestInference.load(model_filename)
            else:
                # native XGBoost
                reloaded_model = xgboost.Booster()
                reloaded_model.load_model(fname=model_filename)
        elif len(rf_models):
            model_type = 'RandomForest'
            model_filename = rf_models[0]
            reloaded_model = joblib.load(model_filename)
        else:
            raise Exception('! No trained models detected')
        exec_time = time.perf_counter() - start_time
        app.logger.info(f'> model {model_filename} '
                        f'loaded in {exec_time:.5f} s \n')
        return reloaded_model, model_type, model_filename

    @app.route("/invocations", methods=["POST"])
    def predict():
        """
        Run CPU or GPU inference on input data,
        called everytime an incoming request arrives
        """
        # parse user input
        try:
            if DEBUG_FLAG:
                app.logger.debug(flask.request.headers)
                app.logger.debug(flask.request.content_type)
                app.logger.debug(flask.request.get_data())
            string_data = json.loads(flask.request.get_data())
            query_data = numpy.array(string_data)
        except Exception:
            # malformed payload -> 415 Unsupported Media Type
            return Response(
                response="Unable to parse input data"
                "[ should be json/string encoded list of arrays ]",
                status=415,
                mimetype='text/csv'
            )
        # cached [reloading] of trained model to process incoming requests
        reloaded_model, model_type, model_filename = load_trained_model()
        try:
            start_time = time.perf_counter()
            if model_type == 'XGBoost':
                app.logger.info('running inference using XGBoost model :'
                                f'{model_filename}')
                if GPU_INFERENCE_FLAG:
                    predictions = reloaded_model.predict(query_data)
                else:
                    dm_deserialized_data = xgboost.DMatrix(query_data)
                    predictions = reloaded_model.predict(dm_deserialized_data)
                # binarize scores with the serve() threshold
                predictions = (predictions > xgboost_threshold) * 1.0
            elif model_type == 'RandomForest':
                app.logger.info('running inference using RandomForest model :'
                                f'{model_filename}')
                if 'gpu' in model_filename and not GPU_INFERENCE_FLAG:
                    raise Exception('attempting to run CPU inference '
                                    'on a GPU trained RandomForest model')
                predictions = reloaded_model.predict(
                    query_data.astype('float32'))
            app.logger.info(f'\n predictions: {predictions} \n')
            exec_time = time.perf_counter() - start_time
            app.logger.info(f' > inference finished in {exec_time:.5f} s \n')
            # return predictions
            return Response(response=json.dumps(predictions.tolist()),
                            status=200, mimetype='text/csv')
        # error during inference
        except Exception as inference_error:
            app.logger.error(inference_error)
            return Response(response=f"Inference failure: {inference_error}\n",
                            status=400, mimetype='text/csv')

    # initial [non-cached] reload of trained model
    reloaded_model, model_type, model_filename = load_trained_model()

    # trigger start of Flask app
    app.run(host="0.0.0.0", port=8080)
if __name__ == "__main__":
    # serve() blocks until the Flask server stops; exit codes signal
    # success/failure to the SageMaker container runtime
    try:
        serve()
        sys.exit(0)  # success exit code
    except Exception:
        traceback.print_exc()
        sys.exit(-1)  # failure exit code
"""
airline model inference test [ 3 non-late flights, and a one late flight ]
curl -X POST --header "Content-Type: application/json" --data '[[ 2019.0, 4.0, 12.0, 2.0, 3647.0, 20452.0, 30977.0, 33244.0, 1943.0, -9.0, 0.0, 75.0, 491.0 ], [0.6327389486117129, 0.4306956773589715, 0.269797132011095, 0.9802453595689266, 0.37114359481679515, 0.9916185580669782, 0.07909626511279289, 0.7329633329905694, 0.24776047025280235, 0.5692037733986525, 0.22905629196095134, 0.6247424302941754, 0.2589150304037847], [0.39624412725991653, 0.9227953615174843, 0.03561991722126401, 0.7718573109543159, 0.2700874862088877, 0.9410675866419298, 0.6185692299959633, 0.486955878112717, 0.18877072081876722, 0.8266565188148121, 0.7845597219675844, 0.6534800630725327, 0.97356320515559], [ 2018.0, 3.0, 9.0, 5.0, 2279.0, 20409.0, 30721.0, 31703.0, 733.0, 123.0, 1.0, 61.0, 200.0 ]]' http://0.0.0.0:8080/invocations
"""
| [
"cupy.cuda.runtime.getDeviceCount",
"traceback.print_exc",
"logging.basicConfig",
"flask.Flask",
"joblib.load",
"time.perf_counter",
"xgboost.Booster",
"os.cpu_count",
"cuml.ForestInference.load",
"flask.request.get_data",
"numpy.array",
"glob.glob",
"functools.lru_cache",
"flask.Response"... | [((1271, 1286), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1276, 1286), False, 'from flask import Flask, Response\n'), ((1291, 1331), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1310, 1331), False, 'import logging\n'), ((1827, 1838), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (1836, 1838), False, 'from functools import lru_cache\n'), ((1785, 1820), 'flask.Response', 'Response', ([], {'response': '"""\n"""', 'status': '(200)'}), "(response='\\n', status=200)\n", (1793, 1820), False, 'from flask import Flask, Response\n'), ((2064, 2096), 'glob.glob', 'glob.glob', (['"""/opt/ml/model/*_xgb"""'], {}), "('/opt/ml/model/*_xgb')\n", (2073, 2096), False, 'import glob\n'), ((2117, 2148), 'glob.glob', 'glob.glob', (['"""/opt/ml/model/*_rf"""'], {}), "('/opt/ml/model/*_rf')\n", (2126, 2148), False, 'import glob\n'), ((2339, 2358), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2356, 2358), False, 'import time\n'), ((6177, 6188), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6185, 6188), False, 'import sys\n'), ((3023, 3042), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3040, 3042), False, 'import time\n'), ((3753, 3777), 'numpy.array', 'numpy.array', (['string_data'], {}), '(string_data)\n', (3764, 3777), False, 'import numpy\n'), ((4232, 4251), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4249, 4251), False, 'import time\n'), ((6241, 6262), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6260, 6262), False, 'import traceback\n'), ((6271, 6283), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (6279, 6283), False, 'import sys\n'), ((2556, 2592), 'cuml.ForestInference.load', 'ForestInference.load', (['model_filename'], {}), '(model_filename)\n', (2576, 2592), False, 'from cuml import ForestInference\n'), ((2677, 2694), 'xgboost.Booster', 'xgboost.Booster', ([], {}), '()\n', (2692, 2694), False, 
'import xgboost\n'), ((2900, 2927), 'joblib.load', 'joblib.load', (['model_filename'], {}), '(model_filename)\n', (2911, 2927), False, 'import joblib\n'), ((3702, 3726), 'flask.request.get_data', 'flask.request.get_data', ([], {}), '()\n', (3724, 3726), False, 'import flask\n'), ((3824, 3962), 'flask.Response', 'Response', ([], {'response': '"""Unable to parse input data[ should be json/string encoded list of arrays ]"""', 'status': '(415)', 'mimetype': '"""text/csv"""'}), "(response=\n 'Unable to parse input data[ should be json/string encoded list of arrays ]'\n , status=415, mimetype='text/csv')\n", (3832, 3962), False, 'from flask import Flask, Response\n'), ((5376, 5395), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5393, 5395), False, 'import time\n'), ((5797, 5894), 'flask.Response', 'Response', ([], {'response': 'f"""Inference failure: {inference_error}\n"""', 'status': '(400)', 'mimetype': '"""text/csv"""'}), "(response=f'Inference failure: {inference_error}\\n', status=400,\n mimetype='text/csv')\n", (5805, 5894), False, 'from flask import Flask, Response\n'), ((1443, 1477), 'cupy.cuda.runtime.getDeviceCount', 'cupy.cuda.runtime.getDeviceCount', ([], {}), '()\n', (1475, 1477), False, 'import cupy\n'), ((1619, 1633), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (1631, 1633), False, 'import os\n'), ((3638, 3662), 'flask.request.get_data', 'flask.request.get_data', ([], {}), '()\n', (3660, 3662), False, 'import flask\n'), ((4593, 4620), 'xgboost.DMatrix', 'xgboost.DMatrix', (['query_data'], {}), '(query_data)\n', (4608, 4620), False, 'import xgboost\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Load the renting-out dataset: (financial_year, flat type, active cases);
# missing entries ('na' / '-') are filled with 0.
data = np.genfromtxt("/Users/benson/Desktop/Upskilling/SP/IT8701 Introduction to Programming for Data Science/Assignment1/active-cases-of-renting-out-of-flat.csv",
                     skip_header=1,
                     dtype=[('financial_year','i8'), ('type','U10'), ('no_active_cases','i8')], delimiter=",",
                     missing_values=['na','-'],filling_values=[0])
# NOTE(review): 'HBD flat' looks like a typo of 'HDB flat' — verify against
# the distinct values of the CSV's `type` column.
data_required = data[np.isin(data['type'], ['HBD flat'])]
labels = data_required['financial_year']
# plot against 0..n-1 positions; the real years go in as tick labels below
years = np.arange(0, len(labels))
values = data_required['no_active_cases']
plt.plot(years, values, 'o-', color = '#000067')
plt.title('Active cases of renting out of HDB flats')
plt.ylabel('No. of active cases')
plt.xlabel('Financial year from 2006 to 2018')
plt.xticks(years,labels,rotation=90)
plt.show()
| [
"matplotlib.pyplot.title",
"numpy.isin",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.genfromtxt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((59, 390), 'numpy.genfromtxt', 'np.genfromtxt', (['"""/Users/benson/Desktop/Upskilling/SP/IT8701 Introduction to Programming for Data Science/Assignment1/active-cases-of-renting-out-of-flat.csv"""'], {'skip_header': '(1)', 'dtype': "[('financial_year', 'i8'), ('type', 'U10'), ('no_active_cases', 'i8')]", 'delimiter': '""","""', 'missing_values': "['na', '-']", 'filling_values': '[0]'}), "(\n '/Users/benson/Desktop/Upskilling/SP/IT8701 Introduction to Programming for Data Science/Assignment1/active-cases-of-renting-out-of-flat.csv'\n , skip_header=1, dtype=[('financial_year', 'i8'), ('type', 'U10'), (\n 'no_active_cases', 'i8')], delimiter=',', missing_values=['na', '-'],\n filling_values=[0])\n", (72, 390), True, 'import numpy as np\n'), ((608, 654), 'matplotlib.pyplot.plot', 'plt.plot', (['years', 'values', '"""o-"""'], {'color': '"""#000067"""'}), "(years, values, 'o-', color='#000067')\n", (616, 654), True, 'import matplotlib.pyplot as plt\n'), ((657, 710), 'matplotlib.pyplot.title', 'plt.title', (['"""Active cases of renting out of HDB flats"""'], {}), "('Active cases of renting out of HDB flats')\n", (666, 710), True, 'import matplotlib.pyplot as plt\n'), ((711, 744), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""No. of active cases"""'], {}), "('No. 
of active cases')\n", (721, 744), True, 'import matplotlib.pyplot as plt\n'), ((745, 791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Financial year from 2006 to 2018"""'], {}), "('Financial year from 2006 to 2018')\n", (755, 791), True, 'import matplotlib.pyplot as plt\n'), ((792, 830), 'matplotlib.pyplot.xticks', 'plt.xticks', (['years', 'labels'], {'rotation': '(90)'}), '(years, labels, rotation=90)\n', (802, 830), True, 'import matplotlib.pyplot as plt\n'), ((829, 839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (837, 839), True, 'import matplotlib.pyplot as plt\n'), ((452, 487), 'numpy.isin', 'np.isin', (["data['type']", "['HBD flat']"], {}), "(data['type'], ['HBD flat'])\n", (459, 487), True, 'import numpy as np\n')] |
'''
Created on 8 mars 2017
@author: worm
'''
import collections
import logging
import os
import numpy
import cv2
from snapshotServer.exceptions.PictureComparatorError import PictureComparatorError
import math
import time
from numpy import int32, uint8
# Axis-aligned rectangle used for match results and exclusion zones.
Rectangle = collections.namedtuple("Rectangle", ['x', 'y', 'width', 'height'])
# Single pixel coordinate.
Pixel = collections.namedtuple("Pixel", ['x', 'y'])
logger = logging.getLogger(__name__)
class PictureComparator:
MAX_DIFF_THRESHOLD = 0.1
    def __init__(self):
        # No state to initialise; the comparator methods are stateless.
        pass
def compare(self, reference, image):
"""
Compares an image to its reference
@return: a rectangle of the matching zone in reference image or None if nothing is found
"""
if not os.path.isfile(reference):
raise PictureComparatorError("Reference file %s does not exist" % reference)
if not os.path.isfile(image):
raise PictureComparatorError("Image file %s does not exist" % image)
reference_img = cv2.imread(reference, 0)
image_img = cv2.imread(image, 0)
reference_width, reference_height = reference_img.shape[::-1]
image_width, image_height = image_img.shape[::-1]
if reference_width < image_width or reference_height < image_height:
raise PictureComparatorError("Reference picture must be greater than image to find")
method = cv2.TM_CCOEFF_NORMED
# Apply template Matching
res = cv2.matchTemplate(reference_img, image_img, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if max_val > 0.95:
return Rectangle(max_loc[0], max_loc[1], image_width, image_height)
else:
return None
def get_changed_pixels(self, reference, image, exclude_zones=[]):
"""
@param reference: reference picture
@param image: image to compare with
@param exclude_zones: list of zones (Rectangle objects) which should not be marked as differences.
@return: list of pixels which are different between reference and image (as numpy array)
a percentage of diff pixels
the difference image
"""
if not os.path.isfile(reference):
raise PictureComparatorError("Reference file %s does not exist" % reference)
if not os.path.isfile(image):
raise PictureComparatorError("Image file %s does not exist" % image)
# compute area where comparison will be done (<min_width>x<min_height>)
reference_img = cv2.imread(reference, 0)
image_img = cv2.imread(image, 0)
reference_height = len(reference_img)
reference_width = len(reference_img[0])
image_height = len(image_img)
image_width = len(image_img[0])
min_height = min(reference_height, image_height)
if min_height == 0:
min_width = 0
else:
min_width = min(reference_width, image_width)
diff = cv2.absdiff(reference_img[0:min_height, 0:min_width], image_img[0:min_height, 0:min_width])
pixels, diff_image = self._build_list_of_changed_pixels(diff, image_width, image_height, min_width, min_height, exclude_zones)
return pixels, len(pixels) * 100.0 / (image_height * image_width), diff_image
def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones):
"""
From a matrix of difference pixels (for each pixel, we have 0 if pixel is the same, or non-zero if they are different), creates
- list of pixels which are different
- a PNG image of the same size as 'step' image, where each different pixel is coloured RED
"""
# complete diff "image" to the size of step image
diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1)
# ignore excluded pixels
diff *= self._build_list_of_excluded_pixels2(exclude_zones, image_width, image_height)
# draw mask of differences
mask = numpy.ones((image_height, image_width, 1), dtype=uint8)
diff_image = numpy.zeros((image_height, image_width, 4), dtype=uint8)
cnd = diff[:,:] > 0 # says which pixels are non-zeros
diff_image[cnd] = mask[cnd]
diff_image *= numpy.array([0, 0, 255, 255], dtype=uint8) # print red pixels
diff_pixels = numpy.transpose(diff.nonzero());
return diff_pixels, diff_image
def _build_list_of_excluded_pixels2(self, exclude_zones, img_width, img_height):
"""
From the list of rectangles, build a list of pixels that these rectangles cover
"""
full_image = numpy.ones((img_height, img_width), dtype=uint8)
for x, y, width, height in exclude_zones:
# creates a matrix where 0 is placed on pixels to exclude, and 1 on pixel to keep
exclusion = numpy.zeros((height, width), dtype=uint8)
exclusion = numpy.pad(exclusion, ((min(y, img_height) , max(0, img_height - (y + height))), (min(x, img_width), max(0, img_width - (x + width)))), constant_values=1)
full_image *= exclusion[0:img_height, 0:img_width] # crop exclusion array if it's size is higher than image (exclusion zone outside of image dimensions)
return full_image
def _build_list_of_excluded_pixels(self, exclude_zones):
"""
From the list of rectangles, build a list of pixels that these rectangles cover
"""
pixels = []
for x, y, width, height in exclude_zones:
for row in range(height):
for col in range(width):
pixels.append(Pixel(col + x, row + y))
return pixels
| [
"snapshotServer.exceptions.PictureComparatorError.PictureComparatorError",
"cv2.absdiff",
"numpy.zeros",
"numpy.ones",
"cv2.imread",
"os.path.isfile",
"numpy.array",
"collections.namedtuple",
"cv2.minMaxLoc",
"logging.getLogger",
"cv2.matchTemplate"
] | [((272, 338), 'collections.namedtuple', 'collections.namedtuple', (['"""Rectangle"""', "['x', 'y', 'width', 'height']"], {}), "('Rectangle', ['x', 'y', 'width', 'height'])\n", (294, 338), False, 'import collections\n'), ((347, 390), 'collections.namedtuple', 'collections.namedtuple', (['"""Pixel"""', "['x', 'y']"], {}), "('Pixel', ['x', 'y'])\n", (369, 390), False, 'import collections\n'), ((401, 428), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (418, 428), False, 'import logging\n'), ((1033, 1057), 'cv2.imread', 'cv2.imread', (['reference', '(0)'], {}), '(reference, 0)\n', (1043, 1057), False, 'import cv2\n'), ((1078, 1098), 'cv2.imread', 'cv2.imread', (['image', '(0)'], {}), '(image, 0)\n', (1088, 1098), False, 'import cv2\n'), ((1502, 1553), 'cv2.matchTemplate', 'cv2.matchTemplate', (['reference_img', 'image_img', 'method'], {}), '(reference_img, image_img, method)\n', (1519, 1553), False, 'import cv2\n'), ((1599, 1617), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['res'], {}), '(res)\n', (1612, 1617), False, 'import cv2\n'), ((2604, 2628), 'cv2.imread', 'cv2.imread', (['reference', '(0)'], {}), '(reference, 0)\n', (2614, 2628), False, 'import cv2\n'), ((2649, 2669), 'cv2.imread', 'cv2.imread', (['image', '(0)'], {}), '(image, 0)\n', (2659, 2669), False, 'import cv2\n'), ((3053, 3149), 'cv2.absdiff', 'cv2.absdiff', (['reference_img[0:min_height, 0:min_width]', 'image_img[0:min_height, 0:min_width]'], {}), '(reference_img[0:min_height, 0:min_width], image_img[0:\n min_height, 0:min_width])\n', (3064, 3149), False, 'import cv2\n'), ((4179, 4234), 'numpy.ones', 'numpy.ones', (['(image_height, image_width, 1)'], {'dtype': 'uint8'}), '((image_height, image_width, 1), dtype=uint8)\n', (4189, 4234), False, 'import numpy\n'), ((4256, 4312), 'numpy.zeros', 'numpy.zeros', (['(image_height, image_width, 4)'], {'dtype': 'uint8'}), '((image_height, image_width, 4), dtype=uint8)\n', (4267, 4312), False, 'import numpy\n'), ((4434, 4476), 
'numpy.array', 'numpy.array', (['[0, 0, 255, 255]'], {'dtype': 'uint8'}), '([0, 0, 255, 255], dtype=uint8)\n', (4445, 4476), False, 'import numpy\n'), ((4832, 4880), 'numpy.ones', 'numpy.ones', (['(img_height, img_width)'], {'dtype': 'uint8'}), '((img_height, img_width), dtype=uint8)\n', (4842, 4880), False, 'import numpy\n'), ((765, 790), 'os.path.isfile', 'os.path.isfile', (['reference'], {}), '(reference)\n', (779, 790), False, 'import os\n'), ((810, 880), 'snapshotServer.exceptions.PictureComparatorError.PictureComparatorError', 'PictureComparatorError', (["('Reference file %s does not exist' % reference)"], {}), "('Reference file %s does not exist' % reference)\n", (832, 880), False, 'from snapshotServer.exceptions.PictureComparatorError import PictureComparatorError\n'), ((896, 917), 'os.path.isfile', 'os.path.isfile', (['image'], {}), '(image)\n', (910, 917), False, 'import os\n'), ((937, 999), 'snapshotServer.exceptions.PictureComparatorError.PictureComparatorError', 'PictureComparatorError', (["('Image file %s does not exist' % image)"], {}), "('Image file %s does not exist' % image)\n", (959, 999), False, 'from snapshotServer.exceptions.PictureComparatorError import PictureComparatorError\n'), ((1331, 1409), 'snapshotServer.exceptions.PictureComparatorError.PictureComparatorError', 'PictureComparatorError', (['"""Reference picture must be greater than image to find"""'], {}), "('Reference picture must be greater than image to find')\n", (1353, 1409), False, 'from snapshotServer.exceptions.PictureComparatorError import PictureComparatorError\n'), ((2255, 2280), 'os.path.isfile', 'os.path.isfile', (['reference'], {}), '(reference)\n', (2269, 2280), False, 'import os\n'), ((2300, 2370), 'snapshotServer.exceptions.PictureComparatorError.PictureComparatorError', 'PictureComparatorError', (["('Reference file %s does not exist' % reference)"], {}), "('Reference file %s does not exist' % reference)\n", (2322, 2370), False, 'from 
snapshotServer.exceptions.PictureComparatorError import PictureComparatorError\n'), ((2386, 2407), 'os.path.isfile', 'os.path.isfile', (['image'], {}), '(image)\n', (2400, 2407), False, 'import os\n'), ((2427, 2489), 'snapshotServer.exceptions.PictureComparatorError.PictureComparatorError', 'PictureComparatorError', (["('Image file %s does not exist' % image)"], {}), "('Image file %s does not exist' % image)\n", (2449, 2489), False, 'from snapshotServer.exceptions.PictureComparatorError import PictureComparatorError\n'), ((5062, 5103), 'numpy.zeros', 'numpy.zeros', (['(height, width)'], {'dtype': 'uint8'}), '((height, width), dtype=uint8)\n', (5073, 5103), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
import math
import binascii
import platform
import functools
import traceback
import numpy as np
from . import label_view, table_view, header_view, cache
from .core import Window, MessageWindow
from .widgets.scrolled_frame import ScrolledFrame
from .widgets.tooltip import ToolTip
from .table_view import array_structure_to_table
from ..reader.core import pds4_read
from ..reader.data import PDS_array
from ..reader.read_tables import table_data_size_check
from ..utils.helpers import is_array_like
from ..utils.logging import logger_init
from ..extern import six
from ..extern.six.moves.tkinter import (Menu, Canvas, Frame, Scrollbar, Label, Entry, Button, Radiobutton,
BooleanVar, StringVar)
from ..extern.six.moves.tkinter_tkfiledialog import askopenfilename, asksaveasfilename
# Initialize the module-level logger via the package's logger_init helper
logger = logger_init()
#################################
class StructureListWindow(Window):
    """ Window that summarizes the structures showing some of their properties and giving buttons to open them """
    def __init__(self, viewer, quiet=False, lazy_load=False, show_headers=False, withdrawn=False):
        """ Create the structure summary window.

        Parameters:
            viewer: the application viewer this window belongs to (passed to the base `Window`).
            quiet: when True, warnings during label/data read-in are hidden (see `_update_quiet`).
            lazy_load: when True, structure data is read only on first access instead of at open time.
            show_headers: when True, Header structures are listed in the summary.
            withdrawn: passed to the base `Window`; presumably starts the window hidden — confirm in base class.
        """
        # Set initial necessary variables and do other required initialization procedures
        super(StructureListWindow, self).__init__(viewer, withdrawn=withdrawn)
        # Set window width to not be resizable
        self._widget.resizable(width=0, height=1)
        # Initialize structure list window variables
        self._canvas = None
        self._scrolled_frame = None
        self._structure_list = None
        self._label_open = False
        # Create menu
        self._add_menus(quiet, lazy_load, show_headers)
        # Add notify event for scroll wheel (used to scroll structure list)
        self._bind_scroll_event(self._mousewheel_scroll)
    # Opens the label, reads in any structures it contains, calls _draw_summary()
    def open_label(self, filename=None, from_existing_structures=None):
        """ Open a label from *filename*, or reuse *from_existing_structures*, then draw the summary.

        Exactly one of the two arguments must be given; raises TypeError if both are None.
        Errors during label processing are shown in a MessageWindow rather than raised.
        """
        if (filename is None) and (from_existing_structures is None):
            raise TypeError('Cannot open a label without either a filename or existing StructureList.')
        # Reset the summary if a label is already open
        if self._label_open:
            self.reset()
        # Open the label and data, or show an error if one occurs
        try:
            # Lazy-load all structures from file (to obtain meta data)
            if filename is not None:
                self._structure_list = pds4_read(filename, quiet=self.menu_option('quiet'),
                                                 lazy_load=True, decode_strings=False)
                cache.write_recently_opened(filename)
            # Load (lazy if previously was lazy) structures from existing list
            else:
                self._structure_list = from_existing_structures
            # Set title
            title = 'Data Structure Summary' if len(self._structure_list) > 0 else 'Label'
            title += '' if (filename is None) else " for '{0}'".format(filename)
            self.set_window_title("{0} - {1}".format(self.get_window_title(), title))
        except Exception as e:
            trace = traceback.format_exc()
            if isinstance(trace, six.binary_type):
                trace = trace.decode('utf-8')
            log = logger.get_handler('log_handler').get_recording()
            error = log + '\n' + trace
            MessageWindow(self._viewer, 'An Error Occurred!', error)
            logger.exception('An error occurred during label processing.')
        else:
            # Read data from file (or try to access if it exists) for each structure if lazy-load disabled
            if not self.menu_option('lazy_load'):
                self.reify_structures()
            # Draw the summary window
            self._draw_summary()
            self._label_open = True
    # Read-in the data for all unread structures
    def reify_structures(self):
        """ Load data for every structure not yet read, warning first about large tables. """
        # Get list of unread structures
        unread_structures = [structure for structure in self._structure_list if not structure.data_loaded]
        # Inform user about large structures
        large_structures = []
        for structure in unread_structures:
            if structure.is_table() and table_data_size_check(structure, quiet=True):
                large_structures.append(structure)
        if large_structures:
            large_struct_message = 'The following structures: \n\n'
            for structure in large_structures:
                large_struct_message += '{0} structure: {1} \n'.format(structure.type, structure.id)
            large_struct_message += (
                '\ncontain a large amount of data. Loading them may take a while. Recommend lazy-load '
                'be enabled via the Options menu.'
            )
            warning_window = self._issue_warning(large_struct_message, log=False, show=True)
        # Read data for all unread structures
        for structure in unread_structures:
            self._reify_structure(structure, size_check=False)
    # Read-in data for a particular structure. Returns False if an error was encountered during reification.
    def _reify_structure(self, structure, size_check=True):
        """ Load the data of *structure*, optionally warning first when it is a large table.

        @return: True on success (or if data was already loaded), False if an error occurred.
        """
        # Skip reifying structure if it has already been reified
        if structure.data_loaded:
            return True
        # Initialize logging for data read-in
        exception_occurred = False
        logger.get_handler('log_handler').begin_recording()
        # On request and for large, still unread, structures issue a warning message prior to
        # attempting to read-in the data
        warning_window = None
        is_large_table = structure.is_table() and table_data_size_check(structure, quiet=True)
        if size_check and is_large_table:
            message = ("{0} structure '{1}' contains a large amount of data. This process may take "
                       "a while. Loading...".format(structure.type, structure.id))
            warning_window = self._issue_warning(message, log=False, show=True)
        # Read the data for the structure
        try:
            logger.info('Now processing a {0} structure: {1}'.format(structure.type, structure.id))
            structure.data
        except Exception:
            if structure.data_loaded:
                del structure.data
            exception_occurred = True
            logger.exception('An error occurred during data read-in.')
        # Close warning window once loading is finished
        if warning_window:
            warning_window.close()
        # Add logging messages for data read-in to log
        log = logger.get_handler('log_handler').get_recording()
        self._structure_list.read_in_log += log
        # Show errors that occurred on loading
        if exception_occurred:
            MessageWindow(self._viewer, 'An Error Occurred!', log)
        return not exception_occurred
    # Draws a summary of the opened label onto the screen
    def _draw_summary(self):
        # Add View Menu and the Export Menu, if not already done
        if not self._label_open:
            # Add a View menu
            view_menu = self._add_menu('View', in_menu='main')
            view_menu.add_command(label='Full Label', command=self._open_label)
            view_menu.add_command(label='Read-In Log', command=lambda:
                MessageWindow(self._viewer, 'Label/Data Read-in Log', self._structure_list.read_in_log))
            view_menu.add_separator()
            view_menu.add_checkbutton(label='Show Headers', onvalue=True, offvalue=False,
                                      variable=self._menu_options['show_headers'])
            # Add an Export menu
            self._add_menu('Export', in_menu='main', postcommand=self._update_export)
        # Draw summary for structures found
        has_structures = len(self._structure_list) > 0
        all_headers = all(structure.is_header() for structure in self._structure_list)
        if has_structures and (not all_headers or self.menu_option('show_headers')):
            self._draw_structure_summary()
        # Draw summary that shows only the full label if no data structures to display are found
        else:
            self._draw_empty_summary()
    # Draws a summary of the label of when the StructureList contains supported data structures
    def _draw_structure_summary(self):
        # Shorten the Name column if we only have structures with short names
        structure_names = [structure.id for structure in self._structure_list]
        name_column_size = 200 if len(max(structure_names, key=len)) > 8 else 125
        # Create main canvas (which will contain the header frame, and a structures canvas for the rest)
        self._canvas = Canvas(self._widget, highlightthickness=0)
        # Add the header
        header_frame = Frame(self._canvas, takefocus=False)
        header_frame.pack(side='top', fill='y', expand=0, anchor='nw', pady=(2, 0))
        index = Label(header_frame, text='Index', font=self.get_font(10, 'bold'))
        header_frame.grid_columnconfigure(0, minsize=100)
        index.grid(row=0, column=0)
        name = Label(header_frame, text='Name', font=self.get_font(10, 'bold'))
        header_frame.grid_columnconfigure(1, minsize=name_column_size)
        name.grid(row=0, column=1)
        type = Label(header_frame, text='Type', font=self.get_font(10, 'bold'))
        header_frame.grid_columnconfigure(2, minsize=165)
        type.grid(row=0, column=2)
        dimension = Label(header_frame, text='Dimension', font=self.get_font(10, 'bold'))
        header_frame.grid_columnconfigure(3, minsize=165)
        dimension.grid(row=0, column=3)
        # Create structures frame, which will contain all structure meta data inside it
        self._scrolled_frame = ScrolledFrame(self._canvas, vscrollmode='dynamic', hscrollmode='none')
        self._scrolled_frame.pack(side='bottom', fill='both', expand=1, pady=(12, 0))
        structures_frame = self._scrolled_frame.interior
        # Show structure meta data for each structure
        for i, structure in enumerate(self._structure_list):
            # Skips headers if requested
            if structure.is_header() and not self.menu_option('show_headers'):
                continue
            # Index
            index = Label(structures_frame, text=i, font=self.get_font())
            structures_frame.grid_columnconfigure(0, minsize=100)
            index.grid(row=i, column=0, pady=(2, 7))
            # Name or LID
            name_text = structure.id if (len(structure.id) <= 30) else structure.id[:29] + '...'
            name = Label(structures_frame, text=name_text, font=self.get_font())
            structures_frame.grid_columnconfigure(1, minsize=name_column_size)
            name.grid(row=i, column=1, pady=(2, 7))
            if len(structure.id) > 30:
                ToolTip(name, structure.id)
            # Type
            type = Label(structures_frame, text=structure.type, font=self.get_font())
            structures_frame.grid_columnconfigure(2, minsize=165)
            type.grid(row=i, column=2, pady=(2, 7))
            # Dimensions
            if structure.is_header():
                dimensions_text = '---'
            else:
                dimensions = structure.meta_data.dimensions()
                # NOTE(review): assumes every non-header structure is a table or an array;
                # dimensions_text would be unbound otherwise — confirm against StructureList contents
                if structure.is_table():
                    dimensions_text = '{0} cols X {1} rows'.format(dimensions[0], dimensions[1])
                elif structure.is_array():
                    dimensions_text = ' X '.join(six.text_type(dim) for dim in dimensions)
            dimension = Label(structures_frame, text=dimensions_text, font=self.get_font())
            structures_frame.grid_columnconfigure(3, minsize=165)
            dimension.grid(row=i, column=3, pady=(2, 7))
            # Open Data View Buttons
            button_frame = Frame(structures_frame)
            button_frame.grid(row=i, column=4, padx=(30, 40), sticky='w')
            font = self.get_font(weight='bold')
            open_label = functools.partial(self._open_label, i)
            view_label = Button(button_frame, text='Label', font=font, width=7, command=open_label)
            view_label.pack(side='left')
            if _is_tabular(structure):
                open_table = functools.partial(self._open_table, i)
                view_table = Button(button_frame, text='Table', font=font, width=7, command=open_table)
                view_table.pack(side='left')
            if _is_plottable(structure):
                open_plot = functools.partial(self._open_plot, i)
                view_plot = Button(button_frame, text='Plot', font=font, width=7, command=open_plot)
                view_plot.pack(side='left')
            if _is_displayable(structure):
                open_image = functools.partial(self._open_image, i)
                view_image = Button(button_frame, text='Image', font=font, width=7, command=open_image)
                view_image.pack(side='left')
            if _is_parsable_header(structure):
                open_header = functools.partial(self._open_header, i)
                view_header = Button(button_frame, text='Text', font=font, width=7, command=open_header)
                view_header.pack(side='left')
        # Set the width and the initial height of the window
        self._widget.update_idletasks()
        half_screen_height = self._get_screen_size()[1] // 2
        window_height = structures_frame.winfo_height() + header_frame.winfo_reqheight() + 16
        window_width = structures_frame.winfo_reqwidth()
        if window_height > half_screen_height:
            # Find a window height such that it exactly fits the closest number of structures which can
            # fit in half the screen height (i.e. such that no structure fits only part way on the screen)
            possible_heights = [30*i + header_frame.winfo_reqheight() + 16
                                for i in range(0, len(self._structure_list))]
            window_height = min(possible_heights, key=lambda x:abs(x-half_screen_height))
        self.set_window_geometry(window_width, window_height)
        # Add line dividing header and summary data
        self._canvas.create_line(5, 27, window_width - 5, 27)
        # Add the View text header
        view = Label(header_frame, text='View', font=self.get_font(10, 'bold'))
        view_left_pad = math.floor((window_width - 100 - name_column_size - 165 - 165) / 2 - 25)
        view_left_pad = view_left_pad if view_left_pad > 0 else 0
        view.grid(row=0, column=4, padx=(view_left_pad, 0))
        # Once all widgets are added, we pack the canvas. Packing it prior to this can result
        # in ugly resizing and redrawing as widgets are being added above.
        self._canvas.pack(fill='both', expand=1)
    # Draws a summary the label when the StructureList does not contain any supported data structures
    def _draw_empty_summary(self):
        # Create main canvas (which will contain the header frame, and a frame for the label info)
        self._canvas = Canvas(self._widget, highlightthickness=0)
        # Add the header
        header_frame = Frame(self._canvas, takefocus=False)
        header_frame.pack(side='top', fill='y', expand=0, anchor='nw', pady=(2, 0))
        type = Label(header_frame, text='Type', font=self.get_font(10, 'bold'))
        header_frame.grid_columnconfigure(0, minsize=165)
        type.grid(row=0, column=0)
        view = Label(header_frame, text='View', font=self.get_font(10, 'bold'))
        header_frame.grid_columnconfigure(1, minsize=100)
        view.grid(row=0, column=1, padx=(70, 0))
        # Create scrolled frame, which will contain info about the label
        self._scrolled_frame = ScrolledFrame(self._canvas, vscrollmode='dynamic', hscrollmode='none')
        self._scrolled_frame.pack(side='bottom', fill='both', expand=1, pady=(12, 0))
        label_info_frame = self._scrolled_frame.interior
        type = Label(label_info_frame, text=self._structure_list.type, font=self.get_font())
        label_info_frame.grid_columnconfigure(0, minsize=165)
        type.grid(row=0, column=0, pady=(2, 7))
        view_label = Button(label_info_frame, text='Full Label', font=self.get_font(weight='bold'), width=15,
                            command=self._open_label)
        header_frame.grid_columnconfigure(1, minsize=100)
        view_label.grid(row=0, column=1, padx=(30, 10), pady=(2, 7))
        # Set the width and the initial height of the window
        self._widget.update_idletasks()
        window_height = label_info_frame.winfo_height() + header_frame.winfo_reqheight() + 16
        window_width = label_info_frame.winfo_reqwidth()
        self.set_window_geometry(window_width, window_height)
        # Add line dividing header and summary data
        self._canvas.create_line(5, 27, window_width - 5, 27)
        # Once all widgets are added, we pack the canvas. Packing it prior to this can result
        # in ugly resizing and redrawing as widgets are being added above.
        self._canvas.pack(fill='both', expand=1)
    # Opens the label view for a structure
    def _open_label(self, structure_idx=None):
        """ Show the label view; the full label when *structure_idx* is None, else that structure's label. """
        if structure_idx is None:
            initial_display = 'full label'
            structure_label = None
        else:
            initial_display = 'object label'
            structure_label = self._structure_list[structure_idx].label
        label_view.open_label(self._viewer, self._structure_list.label, structure_label, initial_display)
    # Opens a header view for a structure
    def _open_header(self, structure_idx):
        # Read-in data for the structure if that has not happened already
        structure = self._structure_list[structure_idx]
        reified = self._reify_structure(structure, size_check=True)
        # Do not attempt to open table view if an error was encountered during reification
        if not reified:
            return
        # Open a header view
        if _is_parsable_header(structure):
            header_view.open_header(self._viewer, structure)
        else:
            raise TypeError('Cannot show header view of a non-parsable structure with type ' + structure.type)
    # Opens a table view for a structure
    def _open_table(self, structure_idx):
        # Read-in data for the structure if that has not happened already
        structure = self._structure_list[structure_idx]
        reified = self._reify_structure(structure, size_check=True)
        # Do not attempt to open table view if an error was encountered during reification
        if not reified:
            return
        # Open the table view
        if _is_tabular(structure):
            # Arrays are shown in table form by first converting them to a table structure
            if structure.is_array():
                structure = array_structure_to_table(structure, _copy_data=False)
            table_view.open_table(self._viewer, structure)
        else:
            raise TypeError('Cannot show table view of structure having type ' + structure.type)
    # Opens a plot view for a structure
    def _open_plot(self, structure_idx):
        # Import plot view; this module requires MPL, so we import it here as opposed to at the top.
        from . import plot_view
        # Read-in data for the structure if that has not happened already
        structure = self._structure_list[structure_idx]
        reified = self._reify_structure(structure, size_check=True)
        # Do not attempt to open plot view if an error was encountered during reification
        if not reified:
            return
        # Open the plot view
        if _is_plottable(structure):
            if structure.is_array():
                structure = array_structure_to_table(structure)
            plot_view.open_plot_column_select(self._viewer, structure)
        else:
            raise TypeError('Cannot show plot of non-plottable structure with type ' + structure.type)
    # Opens an image view for a structure
    def _open_image(self, structure_idx):
        # Import image view; this module requires MPL, so we import it here as opposed to at the top.
        from . import image_view
        # Read-in data for the structure if that has not happened already
        structure = self._structure_list[structure_idx]
        reified = self._reify_structure(structure, size_check=True)
        # Do not attempt to open image view if an error was encountered during reification
        if not reified:
            return
        # Open the image view
        if _is_displayable(structure):
            image_view.open_image(self._viewer, structure)
        else:
            raise TypeError('Cannot show image view of structure having type ' + structure.type)
    # Dialog window to create a new summary view for another label
    def _open_file_box(self, new_window=True):
        """ Show an open-file dialog; opens the chosen label in this window or a new one. """
        # On Linux, the default filetype selected goes first. On Windows it depends on the version of the
        # askopenfilename dialog being used. There is an old bug in Tkinter, at least under Windows 7, where
        # the older Windows dialog is used; and this older dialog also takes the default filetype first, but
        # the newer dialog takes it last. Ultimately this setting for Windows should be based on the system
        # that the frozen distribution is packaged, such that the correct default filetype is first. On Mac
        # adding any type seems to only allow that type to be selected, so we ignore this option.
        if platform.system() == 'Darwin':
            filetypes = []
        else:
            filetypes = [('XML Files', '.xml'), ('All Files', '*')]
        initial_dir = cache.get_last_open_dir(if_exists=True)
        filename = askopenfilename(title='Open Label',
                                   parent=self._widget,
                                   filetypes=filetypes,
                                   initialdir=initial_dir)
        # Check that filename is a string type (some OS' return binary str and some unicode for filename).
        # Also check that it is neither None or empty (also depends on OS)
        if isinstance(filename, (six.binary_type, six.text_type)) and (filename.strip() != ''):
            if new_window:
                open_summary(self._viewer, filename=filename,
                             quiet=self.menu_option('quiet'), lazy_load=self.menu_option('lazy_load'))
            else:
                self.open_label(filename)
    def _add_menus(self, quiet, lazy_load, show_headers):
        """ Build the File and Options menus and register the menu-option variables. """
        # Initialize menu options
        self._menu_options['quiet'] = BooleanVar()
        self._add_trace(self._menu_options['quiet'], 'w', self._update_quiet, default=quiet)
        self._menu_options['lazy_load'] = BooleanVar()
        self._add_trace(self._menu_options['lazy_load'], 'w', self._update_lazy_load, default=lazy_load)
        self._menu_options['show_headers'] = BooleanVar()
        self._add_trace(self._menu_options['show_headers'], 'w', self._update_show_headers, default=show_headers)
        # Add a File menu
        file_menu = self._add_menu('File', in_menu='main')
        file_menu.add_command(label='Open...', command=lambda: self._open_file_box(False))
        file_menu.add_command(label='Open from URL...', command=lambda:
            self._add_dependent_window(OpenFromURLWindow(self._viewer, self._widget, self)))
        file_menu.add_command(label='Open in New Window...', command=lambda: self._open_file_box(True))
        file_menu.add_separator()
        # Add an Open Recent sub-menu to File menu
        self._add_menu('Open Recent', in_menu='File', postcommand=self._update_recently_opened)
        file_menu.add_separator()
        file_menu.add_command(label='Exit', command=self._viewer.quit)
        # Add an Options menu
        options_menu = self._add_menu('Options', in_menu='main')
        options_menu.add_checkbutton(label='Lazy-Load Data', onvalue=True, offvalue=False,
                                     variable=self._menu_options['lazy_load'])
        options_menu.add_checkbutton(label='Hide Warnings', onvalue=True, offvalue=False,
                                     variable=self._menu_options['quiet'])
    # Updates the logger state to match current menu options value
    def _update_quiet(self, *args):
        if self.menu_option('quiet'):
            logger.quiet()
        else:
            logger.loud()
    # On disable of lazy-load, loads all data immediately
    def _update_lazy_load(self, *args):
        if self._label_open and (not self.menu_option('lazy_load')):
            self.reify_structures()
    # Updates whether Headers structures are shown in the structure summary
    def _update_show_headers(self, *args):
        if not self._label_open:
            return
        self._erase_summary()
        self._draw_summary()
    # Updates the export menu just prior to showing it
    def _update_export(self):
        if not self._label_open:
            return
        # Clear out all existing menu entries
        export_menu = self._menu('Export')
        export_menu.delete(0, export_menu.index('last'))
        # Show export buttons for arrays and tables
        has_export_structures = False
        for structure in self._structure_list:
            if structure.is_array() or structure.is_table():
                has_export_structures = True
                export_menu.add_command(label='{0}...'.format(structure.id[0:29]),
                                        command=lambda structure=structure:
                    self._add_dependent_window(DataExportWindow(self._viewer, self._widget, structure)))
        # Handle case when are no supported export structures
        if not has_export_structures:
            export_menu.add_command(label='None', state='disabled')
    # Updates recently opened menu just prior to showing it
    def _update_recently_opened(self):
        recent_paths = cache.get_recently_opened()
        recent_menu = self._menu('Open Recent', in_menu='File')
        # Clear out all existing menu entries
        recent_menu.delete(0, recent_menu.index('last'))
        # Handle case when there are no recently opened files
        if len(recent_paths) == 0:
            recent_menu.add_command(label='None', state='disabled')
        # Show recently opened files
        else:
            for path in recent_paths:
                recent_menu.add_command(label=path, command=lambda path=path: self.open_label(path))
    # Called on mouse wheel scroll action, scrolls structure list up or down if scrollbar is shown
    def _mousewheel_scroll(self, event):
        if (not self._label_open) or (not self._scrolled_frame.can_vert_scroll()):
            return
        event_delta = int(-1 * event.delta)
        # On Windows/Linux the wheel delta arrives in multiples of 120; macOS reports line counts directly
        if platform.system() != 'Darwin':
            event_delta //= 120
        self._scrolled_frame.yview_scroll(event_delta, 'units')
    # Erases the structure list summary as shown on the screen
    def _erase_summary(self):
        if self._label_open:
            self._scrolled_frame.destroy()
            self._canvas.destroy()
    # Resets the window to a state before any data was opened
    def reset(self):
        """ Return the window to its pre-open state (title, menus, summary, structure list). """
        if self._label_open:
            self.set_window_title(self.get_window_title().split('-')[0].strip())
            self._remove_menu('View', in_menu='main')
            self._remove_menu('Export', in_menu='main')
            self._erase_summary()
            self._structure_list = None
            self._label_open = False
    def close(self):
        """ Release the structure list and close the window. """
        self._structure_list = None
        super(StructureListWindow, self).close()
class OpenFromURLWindow(Window):
    """ Window for entering a remote URL of a label to open. """
    def __init__(self, viewer, master, summary_window):
        """ Create the URL entry dialog.

        Parameters:
            viewer: the application viewer (passed to the base `Window`).
            master: the Tk widget this dialog is transient for.
            summary_window: the StructureListWindow whose `open_label` is called with the entered URL.
        """
        # Set initial necessary variables and do other required initialization procedures
        super(OpenFromURLWindow, self).__init__(viewer, withdrawn=True)
        # Set the title
        self.set_window_title('{0} - Open Label from URL'.format(self.get_window_title()))
        # Set OpenFromURLWindow to be transient, meaning it does not show up in the task bar and it stays
        # on top of its master window. This mimics behavior of normal open windows, that ask the path to open,
        # encouraging user to have at most one of these open.
        self._widget.transient(master)
        # Initialize open from URL window variables
        self._summary_window = summary_window
        self._url = StringVar()
        self._url.set('')
        # Draw the main window content
        self._show_content()
        # Update window to ensure it has taken its final form, then show it
        self._widget.update_idletasks()
        self.show_window()
    # Draws the main content of the window
    def _show_content(self):
        """ Lay out the URL entry field and the Open/Cancel buttons. """
        # Add URL box
        url_box = Frame(self._widget)
        url_box.pack(anchor='center', padx=45, pady=(20, 10))
        url_label = Label(url_box, text='Enter Label URL:', font=self.get_font(9))
        url_label.pack(anchor='nw', pady=(0, 3))
        url = Entry(url_box, bg='white', bd=0, highlightthickness=1, highlightbackground='gray',
                    width=35, textvariable=self._url)
        url.pack(pady=(0, 10))
        separator = Frame(url_box, height=2, bd=1, relief='sunken')
        separator.pack(side='bottom', fill='x', pady=5)
        # Add buttons to Open / Cancel
        button_box = Frame(self._widget)
        button_box.pack(side='bottom', anchor='ne', padx=45, pady=(0, 20))
        open_button = Button(button_box, bg=self.get_bg(), width=10, text='Open',
                             font=self.get_font(weight='bold'), command=self._open_label)
        open_button.pack(side='left', padx=(0, 5))
        cancel_button = Button(button_box, bg=self.get_bg(), width=10, text='Cancel',
                               font=self.get_font(weight='bold'), command=self.close)
        cancel_button.pack(side='left')
        # Place cursor on URL bar to start
        url.focus()
    # Opens a summary window from a remote URL
    def _open_label(self):
        """ Open the entered URL via the parent summary window, defaulting the scheme to http://. """
        url = self._url.get().strip()
        if url != '':
            # Default to HTTP when no scheme was given
            if '://' not in url:
                url = 'http://{0}'.format(url)
            self._summary_window.open_label(url)
            self.close()
    def close(self):
        """ Drop the reference to the summary window and close the dialog. """
        self._summary_window = None
        super(OpenFromURLWindow, self).close()
class DataExportWindow(Window):
    """ Window used to show export-to-file options for data """

    def __init__(self, viewer, master, structure):
        """Create and immediately show the export-options dialog for *structure*."""
        # Set initial necessary variables and do other required initialization procedures
        super(DataExportWindow, self).__init__(viewer, withdrawn=True)
        # Set the title
        # NOTE(review): local 'type' shadows the builtin of the same name in this method
        type = 'Array' if structure.is_array() else 'Table'
        self.set_window_title("{0} - Export {1} '{2}'".format(self.get_window_title(), type, structure.id))
        # Set DataExportWindow to be transient, meaning it does not show up in the task bar and it stays
        # on top of its master window. This mimics behavior of normal save windows, that ask where to save,
        # encouraging user to have at most one of these open.
        self._widget.transient(master)
        # Initialize export window variables
        self._structure = structure
        self._output_format = StringVar()
        self._user_delimiter = StringVar()
        self._user_delimiter.set('')
        # Draw the main window content
        self._show_content()
        # Update window to ensure it has taken its final form, then show it
        self._widget.update_idletasks()
        self.show_window()

    # Draws the main content of the window
    def _show_content(self):
        # Add options box, allowing user to select delimiter for output
        options_box = Frame(self._widget)
        options_box.pack(anchor='center', padx=45, pady=(20, 15))
        # Arrays default to space-separated output; tables to fixed-width columns
        if self._structure.is_array():
            output_type = 'Values'
            self._output_format.set('space')
            radiobutton = Radiobutton(options_box, text='Space-Separated {0}'.format(output_type),
                                      variable=self._output_format, value='space')
            radiobutton.grid(row=0, column=0, sticky='W')
        else:
            output_type = 'Columns'
            self._output_format.set('fixed')
            radiobutton = Radiobutton(options_box, text='Fixed Width {0}'.format(output_type),
                                      variable=self._output_format, value='fixed')
            radiobutton.grid(row=0, column=0, sticky='W')
        radiobutton = Radiobutton(options_box, text='Comma-Separated {0}'.format(output_type),
                                  variable=self._output_format, value='csv')
        radiobutton.grid(row=1, column=0, sticky='W')
        radiobutton = Radiobutton(options_box, text='Tab-Separated {0}'.format(output_type),
                                  variable=self._output_format, value='tab')
        radiobutton.grid(row=2, column=0, sticky='W')
        radiobutton = Radiobutton(options_box, text='User-Defined Separator: ',
                                  variable=self._output_format, value='user')
        radiobutton.grid(row=3, column=0, sticky='W')
        custom_sep = Entry(options_box, bg='white', bd=0, highlightthickness=1, highlightbackground='gray',
                           width=5, textvariable=self._user_delimiter)
        custom_sep.grid(row=3, column=1, sticky='W', ipadx=2)
        separator = Frame(options_box, height=2, bd=1, relief='sunken')
        separator.grid(row=4, column=0, columnspan=5, sticky='WE', pady=(20, 5))
        # Add buttons to Save / Cancel
        button_box = Frame(self._widget)
        button_box.pack(side='bottom', anchor='center', pady=(0, 20))
        save_button = Button(button_box, bg=self.get_bg(), width=5, text='Save',
                             font=self.get_font(weight='bold'), command=self._save_file_box)
        save_button.pack(side='left', padx=(0, 5))
        cancel_button = Button(button_box, bg=self.get_bg(), width=7, text='Cancel',
                               font=self.get_font(weight='bold'), command=self.close)
        cancel_button.pack(side='left')

    # Dialog window to select where the exported data should be saved
    def _save_file_box(self):
        initial_dir = cache.get_last_open_dir(if_exists=True)
        filename = asksaveasfilename(title='Export Data',
                                     parent=self._widget,
                                     initialdir=initial_dir,
                                     initialfile='Untitled.tab')
        # Cancelled dialogs return '' or () depending on platform/Tk version
        if filename == '' or filename == ():
            return
        cache.write_last_open_dir(os.path.dirname(filename))
        # Export the data
        # Map the selected radio-button value to the actual delimiter string
        delimiter = {'fixed': None,
                     'csv': ',',
                     'tab': '\t',
                     'user': self._user_delimiter.get(),
                     }.get(self._output_format.get())
        _export_data(filename, data=self._structure.data, delimiter=delimiter)
        self.close()
def _is_tabular(structure):
""" Determines if a PDS4 structure can be shown as a table.
Tabular structures are either:
(1) Tables, or
(2) Arrays
Parameters
----------
structure : Structure
PDS4 structure to check.
Returns
-------
bool
True if *structure* can be displayed as a table, False otherwise.
"""
return structure.is_table() or structure.is_array()
def _is_plottable(structure):
""" Determines if a PDS4 structure is plottable.
Plottable structures are either:
(1) 1D arrays, or
(2) Tables
Parameters
----------
structure : Structure
PDS4 structure to check.
Returns
-------
bool
True if *structure* can be plotted, False otherwise.
"""
plottable = False
if structure.is_table():
plottable = True
elif structure.is_array() and structure.meta_data.num_axes() == 1:
plottable = True
return plottable
def _is_displayable(structure):
""" Determines if a PDS4 structure is displayable as an image.
Displayable structures are PDS4 arrays are those that are either:
(1) a 2D array, or
(2) sub-types of Array_2D or Array_3D, or
(3) have a display dictionary
Parameters
----------
structure : Structure
PDS4 structure to check.
Returns
-------
bool
True if *structure* can be displayed as an image, False otherwise.
"""
if structure.is_array():
has_display_dict = structure.meta_data.display_settings is not None
is_2d_array = structure.meta_data.num_axes() == 2
if ('Array_2D_' in structure.type) or ('Array_3D_' in structure.type) or is_2d_array or has_display_dict:
return True
return False
def _is_parsable_header(structure):
""" Determines if a PDS4 header structure can be parsed into plain text.
Header structures that can be displayed as text are:
(1) Plain-text Headers
(2) Headers that can be turned into plain-text via a parser
"""
return structure.is_header() and hasattr(structure.parser(), 'to_string')
def _export_data(filename, data, delimiter=None, fmt=None, newline='\r\n'):
    """ Exports PDS4 data to a text file.

    Notes
    -----
    Supports PDS4 Tables and Arrays.

    This function is not particularly fast, however it is designed to work
    for all PDS4 data.

    Parameters
    ----------
    filename : str or unicode
        The filename, including path, to write the exported output to.
    data : PDS_narray or PDS_marray
        The data to export to file, i.e. the ``structure.data`` attribute.
    delimiter : str or unicode, optional
        The delimiter between each value. Defaults to None, which indicates
        fixed-width output for PDS4 tables, and separated by a single space
        for PDS4 arrays.
    fmt : str, unicode or list[str or unicode], optional
        For PDS4 tables, the format for all, or each, field. For PDS4 arrays,
        the format for data values. Set 'none' to indicate no formatting
        of output values. Defaults to None, which uses the PDS4 field_format
        in the meta data when available.
    newline : str or unicode, optional
        The line separator for each record (row) of a PDS4 table. Defaults to CRLF.
        Has no effect when exporting PDS4 arrays.

    Returns
    -------
    None
    """
    # Formats a single data value according to PDS4 field_format
    def format_value(datum, format, is_bitstring=False, dtype=None):
        if isinstance(datum, six.binary_type):
            # Handle bitstrings
            # (Note: we ensure dtype has correct length; otherwise trailing null bytes are skipped by NumPy)
            if is_bitstring:
                datum_bytes = np.asarray(datum, dtype=dtype).tobytes()
                datum = '0x' + binascii.hexlify(datum_bytes).decode('ascii').upper()
            # Handle strings
            else:
                datum = datum.decode('utf-8')
        # Format datum
        try:
            # Convert from NumPy types into Python native types, otherwise the format statement below
            # can return the format itself, when format is invalid, rather than raising an exception
            if hasattr(datum, 'item'):
                datum = datum.item()
            value = format % datum
        except (ValueError, TypeError):
            value = datum
        return six.text_type(value)

    # Formats a NumPy ndarray to a string; similar to np.array2string's default functionality
    # but does not add newlines to deeply nested arrays
    def format_array(array, format, is_bitstring=False, dtype=None, _top_level=True):
        kwargs = {'format': format,
                  'is_bitstring': is_bitstring,
                  'dtype': dtype}
        output = '['
        for value in array:
            if value.ndim == 0:
                value = format_value(value, **kwargs)
                if np.issubdtype(array.dtype, np.character):
                    output += "'{0}' ".format(value)
                else:
                    output += "{0} ".format(value.strip())
            else:
                output += format_array(value, _top_level=False, **kwargs)
        output = output[:-1] + '] '
        output = output[:-1] if _top_level else output
        return output

    # Ensure input data is a PDS array, and unmask any masked arrays
    data = PDS_array(data, masked=False)

    # Determine if we are dealing with an array or a table
    is_array = False
    is_fixed_width = delimiter is None
    if data.dtype.names is not None:
        fields = [data[name] for name in data.dtype.names]
        bitstring_field_nums = [i for i, field in enumerate(fields)
                                if field.meta_data.data_type().issubtype('BitString')]
        num_fields = len(fields)
        last_field_num = num_fields - 1
    else:
        is_array = True
        data = data.reshape(-1)

    # For arrays
    if is_array:
        if delimiter is None:
            delimiter = ' '
        formats = '%s' if (fmt is None or fmt.lower() == 'none') else fmt
    # For tables
    else:
        # Obtain a list of formats for table fields
        if fmt is None:
            formats = []
            for field in fields:
                # Skip formatting scaled/offset values, because the PDS4 Standard is ambiguous on whether
                # field_format is pre- or post- scaling/offset. This can lead into incorrect formatting.
                meta_data = field.meta_data
                is_scaled = meta_data.get('scaling_factor', 1) != 1 or meta_data.get('value_offset', 0) != 0
                format = '%s' if is_scaled else meta_data.get('format', '%s')
                formats.append(format)
        elif isinstance(fmt, six.string_types) and fmt.lower() == 'none':
            # Fix: this previously read ``['%s'] * fmt`` (list multiplied by a string),
            # which raises TypeError; one '%s' per field is intended.
            formats = ['%s'] * num_fields
        elif is_array_like(fmt):
            if len(fmt) == num_fields:
                formats = fmt
            else:
                raise TypeError("Number of formats ({0}), does not match number of fields ({1})."
                                .format(len(fmt), num_fields))
        else:
            formats = [fmt] * num_fields

        # For fixed-width tables, we need to find the proper length of each field (column) such that each
        # column is separated by a single space (spaces part of string values will add to this)
        if is_fixed_width:
            fixed_formats = []
            delimiter = ' '
            for field_num, value in enumerate(fields):
                ndim = value.ndim
                dtype = value.dtype
                kwargs = {'format': formats[field_num],
                          'dtype': dtype,
                          'is_bitstring': field_num in bitstring_field_nums}
                if ndim > 1:
                    value = [format_array(element, **kwargs) for element in value]
                else:
                    value = [format_value(element, **kwargs) for element in value]
                # Left-justify strings and group fields, right-justify numbers
                max_length = len(max(value, key=len))
                sign = '-' if np.issubdtype(dtype, np.character) or (ndim > 1) else '+'
                fixed_format = '%{0}{1}s'.format(sign, max_length)
                fixed_formats.append(fixed_format)

    # Write exported output to file for arrays
    if is_array:
        data.tofile(filename, sep=delimiter, format=formats)
    # Write exported output to file for tables
    # Note: ideally np.savetxt would be able to achieve the same output, however it cannot. This
    # steps most critically for it (or more specifically `np.array2string`) adding newlines to highly
    # nested array outputs, resulting in extraneous newlines. Additionally it does not support a formatter
    # function, and it is not clear that it deals gracefully with UTF-8 until NumPy 1.14.
    else:
        with io.open(filename, 'w', newline='', encoding='utf-8') as file_handler:
            # Format and write out data
            for record in data:
                for field_num, value in enumerate(record):
                    # Format the value (either a scalar, or a group field) according to *fmt*
                    kwargs = {'format': formats[field_num],
                              'dtype': fields[field_num].dtype,
                              'is_bitstring': field_num in bitstring_field_nums}
                    if isinstance(value, np.ndarray):
                        output_value = format_array(value, **kwargs)
                    else:
                        output_value = format_value(value, **kwargs)
                    # For fixed-width tables, format the string-value to give the table its fixed width
                    if is_fixed_width:
                        output_value = fixed_formats[field_num] % output_value
                    # Add delimiter following the value
                    if field_num != last_field_num:
                        output_value += delimiter
                    file_handler.write(output_value)
                file_handler.write(newline)
def open_summary(viewer, filename=None, from_existing_structures=None, quiet=False, lazy_load=True):
    """ Open a new structure summary window (for structures found in label).

    Shows a summary of the structures found in the label, letting the appropriate structures be
    opened as a table, plot or image. Also allows label segments and the full label to be examined.

    Parameters
    ----------
    viewer : PDS4Viewer
        An instance of PDS4Viewer.
    filename : str or unicode, optional
        The filename, including full or relative path if necessary, of
        the PDS4 label describing the data to be viewed.
    from_existing_structures : StructureList, optional
        An existing StructureList, as returned by pds4_read(), to view. Takes
        precedence if given together with filename.
    lazy_load : bool, optional
        Do not read-in data of each data structure until attempt to view said
        data structure. Defaults to True.
    quiet : bool, optional
        Suppresses all info/warnings from being output and displayed.
        Defaults to False.

    Returns
    -------
    StructureListWindow
        The window instance for the structure summary.
    """
    # Create the window (it starts withdrawn when lazily loading)
    window = StructureListWindow(viewer, quiet=quiet, lazy_load=lazy_load, withdrawn=lazy_load)

    # Open the label when either a filename or an existing StructureList was supplied
    have_source = (filename is not None) or (from_existing_structures is not None)
    if have_source:
        window.open_label(filename=filename, from_existing_structures=from_existing_structures)

    # Reveal the window (only needed when it was created withdrawn)
    if lazy_load:
        window.show_window()

    return window
| [
"functools.partial",
"binascii.hexlify",
"os.path.dirname",
"numpy.asarray",
"math.floor",
"traceback.format_exc",
"io.open",
"platform.system",
"numpy.issubdtype"
] | [((14627, 14699), 'math.floor', 'math.floor', (['((window_width - 100 - name_column_size - 165 - 165) / 2 - 25)'], {}), '((window_width - 100 - name_column_size - 165 - 165) / 2 - 25)\n', (14637, 14699), False, 'import math\n'), ((12271, 12309), 'functools.partial', 'functools.partial', (['self._open_label', 'i'], {}), '(self._open_label, i)\n', (12288, 12309), False, 'import functools\n'), ((21715, 21732), 'platform.system', 'platform.system', ([], {}), '()\n', (21730, 21732), False, 'import platform\n'), ((27007, 27024), 'platform.system', 'platform.system', ([], {}), '()\n', (27022, 27024), False, 'import platform\n'), ((35028, 35053), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (35043, 35053), False, 'import os\n'), ((44359, 44411), 'io.open', 'io.open', (['filename', '"""w"""'], {'newline': '""""""', 'encoding': '"""utf-8"""'}), "(filename, 'w', newline='', encoding='utf-8')\n", (44366, 44411), False, 'import io\n'), ((3361, 3383), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (3381, 3383), False, 'import traceback\n'), ((12520, 12558), 'functools.partial', 'functools.partial', (['self._open_table', 'i'], {}), '(self._open_table, i)\n', (12537, 12558), False, 'import functools\n'), ((12778, 12815), 'functools.partial', 'functools.partial', (['self._open_plot', 'i'], {}), '(self._open_plot, i)\n', (12795, 12815), False, 'import functools\n'), ((13034, 13072), 'functools.partial', 'functools.partial', (['self._open_image', 'i'], {}), '(self._open_image, i)\n', (13051, 13072), False, 'import functools\n'), ((13300, 13339), 'functools.partial', 'functools.partial', (['self._open_header', 'i'], {}), '(self._open_header, i)\n', (13317, 13339), False, 'import functools\n'), ((40419, 40459), 'numpy.issubdtype', 'np.issubdtype', (['array.dtype', 'np.character'], {}), '(array.dtype, np.character)\n', (40432, 40459), True, 'import numpy as np\n'), ((39229, 39259), 'numpy.asarray', 'np.asarray', (['datum'], {'dtype': 
'dtype'}), '(datum, dtype=dtype)\n', (39239, 39259), True, 'import numpy as np\n'), ((43587, 43621), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.character'], {}), '(dtype, np.character)\n', (43600, 43621), True, 'import numpy as np\n'), ((39301, 39330), 'binascii.hexlify', 'binascii.hexlify', (['datum_bytes'], {}), '(datum_bytes)\n', (39317, 39330), False, 'import binascii\n')] |
"""
%prog [options] CONFIG
THOR detects differential peaks in multiple ChIP-seq profiles associated
with two distinct biological conditions.
Copyright (C) 2014-2016 <NAME> (<EMAIL>)
This program comes with ABSOLUTELY NO WARRANTY. This is free
software, and you are welcome to redistribute it under certain
conditions. Please see LICENSE file for details.
"""
# Python
from __future__ import print_function
import os
import sys
import pysam
import numpy as np
from math import fabs, log, ceil
from operator import add
from os.path import splitext, basename, join, isfile, isdir, exists
from optparse import OptionParser, OptionGroup
from datetime import datetime
# Internal
from ..THOR.postprocessing import merge_delete, filter_deadzones
from .MultiCoverageSet import MultiCoverageSet
from ..GenomicRegionSet import GenomicRegionSet
from ..THOR.get_extension_size import get_extension_size
from ..THOR.get_fast_gen_pvalue import get_log_pvalue_new
from .input_parser import input_parser
from ..Util import which, npath
from .. import __version__
# External
from numpy import linspace
from scipy.optimize import curve_fit
import matplotlib as mpl
#see http://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server
mpl.use('Agg')
import matplotlib.pyplot as plt
# Module-level report folder path; assigned at runtime before report generation.
FOLDER_REPORT = None

# Fix the global NumPy RNG seed so sampling-based estimation steps are reproducible.
np.random.seed(42)
def merge_output(bamfiles, dims, options, no_bw_files, chrom_sizes):
    """Merge the per-chunk BigWig files produced for each signal/replicate.

    Uses the external tools ``bigWigMerge``, ``sort`` and ``bedGraphToBigWig``
    via ``os.system``; assumes they are available on PATH.
    """
    for i in range(len(bamfiles)):
        # Map the flat bamfile index onto (signal, replicate): the first dims[0]
        # files belong to signal 1, the remainder to signal 2
        rep = i if i < dims[0] else i - dims[0]
        sig = 1 if i < dims[0] else 2
        temp_bed = npath(options.name + '-s%s-rep%s_temp.bed' % (sig, rep))
        files = [options.name + '-' + str(j) + '-s%s-rep%s.bw' % (sig, rep) for j in no_bw_files]
        if len(no_bw_files) > len(bamfiles):
            # Several chunk files per sample: merge them into one BigWig
            files = filter(lambda x: isfile(x), files)
            t = ['bigWigMerge'] + files + [temp_bed]
            c = " ".join(t)
            os.system(c)
            os.system("LC_COLLATE=C sort -k1,1 -k2,2n " + temp_bed + ' > ' + temp_bed + '.sort')
            t = ['bedGraphToBigWig', temp_bed + '.sort', chrom_sizes, options.name + '-s%s-rep%s.bw' % (sig, rep)]
            c = " ".join(t)
            os.system(c)
            # Clean up the intermediate chunk and temp files
            for f in files:
                os.remove(f)
            os.remove(temp_bed)
            os.remove(temp_bed + ".sort")
        else:
            # Single chunk per sample: simply rename the files
            # NOTE(review): the inner loop reuses loop variable 'i' from the outer
            # loop — confirm this shadowing is intentional
            ftarget = [options.name + '-s%s-rep%s.bw' % (sig, rep) for j in no_bw_files]
            for i in range(len(ftarget)):
                c = ['mv', files[i], ftarget[i]]
                c = " ".join(c)
                os.system(c)
def _func_quad_2p(x, a, c):
"""Return y-value of y=max(|a|*x^2 + x + |c|, 0),
x may be an array or a single float"""
res = []
if type(x) is np.ndarray:
for el in x:
res.append(max(el, fabs(a) * el**2 + el + fabs(c)))
return np.asarray(res)
else:
return max(x, fabs(a) * x**2 + x + fabs(c))
def _write_emp_func_data(data, name):
    """Write the empirical (mean, variance) pairs to a tab-separated data file."""
    means, variances = data[0], data[1]
    assert len(means) == len(variances)
    with open(FOLDER_REPORT_DATA + name + '.data', 'w') as handle:
        for mean, variance in zip(means, variances):
            print(mean, variance, sep='\t', file=handle)
def _plot_func(plot_data, outputdir):
    """Plot estimated and empirical function.

    NOTE(review): the *outputdir* parameter is not used in this body; the
    output paths come from the module globals FOLDER_REPORT_PICS /
    FOLDER_REPORT_DATA — confirm whether that is intended.
    """
    maxs = []  # max for x (mean), max for y (var)
    for i in range(2):
        tmp = np.concatenate((plot_data[0][i], plot_data[1][i]))  # plot_data [(m, v, p)], 2 elements
        # Use the 90th percentile as an outlier cutoff for the axis limits
        maxs.append(max(tmp[tmp < np.percentile(tmp, 90)]))
    for i in range(2):
        x = linspace(0, max(plot_data[i][0]), int(ceil(max(plot_data[i][0]))))
        y = _func_quad_2p(x, plot_data[i][2][0], plot_data[i][2][1])
        for j in range(2):
            # use matplotlib to plot function and datapoints
            # and save datapoints to files; j == 1 produces the axis-limited
            # ('norm') variant of the same plot
            ext = 'original'
            if j == 1:
                plt.xlim([0, maxs[0]])
                plt.ylim([0, maxs[1]])
                ext = 'norm'
            ax = plt.subplot(111)
            plt.plot(x, y, 'r', label='fitted polynomial')  # plot polynom
            plt.scatter(plot_data[i][0], plot_data[i][1], label='empirical datapoints')  # plot datapoints
            ax.legend()
            plt.xlabel('mean')
            plt.ylabel('variance')
            plt.title('Estimated Mean-Variance Function')
            name = "_".join(['mean', 'variance', 'func', 'cond', str(i), ext])
            _write_emp_func_data(plot_data[i], name)
            plt.savefig(FOLDER_REPORT_PICS + name + '.png')
            plt.close()
def _get_data_rep(overall_coverage, name, debug, sample_size):
"""Return list of (mean, var) points for samples 0 and 1"""
data_rep = []
for i in range(2):
cov = np.asarray(overall_coverage[i]) #matrix: (#replicates X #bins)
h = np.invert((cov==0).all(axis=0)) #assign True to columns != (0,..,0)
cov = cov[:,h] #remove 0-columns
r = np.random.randint(cov.shape[1], size=sample_size)
r.sort()
cov = cov[:,r]
m = list(np.squeeze(np.asarray(np.mean(cov*1.0, axis=0))))
n = list(np.squeeze(np.asarray(np.var(cov*1.0, axis=0))))
assert len(m) == len(n)
data_rep.append(zip(m, n))
data_rep[i].append((0,0))
data_rep[i] = np.asarray(data_rep[i])
if debug:
for i in range(2):
np.save(str(name) + "-emp-data" + str(i) + ".npy", data_rep[i])
for i in range(2):
data_rep[i] = data_rep[i][data_rep[i][:,0] < np.percentile(data_rep[i][:,0], 99.75)]
data_rep[i] = data_rep[i][data_rep[i][:,1] < np.percentile(data_rep[i][:,1], 99.75)]
return data_rep
def _fit_mean_var_distr(overall_coverage, name, debug, verbose, outputdir, report, poisson, sample_size=5000):
    """Estimate empirical distribution (quadr.) based on empirical distribution.

    Repeatedly draws sample points via _get_data_rep and fits the quadratic
    _func_quad_2p to them with curve_fit, retrying with fresh datapoints on
    fitting failure. Returns (fitted function of x, list of parameter arrays).

    NOTE(review): this code relies on Python 2 semantics — ``map`` is used as
    a list; on Python 3 ``np.asarray(map(...))`` would yield a 0-d object
    array. Confirm the target interpreter before porting.
    """
    done = False
    plot_data = []  # means, vars, paras
    while not done:
        data_rep = _get_data_rep(overall_coverage, name, debug, sample_size)
        res = []
        for i in range(2):
            try:
                m = np.asarray(map(lambda x: x[0], data_rep[i]))  # means list
                v = np.asarray(map(lambda x: x[1], data_rep[i]))  # vars list
                if len(m) > 0 and len(v) > 0:
                    try:
                        p, _ = curve_fit(_func_quad_2p, m, v)  # fit quad. function to empirical data
                    except:
                        print("Optimal parameters for mu-var-function not found, get new datapoints", file=sys.stderr)
                        break  # restart for loop
                else:
                    # No usable datapoints: fall back to identity-like parameters
                    p = np.array([0, 1])
                res.append(p)
                plot_data.append((m, v, p))
                if i == 1:
                    done = True
            except RuntimeError:
                print("Optimal parameters for mu-var-function not found, get new datapoints", file=sys.stderr)
                break  # restart for loop
    if report:
        _plot_func(plot_data, outputdir)
    if poisson:
        # Zeroed parameters make the emission reduce to a Poisson-like model
        print("Use Poisson distribution as emission", file=sys.stderr)
        p[0] = 0
        p[1] = 0
        res = [np.array([0, 0]), np.array([0, 0])]
    return lambda x: _func_quad_2p(x, p[0], p[1]), res
def dump_posteriors_and_viterbi(name, posteriors, DCS, states):
    """Write per-bin posterior probabilities and Viterbi state calls to BED files."""
    print("Computing info...", file=sys.stderr)
    posts_handle = open(name + '-posts.bed', 'w')
    viterbi_handle = open(name + '-states-viterbi.bed', 'w')
    for i in range(len(DCS.indices_of_interest)):
        cov1, cov2 = _get_covs(DCS, i)
        p1, p2, p3 = posteriors[i][0], posteriors[i][1], posteriors[i][2]
        chrom, start, end = DCS._index2coordinates(DCS.indices_of_interest[i])
        # One line per bin: Viterbi state call, and all three posteriors
        print(chrom, start, end, states[i], cov1, cov2, sep='\t', file=viterbi_handle)
        print(chrom, start, end, max(p3, max(p1, p2)), p1, p2, p3, cov1, cov2, sep='\t', file=posts_handle)
    posts_handle.close()
    viterbi_handle.close()
def _compute_pvalue(tuple_arg):
    """Unpack (x, y, side, distr) and return the negative log p-value."""
    x, y, side, distr = tuple_arg
    a = int(np.mean(x))
    b = int(np.mean(y))
    return -get_log_pvalue_new(a, b, side, distr)
def _get_log_ratio(l1, l2):
l1, l2 = float(np.sum(np.array(l1))), float(np.sum(np.array(l2)))
try:
res = l1/l2
except:
return sys.maxint
if res > 0:
try:
res = log(res)
if np.isinf(res):
return sys.maxint
return res
except:
print('error to compute log ratio', l1, l2, file=sys.stderr)
return sys.maxint
else:
return sys.maxint
def _merge_consecutive_bins(tmp_peaks, distr, merge=True):
    """Merge consecutive peaks and compute p-value. Return list
    <(chr, s, e, c1, c2, strand)> and <(pvalue)>

    NOTE(review): ``map(add, v1, ...)`` and ``map(_compute_pvalue, ...)``
    are used as lists — Python 2 semantics; on Python 3 these are lazy
    iterators, breaking re-use and len(). Confirm target interpreter.
    """
    peaks = []
    pvalues = []
    i, j, = 0, 0
    while i < len(tmp_peaks):
        j += 1
        c, s, e, c1, c2, strand, strand_pos, strand_neg = tmp_peaks[i]
        v1 = c1
        v2 = c2
        tmp_pos = [strand_pos]
        tmp_neg = [strand_neg]
        # merge bins: absorb following bins that start where this one ends
        # and share the same strand, accumulating their counts element-wise
        while merge and i + 1 < len(tmp_peaks) and e == tmp_peaks[i + 1][1] and strand == tmp_peaks[i + 1][5]:
            e = tmp_peaks[i + 1][2]
            v1 = map(add, v1, tmp_peaks[i + 1][3])
            v2 = map(add, v2, tmp_peaks[i + 1][4])
            tmp_pos.append(tmp_peaks[i + 1][6])
            tmp_neg.append(tmp_peaks[i + 1][7])
            i += 1
        side = 'l' if strand == '+' else 'r'
        pvalues.append((v1, v2, side, distr))
        ratio = _get_log_ratio(tmp_pos, tmp_neg)
        peaks.append((c, s, e, v1, v2, strand, ratio))
        i += 1
    pvalues = map(_compute_pvalue, pvalues)
    assert len(pvalues) == len(peaks)
    return pvalues, peaks
def _get_covs(DCS, i, as_list=False):
"""For a multivariant Coverageset, return mean coverage cov1 and cov2 at position i"""
if not as_list:
cov1 = int(np.mean(DCS.overall_coverage[0][:, DCS.indices_of_interest[i]]))
cov2 = int(np.mean(DCS.overall_coverage[1][:, DCS.indices_of_interest[i]]))
else:
cov1 = DCS.overall_coverage[0][:,DCS.indices_of_interest[i]]
cov1 = map(lambda x: x[0], np.asarray((cov1)))
cov2 = DCS.overall_coverage[1][:,DCS.indices_of_interest[i]]
cov2 = map(lambda x: x[0], np.asarray((cov2)))
return cov1, cov2
def get_peaks(name, DCS, states, exts, merge, distr, pcutoff, debug, no_correction, deadzones, merge_bin, p=70):
    """Merge Peaks, compute p-value and give out *.bed and *.narrowPeak"""
    exts = np.mean(exts)
    tmp_peaks = []
    tmp_data = []
    for i in range(len(DCS.indices_of_interest)):
        if states[i] not in [1, 2]:
            continue  # ignore background states
        # State 1 -> gain in condition 1 ('+'), state 2 -> gain in condition 2 ('-')
        strand = '+' if states[i] == 1 else '-'
        cov1, cov2 = _get_covs(DCS, i, as_list=True)
        # Strand-specific coverage sums across both signals, per condition
        cov1_strand = np.sum(DCS.overall_coverage_strand[0][0][:, DCS.indices_of_interest[i]]) + np.sum(DCS.overall_coverage_strand[1][0][:, DCS.indices_of_interest[i]])
        cov2_strand = np.sum(DCS.overall_coverage_strand[0][1][:, DCS.indices_of_interest[i]] + DCS.overall_coverage_strand[1][1][:, DCS.indices_of_interest[i]])
        chrom, start, end = DCS._index2coordinates(DCS.indices_of_interest[i])
        tmp_peaks.append((chrom, start, end, cov1, cov2, strand, cov1_strand, cov2_strand))
        side = 'l' if strand == '+' else 'r'
        tmp_data.append((sum(cov1), sum(cov2), side, distr))
    if not tmp_data:
        print('no data', file=sys.stderr)
        return [], [], []
    tmp_pvalues = map(_compute_pvalue, tmp_data)
    # Keep only candidate bins whose p-value exceeds the p-th percentile
    per = np.percentile(tmp_pvalues, p)
    tmp = []
    # NOTE(review): 'tmp_pvalues > per' compares a list with a float — this only
    # behaves element-wise if tmp_pvalues is a NumPy array; verify on the
    # target interpreter
    res = tmp_pvalues > per
    for j in range(len(res)):
        if res[j]:
            tmp.append(tmp_peaks[j])
    tmp_peaks = tmp
    pvalues, peaks, = _merge_consecutive_bins(tmp_peaks, distr, merge_bin)  # merge consecutive peaks and compute p-value
    regions = merge_delete(exts, merge, peaks, pvalues)  # postprocessing, returns GenomicRegionSet with merged regions
    if deadzones:
        regions = filter_deadzones(deadzones, regions)
    output = []
    pvalues = []
    ratios = []
    main_sep = ':'  # sep <counts> main_sep <counts> main_sep <pvalue>
    int_sep = ';'  # sep counts in <counts>
    for i, el in enumerate(regions):
        # el.data is a stringified tuple: '(counts..., pvalue, ratio)' — parse it back
        tmp = el.data.split(',')
        counts = ",".join(tmp[0:len(tmp) - 1]).replace('], [', int_sep).replace('], ', int_sep).replace('([', '').replace(')', '').replace(', ', main_sep)
        pvalue = float(tmp[len(tmp) - 2].replace(")", "").strip())
        ratio = float(tmp[len(tmp) - 1].replace(")", "").strip())
        pvalues.append(pvalue)
        ratios.append(ratio)
        output.append((el.chrom, el.initial, el.final, el.orientation, counts))
    return ratios, pvalues, output
def _output_ext_data(ext_data_list, bamfiles):
    """Output textfile and png file of read size estimation"""
    # Sample names derived from the BAM file basenames (without extension)
    names = [splitext(basename(bamfile))[0] for bamfile in bamfiles]
    # One .data file per sample with (convolution, shift) pairs
    for k, ext_data in enumerate(ext_data_list):
        f = open(FOLDER_REPORT_DATA + 'fragment_size_estimate_' + names[k] + '.data', 'w')
        for d in ext_data:
            print(d[0], d[1], sep='\t', file=f)
        f.close()
    # One combined plot with a curve per sample
    for i, ext_data in enumerate(ext_data_list):
        d1 = map(lambda x: x[0], ext_data)
        d2 = map(lambda x: x[1], ext_data)
        ax = plt.subplot(111)
        plt.xlabel('shift')
        plt.ylabel('convolution')
        plt.title('Fragment Size Estimation')
        plt.plot(d2, d1, label=names[i])
        ax.legend()
    plt.savefig(FOLDER_REPORT_PICS + 'fragment_size_estimate.png')
    plt.close()
def _compute_extension_sizes(bamfiles, exts, inputs, exts_inputs, report):
    """Compute Extension sizes for bamfiles and input files.

    Note: when *exts* is empty, it is mutated in place (appended to) as well
    as returned; callers may rely on either.
    """
    start = 0
    end = 600
    ext_stepsize = 5
    ext_data_list = []
    # compute extension size
    if not exts:
        print("Computing read extension sizes for ChIP-seq profiles", file=sys.stderr)
        for bamfile in bamfiles:
            e, ext_data = get_extension_size(bamfile, start=start, end=end, stepsize=ext_stepsize)
            exts.append(e)
            ext_data_list.append(ext_data)
    if report and ext_data_list:
        _output_ext_data(ext_data_list, bamfiles)
    # Input (control) files get a small fixed extension size by default
    if inputs and not exts_inputs:
        exts_inputs = [5] * len(inputs)
    return exts, exts_inputs
def get_all_chrom(bamfiles):
    """Collect the set of all reference (chromosome) names seen across *bamfiles*."""
    chromosomes = set()
    for path in bamfiles:
        alignment_file = pysam.Samfile(path, "rb")
        for aligned_read in alignment_file.fetch():
            # set.add is idempotent, so duplicates are harmless
            chromosomes.add(alignment_file.getrname(aligned_read.reference_id))
    return chromosomes
def initialize(name, dims, genome_path, regions, stepsize, binsize, bamfiles, exts,
               inputs, exts_inputs, factors_inputs, chrom_sizes, verbose, no_gc_content,
               tracker, debug, norm_regions, scaling_factors_ip, save_wig, housekeeping_genes,
               test, report, chrom_sizes_dict, counter, end, gc_content_cov=None, avg_gc_content=None,
               gc_hist=None, output_bw=True, save_input=False, m_threshold=80, a_threshold=95, rmdup=False):
    """Initialize the MultiCoverageSet.

    Sorts the region sequences, optionally loads the normalization regions,
    estimates read extension sizes, and constructs the MultiCoverageSet with
    all supplied parameters.

    NOTE(review): the 'test' parameter is not referenced in this body.
    """
    regionset = regions
    regionset.sequences.sort()
    # Optional normalization regions given as a BED file path
    if norm_regions:
        norm_regionset = GenomicRegionSet('norm_regions')
        norm_regionset.read(norm_regions)
    else:
        norm_regionset = None
    # Estimate read extension sizes when not supplied (may mutate exts in place)
    exts, exts_inputs = _compute_extension_sizes(bamfiles, exts, inputs, exts_inputs, report)
    multi_cov_set = MultiCoverageSet(name=name, regions=regionset, dims=dims, genome_path=genome_path,
                                     binsize=binsize, stepsize=stepsize, rmdup=rmdup, path_bamfiles=bamfiles,
                                     path_inputs=inputs, exts=exts, exts_inputs=exts_inputs,
                                     factors_inputs=factors_inputs, chrom_sizes=chrom_sizes, verbose=verbose,
                                     no_gc_content=no_gc_content, chrom_sizes_dict=chrom_sizes_dict, debug=debug,
                                     norm_regionset=norm_regionset, scaling_factors_ip=scaling_factors_ip,
                                     save_wig=save_wig, strand_cov=True, housekeeping_genes=housekeeping_genes,
                                     tracker=tracker, gc_content_cov=gc_content_cov, avg_gc_content=avg_gc_content,
                                     gc_hist=gc_hist, end=end, counter=counter, output_bw=output_bw,
                                     folder_report=FOLDER_REPORT, report=report, save_input=save_input,
                                     m_threshold=m_threshold, a_threshold=a_threshold)
    return multi_cov_set
class HelpfulOptionParser(OptionParser):
    """An OptionParser that prints full help on errors."""

    def error(self, msg):
        """Print the full help text to stderr, then exit with status 2."""
        self.print_help(sys.stderr)
        prog_name = self.get_prog_name()
        self.exit(2, "\n%s: error: %s\n" % (prog_name, msg))
def _callback_list(option, opt, value, parser):
setattr(parser.values, option.dest, map(lambda x: int(x), value.split(',')))
def _callback_list_float(option, opt, value, parser):
setattr(parser.values, option.dest, map(lambda x: float(x), value.split(',')))
def handle_input():
    """Parse command-line options and the THOR config file.

    Builds the option parser, validates every supplied path and list-valued
    option, derives a default experiment name when none is given, prepares
    the output (and optional report) directory, and initializes the
    module-level report/output path globals.

    Returns:
        tuple: (options, bamfiles, genome, chrom_sizes, dims, inputs), where
        the last five values come from parsing the config file.
    """
    parser = HelpfulOptionParser(usage=__doc__)
    # --- basic options ----------------------------------------------------
    parser.add_option("-n", "--name", default=None, dest="name", type="string",
                      help="Experiment's name and prefix for all files that are created.")
    parser.add_option("-m", "--merge", default=False, dest="merge", action="store_true",
                      help="Merge peaks which have a distance less than the estimated mean fragment size "
                           "(recommended for histone data). [default: do not merge]")
    parser.add_option("--no-merge-bin", default=True, dest="merge_bin", action="store_false",
                      help="Merge the overlapping bin before filtering by p-value."
                           "[default: Merging bins]")
    parser.add_option("--housekeeping-genes", default=None, dest="housekeeping_genes", type="str",
                      help="Define housekeeping genes (BED format) used for normalizing. [default: %default]")
    parser.add_option("--output-dir", dest="outputdir", default=None, type="string",
                      help="Store files in output directory. [default: %default]")
    parser.add_option("--report", dest="report", default=False, action="store_true",
                      help="Generate HTML report about experiment. [default: %default]")
    parser.add_option("--deadzones", dest="deadzones", default=None,
                      help="Define blacklisted genomic regions avoided for analysis (BED format). [default: %default]")
    parser.add_option("--no-correction", default=False, dest="no_correction", action="store_true",
                      help="Do not use multipe test correction for p-values (Benjamini/Hochberg). [default: %default]")
    parser.add_option("-p", "--pvalue", dest="pcutoff", default=0.1, type="float",
                      help="P-value cutoff for peak detection. Call only peaks with p-value lower than cutoff. "
                           "[default: %default]")
    # List-valued options are parsed by the _callback_list* helpers.
    parser.add_option("--exts", default=None, dest="exts", type="str", action='callback', callback=_callback_list,
                      help="Read's extension size for BAM files (comma separated list for each BAM file in config "
                           "file). If option is not chosen, estimate extension sizes. [default: %default]")
    parser.add_option("--factors-inputs", default=None, dest="factors_inputs", type="str", action="callback",
                      callback=_callback_list_float,
                      help="Normalization factors for input-DNA (comma separated list for each BAM file in config "
                           "file). If option is not chosen, estimate factors. [default: %default]")
    parser.add_option("--scaling-factors", default=None, dest="scaling_factors_ip", type="str", action='callback',
                      callback=_callback_list_float,
                      help="Scaling factor for each BAM file (not control input-DNA) as comma separated list for "
                           "each BAM file in config file. If option is not chosen, follow normalization strategy "
                           "(TMM or HK approach) [default: %default]")
    parser.add_option("--save-input", dest="save_input", default=False, action="store_true",
                      help="Save input-DNA file if available. [default: %default]")
    parser.add_option("--version", dest="version", default=False, action="store_true",
                      help="Show script's version.")
    # --- advanced options, grouped separately in --help -------------------
    group = OptionGroup(parser, "Advanced options")
    group.add_option("--regions", dest="regions", default=None, type="string",
                     help="Define regions (BED format) to restrict the analysis, that is, where to train the HMM and "
                          "search for DPs. It is faster, but less precise.")
    group.add_option("-b", "--binsize", dest="binsize", default=100, type="int",
                     help="Size of underlying bins for creating the signal. [default: %default]")
    group.add_option("-s", "--step", dest="stepsize", default=50, type="int",
                     help="Stepsize with which the window consecutively slides across the genome to create the "
                          "signal. [default: %default]")
    group.add_option("--debug", default=False, dest="debug", action="store_true",
                     help="Output debug information. Warning: space consuming! [default: %default]")
    group.add_option("--no-gc-content", dest="no_gc_content", default=False, action="store_true",
                     help="Do not normalize towards GC content. [default: %default]")
    group.add_option("--norm-regions", default=None, dest="norm_regions", type="str",
                     help="Restrict normalization to particular regions (BED format). [default: %default]")
    group.add_option("-f", "--foldchange", dest="foldchange", default=1.6, type="float",
                     help="Fold change parameter to define training set (t_1, see paper). [default: %default]")
    group.add_option("-t", "--threshold", dest="threshold", default=95, type="float",
                     help="Minimum signal support for differential peaks to define training set as percentage "
                          "(t_2, see paper). [default: %default]")
    group.add_option("--size", dest="size_ts", default=10000, type="int",
                     help="Number of bins the HMM's training set constists of. [default: %default]")
    group.add_option("--par", dest="par", default=1, type="int",
                     help="Percentile for p-value postprocessing filter. [default: %default]")
    group.add_option("--poisson", default=False, dest="poisson", action="store_true",
                     help="Use binomial distribution as emmission. [default: %default]")
    group.add_option("--single-strand", default=False, dest="singlestrand", action="store_true",
                     help="Allow single strand BAM file as input. [default: %default]")
    group.add_option("--m_threshold", default=80, dest="m_threshold", type="int",
                     help="Define the M threshold of percentile for training TMM. [default: %default]")
    group.add_option("--a_threshold", default=95, dest="a_threshold", type="int",
                     help="Define the A threshold of percentile for training TMM. [default: %default]")
    group.add_option("--rmdup", default=False, dest="rmdup", action="store_true",
                     help="Remove the duplicate reads [default: %default]")
    parser.add_option_group(group)
    (options, args) = parser.parse_args()
    # Settings that are fixed here and not exposed on the command line.
    options.save_wig = False
    options.exts_inputs = None
    options.verbose = False
    options.hmm_free_para = False
    if options.version:
        print("")
        print(__version__)
        sys.exit()
    # Exactly one positional argument: the config file.
    if len(args) != 1:
        parser.error("Please give config file")
    config_path = npath(args[0])
    if not isfile(config_path):
        parser.error("Config file %s does not exist!" % config_path)
    bamfiles, genome, chrom_sizes, inputs, dims = input_parser(config_path)
    # Without a genome sequence GC-content normalization is impossible.
    if not genome:
        options.no_gc_content = True
    # List-valued options must match the number of signal/input BAM files.
    if options.exts and len(options.exts) != len(bamfiles):
        parser.error("Number of Extension Sizes must equal number of bamfiles")
    if options.exts_inputs and len(options.exts_inputs) != len(inputs):
        parser.error("Number of Input Extension Sizes must equal number of input bamfiles")
    if options.scaling_factors_ip and len(options.scaling_factors_ip) != len(bamfiles):
        parser.error("Number of scaling factors for IP must equal number of bamfiles")
    for bamfile in bamfiles:
        if not isfile(bamfile):
            parser.error("BAM file %s does not exist!" % bamfile)
    # Input-DNA factors only make sense when input-DNA files are configured.
    if not inputs and options.factors_inputs:
        print("As no input-DNA, do not use input-DNA factors", file=sys.stderr)
        options.factors_inputs = None
    if options.factors_inputs and len(options.factors_inputs) != len(bamfiles):
        parser.error("factors for input-DNA must equal number of BAM files!")
    if inputs:
        for bamfile in inputs:
            if not isfile(bamfile):
                parser.error("BAM file %s does not exist!" % bamfile)
    if options.regions:
        if not isfile(options.regions):
            parser.error("Region file %s does not exist!" % options.regions)
    if genome and not isfile(genome):
        parser.error("Genome file %s does not exist!" % genome)
    # Derive a timestamp-based experiment name if none was given.
    if options.name is None:
        d = str(datetime.now()).replace("-", "_").replace(":", "_").replace(" ", "_").replace(".", "_").split("_")
        options.name = "THOR-exp" + "-" + "_".join(d[:len(d) - 1])
    # The UCSC tools are only needed for BigWig output; warn when missing.
    if not which("wigToBigWig") or not which("bedGraphToBigWig") or not which("bigWigMerge"):
        print("Warning: wigToBigWig, bigWigMerge or bedGraphToBigWig not found! Signal will not be stored!",
              file=sys.stderr)
    # Prepare the output directory; refuse to overwrite previous results.
    if options.outputdir:
        options.outputdir = npath(options.outputdir)
        if isdir(options.outputdir) and sum(
                map(lambda x: x.startswith(options.name), os.listdir(options.outputdir))) > 0:
            parser.error("Output directory exists and contains files with names starting with your chosen experiment "
                         "name! Do nothing to prevent file overwriting!")
        if not exists(options.outputdir):
            os.mkdir(options.outputdir)
    else:
        options.outputdir = os.getcwd()
    options.name = join(options.outputdir, options.name)
    if options.report and isdir(join(options.outputdir, 'report_'+basename(options.name))):
        parser.error("Folder 'report_"+basename(options.name)+"' already exits in output directory!"
                     "Do nothing to prevent file overwriting! "
                     "Please rename report folder or change working directory of THOR with the option --output-dir")
    # Create the report folder hierarchy when a report was requested.
    if options.report:
        os.mkdir(join(options.outputdir, 'report_'+basename(options.name)+"/"))
        os.mkdir(join(options.outputdir, 'report_'+basename(options.name), 'pics/'))
        os.mkdir(join(options.outputdir, 'report_'+basename(options.name), 'pics/data/'))
    # Publish the derived paths via module-level globals used elsewhere.
    global FOLDER_REPORT
    global FOLDER_REPORT_PICS
    global FOLDER_REPORT_DATA
    global OUTPUTDIR
    global NAME
    FOLDER_REPORT = join(options.outputdir, 'report_'+basename(options.name)+"/")
    FOLDER_REPORT_PICS = join(options.outputdir, 'report_'+basename(options.name), 'pics/')
    FOLDER_REPORT_DATA = join(options.outputdir, 'report_'+basename(options.name), 'pics/data/')
    OUTPUTDIR = options.outputdir
    NAME = options.name
    if not inputs:
        print("Warning: Do not compute GC-content, as there is no input file", file=sys.stderr)
    if not genome:
        print("Warning: Do not compute GC-content, as there is no genome file", file=sys.stderr)
    # Normalize unset list options to empty lists for downstream code.
    if options.exts is None:
        options.exts = []
    if options.exts_inputs is None:
        options.exts_inputs = []
    return options, bamfiles, genome, chrom_sizes, dims, inputs
| [
"optparse.OptionGroup",
"matplotlib.pyplot.title",
"os.remove",
"os.mkdir",
"numpy.random.seed",
"numpy.sum",
"os.path.isfile",
"numpy.mean",
"numpy.random.randint",
"os.path.join",
"matplotlib.pyplot.close",
"os.path.exists",
"math.log",
"numpy.var",
"datetime.datetime.now",
"os.path.... | [((1259, 1273), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (1266, 1273), True, 'import matplotlib as mpl\n'), ((1329, 1347), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1343, 1347), True, 'import numpy as np\n'), ((10580, 10593), 'numpy.mean', 'np.mean', (['exts'], {}), '(exts)\n', (10587, 10593), True, 'import numpy as np\n'), ((11660, 11689), 'numpy.percentile', 'np.percentile', (['tmp_pvalues', 'p'], {}), '(tmp_pvalues, p)\n', (11673, 11689), True, 'import numpy as np\n'), ((13611, 13673), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(FOLDER_REPORT_PICS + 'fragment_size_estimate.png')"], {}), "(FOLDER_REPORT_PICS + 'fragment_size_estimate.png')\n", (13622, 13673), True, 'import matplotlib.pyplot as plt\n'), ((13678, 13689), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13687, 13689), True, 'import matplotlib.pyplot as plt\n'), ((20747, 20786), 'optparse.OptionGroup', 'OptionGroup', (['parser', '"""Advanced options"""'], {}), "(parser, 'Advanced options')\n", (20758, 20786), False, 'from optparse import OptionParser, OptionGroup\n'), ((26716, 26753), 'os.path.join', 'join', (['options.outputdir', 'options.name'], {}), '(options.outputdir, options.name)\n', (26720, 26753), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((2856, 2871), 'numpy.asarray', 'np.asarray', (['res'], {}), '(res)\n', (2866, 2871), True, 'import numpy as np\n'), ((3388, 3438), 'numpy.concatenate', 'np.concatenate', (['(plot_data[0][i], plot_data[1][i])'], {}), '((plot_data[0][i], plot_data[1][i]))\n', (3402, 3438), True, 'import numpy as np\n'), ((4767, 4798), 'numpy.asarray', 'np.asarray', (['overall_coverage[i]'], {}), '(overall_coverage[i])\n', (4777, 4798), True, 'import numpy as np\n'), ((4964, 5013), 'numpy.random.randint', 'np.random.randint', (['cov.shape[1]'], {'size': 'sample_size'}), '(cov.shape[1], size=sample_size)\n', (4981, 5013), True, 'import numpy as np\n'), ((5312, 
5335), 'numpy.asarray', 'np.asarray', (['data_rep[i]'], {}), '(data_rep[i])\n', (5322, 5335), True, 'import numpy as np\n'), ((11069, 11212), 'numpy.sum', 'np.sum', (['(DCS.overall_coverage_strand[0][1][:, DCS.indices_of_interest[i]] + DCS.\n overall_coverage_strand[1][1][:, DCS.indices_of_interest[i]])'], {}), '(DCS.overall_coverage_strand[0][1][:, DCS.indices_of_interest[i]] +\n DCS.overall_coverage_strand[1][1][:, DCS.indices_of_interest[i]])\n', (11075, 11212), True, 'import numpy as np\n'), ((13420, 13436), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (13431, 13436), True, 'import matplotlib.pyplot as plt\n'), ((13445, 13464), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""shift"""'], {}), "('shift')\n", (13455, 13464), True, 'import matplotlib.pyplot as plt\n'), ((13473, 13498), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""convolution"""'], {}), "('convolution')\n", (13483, 13498), True, 'import matplotlib.pyplot as plt\n'), ((13507, 13544), 'matplotlib.pyplot.title', 'plt.title', (['"""Fragment Size Estimation"""'], {}), "('Fragment Size Estimation')\n", (13516, 13544), True, 'import matplotlib.pyplot as plt\n'), ((13553, 13585), 'matplotlib.pyplot.plot', 'plt.plot', (['d2', 'd1'], {'label': 'names[i]'}), '(d2, d1, label=names[i])\n', (13561, 13585), True, 'import matplotlib.pyplot as plt\n'), ((14531, 14559), 'pysam.Samfile', 'pysam.Samfile', (['bamfile', '"""rb"""'], {}), "(bamfile, 'rb')\n", (14544, 14559), False, 'import pysam\n'), ((24019, 24029), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24027, 24029), False, 'import sys\n'), ((24148, 24167), 'os.path.isfile', 'isfile', (['config_path'], {}), '(config_path)\n', (24154, 24167), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((26684, 26695), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (26693, 26695), False, 'import os\n'), ((1908, 1920), 'os.system', 'os.system', (['c'], {}), '(c)\n', (1917, 1920), False, 'import os\n'), ((1934, 2022), 
'os.system', 'os.system', (["('LC_COLLATE=C sort -k1,1 -k2,2n ' + temp_bed + ' > ' + temp_bed + '.sort')"], {}), "('LC_COLLATE=C sort -k1,1 -k2,2n ' + temp_bed + ' > ' + temp_bed +\n '.sort')\n", (1943, 2022), False, 'import os\n'), ((2174, 2186), 'os.system', 'os.system', (['c'], {}), '(c)\n', (2183, 2186), False, 'import os\n'), ((2257, 2276), 'os.remove', 'os.remove', (['temp_bed'], {}), '(temp_bed)\n', (2266, 2276), False, 'import os\n'), ((2289, 2318), 'os.remove', 'os.remove', (["(temp_bed + '.sort')"], {}), "(temp_bed + '.sort')\n", (2298, 2318), False, 'import os\n'), ((4020, 4036), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4031, 4036), True, 'import matplotlib.pyplot as plt\n'), ((4049, 4095), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r"""'], {'label': '"""fitted polynomial"""'}), "(x, y, 'r', label='fitted polynomial')\n", (4057, 4095), True, 'import matplotlib.pyplot as plt\n'), ((4124, 4199), 'matplotlib.pyplot.scatter', 'plt.scatter', (['plot_data[i][0]', 'plot_data[i][1]'], {'label': '"""empirical datapoints"""'}), "(plot_data[i][0], plot_data[i][1], label='empirical datapoints')\n", (4135, 4199), True, 'import matplotlib.pyplot as plt\n'), ((4255, 4273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""mean"""'], {}), "('mean')\n", (4265, 4273), True, 'import matplotlib.pyplot as plt\n'), ((4286, 4308), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""variance"""'], {}), "('variance')\n", (4296, 4308), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4366), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimated Mean-Variance Function"""'], {}), "('Estimated Mean-Variance Function')\n", (4330, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4511, 4558), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(FOLDER_REPORT_PICS + name + '.png')"], {}), "(FOLDER_REPORT_PICS + name + '.png')\n", (4522, 4558), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4582), 'matplotlib.pyplot.close', 'plt.close', ([], 
{}), '()\n', (4580, 4582), True, 'import matplotlib.pyplot as plt\n'), ((7256, 7272), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (7264, 7272), True, 'import numpy as np\n'), ((7274, 7290), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (7282, 7290), True, 'import numpy as np\n'), ((8097, 8107), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (8104, 8107), True, 'import numpy as np\n'), ((8114, 8124), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (8121, 8124), True, 'import numpy as np\n'), ((8395, 8403), 'math.log', 'log', (['res'], {}), '(res)\n', (8398, 8403), False, 'from math import fabs, log, ceil\n'), ((8419, 8432), 'numpy.isinf', 'np.isinf', (['res'], {}), '(res)\n', (8427, 8432), True, 'import numpy as np\n'), ((9945, 10008), 'numpy.mean', 'np.mean', (['DCS.overall_coverage[0][:, DCS.indices_of_interest[i]]'], {}), '(DCS.overall_coverage[0][:, DCS.indices_of_interest[i]])\n', (9952, 10008), True, 'import numpy as np\n'), ((10029, 10092), 'numpy.mean', 'np.mean', (['DCS.overall_coverage[1][:, DCS.indices_of_interest[i]]'], {}), '(DCS.overall_coverage[1][:, DCS.indices_of_interest[i]])\n', (10036, 10092), True, 'import numpy as np\n'), ((10208, 10224), 'numpy.asarray', 'np.asarray', (['cov1'], {}), '(cov1)\n', (10218, 10224), True, 'import numpy as np\n'), ((10332, 10348), 'numpy.asarray', 'np.asarray', (['cov2'], {}), '(cov2)\n', (10342, 10348), True, 'import numpy as np\n'), ((10901, 10973), 'numpy.sum', 'np.sum', (['DCS.overall_coverage_strand[0][0][:, DCS.indices_of_interest[i]]'], {}), '(DCS.overall_coverage_strand[0][0][:, DCS.indices_of_interest[i]])\n', (10907, 10973), True, 'import numpy as np\n'), ((10975, 11047), 'numpy.sum', 'np.sum', (['DCS.overall_coverage_strand[1][0][:, DCS.indices_of_interest[i]]'], {}), '(DCS.overall_coverage_strand[1][0][:, DCS.indices_of_interest[i]])\n', (10981, 11047), True, 'import numpy as np\n'), ((24899, 24914), 'os.path.isfile', 'isfile', (['bamfile'], {}), '(bamfile)\n', (24905, 24914), 
False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((25499, 25522), 'os.path.isfile', 'isfile', (['options.regions'], {}), '(options.regions)\n', (25505, 25522), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((25624, 25638), 'os.path.isfile', 'isfile', (['genome'], {}), '(genome)\n', (25630, 25638), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((26242, 26266), 'os.path.isdir', 'isdir', (['options.outputdir'], {}), '(options.outputdir)\n', (26247, 26266), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((26579, 26604), 'os.path.exists', 'exists', (['options.outputdir'], {}), '(options.outputdir)\n', (26585, 26604), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((26618, 26645), 'os.mkdir', 'os.mkdir', (['options.outputdir'], {}), '(options.outputdir)\n', (26626, 26645), False, 'import os\n'), ((27674, 27696), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (27682, 27696), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((27766, 27788), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (27774, 27788), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((2232, 2244), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (2241, 2244), False, 'import os\n'), ((2560, 2572), 'os.system', 'os.system', (['c'], {}), '(c)\n', (2569, 2572), False, 'import os\n'), ((2925, 2932), 'math.fabs', 'fabs', (['c'], {}), '(c)\n', (2929, 2932), False, 'from math import fabs, log, ceil\n'), ((3912, 3934), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, maxs[0]]'], {}), '([0, maxs[0]])\n', (3920, 3934), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3973), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, maxs[1]]'], {}), '([0, maxs[1]])\n', (3959, 3973), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5582), 
'numpy.percentile', 'np.percentile', (['data_rep[i][:, 0]', '(99.75)'], {}), '(data_rep[i][:, 0], 99.75)\n', (5556, 5582), True, 'import numpy as np\n'), ((5636, 5675), 'numpy.percentile', 'np.percentile', (['data_rep[i][:, 1]', '(99.75)'], {}), '(data_rep[i][:, 1], 99.75)\n', (5649, 5675), True, 'import numpy as np\n'), ((8232, 8244), 'numpy.array', 'np.array', (['l1'], {}), '(l1)\n', (8240, 8244), True, 'import numpy as np\n'), ((8261, 8273), 'numpy.array', 'np.array', (['l2'], {}), '(l2)\n', (8269, 8273), True, 'import numpy as np\n'), ((12986, 13003), 'os.path.basename', 'basename', (['bamfile'], {}), '(bamfile)\n', (12994, 13003), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((25372, 25387), 'os.path.isfile', 'isfile', (['bamfile'], {}), '(bamfile)\n', (25378, 25387), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((27587, 27609), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (27595, 27609), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((1797, 1806), 'os.path.isfile', 'isfile', (['x'], {}), '(x)\n', (1803, 1806), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((5094, 5120), 'numpy.mean', 'np.mean', (['(cov * 1.0)'], {'axis': '(0)'}), '(cov * 1.0, axis=0)\n', (5101, 5120), True, 'import numpy as np\n'), ((5161, 5186), 'numpy.var', 'np.var', (['(cov * 1.0)'], {'axis': '(0)'}), '(cov * 1.0, axis=0)\n', (5167, 5186), True, 'import numpy as np\n'), ((6703, 6719), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (6711, 6719), True, 'import numpy as np\n'), ((26821, 26843), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (26829, 26843), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((26886, 26908), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (26894, 26908), False, 'from os.path import splitext, 
basename, join, isfile, isdir, exists\n'), ((27285, 27307), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (27293, 27307), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((27370, 27392), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (27378, 27392), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((2818, 2825), 'math.fabs', 'fabs', (['c'], {}), '(c)\n', (2822, 2825), False, 'from math import fabs, log, ceil\n'), ((2904, 2911), 'math.fabs', 'fabs', (['a'], {}), '(a)\n', (2908, 2911), False, 'from math import fabs, log, ceil\n'), ((3508, 3530), 'numpy.percentile', 'np.percentile', (['tmp', '(90)'], {}), '(tmp, 90)\n', (3521, 3530), True, 'import numpy as np\n'), ((6393, 6423), 'scipy.optimize.curve_fit', 'curve_fit', (['_func_quad_2p', 'm', 'v'], {}), '(_func_quad_2p, m, v)\n', (6402, 6423), False, 'from scipy.optimize import curve_fit\n'), ((26334, 26363), 'os.listdir', 'os.listdir', (['options.outputdir'], {}), '(options.outputdir)\n', (26344, 26363), False, 'import os\n'), ((27205, 27227), 'os.path.basename', 'basename', (['options.name'], {}), '(options.name)\n', (27213, 27227), False, 'from os.path import splitext, basename, join, isfile, isdir, exists\n'), ((2795, 2802), 'math.fabs', 'fabs', (['a'], {}), '(a)\n', (2799, 2802), False, 'from math import fabs, log, ceil\n'), ((25750, 25764), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25762, 25764), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import gc
import heapq
import random
import sys
import time
from collections import OrderedDict
from os import path
import numpy as np
kSampleSize = 64  # The sample size used when performing eviction.
kMicrosInSecond = 1000000  # Trace timestamps are recorded in microseconds.
kSecondsInMinute = 60
kSecondsInHour = 3600
class TraceRecord:
    """
    A trace record represents a block access.
    It holds the same struct as BlockCacheTraceRecord in
    trace_replay/block_cache_tracer.h
    """

    def __init__(
        self,
        access_time,
        block_id,
        block_type,
        block_size,
        cf_id,
        cf_name,
        level,
        fd,
        caller,
        no_insert,
        get_id,
        key_id,
        kv_size,
        is_hit,
        referenced_key_exist_in_block,
        num_keys_in_block,
        table_id,
        seq_number,
        block_key_size,
        key_size,
        block_offset_in_file,
        next_access_seq_no,
    ):
        self.access_time = access_time
        self.block_id = block_id
        self.block_type = block_type
        # The cache charge accounts for the block key as well as the value.
        self.block_size = block_size + block_key_size
        self.cf_id = cf_id
        self.cf_name = cf_name
        self.level = level
        self.fd = fd
        self.caller = caller
        # Trace files encode boolean fields as 0/1 integers; decode them
        # to real bools here.
        self.no_insert = no_insert == 1
        self.get_id = get_id
        self.key_id = key_id
        self.kv_size = kv_size
        self.is_hit = is_hit == 1
        self.referenced_key_exist_in_block = referenced_key_exist_in_block == 1
        self.num_keys_in_block = num_keys_in_block
        self.table_id = table_id
        self.seq_number = seq_number
        self.block_key_size = block_key_size
        self.key_size = key_size
        self.block_offset_in_file = block_offset_in_file
        self.next_access_seq_no = next_access_seq_no
class CacheEntry:
    """A cache entry stored in the cache."""

    def __init__(
        self,
        value_size,
        cf_id,
        level,
        block_type,
        table_id,
        access_number,
        time_s,
        num_hits=0,
    ):
        """
        Args:
            value_size: bytes charged to the cache for this entry.
            cf_id: column family id of the block.
            level: LSM level the block belongs to.
            block_type: type of the cached block.
            table_id: id of the SST file containing the block.
            access_number: sequence number of the access that last
                touched this entry.
            time_s: access time in seconds.
            num_hits: hits this entry has received so far.
        """
        self.value_size = value_size
        self.last_access_number = access_number
        self.num_hits = num_hits
        # Bug fix: the constructor previously ignored 'cf_id' and always
        # stored 0, which collapsed the "cf"/"cf_bt" cost classes of all
        # entries into a single class.
        self.cf_id = cf_id
        self.level = level
        self.block_type = block_type
        self.last_access_time = time_s
        self.insertion_time = time_s
        self.table_id = table_id

    def __repr__(self):
        """Debug string."""
        return "(s={},last={},hits={},cf={},l={},bt={})\n".format(
            self.value_size,
            self.last_access_number,
            self.num_hits,
            self.cf_id,
            self.level,
            self.block_type,
        )

    def cost_class(self, cost_class_label):
        """Return the aggregation key of this entry under the given
        cost-class scheme (used by cost-aware eviction policies)."""
        if cost_class_label == "table_bt":
            return "{}-{}".format(self.table_id, self.block_type)
        elif cost_class_label == "table":
            return "{}".format(self.table_id)
        elif cost_class_label == "bt":
            return "{}".format(self.block_type)
        elif cost_class_label == "cf":
            return "{}".format(self.cf_id)
        elif cost_class_label == "cf_bt":
            return "{}-{}".format(self.cf_id, self.block_type)
        elif cost_class_label == "table_level_bt":
            return "{}-{}-{}".format(self.table_id, self.level, self.block_type)
        assert False, "Unknown cost class label {}".format(cost_class_label)
        return None
class HashEntry:
    """A hash entry stored in a hash table."""

    def __init__(self, key, hash, value):
        self.key = key
        self.hash = hash
        self.value = value

    def __repr__(self):
        # Compact debug form: key, hash, and the bracketed value.
        return f"k={self.key},h={self.hash},v=[{self.value}]"
class HashTable:
    """
    A custom implementation of hash table to support fast random sampling.
    It is closed hashing and uses chaining to resolve hash conflicts.
    It grows/shrinks the hash table upon insertion/deletion to support
    fast lookups and random samplings.
    """

    def __init__(self):
        # Each bucket is either None or a list of HashEntry cells; deleted
        # entries leave a None hole in the list that insert() can reuse.
        self.initial_size = 32
        self.table = [None] * self.initial_size
        self.elements = 0

    def random_sample(self, sample_size):
        """Randomly sample 'sample_size' hash entries from the table."""
        samples = []
        # Pick a random starting bucket and scan forward with wrap-around.
        index = random.randint(0, len(self.table) - 1)
        pos = index
        # Starting from index, adding hash entries to the sample list until
        # sample_size is met or we ran out of entries.
        while True:
            if self.table[pos] is not None:
                for i in range(len(self.table[pos])):
                    if self.table[pos][i] is None:
                        continue
                    samples.append(self.table[pos][i])
                    if len(samples) == sample_size:
                        break
            pos += 1
            pos = pos % len(self.table)
            # Stop after a full loop around the table or once enough
            # samples were collected.
            if pos == index or len(samples) == sample_size:
                break
        assert len(samples) <= sample_size
        return samples

    def __repr__(self):
        # Flatten all live entries for debugging output.
        all_entries = []
        for i in range(len(self.table)):
            if self.table[i] is None:
                continue
            for j in range(len(self.table[i])):
                if self.table[i][j] is not None:
                    all_entries.append(self.table[i][j])
        return "{}".format(all_entries)

    def values(self):
        """Return the values of all live entries, in bucket order."""
        all_values = []
        for i in range(len(self.table)):
            if self.table[i] is None:
                continue
            for j in range(len(self.table[i])):
                if self.table[i][j] is not None:
                    all_values.append(self.table[i][j].value)
        return all_values

    def __len__(self):
        # Number of live entries, maintained by insert()/delete().
        return self.elements

    def insert(self, key, hash, value):
        """
        Insert a hash entry in the table. Replace the old entry if it already
        exists.
        """
        self.grow()
        inserted = False
        index = hash % len(self.table)
        if self.table[index] is None:
            self.table[index] = []
        # Search for the entry first.
        for i in range(len(self.table[index])):
            if self.table[index][i] is None:
                continue
            if self.table[index][i].hash == hash and self.table[index][i].key == key:
                # The entry already exists in the table.
                self.table[index][i] = HashEntry(key, hash, value)
                return
        # Find an empty slot.
        for i in range(len(self.table[index])):
            if self.table[index][i] is None:
                self.table[index][i] = HashEntry(key, hash, value)
                inserted = True
                break
        if not inserted:
            self.table[index].append(HashEntry(key, hash, value))
        self.elements += 1

    def resize(self, new_size):
        """Rehash every live entry into a table with 'new_size' buckets.

        No-op when the size is unchanged, would drop below the initial
        size, or the table is still small (fewer than 100 elements).
        """
        if new_size == len(self.table):
            return
        if new_size < self.initial_size:
            return
        if self.elements < 100:
            return
        new_table = [None] * new_size
        # Copy 'self.table' to new_table.
        for i in range(len(self.table)):
            entries = self.table[i]
            if entries is None:
                continue
            for j in range(len(entries)):
                if entries[j] is None:
                    continue
                index = entries[j].hash % new_size
                if new_table[index] is None:
                    new_table[index] = []
                new_table[index].append(entries[j])
        self.table = new_table
        del new_table
        # Manually call python gc here to free the memory as 'self.table'
        # might be very large.
        gc.collect()

    def grow(self):
        # Grow by 1.5x once the load factor reaches 4 entries per bucket.
        if self.elements < 4 * len(self.table):
            return
        new_size = int(len(self.table) * 1.5)
        self.resize(new_size)

    def delete(self, key, hash):
        """Remove and return the entry matching (key, hash).

        Returns None (implicitly) when the bucket is empty; returns the
        removed HashEntry (or None if not found) otherwise.
        """
        index = hash % len(self.table)
        deleted = False
        deleted_entry = None
        if self.table[index] is None:
            return
        for i in range(len(self.table[index])):
            if (
                self.table[index][i] is not None
                and self.table[index][i].hash == hash
                and self.table[index][i].key == key
            ):
                # Leave a None hole instead of shifting the bucket list.
                deleted_entry = self.table[index][i]
                self.table[index][i] = None
                self.elements -= 1
                deleted = True
                break
        if deleted:
            self.shrink()
        return deleted_entry

    def shrink(self):
        # Shrink to 0.7x once occupancy drops below half the bucket count.
        if self.elements * 2 >= len(self.table):
            return
        new_size = int(len(self.table) * 0.7)
        self.resize(new_size)

    def lookup(self, key, hash):
        """Return the value stored under (key, hash), or None if absent."""
        index = hash % len(self.table)
        if self.table[index] is None:
            return None
        for i in range(len(self.table[index])):
            if (
                self.table[index][i] is not None
                and self.table[index][i].hash == hash
                and self.table[index][i].key == key
            ):
                return self.table[index][i].value
        return None
class MissRatioStats:
def __init__(self, time_unit):
self.num_misses = 0
self.num_accesses = 0
self.time_unit = time_unit
self.time_misses = {}
self.time_miss_bytes = {}
self.time_accesses = {}
def update_metrics(self, access_time, is_hit, miss_bytes):
access_time /= kMicrosInSecond * self.time_unit
self.num_accesses += 1
if access_time not in self.time_accesses:
self.time_accesses[access_time] = 0
self.time_accesses[access_time] += 1
if not is_hit:
self.num_misses += 1
if access_time not in self.time_misses:
self.time_misses[access_time] = 0
self.time_miss_bytes[access_time] = 0
self.time_misses[access_time] += 1
self.time_miss_bytes[access_time] += miss_bytes
def reset_counter(self):
self.num_misses = 0
self.num_accesses = 0
self.time_miss_bytes.clear()
self.time_misses.clear()
self.time_accesses.clear()
def compute_miss_bytes(self):
miss_bytes = []
for at in self.time_miss_bytes:
miss_bytes.append(self.time_miss_bytes[at])
miss_bytes = sorted(miss_bytes)
avg_miss_bytes = 0
p95_miss_bytes = 0
for i in range(len(miss_bytes)):
avg_miss_bytes += float(miss_bytes[i]) / float(len(miss_bytes))
p95_index = min(int(0.95 * float(len(miss_bytes))), len(miss_bytes) - 1)
p95_miss_bytes = miss_bytes[p95_index]
return avg_miss_bytes, p95_miss_bytes
def miss_ratio(self):
return float(self.num_misses) * 100.0 / float(self.num_accesses)
def write_miss_timeline(
self, cache_type, cache_size, target_cf_name, result_dir, start, end
):
start /= kMicrosInSecond * self.time_unit
end /= kMicrosInSecond * self.time_unit
header_file_path = "{}/header-ml-miss-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
if not path.exists(header_file_path):
with open(header_file_path, "w+") as header_file:
header = "time"
for trace_time in range(start, end):
header += ",{}".format(trace_time)
header_file.write(header + "\n")
file_path = "{}/data-ml-miss-timeline-{}-{}-{}-{}".format(
result_dir, self.time_unit, cache_type, cache_size, target_cf_name
)
with open(file_path, "w+") as file:
row = "{}".format(cache_type)
for trace_time in range(start, end):
row += ",{}".format(self.time_misses.get(trace_time, 0))
file.write(row + "\n")
    def write_miss_ratio_timeline(
        self, cache_type, cache_size, target_cf_name, result_dir, start, end
    ):
        """Append this cache's per-bucket miss ratios (percent) as one CSV row.

        Same header/data layout as write_miss_timeline, but each cell is
        misses / accesses * 100 for the bucket, or 0 when the bucket saw no
        accesses.
        """
        start /= kMicrosInSecond * self.time_unit
        end /= kMicrosInSecond * self.time_unit
        header_file_path = "{}/header-ml-miss-ratio-timeline-{}-{}-{}-{}".format(
            result_dir, self.time_unit, cache_type, cache_size, target_cf_name
        )
        if not path.exists(header_file_path):
            with open(header_file_path, "w+") as header_file:
                header = "time"
                for trace_time in range(start, end):
                    header += ",{}".format(trace_time)
                header_file.write(header + "\n")
        file_path = "{}/data-ml-miss-ratio-timeline-{}-{}-{}-{}".format(
            result_dir, self.time_unit, cache_type, cache_size, target_cf_name
        )
        with open(file_path, "w+") as file:
            row = "{}".format(cache_type)
            for trace_time in range(start, end):
                naccesses = self.time_accesses.get(trace_time, 0)
                # Avoid division by zero for empty buckets.
                miss_ratio = 0
                if naccesses > 0:
                    miss_ratio = float(
                        self.time_misses.get(trace_time, 0) * 100.0
                    ) / float(naccesses)
                row += ",{0:.2f}".format(miss_ratio)
            file.write(row + "\n")
class PolicyStats:
    """Tracks how often each eviction policy was selected, bucketed by time.

    time_selected_polices maps bucket -> {policy_name: count}; together with
    time_accesses it supports absolute and ratio timelines.
    """

    def __init__(self, time_unit, policies):
        self.time_selected_polices = {}
        self.time_accesses = {}
        self.policy_names = {}
        self.time_unit = time_unit
        for i in range(len(policies)):
            self.policy_names[i] = policies[i].policy_name()

    def update_metrics(self, access_time, selected_policy):
        """Record that policy index selected_policy handled one access."""
        access_time /= kMicrosInSecond * self.time_unit
        if access_time not in self.time_accesses:
            self.time_accesses[access_time] = 0
        self.time_accesses[access_time] += 1
        if access_time not in self.time_selected_polices:
            self.time_selected_polices[access_time] = {}
        policy_name = self.policy_names[selected_policy]
        if policy_name not in self.time_selected_polices[access_time]:
            self.time_selected_polices[access_time][policy_name] = 0
        self.time_selected_polices[access_time][policy_name] += 1

    def write_policy_timeline(
        self, cache_type, cache_size, target_cf_name, result_dir, start, end
    ):
        """Write one CSV row per policy with absolute selection counts."""
        start /= kMicrosInSecond * self.time_unit
        end /= kMicrosInSecond * self.time_unit
        header_file_path = "{}/header-ml-policy-timeline-{}-{}-{}-{}".format(
            result_dir, self.time_unit, cache_type, cache_size, target_cf_name
        )
        if not path.exists(header_file_path):
            with open(header_file_path, "w+") as header_file:
                header = "time"
                for trace_time in range(start, end):
                    header += ",{}".format(trace_time)
                header_file.write(header + "\n")
        file_path = "{}/data-ml-policy-timeline-{}-{}-{}-{}".format(
            result_dir, self.time_unit, cache_type, cache_size, target_cf_name
        )
        with open(file_path, "w+") as out_file:
            for policy in self.policy_names:
                policy_name = self.policy_names[policy]
                row = "{}-{}".format(cache_type, policy_name)
                for trace_time in range(start, end):
                    row += ",{}".format(
                        self.time_selected_polices.get(trace_time, {}).get(
                            policy_name, 0
                        )
                    )
                out_file.write(row + "\n")

    def write_policy_ratio_timeline(
        self, cache_type, cache_size, target_cf_name, file_path, start, end
    ):
        """Write one CSV row per policy with selection ratios in percent.

        Bug fix: the body referenced an undefined name ``result_dir``
        (NameError at call time); the ``file_path`` parameter actually
        carries the result directory, so alias it here. The parameter name
        is kept for backward compatibility with keyword callers.
        """
        result_dir = file_path
        start /= kMicrosInSecond * self.time_unit
        end /= kMicrosInSecond * self.time_unit
        header_file_path = "{}/header-ml-policy-ratio-timeline-{}-{}-{}-{}".format(
            result_dir, self.time_unit, cache_type, cache_size, target_cf_name
        )
        if not path.exists(header_file_path):
            with open(header_file_path, "w+") as header_file:
                header = "time"
                for trace_time in range(start, end):
                    header += ",{}".format(trace_time)
                header_file.write(header + "\n")
        file_path = "{}/data-ml-policy-ratio-timeline-{}-{}-{}-{}".format(
            result_dir, self.time_unit, cache_type, cache_size, target_cf_name
        )
        with open(file_path, "w+") as out_file:
            for policy in self.policy_names:
                policy_name = self.policy_names[policy]
                row = "{}-{}".format(cache_type, policy_name)
                for trace_time in range(start, end):
                    naccesses = self.time_accesses.get(trace_time, 0)
                    # Avoid division by zero for buckets with no accesses.
                    ratio = 0
                    if naccesses > 0:
                        ratio = float(
                            self.time_selected_polices.get(trace_time, {}).get(
                                policy_name, 0
                            )
                            * 100.0
                        ) / float(naccesses)
                    row += ",{0:.2f}".format(ratio)
                out_file.write(row + "\n")
class Policy(object):
    """
    A policy maintains a set of evicted keys. It returns a reward of one to
    itself if it has not evicted a missing key. Otherwise, it gives itself 0
    reward.
    """

    def __init__(self):
        self.evicted_keys = {}

    def evict(self, key, max_size):
        """Remember that key was evicted under this policy."""
        self.evicted_keys[key] = 0

    def delete(self, key):
        """Forget key; a no-op when it was never evicted."""
        self.evicted_keys.pop(key, None)

    def prioritize_samples(self, samples, auxilliary_info):
        raise NotImplementedError

    def policy_name(self):
        raise NotImplementedError

    def generate_reward(self, key):
        """Reward 1 if this policy has not evicted key, else 0."""
        return 0 if key in self.evicted_keys else 1
class LRUPolicy(Policy):
    """Evicts the least-recently-used sample first."""

    def prioritize_samples(self, samples, auxilliary_info):
        # Bug fix: sorted(cmp=...) is Python-2-only (TypeError on Python 3).
        # Ascending sort by last access number is the exact equivalent of the
        # old difference-based comparator, including stability for ties.
        return sorted(samples, key=lambda e: e.value.last_access_number)

    def policy_name(self):
        return "lru"
class MRUPolicy(Policy):
    """Evicts the most-recently-used sample first."""

    def prioritize_samples(self, samples, auxilliary_info):
        # Bug fix: sorted(cmp=...) is Python-2-only (TypeError on Python 3).
        # Descending sort by last access number matches the old comparator;
        # reverse=True keeps ties in their original (stable) order, just as
        # a comparator returning 0 did.
        return sorted(
            samples, key=lambda e: e.value.last_access_number, reverse=True
        )

    def policy_name(self):
        return "mru"
class LFUPolicy(Policy):
    """Evicts the least-frequently-used sample first."""

    def prioritize_samples(self, samples, auxilliary_info):
        # Bug fix: sorted(cmp=...) is Python-2-only (TypeError on Python 3).
        # Ascending sort by hit count is the exact equivalent.
        return sorted(samples, key=lambda e: e.value.num_hits)

    def policy_name(self):
        return "lfu"
class HyperbolicPolicy(Policy):
    """
    An implementation of Hyperbolic caching.
    <NAME>, <NAME>, and <NAME>. 2017.
    Hyperbolic caching: flexible caching for web applications. In Proceedings
    of the 2017 USENIX Conference on Usenix Annual Technical Conference
    (USENIX ATC '17). USENIX Association, Berkeley, CA, USA, 499-511.
    """
    def compare(self, e1, e2, now):
        """Three-way compare by hyperbolic priority (hits per size-weighted
        second in cache). The lower-priority entry sorts first so it is
        evicted first; `now` is in microseconds."""
        e1_duration = max(0, (now - e1.value.insertion_time) / kMicrosInSecond) * float(
            e1.value.value_size
        )
        e2_duration = max(0, (now - e2.value.insertion_time) / kMicrosInSecond) * float(
            e2.value.value_size
        )
        if e1_duration == e2_duration:
            return e1.value.num_hits - e2.value.num_hits
        # A zero duration means the entry was just inserted, so its priority
        # (hits / duration) is effectively infinite and it must sort last.
        if e1_duration == 0:
            return 1
        if e2_duration == 0:
            # Bug fix: this branch returned 1, making compare(e1, e2) and
            # compare(e2, e1) both positive — an inconsistent ordering.
            return -1
        diff = (float(e1.value.num_hits) / (float(e1_duration))) - (
            float(e2.value.num_hits) / float(e2_duration)
        )
        if diff == 0:
            return 0
        elif diff > 0:
            return 1
        else:
            return -1
    def prioritize_samples(self, samples, auxilliary_info):
        """Sort samples most-evictable first; auxilliary_info[0] is the
        current access time in microseconds."""
        # Bug fix: sorted(cmp=...) is Python-2-only; adapt via cmp_to_key.
        from functools import cmp_to_key

        assert len(auxilliary_info) == 3
        now = auxilliary_info[0]
        return sorted(
            samples, key=cmp_to_key(lambda e1, e2: self.compare(e1, e2, now))
        )
    def policy_name(self):
        return "hb"
class CostClassPolicy(Policy):
    """
    We calculate the hit density of a cost class as
    number of hits / total size in cache * average duration in the cache.
    An entry has a higher priority if its class's hit density is higher.
    """
    def compare(self, e1, e2, now, cost_classes, cost_class_label):
        """Three-way compare two samples by their cost classes' hit density;
        the entry whose class is least valuable sorts first (evicted first)."""
        e1_class = e1.value.cost_class(cost_class_label)
        e2_class = e2.value.cost_class(cost_class_label)
        assert e1_class in cost_classes
        assert e2_class in cost_classes
        e1_entry = cost_classes[e1_class]
        e2_entry = cost_classes[e2_class]
        e1_density = e1_entry.density(now)
        e2_density = e2_entry.density(now)
        e1_hits = cost_classes[e1_class].hits
        e2_hits = cost_classes[e2_class].hits
        if e1_density == e2_density:
            return e1_hits - e2_hits
        # Boundary cases: empty classes sort first; zero-density (brand-new)
        # classes sort last.
        if e1_entry.num_entries_in_cache == 0:
            return -1
        if e2_entry.num_entries_in_cache == 0:
            return 1
        if e1_density == 0:
            return 1
        if e2_density == 0:
            return -1
        diff = (float(e1_hits) / float(e1_density)) - (
            float(e2_hits) / float(e2_density)
        )
        if diff == 0:
            return 0
        elif diff > 0:
            return 1
        else:
            return -1
    def prioritize_samples(self, samples, auxilliary_info):
        """Sort samples most-evictable first; auxilliary_info carries
        [now, cost_classes, cost_class_label]."""
        # Bug fix: sorted(cmp=...) is Python-2-only; adapt via cmp_to_key.
        from functools import cmp_to_key

        assert len(auxilliary_info) == 3
        now = auxilliary_info[0]
        cost_classes = auxilliary_info[1]
        cost_class_label = auxilliary_info[2]
        return sorted(
            samples,
            key=cmp_to_key(
                lambda e1, e2: self.compare(
                    e1, e2, now, cost_classes, cost_class_label
                )
            ),
        )
    def policy_name(self):
        return "cc"
class Cache(object):
    """
    This is the base class for the implementations of alternative cache
    replacement policies.
    """
    def __init__(self, cache_size, enable_cache_row_key):
        # Capacity in bytes and bytes currently cached.
        self.cache_size = cache_size
        self.used_size = 0
        # Miss-ratio statistics at second, minute, and hour granularity.
        self.per_second_miss_ratio_stats = MissRatioStats(1)
        self.miss_ratio_stats = MissRatioStats(kSecondsInMinute)
        self.per_hour_miss_ratio_stats = MissRatioStats(kSecondsInHour)
        # 0: disabled. 1: enabled. Insert both row and the refereneced data block.
        # 2: enabled. Insert only the row but NOT the referenced data block.
        self.enable_cache_row_key = enable_cache_row_key
        # get_id -> {row_key: inserted?, "h": get completed with a hit?};
        # bounded to the most recent retain_get_id_range get ids.
        self.get_id_row_key_map = {}
        self.max_seen_get_id = 0
        self.retain_get_id_range = 100000
    def block_key(self, trace_record):
        # Cache key for a block, derived from its block id.
        return "b{}".format(trace_record.block_id)
    def row_key(self, trace_record):
        # Cache key for a row: file descriptor plus user key id.
        return "g{}-{}".format(trace_record.fd, trace_record.key_id)
    def _lookup(self, trace_record, key, hash):
        """
        Look up the key in the cache.
        Returns true upon a cache hit, false otherwise.
        """
        raise NotImplementedError
    def _evict(self, trace_record, key, hash, value_size):
        """
        Evict entries in the cache until there is enough room to insert the new
        entry with 'value_size'.
        """
        raise NotImplementedError
    def _insert(self, trace_record, key, hash, value_size):
        """
        Insert the new entry into the cache.
        """
        raise NotImplementedError
    def _should_admit(self, trace_record, key, hash, value_size):
        """
        A custom admission policy to decide whether we should admit the new
        entry upon a cache miss.
        Returns true if the new entry should be admitted, false otherwise.
        """
        raise NotImplementedError
    def cache_name(self):
        """
        The name of the replacement policy.
        """
        raise NotImplementedError
    def is_ml_cache(self):
        # Overridden by learning-based caches (see MLCache).
        return False
    def _update_stats(self, access_time, is_hit, miss_bytes):
        # Record the access at all three time granularities.
        self.per_second_miss_ratio_stats.update_metrics(access_time, is_hit, miss_bytes)
        self.miss_ratio_stats.update_metrics(access_time, is_hit, miss_bytes)
        self.per_hour_miss_ratio_stats.update_metrics(access_time, is_hit, miss_bytes)
    def access(self, trace_record):
        """
        Access a trace record. The simulator calls this function to access a
        trace record.
        """
        assert self.used_size <= self.cache_size
        if (
            self.enable_cache_row_key > 0
            and trace_record.caller == 1
            and trace_record.key_id != 0
            and trace_record.get_id != 0
        ):
            # This is a get request.
            self._access_row(trace_record)
            return
        # Block-level access: the block id doubles as the key's hash.
        is_hit = self._access_kv(
            trace_record,
            self.block_key(trace_record),
            trace_record.block_id,
            trace_record.block_size,
            trace_record.no_insert,
        )
        self._update_stats(
            trace_record.access_time, is_hit=is_hit, miss_bytes=trace_record.block_size
        )
    def _access_row(self, trace_record):
        """
        Serve one Get (row) access. Once any access of a get hits (row or
        block), later accesses of the same get_id are counted as hits
        without touching the cache again.
        """
        row_key = self.row_key(trace_record)
        self.max_seen_get_id = max(self.max_seen_get_id, trace_record.get_id)
        # Bound get_id_row_key_map: drop state for gets that are too old.
        self.get_id_row_key_map.pop(
            self.max_seen_get_id - self.retain_get_id_range, None
        )
        if trace_record.get_id not in self.get_id_row_key_map:
            self.get_id_row_key_map[trace_record.get_id] = {}
            self.get_id_row_key_map[trace_record.get_id]["h"] = False
        if self.get_id_row_key_map[trace_record.get_id]["h"]:
            # We treat future accesses as hits since this get request
            # completes.
            # print("row hit 1")
            self._update_stats(trace_record.access_time, is_hit=True, miss_bytes=0)
            return
        if row_key not in self.get_id_row_key_map[trace_record.get_id]:
            # First time seen this key.
            is_hit = self._access_kv(
                trace_record,
                key=row_key,
                hash=trace_record.key_id,
                value_size=trace_record.kv_size,
                no_insert=False,
            )
            inserted = False
            if trace_record.kv_size > 0:
                inserted = True
            self.get_id_row_key_map[trace_record.get_id][row_key] = inserted
            self.get_id_row_key_map[trace_record.get_id]["h"] = is_hit
        if self.get_id_row_key_map[trace_record.get_id]["h"]:
            # We treat future accesses as hits since this get request
            # completes.
            # print("row hit 2")
            self._update_stats(trace_record.access_time, is_hit=True, miss_bytes=0)
            return
        # Access its blocks.
        no_insert = trace_record.no_insert
        if (
            self.enable_cache_row_key == 2
            and trace_record.kv_size > 0
            and trace_record.block_type == 9
        ):
            # Mode 2 caches the row only, not the referenced data block.
            no_insert = True
        is_hit = self._access_kv(
            trace_record,
            key=self.block_key(trace_record),
            hash=trace_record.block_id,
            value_size=trace_record.block_size,
            no_insert=no_insert,
        )
        self._update_stats(
            trace_record.access_time, is_hit, miss_bytes=trace_record.block_size
        )
        if (
            trace_record.kv_size > 0
            and not self.get_id_row_key_map[trace_record.get_id][row_key]
        ):
            # Insert the row key-value pair.
            self._access_kv(
                trace_record,
                key=row_key,
                hash=trace_record.key_id,
                value_size=trace_record.kv_size,
                no_insert=False,
            )
            # Mark as inserted.
            self.get_id_row_key_map[trace_record.get_id][row_key] = True
    def _access_kv(self, trace_record, key, hash, value_size, no_insert):
        """
        Access one key-value pair; returns True on a cache hit. On a miss
        the entry may be admitted subject to _should_admit.
        """
        # Sanity checks.
        assert self.used_size <= self.cache_size
        if self._lookup(trace_record, key, hash):
            # A cache hit.
            return True
        if no_insert or value_size <= 0:
            return False
        # A cache miss.
        if value_size > self.cache_size:
            # The block is too large to fit into the cache.
            return False
        self._evict(trace_record, key, hash, value_size)
        if self._should_admit(trace_record, key, hash, value_size):
            self._insert(trace_record, key, hash, value_size)
            self.used_size += value_size
        return False
class CostClassEntry:
    """
    A cost class maintains aggregated statistics of cached entries in a class.
    For example, we may define block type as a class. Then, cached blocks of the
    same type will share one cost class entry.
    """

    def __init__(self):
        self.hits = 0
        self.num_entries_in_cache = 0
        self.size_in_cache = 0
        self.sum_insertion_times = 0
        self.sum_last_access_time = 0

    def insert(self, trace_record, key, value_size):
        """Account for a newly cached entry of this class."""
        self.size_in_cache += value_size
        self.num_entries_in_cache += 1
        self.sum_insertion_times += trace_record.access_time / kMicrosInSecond
        self.sum_last_access_time += trace_record.access_time / kMicrosInSecond

    def remove(self, insertion_time, last_access_time, key, value_size, num_hits):
        """Remove one evicted entry's contribution from the aggregates."""
        self.hits -= num_hits
        self.num_entries_in_cache -= 1
        self.sum_insertion_times -= insertion_time / kMicrosInSecond
        self.size_in_cache -= value_size
        self.sum_last_access_time -= last_access_time / kMicrosInSecond

    def update_on_hit(self, trace_record, last_access_time):
        """Count a hit and shift the entry's last-access contribution."""
        self.hits += 1
        self.sum_last_access_time -= last_access_time / kMicrosInSecond
        self.sum_last_access_time += trace_record.access_time / kMicrosInSecond

    def avg_lifetime_in_cache(self, now):
        """Average seconds entries of this class have been cached; `now`
        is in microseconds."""
        avg_insertion_time = self.sum_insertion_times / self.num_entries_in_cache
        return now / kMicrosInSecond - avg_insertion_time

    def avg_last_access_time(self):
        """Average last-access time (seconds) over cached entries; 0 when
        the class is empty."""
        if self.num_entries_in_cache == 0:
            return 0
        return float(self.sum_last_access_time) / float(self.num_entries_in_cache)

    def avg_size(self):
        """Average size of this class's cached entries; 0 when empty.

        Bug fix: this previously divided sum_last_access_time (a verbatim
        copy of avg_last_access_time) instead of size_in_cache.
        """
        if self.num_entries_in_cache == 0:
            return 0
        return float(self.size_in_cache) / float(self.num_entries_in_cache)

    def density(self, now):
        """Cost-class 'density': cached bytes times average residency time."""
        avg_insertion_time = self.sum_insertion_times / self.num_entries_in_cache
        in_cache_duration = now / kMicrosInSecond - avg_insertion_time
        return self.size_in_cache * in_cache_duration
class MLCache(Cache):
    """
    MLCache is the base class for implementations of alternative replacement
    policies using reinforcement learning.
    """
    def __init__(self, cache_size, enable_cache_row_key, policies, cost_class_label):
        super(MLCache, self).__init__(cache_size, enable_cache_row_key)
        self.table = HashTable()
        # Policy-selection counts at minute and hour granularity.
        self.policy_stats = PolicyStats(kSecondsInMinute, policies)
        self.per_hour_policy_stats = PolicyStats(kSecondsInHour, policies)
        self.policies = policies
        # Cost-class name -> CostClassEntry aggregates; a None label
        # disables cost-class bookkeeping entirely.
        self.cost_classes = {}
        self.cost_class_label = cost_class_label
    def is_ml_cache(self):
        return True
    def _lookup(self, trace_record, key, hash):
        """
        Probe the hash table; on a hit, refresh the entry's access metadata
        (and its cost-class statistics when enabled) by reinserting it.
        """
        value = self.table.lookup(key, hash)
        if value is not None:
            # Update the entry's cost class statistics.
            if self.cost_class_label is not None:
                cost_class = value.cost_class(self.cost_class_label)
                assert cost_class in self.cost_classes
                self.cost_classes[cost_class].update_on_hit(
                    trace_record, value.last_access_time
                )
            # Update the entry's last access time.
            self.table.insert(
                key,
                hash,
                CacheEntry(
                    value_size=value.value_size,
                    cf_id=value.cf_id,
                    level=value.level,
                    block_type=value.block_type,
                    table_id=value.table_id,
                    access_number=self.miss_ratio_stats.num_accesses,
                    time_s=trace_record.access_time,
                    num_hits=value.num_hits + 1,
                ),
            )
            return True
        return False
    def _evict(self, trace_record, key, hash, value_size):
        # Select a policy, random sample kSampleSize keys from the cache, then
        # evict keys in the sample set until we have enough room for the new
        # entry.
        policy_index = self._select_policy(trace_record, key)
        assert policy_index < len(self.policies) and policy_index >= 0
        self.policies[policy_index].delete(key)
        self.policy_stats.update_metrics(trace_record.access_time, policy_index)
        self.per_hour_policy_stats.update_metrics(
            trace_record.access_time, policy_index
        )
        while self.used_size + value_size > self.cache_size:
            # Randomly sample n entries.
            samples = self.table.random_sample(kSampleSize)
            # The chosen policy orders the sample most-evictable first.
            samples = self.policies[policy_index].prioritize_samples(
                samples,
                [trace_record.access_time, self.cost_classes, self.cost_class_label],
            )
            for hash_entry in samples:
                assert self.table.delete(hash_entry.key, hash_entry.hash) is not None
                self.used_size -= hash_entry.value.value_size
                self.policies[policy_index].evict(
                    key=hash_entry.key, max_size=self.table.elements
                )
                # Update the entry's cost class statistics.
                if self.cost_class_label is not None:
                    cost_class = hash_entry.value.cost_class(self.cost_class_label)
                    assert cost_class in self.cost_classes
                    self.cost_classes[cost_class].remove(
                        hash_entry.value.insertion_time,
                        hash_entry.value.last_access_time,
                        key,
                        hash_entry.value.value_size,
                        hash_entry.value.num_hits,
                    )
                # Stop early once the new entry fits.
                if self.used_size + value_size <= self.cache_size:
                    break
    def _insert(self, trace_record, key, hash, value_size):
        assert self.used_size + value_size <= self.cache_size
        entry = CacheEntry(
            value_size,
            trace_record.cf_id,
            trace_record.level,
            trace_record.block_type,
            trace_record.table_id,
            self.miss_ratio_stats.num_accesses,
            trace_record.access_time,
        )
        # Update the entry's cost class statistics.
        if self.cost_class_label is not None:
            cost_class = entry.cost_class(self.cost_class_label)
            if cost_class not in self.cost_classes:
                self.cost_classes[cost_class] = CostClassEntry()
            self.cost_classes[cost_class].insert(trace_record, key, value_size)
        self.table.insert(key, hash, entry)
    def _should_admit(self, trace_record, key, hash, value_size):
        # ML caches admit everything; learning happens at eviction time.
        return True
    def _select_policy(self, trace_record, key):
        """Return the index of the policy to use for this eviction."""
        raise NotImplementedError
class ThompsonSamplingCache(MLCache):
    """
    An implementation of Thompson Sampling for the Bernoulli Bandit.
    <NAME>, <NAME>, <NAME>, <NAME>,
    and <NAME>. 2018. A Tutorial on Thompson Sampling. Found.
    Trends Mach. Learn. 11, 1 (July 2018), 1-96.
    DOI: https://doi.org/10.1561/2200000070
    """
    def __init__(
        self,
        cache_size,
        enable_cache_row_key,
        policies,
        cost_class_label,
        init_a=1,
        init_b=1,
    ):
        super(ThompsonSamplingCache, self).__init__(
            cache_size, enable_cache_row_key, policies, cost_class_label
        )
        # Per-policy Beta-distribution parameters (successes / failures).
        # The original initialized these to dicts and then rebuilt the same
        # lists once per policy inside a pointless loop; a single assignment
        # produces the identical final state.
        self._as = [init_a] * len(self.policies)
        self._bs = [init_b] * len(self.policies)
    def _select_policy(self, trace_record, key):
        """Sample each policy's Beta posterior, play the argmax arm, and
        update that arm with the observed binary reward."""
        if len(self.policies) == 1:
            return 0
        samples = [
            np.random.beta(self._as[x], self._bs[x]) for x in range(len(self.policies))
        ]
        selected_policy = max(range(len(self.policies)), key=lambda x: samples[x])
        reward = self.policies[selected_policy].generate_reward(key)
        assert reward <= 1 and reward >= 0
        self._as[selected_policy] += reward
        self._bs[selected_policy] += 1 - reward
        return selected_policy
    def cache_name(self):
        if self.enable_cache_row_key:
            return "Hybrid ThompsonSampling with cost class {} (ts_hybrid)".format(
                self.cost_class_label
            )
        return "ThompsonSampling with cost class {} (ts)".format(self.cost_class_label)
class LinUCBCache(MLCache):
    """
    An implementation of LinUCB with disjoint linear models.
    <NAME>, <NAME>, <NAME>, and <NAME>. 2010.
    A contextual-bandit approach to personalized news article recommendation.
    In Proceedings of the 19th international conference on World wide web
    (WWW '10). ACM, New York, NY, USA, 661-670.
    DOI=http://dx.doi.org/10.1145/1772690.1772758
    """
    def __init__(self, cache_size, enable_cache_row_key, policies, cost_class_label):
        super(LinUCBCache, self).__init__(
            cache_size, enable_cache_row_key, policies, cost_class_label
        )
        # NOTE(review): only features 0..2 are populated in _select_policy;
        # the fourth dimension always stays zero — confirm whether it was
        # meant as a bias term.
        self.nfeatures = 4  # Block type, level, cf.
        self.th = np.zeros((len(self.policies), self.nfeatures))
        self.eps = 0.2
        # Per-policy reward vector b and design matrix A with its inverse.
        self.b = np.zeros_like(self.th)
        self.A = np.zeros((len(self.policies), self.nfeatures, self.nfeatures))
        self.A_inv = np.zeros((len(self.policies), self.nfeatures, self.nfeatures))
        for i in range(len(self.policies)):
            self.A[i] = np.identity(self.nfeatures)
        self.th_hat = np.zeros_like(self.th)
        self.p = np.zeros(len(self.policies))
        self.alph = 0.2
    def _select_policy(self, trace_record, key):
        """
        Pick the policy with the highest upper-confidence-bound score for the
        current context, then update that policy's linear model with the
        observed binary reward.
        """
        if len(self.policies) == 1:
            return 0
        x_i = np.zeros(self.nfeatures)  # The current context vector
        x_i[0] = trace_record.block_type
        x_i[1] = trace_record.level
        x_i[2] = trace_record.cf_id
        p = np.zeros(len(self.policies))
        for a in range(len(self.policies)):
            # Ridge-regression estimate and its confidence width for arm a.
            self.th_hat[a] = self.A_inv[a].dot(self.b[a])
            ta = x_i.dot(self.A_inv[a]).dot(x_i)
            a_upper_ci = self.alph * np.sqrt(ta)
            a_mean = self.th_hat[a].dot(x_i)
            p[a] = a_mean + a_upper_ci
        # Tiny random jitter breaks ties between arms.
        p = p + (np.random.random(len(p)) * 0.000001)
        selected_policy = p.argmax()
        reward = self.policies[selected_policy].generate_reward(key)
        assert reward <= 1 and reward >= 0
        self.A[selected_policy] += np.outer(x_i, x_i)
        self.b[selected_policy] += reward * x_i
        self.A_inv[selected_policy] = np.linalg.inv(self.A[selected_policy])
        del x_i
        return selected_policy
    def cache_name(self):
        if self.enable_cache_row_key:
            return "Hybrid LinUCB with cost class {} (linucb_hybrid)".format(
                self.cost_class_label
            )
        return "LinUCB with cost class {} (linucb)".format(self.cost_class_label)
class OPTCacheEntry:
    """
    A cache entry for the OPT algorithm. The entries are sorted based on its
    next access sequence number in reverse order, i.e., the entry which next
    access is the furthest in the future is ordered before other entries.
    """

    def __init__(self, key, next_access_seq_no, value_size):
        self.key = key
        self.next_access_seq_no = next_access_seq_no
        self.value_size = value_size
        # Set lazily by PQTable when this entry is superseded.
        self.is_removed = False

    def __cmp__(self, other):
        # Python-2 three-way compare; kept for backward compatibility.
        if other.next_access_seq_no != self.next_access_seq_no:
            return other.next_access_seq_no - self.next_access_seq_no
        return self.value_size - other.value_size

    def __lt__(self, other):
        # Bug fix: Python 3's heapq ignores __cmp__ and requires __lt__;
        # without it PQTable.pqinsert raises TypeError. Mirrors __cmp__:
        # furthest next access first, then smaller size first.
        if other.next_access_seq_no != self.next_access_seq_no:
            return self.next_access_seq_no > other.next_access_seq_no
        return self.value_size < other.value_size

    def __repr__(self):
        return "({} {} {} {})".format(
            self.key, self.next_access_seq_no, self.value_size, self.is_removed
        )
class PQTable:
    """
    A hash table with a priority queue.
    """

    def __init__(self):
        # Heap ordered by the entries' own comparison methods; the table
        # maps key -> live (non-superseded) entry.
        self.pq = []
        self.table = {}

    def pqinsert(self, entry):
        """Add a new key or update the priority of an existing key;
        returns the entry it displaced, if any."""
        # heapq has no remove, so a superseded entry is only flagged here
        # and skipped lazily by pqpop/pqpeek.
        previous = self.table.pop(entry.key, None)
        if previous:
            previous.is_removed = True
        self.table[entry.key] = entry
        heapq.heappush(self.pq, entry)
        return previous

    def pqpop(self):
        """Pop and return the highest-priority live entry, or None."""
        while self.pq:
            candidate = heapq.heappop(self.pq)
            if candidate.is_removed:
                continue
            del self.table[candidate.key]
            return candidate
        return None

    def pqpeek(self):
        """Return (without removing) the highest-priority live entry."""
        while self.pq:
            candidate = self.pq[0]
            if not candidate.is_removed:
                return candidate
            # Discard stale heap nodes as they surface.
            heapq.heappop(self.pq)
        return

    def __contains__(self, k):
        return k in self.table

    def __getitem__(self, k):
        return self.table[k]

    def __len__(self):
        return len(self.table)

    def values(self):
        return self.table.values()
class OPTCache(Cache):
    """
    An implementation of the Belady MIN algorithm. OPTCache evicts an entry
    in the cache whose next access occurs furthest in the future.
    Note that Belady MIN algorithm is optimal assuming all blocks having the
    same size and a missing entry will be inserted in the cache.
    These are NOT true for the block cache trace since blocks have different
    sizes and we may not insert a block into the cache upon a cache miss.
    However, it is still useful to serve as a "theoretical upper bound" on the
    lowest miss ratio we can achieve given a cache size.
    <NAME>. 1966. A Study of Replacement Algorithms for a
    Virtual-storage Computer. IBM Syst. J. 5, 2 (June 1966), 78-101.
    DOI=http://dx.doi.org/10.1147/sj.52.0078
    """

    def __init__(self, cache_size):
        super(OPTCache, self).__init__(cache_size, enable_cache_row_key=0)
        self.table = PQTable()

    def _lookup(self, trace_record, key, hash):
        if key not in self.table:
            return False
        # Cache hit: re-key the entry under its updated next-access sequence
        # number. The reinsert must displace the stale entry.
        updated = OPTCacheEntry(
            key, trace_record.next_access_seq_no, self.table[key].value_size
        )
        assert self.table.pqinsert(updated) is not None
        return True

    def _evict(self, trace_record, key, hash, value_size):
        # Pop the entry whose next access lies furthest in the future until
        # the new entry fits.
        while self.used_size + value_size > self.cache_size:
            victim = self.table.pqpop()
            assert victim is not None
            self.used_size -= victim.value_size

    def _insert(self, trace_record, key, hash, value_size):
        # A brand-new key must not displace an existing entry.
        fresh = OPTCacheEntry(key, trace_record.next_access_seq_no, value_size)
        assert self.table.pqinsert(fresh) is None

    def _should_admit(self, trace_record, key, hash, value_size):
        return True

    def cache_name(self):
        return "Belady MIN (opt)"
class GDSizeEntry:
    """
    A cache entry for the greedy dual size replacement policy.
    """

    def __init__(self, key, value_size, priority):
        self.key = key
        self.value_size = value_size
        self.priority = priority
        # Set lazily by PQTable when this entry is superseded.
        self.is_removed = False

    def __cmp__(self, other):
        # Python-2 three-way compare: lowest priority first (evicted first).
        if other.priority != self.priority:
            return self.priority - other.priority
        return self.value_size - other.value_size

    def __lt__(self, other):
        # Bug fix: Python 3's heapq ignores __cmp__ and requires __lt__;
        # without it PQTable.pqinsert raises TypeError. Mirrors __cmp__.
        if other.priority != self.priority:
            return self.priority < other.priority
        return self.value_size < other.value_size

    def __repr__(self):
        # Bug fix: this formatted self.next_access_seq_no — an attribute only
        # OPTCacheEntry has — so repr() raised AttributeError.
        return "({} {} {} {})".format(
            self.key, self.priority, self.value_size, self.is_removed
        )
class GDSizeCache(Cache):
    """
    An implementation of the greedy dual size algorithm.
    We define cost as an entry's size.
    See https://www.usenix.org/legacy/publications/library/proceedings/usits97/full_papers/cao/cao_html/node8.html
    and <NAME>. The k-server dual and loose competitiveness for paging.
    Algorithmica,June 1994, vol. 11,(no.6):525-41.
    Rewritten version of ''On-line caching as cache size varies'',
    in The 2nd Annual ACM-SIAM Symposium on Discrete Algorithms, 241-250, 1991.
    """

    def __init__(self, cache_size, enable_cache_row_key):
        super(GDSizeCache, self).__init__(cache_size, enable_cache_row_key)
        self.table = PQTable()
        # Inflation value: the priority of the most recently evicted entry.
        self.L = 0.0

    def cache_name(self):
        return (
            "Hybrid GreedyDualSize (gdsize_hybrid)"
            if self.enable_cache_row_key
            else "GreedyDualSize (gdsize)"
        )

    def _lookup(self, trace_record, key, hash):
        if key not in self.table:
            return False
        # Cache hit: refresh the entry's priority to L + cost (cost == size).
        # The reinsert must displace the stale entry.
        hit_entry = self.table[key]
        refreshed = GDSizeEntry(
            key, hit_entry.value_size, self.L + hit_entry.value_size
        )
        assert self.table.pqinsert(refreshed) is not None
        return True

    def _evict(self, trace_record, key, hash, value_size):
        # Evict the lowest-priority entries until the new entry fits,
        # advancing the inflation value L as we go.
        while self.used_size + value_size > self.cache_size:
            victim = self.table.pqpop()
            assert victim is not None
            self.L = victim.priority
            self.used_size -= victim.value_size

    def _insert(self, trace_record, key, hash, value_size):
        # A brand-new key must not displace an existing entry.
        assert (
            self.table.pqinsert(GDSizeEntry(key, value_size, self.L + value_size))
            is None
        )

    def _should_admit(self, trace_record, key, hash, value_size):
        return True
class Deque(object):
    """A Deque class facilitates the implementation of LRU and ARC."""

    def __init__(self):
        self.od = OrderedDict()

    def appendleft(self, k):
        """Make k the most-recent key; re-adding moves it to the MRU end."""
        if k in self.od:
            del self.od[k]
        self.od[k] = None

    def pop(self):
        """Remove and return the least-recent key, or None when empty."""
        if not self.od:
            return None
        oldest, _ = self.od.popitem(last=False)
        return oldest

    def remove(self, k):
        del self.od[k]

    def __len__(self):
        return len(self.od)

    def __contains__(self, k):
        return k in self.od

    def __iter__(self):
        # Iterate from most-recent to least-recent.
        return reversed(self.od)

    def __repr__(self):
        return "Deque(%r)" % (list(self),)
class ARCCache(Cache):
    """
    An implementation of ARC. ARC assumes that all blocks are having the
    same size. The size of index and filter blocks are variable. To accommodate
    this, we modified ARC as follows:
    1) We use 16 KB as the average block size and calculate the number of blocks
    (c) in the cache.
    2) When we insert an entry, the cache evicts entries in both t1 and t2
    queues until it has enough space for the new entry. This also requires
    modification of the algorithm to maintain a maximum of 2*c blocks.
    <NAME> and <NAME>. 2003. ARC: A Self-Tuning, Low
    Overhead Replacement Cache. In Proceedings of the 2nd USENIX Conference on
    File and Storage Technologies (FAST '03). USENIX Association, Berkeley, CA,
    USA, 115-130.
    """
    def __init__(self, cache_size, enable_cache_row_key):
        super(ARCCache, self).__init__(cache_size, enable_cache_row_key)
        self.table = {}
        # NOTE(review): the class comment implies cache_size / (16 * 1024)
        # elements, but as written this evaluates to cache_size * 64 —
        # confirm before changing, since c sets the ghost-list budget and
        # the clamp on p below.
        self.c = cache_size / 16 * 1024  # Number of elements in the cache.
        self.p = 0  # Target size for the list T1
        # L1: only once recently
        self.t1 = Deque()  # T1: recent cache entries
        self.b1 = Deque()  # B1: ghost entries recently evicted from the T1 cache
        # L2: at least twice recently
        self.t2 = Deque()  # T2: frequent entries
        self.b2 = Deque()  # B2: ghost entries recently evicted from the T2 cache
    def _replace(self, key, value_size):
        """
        Evict from T1 or T2 — favoring T1 while it exceeds its target p —
        until the new entry of value_size fits; evicted keys become ghosts.
        """
        while self.used_size + value_size > self.cache_size:
            if self.t1 and ((key in self.b2) or (len(self.t1) > self.p)):
                old = self.t1.pop()
                self.b1.appendleft(old)
            else:
                if self.t2:
                    old = self.t2.pop()
                    self.b2.appendleft(old)
                else:
                    old = self.t1.pop()
                    self.b1.appendleft(old)
            self.used_size -= self.table[old].value_size
            del self.table[old]
    def _lookup(self, trace_record, key, hash):
        # Case I: key is in T1 or T2.
        # Move key to MRU position in T2.
        if key in self.t1:
            self.t1.remove(key)
            self.t2.appendleft(key)
            return True
        if key in self.t2:
            self.t2.remove(key)
            self.t2.appendleft(key)
            return True
        return False
    def _evict(self, trace_record, key, hash, value_size):
        # Case II: key is in B1
        # Move x from B1 to the MRU position in T2 (also fetch x to the cache).
        if key in self.b1:
            # Recency ghost hit: grow T1's target size p.
            self.p = min(self.c, self.p + max(len(self.b2) / len(self.b1), 1))
            self._replace(key, value_size)
            self.b1.remove(key)
            self.t2.appendleft(key)
            return
        # Case III: key is in B2
        # Move x from B2 to the MRU position in T2 (also fetch x to the cache).
        if key in self.b2:
            # Frequency ghost hit: shrink T1's target size p.
            self.p = max(0, self.p - max(len(self.b1) / len(self.b2), 1))
            self._replace(key, value_size)
            self.b2.remove(key)
            self.t2.appendleft(key)
            return
        # Case IV: key is not in (T1 u B1 u T2 u B2)
        self._replace(key, value_size)
        # Trim the ghost lists: |T1|+|B1| stays below c, and all four lists
        # together stay below 2*c.
        while len(self.t1) + len(self.b1) >= self.c and self.b1:
            self.b1.pop()
        total = len(self.t1) + len(self.b1) + len(self.t2) + len(self.b2)
        while total >= (2 * self.c) and self.b2:
            self.b2.pop()
            total -= 1
        # Finally, move it to MRU position in T1.
        self.t1.appendleft(key)
        return
    def _insert(self, trace_record, key, hash, value_size):
        self.table[key] = CacheEntry(
            value_size,
            trace_record.cf_id,
            trace_record.level,
            trace_record.block_type,
            trace_record.table_id,
            0,
            trace_record.access_time,
        )
    def _should_admit(self, trace_record, key, hash, value_size):
        return True
    def cache_name(self):
        if self.enable_cache_row_key:
            return "Hybrid Adaptive Replacement Cache (arc_hybrid)"
        return "Adaptive Replacement Cache (arc)"
class LRUCache(Cache):
    """
    A strict LRU queue.
    """

    def __init__(self, cache_size, enable_cache_row_key):
        super(LRUCache, self).__init__(cache_size, enable_cache_row_key)
        self.table = {}
        self.lru = Deque()

    def cache_name(self):
        return (
            "Hybrid LRU (lru_hybrid)" if self.enable_cache_row_key else "LRU (lru)"
        )

    def _lookup(self, trace_record, key, hash):
        if key not in self.table:
            return False
        # Cache hit: move the key to the MRU position.
        self.lru.remove(key)
        self.lru.appendleft(key)
        return True

    def _evict(self, trace_record, key, hash, value_size):
        # Drop entries from the LRU end until the new entry fits.
        while self.used_size + value_size > self.cache_size:
            victim = self.lru.pop()
            self.used_size -= self.table[victim].value_size
            del self.table[victim]

    def _insert(self, trace_record, key, hash, value_size):
        self.table[key] = CacheEntry(
            value_size,
            trace_record.cf_id,
            trace_record.level,
            trace_record.block_type,
            trace_record.table_id,
            0,
            trace_record.access_time,
        )
        self.lru.appendleft(key)

    def _should_admit(self, trace_record, key, hash, value_size):
        return True
class TraceCache(Cache):
    """
    A trace cache. Lookup returns true if the trace observes a cache hit.
    It is used to maintain cache hits observed in the trace.
    """

    def __init__(self, cache_size):
        super(TraceCache, self).__init__(cache_size, enable_cache_row_key=0)

    def _lookup(self, trace_record, key, hash):
        # Replay the hit/miss outcome recorded in the trace itself.
        return trace_record.is_hit

    def _evict(self, trace_record, key, hash, value_size):
        # Nothing is ever stored, so there is nothing to evict.
        pass

    def _insert(self, trace_record, key, hash, value_size):
        # Never stores entries.
        pass

    def _should_admit(self, trace_record, key, hash, value_size):
        # Reject all insertions; hits come solely from the trace.
        return False

    def cache_name(self):
        return "Trace"
def parse_cache_size(cs):
    """Parse a human-readable cache size string into a byte count.

    Accepts a plain integer, or an integer followed by a binary unit
    suffix: "K" (KiB), "M" (MiB), "G" (GiB), or "T" (TiB). Embedded
    newlines are stripped first (trace configs often carry a trailing
    newline). "K" support is a backward-compatible extension; the
    original accepted only M/G/T.

    Raises:
        ValueError: if the numeric part is not a valid integer.
    """
    cs = cs.replace("\n", "")
    multipliers = {"K": 1024, "M": 1024 ** 2, "G": 1024 ** 3, "T": 1024 ** 4}
    if cs and cs[-1] in multipliers:
        return int(cs[:-1]) * multipliers[cs[-1]]
    return int(cs)
def create_cache(cache_type, cache_size, downsample_size):
    """Instantiate the simulated cache named by ``cache_type``.

    The cache size is scaled down by ``downsample_size`` to match the
    trace's sampling frequency. A "_hybridn" suffix enables row-key
    caching mode 2, "_hybrid" enables mode 1. Unknown cache types print
    an error and abort via ``assert False``.
    """
    cache_size = cache_size / downsample_size
    enable_cache_row_key = 0
    # Check "hybridn" before "hybrid" since the former contains the latter.
    if "hybridn" in cache_type:
        enable_cache_row_key = 2
        cache_type = cache_type[:-8]  # strip "_hybridn"
    if "hybrid" in cache_type:
        enable_cache_row_key = 1
        cache_type = cache_type[:-7]  # strip "_hybrid"
    # Bandit caches choosing among several eviction policies.
    if cache_type == "ts":
        return ThompsonSamplingCache(
            cache_size,
            enable_cache_row_key,
            [LRUPolicy(), LFUPolicy(), HyperbolicPolicy()],
            cost_class_label=None,
        )
    if cache_type == "linucb":
        return LinUCBCache(
            cache_size,
            enable_cache_row_key,
            [LRUPolicy(), LFUPolicy(), HyperbolicPolicy()],
            cost_class_label=None,
        )
    # Single-policy Thompson-sampling variants.
    single_policy = {
        "pylru": LRUPolicy,
        "pymru": MRUPolicy,
        "pylfu": LFUPolicy,
        "pyhb": HyperbolicPolicy,
    }
    if cache_type in single_policy:
        return ThompsonSamplingCache(
            cache_size,
            enable_cache_row_key,
            [single_policy[cache_type]()],
            cost_class_label=None,
        )
    # Cost-class-aware variants differ only in the cost-class label.
    cost_class_labels = {
        "pycctbbt": "table_bt",
        "pycccf": "cf",
        "pycctblevelbt": "table_level_bt",
        "pycccfbt": "cf_bt",
        "pycctb": "table",
        "pyccbt": "bt",
    }
    if cache_type in cost_class_labels:
        return ThompsonSamplingCache(
            cache_size,
            enable_cache_row_key,
            [CostClassPolicy()],
            cost_class_label=cost_class_labels[cache_type],
        )
    if cache_type == "opt":
        if enable_cache_row_key:
            print("opt does not support hybrid mode.")
            assert False
        return OPTCache(cache_size)
    if cache_type == "trace":
        if enable_cache_row_key:
            print("trace does not support hybrid mode.")
            assert False
        return TraceCache(cache_size)
    if cache_type == "lru":
        return LRUCache(cache_size, enable_cache_row_key)
    if cache_type == "arc":
        return ARCCache(cache_size, enable_cache_row_key)
    if cache_type == "gdsize":
        return GDSizeCache(cache_size, enable_cache_row_key)
    print("Unknown cache type {}".format(cache_type))
    assert False
    return None
class BlockAccessTimeline:
    """
    BlockAccessTimeline stores all accesses of a block.
    """

    def __init__(self):
        # Sequence numbers at which this block is accessed, in trace order.
        self.accesses = []
        # Index of the *next* access to report. Starts at 1 because index 0
        # is the access being processed when the block first appears.
        self.current_access_index = 1

    def get_next_access(self):
        """Return the next access sequence number, or sys.maxsize if none."""
        idx = self.current_access_index
        if idx == len(self.accesses):
            # Exhausted: the block is never accessed again.
            return sys.maxsize
        self.current_access_index = idx + 1
        return self.accesses[idx]
def percent(e1, e2):
    """Express ``e1`` as a percentage of ``e2``.

    Returns -1 as a sentinel when ``e2`` is zero (ratio undefined).
    """
    return -1 if e2 == 0 else float(e1) * 100.0 / float(e2)
def is_target_cf(access_cf, target_cf_name):
    """True when the access belongs to the simulated column family.

    The special target name "all" matches every column family.
    """
    return target_cf_name == "all" or access_cf == target_cf_name
def run(
    trace_file_path,
    cache_type,
    cache,
    warmup_seconds,
    max_accesses_to_process,
    target_cf_name,
):
    """Replay the block access trace against the simulated cache.

    Args:
        trace_file_path: Path to the comma-separated block access trace.
        cache_type: Name of the cache variant (used only for reporting).
        cache: The cache instance to simulate.
        warmup_seconds: Trace seconds to replay before resetting the
            cache's hit/miss counters; <= 0 disables warmup.
        max_accesses_to_process: Stop after this many accesses; -1 means
            process the whole trace.
        target_cf_name: Only accesses of this column family are replayed;
            "all" replays everything.

    Returns:
        (trace_start_time, trace_duration) in trace-timestamp units
        (presumably microseconds, given the /1000000 below — confirm).
    """
    warmup_complete = False
    trace_miss_ratio_stats = MissRatioStats(kSecondsInMinute)
    access_seq_no = 0
    time_interval = 1
    start_time = time.time()
    trace_start_time = 0
    trace_duration = 0
    is_opt_cache = False
    if cache.cache_name() == "Belady MIN (opt)":
        is_opt_cache = True
    block_access_timelines = {}
    num_no_inserts = 0
    num_blocks_with_no_size = 0
    num_inserts_block_with_no_size = 0
    if is_opt_cache:
        # Read all blocks in memory and stores their access times so that OPT
        # can use this information to evict the cached key which next access is
        # the furthest in the future.
        print("Preprocessing block traces.")
        with open(trace_file_path, "r") as trace_file:
            for line in trace_file:
                if (
                    max_accesses_to_process != -1
                    and access_seq_no > max_accesses_to_process
                ):
                    break
                ts = line.split(",")
                timestamp = int(ts[0])
                cf_name = ts[5]
                if not is_target_cf(cf_name, target_cf_name):
                    continue
                if trace_start_time == 0:
                    trace_start_time = timestamp
                trace_duration = timestamp - trace_start_time
                block_id = int(ts[1])
                block_size = int(ts[3])
                no_insert = int(ts[9])
                if block_id not in block_access_timelines:
                    block_access_timelines[block_id] = BlockAccessTimeline()
                if block_size == 0:
                    num_blocks_with_no_size += 1
                block_access_timelines[block_id].accesses.append(access_seq_no)
                access_seq_no += 1
                if no_insert == 1:
                    num_no_inserts += 1
                if no_insert == 0 and block_size == 0:
                    num_inserts_block_with_no_size += 1
                # Only check the progress timer every 100 accesses.
                if access_seq_no % 100 != 0:
                    continue
                now = time.time()
                if now - start_time > time_interval * 10:
                    print(
                        "Take {} seconds to process {} trace records with trace "
                        "duration of {} seconds. Throughput: {} records/second.".format(
                            now - start_time,
                            access_seq_no,
                            trace_duration / 1000000,
                            access_seq_no / (now - start_time),
                        )
                    )
                    time_interval += 1
        print(
            "Trace contains {0} blocks, {1}({2:.2f}%) blocks with no size."
            "{3} accesses, {4}({5:.2f}%) accesses with no_insert,"
            "{6}({7:.2f}%) accesses that want to insert but block size is 0.".format(
                len(block_access_timelines),
                num_blocks_with_no_size,
                percent(num_blocks_with_no_size, len(block_access_timelines)),
                access_seq_no,
                num_no_inserts,
                percent(num_no_inserts, access_seq_no),
                num_inserts_block_with_no_size,
                percent(num_inserts_block_with_no_size, access_seq_no),
            )
        )
    # Reset counters for the actual simulation pass.
    access_seq_no = 0
    time_interval = 1
    start_time = time.time()
    trace_start_time = 0
    trace_duration = 0
    print("Running simulated {} cache on block traces.".format(cache.cache_name()))
    with open(trace_file_path, "r") as trace_file:
        for line in trace_file:
            if (
                max_accesses_to_process != -1
                and access_seq_no > max_accesses_to_process
            ):
                break
            if access_seq_no % 1000000 == 0:
                # Force a python gc periodically to reduce memory usage.
                gc.collect()
            ts = line.split(",")
            timestamp = int(ts[0])
            cf_name = ts[5]
            if not is_target_cf(cf_name, target_cf_name):
                continue
            if trace_start_time == 0:
                trace_start_time = timestamp
            trace_duration = timestamp - trace_start_time
            # Once the warmup window elapses, reset the cache's counters so
            # reported miss ratios exclude the cold-start period.
            if (
                not warmup_complete
                and warmup_seconds > 0
                and trace_duration > warmup_seconds * 1000000
            ):
                cache.miss_ratio_stats.reset_counter()
                warmup_complete = True
            next_access_seq_no = 0
            block_id = int(ts[1])
            if is_opt_cache:
                next_access_seq_no = block_access_timelines[block_id].get_next_access()
            record = TraceRecord(
                access_time=int(ts[0]),
                block_id=int(ts[1]),
                block_type=int(ts[2]),
                block_size=int(ts[3]),
                cf_id=int(ts[4]),
                cf_name=ts[5],
                level=int(ts[6]),
                fd=int(ts[7]),
                caller=int(ts[8]),
                no_insert=int(ts[9]),
                get_id=int(ts[10]),
                key_id=int(ts[11]),
                kv_size=int(ts[12]),
                is_hit=int(ts[13]),
                referenced_key_exist_in_block=int(ts[14]),
                num_keys_in_block=int(ts[15]),
                table_id=int(ts[16]),
                seq_number=int(ts[17]),
                block_key_size=int(ts[18]),
                key_size=int(ts[19]),
                block_offset_in_file=int(ts[20]),
                next_access_seq_no=next_access_seq_no,
            )
            trace_miss_ratio_stats.update_metrics(
                record.access_time, is_hit=record.is_hit, miss_bytes=record.block_size
            )
            cache.access(record)
            access_seq_no += 1
            del record
            del ts
            if access_seq_no % 100 != 0:
                continue
            # Report progress every 10 seconds.
            now = time.time()
            if now - start_time > time_interval * 10:
                print(
                    "Take {} seconds to process {} trace records with trace "
                    "duration of {} seconds. Throughput: {} records/second. "
                    "Trace miss ratio {}".format(
                        now - start_time,
                        access_seq_no,
                        trace_duration / 1000000,
                        access_seq_no / (now - start_time),
                        trace_miss_ratio_stats.miss_ratio(),
                    )
                )
                time_interval += 1
    print(
        "{},0,0,{},{},{}".format(
            cache_type,
            cache.cache_size,
            cache.miss_ratio_stats.miss_ratio(),
            cache.miss_ratio_stats.num_accesses,
        )
    )
    now = time.time()
    print(
        "Take {} seconds to process {} trace records with trace duration of {} "
        "seconds. Throughput: {} records/second. Trace miss ratio {}".format(
            now - start_time,
            access_seq_no,
            trace_duration / 1000000,
            access_seq_no / (now - start_time),
            trace_miss_ratio_stats.miss_ratio(),
        )
    )
    print(
        "{},0,0,{},{},{}".format(
            cache_type,
            cache.cache_size,
            cache.miss_ratio_stats.miss_ratio(),
            cache.miss_ratio_stats.num_accesses,
        )
    )
    return trace_start_time, trace_duration
def report_stats(
    cache,
    cache_type,
    cache_size,
    target_cf_name,
    result_dir,
    trace_start_time,
    trace_end_time,
):
    """Write miss-ratio and miss-bytes statistics files into ``result_dir``.

    Emits one miss-ratio-curve data point, then per-second / per-minute /
    per-hour average and p95 miss-byte files plus miss timelines. ML-based
    caches additionally get policy-decision timelines.
    """
    cache_label = "{}-{}-{}".format(cache_type, cache_size, target_cf_name)
    with open("{}/data-ml-mrc-{}".format(result_dir, cache_label), "w+") as mrc_file:
        mrc_file.write(
            "{},0,0,{},{},{}\n".format(
                cache_type,
                cache_size,
                cache.miss_ratio_stats.miss_ratio(),
                cache.miss_ratio_stats.num_accesses,
            )
        )
    # One stats object per aggregation granularity.
    cache_stats = [
        cache.per_second_miss_ratio_stats,
        cache.miss_ratio_stats,
        cache.per_hour_miss_ratio_stats,
    ]
    for i in range(len(cache_stats)):
        avg_miss_bytes, p95_miss_bytes = cache_stats[i].compute_miss_bytes()
        with open(
            "{}/data-ml-avgmb-{}-{}".format(
                result_dir, cache_stats[i].time_unit, cache_label
            ),
            "w+",
        ) as mb_file:
            mb_file.write(
                "{},0,0,{},{}\n".format(cache_type, cache_size, avg_miss_bytes)
            )
        with open(
            "{}/data-ml-p95mb-{}-{}".format(
                result_dir, cache_stats[i].time_unit, cache_label
            ),
            "w+",
        ) as mb_file:
            mb_file.write(
                "{},0,0,{},{}\n".format(cache_type, cache_size, p95_miss_bytes)
            )
        cache_stats[i].write_miss_timeline(
            cache_type,
            cache_size,
            target_cf_name,
            result_dir,
            trace_start_time,
            trace_end_time,
        )
        cache_stats[i].write_miss_ratio_timeline(
            cache_type,
            cache_size,
            target_cf_name,
            result_dir,
            trace_start_time,
            trace_end_time,
        )
    # Policy timelines only exist for ML-based caches.
    if not cache.is_ml_cache():
        return
    policy_stats = [cache.policy_stats, cache.per_hour_policy_stats]
    for i in range(len(policy_stats)):
        policy_stats[i].write_policy_timeline(
            cache_type,
            cache_size,
            target_cf_name,
            result_dir,
            trace_start_time,
            trace_end_time,
        )
        policy_stats[i].write_policy_ratio_timeline(
            cache_type,
            cache_size,
            target_cf_name,
            result_dir,
            trace_start_time,
            trace_end_time,
        )
if __name__ == "__main__":
    # Validate the command line before touching any files.
    if len(sys.argv) <= 8:
        print(
            "Must provide 8 arguments.\n"
            "1) Cache type (ts, linucb, arc, lru, opt, pylru, pymru, pylfu, "
            "pyhb, gdsize, trace). One may evaluate the hybrid row_block cache "
            "by appending '_hybrid' to a cache_type, e.g., ts_hybrid. "
            "Note that hybrid is not supported with opt and trace. \n"
            "2) Cache size (xM, xG, xT).\n"
            "3) The sampling frequency used to collect the trace. (The "
            "simulation scales down the cache size by the sampling frequency).\n"
            "4) Warmup seconds (The number of seconds used for warmup).\n"
            "5) Trace file path.\n"
            "6) Result directory (A directory that saves generated results)\n"
            "7) Max number of accesses to process\n"
            "8) The target column family. (The simulation will only run "
            "accesses on the target column family. If it is set to all, "
            "it will run against all accesses.)"
        )
        exit(1)
    print("Arguments: {}".format(sys.argv))
    # Parse positional arguments; see the usage message above for meanings.
    cache_type = sys.argv[1]
    cache_size = parse_cache_size(sys.argv[2])
    downsample_size = int(sys.argv[3])
    warmup_seconds = int(sys.argv[4])
    trace_file_path = sys.argv[5]
    result_dir = sys.argv[6]
    max_accesses_to_process = int(sys.argv[7])
    target_cf_name = sys.argv[8]
    # Build the cache, replay the trace through it, then dump statistics.
    cache = create_cache(cache_type, cache_size, downsample_size)
    trace_start_time, trace_duration = run(
        trace_file_path,
        cache_type,
        cache,
        warmup_seconds,
        max_accesses_to_process,
        target_cf_name,
    )
    trace_end_time = trace_start_time + trace_duration
    report_stats(
        cache,
        cache_type,
        cache_size,
        target_cf_name,
        result_dir,
        trace_start_time,
        trace_end_time,
    )
| [
"numpy.zeros_like",
"numpy.outer",
"heapq.heappush",
"numpy.random.beta",
"numpy.zeros",
"os.path.exists",
"numpy.identity",
"time.time",
"heapq.heappop",
"gc.collect",
"numpy.linalg.inv",
"collections.OrderedDict",
"numpy.sqrt"
] | [((58063, 58074), 'time.time', 'time.time', ([], {}), '()\n', (58072, 58074), False, 'import time\n'), ((61333, 61344), 'time.time', 'time.time', ([], {}), '()\n', (61342, 61344), False, 'import time\n'), ((64889, 64900), 'time.time', 'time.time', ([], {}), '()\n', (64898, 64900), False, 'import time\n'), ((8001, 8013), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8011, 8013), False, 'import gc\n'), ((38043, 38065), 'numpy.zeros_like', 'np.zeros_like', (['self.th'], {}), '(self.th)\n', (38056, 38065), True, 'import numpy as np\n'), ((38348, 38370), 'numpy.zeros_like', 'np.zeros_like', (['self.th'], {}), '(self.th)\n', (38361, 38370), True, 'import numpy as np\n'), ((38562, 38586), 'numpy.zeros', 'np.zeros', (['self.nfeatures'], {}), '(self.nfeatures)\n', (38570, 38586), True, 'import numpy as np\n'), ((39293, 39311), 'numpy.outer', 'np.outer', (['x_i', 'x_i'], {}), '(x_i, x_i)\n', (39301, 39311), True, 'import numpy as np\n'), ((39398, 39436), 'numpy.linalg.inv', 'np.linalg.inv', (['self.A[selected_policy]'], {}), '(self.A[selected_policy])\n', (39411, 39436), True, 'import numpy as np\n'), ((41328, 41358), 'heapq.heappush', 'heapq.heappush', (['self.pq', 'entry'], {}), '(self.pq, entry)\n', (41342, 41358), False, 'import heapq\n'), ((46602, 46615), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (46613, 46615), False, 'from collections import OrderedDict\n'), ((11550, 11579), 'os.path.exists', 'path.exists', (['header_file_path'], {}), '(header_file_path)\n', (11561, 11579), False, 'from os import path\n'), ((12635, 12664), 'os.path.exists', 'path.exists', (['header_file_path'], {}), '(header_file_path)\n', (12646, 12664), False, 'from os import path\n'), ((14920, 14949), 'os.path.exists', 'path.exists', (['header_file_path'], {}), '(header_file_path)\n', (14931, 14949), False, 'from os import path\n'), ((16274, 16303), 'os.path.exists', 'path.exists', (['header_file_path'], {}), '(header_file_path)\n', (16285, 16303), False, 'from os import 
path\n'), ((36575, 36615), 'numpy.random.beta', 'np.random.beta', (['self._as[x]', 'self._bs[x]'], {}), '(self._as[x], self._bs[x])\n', (36589, 36615), True, 'import numpy as np\n'), ((38298, 38325), 'numpy.identity', 'np.identity', (['self.nfeatures'], {}), '(self.nfeatures)\n', (38309, 38325), True, 'import numpy as np\n'), ((41453, 41475), 'heapq.heappop', 'heapq.heappop', (['self.pq'], {}), '(self.pq)\n', (41466, 41475), False, 'import heapq\n'), ((41759, 41781), 'heapq.heappop', 'heapq.heappop', (['self.pq'], {}), '(self.pq)\n', (41772, 41781), False, 'import heapq\n'), ((63948, 63959), 'time.time', 'time.time', ([], {}), '()\n', (63957, 63959), False, 'import time\n'), ((38959, 38970), 'numpy.sqrt', 'np.sqrt', (['ta'], {}), '(ta)\n', (38966, 38970), True, 'import numpy as np\n'), ((59981, 59992), 'time.time', 'time.time', ([], {}), '()\n', (59990, 59992), False, 'import time\n'), ((61854, 61866), 'gc.collect', 'gc.collect', ([], {}), '()\n', (61864, 61866), False, 'import gc\n')] |
from django.shortcuts import render
from rest_framework.permissions import IsAuthenticated,AllowAny
from .serializers import WaterLevelSerializer, FetchWaterLevelSerializer
from rest_framework.generics import ListAPIView,CreateAPIView
from dashboard.models import WaterLevel
from rest_framework.views import APIView
from decimal import *
import uuid
from rest_framework.response import Response
from rest_framework import status
class PredictWaterLevel(APIView):
    """Predict a water level from temperature, humidity and pressure.

    POST body: ``temp``, ``humidity``, ``pressure``. The prediction comes
    from a linear regression trained on-the-fly against historical weather
    data joined with randomly generated water-level labels.

    NOTE(review): training a model inside every request is extremely slow,
    and the labels are random integers — presumably placeholder logic;
    confirm before relying on the predictions.
    """

    permission_classes = (AllowAny,)
    serializer_class = WaterLevelSerializer

    def post(self, request):
        serializer = WaterLevelSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        temp = request.data['temp']
        humidity = request.data['humidity']
        pressure = request.data['pressure']
        # Bug fix: the original called serializer.save() twice, which
        # retrained the regression model and wrote the record twice.
        serializer.save(
            id=uuid.uuid4(),
            waterlevel=self.trainmodel(temp, humidity, pressure),
        )
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def trainmodel(self, temp, humidity, pressure):
        """Train a throwaway linear regression and predict a water level.

        Returns the predicted water level as an int.
        """
        # Local imports keep the heavy ML stack off the module import path.
        import pandas as pd
        import numpy as np
        from sklearn.model_selection import train_test_split
        from sklearn.preprocessing import StandardScaler
        from sklearn.linear_model import LinearRegression

        df = pd.read_csv("/home/codeforvision/weatherHistory.csv")
        # Attach synthetic water-level labels (random integers 20..7999).
        labels_col = pd.DataFrame(np.random.randint(20, 8000, 96453))
        df1 = pd.concat([df, labels_col], axis=1)
        # Columns 3, 5, 10 are the feature columns; the last column holds
        # the synthetic labels. Only the first 5000 rows are used.
        features = df1.iloc[:5000, [3, 5, 10]].values
        labels = df1.iloc[:5000, -1].values
        x_train, x_test, y_train, y_test = train_test_split(
            features, labels, test_size=0.10, random_state=0
        )
        sc = StandardScaler()
        x_train = sc.fit_transform(x_train)
        x_test = sc.transform(x_test)
        regressor = LinearRegression()
        regressor.fit(x_train, y_train)
        # Bug fix: the model was trained on scaled features, so the query
        # point must be scaled with the same scaler before predicting.
        query = sc.transform([[int(temp), int(humidity), int(pressure)]])
        y = regressor.predict(query)
        return int(y[0])
class FetchWaterLevel(ListAPIView):
    """Return the stored water-level record matching the posted ``id``."""

    permission_classes = (AllowAny,)
    serializer_class = FetchWaterLevelSerializer

    def get_queryset(self):
        # NOTE(review): reads the id from POST data on a list endpoint —
        # presumably intentional; verify against the client.
        requested_id = self.request.POST.get('id')
        return WaterLevel.objects.filter(id=requested_id)
| [
"pandas.DataFrame",
"uuid.uuid4",
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LinearRegression",
"numpy.random.randint",
"rest_framework.response.Response",
"pandas.concat"
] | [((821, 833), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (831, 833), False, 'import uuid\n'), ((1008, 1065), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (1016, 1065), False, 'from rest_framework.response import Response\n'), ((1283, 1336), 'pandas.read_csv', 'pd.read_csv', (['"""/home/codeforvision/weatherHistory.csv"""'], {}), "('/home/codeforvision/weatherHistory.csv')\n", (1294, 1336), True, 'import pandas as pd\n'), ((1368, 1402), 'numpy.random.randint', 'np.random.randint', (['(20)', '(8000)', '(96453)'], {}), '(20, 8000, 96453)\n', (1385, 1402), True, 'import numpy as np\n'), ((1432, 1447), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (1444, 1447), True, 'import pandas as pd\n'), ((1481, 1507), 'pandas.concat', 'pd.concat', (['[df, b]'], {'axis': '(1)'}), '([df, b], axis=1)\n', (1490, 1507), True, 'import pandas as pd\n'), ((1882, 1947), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': '(0.1)', 'random_state': '(0)'}), '(features, labels, test_size=0.1, random_state=0)\n', (1898, 1947), False, 'from sklearn.model_selection import train_test_split\n'), ((2062, 2078), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2076, 2078), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2319, 2337), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2335, 2337), False, 'from sklearn.linear_model import LinearRegression\n')] |
import numpy as np
from liegroups.numpy import _base
from liegroups.numpy.so3 import SO3
class SE3(_base.SpecialEuclideanBase):
    """Homogeneous transformation matrix in :math:`SE(3)` using active (alibi) transformations.
    .. math::
        SE(3) &= \\left\\{ \\mathbf{T}=
        \\begin{bmatrix}
            \\mathbf{C} & \\mathbf{r} \\\\
            \\mathbf{0}^T & 1
        \\end{bmatrix} \\in \\mathbb{R}^{4 \\times 4} ~\\middle|~ \\mathbf{C} \\in SO(3), \\mathbf{r} \\in \\mathbb{R}^3 \\right\\} \\\\
        \\mathfrak{se}(3) &= \\left\\{ \\boldsymbol{\\Xi} =
        \\boldsymbol{\\xi}^\\wedge \\in \\mathbb{R}^{4 \\times 4} ~\\middle|~
         \\boldsymbol{\\xi}=
        \\begin{bmatrix}
            \\boldsymbol{\\rho} \\\\ \\boldsymbol{\\phi}
        \\end{bmatrix} \\in \\mathbb{R}^6, \\boldsymbol{\\rho} \\in \\mathbb{R}^3, \\boldsymbol{\\phi} \\in \\mathbb{R}^3 \\right\\}
    :cvar ~liegroups.SE3.dim: Dimension of the transformation matrix.
    :cvar ~liegroups.SE3.dof: Underlying degrees of freedom (i.e., dimension of the tangent space).
    :ivar rot: Storage for the rotation matrix :math:`\\mathbf{C}`.
    :ivar trans: Storage for the translation vector :math:`\\mathbf{r}`.
    """
    dim = 4
    """Dimension of the transformation matrix."""
    dof = 6
    """Underlying degrees of freedom (i.e., dimension of the tangent space)."""
    RotationType = SO3
    def adjoint(self):
        """Adjoint matrix of the transformation.
        .. math::
            \\text{Ad}(\\mathbf{T}) =
            \\begin{bmatrix}
                \\mathbf{C} & \\mathbf{r}^\\wedge\\mathbf{C} \\\\
                \\mathbf{0} & \\mathbf{C}
            \\end{bmatrix}
            \\in \\mathbb{R}^{6 \\times 6}
        """
        rotmat = self.rot.as_matrix()
        # Assemble the 6x6 block matrix row by row.
        return np.vstack(
            [np.hstack([rotmat,
                       self.RotationType.wedge(self.trans).dot(rotmat)]),
             np.hstack([np.zeros((3, 3)), rotmat])]
        )
    @classmethod
    def curlyvee(cls, Psi):
        """:math:`SE(3)` curlyvee operator as defined by Barfoot.
        .. math::
            \\boldsymbol{\\xi} =
            \\boldsymbol{\\Psi}^\\curlyvee
        This is the inverse operation to :meth:`~liegroups.SE3.curlywedge`.
        """
        # Promote a single matrix to a batch of one for uniform handling.
        if Psi.ndim < 3:
            Psi = np.expand_dims(Psi, axis=0)
        if Psi.shape[1:3] != (cls.dof, cls.dof):
            raise ValueError("Psi must have shape ({},{}) or (N,{},{})".format(
                cls.dof, cls.dof, cls.dof, cls.dof))
        xi = np.empty([Psi.shape[0], cls.dof])
        xi[:, 0:3] = cls.RotationType.vee(Psi[:, 0:3, 3:6])
        xi[:, 3:6] = cls.RotationType.vee(Psi[:, 0:3, 0:3])
        return np.squeeze(xi)
    @classmethod
    def curlywedge(cls, xi):
        """:math:`SE(3)` curlywedge operator as defined by Barfoot.
        .. math::
            \\boldsymbol{\\Psi} =
            \\boldsymbol{\\xi}^\\curlywedge =
            \\begin{bmatrix}
                \\boldsymbol{\\phi}^\\wedge & \\boldsymbol{\\rho}^\\wedge \\\\
                \\mathbf{0} & \\boldsymbol{\\phi}^\\wedge
            \\end{bmatrix}
        This is the inverse operation to :meth:`~liegroups.SE3.curlyvee`.
        """
        xi = np.atleast_2d(xi)
        if xi.shape[1] != cls.dof:
            raise ValueError(
                "xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
        Psi = np.zeros([xi.shape[0], cls.dof, cls.dof])
        Psi[:, 0:3, 0:3] = cls.RotationType.wedge(xi[:, 3:6])
        Psi[:, 0:3, 3:6] = cls.RotationType.wedge(xi[:, 0:3])
        Psi[:, 3:6, 3:6] = Psi[:, 0:3, 0:3]
        return np.squeeze(Psi)
    @classmethod
    def exp(cls, xi):
        """Exponential map for :math:`SE(3)`, which computes a transformation from a tangent vector:
        .. math::
            \\mathbf{T}(\\boldsymbol{\\xi}) =
            \\exp(\\boldsymbol{\\xi}^\\wedge) =
            \\begin{bmatrix}
                \\exp(\\boldsymbol{\\phi}^\\wedge) & \\mathbf{J} \\boldsymbol{\\rho} \\\\
                \\mathbf{0} ^ T & 1
            \\end{bmatrix}
        This is the inverse operation to :meth:`~liegroups.SE3.log`.
        """
        if len(xi) != cls.dof:
            raise ValueError("xi must have length {}".format(cls.dof))
        rho = xi[0:3]   # translation part
        phi = xi[3:6]   # rotation part
        return cls(cls.RotationType.exp(phi),
                   cls.RotationType.left_jacobian(phi).dot(rho))
    @classmethod
    def left_jacobian_Q_matrix(cls, xi):
        """The :math:`\\mathbf{Q}` matrix used to compute :math:`\\mathcal{J}` in :meth:`~liegroups.SE3.left_jacobian` and :math:`\\mathcal{J}^{-1}` in :meth:`~liegroups.SE3.inv_left_jacobian`.
        .. math::
            \\mathbf{Q}(\\boldsymbol{\\xi}) =
            \\frac{1}{2}\\boldsymbol{\\rho}^\\wedge &+
            \\left( \\frac{\\phi - \\sin \\phi}{\\phi^3} \\right)
                \\left(
                    \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\rho}^\\wedge +
                    \\boldsymbol{\\rho}^\\wedge \\boldsymbol{\\phi}^\\wedge +
                    \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\rho}^\\wedge \\boldsymbol{\\phi}^\\wedge
                \\right) \\\\ &+
            \\left( \\frac{\\phi^2 + 2 \\cos \\phi - 2}{2 \\phi^4} \\right)
                \\left(
                    \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\rho}^\\wedge +
                    \\boldsymbol{\\rho}^\\wedge \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\phi}^\\wedge -
                    3 \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\rho}^\\wedge \\boldsymbol{\\phi}^\\wedge
                \\right) \\\\ &+
            \\left( \\frac{2 \\phi - 3 \\sin \\phi + \\phi \\cos \\phi}{2 \\phi^5} \\right)
                \\left(
                    \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\rho}^\\wedge \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\phi}^\\wedge +
                    \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\phi}^\\wedge \\boldsymbol{\\rho}^\\wedge \\boldsymbol{\\phi}^\\wedge
                \\right)
        """
        if len(xi) != cls.dof:
            raise ValueError("xi must have length {}".format(cls.dof))
        rho = xi[0:3]  # translation part
        phi = xi[3:6]  # rotation part
        rx = SO3.wedge(rho)
        px = SO3.wedge(phi)
        ph = np.linalg.norm(phi)
        ph2 = ph * ph
        ph3 = ph2 * ph
        ph4 = ph3 * ph
        ph5 = ph4 * ph
        cph = np.cos(ph)
        sph = np.sin(ph)
        # NOTE: divides by powers of |phi| — callers guard the |phi| ~ 0
        # case before invoking this method (see inv_left_jacobian).
        m1 = 0.5
        m2 = (ph - sph) / ph3
        m3 = (0.5 * ph2 + cph - 1.) / ph4
        m4 = (ph - 1.5 * sph + 0.5 * ph * cph) / ph5
        t1 = rx
        t2 = px.dot(rx) + rx.dot(px) + px.dot(rx).dot(px)
        t3 = px.dot(px).dot(rx) + rx.dot(px).dot(px) - 3. * px.dot(rx).dot(px)
        t4 = px.dot(rx).dot(px).dot(px) + px.dot(px).dot(rx).dot(px)
        return m1 * t1 + m2 * t2 + m3 * t3 + m4 * t4
    @classmethod
    def inv_left_jacobian(cls, xi):
        """:math:`SE(3)` inverse left Jacobian.
        .. math::
            \\mathcal{J}^{-1}(\\boldsymbol{\\xi}) =
            \\begin{bmatrix}
                \\mathbf{J}^{-1} & -\\mathbf{J}^{-1} \\mathbf{Q} \\mathbf{J}^{-1} \\\\
                \\mathbf{0} & \\mathbf{J}^{-1}
            \\end{bmatrix}
        with :math:`\\mathbf{J}^{-1}` as in :meth:`liegroups.SO3.inv_left_jacobian` and :math:`\\mathbf{Q}` as in :meth:`~liegroups.SE3.left_jacobian_Q_matrix`.
        """
        rho = xi[0:3]  # translation part
        phi = xi[3:6]  # rotation part
        # Near |phi|==0, use first order Taylor expansion
        if np.isclose(np.linalg.norm(phi), 0.):
            return np.identity(cls.dof) - 0.5 * cls.curlywedge(xi)
        so3_inv_jac = SO3.inv_left_jacobian(phi)
        Q_mat = cls.left_jacobian_Q_matrix(xi)
        jac = np.zeros([cls.dof, cls.dof])
        jac[0:3, 0:3] = so3_inv_jac
        jac[0:3, 3:6] = -so3_inv_jac.dot(Q_mat).dot(so3_inv_jac)
        jac[3:6, 3:6] = so3_inv_jac
        return jac
    @classmethod
    def left_jacobian(cls, xi):
        """:math:`SE(3)` left Jacobian.
        .. math::
            \\mathcal{J}(\\boldsymbol{\\xi}) =
            \\begin{bmatrix}
                \\mathbf{J} & \\mathbf{Q} \\\\
                \\mathbf{0} & \\mathbf{J}
            \\end{bmatrix}
        with :math:`\\mathbf{J}` as in :meth:`liegroups.SO3.left_jacobian` and :math:`\\mathbf{Q}` as in :meth:`~liegroups.SE3.left_jacobian_Q_matrix`.
        """
        rho = xi[0:3]  # translation part
        phi = xi[3:6]  # rotation part
        # Near |phi|==0, use first order Taylor expansion
        if np.isclose(np.linalg.norm(phi), 0.):
            return np.identity(cls.dof) + 0.5 * cls.curlywedge(xi)
        so3_jac = SO3.left_jacobian(phi)
        Q_mat = cls.left_jacobian_Q_matrix(xi)
        jac = np.zeros([cls.dof, cls.dof])
        jac[0:3, 0:3] = so3_jac
        jac[0:3, 3:6] = Q_mat
        jac[3:6, 3:6] = so3_jac
        return jac
    def log(self):
        """Logarithmic map for :math:`SE(3)`, which computes a tangent vector from a transformation:
        .. math::
            \\boldsymbol{\\xi}(\\mathbf{T}) =
            \\ln(\\mathbf{T})^\\vee =
            \\begin{bmatrix}
                \\mathbf{J} ^ {-1} \\mathbf{r} \\\\
                \\ln(\\boldsymbol{C}) ^\\vee
            \\end{bmatrix}
        This is the inverse operation to :meth:`~liegroups.SE3.exp`.
        """
        phi = self.RotationType.log(self.rot)
        rho = self.RotationType.inv_left_jacobian(phi).dot(self.trans)
        return np.hstack([rho, phi])
    @classmethod
    def odot(cls, p, directional=False):
        """:math:`SE(3)` odot operator as defined by Barfoot.
        This is the Jacobian of a vector
        .. math::
            \\mathbf{p} =
            \\begin{bmatrix}
                sx \\\\ sy \\\\ sz \\\\ s
            \\end{bmatrix} =
            \\begin{bmatrix}
                \\boldsymbol{\\epsilon} \\\\ \\eta
            \\end{bmatrix}
        with respect to a perturbation in the underlying parameters of :math:`\\mathbf{T}`.
        If :math:`\\mathbf{p}` is given in Euclidean coordinates and directional=False, the missing scale value :math:`\\eta` is assumed to be 1 and the Jacobian is 3x6. If directional=True, :math:`\\eta` is assumed to be 0:
        .. math::
            \\mathbf{p}^\\odot =
            \\begin{bmatrix}
                \\eta \\mathbf{1} & -\\boldsymbol{\\epsilon}^\\wedge
            \\end{bmatrix}
        If :math:`\\mathbf{p}` is given in Homogeneous coordinates, the Jacobian is 4x6:
        .. math::
            \\mathbf{p}^\\odot =
            \\begin{bmatrix}
                \\eta \\mathbf{1} & -\\boldsymbol{\\epsilon}^\\wedge \\\\
                \\mathbf{0}^T & \\mathbf{0}^T
            \\end{bmatrix}
        """
        p = np.atleast_2d(p)
        result = np.zeros([p.shape[0], p.shape[1], cls.dof])
        if p.shape[1] == cls.dim - 1:
            # Assume scale parameter is 1 unless p is a direction
            # vector, in which case the scale is 0
            if not directional:
                result[:, 0:3, 0:3] = np.eye(3)
            result[:, 0:3, 3:6] = cls.RotationType.wedge(-p)
        elif p.shape[1] == cls.dim:
            # Broadcast magic
            result[:, 0:3, 0:3] = p[:, 3][:, None, None] * np.eye(3)
            result[:, 0:3, 3:6] = cls.RotationType.wedge(-p[:, 0:3])
        else:
            raise ValueError("p must have shape ({},), ({},), (N,{}) or (N,{})".format(
                cls.dim - 1, cls.dim, cls.dim - 1, cls.dim))
        return np.squeeze(result)
    @classmethod
    def vee(cls, Xi):
        """:math:`SE(3)` vee operator as defined by Barfoot.
        .. math::
            \\boldsymbol{\\xi} = \\boldsymbol{\\Xi} ^\\vee
        This is the inverse operation to :meth:`~liegroups.SE3.wedge`.
        """
        if Xi.ndim < 3:
            Xi = np.expand_dims(Xi, axis=0)
        if Xi.shape[1:3] != (cls.dim, cls.dim):
            raise ValueError("Xi must have shape ({},{}) or (N,{},{})".format(
                cls.dim, cls.dim, cls.dim, cls.dim))
        xi = np.empty([Xi.shape[0], cls.dof])
        xi[:, 0:3] = Xi[:, 0:3, 3]
        xi[:, 3:6] = cls.RotationType.vee(Xi[:, 0:3, 0:3])
        return np.squeeze(xi)
    @classmethod
    def wedge(cls, xi):
        """:math:`SE(3)` wedge operator as defined by Barfoot.
        .. math::
            \\boldsymbol{\\Xi} =
            \\boldsymbol{\\xi} ^\\wedge =
            \\begin{bmatrix}
                \\boldsymbol{\\phi} ^\\wedge & \\boldsymbol{\\rho} \\\\
                \\mathbf{0} ^ T & 0
            \\end{bmatrix}
        This is the inverse operation to :meth:`~liegroups.SE3.vee`.
        """
        xi = np.atleast_2d(xi)
        if xi.shape[1] != cls.dof:
            raise ValueError(
                "xi must have shape ({},) or (N,{})".format(cls.dof, cls.dof))
        Xi = np.zeros([xi.shape[0], cls.dim, cls.dim])
        Xi[:, 0:3, 0:3] = cls.RotationType.wedge(xi[:, 3:6])
        Xi[:, 0:3, 3] = xi[:, 0:3]
        return np.squeeze(Xi)
| [
"liegroups.numpy.so3.SO3.left_jacobian",
"liegroups.numpy.so3.SO3.wedge",
"numpy.empty",
"numpy.zeros",
"numpy.expand_dims",
"liegroups.numpy.so3.SO3.inv_left_jacobian",
"numpy.hstack",
"numpy.identity",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"numpy.squeeze",
"numpy.eye",
"numpy.at... | [((2578, 2611), 'numpy.empty', 'np.empty', (['[Psi.shape[0], cls.dof]'], {}), '([Psi.shape[0], cls.dof])\n', (2586, 2611), True, 'import numpy as np\n'), ((2748, 2762), 'numpy.squeeze', 'np.squeeze', (['xi'], {}), '(xi)\n', (2758, 2762), True, 'import numpy as np\n'), ((3272, 3289), 'numpy.atleast_2d', 'np.atleast_2d', (['xi'], {}), '(xi)\n', (3285, 3289), True, 'import numpy as np\n'), ((3449, 3490), 'numpy.zeros', 'np.zeros', (['[xi.shape[0], cls.dof, cls.dof]'], {}), '([xi.shape[0], cls.dof, cls.dof])\n', (3457, 3490), True, 'import numpy as np\n'), ((3675, 3690), 'numpy.squeeze', 'np.squeeze', (['Psi'], {}), '(Psi)\n', (3685, 3690), True, 'import numpy as np\n'), ((6301, 6315), 'liegroups.numpy.so3.SO3.wedge', 'SO3.wedge', (['rho'], {}), '(rho)\n', (6310, 6315), False, 'from liegroups.numpy.so3 import SO3\n'), ((6329, 6343), 'liegroups.numpy.so3.SO3.wedge', 'SO3.wedge', (['phi'], {}), '(phi)\n', (6338, 6343), False, 'from liegroups.numpy.so3 import SO3\n'), ((6358, 6377), 'numpy.linalg.norm', 'np.linalg.norm', (['phi'], {}), '(phi)\n', (6372, 6377), True, 'import numpy as np\n'), ((6484, 6494), 'numpy.cos', 'np.cos', (['ph'], {}), '(ph)\n', (6490, 6494), True, 'import numpy as np\n'), ((6509, 6519), 'numpy.sin', 'np.sin', (['ph'], {}), '(ph)\n', (6515, 6519), True, 'import numpy as np\n'), ((7756, 7782), 'liegroups.numpy.so3.SO3.inv_left_jacobian', 'SO3.inv_left_jacobian', (['phi'], {}), '(phi)\n', (7777, 7782), False, 'from liegroups.numpy.so3 import SO3\n'), ((7845, 7873), 'numpy.zeros', 'np.zeros', (['[cls.dof, cls.dof]'], {}), '([cls.dof, cls.dof])\n', (7853, 7873), True, 'import numpy as np\n'), ((8772, 8794), 'liegroups.numpy.so3.SO3.left_jacobian', 'SO3.left_jacobian', (['phi'], {}), '(phi)\n', (8789, 8794), False, 'from liegroups.numpy.so3 import SO3\n'), ((8857, 8885), 'numpy.zeros', 'np.zeros', (['[cls.dof, cls.dof]'], {}), '([cls.dof, cls.dof])\n', (8865, 8885), True, 'import numpy as np\n'), ((9591, 9612), 'numpy.hstack', 'np.hstack', 
(['[rho, phi]'], {}), '([rho, phi])\n', (9600, 9612), True, 'import numpy as np\n'), ((10866, 10882), 'numpy.atleast_2d', 'np.atleast_2d', (['p'], {}), '(p)\n', (10879, 10882), True, 'import numpy as np\n'), ((10900, 10943), 'numpy.zeros', 'np.zeros', (['[p.shape[0], p.shape[1], cls.dof]'], {}), '([p.shape[0], p.shape[1], cls.dof])\n', (10908, 10943), True, 'import numpy as np\n'), ((11625, 11643), 'numpy.squeeze', 'np.squeeze', (['result'], {}), '(result)\n', (11635, 11643), True, 'import numpy as np\n'), ((12170, 12202), 'numpy.empty', 'np.empty', (['[Xi.shape[0], cls.dof]'], {}), '([Xi.shape[0], cls.dof])\n', (12178, 12202), True, 'import numpy as np\n'), ((12312, 12326), 'numpy.squeeze', 'np.squeeze', (['xi'], {}), '(xi)\n', (12322, 12326), True, 'import numpy as np\n'), ((12785, 12802), 'numpy.atleast_2d', 'np.atleast_2d', (['xi'], {}), '(xi)\n', (12798, 12802), True, 'import numpy as np\n'), ((12961, 13002), 'numpy.zeros', 'np.zeros', (['[xi.shape[0], cls.dim, cls.dim]'], {}), '([xi.shape[0], cls.dim, cls.dim])\n', (12969, 13002), True, 'import numpy as np\n'), ((13114, 13128), 'numpy.squeeze', 'np.squeeze', (['Xi'], {}), '(Xi)\n', (13124, 13128), True, 'import numpy as np\n'), ((2353, 2380), 'numpy.expand_dims', 'np.expand_dims', (['Psi'], {'axis': '(0)'}), '(Psi, axis=0)\n', (2367, 2380), True, 'import numpy as np\n'), ((7640, 7659), 'numpy.linalg.norm', 'np.linalg.norm', (['phi'], {}), '(phi)\n', (7654, 7659), True, 'import numpy as np\n'), ((8660, 8679), 'numpy.linalg.norm', 'np.linalg.norm', (['phi'], {}), '(phi)\n', (8674, 8679), True, 'import numpy as np\n'), ((11948, 11974), 'numpy.expand_dims', 'np.expand_dims', (['Xi'], {'axis': '(0)'}), '(Xi, axis=0)\n', (11962, 11974), True, 'import numpy as np\n'), ((7685, 7705), 'numpy.identity', 'np.identity', (['cls.dof'], {}), '(cls.dof)\n', (7696, 7705), True, 'import numpy as np\n'), ((8705, 8725), 'numpy.identity', 'np.identity', (['cls.dof'], {}), '(cls.dof)\n', (8716, 8725), True, 'import numpy as 
np\n'), ((11168, 11177), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (11174, 11177), True, 'import numpy as np\n'), ((11366, 11375), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (11372, 11375), True, 'import numpy as np\n'), ((1975, 1991), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1983, 1991), True, 'import numpy as np\n')] |
### K: Model: Autoencoder
import sys
import time
import pandas as pd
import os
import math
import numpy as np
from numpy import mean, std
from unit import remove_columns_with_one_value, remove_nan_columns, load_dataset
from unit import display_general_information, display_feature_distribution
from collections import Counter
#from imblearn.over_sampling import RandomOverSampler, RandomUnderSampler
import sklearn
from sklearn import set_config
from sklearn.impute import SimpleImputer
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from sklearn.metrics import f1_score, classification_report, accuracy_score
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split, PredefinedSplit, RandomizedSearchCV
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif, chi2, mutual_info_classif
from sklearn.utils import class_weight
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
import keras.utils
from keras import metrics
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv2D, MaxPooling2D, Flatten, LSTM
from keras.optimizers import RMSprop, Adam
from keras.constraints import maxnorm
###############################################################################
## Define constants
###############################################################################
# Show every row but only a few columns when printing dataframes.
pd.set_option ('display.max_rows', None)
pd.set_option ('display.max_columns', 5)
BOT_IOT_DIRECTORY = '../../../../datasets/bot-iot/'
BOT_IOT_FEATURE_NAMES = 'UNSW_2018_IoT_Botnet_Dataset_Feature_Names.csv'
BOT_IOT_FILE_5_PERCENT_SCHEMA = 'UNSW_2018_IoT_Botnet_Full5pc_{}.csv' # 1 - 4
FIVE_PERCENT_FILES = 4
BOT_IOT_FILE_FULL_SCHEMA = 'UNSW_2018_IoT_Botnet_Dataset_{}.csv' # 1 - 74
FULL_FILES = 74
FILE_NAME = BOT_IOT_DIRECTORY + BOT_IOT_FILE_5_PERCENT_SCHEMA
FEATURES = BOT_IOT_DIRECTORY + BOT_IOT_FEATURE_NAMES
NAN_VALUES = ['?', '.'] # markers treated as NaN when loading the csv files
TARGET = 'attack'
INDEX_COLUMN = 'pkSeqID'
LABELS = ['attack', 'category', 'subcategory']
# Random seed (STATE) may be overridden by the first command line argument.
STATE = 0
try:
  STATE = int (sys.argv [1])
except (IndexError, ValueError):
  # No argument given, or it is not an integer: keep the default seed.
  # (Narrowed from a bare `except:` that would also hide real errors.)
  pass
#for STATE in [1, 2, 3, 4, 5]:
np.random.seed (STATE)
print ('STATE:', STATE)
###############################################################################
## Load dataset
###############################################################################
# Load the 5% Bot-IoT csv files into one dataframe, indexed by pkSeqID,
# treating '?' and '.' as NaN.
df = load_dataset (FILE_NAME, FIVE_PERCENT_FILES, INDEX_COLUMN, NAN_VALUES)
###############################################################################
## Clean dataset
###############################################################################
###############################################################################
### Remove columns with only one value
# Constant columns carry no information for the model.
df, log = remove_columns_with_one_value (df, verbose = False)
print (log)
###############################################################################
### Remove redundant columns, useless columns and unused targets
### K: _number columns are numerical representations of other existing columns.
### K: category and subcategory are other labels.
### K: saddr and daddr may specialize the model to a single network
redundant_columns = ['state_number', 'proto_number', 'flgs_number']
other_targets = ['category', 'subcategory']
misc_columns = ['saddr', 'daddr']
print ('Removing redundant columns:', redundant_columns)
print ('Removing useless targets:', other_targets)
print ('Removing misc columns:', misc_columns)
columns_to_remove = redundant_columns + other_targets + misc_columns
df.drop (axis = 'columns', columns = columns_to_remove, inplace = True)
###############################################################################
### Remove NaN columns (with a lot of NaN values)
# Drop columns where at least half of the values are NaN.
df, log = remove_nan_columns (df, 1/2, verbose = False)
print (log)
###############################################################################
### Encode categorical features
# Each categorical column is ordinal-encoded independently; the encoder is
# refit per column, so the learned mapping is not reused across columns.
print ('Encoding categorical features (ordinal encoding).')
my_encoder = OrdinalEncoder ()
df ['flgs'] = my_encoder.fit_transform (df ['flgs'].values.reshape (-1, 1))
df ['proto'] = my_encoder.fit_transform (df ['proto'].values.reshape (-1, 1))
df ['sport'] = my_encoder.fit_transform (df ['sport'].astype (str).values.reshape (-1, 1))
df ['dport'] = my_encoder.fit_transform (df ['dport'].astype (str).values.reshape (-1, 1))
df ['state'] = my_encoder.fit_transform (df ['state'].values.reshape (-1, 1))
# Sanity check: no object-typed columns should remain after encoding.
print ('Objects:', list (df.select_dtypes ( ['object']).columns))
###############################################################################
## Quick sanity check
###############################################################################
display_general_information (df)
###############################################################################
## Split dataset into train, validation and test sets
###############################################################################
### Isolate attack and normal samples
## K: Dataset is too big? Drop.
#drop_indices = np.random.choice (df.index, int (df.shape [0] * 0.5),
#                                 replace = False)
#df = df.drop (drop_indices)
mask = df [TARGET] == 0
# 0 == normal
df_normal = df [mask]
# 1 == attack
df_attack = df [~mask]
print ('Attack set:')
print (df_attack [TARGET].value_counts ())
print ('Normal set:')
print (df_normal [TARGET].value_counts ())
### Sample and drop random attacks
# Draw as many attack samples as there are normal samples, so the test set
# is balanced; those attacks are removed from the remaining attack pool.
df_random_attacks = df_attack.sample (n = df_normal.shape [0], random_state = STATE)
df_attack = df_attack.drop (df_random_attacks.index)
### Assemble test set
df_test = pd.DataFrame ()
df_test = pd.concat ( [df_test, df_normal])
df_test = pd.concat ( [df_test, df_random_attacks])
print ('Test set:')
print (df_test [TARGET].value_counts ())
X_test_df = df_test.iloc [:, :-1]
y_test_df = df_test.iloc [:, -1]
### K: y_test is required to plot the roc curve in the end
df_train = df_attack
VALIDATION_SIZE = 1/4
print ('\nSplitting dataset (validation/train):', VALIDATION_SIZE)
# NOTE(review): this splits the FULL dataframe `df`, not `df_train`, so rows
# that were just placed in `df_test` can also appear in train/validation —
# looks like data leakage, and `df_train` is never used. TODO confirm intent.
X_train_df, X_val_df, y_train_df, y_val_df = train_test_split (
  df.loc [:, df.columns != TARGET],
  df [TARGET],
  test_size = VALIDATION_SIZE,
  random_state = STATE,)
print ('X_train_df shape:', X_train_df.shape)
print ('y_train_df shape:', y_train_df.shape)
print ('X_val_df shape:', X_val_df.shape)
print ('y_val_df shape:', y_val_df.shape)
print ('X_test_df shape:', X_test_df.shape)
print ('y_test_df shape:', y_test_df.shape)
###############################################################################
## Convert dataframe to a numpy array
###############################################################################
print ('\nConverting dataframe to numpy array.')
X_train = X_train_df.values
y_train = y_train_df.values
X_val = X_val_df.values
y_val = y_val_df.values
X_test = X_test_df.values
y_test = y_test_df.values
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_val shape:', X_val.shape)
print ('y_val shape:', y_val.shape)
print ('X_test shape:', X_test.shape)
print ('y_test shape:', y_test.shape)
###############################################################################
## Apply normalization
###############################################################################
### K: NOTE: Only use derived information from the train set to avoid leakage.
print ('\nApplying normalization.')
startTime = time.time ()
# Standardize features to zero mean / unit variance; the scaler is fit on the
# train split only and then applied to validation and test.
scaler = StandardScaler ()
scaler.fit (X_train)
X_train = scaler.transform (X_train)
X_val = scaler.transform (X_val)
X_test = scaler.transform (X_test)
print (str (time.time () - startTime), 'to normalize data.')
###############################################################################
## Perform feature selection
###############################################################################
### K: Let the autoencoder reconstruct the data.
###############################################################################
NUMBER_OF_FEATURES = 9 #'all'
print ('\nSelecting top', NUMBER_OF_FEATURES, 'features.')
startTime = time.time ()
#fs = SelectKBest (score_func = mutual_info_classif, k = NUMBER_OF_FEATURES)
### K: ~30 minutes to FAIL fit mutual_info_classif to 5% bot-iot
#fs = SelectKBest (score_func = chi2, k = NUMBER_OF_FEATURES) # X must be >= 0
### K: ~4 seconds to fit chi2 to 5% bot-iot (MinMaxScaler (0, 1))
# Univariate ANOVA F-test scores; keep the NUMBER_OF_FEATURES best columns.
fs = SelectKBest (score_func = f_classif, k = NUMBER_OF_FEATURES)
### K: ~4 seconds to fit f_classif to 5% bot-iot
fs.fit (X_train, y_train)
X_train = fs.transform (X_train)
X_val = fs.transform (X_val)
X_test = fs.transform (X_test)
print (str (time.time () - startTime), 'to select features.')
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_val shape:', X_val.shape)
print ('y_val shape:', y_val.shape)
print ('X_test shape:', X_test.shape)
print ('y_test shape:', y_test.shape)
# Log the per-feature F-scores, sorted ascending (best features printed last).
bestFeatures = []
for feature in range (len (fs.scores_)):
  bestFeatures.append ({'f': feature, 's': fs.scores_ [feature]})
bestFeatures = sorted (bestFeatures, key = lambda k: k ['s'])
for feature in bestFeatures:
  print ('Feature %d: %f' % (feature ['f'], feature ['s']))
###############################################################################
## Create learning model (Autoencoder) and tune hyperparameters
###############################################################################
# The triple-quoted block below is disabled hyperparameter-tuning code kept
# for reference; it is a bare string literal and is never executed.
'''
###############################################################################
#Hyperparameter tuning
test_fold = np.repeat ([-1, 0], [X_train.shape [0], X_val.shape [0]])
myPreSplit = PredefinedSplit (test_fold)
def create_model (learn_rate = 0.01, dropout_rate = 0.0, weight_constraint = 0,
                  metrics = ['mse']):
  model = Sequential ()
  model.add (Dense (X_train.shape [1], activation = 'relu',
                    input_shape = (X_train.shape [1], )))
  model.add (Dense (32, activation = 'relu'))
  model.add (Dense (8, activation = 'relu'))
  model.add (Dense (32, activation = 'relu'))
  model.add (Dense (X_train.shape [1], activation = None))
  model.compile (loss = 'mean_squared_error',
                 optimizer = 'adam',
                 metrics = metrics)
  return model
model = KerasRegressor (build_fn = create_model, verbose = 2)
batch_size = [5000, 10000]#, 50]
epochs = [10]#, 5, 10]
learn_rate = [0.001, 0.01, 0.1]#, 0.2, 0.3]
dropout_rate = [0.0]#, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
weight_constraint = [0]#1, 2, 3, 4, 5]
param_grid = dict (batch_size = batch_size, epochs = epochs,
                   dropout_rate = dropout_rate, learn_rate = learn_rate,
                   weight_constraint = weight_constraint)
grid = GridSearchCV (estimator = model, param_grid = param_grid,
                     scoring = 'neg_mean_squared_error', cv = myPreSplit,
                     verbose = 2, n_jobs = 1)
grid_result = grid.fit (np.vstack ( (X_train, X_val)),#, axis = 1),
                        np.vstack ( (X_train, X_val)))#, axis = 1))
print (grid_result.best_params_)
print ("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_ ['mean_test_score']
stds = grid_result.cv_results_ ['std_test_score']
params = grid_result.cv_results_ ['params']
for mean, stdev, param in zip (means, stds, params):
  print ("%f (%f) with: %r" % (mean, stdev, param))
'''
###############################################################################
## Finished model
# Regression metrics to monitor reconstruction quality during training.
METRICS = [keras.metrics.MeanSquaredError (name = 'MSE'),
           keras.metrics.RootMeanSquaredError (name = 'RMSE'),
           keras.metrics.MeanAbsoluteError (name = 'MAE'),]
NUMBER_OF_EPOCHS = 25
BATCH_SIZE = 5000
LEARNING_RATE = 0.001
print ('\nCreating learning model.')
# Symmetric autoencoder: input -> 32 -> 8 (bottleneck) -> 32 -> input, with a
# linear output layer so reconstructions are unbounded.
clf = Sequential ()
clf.add (Dense (X_train.shape [1], activation = 'relu',
                input_shape = (X_train.shape [1], )))
clf.add (Dense (32, activation = 'relu'))
clf.add (Dense (8, activation = 'relu'))
clf.add (Dense (32, activation = 'relu'))
clf.add (Dense (X_train.shape [1], activation = None))
###############################################################################
## Compile the network
###############################################################################
print ('\nCompiling the network.')
clf.compile (loss = 'mean_squared_error',
             optimizer = Adam (lr = LEARNING_RATE),
             metrics = METRICS)
print ('Model summary:')
clf.summary ()
###############################################################################
## Fit the network
###############################################################################
print ('\nFitting the network.')
startTime = time.time ()
# The autoencoder learns to reconstruct its own input (X maps to X).
history = clf.fit (X_train, X_train,
                   batch_size = BATCH_SIZE,
                   epochs = NUMBER_OF_EPOCHS,
                   verbose = 2, #1 = progress bar, not useful for logging
                   workers = 0,
                   use_multiprocessing = True,
                   #class_weight = 'auto',
                   validation_data = (X_val, X_val))
print (str (time.time () - startTime), 's to train model.')
###############################################################################
## Analyze results
###############################################################################
X_val_pred = clf.predict (X_val)
X_train_pred = clf.predict (X_train)
print ('Train error:' , mean_squared_error (X_train_pred, X_train))
print ('Validation error:', mean_squared_error (X_val_pred, X_val))
### K: This looks like another hyperparameter to be adjusted by using a
### separate validation set that contains normal and anomaly samples.
### K: I've guessed 1%, this may be a future line of research.
THRESHOLD_SAMPLE_PERCENTAGE = 1/100
# Per-sample reconstruction error (mean squared error over the features).
train_mse_element_wise = np.mean (np.square (X_train_pred - X_train), axis = 1)
val_mse_element_wise = np.mean (np.square (X_val_pred - X_val), axis = 1)
max_threshold_val = np.max (val_mse_element_wise)
print ('max_Thresh val:', max_threshold_val)
print ('samples:')
print (int (round (val_mse_element_wise.shape [0] *
                   THRESHOLD_SAMPLE_PERCENTAGE)))
# Take the top THRESHOLD_SAMPLE_PERCENTAGE validation errors (negation +
# np.partition selects the largest values without a full sort).
top_n_values_val = np.partition (-val_mse_element_wise,
                                int (round (val_mse_element_wise.shape [0] *
                                            THRESHOLD_SAMPLE_PERCENTAGE)))
top_n_values_val = -top_n_values_val [: int (round (val_mse_element_wise.shape [0] *
                                                   THRESHOLD_SAMPLE_PERCENTAGE))]
### The classification threshold is the median of the N largest costs obtained
### when running the network on the validation set. N is a hyperparameter that
### could be tuned, but that requires a validation set with anomalous samples
### in addition to the current one, which only has non-anomalous samples.
### @TODO: Develop and validate a set following this new technique.
threshold = np.median (top_n_values_val)
print ('Thresh val:', threshold)
### K: NOTE: Only look at test results when publishing...
# Everything below this exit is intentionally unreachable during development.
sys.exit ()
X_test_pred = clf.predict (X_test)
print (X_test_pred.shape)
print ('Test error:', mean_squared_error (X_test_pred, X_test))
y_pred = np.mean (np.square (X_test_pred - X_test), axis = 1)
# Sort (label, error) pairs by label so the printout groups the classes.
y_test, y_pred = zip (*sorted (zip (y_test, y_pred)))
# 0 == normal
# 1 == attack
print ('\nPerformance on TEST set:')
print ('\nMSE (pred, real) | Label (ordered)')
# A sample is classified as attack (positive) when its reconstruction error
# is BELOW the threshold, since the autoencoder was fit on attack traffic.
tp, tn, fp, fn = 0, 0, 0, 0
for label, pred in zip (y_test, y_pred):
  if ((pred >= threshold) and (label == 0)):
    print ('True negative.')
    tn += 1
  elif ((pred >= threshold) and (label == 1)):
    print ('False negative!')
    fn += 1
  elif ((pred < threshold) and (label == 1)):
    print ('True positive.')
    tp += 1
  elif ((pred < threshold) and (label == 0)):
    print ('False positive!')
    fp += 1
print ('Confusion matrix:')
print ('tp | fp')
print ('fn | tn')
print (tp, '|', fp)
print (fn, '|', tn)
print ('TP:', tp)
print ('TN:', tn)
print ('FP:', fp)
print ('FN:', fn)
| [
"numpy.random.seed",
"unit.load_dataset",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"pandas.set_option",
"unit.display_general_information",
"pandas.DataFrame",
"numpy.max",
"pandas.concat",
"sklearn.metrics.mean_squared_error",
"numpy.median",
"unit.r... | [((2211, 2250), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (2224, 2250), True, 'import pandas as pd\n'), ((2252, 2291), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(5)'], {}), "('display.max_columns', 5)\n", (2265, 2291), True, 'import pandas as pd\n'), ((2928, 2949), 'numpy.random.seed', 'np.random.seed', (['STATE'], {}), '(STATE)\n', (2942, 2949), True, 'import numpy as np\n'), ((3158, 3227), 'unit.load_dataset', 'load_dataset', (['FILE_NAME', 'FIVE_PERCENT_FILES', 'INDEX_COLUMN', 'NAN_VALUES'], {}), '(FILE_NAME, FIVE_PERCENT_FILES, INDEX_COLUMN, NAN_VALUES)\n', (3170, 3227), False, 'from unit import remove_columns_with_one_value, remove_nan_columns, load_dataset\n'), ((3537, 3585), 'unit.remove_columns_with_one_value', 'remove_columns_with_one_value', (['df'], {'verbose': '(False)'}), '(df, verbose=False)\n', (3566, 3585), False, 'from unit import remove_columns_with_one_value, remove_nan_columns, load_dataset\n'), ((4529, 4573), 'unit.remove_nan_columns', 'remove_nan_columns', (['df', '(1 / 2)'], {'verbose': '(False)'}), '(df, 1 / 2, verbose=False)\n', (4547, 4573), False, 'from unit import remove_columns_with_one_value, remove_nan_columns, load_dataset\n'), ((4773, 4789), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (4787, 4789), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder\n'), ((5455, 5486), 'unit.display_general_information', 'display_general_information', (['df'], {}), '(df)\n', (5482, 5486), False, 'from unit import display_general_information, display_feature_distribution\n'), ((6359, 6373), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6371, 6373), True, 'import pandas as pd\n'), ((6385, 6416), 'pandas.concat', 'pd.concat', (['[df_test, df_normal]'], {}), '([df_test, df_normal])\n', (6394, 6416), True, 'import pandas as pd\n'), ((6429, 6468), 'pandas.concat', 
'pd.concat', (['[df_test, df_random_attacks]'], {}), '([df_test, df_random_attacks])\n', (6438, 6468), True, 'import pandas as pd\n'), ((6814, 6927), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df.loc[:, df.columns != TARGET]', 'df[TARGET]'], {'test_size': 'VALIDATION_SIZE', 'random_state': 'STATE'}), '(df.loc[:, df.columns != TARGET], df[TARGET], test_size=\n VALIDATION_SIZE, random_state=STATE)\n', (6830, 6927), False, 'from sklearn.model_selection import train_test_split, PredefinedSplit, RandomizedSearchCV\n'), ((8323, 8334), 'time.time', 'time.time', ([], {}), '()\n', (8332, 8334), False, 'import time\n'), ((8345, 8361), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8359, 8361), False, 'from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler\n'), ((8971, 8982), 'time.time', 'time.time', ([], {}), '()\n', (8980, 8982), False, 'import time\n'), ((9276, 9331), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'f_classif', 'k': 'NUMBER_OF_FEATURES'}), '(score_func=f_classif, k=NUMBER_OF_FEATURES)\n', (9287, 9331), False, 'from sklearn.feature_selection import SelectKBest\n'), ((12639, 12651), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12649, 12651), False, 'from keras.models import Sequential\n'), ((13562, 13573), 'time.time', 'time.time', ([], {}), '()\n', (13571, 13573), False, 'import time\n'), ((14864, 14892), 'numpy.max', 'np.max', (['val_mse_element_wise'], {}), '(val_mse_element_wise)\n', (14870, 14892), True, 'import numpy as np\n'), ((15834, 15861), 'numpy.median', 'np.median', (['top_n_values_val'], {}), '(top_n_values_val)\n', (15843, 15861), True, 'import numpy as np\n'), ((15956, 15966), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15964, 15966), False, 'import sys\n'), ((12662, 12737), 'keras.layers.Dense', 'Dense', (['X_train.shape[1]'], {'activation': '"""relu"""', 'input_shape': '(X_train.shape[1],)'}), "(X_train.shape[1], 
activation='relu', input_shape=(X_train.shape[1],))\n", (12667, 12737), False, 'from keras.layers import Dense, Dropout\n'), ((12778, 12806), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (12783, 12806), False, 'from keras.layers import Dense, Dropout\n'), ((12820, 12847), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (12825, 12847), False, 'from keras.layers import Dense, Dropout\n'), ((12862, 12890), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (12867, 12890), False, 'from keras.layers import Dense, Dropout\n'), ((12904, 12944), 'keras.layers.Dense', 'Dense', (['X_train.shape[1]'], {'activation': 'None'}), '(X_train.shape[1], activation=None)\n', (12909, 12944), False, 'from keras.layers import Dense, Dropout\n'), ((14334, 14375), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['X_train_pred', 'X_train'], {}), '(X_train_pred, X_train)\n', (14352, 14375), False, 'from sklearn.metrics import cohen_kappa_score, mean_squared_error\n'), ((14406, 14443), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['X_val_pred', 'X_val'], {}), '(X_val_pred, X_val)\n', (14424, 14443), False, 'from sklearn.metrics import cohen_kappa_score, mean_squared_error\n'), ((14723, 14756), 'numpy.square', 'np.square', (['(X_train_pred - X_train)'], {}), '(X_train_pred - X_train)\n', (14732, 14756), True, 'import numpy as np\n'), ((14801, 14830), 'numpy.square', 'np.square', (['(X_val_pred - X_val)'], {}), '(X_val_pred - X_val)\n', (14810, 14830), True, 'import numpy as np\n'), ((16051, 16090), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['X_test_pred', 'X_test'], {}), '(X_test_pred, X_test)\n', (16069, 16090), False, 'from sklearn.metrics import cohen_kappa_score, mean_squared_error\n'), ((16112, 16143), 'numpy.square', 'np.square', (['(X_test_pred - X_test)'], {}), '(X_test_pred - 
X_test)\n', (16121, 16143), True, 'import numpy as np\n'), ((13237, 13259), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'LEARNING_RATE'}), '(lr=LEARNING_RATE)\n', (13241, 13259), False, 'from keras.optimizers import RMSprop, Adam\n'), ((8501, 8512), 'time.time', 'time.time', ([], {}), '()\n', (8510, 8512), False, 'import time\n'), ((9517, 9528), 'time.time', 'time.time', ([], {}), '()\n', (9526, 9528), False, 'import time\n'), ((14005, 14016), 'time.time', 'time.time', ([], {}), '()\n', (14014, 14016), False, 'import time\n')] |
import tensorflow as tf
import torch
import numpy as np
from typing import Union
# compute loss for a batch
def cross_entropy_loss(outputs: tf.Tensor, labels: np.ndarray) -> tf.Tensor:
    """
    Compute the mean sparse softmax cross-entropy between logits and labels.
    Args:
        outputs: Model output given as tensor of shape `[batch_size, num_classes]`.
        labels: True class given as a numpy array of shape `[batch_size,]`.
    Returns:
        The mean loss for this batch.
    """
    # Cross-entropy expects integer class ids, so squeeze and cast first.
    class_ids = tf.cast(labels.squeeze(), tf.int64)
    per_sample_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=class_ids, logits=outputs)
    return tf.reduce_mean(per_sample_loss)
def to_numpy(x: Union[torch.Tensor, tf.Tensor]) -> np.ndarray:
    """Convert a torch or TensorFlow tensor to a numpy array (pass-through otherwise)."""
    if isinstance(x, torch.Tensor):
        # Detach from the graph and move to host memory before converting.
        return x.cpu().detach().numpy()
    if isinstance(x, tf.Tensor):
        return x.numpy()
    return x
def accuracy(
    outputs: Union[torch.Tensor, tf.Tensor, np.ndarray],
    labels: Union[torch.Tensor, tf.Tensor, np.ndarray],
) -> float:
    """
    Calculate the accuracy given the predicted probability distribution and label.
    Args:
        outputs: Model output given as tensor of shape `[batch_size, num_classes]`.
        labels: True class given as tensor of shape `[batch_size,]`.
    Returns:
        The accuracy for this batch.
    """
    outputs, labels = to_numpy(outputs), to_numpy(labels).squeeze()
    # NOTE: assert is stripped under `python -O`; kept for parity with callers
    # that may rely on AssertionError for mismatched batch sizes.
    assert outputs.shape[0] == labels.shape[0]
    pred = np.argmax(outputs, axis=1)
    # Fraction of correct predictions (replaces the dated `1.0 * correct / total`
    # Py2 idiom and no longer shadows the function's own name with a local).
    return float(np.mean(pred == labels))
| [
"tensorflow.cast",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"numpy.argmax",
"tensorflow.reduce_mean"
] | [((574, 599), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.int64'], {}), '(labels, tf.int64)\n', (581, 599), True, 'import tensorflow as tf\n'), ((611, 688), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'outputs'}), '(labels=labels, logits=outputs)\n', (657, 688), True, 'import tensorflow as tf\n'), ((700, 720), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (714, 720), True, 'import tensorflow as tf\n'), ((1511, 1537), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (1520, 1537), True, 'import numpy as np\n')] |
import os
import copy
import json
import numpy as np
from typing import Callable, List
from typing import Optional
from obj_data import ObjCLEVRVisionLanguageCLIPDataset, ObjCLEVRVisionLanguageCLIPDataModule, \
ObjAugCLEVRVisionLanguageCLIPDataset, ObjAugCLEVRVisionLanguageCLIPDataModule
class ObjCATERVisionLanguageCLIPDataset(ObjCLEVRVisionLanguageCLIPDataset):
    """CATER variant of the CLEVR vision-language dataset."""

    def __init__(self,
                 data_root: str,
                 max_num_images: Optional[int],
                 clip_transforms: Callable,
                 tokenizer: str = 'clip',
                 max_n_objects: int = 10,
                 split: str = "train",
                 clip_len: int = 301,
                 prompt: str = 'a {color} {shape}',
                 is_video: bool = False,
                 shuffle_obj: bool = False,
                 pad_text: str = 'background'):
        # Select the annotation subset matching the video type.
        if 'cameramotion' in data_root:
            self.cater_subset = 'cater_cameramotion'
        else:
            self.cater_subset = 'cater'
        super().__init__(
            data_root,
            max_num_images,
            clip_transforms,
            tokenizer=tokenizer,
            max_n_objects=max_n_objects,
            split=split,
            clip_len=clip_len,
            prompt=prompt,
            is_video=is_video,
            shuffle_obj=shuffle_obj,
            pad_text=pad_text)

    def _generate_text(self, index: int):
        """Generate one text description per object in the indexed scene."""
        anno = self.annos[self._get_idx(index)[0]]
        # e.g. 'a large red cone'
        texts = [
            self.prompt.format(
                size=obj['size'], color=obj['color'], shape=obj['shape'])
            for obj in anno['objects']
        ]
        # pad to a fixed length with a special text, e.g. 'background'
        texts += [self.pad_text] * (self.text_num - len(texts))
        # randomize object order during training only
        if self.split == 'train' and self.shuffle_obj:
            np.random.shuffle(texts)
        return texts

    def get_files(self) -> List[str]:
        """Collect video paths and the matching scene annotations."""
        self.data_path = os.path.join(self.data_root, "videos")
        assert os.path.exists(
            self.data_path), f"Path {self.data_path} does not exist"
        anno_list_file = os.path.join(
            './data/', f'{self.cater_subset}_{self.split}_annos.json')
        with open(anno_list_file, 'r') as f:
            self.anno_paths = json.load(f)
        self.anno_paths.sort()
        img_paths, all_annos = [], []
        for anno_name in self.anno_paths:
            if self.max_num_images is not None and \
                    len(img_paths) > self.max_num_images:
                break
            with open(os.path.join(self.data_root, 'scenes', anno_name),
                      'r') as f:
                anno = json.load(f)
            img_name = anno['image_filename'].replace('CLEVR_new', 'CATER_new')
            image_path = os.path.join(self.data_path, img_name)
            assert os.path.exists(image_path), f"{image_path} does not exist"
            img_paths.append(image_path)
            all_annos.append(anno)
        return img_paths, all_annos
class ObjCATERVisionLanguageCLIPDataModule(ObjCLEVRVisionLanguageCLIPDataModule
                                           ):
    """Data module that builds the CATER train and validation datasets."""

    def __init__(self,
                 data_root: str,
                 train_batch_size: int,
                 val_batch_size: int,
                 clip_transforms: Callable,
                 num_workers: int,
                 tokenizer: str = 'clip',
                 max_n_objects: int = 10,
                 prompt: str = 'a {color} {shape}',
                 shuffle_obj: bool = False,
                 pad_text: str = 'background'):
        super().__init__(
            data_root,
            train_batch_size,
            val_batch_size,
            clip_transforms,
            num_workers,
            tokenizer=tokenizer,
            max_n_objects=max_n_objects,
            prompt=prompt,
            shuffle_obj=shuffle_obj,
            pad_text=pad_text)

    def _build_dataset(self):
        """Instantiate the train/val datasets, sharing the common settings."""
        shared_kwargs = dict(
            data_root=self.data_root,
            tokenizer=self.tokenizer,
            max_n_objects=self.max_n_objects,
            prompt=self.prompt,
            shuffle_obj=self.shuffle_obj,
            pad_text=self.pad_text,
        )
        self.train_dataset = ObjCATERVisionLanguageCLIPDataset(
            max_num_images=self.num_train_images,
            clip_transforms=self.clip_transforms,
            split='train',
            **shared_kwargs)
        self.val_dataset = ObjCATERVisionLanguageCLIPDataset(
            max_num_images=self.num_val_images,
            clip_transforms=self.val_clip_transforms,
            split='val',
            **shared_kwargs)
class ObjAugCATERVisionLanguageCLIPDataset(ObjAugCLEVRVisionLanguageCLIPDataset
                                           ):
    """Augmented CATER dataset: also returns a shuffled copy of the per-object
    prompts plus the permutation and foreground masks."""

    def __init__(self,
                 data_root: str,
                 max_num_images: Optional[int],
                 clip_transforms: Callable,
                 tokenizer: str = 'clip',
                 max_n_objects: int = 10,
                 split: str = "train",
                 clip_len: int = 301,
                 prompt: str = 'a {color} {shape}',
                 is_video: bool = False,
                 shuffle_obj: bool = False,
                 pad_text: str = 'background',
                 flip_img: bool = False):
        # Select the annotation subset matching the video type.
        self.cater_subset = 'cater_cameramotion' if \
            'cameramotion' in data_root else 'cater'
        super().__init__(
            data_root,
            max_num_images,
            clip_transforms,
            tokenizer=tokenizer,
            max_n_objects=max_n_objects,
            split=split,
            clip_len=clip_len,
            prompt=prompt,
            is_video=is_video,
            shuffle_obj=shuffle_obj,
            pad_text=pad_text,
            flip_img=flip_img)

    def _generate_text(self, index: int):
        """Generate text descriptions of each object in the scene.

        Returns:
            texts: per-object prompts padded with `pad_text` to `text_num`.
            shuffled_texts: permuted copy of `texts` (train split only, else None).
            idx: permutation used for the shuffling (train split only, else None).
            obj_mask: bool mask, True for real (foreground) objects.
            shuffled_obj_mask: `obj_mask` under the same permutation (or None).
        """
        img_idx = self._get_idx(index)[0]
        anno = self.annos[img_idx]
        colors = [obj['color'] for obj in anno['objects']]
        shapes = [obj['shape'] for obj in anno['objects']]
        sizes = [obj['size'] for obj in anno['objects']]
        texts = [
            self.prompt.format(size=size, color=color, shape=shape)
            for size, color, shape in zip(sizes, colors, shapes)
        ]
        # pad with some special texts, e.g. 'background'
        # `True` in `obj_mask` stands for foreground objects
        # FIX: `np.bool` was removed in NumPy 1.24 — use the builtin `bool`.
        obj_mask = np.zeros(self.text_num, dtype=bool)
        obj_mask[:len(texts)] = True
        texts = texts + [self.pad_text] * (self.text_num - len(texts))
        # shuffle the order of objects (train split only)
        shuffled_texts, idx, shuffled_obj_mask = None, None, None
        if self.split == 'train':
            idx = np.arange(len(texts))
            if self.shuffle_obj:
                np.random.shuffle(idx)
                shuffled_texts = [texts[i] for i in idx]
            else:
                shuffled_texts = copy.deepcopy(texts)
            shuffled_obj_mask = obj_mask[idx]
        return texts, shuffled_texts, idx, obj_mask, shuffled_obj_mask

    def get_files(self) -> List[str]:
        """Load the image (video) path and loaded annotations (lists)."""
        self.data_path = os.path.join(self.data_root, "videos")
        assert os.path.exists(
            self.data_path), f"Path {self.data_path} does not exist"
        with open(
                os.path.join('./data/',
                             f'{self.cater_subset}_{self.split}_annos.json'),
                'r') as f:
            self.anno_paths = json.load(f)
        self.anno_paths.sort()
        img_paths, all_annos = [], []
        for i, anno_name in enumerate(self.anno_paths):
            # `>` (not `>=`) keeps one extra entry once the cap is exceeded;
            # preserved as-is for parity with the base implementation.
            if self.max_num_images is not None and \
                    len(img_paths) > self.max_num_images:
                break
            anno_path = os.path.join(self.data_root, 'scenes', anno_name)
            with open(anno_path, 'r') as f:
                anno = json.load(f)
            img_name = anno['image_filename'].replace('CLEVR_new', 'CATER_new')
            image_path = os.path.join(self.data_path, img_name)
            assert os.path.exists(image_path), f"{image_path} does not exist"
            img_paths.append(image_path)
            all_annos.append(anno)
        return img_paths, all_annos
class ObjAugCATERVisionLanguageCLIPDataModule(
        ObjAugCLEVRVisionLanguageCLIPDataModule):
    """Data module that builds the augmented CATER train/val datasets."""

    def __init__(self,
                 data_root: str,
                 train_batch_size: int,
                 val_batch_size: int,
                 clip_transforms: Callable,
                 num_workers: int,
                 tokenizer: str = 'clip',
                 max_n_objects: int = 10,
                 prompt: str = 'a {color} {shape}',
                 shuffle_obj: bool = False,
                 pad_text: str = 'background',
                 flip_img: bool = False):
        super().__init__(
            data_root,
            train_batch_size,
            val_batch_size,
            clip_transforms,
            num_workers,
            tokenizer=tokenizer,
            max_n_objects=max_n_objects,
            prompt=prompt,
            shuffle_obj=shuffle_obj,
            pad_text=pad_text,
            flip_img=flip_img)

    def _build_dataset(self):
        """Instantiate the train/val datasets, sharing the common settings."""
        shared_kwargs = dict(
            data_root=self.data_root,
            tokenizer=self.tokenizer,
            max_n_objects=self.max_n_objects,
            prompt=self.prompt,
            shuffle_obj=self.shuffle_obj,
            pad_text=self.pad_text,
            flip_img=self.flip_img,
        )
        self.train_dataset = ObjAugCATERVisionLanguageCLIPDataset(
            max_num_images=self.num_train_images,
            clip_transforms=self.clip_transforms,
            split='train',
            **shared_kwargs)
        self.val_dataset = ObjAugCATERVisionLanguageCLIPDataset(
            max_num_images=self.num_val_images,
            clip_transforms=self.val_clip_transforms,
            split='val',
            **shared_kwargs)
| [
"copy.deepcopy",
"json.load",
"numpy.zeros",
"os.path.exists",
"os.path.join",
"numpy.random.shuffle"
] | [((2314, 2352), 'os.path.join', 'os.path.join', (['self.data_root', '"""videos"""'], {}), "(self.data_root, 'videos')\n", (2326, 2352), False, 'import os\n'), ((2368, 2398), 'os.path.exists', 'os.path.exists', (['self.data_path'], {}), '(self.data_path)\n', (2382, 2398), False, 'import os\n'), ((7028, 7066), 'numpy.zeros', 'np.zeros', (['self.text_num'], {'dtype': 'np.bool'}), '(self.text_num, dtype=np.bool)\n', (7036, 7066), True, 'import numpy as np\n'), ((7810, 7848), 'os.path.join', 'os.path.join', (['self.data_root', '"""videos"""'], {}), "(self.data_root, 'videos')\n", (7822, 7848), False, 'import os\n'), ((7864, 7894), 'os.path.exists', 'os.path.exists', (['self.data_path'], {}), '(self.data_path)\n', (7878, 7894), False, 'import os\n'), ((2130, 2154), 'numpy.random.shuffle', 'np.random.shuffle', (['texts'], {}), '(texts)\n', (2147, 2154), True, 'import numpy as np\n'), ((2647, 2659), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2656, 2659), False, 'import json\n'), ((2942, 2991), 'os.path.join', 'os.path.join', (['self.data_root', '"""scenes"""', 'anno_name'], {}), "(self.data_root, 'scenes', anno_name)\n", (2954, 2991), False, 'import os\n'), ((3177, 3215), 'os.path.join', 'os.path.join', (['self.data_path', 'img_name'], {}), '(self.data_path, img_name)\n', (3189, 3215), False, 'import os\n'), ((3235, 3261), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (3249, 3261), False, 'import os\n'), ((8143, 8155), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8152, 8155), False, 'import json\n'), ((8438, 8487), 'os.path.join', 'os.path.join', (['self.data_root', '"""scenes"""', 'anno_name'], {}), "(self.data_root, 'scenes', anno_name)\n", (8450, 8487), False, 'import os\n'), ((8673, 8711), 'os.path.join', 'os.path.join', (['self.data_path', 'img_name'], {}), '(self.data_path, img_name)\n', (8685, 8711), False, 'import os\n'), ((8731, 8757), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (8745, 
8757), False, 'import os\n'), ((2488, 2559), 'os.path.join', 'os.path.join', (['"""./data/"""', 'f"""{self.cater_subset}_{self.split}_annos.json"""'], {}), "('./data/', f'{self.cater_subset}_{self.split}_annos.json')\n", (2500, 2559), False, 'import os\n'), ((3059, 3071), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3068, 3071), False, 'import json\n'), ((7403, 7425), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (7420, 7425), True, 'import numpy as np\n'), ((7534, 7554), 'copy.deepcopy', 'copy.deepcopy', (['texts'], {}), '(texts)\n', (7547, 7554), False, 'import copy\n'), ((7984, 8055), 'os.path.join', 'os.path.join', (['"""./data/"""', 'f"""{self.cater_subset}_{self.split}_annos.json"""'], {}), "('./data/', f'{self.cater_subset}_{self.split}_annos.json')\n", (7996, 8055), False, 'import os\n'), ((8555, 8567), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8564, 8567), False, 'import json\n')] |
import torch
from torch import nn
import os
from torch import optim
import numpy as np
import pickle
from smpl_torch_batch import SMPLModel
from torch.utils.data import Dataset, DataLoader
from sys import platform
class Joint2SMPLDataset(Dataset):
    """Joint-to-SMPL regression data: predict pose (theta) and shape (beta)
    parameters from input joint positions.

    Samples are loaded from a pickle file containing 'joints', 'thetas' and
    (unless `fix_beta_zero`) 'betas' arrays.  When `fix_beta_zero` is set,
    zero betas are served instead of stored ones.  Train/val: 1:1.
    """

    def __init__(self, pickle_file, batch_size=64, fix_beta_zero=False):
        super(Joint2SMPLDataset, self).__init__()
        assert os.path.isfile(pickle_file)
        with open(pickle_file, 'rb') as fh:
            payload = pickle.load(fh)
        self.thetas = payload['thetas']
        self.joints = payload['joints']
        self.fix_beta_zero = fix_beta_zero
        if not fix_beta_zero:
            self.betas = payload['betas']
        print(self.joints.shape)
        self.batch_size = batch_size
        self.length = self.joints.shape[0]
        print(self.length)

    def __getitem__(self, item):
        # When shape is fixed, betas are all-zero regardless of the pickle.
        if self.fix_beta_zero:
            betas = np.zeros(10, dtype=np.float64)
        else:
            betas = self.betas[item]
        return {
            'joints': self.joints[item],
            'thetas': self.thetas[item],
            'betas': betas,
        }

    def rand_val_batch(self):
        """Sample one random contiguous batch for validation."""
        n_batches = self.length // self.batch_size
        pick = np.random.randint(0, n_batches)
        lo = pick * self.batch_size
        hi = (pick + 1) * self.batch_size
        if self.fix_beta_zero:
            betas = np.zeros((self.batch_size, 10), dtype=np.float64)
        else:
            betas = self.betas[lo:hi]
        return {'joints': self.joints[lo:hi],
                'thetas': self.thetas[lo:hi],
                'betas': betas}

    def __len__(self):
        return self.length
class ResBlock1d(nn.Module):
def __init__(self, indim=256, outdim=None, use_dropout=False):
super(ResBlock1d, self).__init__()
if outdim is None:
outdim = indim
self.indim = indim
self.outdim = outdim
model = [
nn.Linear(indim, indim),
nn.BatchNorm1d(indim),
nn.LeakyReLU(0.2)
]
if use_dropout:
model.append(nn.Dropout(0.5))
self.model = nn.Sequential(*model)
if outdim != indim:
self.linear = nn.Linear(indim, outdim)
def forward(self, x):
out = x + self.model(x)
if self.outdim != self.indim:
out = self.linear(out)
return out
class ResidualRegressor(nn.Module):
    """Residual MLP that regresses SMPL pose parameters (theta) from a flat
    joint vector.

    Architecture: Linear -> BatchNorm1d -> LeakyReLU stem, followed by
    `hidden_layer` ResBlock1d blocks, then a Linear head to `thetadim`.
    `betadim` is accepted for interface compatibility but unused (the beta
    head is disabled).
    """

    def __init__(self, hidden_dim=256, indim=57, thetadim=72, betadim=10,
                 batch_size=64, hidden_layer=3, use_dropout=False):
        super(ResidualRegressor, self).__init__()
        stem = [
            nn.Linear(indim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.LeakyReLU(0.2),
        ]
        for _ in range(hidden_layer):
            stem.append(ResBlock1d(indim=hidden_dim, use_dropout=use_dropout))
        self.feature_extractor = nn.Sequential(*stem)
        self.theta_predictor = nn.Linear(hidden_dim, thetadim)
        # Beta prediction head intentionally disabled:
        # self.beta_predictor = nn.Linear(hidden_dim, betadim)

    def forward(self, x):
        feats = self.feature_extractor(x)
        return self.theta_predictor(feats)
class AcosRegressor(nn.Module):
    """Regress SMPL pose parameters (theta) from 24 joints using inter-limb
    angle features.

    Limb (bone) vectors are built from (child, parent) joint index pairs,
    normalized, and the pairwise angles between all 23 limbs (23*23 values,
    via acos of dot products) are concatenated with the raw joint
    coordinates (24*3) and fed to a residual MLP.
    """

    def __init__(self, hidden_dim=256, indim=72, thetadim=72, betadim=10,
                 batch_size=64, hidden_layer=3, use_dropout=False):
        super(AcosRegressor, self).__init__()
        # Each column (row0=child, row1=parent) defines one limb; 1-indexed.
        self.limbs_index = torch.tensor([
            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24],
            [2, 13, 4, 5, 6, 7, 13, 13, 8, 9, 10, 11, 14, 15, 16, 17, 18, 19, 20, 16, 21, 22, 23]
        ], dtype=torch.long)
        self.limbs_index -= torch.ones_like(self.limbs_index)  # convert to 0-index
        model = [ResBlock1d(indim=23*23+24*3, outdim=hidden_dim)]
        for i in range(hidden_layer):
            model += [ResBlock1d(indim=hidden_dim, use_dropout=use_dropout)]
        model += [nn.Linear(hidden_dim, thetadim)]
        self.model = nn.Sequential(*model)
        self.clamp_eps = 1e-6   # keeps acos input strictly inside (-1, 1)
        self.norm_eps = 1e-9    # avoids division by zero for degenerate limbs

    def forward(self, x):
        # expect x of shape N * 24 * 3 (joint positions)
        # BUG FIX: the original computed
        #   x[:, limbs_index[0], :] - x[:, limbs_index[0], :]
        # (a joint set minus itself), so every limb vector was zero and the
        # angle features were constant.  Use child - parent pairs instead.
        vec = x[:, self.limbs_index[0], :] - x[:, self.limbs_index[1], :]
        # 20190220: normalize vectors so dot products are cosines.
        norm_vec = torch.norm(vec, dim=2, keepdim=True) + self.norm_eps
        vec = vec / norm_vec
        prod = torch.bmm(vec, vec.transpose(1, 2))
        # 20190220: clamp input to avoid NaN from acos at exactly +/-1.
        prod = torch.clamp(prod, min=(-1 + self.clamp_eps), max=1 - self.clamp_eps)
        if torch.isnan(prod).any():
            print('prod nan')
        angles = torch.acos(prod).view(-1, 23 * 23)
        if torch.isnan(angles).any():
            print('angles nan')
        # 20190301: Only use bone vectors and angles (Bad)
        features = torch.cat((x.view(-1, 24 * 3), angles), dim=1)
        return self.model(features)
if __name__ == '__main__':
    # Training script: fit AcosRegressor to map 24 SMPL joints -> pose thetas,
    # supervising both theta values and joints reconstructed through SMPL.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    torch.backends.cudnn.enabled = True
    batch_size = 64
    max_batch_num = 40
    #dataset = Joint2SMPLDataset('train_dataset.pickle', batch_size)
    theta_var = 1.0
    training_stage = 5
    dataset = Joint2SMPLDataset('train_dataset_24_joints_1.0.pickle', batch_size, fix_beta_zero=True)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)
    torch.set_default_dtype(torch.float64)
    device = torch.device('cuda')
    reg = AcosRegressor(batch_size=batch_size).cuda()
    smpl = SMPLModel(device=device,
                model_path = './model_24_joints.pkl',
                simplify=True
            )
    loss_op = nn.L1Loss()
    optimizer = optim.Adam(reg.parameters(), lr=0.0005, betas=(0.5, 0.999), weight_decay=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.25, patience=1, verbose=True)
    batch_num = 0
    ckpt_path = 'checkpoints_0303_24_joints'  # (dropped no-op .format call)
    if not os.path.isdir(ckpt_path):
        os.mkdir(ckpt_path)
    if batch_num > 0 and os.path.isfile('%s/regressor_%03d.pth' % (ckpt_path, batch_num)):
        # BUG FIX: `torch.load_state_dict` / `reg.load` do not exist; load the
        # checkpoint with torch.load and restore via Module.load_state_dict.
        state_dict = torch.load('%s/regressor_%03d.pth' % (ckpt_path, batch_num))
        reg.load_state_dict(state_dict)
    # copy current file into checkpoint folder to record parameters, ugly.
    if platform == 'linux':
        cmd = 'cp train_acos_regressor_24_joints.py ./{}/snapshot.py'.format(ckpt_path)
    else:
        cmd = r'copy train_acos_regressor_24_joints.py {}\snapshot.py'.format(ckpt_path)
    print(cmd)
    os.system(cmd)
    file = open('{}/validation.txt'.format(ckpt_path), 'w')
    trans = torch.zeros((batch_size, 3), dtype=torch.float64, device=device)
    while batch_num < max_batch_num:
        batch_num += 1
        print('Epoch %03d: training...' % batch_num)
        reg.train()
        for (i, data) in enumerate(dataloader):
            joints = torch.as_tensor(data['joints'], device=device)
            thetas = torch.as_tensor(data['thetas'], device=device)
            betas = torch.as_tensor(data['betas'], device=device)
            pred_thetas = reg(joints)
            _, recon_joints = smpl(betas, pred_thetas, trans)
            loss_joints = loss_op(recon_joints, joints)
            # BUG FIX: was `loss_(pred_thetas, thetas)` -- `loss_` is
            # undefined and would raise NameError on the first batch.
            loss_thetas = loss_op(pred_thetas, thetas)
            loss = loss_thetas + 5 * loss_joints
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 32 == 0:
                print('batch %04d: loss joints: %10.6f loss thetas: % 10.6f' \
                    % (i, loss_joints.data.item(), loss_thetas.data.item()))
        print('Validation: ')
        reg.eval()
        data = dataset.rand_val_batch()
        joints = torch.as_tensor(data['joints'], device=device)
        thetas = torch.as_tensor(data['thetas'], device=device)
        betas = torch.as_tensor(data['betas'], device=device)
        with torch.no_grad():
            pred_thetas = reg(joints)
            _, recon_joints = smpl(betas, pred_thetas, trans)
            loss_joints = loss_op(recon_joints, joints)
            loss_thetas = loss_op(pred_thetas, thetas)
        # NOTE(review): `i` here is the index of the last training batch.
        line = 'batch %04d: loss joints: %10.6f loss thetas: % 10.6f' \
            % (i, loss_joints.data.item(), loss_thetas.data.item())
        print(line)
        file.write(line+'\n')
        scheduler.step(loss_joints)
        if batch_num % 5 == 0:
            print('Save models...')
            torch.save(reg.state_dict(), '%s/regressor_%03d.pth' % (ckpt_path, batch_num))
        '''
        if batch_num % 20 == 0 and training_stage < 5:
            # Fine-tuning on the next dataset with larger theta_var
            line = 'Switching dataset from theta_var = {}'.format(theta_var)
            theta_var += 0.2
            training_stage += 1
            dataset = Joint2SMPLDataset('train_dataset_{}.pickle'.format(training_stage),
                batch_size, fix_beta_zero=True)
            dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0, drop_last=True)
            # Renew optimizer and scheduler
            optimizer = optim.Adam(reg.parameters(), lr=0.0005, betas=(0.5, 0.999), weight_decay=1e-4)
            scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                factor=0.25, patience=1, verbose=True)
            line += ' to theta_var = {}\n'.format(theta_var)
            file.write(line)
            print(line)
        '''
    file.close()
| [
"torch.nn.Dropout",
"os.mkdir",
"torch.load_state_dict",
"torch.set_default_dtype",
"os.path.isfile",
"numpy.random.randint",
"pickle.load",
"torch.device",
"torch.no_grad",
"torch.isnan",
"torch.utils.data.DataLoader",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.Linear",
"torc... | [((5633, 5724), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(0)', 'drop_last': '(True)'}), '(dataset, batch_size=batch_size, shuffle=True, num_workers=0,\n drop_last=True)\n', (5643, 5724), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5730, 5768), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (5753, 5768), False, 'import torch\n'), ((5782, 5802), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5794, 5802), False, 'import torch\n'), ((5868, 5943), 'smpl_torch_batch.SMPLModel', 'SMPLModel', ([], {'device': 'device', 'model_path': '"""./model_24_joints.pkl"""', 'simplify': '(True)'}), "(device=device, model_path='./model_24_joints.pkl', simplify=True)\n", (5877, 5943), False, 'from smpl_torch_batch import SMPLModel\n'), ((5993, 6004), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (6002, 6004), False, 'from torch import nn\n'), ((6121, 6218), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer', '"""min"""'], {'factor': '(0.25)', 'patience': '(1)', 'verbose': '(True)'}), "(optimizer, 'min', factor=0.25,\n patience=1, verbose=True)\n", (6157, 6218), False, 'from torch import optim\n'), ((6889, 6903), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (6898, 6903), False, 'import os\n'), ((6982, 7046), 'torch.zeros', 'torch.zeros', (['(batch_size, 3)'], {'dtype': 'torch.float64', 'device': 'device'}), '((batch_size, 3), dtype=torch.float64, device=device)\n', (6993, 7046), False, 'import torch\n'), ((534, 561), 'os.path.isfile', 'os.path.isfile', (['pickle_file'], {}), '(pickle_file)\n', (548, 561), False, 'import os\n'), ((1386, 1414), 'numpy.random.randint', 'np.random.randint', (['(0)', 'length'], {}), '(0, length)\n', (1403, 1414), True, 'import numpy as np\n'), ((2354, 2375), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), 
'(*model)\n', (2367, 2375), False, 'from torch import nn\n'), ((3133, 3154), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (3146, 3154), False, 'from torch import nn\n'), ((3186, 3217), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'thetadim'], {}), '(hidden_dim, thetadim)\n', (3195, 3217), False, 'from torch import nn\n'), ((3709, 3923), 'torch.tensor', 'torch.tensor', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22,\n 23, 24], [2, 13, 4, 5, 6, 7, 13, 13, 8, 9, 10, 11, 14, 15, 16, 17, 18, \n 19, 20, 16, 21, 22, 23]]'], {'dtype': 'torch.long'}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 21, 22, 23, 24], [2, 13, 4, 5, 6, 7, 13, 13, 8, 9, 10, 11, 14, \n 15, 16, 17, 18, 19, 20, 16, 21, 22, 23]], dtype=torch.long)\n', (3721, 3923), False, 'import torch\n'), ((4053, 4086), 'torch.ones_like', 'torch.ones_like', (['self.limbs_index'], {}), '(self.limbs_index)\n', (4068, 4086), False, 'import torch\n'), ((4371, 4392), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (4384, 4392), False, 'from torch import nn\n'), ((4835, 4901), 'torch.clamp', 'torch.clamp', (['prod'], {'min': '(-1 + self.clamp_eps)', 'max': '(1 - self.clamp_eps)'}), '(prod, min=-1 + self.clamp_eps, max=1 - self.clamp_eps)\n', (4846, 4901), False, 'import torch\n'), ((6312, 6336), 'os.path.isdir', 'os.path.isdir', (['ckpt_path'], {}), '(ckpt_path)\n', (6325, 6336), False, 'import os\n'), ((6346, 6365), 'os.mkdir', 'os.mkdir', (['ckpt_path'], {}), '(ckpt_path)\n', (6354, 6365), False, 'import os\n'), ((6391, 6455), 'os.path.isfile', 'os.path.isfile', (["('%s/regressor_%03d.pth' % (ckpt_path, batch_num))"], {}), "('%s/regressor_%03d.pth' % (ckpt_path, batch_num))\n", (6405, 6455), False, 'import os\n'), ((6478, 6549), 'torch.load_state_dict', 'torch.load_state_dict', (["('%s/regressor_%03d.pth' % (ckpt_path, batch_num))"], {}), "('%s/regressor_%03d.pth' % (ckpt_path, batch_num))\n", (6499, 6549), 
False, 'import torch\n'), ((8105, 8151), 'torch.as_tensor', 'torch.as_tensor', (["data['joints']"], {'device': 'device'}), "(data['joints'], device=device)\n", (8120, 8151), False, 'import torch\n'), ((8169, 8215), 'torch.as_tensor', 'torch.as_tensor', (["data['thetas']"], {'device': 'device'}), "(data['thetas'], device=device)\n", (8184, 8215), False, 'import torch\n'), ((8232, 8277), 'torch.as_tensor', 'torch.as_tensor', (["data['betas']"], {'device': 'device'}), "(data['betas'], device=device)\n", (8247, 8277), False, 'import torch\n'), ((628, 642), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (639, 642), False, 'import pickle\n'), ((1148, 1178), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (1156, 1178), True, 'import numpy as np\n'), ((1609, 1658), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 10)'], {'dtype': 'np.float64'}), '((self.batch_size, 10), dtype=np.float64)\n', (1617, 1658), True, 'import numpy as np\n'), ((2167, 2190), 'torch.nn.Linear', 'nn.Linear', (['indim', 'indim'], {}), '(indim, indim)\n', (2176, 2190), False, 'from torch import nn\n'), ((2204, 2225), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['indim'], {}), '(indim)\n', (2218, 2225), False, 'from torch import nn\n'), ((2239, 2256), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2251, 2256), False, 'from torch import nn\n'), ((2439, 2463), 'torch.nn.Linear', 'nn.Linear', (['indim', 'outdim'], {}), '(indim, outdim)\n', (2448, 2463), False, 'from torch import nn\n'), ((2874, 2902), 'torch.nn.Linear', 'nn.Linear', (['indim', 'hidden_dim'], {}), '(indim, hidden_dim)\n', (2883, 2902), False, 'from torch import nn\n'), ((2916, 2942), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_dim'], {}), '(hidden_dim)\n', (2930, 2942), False, 'from torch import nn\n'), ((2956, 2973), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2968, 2973), False, 'from torch import nn\n'), ((4317, 4348), 'torch.nn.Linear', 
'nn.Linear', (['hidden_dim', 'thetadim'], {}), '(hidden_dim, thetadim)\n', (4326, 4348), False, 'from torch import nn\n'), ((4648, 4684), 'torch.norm', 'torch.norm', (['vec'], {'dim': '(2)', 'keepdim': '(True)'}), '(vec, dim=2, keepdim=True)\n', (4658, 4684), False, 'import torch\n'), ((7250, 7296), 'torch.as_tensor', 'torch.as_tensor', (["data['joints']"], {'device': 'device'}), "(data['joints'], device=device)\n", (7265, 7296), False, 'import torch\n'), ((7318, 7364), 'torch.as_tensor', 'torch.as_tensor', (["data['thetas']"], {'device': 'device'}), "(data['thetas'], device=device)\n", (7333, 7364), False, 'import torch\n'), ((7385, 7430), 'torch.as_tensor', 'torch.as_tensor', (["data['betas']"], {'device': 'device'}), "(data['betas'], device=device)\n", (7400, 7430), False, 'import torch\n'), ((8291, 8306), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8304, 8306), False, 'import torch\n'), ((2316, 2331), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (2326, 2331), False, 'from torch import nn\n'), ((4911, 4928), 'torch.isnan', 'torch.isnan', (['prod'], {}), '(prod)\n', (4922, 4928), False, 'import torch\n'), ((4983, 4999), 'torch.acos', 'torch.acos', (['prod'], {}), '(prod)\n', (4993, 4999), False, 'import torch\n'), ((5027, 5046), 'torch.isnan', 'torch.isnan', (['angles'], {}), '(angles)\n', (5038, 5046), False, 'import torch\n')] |
import numpy as np
import pandas as pd
from pathlib import Path
from src.utils import write_json, write_pkl, load_json
def convert_timeseries_into_mmap(data_dir, save_dir, n_rows=100000):
    """
    Read the per-split timeseries.csv files and pack them into one float32
    memory-mapped array of shape (n_rows, 24, 35-1), plus side files mapping
    patient ids to row positions and a json with split lengths / columns.

    Args:
        data_dir: root containing train/ val/ test/ subfolders with
            timeseries.csv each.
        save_dir: output folder for ts.dat, id2pos.pkl, pos2id.pkl,
            ts_info.json.
        n_rows: preallocated row capacity of the mmap (must be >= the total
            number of patients across all splits).
    """
    save_path = Path(save_dir) / 'ts.dat'
    # 24 timesteps x 34 features (35 csv columns minus the patient id column).
    shape = (n_rows, 24, 34)
    write_file = np.memmap(save_path, dtype=np.float32, mode='w+', shape=shape)
    ids = []
    n = 0
    info = {}
    info['name'] = 'ts'
    for split in ['train', 'val', 'test']:
        print('split: ', split)
        csv_path = Path(data_dir) / split / 'timeseries.csv'
        df = pd.read_csv(csv_path)
        arr = df.values
        # Rows come flattened as (patients * 24, 35); regroup per patient.
        new = np.reshape(arr, (-1, 24, 35))
        # Column 0 is the patient id; it is constant across the 24 timesteps.
        pos_to_id = new[:, 0, 0]
        ids.append(pos_to_id)
        new = new[:, :, 1:]  # no patient column
        write_file[n : n+len(new), :, :] = new
        info[split + '_len'] = len(new)
        n += len(new)
        del new, arr  # free memory before loading the next split
    info['total'] = n
    info['shape'] = shape  # NOTE: the preallocated shape, not the filled size
    info['columns'] = list(df)[1:]  # feature names from the last split's csv
    del df
    ids = np.concatenate(ids)
    # Bidirectional patient-id <-> mmap-row-position maps.
    id2pos = {pid: pos for pos, pid in enumerate(ids)}
    pos2id = {pos:pid for pos, pid in enumerate(ids)}
    assert len(set(ids)) == len(ids)  # patient ids must be globally unique
    print('saving..')
    write_pkl(id2pos, Path(save_dir) / 'id2pos.pkl')
    write_pkl(pos2id, Path(save_dir) / 'pos2id.pkl')
    write_json(info, Path(save_dir) / 'ts_info.json')
    print(info)
def convert_into_mmap(data_dir, save_dir, csv_name, n_cols=None, n_rows=100000):
    """
    Read one flat csv per split (train/val/test) and pack the rows into a
    single float32 memory-mapped array, dropping the patient id column.

    Args:
        data_dir: root containing train/ val/ test/ subfolders with
            `<csv_name>.csv` each.
        save_dir: output folder for `<csv_name>.dat` and
            `<csv_name>_info.json`.
        csv_name: one of 'diagnoses', 'diagnoses_1033', 'labels', 'flat'
            (keys of `csv_to_cols`), or any csv if `n_cols` is given.
        n_cols: feature count override; defaults to the known column count
            for `csv_name` minus the patient column.
        n_rows: preallocated row capacity of the mmap.
    """
    # Known csv widths including the patient column.
    csv_to_cols = {'diagnoses': 520, 'diagnoses_1033': 1034, 'labels': 5, 'flat': 58} # including patient column
    n_cols = (csv_to_cols[csv_name] -1) if n_cols is None else n_cols
    shape = (n_rows, n_cols)
    save_path = Path(save_dir) / f'{csv_name}.dat'
    write_file = np.memmap(save_path, dtype=np.float32, mode='w+', shape=shape)
    info = {'name': csv_name, 'shape': shape}
    n = 0
    for split in ['train', 'val', 'test']:
        print('split: ', split)
        csv_path = Path(data_dir) / split / f'{csv_name}.csv'
        df = pd.read_csv(csv_path)
        arr = df.values[:, 1:]  # cut out patient column
        arr_len = len(arr)
        write_file[n : n+arr_len, :] = arr  # write into mmap
        info[split + '_len'] = arr_len
        n += arr_len
        del arr  # free memory before loading the next split
    info['total'] = n
    info['columns'] = list(df)[1:]  # feature names from the last split's csv
    write_json(info, Path(save_dir) / f'{csv_name}_info.json')
    print(info)
def read_mm(datadir, name):
    """Open a previously converted memory-mapped array plus its metadata.

    `name` can be one of {ts, diagnoses, labels, flat}; reads
    `<name>_info.json` for the stored shape and maps `<name>.dat`.

    Returns:
        (data, info): the np.memmap view and the metadata dict.
    """
    meta = load_json(Path(datadir) / (name + '_info.json'))
    mmap_path = Path(datadir) / (name + '.dat')
    arr = np.memmap(mmap_path, dtype=np.float32, shape=tuple(meta['shape']))
    return arr, meta
if __name__ == '__main__':
    # Convert preprocessed eICU csv files into memory-mapped arrays.
    # Paths are taken from paths.json: eICU_path (input), data_dir (output).
    paths = load_json('paths.json')
    data_dir = paths['eICU_path']
    save_dir = paths['data_dir']
    print(f'Load eICU processed data from {data_dir}')
    print(f'Saving mmap data in {save_dir}')
    print('--'*30)
    Path(save_dir).mkdir(exist_ok=True)
    print('** Converting time series **')
    convert_timeseries_into_mmap(data_dir, save_dir)
    # The remaining tables are flat (one row per patient).
    for csv_name in ['flat', 'diagnoses', 'labels']:
        print(f'** Converting {csv_name} **')
        convert_into_mmap(data_dir, save_dir, csv_name)
        print('--'*30)
print(f'Done! Saved data in {save_dir}') | [
"src.utils.load_json",
"pandas.read_csv",
"pathlib.Path",
"numpy.reshape",
"numpy.memmap",
"numpy.concatenate"
] | [((357, 419), 'numpy.memmap', 'np.memmap', (['save_path'], {'dtype': 'np.float32', 'mode': '"""w+"""', 'shape': 'shape'}), "(save_path, dtype=np.float32, mode='w+', shape=shape)\n", (366, 419), True, 'import numpy as np\n'), ((1076, 1095), 'numpy.concatenate', 'np.concatenate', (['ids'], {}), '(ids)\n', (1090, 1095), True, 'import numpy as np\n'), ((1882, 1944), 'numpy.memmap', 'np.memmap', (['save_path'], {'dtype': 'np.float32', 'mode': '"""w+"""', 'shape': 'shape'}), "(save_path, dtype=np.float32, mode='w+', shape=shape)\n", (1891, 1944), True, 'import numpy as np\n'), ((2889, 2912), 'src.utils.load_json', 'load_json', (['"""paths.json"""'], {}), "('paths.json')\n", (2898, 2912), False, 'from src.utils import write_json, write_pkl, load_json\n'), ((285, 299), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (289, 299), False, 'from pathlib import Path\n'), ((635, 656), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (646, 656), True, 'import pandas as pd\n'), ((695, 724), 'numpy.reshape', 'np.reshape', (['arr', '(-1, 24, 35)'], {}), '(arr, (-1, 24, 35))\n', (705, 724), True, 'import numpy as np\n'), ((1830, 1844), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (1834, 1844), False, 'from pathlib import Path\n'), ((2154, 2175), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2165, 2175), True, 'import pandas as pd\n'), ((2717, 2730), 'pathlib.Path', 'Path', (['datadir'], {}), '(datadir)\n', (2721, 2730), False, 'from pathlib import Path\n'), ((1292, 1306), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (1296, 1306), False, 'from pathlib import Path\n'), ((1345, 1359), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (1349, 1359), False, 'from pathlib import Path\n'), ((1397, 1411), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (1401, 1411), False, 'from pathlib import Path\n'), ((2480, 2494), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', 
(2484, 2494), False, 'from pathlib import Path\n'), ((2663, 2676), 'pathlib.Path', 'Path', (['datadir'], {}), '(datadir)\n', (2667, 2676), False, 'from pathlib import Path\n'), ((3103, 3117), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (3107, 3117), False, 'from pathlib import Path\n'), ((580, 594), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (584, 594), False, 'from pathlib import Path\n'), ((2098, 2112), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (2102, 2112), False, 'from pathlib import Path\n')] |
import sys
import os
import re
import multiprocessing
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
import numpy as np
import yaml
import datetime
import pickle
from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv
from baselines.common.vec_env.vec_video_recorder import VecVideoRecorder
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env
from baselines.common.tf_util import get_session
from baselines import logger
from importlib import import_module
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
# Map from env_type (e.g. 'atari') to the set of registered env ids of
# that type, built by scanning the gym registry at import time.
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
    # TODO: solve this with regexes
    env_type = env._entry_point.split(':')[0].split('.')[-1]
    _game_envs[env_type].add(env.id)

# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
    'BubbleBobble-Nes',
    'SuperMarioBros-Nes',
    'TwinBee3PokoPokoDaimaou-Nes',
    'SpaceHarrier-Nes',
    'SonicTheHedgehog-Genesis',
    'Vectorman-Genesis',
    'FinalFight-Snes',
    'SpaceInvaders-Snes',
}
def train(args, extra_args):
    """Daniel: starts training procedure, do bunch of checks for cloth env.

    The `model = learn(...)` is what calls algorithm-specific training code.
    Our cloth_config path should be in `extra_args` for now, and we can use
    that config for the random seed. This only applies if we didn't put in a
    `-seed` argument on the command line.

    We ALSO need that config path as input to even create the ClothEnv, and
    other arguments. Put those in `extra_args`.

    Returns:
        (model, env, extra_data): the trained model, the (still open) vec
        env, and the parsed cloth config dict (None for non-cloth envs).
    """
    env_type, env_id = get_env_type(args)
    logger.log('env_type: {}, env_id: {}'.format(env_type, env_id))
    extra_data = None  # Daniel: added this
    # Daniel: we have to figure out a cleaner way
    # Cloth-specific keys are popped out of extra_args later so they don't
    # leak into the algorithm's kwargs.
    cloth_cfg_path = extra_args['cloth_config'] if 'cloth_config' in extra_args else None
    render_path = extra_args['render_path'] if 'render_path' in extra_args else None
    init_state = extra_args['init_state'] if 'init_state' in extra_args else None
    demos_path = extra_args['demos_path'] if 'demos_path' in extra_args else None
    if env_type == 'cloth':
        with open(cloth_cfg_path, 'r') as fh:
            cloth_config = yaml.safe_load(fh)
        extra_data = cloth_config
        # Config seed is only a fallback for a missing --seed flag.
        if args.seed is None:
            args.seed = cloth_config['seed']
        if 'clip_act_space' in cloth_config['env']:
            extra_args['limit_act_range'] = cloth_config['env']['clip_act_space']
        # Force us to state/restrict network design to help our sanity.
        if cloth_config['env']['obs_type'] == '1d':
            if args.network is None:
                args.network = 'mlp'
            else:
                assert args.network == 'mlp', args.network
        elif cloth_config['env']['obs_type'] == 'blender':
            if args.network is None:
                args.network = 'cloth_cnn'
            else:
                assert args.network == 'cloth_cnn', args.network
        else:
            raise ValueError(cloth_config['env']['obs_type'])
        # Save the cloth file in case we want to verify the settings we used.
        _cloth_path = osp.join(logger.get_dir(), os.path.basename(cloth_cfg_path))
        with open(_cloth_path, 'w') as fh:
            yaml.dump(cloth_config, fh, default_flow_style=False)
        # Remove stuff because other dicts don't want unexpected args when we make network
        extra_args.pop('cloth_config', None)
        extra_args.pop('render_path', None)
        extra_args.pop('init_state', None)
        #extra_args.pop('demos_path', None) Actually we'd like to pass this in.
    else:
        assert 'cloth_config' not in extra_args
        assert 'render_path' not in extra_args
        assert 'init_state' not in extra_args
        assert 'demos_path' not in extra_args
    total_timesteps = int(args.num_timesteps)
    seed = args.seed
    learn = get_learn_function(args.alg)
    alg_kwargs = get_learn_function_defaults(args.alg, env_type)
    alg_kwargs.update(extra_args)
    env = build_env(args,
                    cloth_cfg_path=cloth_cfg_path,
                    render_path=render_path,
                    start_state_path=init_state)
    if args.save_video_interval != 0:
        env = VecVideoRecorder(env, osp.join(logger.get_dir(), "videos"),
                record_video_trigger=lambda x: x % args.save_video_interval == 0,
                video_length=args.save_video_length)
    # Explicit --network wins; otherwise fall back to the env-type default.
    if args.network:
        alg_kwargs['network'] = args.network
    else:
        if alg_kwargs.get('network') is None:
            alg_kwargs['network'] = get_default_network(env_type)
    # Debugging paths because we have to change paths for rendering the cloth.
    logger.log('\nTraining {} on {}:{} with arguments \n{}'.format(
        args.alg, env_type, env_id, alg_kwargs))
    logger.log('path: {}'.format(os.path.dirname(__file__)))
    logger.log('working dir: {}\n'.format(os.getcwd()))
    # Daniel: new, don't forget to build an evaluation env without noise applied.
    if args.alg == 'ddpg' or args.alg == 'imit':
        # NOTE: this will cause duplicate print logs to appear. It SHOULD be OK though!
        #eval_env = build_env(args,
        #                     cloth_cfg_path=cloth_cfg_path,
        #                     render_path=render_path,
        #                     start_state_path=init_state)
        eval_env = None
        # TODO: check that adding eval env doesn't affect monitor.csv files.
        model = learn(
            env=env,
            eval_env=eval_env,
            seed=seed,
            total_timesteps=total_timesteps,
            **alg_kwargs
        )
    else:
        model = learn(
            env=env,
            seed=seed,
            total_timesteps=total_timesteps,
            **alg_kwargs
        )
    return model, env, extra_data
def build_env(args, cloth_cfg_path=None, render_path=None, start_state_path=None):
    """Daniel: actually construct the env, using 'vector envs' for parallelism.

    For now our cloth env can follow the non-atari and non-retro stuff, because
    I don't think we need a similar kind of 'wrapping' that they do. Note that
    `VecFrameStack` is needed to stack frames, e.g., in Atari we do 4 frame
    stacking. Without that, the states would be size (84,84,1).

    The non-`args` parameters here are for the cloth env.
    """
    ncpu = multiprocessing.cpu_count()
    if sys.platform == 'darwin': ncpu //= 2  # leave headroom on macOS
    nenv = args.num_env or ncpu
    alg = args.alg
    seed = args.seed
    env_type, env_id = get_env_type(args)
    if env_type in {'atari', 'retro'}:
        if alg == 'deepq':
            env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})
        elif alg == 'trpo_mpi':
            env = make_env(env_id, env_type, seed=seed)
        else:
            frame_stack_size = 4
            env = make_vec_env(env_id, env_type, nenv, seed,
                               gamestate=args.gamestate,
                               reward_scale=args.reward_scale)
            env = VecFrameStack(env, frame_stack_size)
    else:
        # Non-(atari/retro), including cloth: single TF session, soft placement.
        config = tf.ConfigProto(allow_soft_placement=True,
                               intra_op_parallelism_threads=1,
                               inter_op_parallelism_threads=1)
        config.gpu_options.allow_growth = True
        get_session(config=config)
        flatten_dict_observations = alg not in {'her'}
        env = make_vec_env(env_id, env_type, args.num_env or 1, seed,
                           reward_scale=args.reward_scale,
                           flatten_dict_observations=flatten_dict_observations,
                           cloth_cfg_path=cloth_cfg_path,
                           render_path=render_path,
                           start_state_path=start_state_path)
        # https://github.com/openai/baselines/issues/938
        if env_type == 'mujoco' and alg != 'ddpg':
            env = VecNormalize(env)
    return env
def get_env_type(args):
    """Daniel: deduces environment type and ID.
    - env_type is the class (e.g., 'atari')
    - env_id is the actual title (e.g., 'PongNoFrameskip-v4').
    We have to make some changes to support Cloth-v0, our custom env. Note
    that env_type is used to get defaults for network architectures, etc.
    """
    env_id = args.env

    # Custom cloth env and an explicit --env_type both short-circuit lookup.
    if env_id == 'Cloth-v0':
        return 'cloth', env_id
    if args.env_type is not None:
        return args.env_type, env_id

    # Re-parse the gym registry, since we could have new envs since last time.
    for spec in gym.envs.registry.all():
        spec_type = spec._entry_point.split(':')[0].split('.')[-1]
        _game_envs[spec_type].add(spec.id)  # set membership makes this idempotent

    if env_id in _game_envs.keys():
        # The caller actually passed a *type*; pick any env of that type.
        env_type = env_id
        env_id = next(iter(_game_envs[env_type]))
    else:
        env_type = None
        for candidate_type, ids in _game_envs.items():
            if env_id in ids:
                env_type = candidate_type
                break
        if ':' in env_id:
            env_type = re.sub(r':.*', '', env_id)
    assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())
    return env_type, env_id
def get_default_network(env_type):
    """Return the default policy-network architecture for an env class."""
    # Pixel-based env families get a conv net; everything else a plain MLP.
    return 'cnn' if env_type in ('atari', 'retro') else 'mlp'
def get_alg_module(alg, submodule=None):
    """Import `submodule` of algorithm `alg` (defaults to the alg's own module).

    Raises ImportError if neither package provides the module.
    """
    submodule = submodule or alg
    try:
        # first try to import the alg module from baselines
        return import_module('.'.join(['baselines', alg, submodule]))
    except ImportError:
        # then from rl_algs
        return import_module('.'.join(['rl_' + 'algs', alg, submodule]))
def get_learn_function(alg):
    """Resolve the `learn` entry point exposed by the algorithm's module."""
    alg_module = get_alg_module(alg)
    return getattr(alg_module, 'learn')
def get_learn_function_defaults(alg, env_type):
    """Daniel: also works for cloth, just define a cloth() method in the alg module.

    Returns the kwargs produced by `<alg>.defaults.<env_type>()`, or an empty
    dict when the defaults module or the env-specific function is missing.
    """
    try:
        defaults = getattr(get_alg_module(alg, 'defaults'), env_type)()
    except (ImportError, AttributeError):
        defaults = {}
    return defaults
def parse_cmdline_kwargs(args):
    '''
    convert a list of '='-spaced command-line arguments to a dictionary,
    evaluating python objects when possible
    '''
    def _coerce(value):
        assert isinstance(value, str)
        try:
            # NOTE: eval() on operator-supplied CLI text -- acceptable for a
            # research script run by a trusted user, never for untrusted input.
            return eval(value)
        except (NameError, SyntaxError):
            return value

    parsed = {}
    for key, value in parse_unknown_args(args).items():
        parsed[key] = _coerce(value)
    return parsed
def main(args):
    """Entry point: parse CLI args, train via `train()`, optionally save the
    model, and -- when `--play` is set -- roll out the trained policy for
    NB_EVAL_EPIS episodes while logging per-episode statistics.

    Returns the trained model.
    """
    # configure logger, disable logging in child MPI processes (with rank > 0)
    arg_parser = common_arg_parser()
    args, unknown_args = arg_parser.parse_known_args(args)
    extra_args = parse_cmdline_kwargs(unknown_args)
    # Daniel: let's make it clear that if `--play` is enabled, we shouldn't be training.
    if args.play:
        assert args.num_timesteps == 0, \
            'You should not call `--play` with more than one time steps.'
    # Daniel: just add `args.save_path` for putting logs somewhere reasonable.
    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure(dir=args.save_path)
    else:
        # Non-root MPI workers keep quiet (no log formats).
        logger.configure(dir=args.save_path, format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()
    # Daniel: made cloth_config_path a return value.
    model, env, cloth_config = train(args, extra_args)
    # Only the root rank persists the model.
    if args.save_path is not None and rank == 0:
        save_path = osp.expanduser(args.save_path)
        model.save(save_path)
    # We support parallelism so might as well increase number of episodes.
    NB_EVAL_EPIS = 50
    # Subject to change depending on our reward threshold / design.
    success_threshold = 3.0
    if args.play:
        logger.info("\nRunning trained model -- no more training is happening.")
        logger.info("Will run for {} episodes.\n".format(NB_EVAL_EPIS))
        # Some checks to ensure correct parameters. Comment out if not desired.
        if args.env == 'Cloth-v0':
            # Evaluation is expected to run with this fixed seed.
            test_seed = 1600
            if args.seed is None:
                assert cloth_config['seed'] == test_seed, cloth_config['seed']
            else:
                assert args.seed == test_seed, args.seed
            assert cloth_config['env']['force_grab'], 'Usually you want force_grab==True'
        obs = env.reset()
        # Recurrent policies expose `initial_state`; feed-forward ones do not.
        state = model.initial_state if hasattr(model, 'initial_state') else None
        dones = np.zeros((1,))
        # One running reward accumulator per parallel env.
        episode_rews = np.zeros((args.num_env,))
        pstats = defaultdict(list)
        pstats['load_path'] = extra_args['load_path']
        logger.info('obs shape: {}'.format(obs.shape))
        while True:
            if state is not None:
                actions, _, state, _ = model.step(obs, S=state, M=dones)
            else:
                # Daniel: DDPG/IMIT case happens here, so we should prob set noise=False.
                actions, _, _, _ = model.step(obs, apply_noise=False)
            logger.info('executing actions:\n{}'.format(actions))
            obs, rew, done, epinfo = env.step(actions)
            #episode_rew += rew[0] if isinstance(env, VecEnv) else rew
            episode_rews += rew
            # Daniel: don't render the cloth, we have our own way of doing it.
            if args.env != 'Cloth-v0':
                env.render()
            # `done` is a per-env array for vectorized envs, a scalar otherwise.
            done_boolean = done.any() if isinstance(done, np.ndarray) else done
            if done_boolean:
                logger.info('\n\n ************ FINISHED EPISODE, done: {} ************'.format(done))
                logger.info('episode_rews: {}'.format(episode_rews))
                logger.info('And epinfo: {} (len {})'.format(epinfo, len(epinfo)))
                # Harvest statistics from every env that just finished,
                # then reset that env's reward accumulator.
                for d in range(args.num_env):
                    if done[d]:
                        pstats['r'].append(episode_rews[d])
                        pstats['c'].append(epinfo[d]['actual_coverage'])
                        pstats['iv'].append(epinfo[d]['variance_inv'])
                        pstats['oob'].append(epinfo[d]['out_of_bounds'])
                        pstats['s_c'].append(epinfo[d]['start_coverage'])
                        pstats['s_iv'].append(epinfo[d]['start_variance_inv'])
                        pstats['nsteps'].append(epinfo[d]['num_steps'])
                        episode_rews[d] = 0
                nb_epis = len(pstats['r'])
                logger.info('\nStats only for {} completed episodes:'.format(nb_epis))
                logger.info('Play rewards: {:.3f} +/- {:.1f}'.format(
                    np.mean(pstats['r']), np.std(pstats['r'])))
                logger.info('rewards max/min/median: {:.3f}, {:.3f}, {:.3f}'.format(
                    np.max(pstats['r']), np.min(pstats['r']), np.median(pstats['r'])))
                logger.info('Num steps : {:.3f} +/- {:.1f}'.format(
                    np.mean(pstats['nsteps']), np.std(pstats['nsteps'])))
                logger.info('Start inv-var: {:.3f} +/- {:.1f}'.format(
                    np.mean(pstats['s_iv']), np.std(pstats['s_iv'])))
                logger.info('Final inv-var: {:.3f} +/- {:.1f}'.format(
                    np.mean(pstats['iv']), np.std(pstats['iv'])))
                logger.info('Start coverage: {:.3f} +/- {:.1f}'.format(
                    np.mean(pstats['s_c']), np.std(pstats['s_c'])))
                logger.info('Final coverage: {:.3f} +/- {:.1f}'.format(
                    np.mean(pstats['c']), np.std(pstats['c'])))
                logger.info('Final coverage max/min/median: {:.3f}, {:.3f}, {:.3f}'.format(
                    np.max(pstats['c']), np.min(pstats['c']), np.median(pstats['c'])))
                logger.info('Out of bounds total: {}'.format(np.sum(pstats['oob'])))
                logger.info('')
                # NOTE(review): the log text below says 'coverage thresh' but
                # the comparison is against the reward-based success_threshold.
                nb_success = np.sum( np.array(pstats['r']) > success_threshold )
                logger.info('Num exceeding coverage thresh: {} / {}'.format(nb_success, nb_epis))
                logger.info('')
                if len(pstats['r']) >= NB_EVAL_EPIS:
                    logger.info('\nDONE w/{} epis, breaking ...\n'.format(NB_EVAL_EPIS))
                    # Dump collected stats to a timestamped pickle under logs/.
                    date = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
                    dname = 'policy-rollout-{}-{}-epis-{}-seed-{}-depth-{}-forcegrab-{}-stats-{}.pkl'.format(
                            args.alg,
                            NB_EVAL_EPIS,
                            cloth_config['init']['type'],
                            cloth_config['seed'],
                            cloth_config['env']['use_depth'],
                            cloth_config['env']['force_grab'],
                            date)
                    dname = osp.join('logs', dname)
                    with open(dname, 'wb') as fh:
                        pickle.dump(pstats, fh)
                    logger.info('saving at: {}'.format(dname))
                    break
                # Daniel: env.step() above internally calls reset when we have DummyVecEnv
                # Tbh, I am still not sure why baselines includes an extra env.reset() here?
                if args.env != 'Cloth-v0':
                    obs = env.reset()
    env.close()
    return model
# Script entry point: pass the complete sys.argv (program name included)
# through to main(), which performs its own argument parsing.
if __name__ == '__main__':
    main(sys.argv)
| [
"pickle.dump",
"numpy.sum",
"baselines.common.tf_util.get_session",
"gym.envs.registry.all",
"yaml.dump",
"baselines.common.cmd_util.make_env",
"collections.defaultdict",
"tensorflow.ConfigProto",
"numpy.mean",
"yaml.safe_load",
"os.path.join",
"multiprocessing.cpu_count",
"numpy.std",
"os... | [((793, 809), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (804, 809), False, 'from collections import defaultdict\n'), ((821, 844), 'gym.envs.registry.all', 'gym.envs.registry.all', ([], {}), '()\n', (842, 844), False, 'import gym\n'), ((6753, 6780), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (6778, 6780), False, 'import multiprocessing\n'), ((8937, 8960), 'gym.envs.registry.all', 'gym.envs.registry.all', ([], {}), '()\n', (8958, 8960), False, 'import gym\n'), ((11009, 11028), 'baselines.common.cmd_util.common_arg_parser', 'common_arg_parser', ([], {}), '()\n', (11026, 11028), False, 'from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\n'), ((7498, 7607), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(allow_soft_placement=True, intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n', (7512, 7607), True, 'import tensorflow as tf\n'), ((7723, 7749), 'baselines.common.tf_util.get_session', 'get_session', ([], {'config': 'config'}), '(config=config)\n', (7734, 7749), False, 'from baselines.common.tf_util import get_session\n'), ((7819, 8063), 'baselines.common.cmd_util.make_vec_env', 'make_vec_env', (['env_id', 'env_type', '(args.num_env or 1)', 'seed'], {'reward_scale': 'args.reward_scale', 'flatten_dict_observations': 'flatten_dict_observations', 'cloth_cfg_path': 'cloth_cfg_path', 'render_path': 'render_path', 'start_state_path': 'start_state_path'}), '(env_id, env_type, args.num_env or 1, seed, reward_scale=args.\n reward_scale, flatten_dict_observations=flatten_dict_observations,\n cloth_cfg_path=cloth_cfg_path, render_path=render_path,\n start_state_path=start_state_path)\n', (7831, 8063), False, 'from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\n'), ((11523, 11559), 
'baselines.logger.configure', 'logger.configure', ([], {'dir': 'args.save_path'}), '(dir=args.save_path)\n', (11539, 11559), False, 'from baselines import logger\n'), ((11578, 11630), 'baselines.logger.configure', 'logger.configure', ([], {'dir': 'args.save_path', 'format_strs': '[]'}), '(dir=args.save_path, format_strs=[])\n', (11594, 11630), False, 'from baselines import logger\n'), ((11646, 11671), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (11669, 11671), False, 'from mpi4py import MPI\n'), ((11851, 11881), 'os.path.expanduser', 'osp.expanduser', (['args.save_path'], {}), '(args.save_path)\n', (11865, 11881), True, 'import os.path as osp\n'), ((12134, 12209), 'baselines.logger.info', 'logger.info', (['"""\nRunning trained model -- no more training is happening."""'], {}), '("""\nRunning trained model -- no more training is happening.""")\n', (12145, 12209), False, 'from baselines import logger\n'), ((12826, 12840), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (12834, 12840), True, 'import numpy as np\n'), ((12864, 12889), 'numpy.zeros', 'np.zeros', (['(args.num_env,)'], {}), '((args.num_env,))\n', (12872, 12889), True, 'import numpy as np\n'), ((12907, 12924), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12918, 12924), False, 'from collections import defaultdict\n'), ((2516, 2534), 'yaml.safe_load', 'yaml.safe_load', (['fh'], {}), '(fh)\n', (2530, 2534), False, 'import yaml\n'), ((3482, 3498), 'baselines.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (3496, 3498), False, 'from baselines import logger\n'), ((3500, 3532), 'os.path.basename', 'os.path.basename', (['cloth_cfg_path'], {}), '(cloth_cfg_path)\n', (3516, 3532), False, 'import os\n'), ((3589, 3642), 'yaml.dump', 'yaml.dump', (['cloth_config', 'fh'], {'default_flow_style': '(False)'}), '(cloth_config, fh, default_flow_style=False)\n', (3598, 3642), False, 'import yaml\n'), ((5224, 5249), 'os.path.dirname', 'os.path.dirname', 
(['__file__'], {}), '(__file__)\n', (5239, 5249), False, 'import os\n'), ((5294, 5305), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5303, 5305), False, 'import os\n'), ((7024, 7099), 'baselines.common.cmd_util.make_env', 'make_env', (['env_id', 'env_type'], {'seed': 'seed', 'wrapper_kwargs': "{'frame_stack': True}"}), "(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})\n", (7032, 7099), False, 'from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\n'), ((8312, 8329), 'baselines.common.vec_env.VecNormalize', 'VecNormalize', (['env'], {}), '(env)\n', (8324, 8329), False, 'from baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv\n'), ((9425, 9450), 're.sub', 're.sub', (['""":.*"""', '""""""', 'env_id'], {}), "(':.*', '', env_id)\n", (9431, 9450), False, 'import re\n'), ((11466, 11491), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'MPI.COMM_WORLD.Get_rank', ([], {}), '()\n', (11489, 11491), False, 'from mpi4py import MPI\n'), ((4608, 4624), 'baselines.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (4622, 4624), False, 'from baselines import logger\n'), ((7150, 7187), 'baselines.common.cmd_util.make_env', 'make_env', (['env_id', 'env_type'], {'seed': 'seed'}), '(env_id, env_type, seed=seed)\n', (7158, 7187), False, 'from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\n'), ((7253, 7357), 'baselines.common.cmd_util.make_vec_env', 'make_vec_env', (['env_id', 'env_type', 'nenv', 'seed'], {'gamestate': 'args.gamestate', 'reward_scale': 'args.reward_scale'}), '(env_id, env_type, nenv, seed, gamestate=args.gamestate,\n reward_scale=args.reward_scale)\n', (7265, 7357), False, 'from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\n'), ((7434, 7470), 'baselines.common.vec_env.VecFrameStack', 'VecFrameStack', (['env', 'frame_stack_size'], {}), '(env, frame_stack_size)\n', (7447, 7470), False, 'from 
baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv\n'), ((16148, 16163), 'baselines.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (16159, 16163), False, 'from baselines import logger\n'), ((16359, 16374), 'baselines.logger.info', 'logger.info', (['""""""'], {}), "('')\n", (16370, 16374), False, 'from baselines import logger\n'), ((10861, 10885), 'baselines.common.cmd_util.parse_unknown_args', 'parse_unknown_args', (['args'], {}), '(args)\n', (10879, 10885), False, 'from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\n'), ((17108, 17131), 'os.path.join', 'osp.join', (['"""logs"""', 'dname'], {}), "('logs', dname)\n", (17116, 17131), True, 'import os.path as osp\n'), ((14926, 14946), 'numpy.mean', 'np.mean', (["pstats['r']"], {}), "(pstats['r'])\n", (14933, 14946), True, 'import numpy as np\n'), ((14948, 14967), 'numpy.std', 'np.std', (["pstats['r']"], {}), "(pstats['r'])\n", (14954, 14967), True, 'import numpy as np\n'), ((15079, 15098), 'numpy.max', 'np.max', (["pstats['r']"], {}), "(pstats['r'])\n", (15085, 15098), True, 'import numpy as np\n'), ((15100, 15119), 'numpy.min', 'np.min', (["pstats['r']"], {}), "(pstats['r'])\n", (15106, 15119), True, 'import numpy as np\n'), ((15121, 15143), 'numpy.median', 'np.median', (["pstats['r']"], {}), "(pstats['r'])\n", (15130, 15143), True, 'import numpy as np\n'), ((15238, 15263), 'numpy.mean', 'np.mean', (["pstats['nsteps']"], {}), "(pstats['nsteps'])\n", (15245, 15263), True, 'import numpy as np\n'), ((15265, 15289), 'numpy.std', 'np.std', (["pstats['nsteps']"], {}), "(pstats['nsteps'])\n", (15271, 15289), True, 'import numpy as np\n'), ((15388, 15411), 'numpy.mean', 'np.mean', (["pstats['s_iv']"], {}), "(pstats['s_iv'])\n", (15395, 15411), True, 'import numpy as np\n'), ((15413, 15435), 'numpy.std', 'np.std', (["pstats['s_iv']"], {}), "(pstats['s_iv'])\n", (15419, 15435), True, 'import numpy as np\n'), ((15534, 15555), 'numpy.mean', 'np.mean', 
(["pstats['iv']"], {}), "(pstats['iv'])\n", (15541, 15555), True, 'import numpy as np\n'), ((15557, 15577), 'numpy.std', 'np.std', (["pstats['iv']"], {}), "(pstats['iv'])\n", (15563, 15577), True, 'import numpy as np\n'), ((15676, 15698), 'numpy.mean', 'np.mean', (["pstats['s_c']"], {}), "(pstats['s_c'])\n", (15683, 15698), True, 'import numpy as np\n'), ((15700, 15721), 'numpy.std', 'np.std', (["pstats['s_c']"], {}), "(pstats['s_c'])\n", (15706, 15721), True, 'import numpy as np\n'), ((15820, 15840), 'numpy.mean', 'np.mean', (["pstats['c']"], {}), "(pstats['c'])\n", (15827, 15840), True, 'import numpy as np\n'), ((15842, 15861), 'numpy.std', 'np.std', (["pstats['c']"], {}), "(pstats['c'])\n", (15848, 15861), True, 'import numpy as np\n'), ((15980, 15999), 'numpy.max', 'np.max', (["pstats['c']"], {}), "(pstats['c'])\n", (15986, 15999), True, 'import numpy as np\n'), ((16001, 16020), 'numpy.min', 'np.min', (["pstats['c']"], {}), "(pstats['c'])\n", (16007, 16020), True, 'import numpy as np\n'), ((16022, 16044), 'numpy.median', 'np.median', (["pstats['c']"], {}), "(pstats['c'])\n", (16031, 16044), True, 'import numpy as np\n'), ((16108, 16129), 'numpy.sum', 'np.sum', (["pstats['oob']"], {}), "(pstats['oob'])\n", (16114, 16129), True, 'import numpy as np\n'), ((16201, 16222), 'numpy.array', 'np.array', (["pstats['r']"], {}), "(pstats['r'])\n", (16209, 16222), True, 'import numpy as np\n'), ((17206, 17229), 'pickle.dump', 'pickle.dump', (['pstats', 'fh'], {}), '(pstats, fh)\n', (17217, 17229), False, 'import pickle\n'), ((16544, 16567), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16565, 16567), False, 'import datetime\n')] |
from json import loads
from django.db.models.base import Model
from django.http.response import JsonResponse
from rest_framework.viewsets import ModelViewSet
from .models import Review, Area
from .serializer import ReviewSerializer, AreaSerializer
from django.views.decorators.csrf import csrf_exempt
from django.db import connection
import numpy as np
class ReviewViewSet(ModelViewSet):
    """DRF ModelViewSet exposing all Review objects via ReviewSerializer."""
    queryset = Review.objects.all()
    serializer_class = ReviewSerializer
class AreaViewSet(ModelViewSet):
    """DRF ModelViewSet exposing all Area objects via AreaSerializer."""
    queryset = Area.objects.all()
    serializer_class = AreaSerializer
@csrf_exempt
def search_area_by_senses(request):
    """Rank areas in a given city/county by how well the most frequent
    sense labels of their reviews match the requested senses.

    Expects a POST body of the form:
        {"area": {"city": ..., "county": ...},
         "sight": ..., "touch": ..., "taste": ...}

    Temporary SQL views compute, per area, the modal sight/touch/taste
    label among its reviews.  Areas matching all three requested senses
    are preferred; if fewer than 10, areas matching any two, then any
    one, are used.  At most 10 area ids are kept and the matching Area
    rows are returned as JSON by the caller-visible return below.

    NOTE: non-POST requests fall through without defining `response`
    (pre-existing behavior, unchanged here).
    """
    if request.method == 'POST':
        dictQuery = loads(request.body.decode('utf-8'))
        city = dictQuery['area']['city']
        county = dictQuery['area']['county']
        sight = dictQuery['sight']
        touch = dictQuery['touch']
        taste = dictQuery['taste']
        with connection.cursor() as cursor:
            # One temporary view per sense: for every area, the label of
            # that sense with the highest review count.
            sqlQuery = "CREATE VIEW sightCount AS " + \
                "SELECT area_id, sight, MAX(cnt) " + \
                "FROM(SELECT area_id, sight, count(sight) as cnt " + \
                "FROM reviews_review GROUP BY sight, area_id) GROUP BY area_id;"
            cursor.execute(sqlQuery)
            sqlQuery = "CREATE VIEW touchCount AS " + \
                "SELECT area_id, touch, MAX(cnt) " + \
                "FROM(SELECT area_id, touch, count(touch) as cnt " + \
                "FROM reviews_review GROUP BY touch, area_id) GROUP BY area_id;"
            cursor.execute(sqlQuery)
            sqlQuery = "CREATE VIEW tasteCount AS " + \
                "SELECT area_id, taste, MAX(cnt) " + \
                "FROM(SELECT area_id, taste, count(taste) as cnt " + \
                "FROM reviews_review GROUP BY taste, area_id) GROUP BY area_id;"
            cursor.execute(sqlQuery)
            # Join the three views into one (area_id, sight, touch, taste) row set.
            sqlQuery = "SELECT sightCount.area_id, sight, touch, taste " + \
                "FROM sightCount, touchCount, tasteCount " + \
                "WHERE sightCount.area_id = touchCount.area_id " + \
                "AND touchCount.area_id = tasteCount.area_id;"
            cursor.execute(sqlQuery)
            maxSenseofArea = cursor.fetchall()
            cursor.execute("DROP VIEW sightCount;")
            cursor.execute("DROP VIEW touchCount;")
            cursor.execute("DROP VIEW tasteCount;")
            # Parameterized query: city/county come straight from the request
            # body and must never be concatenated into SQL (injection risk).
            cursor.execute(
                "SELECT id FROM reviews_area WHERE city = %s AND county = %s;",
                [city, county])
            queryResult = cursor.fetchall()
        findArea = [row[0] for row in queryResult]
        # Buckets by how many (and which) of the requested senses match.
        sightResult = []
        touchResult = []
        tasteResult = []
        sightTouch = []
        touchTaste = []
        tasteSight = []
        allofthem = []
        for i in maxSenseofArea:
            if i[0] in findArea:
                if sight == i[1]:
                    sightResult.append(i[0])
                    if touch == i[2]:
                        sightTouch.append(i[0])
                        if taste == i[3]:
                            allofthem.append(i[0])
                if touch == i[2]:
                    touchResult.append(i[0])
                    if taste == i[3]:
                        touchTaste.append(i[0])
                if taste == i[3]:
                    tasteResult.append(i[0])
                    if sight == i[1]:
                        tasteSight.append(i[0])
        # Fall back to two-sense, then single-sense matches when sparse.
        if len(allofthem) < 10:
            allofthem = np.unique(sightTouch + touchTaste + tasteSight)
            if len(allofthem) < 10:
                allofthem = np.unique(sightResult + touchResult + tasteResult)
        allofthem = allofthem[:10]
        response = Area.objects.filter(id__in=allofthem)
        response = list(response.values())
        response = {"result": response}
return JsonResponse(response, safe=False) | [
"django.http.response.JsonResponse",
"numpy.unique",
"django.db.connection.cursor"
] | [((4247, 4281), 'django.http.response.JsonResponse', 'JsonResponse', (['response'], {'safe': '(False)'}), '(response, safe=False)\n', (4259, 4281), False, 'from django.http.response import JsonResponse\n'), ((934, 953), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (951, 953), False, 'from django.db import connection\n'), ((3861, 3908), 'numpy.unique', 'np.unique', (['(sightTouch + touchTaste + tasteSight)'], {}), '(sightTouch + touchTaste + tasteSight)\n', (3870, 3908), True, 'import numpy as np\n'), ((3977, 4027), 'numpy.unique', 'np.unique', (['(sightResult + touchResult + tasteResult)'], {}), '(sightResult + touchResult + tasteResult)\n', (3986, 4027), True, 'import numpy as np\n')] |
import sys,os
from rdkit import Chem
srcPath = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(1, srcPath)
from src.UsefulFunctions import *
from src.SyntOn_BBs import *
def main(args):
    """Write per-BB scaffold annotations, scaffold occurrence counts, and a
    cumulative scaffold-frequency table for the input BB file, then plot the
    cumulative table via scaffoldPlot().
    """
    scaffold_counts = {}
    with open(args.output + "_Scaffolds.smi", "w") as out:
        with open(args.input) as bb_file:
            for raw_line in bb_file:
                record = raw_line.strip()
                if not record:
                    continue
                scaffold, mol = generateScaffoldForBB(record.split()[0], returnObjects=True)
                if not mol:
                    continue
                out.write(record + " " + Chem.MolToSmiles(mol))
                if scaffold:
                    scaffold_counts[scaffold] = scaffold_counts.get(scaffold, 0) + 1
                    out.write(" " + scaffold + "\n")
                else:
                    # Ring-free BBs have no scaffold at all.
                    out.write(" linearMolecule\n")

    # Rank scaffolds by occurrence, most frequent first.
    ranked = sorted(scaffold_counts, key=scaffold_counts.get, reverse=True)
    ranked_counts = {s: scaffold_counts[s] for s in ranked}
    scaffold_counts.clear()

    with open(args.output + "_scaffoldsCounts.smi", "w") as counts_out:
        for scaffold in ranked_counts:
            counts_out.write(scaffold + " " + str(ranked_counts[scaffold]) + "\n")

    # Cumulative %-of-scaffolds vs %-of-BBs table, one row per scaffold.
    with open(args.output + "_cumulativeprecentage.smi", "w") as cum_out:
        running = 0
        total_bbs = sum(ranked_counts.values())
        total_scaffolds = len(ranked_counts)
        for rank, scaffold in enumerate(ranked_counts):
            running += ranked_counts[scaffold]
            scaffold_pct = int(round((rank + 1) / total_scaffolds * 100))
            bb_pct = int(round(running / total_bbs * 100))
            cum_out.write(str(scaffold_pct) + " " + str(bb_pct) + "\n")
    ranked_counts.clear()
    scaffoldPlot(args.output + "_cumulativeprecentage.smi", args.output)
def scaffoldPlot(cumPercentageFile, outName):
    """Draw and save the cumulative scaffold frequency plot.

    Args:
        cumPercentageFile: two-column space-separated file, each row holding
            cumulative %-of-scaffolds and cumulative %-of-BBs.
        outName: suffix used in the saved PNG file name.
    """
    from matplotlib import pyplot as plt
    from numpy import genfromtxt
    data = genfromtxt(cumPercentageFile, delimiter=' ', names=['x', 'y'])
    fig, ax = plt.subplots()
    ax.tick_params(axis='both', which='major', labelsize=12)
    plt.plot(data['x'], data['y'], color="darkgreen")
    # Positional limits: the ymin/ymax and xmin/xmax keyword spellings were
    # removed in Matplotlib 3.3, so use the version-agnostic form.
    plt.ylim(0, 100)
    plt.xlim(0, 100)
    plt.ylabel("Fraction of BBs, %", fontweight='bold', fontsize=14)
    plt.xlabel("Fraction of scaffolds, %", fontweight='bold', fontsize=14)
    plt.title("Cumulative Scaffold Frequency Plot", fontweight='bold', fontsize=14)
    plt.savefig("Scaffolds_FreqPlot_" + outName + ".png")
    # Release the figure so repeated calls do not accumulate open figures.
    plt.close(fig)
# CLI wrapper: -i/--input is the BB SMILES file, -o/--output the suffix
# used for all generated result files.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="BBs Scaffold analysis. Generates meaningful BBs scaffolds after removing ring-containing leaving and protective groups. Count scaffolds occurrence in the provided collection of BBs, and construct cumulative scaffold frequency plot.",
                             epilog="Code implementation:                <NAME>, <NAME>\n"
                                    "                                    Laboratoire de Chémoinformatique, Université de Strasbourg.\n\n"
                                    "Knowledge base (SMARTS library):    <NAME>, <NAME>, <NAME>, <NAME>\n"
                                    "                                    Institute of Organic Chemistry, National Academy of Sciences of Ukraine\n"
                                    "                                    Kyiv National Taras Shevchenko University\n"
                                    "2021 Strasbourg, Kiev",
                             prog="SyntOn_BBScaffoldGeneration", formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-i", "--input", type=str, help="Input BBs file.")
    parser.add_argument("-o", "--output", type=str, help="Output files suffix name.")
    args = parser.parse_args()
    main(args)
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"os.path.realpath",
"numpy.genfromtxt",
"sys.path.insert",
"rdkit.Chem.MolToSmiles",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.su... | [((92, 119), 'sys.path.insert', 'sys.path.insert', (['(1)', 'srcPath'], {}), '(1, srcPath)\n', (107, 119), False, 'import sys, os\n'), ((1967, 2029), 'numpy.genfromtxt', 'genfromtxt', (['cumPercentageFile'], {'delimiter': '""" """', 'names': "['x', 'y']"}), "(cumPercentageFile, delimiter=' ', names=['x', 'y'])\n", (1977, 2029), False, 'from numpy import genfromtxt\n'), ((2044, 2058), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2056, 2058), True, 'from matplotlib import pyplot as plt\n'), ((2124, 2173), 'matplotlib.pyplot.plot', 'plt.plot', (["Data['x']", "Data['y']"], {'color': '"""darkgreen"""'}), "(Data['x'], Data['y'], color='darkgreen')\n", (2132, 2173), True, 'from matplotlib import pyplot as plt\n'), ((2178, 2204), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)', 'ymax': '(100)'}), '(ymin=0, ymax=100)\n', (2186, 2204), True, 'from matplotlib import pyplot as plt\n'), ((2209, 2235), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmin': '(0)', 'xmax': '(100)'}), '(xmin=0, xmax=100)\n', (2217, 2235), True, 'from matplotlib import pyplot as plt\n'), ((2240, 2304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fraction of BBs, %"""'], {'fontweight': '"""bold"""', 'fontsize': '(14)'}), "('Fraction of BBs, %', fontweight='bold', fontsize=14)\n", (2250, 2304), True, 'from matplotlib import pyplot as plt\n'), ((2309, 2379), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of scaffolds, %"""'], {'fontweight': '"""bold"""', 'fontsize': '(14)'}), "('Fraction of scaffolds, %', fontweight='bold', fontsize=14)\n", (2319, 2379), True, 'from matplotlib import pyplot as plt\n'), ((2384, 2463), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Scaffold Frequency Plot"""'], {'fontweight': '"""bold"""', 'fontsize': '(14)'}), "('Cumulative Scaffold Frequency Plot', fontweight='bold', fontsize=14)\n", (2393, 2463), True, 'from matplotlib import pyplot as plt\n'), ((2468, 2521), 'matplotlib.pyplot.savefig', 
'plt.savefig', (["('Scaffolds_FreqPlot_' + outName + '.png')"], {}), "('Scaffolds_FreqPlot_' + outName + '.png')\n", (2479, 2521), True, 'from matplotlib import pyplot as plt\n'), ((2584, 3399), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""BBs Scaffold analysis. Generates meaningful BBs scaffolds after removing ring-containing leaving and protective groups. Count scaffolds occurrence in the provided collection of BBs, and construct cumulative scaffold frequency plot."""', 'epilog': '"""Code implementation: <NAME>, <NAME>\n Laboratoire de Chémoinformatique, Université de Strasbourg.\n\nKnowledge base (SMARTS library): <NAME>, <NAME>, <NAME>, <NAME>\n Institute of Organic Chemistry, National Academy of Sciences of Ukraine\n Kyiv National Taras Shevchenko University\n2021 Strasbourg, Kiev"""', 'prog': '"""SyntOn_BBScaffoldGeneration"""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=\n \'BBs Scaffold analysis. Generates meaningful BBs scaffolds after removing ring-containing leaving and protective groups. Count scaffolds occurrence in the provided collection of BBs, and construct cumulative scaffold frequency plot.\'\n , epilog=\n """Code implementation: <NAME>, <NAME>\n Laboratoire de Chémoinformatique, Université de Strasbourg.\n\nKnowledge base (SMARTS library): <NAME>, <NAME>, <NAME>, <NAME>\n Institute of Organic Chemistry, National Academy of Sciences of Ukraine\n Kyiv National Taras Shevchenko University\n2021 Strasbourg, Kiev"""\n , prog=\'SyntOn_BBScaffoldGeneration\', formatter_class=argparse.\n RawTextHelpFormatter)\n', (2607, 3399), False, 'import argparse\n'), ((61, 87), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (77, 87), False, 'import sys, os\n'), ((540, 561), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (556, 561), False, 'from rdkit import Chem\n')] |
"""Plot per-term training losses and their weighted aggregates from a log dir."""
import numpy as np
from pathlib import Path
from argparse import ArgumentParser
import matplotlib.pyplot as plt

parser = ArgumentParser()
parser.add_argument("--logdir", type=Path, required=True)
parser.add_argument("--dot", type=int, default=-1)
parser.add_argument("--begin_iter", type=int, default=0)
args = parser.parse_args()

# One text file per loss term, all under --logdir.
loss_file_names = ["advloss_d.txt", "clsloss_f.txt", "cycloss.txt", "advloss_g.txt", "clsloss_r.txt", "recloss.txt"]
loss_paths = [args.logdir / fname for fname in loss_file_names]
curves = [np.loadtxt(path)[args.begin_iter:] for path in loss_paths]
curve_names = [path.stem for path in loss_paths]
print(f"#Iteration: {curves[0].shape[0]}")


def _draw(xs, ys, label):
    # One curve plus a red marker at the --dot index.
    plt.plot(xs, ys, label=label)
    plt.plot(xs[args.dot], ys[args.dot], ".", color="red")


# Figure 1: every raw loss term.
for name, curve in zip(curve_names, curves):
    _draw(np.arange(curve.shape[0]), curve, name)
plt.legend()
plt.show()

# Figure 2: aggregate losses (all weights currently 1.0).
w_adv = 1.0
w_cls = 1.0
w_cyc = 1.0
w_rec = 1.0
gen_loss = w_adv * curves[3] + w_cls * curves[1] + w_cyc * curves[2] + w_rec * curves[5]
cls_loss = curves[4]
advdis_loss = curves[0]
x = np.arange(gen_loss.shape[0])
_draw(x, gen_loss, "generator total loss")
_draw(x, cls_loss, "classifier total loss")
_draw(x, advdis_loss, "discriminator total loss")
plt.legend()
plt.show()
| [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.arange",
"numpy.loadtxt"
] | [((122, 138), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (136, 138), False, 'from argparse import ArgumentParser\n'), ((857, 869), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (867, 869), True, 'import matplotlib.pyplot as plt\n'), ((870, 880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (878, 880), True, 'import matplotlib.pyplot as plt\n'), ((1071, 1099), 'numpy.arange', 'np.arange', (['gen_loss.shape[0]'], {}), '(gen_loss.shape[0])\n', (1080, 1099), True, 'import numpy as np\n'), ((1100, 1151), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'gen_loss'], {'label': '"""generator total loss"""'}), "(x, gen_loss, label='generator total loss')\n", (1108, 1151), True, 'import matplotlib.pyplot as plt\n'), ((1152, 1211), 'matplotlib.pyplot.plot', 'plt.plot', (['x[args.dot]', 'gen_loss[args.dot]', '"""."""'], {'color': '"""red"""'}), "(x[args.dot], gen_loss[args.dot], '.', color='red')\n", (1160, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1264), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'cls_loss'], {'label': '"""classifier total loss"""'}), "(x, cls_loss, label='classifier total loss')\n", (1220, 1264), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1324), 'matplotlib.pyplot.plot', 'plt.plot', (['x[args.dot]', 'cls_loss[args.dot]', '"""."""'], {'color': '"""red"""'}), "(x[args.dot], cls_loss[args.dot], '.', color='red')\n", (1273, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1325, 1383), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'advdis_loss'], {'label': '"""discriminator total loss"""'}), "(x, advdis_loss, label='discriminator total loss')\n", (1333, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1384, 1446), 'matplotlib.pyplot.plot', 'plt.plot', (['x[args.dot]', 'advdis_loss[args.dot]', '"""."""'], {'color': '"""red"""'}), "(x[args.dot], advdis_loss[args.dot], '.', color='red')\n", (1392, 1446), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1459), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1457, 1459), True, 'import matplotlib.pyplot as plt\n'), ((1460, 1470), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1468, 1470), True, 'import matplotlib.pyplot as plt\n'), ((742, 765), 'numpy.arange', 'np.arange', (['los.shape[0]'], {}), '(los.shape[0])\n', (751, 765), True, 'import numpy as np\n'), ((770, 797), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'los'], {'label': 'lab'}), '(x, los, label=lab)\n', (778, 797), True, 'import matplotlib.pyplot as plt\n'), ((802, 856), 'matplotlib.pyplot.plot', 'plt.plot', (['x[args.dot]', 'los[args.dot]', '"""."""'], {'color': '"""red"""'}), "(x[args.dot], los[args.dot], '.', color='red')\n", (810, 856), True, 'import matplotlib.pyplot as plt\n'), ((550, 566), 'numpy.loadtxt', 'np.loadtxt', (['file'], {}), '(file)\n', (560, 566), True, 'import numpy as np\n')] |
from adjustText import adjust_text
import copy
import csv
import matplotlib
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import pickle
import scipy.cluster.hierarchy as SciPyClus
import scipy.stats as scs
from singscore.singscore import *
import sys
class PathDir:
    """Centralised filesystem locations used by this script.

    Every path is derived from the directory of the executing script
    (``sys.argv[0]``): the repository base is taken to be the parent of
    that directory, and the remaining attributes are fixed sub-folders.
    """
    # directory containing the running script
    dirCurrent = os.path.dirname(sys.argv[0])
    # repository root: one level above the script directory
    dirBaseGit = os.path.dirname(os.path.normpath(dirCurrent))
    # figure output folder
    pathOutFolder = os.path.join(dirBaseGit, 'figures')
    # pre-processed RNA-seq quantification / differential expression data
    pathProcRNAData = os.path.join(dirBaseGit, 'preproc', 'rnaseq')
    # reference annotation data (gene lists, GTF-derived mappings)
    pathRefData = os.path.join(dirBaseGit, 'preproc', 'ref')
    # downloaded public data sets (TCGA, CCLE/DepMap)
    pathPublicData = os.path.join(dirBaseGit, 'preproc', 'public_data')
    # NOTE(review): other code in this file references PathDir.pathProcResults
    # and PathDir.strDataPath, which are not defined here - confirm whether
    # those attributes are added elsewhere before running those code paths.
class Process:
    """Namespace of data-loading/processing routines for the ZEB1 epiCRISPR
    RNA-seq analysis.  Methods take no ``self`` and are called directly as
    ``Process.<name>()`` without instantiation.
    """
    # cell line names as displayed in figures
    listLinesForDisp = ['MDA-MB-231',
                        'SUM159']
    # the same names with hyphens stripped, as used in file names
    listLines = [strLine.replace('-','') for strLine in listLinesForDisp]
    # per-line voom-limma differential expression result files (gAll vs EVC)
    listDiffExprFiles = [
        f'voom-limma_{strLine}_GAll-EVC_diffExpr.csv' for strLine in listLines]
    # condition labels (triplicates) for each cell line
    # NOTE(review): assumed to match the quantification table column order -
    # confirm against the RNA-seq sample sheet
    listOfListsConds = [['NTC', 'NTC', 'NTC',
                         'EVC', 'EVC', 'EVC',
                         'g4', 'g4', 'g4',
                         'gAll', 'gAll', 'gAll'],
                        ['NTC', 'NTC', 'NTC',
                         'EVC', 'EVC', 'EVC',
                         'g4', 'g4', 'g4',
                         'gAll', 'gAll', 'gAll']]
def quant_data(flagResult=False):
strQuantFile = 'Waryah_Oct2017_ZEB1-epiCRISPR_QuantGeneLevel_lengthScaledTPM.csv'
dfData = pd.read_table(os.path.join(PathDir.pathProcRNAData, strQuantFile),
sep=',', header=0, index_col=0)
return dfData
    def diff_expr_data(flagResult=False):
        """Load and merge the per-cell-line voom-limma differential expression
        tables into one DataFrame.

        For each file in ``Process.listDiffExprFiles``: drop unused statistic
        columns, replace null adj. p-values with 1.0 and null logFC with 0.0,
        and prefix the remaining columns with the cell line name (except the
        shared 'external_gene_name' / 'AveExpr' columns).  The per-line frames
        are then concatenated column-wise.
        """
        listDFToMerge = []
        for iFile in range(len(Process.listDiffExprFiles)):
            strFileName = Process.listDiffExprFiles[iFile]
            # strCond = strFileName.split('.csv')[0]
            # cell line name is embedded in the file name between the
            # 'voom-limma_' prefix and the '_GAll-EVC_diffExpr.csv' suffix
            strCellLine = strFileName.split('_GAll-EVC_diffExpr.csv')[0].split('voom-limma_')[1]
            dfIn = pd.read_csv(os.path.join(PathDir.pathProcRNAData, strFileName),
                               sep=',', header=0, index_col=0)
            # keep 'AveExpr' only from the first file; it is shared across lines
            if iFile == 0:
                dfIn.drop(labels=['t', 'P.Value'],
                          axis=1,
                          inplace=True)
            else:
                dfIn.drop(labels=['AveExpr', 't', 'P.Value'],
                          axis=1,
                          inplace=True)
            # genes without statistics are treated as non-significant (p=1)
            # and unchanged (logFC=0)
            arrayHasNullStats = dfIn['adj.P.Val'].isnull().astype(bool)
            arrayHasNullDiffExpr = dfIn['logFC'].isnull().astype(bool)
            arrayAdjPVals = dfIn['adj.P.Val'].values.astype(float)
            arrayLogFC = dfIn['logFC'].values.astype(float)
            arrayAdjPVals[np.where(arrayHasNullStats)[0]] = 1.0
            arrayLogFC[np.where(arrayHasNullDiffExpr)[0]] = 0.0
            dfIn['adj.P.Val'] = pd.Series(arrayAdjPVals, index=dfIn.index.tolist())
            dfIn['logFC'] = pd.Series(arrayLogFC, index=dfIn.index.tolist())
            # prefix per-line statistic columns as '<CellLine>:<stat>'
            listColumns = dfIn.columns.tolist()
            dictColToRename = {}
            for strCol in listColumns:
                if np.bitwise_or(strCol == 'external_gene_name', strCol == 'AveExpr'):
                    dictColToRename[strCol] = strCol
                else:
                    dictColToRename[strCol] = strCellLine + ':' + strCol
            dfIn.rename(columns=dictColToRename,
                        inplace=True)
            listDFToMerge.append(dfIn)
        dfMerged = pd.concat(listDFToMerge, axis=1, sort=True)
        return dfMerged
def tcga_scores(flagResult=False,
dfIn=pd.DataFrame(),
flagPerformExtraction=False):
strTempFileName = 'TCGA-BRCA-EpiMesScores.tsv'
pathOut = os.path.join(PathDir.pathOutFolder, 'figure_5')
if not os.path.exists(os.path.join(pathOut, strTempFileName)):
flagPerformExtraction = True
if flagPerformExtraction:
listTCGAGenes = dfIn.index.tolist()
listTCGASamples = dfIn.columns.tolist()
numSamples = len(listTCGASamples)
dictEpiMesCellLine = Process.tan2012_tissue_genes()
listEpiTissueGenes = dictEpiMesCellLine['epi_genes']
listMesTissueGenes = dictEpiMesCellLine['mes_genes']
# create lists of the cell line/tissue epithelial/mesenchymal gene lists for scoring
listOutputEpiTissueGenesMatched = [strGene for strGene in listTCGAGenes
if strGene.split('|')[0] in listEpiTissueGenes]
listOutputMesTissueGenesMatched = [strGene for strGene in listTCGAGenes
if strGene.split('|')[0] in listMesTissueGenes]
dfScoresOut = pd.DataFrame(
{'Epithelial Score':np.zeros(numSamples, dtype=float),
'Mesenchymal Score':np.zeros(numSamples, dtype=float)},
index=listTCGASamples)
for iSample in range(numSamples):
print('Patient ' + '{}'.format(iSample))
strSample = listTCGASamples[iSample]
dfScore = score(up_gene=listOutputEpiTissueGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Epithelial Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScore = score(up_gene=listOutputMesTissueGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Mesenchymal Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScoresOut.to_csv(os.path.join(pathOut, strTempFileName),
sep='\t')
else:
dfScoresOut = pd.read_table(os.path.join(pathOut, strTempFileName),
sep='\t', index_col=0, header=0)
return dfScoresOut
    def tcga_brca(flagResult=False,
                  flagPerformExtraction=False):
        """Extract (or load cached) the TCGA-BRCA primary-tumour subset of the
        pan-cancer RNA-seq matrix.

        Breast cancer patients are identified from the TCGA-CDR metadata
        workbook; only primary tumour samples (sample-type code '01') are
        kept.  The resulting DataFrame is cached as a pickle.
        """
        strPanCanRNASeqFile = 'EBPlusPlusAdjustPANCAN_IlluminaHiSeq_RNASeqV2.geneExp.tsv'
        strTempFileName = 'TCGA_BrCa_PreProc_RNA.pickle'
        if not os.path.exists(os.path.join(PathDir.pathPublicData, strTempFileName)):
            flagPerformExtraction = True
        if flagPerformExtraction:
            # extract the TCGA pan-cancer patient metadata
            dfMeta = pd.read_excel(
                os.path.join(PathDir.pathPublicData, 'TCGA-CDR-SupplementalTableS1.xlsx'),
                header=0, index_col=0, sheet_name='TCGA-CDR')
            dfMeta.set_index('bcr_patient_barcode', inplace=True)
            # identify patients which are flagged as the breast cancer cohort
            listBRCAPatients = dfMeta[dfMeta['type']=='BRCA'].index.tolist()
            # read only the header row to get the full sample (column) list
            dfTCGAPanCanSamples = pd.read_table(
                os.path.join(PathDir.pathPublicData, strPanCanRNASeqFile),
                sep='\t', header=None, index_col=None, nrows=1)
            listTCGAPanCanColumns = dfTCGAPanCanSamples.iloc[0,:].tolist()
            listTCGAPanCanSamples = listTCGAPanCanColumns[1:]
            # extract primary tumour (index 01) samples from the full sample list
            listBRCASamples = [strSample for strSample in listTCGAPanCanSamples
                               if np.bitwise_and(strSample[0:len('TCGA-NN-NNNN')] in listBRCAPatients,
                                                 strSample[13:15]=='01')]
            # # # # # # # # # # # # # # # # # # # # #
            # extract the TCGA pan-cancer RNA-seq data
            #take this subset
            dfTCGABrCa = pd.read_table(
                os.path.join(PathDir.pathPublicData, strPanCanRNASeqFile),
                sep='\t', header=0, index_col=0,
                usecols=[listTCGAPanCanColumns[0]]+listBRCASamples)
            dfTCGABrCa.to_pickle(os.path.join(PathDir.pathPublicData, strTempFileName))
        else:
            dfTCGABrCa = pd.read_pickle(os.path.join(PathDir.pathPublicData, strTempFileName))
        return dfTCGABrCa
    def ccle_brca(flagResult=False,
                  flagPerformExtraction=False):
        """Extract (or load cached) CCLE RNA expression for breast cancer
        cell lines.

        Breast cancer lines are selected from the DepMap ``sample_info.csv``
        metadata; ACH-format row identifiers are renamed to CCLE cell line
        names.  The result is cached as a TSV and returned as a DataFrame
        (cell lines x genes).
        """
        strTempFile = 'CCLE_BRCA_RNA_Abund.tsv'
        if not os.path.exists(os.path.join(PathDir.pathPublicData, strTempFile)):
            flagPerformExtraction = True
        if flagPerformExtraction:
            #https://ndownloader.figshare.com/files/35020903
            dfMetaData = pd.read_table(os.path.join(PathDir.pathPublicData, 'sample_info.csv'),
                                       sep=',', index_col=0, header=0)
            listBRCALinesACH = dfMetaData[dfMetaData['primary_disease'] == 'Breast Cancer'].index.tolist()
            # map ACH identifiers to human-readable CCLE line names
            dictACHToCCLE = dict(zip(listBRCALinesACH,
                                     dfMetaData['CCLE_Name'].reindex(listBRCALinesACH).values.tolist()))
            #https://ndownloader.figshare.com/files/34989919
            dfCCLE = pd.read_table(os.path.join(PathDir.pathPublicData, 'CCLE_expression.csv'),
                                   sep=',', index_col=0, header=0)
            dfBrCa = dfCCLE.reindex(listBRCALinesACH).copy(deep=True)
            dfBrCa.rename(
                index=dict(zip(listBRCALinesACH,[dictACHToCCLE[strLine] for strLine in listBRCALinesACH])),
                inplace=True)
            dfBrCa.to_csv(os.path.join(PathDir.pathPublicData, strTempFile),
                          sep='\t')
        else:
            dfBrCa = pd.read_table(os.path.join(PathDir.pathPublicData, strTempFile),
                                   sep='\t', index_col=0)
        return dfBrCa
def ccle_scores(flagResult=False,
flagPerformExtraction=False,
dfIn=pd.DataFrame()):
strTempFileName = 'CCLE-BRCA-EpiMesScores.tsv'
pathOut = os.path.join(PathDir.pathOutFolder, 'figure_5')
if not os.path.exists(os.path.join(pathOut, strTempFileName)):
flagPerformExtraction = True
if flagPerformExtraction:
listCCLEGenes = dfIn.index.tolist()
listCellLines = dfIn.columns.tolist()
numSamples = len(listCellLines)
dictEpiMesCellLine = Process.tan2012_cell_line_genes()
listEpiCellLineGenes = dictEpiMesCellLine['epi_genes']
listMesCellLineGenes = dictEpiMesCellLine['mes_genes']
# create lists of the cell line/tissue epithelial/mesenchymal gene lists for scoring
listOutputEpiCellLineGenesMatched = [strGene for strGene in listCCLEGenes
if strGene.split(' (')[0] in listEpiCellLineGenes]
listOutputMesCellLineGenesMatched = [strGene for strGene in listCCLEGenes
if strGene.split(' (')[0] in listMesCellLineGenes]
dfScoresOut = pd.DataFrame(
{'Epithelial Score':np.zeros(numSamples, dtype=float),
'Mesenchymal Score':np.zeros(numSamples, dtype=float)},
index=listCellLines)
for iSample in range(numSamples):
print('Cell line ' + '{}'.format(iSample))
strSample = listCellLines[iSample]
dfScore = score(up_gene=listOutputEpiCellLineGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Epithelial Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScore = score(up_gene=listOutputMesCellLineGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Mesenchymal Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScoresOut.to_csv(os.path.join(pathOut, strTempFileName),
sep='\t')
else:
dfScoresOut = pd.read_table(os.path.join(pathOut, strTempFileName),
sep='\t', index_col=0, header=0)
return dfScoresOut
def local_scores(flagResult=False,
flagPerformExtraction=False,
dfIn=pd.DataFrame()):
strTempFileName = 'LocalData-EpiMesScores.tsv'
pathOut = os.path.join(PathDir.pathOutFolder, 'figure_5')
if not os.path.exists(os.path.join(pathOut, strTempFileName)):
flagPerformExtraction = True
if flagPerformExtraction:
listGenes = dfIn.index.tolist()
listConditions = dfIn.columns.tolist()
numSamples = len(listConditions)
dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(), dictENSGToHGNC.keys()))
dictEpiMesCellLine = Process.tan2012_cell_line_genes()
listEpiCellLineGenes = dictEpiMesCellLine['epi_genes']
listEpiCellLineGenesENSG = [dictHGNCToENSG[strGene] for strGene in listEpiCellLineGenes]
listMesCellLineGenes = dictEpiMesCellLine['mes_genes']
listMesCellLineGenesENSG = [dictHGNCToENSG[strGene] for strGene in listMesCellLineGenes]
# create lists of the cell line/tissue epithelial/mesenchymal gene lists for scoring
listOutputEpiCellLineGenesMatched = list(set(listGenes).intersection(listEpiCellLineGenesENSG))
listOutputMesCellLineGenesMatched = list(set(listGenes).intersection(listMesCellLineGenesENSG))
dfScoresOut = pd.DataFrame(
{'Epithelial Score':np.zeros(numSamples, dtype=float),
'Mesenchymal Score':np.zeros(numSamples, dtype=float)},
index=listConditions)
for iSample in range(numSamples):
print('Cell line ' + '{}'.format(iSample))
strSample = listConditions[iSample]
dfScore = score(up_gene=listOutputEpiCellLineGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Epithelial Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScore = score(up_gene=listOutputMesCellLineGenesMatched,
sample=dfIn[[strSample]])
dfScoresOut.loc[strSample,'Mesenchymal Score'] = \
dfScore['total_score'].values.astype(float)[0]
dfScoresOut.to_csv(os.path.join(pathOut, strTempFileName),
sep='\t')
else:
dfScoresOut = pd.read_table(os.path.join(pathOut, strTempFileName),
sep='\t', index_col=0, header=0)
return dfScoresOut
    def all_epi_mes_scores(flagResult=False,
                           flagPerformExtraction=False):
        """Return epithelial/mesenchymal scores for TCGA, CCLE and local data.

        When any of the three cached score files is missing (or extraction is
        forced), the three data sets are loaded, reduced to their common gene
        set (matched via ENSG identifiers), and re-scored; otherwise the
        cached scores are loaded.  Returns a dict with keys 'TCGA', 'CCLE'
        and 'LocalData', each a score DataFrame.
        """
        dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
        dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(), dictENSGToHGNC.keys()))
        pathOut = os.path.join(PathDir.pathOutFolder, 'figure_5')
        # re-score everything if any of the three cached outputs is absent
        flagScoreTCGA = False
        if not os.path.exists(os.path.join(pathOut, 'TCGA-BRCA-EpiMesScores.tsv')):
            flagScoreTCGA = True
        flagScoreCCLE = False
        if not os.path.exists(os.path.join(pathOut, 'CCLE-BRCA-EpiMesScores.tsv')):
            flagScoreCCLE = True
        flagScoreLocalData = False
        if not os.path.exists(os.path.join(pathOut, 'LocalData-EpiMesScores.tsv')):
            flagScoreLocalData = True
        if np.any([flagScoreTCGA, flagScoreCCLE, flagScoreLocalData]):
            flagPerformExtraction=True
        if flagPerformExtraction:
            dfLocalData = Process.quant_data()
            listLocalGenesENSG = dfLocalData.index.tolist()
            setLocalGenesENSG = set(listLocalGenesENSG)
            # TCGA row labels are 'HGNC|Entrez'; map HGNC -> ENSG, with a
            # sentinel 'failed_map|<gene>' for symbols absent from the GTF map
            dfTCGA = Process.tcga_brca()
            listTCGAGenes = dfTCGA.index.tolist()
            listTCGAGenesHGNC = [strGene.split('|')[0] for strGene in listTCGAGenes]
            for strGene in list(set(listTCGAGenesHGNC).difference(set(dictHGNCToENSG.keys()))):
                dictHGNCToENSG[strGene] = 'failed_map|'+strGene
            listTCGAGenesENSG = [dictHGNCToENSG[strGene] for strGene in listTCGAGenesHGNC]
            setTCGAGenesENSG = set(listTCGAGenesENSG)
            # CCLE column labels are 'HGNC (ENSG)'; map via the HGNC part
            dfCCLE = Process.ccle_brca()
            listCCLEGenes = dfCCLE.columns.tolist()
            listCCLEGenesHGNC = [strGene.split(' (')[0] for strGene in listCCLEGenes]
            listCCLEGenesENSG = [dictHGNCToENSG[strGene] for strGene in listCCLEGenesHGNC]
            setCCLEGenesENSG = set(listCCLEGenesENSG)
            # reduce all three data sets to their common (ENSG) gene set
            listCommonGenesENSG = list(setCCLEGenesENSG.intersection(setLocalGenesENSG.intersection(setTCGAGenesENSG)))
            listCommonGenesHGNC = [dictENSGToHGNC[strGene] for strGene in listCommonGenesENSG]
            listTCGAGenesOut = [strGene for strGene in listTCGAGenes if strGene.split('|')[0] in listCommonGenesHGNC]
            listCCLEGenesOut = [strGene for strGene in listCCLEGenes if strGene.split(' (')[0] in listCommonGenesHGNC]
            listLocalDataGenesOut = list(set(listLocalGenesENSG).intersection(listCommonGenesENSG))
            dfTCGAScores = Process.tcga_scores(dfIn=dfTCGA.reindex(listTCGAGenesOut))
            dfCCLEScores = Process.ccle_scores(dfIn=dfCCLE[listCCLEGenesOut].transpose())
            dfLocalScores = Process.local_scores(dfIn=dfLocalData.reindex(listLocalDataGenesOut))
        else:
            dfTCGAScores = Process.tcga_scores()
            dfCCLEScores = Process.ccle_scores()
            dfLocalScores = Process.local_scores()
        return {'TCGA':dfTCGAScores,
                'CCLE':dfCCLEScores,
                'LocalData':dfLocalScores}
def ccle_brca_subtypes(flagResult=False):
dfMeta = pd.read_table(os.path.join(PathDir.pathPublicData, 'sample_info.csv'),
sep=',', header=0, index_col=0)
listBreastLinesACH = dfMeta[dfMeta['primary_disease']=='Breast Cancer'].index.tolist()
listBreastLinesCCLE = dfMeta['CCLE_Name'].reindex(listBreastLinesACH).values.tolist()
listSubtype = dfMeta['lineage_molecular_subtype'].reindex(listBreastLinesACH).values.tolist()
for iLine in range(len(listBreastLinesACH)):
if not listSubtype[iLine] == listSubtype[iLine]:
listSubtype[iLine] = 'unknown'
return dict(zip(listBreastLinesCCLE, listSubtype))
def fig5_rnaseq_gene_lists(flagResult=False):
# select a subset of genes from the RNA-seq DE analyses for display in Fig. 5b
# define significance threshold for later use
numAdjPValThresh = 0.05
# we want to end up with a reasonable number for display
# --> set a maximum but it may be slightly lower as we filter on prior gene annotations
numMaxGenes = 60
# load dictionaries for mapping between ENSG and HGNC
dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(), dictENSGToHGNC.keys()))
# load some prior annotations of epithelial and mesenchymal genes
dictEpiMesGenes = Process.tan2012_cell_line_genes()
listEpiGenesENSG = [dictHGNCToENSG[strGene] for strGene in dictEpiMesGenes['epi_genes']]
listMesGenesENSG = [dictHGNCToENSG[strGene] for strGene in dictEpiMesGenes['mes_genes']]
# load the data
dfMergedRNA = Process.diff_expr_data()
# listDataGenes = dfMergedRNA.index.tolist()
listDataColumns = dfMergedRNA.columns.tolist()
listPValCols = [strCol for strCol in listDataColumns if 'adj.P.Val' in strCol]
listCellLines = [strCol.split(':adj.P.Val')[0] for strCol in listPValCols]
# only focus on genes that show significant DE in at least one condition
arrayIsSigInEitherLine = np.any(
dfMergedRNA[listPValCols].values.astype(float) < numAdjPValThresh,
axis=1)
listSigInEitherLine = [dfMergedRNA.index.tolist()[i] for i in np.where(arrayIsSigInEitherLine)[0]]
# define a dataframe for gene ranks by product-of-rank
dfRanks = pd.DataFrame(data=np.zeros((len(listSigInEitherLine), len(listCellLines)),
dtype=float),
index=listSigInEitherLine,
columns=listCellLines)
# step through the cell lines and rank genes
for iCond in range(len(listCellLines)):
strCellLine = listCellLines[iCond]
# extract the logFC and adj-p value data
arrayLogFC = np.nan_to_num(dfMergedRNA[f'{strCellLine}:logFC'].reindex(listSigInEitherLine).values.astype(float))
arrayAdjPVal = dfMergedRNA[f'{strCellLine}:adj.P.Val'].reindex(listSigInEitherLine).values.astype(float)
# fix any nan values to be non-significant
arrayAdjPVal[np.isnan(arrayAdjPVal)] = 1.0
# rank genes by the logFC*-log(p)
listGenesRanked = [listSigInEitherLine[i] for i in
np.argsort(np.product((arrayLogFC, -np.log10(arrayAdjPVal)), axis=0))]
dfRanks.loc[listGenesRanked, strCellLine] = np.arange(start=1, stop=len(listGenesRanked)+1)
# take the product of ranks across conditions and extract the final ranks
arrayProdRankAcrossCond = np.product(dfRanks.values.astype(float), axis=1)
arraySortedByProdRank = np.argsort(arrayProdRankAcrossCond)
listSortedByProdRank = [listSigInEitherLine[i] for i in arraySortedByProdRank]
listMesInList = list(set(listSigInEitherLine).intersection(listMesGenesENSG))
listEpiInList = list(set(listSigInEitherLine).intersection(listEpiGenesENSG))
# ZEB1 inactivation tends to drive increases in gene expression (mainly epithelial genes)
# so weight this towards up-regulated genes
numFromProdRank = numMaxGenes - len(listMesInList + listEpiInList)
numUpGenes = int((2/3)*numFromProdRank)
numDownGenes = int((1/3)*numFromProdRank)
# listSortedByProdRankHGNC = [dictENSGToHGNC[strGene] for strGene in listSortedByProdRank
# if strGene in dictENSGToHGNC.keys()]
listOutputDownGenes = listSortedByProdRank[0:numDownGenes]
listOutputUpGenes = listSortedByProdRank[-numUpGenes:]
listOutputGeneOrder = listOutputDownGenes + \
list(set(listMesInList).difference(set(listOutputDownGenes))) + \
list(set(listEpiInList).difference(set(listOutputUpGenes))) + \
listOutputUpGenes
return {'HeatmapOrder':listOutputGeneOrder,
'SigEitherLine':listSigInEitherLine}
    def guides(flagResult=False,
               strGuideFileName='hu_guides.txt'):
        """Load the guide RNA table (expects at least 'Sequence' and 'PAM'
        columns, as consumed by ``off_targets``).

        NOTE(review): PathDir.pathProcResults is not defined in the PathDir
        class visible in this file - confirm the attribute is added elsewhere
        before calling this method.
        """
        dfGuides = pd.read_table(os.path.join(PathDir.pathProcResults, strGuideFileName),
                                 sep='\t', header=0, index_col=None)
        return dfGuides
def off_targets(flagResult=False,
strOffTargetFileName='20180518_dCas_pipe_out.txt'):
dfGuides = Process.guides()
numGuides = np.shape(dfGuides)[0]
listGuides = [dfGuides['Sequence'].iloc[i] + '_' + dfGuides['PAM'].iloc[i] for i in range(numGuides)]
dictOffTargetGenes = dict()
listIsGuideRow = []
listIndicesForGuidesOfInt = []
listFile = []
with open(os.path.join(PathDir.pathProcResults, strOffTargetFileName)) as handFile:
listFileContents = csv.reader(handFile)
for listRow in listFileContents:
strRow = listRow[0]
listFile.append(strRow)
if 'Mismatch Risk:' in strRow:
listIsGuideRow.append(True)
strGuide = strRow.split('\t')[0]
if strGuide in listGuides:
listIndicesForGuidesOfInt.append(len(listIsGuideRow))
else:
listIsGuideRow.append(False)
arrayGuideRNARowIndices = np.where(listIsGuideRow)[0]
arrayGuideRNAOfIntRowIndices = np.array(listIndicesForGuidesOfInt, dtype=np.int)
listGuideAndPAM = []
listBindChr = []
listBindStart = []
listBindEnd = []
listNumMisMatch = []
listOffTargGene = []
listOffTargSeq = []
listScores = []
listStrand = []
for iGuide in range(len(listIndicesForGuidesOfInt)):
numStartRow = arrayGuideRNAOfIntRowIndices[iGuide]
numEndRow = arrayGuideRNARowIndices[arrayGuideRNARowIndices > numStartRow][0]
listToProc = listFile[numStartRow-1:numEndRow]
numMisMatches = -1
for iRow in range(len(listToProc)):
strFirstRow = listToProc[0]
strGuideAndPAM = strFirstRow.split('\t')[0]
strRow = listToProc[iRow]
if 'Mismatches: ' in strRow:
strMisMatches = strRow.split('Mismatches: ')[1]
numMisMatches = np.int(strMisMatches)
elif strRow[0:2] == '\t\t':
# extract the required information
arrayMisMatchInfo = strRow[2:].split('\t')
listGuideAndPAM.append(strGuideAndPAM)
listNumMisMatch.append(numMisMatches)
listBindChr.append(arrayMisMatchInfo[0])
listBindStart.append(arrayMisMatchInfo[1])
listBindEnd.append(arrayMisMatchInfo[2])
strOffTarg = arrayMisMatchInfo[3]
if '/' in strOffTarg:
listOffTargGene.append(strOffTarg.split('/')[0])
listOffTargSeq.append(strOffTarg.split('/')[1])
else:
listOffTargGene.append(strOffTarg)
listOffTargSeq.append('')
if len(arrayMisMatchInfo) >= 5:
listScores.append(arrayMisMatchInfo[4])
else:
listScores.append('')
if len(arrayMisMatchInfo) >= 6:
listStrand.append(arrayMisMatchInfo[5])
else:
listStrand.append('')
dfOffTargets = pd.DataFrame({'Guide_PAM':listGuideAndPAM,
'Binds_chr':listBindChr,
'Binds_start':listBindStart,
'Binds_end':listBindEnd,
'Binds_numMisMatch':listNumMisMatch,
'Binds_HGNC':listOffTargGene,
'Binds_seq':listOffTargSeq,
'Binds_score':listScores,
'Binds_strand':listStrand},
index=np.arange(len(listGuideAndPAM)))
return dfOffTargets
def dict_gtf_ensg_to_hgnc(flagResult=False,
numRelease=102,
strReference='h38',
flagPerformExtraction=False):
strTempFilename = f'GRC{strReference}_{numRelease}_ENSGToHGNC.pickle'
if not os.path.exists(os.path.join(PathDir.pathRefData, strTempFilename)):
flagPerformExtraction=True
if flagPerformExtraction:
strDataFile = f'Homo_sapiens.GRC{strReference}.{numRelease}.gtf.gz'
dfEnsDB = pd.read_csv(os.path.join(PathDir.pathRefData, strDataFile),
sep='\t',
compression='gzip',
header=None,
comment='#')
if numRelease >= 75:
arrayGeneRowIndices = np.where((dfEnsDB.iloc[:,2]=='gene').values.astype(np.bool))[0]
else:
arrayGeneRowIndices = np.where((dfEnsDB.iloc[:,2]=='exon').values.astype(np.bool))[0]
numGenes = len(arrayGeneRowIndices)
listGenes = [None]*numGenes
listGeneENSG = [None]*numGenes
strFirstGeneDetails = dfEnsDB.iloc[arrayGeneRowIndices[0],8]
listFirstGeneDetails = strFirstGeneDetails.split(';')
numGeneNameIndex = np.where(['gene_name "' in strDetails for strDetails in listFirstGeneDetails])[0][0]
numGeneIDIndex = np.where(['gene_id "' in strDetails for strDetails in listFirstGeneDetails])[0][0]
for iGene in range(numGenes):
strGeneDetails = dfEnsDB.iloc[arrayGeneRowIndices[iGene],8]
listGeneDetails = strGeneDetails.split(';')
strGene = listGeneDetails[numGeneNameIndex].split('gene_name "')[1].strip('"')
strENSG = listGeneDetails[numGeneIDIndex].split('gene_id "')[1].strip('"')
listGenes[iGene] = strGene
listGeneENSG[iGene] = strENSG
if len(listGeneENSG) > len(set(listGeneENSG)):
dfMapped = pd.DataFrame({'ENSG':listGeneENSG, 'HGNC':listGenes})
dfMapped.drop_duplicates(subset='ENSG', inplace=True)
dictEnsGeneToHGNC = dict(zip(dfMapped['ENSG'].values.tolist(),
dfMapped['HGNC'].values.tolist()))
else:
dictEnsGeneToHGNC = dict(zip(listGeneENSG, listGenes))
with open(os.path.join(PathDir.pathRefData, strTempFilename), 'wb') as handFile:
pickle.dump(dictEnsGeneToHGNC, handFile, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open(os.path.join(PathDir.pathRefData, strTempFilename), 'rb') as handFile:
dictEnsGeneToHGNC = pickle.load(handFile)
return dictEnsGeneToHGNC
def tan2012_cell_line_genes(flagResult=False):
# Cell line epithelial & mesenchymal gene lists from:
# TZ Tan et al. (2012) [JP Thiery]. Epithelial-mesenchymal transition spectrum quantification
# and its efficacy in deciphering survival and drug responses of cancer patients.
# DOI: 10.15252/emmm.201404208
# load the gene lists
dfGeneLists = pd.read_csv(
os.path.join(PathDir.pathRefData, 'Thiery_generic_EMT_signatures.txt'),
sep='\t', header=0, index_col=None)
# extract as individual lists
listEpiGenes = dfGeneLists['genes'][dfGeneLists['epiMes_cellLine'] == 'epi'].values.tolist()
listMesGenes = dfGeneLists['genes'][dfGeneLists['epiMes_cellLine'] == 'mes'].values.tolist()
# update some more recently defined gene names
dictNewNames = {'C1orf106':'INAVA',
'GPR56':'ADGRG1',
'AIM1':'CRYBG1',
'C19orf21':'MISP',
'C10orf116':'ADIRF',
'C12orf24':'FAM216A',
'LEPRE1':'P3H1',
'LHFP':'LHFPL6',
'KDELC1':'POGLUT2',
'PTRF':'CAVIN1'}
for iGene in range(len(listEpiGenes)):
strGene = listEpiGenes[iGene]
if strGene in dictNewNames.keys():
listEpiGenes[iGene] = dictNewNames[strGene]
for iGene in range(len(listMesGenes)):
strGene = listMesGenes[iGene]
if strGene in dictNewNames.keys():
listMesGenes[iGene] = dictNewNames[strGene]
# return as a dictionary of lists
return {'epi_genes': listEpiGenes, 'mes_genes': listMesGenes}
def tan2012_tissue_genes(flagResult=False):
# Tissue epithelial & mesenchymal gene lists from:
# TZ Tan et al. (2012) [JP Thiery]. Epithelial-mesenchymal transition spectrum quantification
# and its efficacy in deciphering survival and drug responses of cancer patients.
# DOI: 10.15252/emmm.201404208
# load the gene lists
dfGeneLists = pd.read_csv(
os.path.join(PathDir.pathRefData, 'Thiery_generic_EMT_signatures.txt'),
sep='\t', header=0, index_col=None)
# extract as individual lists
listEpiGenes = dfGeneLists['genes'][dfGeneLists['epiMes_tumor'] == 'epi'].values.tolist()
listMesGenes = dfGeneLists['genes'][dfGeneLists['epiMes_tumor'] == 'mes'].values.tolist()
# update some more recently defined gene names
dictNewNames = {'C14orf139':'SYNE3',
'GUCY1B3':'GUCY1B1',
'KIAA1462':'JCAD',
'LHFP':'LHFPL6',
'PTRF':'CAVIN1',
'SEPT6':'SEPTIN6',
'C19orf21':'MISP',
'C1orf106':'INAVA',
'GPR56':'ADGRG1',
'PPAP2C':'PLPP2'
}
# dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
# dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(), dictENSGToHGNC.keys()))
for iGene in range(len(listEpiGenes)):
strGene = listEpiGenes[iGene]
if strGene in dictNewNames.keys():
listEpiGenes[iGene] = dictNewNames[strGene]
for iGene in range(len(listMesGenes)):
strGene = listMesGenes[iGene]
if strGene in dictNewNames.keys():
listMesGenes[iGene] = dictNewNames[strGene]
# return as a dictionary of lists
return {'epi_genes': listEpiGenes, 'mes_genes': listMesGenes}
def go_all_gene_with_traversal(flagPerformExtraction=False,
flagProcessGOToArray=False):
strOutputSaveFile='GO_annot_traversed.pickle'
if not os.path.exists(os.path.join(PathDir.pathRefData, strOutputSaveFile)):
flagPerformExtraction=True
if flagPerformExtraction:
# extract the GO data
dfGeneOntology = Extract.goa_human(flagPerformExtraction=False)
dfGOMapping = Extract.go_obo(flagPerformExtraction=False)
strTempMembMatrixFile = 'GO_memb_matrix_proc.pickle'
strInitGraphFile = 'GO_annot-orig_graph.pickle'
# determine the unique list of GO annotations
listUniqueGONumsFromMapping = pd.unique(dfGOMapping['ID'].values.ravel()).tolist()
listUniqueGONumsFromOntology = pd.unique(dfGeneOntology['GO_Num'].values.ravel()).tolist()
listUniqueMessRNAs = sorted(pd.unique(dfGeneOntology['HGNC'].values.ravel()).tolist())
listUniqueGONums = sorted(list(set(listUniqueGONumsFromMapping + listUniqueGONumsFromOntology)))
numUniqueMessRNAs = np.int64(len(listUniqueMessRNAs))
numUniqueGONums = np.int64(len(listUniqueGONums))
if np.bitwise_or(not os.path.exists(os.path.join(PathDir.strDataPath, strTempMembMatrixFile)),
flagProcessGOToArray):
print('creating a membership matrix for GO annotations against genes, this may take some time')
arrayGOMembMatrix = np.zeros((numUniqueMessRNAs, numUniqueGONums), dtype=np.bool)
dictMessRNAIndices = dict(zip(listUniqueMessRNAs, np.arange(start=0, stop=len(listUniqueMessRNAs))))
dictGONumIndices = dict(zip(listUniqueGONums, np.arange(start=0, stop=len(listUniqueGONums))))
arrayProgCounter = np.linspace(start=0, stop=np.shape(dfGeneOntology)[0], num=100)[1:]
iProg = 0
for iRow in range(np.shape(dfGeneOntology)[0]):
strGene = dfGeneOntology['HGNC'].iloc[iRow]
strGOCat = dfGeneOntology['GO_Num'].iloc[iRow]
arrayGOMembMatrix[dictMessRNAIndices[strGene], dictGONumIndices[strGOCat]] = True
if iRow > arrayProgCounter[iProg]:
print(f'\t{(iProg+1)}% complete..')
iProg += 1
dfGOMembMatrix = pd.DataFrame(
data=arrayGOMembMatrix,
index=listUniqueMessRNAs,
columns=listUniqueGONums)
dfGOMembMatrix.to_pickle(os.path.join(PathDir.strDataPath, strTempMembMatrixFile))
else:
print('Loading pre-processed boolean membership matrix for GO annotations..')
dfGOMembMatrix = pd.read_pickle(os.path.join(PathDir.strDataPath, strTempMembMatrixFile))
arrayGOMembMatrix = dfGOMembMatrix.values.astype(np.bool)
if not os.path.exists(os.path.join(PathDir.strDataPath, strInitGraphFile)):
print('creating a directed graph of GO annotation structure/hierarchy')
graphAnnotRel = nx.DiGraph()
graphAnnotRel.add_nodes_from(listUniqueGONums)
arrayProgCounter = np.linspace(start=0, stop=len(listUniqueGONums), num=100)[1:]
iProg = 0
for iGONum in range(len(listUniqueGONums)):
strGONum = listUniqueGONums[iGONum]
if strGONum in dfGOMapping['ID'].values.tolist():
listParents = dfGOMapping['Parents'][dfGOMapping['ID'] == strGONum].tolist()[0]
if not (not listParents):
for strParent in listParents:
graphAnnotRel.add_edge(strGONum, strParent)
if iGONum > arrayProgCounter[iProg]:
print(f'\t{(iProg+1)}% complete..')
iProg += 1
nx.write_gpickle(graphAnnotRel, os.path.join(PathDir.strDataPath, strInitGraphFile))
else:
graphAnnotRel = nx.read_gpickle(os.path.join(PathDir.strDataPath, strInitGraphFile))
print('attempting to traverse GO graph structure')
print('warning: this uses an iterative while loop; ensure progression beyond this')
while Map.has_cycle(graphAnnotRel):
# while len(nx.find_cycle(graphAnnotRel)) > 0:
listCycles = nx.find_cycle(graphAnnotRel)
print('.. attempting to resolve cycle:')
for iCycleEdge in range(len(listCycles)):
print('\t\t.. ' + listCycles[iCycleEdge][0] + ' <- ' + listCycles[iCycleEdge][1])
listCycleNodes = list()
for iCycleEdge in range(len(listCycles)):
if listCycles[iCycleEdge][0] not in listCycleNodes:
listCycleNodes.append(listCycles[iCycleEdge][0])
if listCycles[iCycleEdge][1] not in listCycleNodes:
listCycleNodes.append(listCycles[iCycleEdge][1])
arrayNodeGenes = np.sum(dfGOMembMatrix[listCycleNodes].values.astype(np.float), axis=0)
strLeastPopulatedNode = listCycleNodes[np.argsort(arrayNodeGenes)[0]]
for iCycleEdge in range(len(listCycles)):
if listCycles[iCycleEdge][1] == strLeastPopulatedNode:
graphAnnotRel.remove_edge(listCycles[iCycleEdge][0], listCycles[iCycleEdge][1])
print('attempting topological sort prior to traversal to increase coverage')
# listTopologicalSort = nx.topological_sort(graphAnnotRel, reverse=True)
listTopologicalSort = list(reversed(list(nx.topological_sort(graphAnnotRel))))
for strGO in listTopologicalSort:
# I think in a digraph, neighbors lists only the parents/input nodes (whereas predecessors does the full
# traversal
listParentNodes = graphAnnotRel.neighbors(strGO)
if not (not listParentNodes):
numNodeMatrixIndex = listUniqueGONums.index(strGO)
arrayGeneIndices = np.where(arrayGOMembMatrix[:,numNodeMatrixIndex])[0]
for strParent in listParentNodes:
numParentNodeMatrixIndex = listUniqueGONums.index(strParent)
arrayGOMembMatrix[arrayGeneIndices, numParentNodeMatrixIndex] = True
dfGOMemb = pd.DataFrame(data=arrayGOMembMatrix, # values
index=listUniqueMessRNAs, # 1st column as index
columns=listUniqueGONums)
# save the full dataframe using pickle
dfGOMemb.to_pickle(os.path.join(PathDir.strDataPath, strOutputSaveFile))
else:
if os.path.exists(os.path.join(PathDir.pathRefData, strOutputSaveFile)):
# load the data from the specified files
print('Loading the pre-processed GO annotation (w/ traversal) data frame from ' +
os.path.join(PathDir.pathRefData, strOutputSaveFile))
dfGOMemb = pd.read_pickle(os.path.join(PathDir.pathRefData, strOutputSaveFile))
else:
print('Cannot load the pre-processed GO annotation (w/ traversal) data frame, ' +
os.path.join(PathDir.pathRefData, strOutputSaveFile) +
' does not exist, change flagPerformExtraction')
return dfGOMemb
    def go_rnaseq_diffexpr_genes(flagPerformExtraction=False):
        """Build (or load cached) the GO membership matrix restricted to the
        differentially expressed RNA-seq genes.

        The traversed gene x GO matrix is reindexed to the significant genes
        (mapped ENSG -> HGNC), missing rows are filled with False, and GO
        terms with no remaining genes are dropped.  Cached as a TSV.
        """
        strOutputSaveFile = 'rnaseq_diff_expr_GO_annot.tsv'
        if not os.path.exists(os.path.join(PathDir.pathRefData, strOutputSaveFile)):
            flagPerformExtraction=True
        if flagPerformExtraction:
            dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
            # dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(), dictENSGToHGNC.keys()))
            dfAllGenes = Process.go_all_gene_with_traversal()
            listRNASeqDEGenesENSG = Process.fig5_rnaseq_gene_lists()['SigEitherLine']
            # genes without an ENSG->HGNC mapping are silently dropped
            listRNASeqDEGenes = [dictENSGToHGNC[strGene] for strGene in listRNASeqDEGenesENSG
                                 if strGene in dictENSGToHGNC.keys()]
            dfTargetGenes = dfAllGenes.reindex(listRNASeqDEGenes).copy(deep=True)
            # rows absent from the GO matrix become all-False after reindex
            dfTargetGenes[pd.isnull(dfTargetGenes)] = False
            # keep only GO terms annotated to at least one remaining gene
            arrayGOObs = np.sum(dfTargetGenes.values.astype(float), axis=0)
            listGOHasGene = [dfTargetGenes.columns.tolist()[i] for i in np.where(arrayGOObs > 0)[0]]
            dfGOMemb = dfTargetGenes[listGOHasGene]
            dfGOMemb.to_csv(os.path.join(PathDir.pathRefData, strOutputSaveFile),
                            sep='\t')
        else:
            dfGOMemb = pd.read_table(os.path.join(PathDir.pathRefData, strOutputSaveFile),
                                     sep='\t')
        return dfGOMemb
def transcription_factors(flagResult=False):
# 'Homo_sapiens_TF.txt' & 'Homo_sapiens_TF_cofactors.txt' from AnimalTFDB
# now seems to be hosted at http://bioinfo.life.hust.edu.cn/AnimalTFDB/#!/
dfTFs = pd.read_csv(os.path.join(PathDir.pathRefData, 'Homo_sapiens_TF.txt'),
sep='\t', header=0, index_col=1)
dfCoFactors = pd.read_csv(os.path.join(PathDir.pathRefData, 'Homo_sapiens_TF_cofactors.txt'),
sep='\t', header=0, index_col=1)
listAllENSG = dfTFs.index.tolist() + dfCoFactors.index.tolist()
listAllHGNC = dfTFs['Ensembl'].values.tolist() + dfCoFactors['Ensembl'].values.tolist()
listAllType = ['TF']*np.shape(dfTFs)[0] + ['CoFact']*np.shape(dfCoFactors)[0]
dfTFs = pd.DataFrame({'ENSG':listAllENSG,
'HGNC':listAllHGNC,
'Type':listAllType})
return dfTFs
class PlotFunc:
    """Panel-level plotting helpers for Figure 5.

    Methods are used as namespaced functions (no self/cls); each draws into
    axes handles supplied by the caller (Plot.figure_five) and returns its
    flagResult argument unchanged.
    """

    def es_ms_landscape(
            flagResult=False,
            handAxIn='undefined'):
        """Draw the epithelial- vs mesenchymal-score landscape: a TCGA tumour
        hexbin density with CCLE breast-cancer lines (coloured by subtype) and
        local EpiCRISPR samples (green) overlaid as triangles.

        :param flagResult: pass-through flag, returned unchanged
        :param handAxIn: matplotlib axes to draw into
        :return: flagResult
        """
        # CCLE subtype groupings with matched colours and display labels
        listOfListsCellLineSubtypes = [['luminal'],
                                       ['HER2_amp', 'luminal_HER2_amp'],
                                       ['basal', 'basal_A'],
                                       ['basal_B'],
                                       ['unknown']]
        listSubtypePlotColors = ['#53a9eb', # dark blue
                                 '#aeb3f1', # light blue
                                 '#f5a2bc', # pink
                                 '#ec1c24', # red
                                 '#ababab'] # gray
        listCellLineSubtypesToDisp = ['Luminal',
                                      'HER2$^{++}$',
                                      'Basal',
                                      'Basal B',
                                      'Not classified']
        numCellLineSubtypes = len(listCellLineSubtypesToDisp)
        listSamplesToPlot = ['SUM159_EVC',
                             'SUM159_gAll',
                             'MDAMB231_EVC',
                             'MDAMB231_gAll']
        numMaxXTicks = 5
        numMaxYTicks = 5
        numCellLineMarkerSize = 35
        numCellLineMarkerLineWidth = 1.0
        numScatterZOrder = 11
        dictLineLabel = {'SUM159':'SUM159',
                         'MDAMB231':'MDA-MB-231'}
        dictCondLabel = {'EVC': 'Empty\nvector',
                         'gAll': 'All gRNAs'}
        # x/y text offsets used only by the commented-out annotate call below
        dictOfDictOffsets = {'SUM159': {},
                             'MDAMB231': {}}
        dictOfDictOffsets['SUM159']['EVC'] = (-0.08, 0.02)
        dictOfDictOffsets['SUM159']['gAll'] = (-0.03, -0.07)
        dictOfDictOffsets['MDAMB231']['EVC'] = (0.04, 0.10)
        dictOfDictOffsets['MDAMB231']['gAll'] = (-0.07, -0.07)
        dictAllScores = Process.all_epi_mes_scores()
        dfTCGAScores = dictAllScores['TCGA']
        dfCCLEScores = dictAllScores['CCLE']
        # drop CCLE lines that are missing either score
        arrayCCLELineHasNoScore = np.sum(np.isnan(dfCCLEScores.values.astype(float)), axis=1) > 0
        listCCLELineNoScore = [dfCCLEScores.index.tolist()[i] for i in np.where(arrayCCLELineHasNoScore)[0]]
        dfCCLEScores.drop(index=listCCLELineNoScore, inplace=True)
        dfLocalScores = dictAllScores['LocalData']
        dictBrCaLineToType = Process.ccle_brca_subtypes()
        # shared score range across the local, TCGA and CCLE data sets so all
        # three are plotted on common axes
        numMinES = np.min([np.min(dfLocalScores['Epithelial Score'].values.astype(float)),
                           np.min(dfTCGAScores['Epithelial Score'].values.astype(float)),
                           np.min(dfCCLEScores['Epithelial Score'].values.astype(float))
                           ])
        numMaxES = np.max([np.max(dfLocalScores['Epithelial Score'].values.astype(float)),
                           np.max(dfTCGAScores['Epithelial Score'].values.astype(float)),
                           np.max(dfCCLEScores['Epithelial Score'].values.astype(float))
                           ])
        numMinMS = np.min([np.min(dfLocalScores['Mesenchymal Score'].values.astype(float)),
                           np.min(dfTCGAScores['Mesenchymal Score'].values.astype(float)),
                           np.min(dfCCLEScores['Mesenchymal Score'].values.astype(float))
                           ])
        numMaxMS = np.max([np.max(dfLocalScores['Mesenchymal Score'].values.astype(float)),
                           np.max(dfTCGAScores['Mesenchymal Score'].values.astype(float)),
                           np.max(dfCCLEScores['Mesenchymal Score'].values.astype(float))
                           ])
        numMinScore = np.min([numMinES, numMinMS])
        numMaxScore = np.max([numMaxES, numMaxMS])
        # TCGA tumours as a log-density hexbin background
        handAxHex = handAxIn.hexbin(dfTCGAScores['Epithelial Score'].values.astype(float),
                                    dfTCGAScores['Mesenchymal Score'].values.astype(float),
                                    cmap=plt.cm.inferno,
                                    bins='log',
                                    gridsize=50,
                                    alpha=0.5,
                                    extent=[numMinScore-0.13, numMaxScore+0.08,
                                            numMinScore-0.13, numMaxScore+0.08])
        # overlay CCLE lines, coloured by subtype (gray when unclassified)
        for iCellLine in range(np.shape(dfCCLEScores)[0]):
            strCellLine = dfCCLEScores.index.tolist()[iCellLine]
            strSubtype = dictBrCaLineToType[strCellLine]
            strColor = listSubtypePlotColors[4]
            for iSubtype in range(len(listOfListsCellLineSubtypes)):
                if strSubtype in listOfListsCellLineSubtypes[iSubtype]:
                    strColor = listSubtypePlotColors[iSubtype]
            plt.scatter(dfCCLEScores['Epithelial Score'].iloc[iCellLine],
                        dfCCLEScores['Mesenchymal Score'].iloc[iCellLine],
                        c=strColor, marker='^', s=25,
                        edgecolors=['k'],
                        zorder=numScatterZOrder)
        # overlay the local EpiCRISPR samples in green
        for iSampleSet in range(len(listSamplesToPlot)):
            strSampleSet = listSamplesToPlot[iSampleSet]
            listLocalSamplesToPlot = [strSample for strSample in dfLocalScores.index.tolist()
                                      if strSampleSet in strSample]
            for strSample in listLocalSamplesToPlot:
                plt.scatter(dfLocalScores['Epithelial Score'].loc[strSample],
                            dfLocalScores['Mesenchymal Score'].loc[strSample],
                            c='g', marker='^', s=25, edgecolors=['k'],
                            zorder=numScatterZOrder+1)
            # labels and per-condition mean scores below only feed the
            # commented-out annotate call
            strLineShort = strSampleSet.split('_')[0]
            strCondShort = strSampleSet.partition('_')[2]
            strLine = dictLineLabel[strLineShort]
            strCond = dictCondLabel[strCondShort]
            numMeanEpiScore = np.mean(
                dfLocalScores['Epithelial Score'].reindex(listLocalSamplesToPlot).values.astype(float))
            numMeanMesScore = np.mean(
                dfLocalScores['Mesenchymal Score'].reindex(listLocalSamplesToPlot).values.astype(float))
            # handAxIn.annotate(
            #     strLine + '\n' + strCond,
            #     xy=(numMeanEpiScore, numMeanMesScore), xycoords='data',
            #     xytext=(numMeanEpiScore + dictOfDictOffsets[strLineShort][strCondShort][0],
            #             numMeanMesScore + dictOfDictOffsets[strLineShort][strCondShort][1]),
            #     textcoords='data',
            #     size=Plot.numFontSize*0.7, annotation_clip=False,
            #     horizontalalignment='center', verticalalignment='center', zorder=6,
            #     bbox=dict(boxstyle="round", fc='w', ec=(0.6, 0.6, 0.6), lw=2, alpha=1.0),
            #     arrowprops=dict(arrowstyle="wedge,tail_width=0.6",
            #                     fc=(1.0, 1.0, 1.0), ec=(0.6, 0.6, 0.6),
            #                     patchA=None,
            #                     relpos=(0.5, 0.5),
            #                     connectionstyle="arc3", lw=2, alpha=0.7, zorder=6)
            #     )
        handAxIn.set_xlim([numMinScore-0.10, numMaxScore+0.05])
        handAxIn.set_ylim([numMinScore-0.10, numMaxScore+0.05])
        handAxIn.set_ylabel('Mesenchymal score', fontsize=Plot.numFontSize*0.7)
        handAxIn.set_xlabel('Epithelial score', fontsize=Plot.numFontSize*0.7)
        # NOTE(review): Tick.label is deprecated/removed in newer matplotlib
        # (use tick_params or Tick.label1) — confirm the pinned version.
        for handTick in handAxIn.xaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize*0.7)
        for handTick in handAxIn.yaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize*0.7)
        # tidy up the tick locations
        arrayXTickLoc = plt.MaxNLocator(numMaxXTicks)
        handAxIn.xaxis.set_major_locator(arrayXTickLoc)
        arrayYTickLoc = plt.MaxNLocator(numMaxYTicks)
        handAxIn.yaxis.set_major_locator(arrayYTickLoc)
        # geometry for the (currently commented-out) legend and colour bar
        arrayHexBinPlotPos = handAxIn.get_position()
        numColorBarXStart = arrayHexBinPlotPos.x0 + 0.03*arrayHexBinPlotPos.width
        numColorBarYStart = arrayHexBinPlotPos.y0 + 0.05*arrayHexBinPlotPos.height
        numLegendPanelXStart = numMinScore-0.095
        numLegendPanelYStart = numMinScore-0.095
        numLegendPanelWidth = 0.30
        numLegendPanelHeight = 0.24
        numColorBarLabelXPos = numMinScore - 0.03
        numColorBarLabelYPos = numMinScore + 0.11
        numScatterLabelXPos = numMinScore + 0.11
        numScatterLabelYPos = numColorBarLabelYPos
        numScatterLegendXPos = numMinScore + 0.05
        numScatterLegendYPos = numMinScore + 0.10
        numScatterLegendYSpacing = 0.045*(numMaxScore - numMinScore)
        numScatterLegendHMLESystemXOffset = 0.015 * (numMaxScore - numMinScore)
        numScatterLegendTextXOffset = 0.015
        numScatterLegendTextYOffset = -0.03
        # # draw in a patch (white bounding box) as the background for the legend
        # handPatch = handAxIn.add_patch(matplotlib.patches.Rectangle([numLegendPanelXStart, numLegendPanelYStart],
        #                                                             numLegendPanelWidth, numLegendPanelHeight,
        #                                                             edgecolor='k', lw=1.,
        #                                                             facecolor='w', fill=True))
        # handPatch.set_zorder(numScatterZOrder+1)
        # handAxIn.text(numLegendPanelXStart + 0.25*numLegendPanelWidth,
        #               numLegendPanelYStart + 0.40*numLegendPanelHeight,
        #               'log$_{10}$($n_{tumours}$)',
        #               fontsize=Plot.numFontSize*0.7,
        #               ha='center', va='center',
        #               rotation=90, zorder=numScatterZOrder+3)
        #
        # arrayCBarPos=handFigIn.add_axes([numColorBarXStart,numColorBarYStart,0.02,0.08])
        # handSigColorBar = handFigIn.colorbar(handAxHex,cax=arrayCBarPos)
        # handSigColorBar.ax.tick_params(labelsize=Plot.numFontSize*0.7)
        #
        # arrayTickLoc = plt.MaxNLocator(5)
        # handSigColorBar.locator = arrayTickLoc
        # handSigColorBar.update_ticks()
        #
        # listOutTickLabels = ['']*5
        # listOutTickLabels[0] = 'Low'
        # listOutTickLabels[-1] = 'High'
        #
        # handSigColorBar.ax.set_yticklabels(listOutTickLabels)
        #
        # handAxIn.text(numColorBarLabelXPos, numColorBarLabelYPos,
        #               'TCGA sample\ndensity',
        #               fontsize=Plot.numFontSize*0.7, horizontalalignment='center', verticalalignment='center',
        #               weight='bold', zorder=numScatterZOrder+3)
        # handAxIn.text(numScatterLabelXPos, numScatterLabelYPos,
        #               'Cell line\nclassification',
        #               fontsize=Plot.numFontSize*0.7, horizontalalignment='center', verticalalignment='center',
        #               weight='bold', zorder=numScatterZOrder+3)
        #
        # for iType in range(numCellLineSubtypes):
        #     handAxIn.scatter(numScatterLegendXPos,
        #                      numScatterLegendYPos+numScatterLegendTextYOffset*(iType+1),
        #                      c=listSubtypePlotColors[iType],
        #                      clip_on=False,
        #                      marker='^',
        #                      s=numCellLineMarkerSize, edgecolor='k',
        #                      lw=numCellLineMarkerLineWidth,
        #                      zorder=numScatterZOrder+3)
        #     handAxIn.text(numScatterLegendXPos+numScatterLegendTextXOffset,
        #                   numScatterLegendYPos+numScatterLegendTextYOffset*(iType+1),
        #                   listCellLineSubtypesToDisp[iType],
        #                   fontsize=Plot.numFontSize*0.7,
        #                   verticalalignment='center',
        #                   horizontalalignment='left',
        #                   zorder=numScatterZOrder+3)
        # handAxIn.scatter(numScatterLegendXPos,
        #                  numScatterLegendYPos + numScatterLegendTextYOffset * (numCellLineSubtypes + 1),
        #                  c='g',
        #                  clip_on=False,
        #                  marker='^',
        #                  s=numCellLineMarkerSize, edgecolor='k',
        #                  lw=numCellLineMarkerLineWidth,
        #                  zorder=numScatterZOrder + 3)
        # handAxIn.text(numScatterLegendXPos + numScatterLegendTextXOffset,
        #               numScatterLegendYPos + numScatterLegendTextYOffset * (numCellLineSubtypes + 1),
        #               'EpiCRISPR samples',
        #               fontsize=Plot.numFontSize * 0.7,
        #               verticalalignment='center',
        #               horizontalalignment='left',
        #               zorder=numScatterZOrder + 3)
        return flagResult

    def epi_mes_volcano(flagResult=False,
                        handAxInMDAMB231='undefined',
                        handAxInSUM159='undefined'):
        """Draw volcano plots (logFC vs -log10 adjusted p-value) for the
        MDA-MB-231 and SUM159 differential-expression results, highlighting
        epithelial (green) and mesenchymal (purple) signature genes and
        labelling significant signature genes via adjustText.

        :param flagResult: pass-through flag, returned unchanged
        :param handAxInMDAMB231: axes for the MDA-MB-231 panel
        :param handAxInSUM159: axes for the SUM159 panel
        :return: flagResult
        """
        numAdjPValThresh = 0.05
        dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
        dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(), dictENSGToHGNC.keys()))
        dfMergedRNA = Process.diff_expr_data()
        listDataGenes = dfMergedRNA.index.tolist()
        setDataGenes = set(listDataGenes)
        # genes with no HGNC mapping keep their ENSG ID as the display name
        for strGene in setDataGenes.difference(set(dictENSGToHGNC.keys())):
            dictENSGToHGNC[strGene] = strGene
        dictEpiMesGenes = Process.tan2012_cell_line_genes()
        listEpiGenes = dictEpiMesGenes['epi_genes']
        listMesGenes = dictEpiMesGenes['mes_genes']
        listEpiGenesENSG = [dictHGNCToENSG[strGene] for strGene in listEpiGenes
                            if strGene in dictHGNCToENSG.keys()]
        listMesGenesENSG = [dictHGNCToENSG[strGene] for strGene in listMesGenes
                            if strGene in dictHGNCToENSG.keys()]
        listOutputGeneOrder = Process.fig5_rnaseq_gene_lists()['HeatmapOrder']
        # symmetric x-limits from the largest absolute fold change
        arrayMaxAbsLogFC = np.max(np.abs(dfMergedRNA['MDAMB231:logFC'].values.astype(float)))
        handAxInMDAMB231.scatter(dfMergedRNA['MDAMB231:logFC'].reindex(listEpiGenesENSG).values.astype(float),
                                 -np.log10(dfMergedRNA['MDAMB231:adj.P.Val'].reindex(listEpiGenesENSG).values.astype(
                                     float)),
                                 lw=0.0,
                                 s=4,
                                 color='green',
                                 alpha=0.9,
                                 label='Epithelial',
                                 zorder=5)
        handAxInMDAMB231.scatter(dfMergedRNA['MDAMB231:logFC'].reindex(listMesGenesENSG).values.astype(float),
                                 -np.log10(dfMergedRNA['MDAMB231:adj.P.Val'].reindex(listMesGenesENSG).values.astype(
                                     float)),
                                 lw=0.0,
                                 s=4,
                                 color='purple',
                                 alpha=0.9,
                                 label='Mesenchymal',
                                 zorder=5)
        handAxInMDAMB231.scatter(dfMergedRNA['MDAMB231:logFC'].values.astype(float),
                                 -np.log10(dfMergedRNA['MDAMB231:adj.P.Val'].values.astype(float)),
                                 lw=0.0,
                                 s=4,
                                 color='0.7',
                                 alpha=0.4,
                                 label='Other',
                                 zorder=4)
        handAxInMDAMB231.set_xlim([arrayMaxAbsLogFC*-1.05, arrayMaxAbsLogFC*1.05])
        # hide the right and top spines
        handAxInMDAMB231.spines['right'].set_visible(False)
        handAxInMDAMB231.spines['top'].set_visible(False)
        # label significant epithelial/mesenchymal genes from the heatmap list
        listHandTextMDAMB231 = [handAxInMDAMB231.text(
            dfMergedRNA['MDAMB231:logFC'].loc[strGene].astype(float),
            -np.log10(dfMergedRNA['MDAMB231:adj.P.Val'].loc[strGene].astype(float)),
            dictENSGToHGNC[strGene],
            fontsize=Plot.numFontSize,
            ha='center')
            for strGene in listOutputGeneOrder
            if np.bitwise_and(dfMergedRNA['MDAMB231:adj.P.Val'].loc[strGene].astype(float) < numAdjPValThresh,
                              strGene in listEpiGenesENSG+listMesGenesENSG)]
        adjust_text(listHandTextMDAMB231,
                    arrowprops=dict(arrowstyle='-|>,head_width=0.1,head_length=0.2',
                                    color='k', lw=0.5,
                                    connectionstyle="arc3",
                                    alpha=0.25),
                    )
        handAxInMDAMB231.set_xticks([-5, -2.5, 0, 2.5, 5])
        handAxInMDAMB231.set_ylabel('-log$_{10}$(adj. $p$-value)', fontsize=Plot.numFontSize)
        handAxInMDAMB231.set_xlabel('log$_{2}$(fold change)', fontsize=Plot.numFontSize)
        handAxInMDAMB231.set_title('MDA-MB-231', fontsize=Plot.numFontSize)
        for handTick in handAxInMDAMB231.yaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize)
        for handTick in handAxInMDAMB231.xaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize)
        # repeat for the SUM159 panel
        arrayMaxAbsLogFC = np.max(np.abs(dfMergedRNA['SUM159:logFC'].values.astype(float)))
        handAxInSUM159.scatter(dfMergedRNA['SUM159:logFC'].reindex(listEpiGenesENSG).values.astype(float),
                               -np.log10(dfMergedRNA['SUM159:adj.P.Val'].reindex(listEpiGenesENSG).values.astype(
                                   float)),
                               lw=0.0,
                               s=4,
                               color='green',
                               alpha=0.9,
                               label='Epithelial',
                               zorder=5)
        handAxInSUM159.scatter(dfMergedRNA['SUM159:logFC'].reindex(listMesGenesENSG).values.astype(float),
                               -np.log10(dfMergedRNA['SUM159:adj.P.Val'].reindex(listMesGenesENSG).values.astype(
                                   float)),
                               lw=0.0,
                               s=4,
                               color='purple',
                               alpha=0.9,
                               label='Mesenchymal',
                               zorder=5)
        handAxInSUM159.scatter(dfMergedRNA['SUM159:logFC'].values.astype(float),
                               -np.log10(dfMergedRNA['SUM159:adj.P.Val'].values.astype(float)),
                               lw=0.0,
                               s=4,
                               color='0.7',
                               alpha=0.4,
                               label='Other',
                               zorder=4)
        handAxInSUM159.set_xlim([arrayMaxAbsLogFC*-1.05, arrayMaxAbsLogFC*1.05])
        listHandTextSUM159 = [handAxInSUM159.text(
            dfMergedRNA['SUM159:logFC'].loc[strGene].astype(float),
            -np.log10(dfMergedRNA['SUM159:adj.P.Val'].loc[strGene].astype(float)),
            dictENSGToHGNC[strGene],
            fontsize=Plot.numFontSize,
            ha='center')
            for strGene in listOutputGeneOrder
            if np.bitwise_and(dfMergedRNA['SUM159:adj.P.Val'].loc[strGene].astype(float) < numAdjPValThresh,
                              strGene in listEpiGenesENSG+listMesGenesENSG)]
        # NOTE(review): keyword here is 'arrowProps' (capital P) while the
        # MDA-MB-231 call above uses adjustText's documented 'arrowprops';
        # this mismatch likely means no arrows are drawn here as intended —
        # confirm against the adjustText API.
        adjust_text(listHandTextSUM159,
                    arrowProps=dict(arrowstyle=None)
                    )
        handAxInSUM159.set_xlabel('log$_{2}$(fold change)', fontsize=Plot.numFontSize)
        handAxInSUM159.set_ylabel('-log$_{10}$(adj. $p$-value)', fontsize=Plot.numFontSize)
        handAxInSUM159.set_title('SUM159', fontsize=Plot.numFontSize)
        # hide the right and top spines
        handAxInSUM159.spines['right'].set_visible(False)
        handAxInSUM159.spines['top'].set_visible(False)
        for handTick in handAxInSUM159.yaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize)
        handAxInSUM159.set_xticks([-5, -2.5, 0, 2.5, 5])
        for handTick in handAxInSUM159.xaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize)
        plt.legend(loc='lower right',
                   bbox_to_anchor=(1.25, 2.3),
                   fontsize=Plot.numFontSize,
                   scatterpoints=1,
                   ncol=1,
                   facecolor='white',
                   framealpha=1.0)
        return flagResult

    def tcga_sel_gene_hist(flagResult=False,
                           handAxIn='undefined',
                           strGeneIn='undefined',
                           flagLabelXAxis=False,
                           flagLabelYAxis=False
                           ):
        """Histogram of one gene's TCGA abundance, split into the samples of
        interest (low epithelial / high mesenchymal score; red, on a twin
        y-axis) versus all remaining samples (gray).

        :param flagResult: pass-through flag, returned unchanged
        :param handAxIn: axes to draw into (twin y-axis is created from it)
        :param strGeneIn: HGNC symbol of the gene to plot
        :param flagLabelXAxis: label the x-axis ('Abundance') when True
        :param flagLabelYAxis: label the y-axis ('Frequency') when True
        :return: flagResult
        """
        handAx2 = handAxIn.twinx()
        dfTCGABrCa = Process.tcga_brca()
        dfTCGABrCaScores = Process.tcga_scores()
        # samples of interest: low epithelial AND high mesenchymal score
        arraySampleIsOfInt = np.bitwise_and(
            dfTCGABrCaScores['Epithelial Score'].values.astype(float) < 0.10,
            dfTCGABrCaScores['Mesenchymal Score'].values.astype(float) > 0.15)
        arraySampleOfIntIndices = np.where(arraySampleIsOfInt)[0]
        listSampleOfInt = [dfTCGABrCaScores.index.tolist()[i] for i in arraySampleOfIntIndices]
        arrayOtherSampleIndicess = np.where(~arraySampleIsOfInt)[0]
        listOtherSamples = [dfTCGABrCaScores.index.tolist()[i] for i in arrayOtherSampleIndicess]
        # TCGA row labels look like 'SYMBOL|id'; match on the symbol prefix —
        # assumes exactly one match (IndexError if the gene is absent)
        strTCGAGene = [strGene for strGene in dfTCGABrCa.index.tolist()
                       if strGene.startswith(strGeneIn+'|')][0]
        sliceData = dfTCGABrCa.loc[strTCGAGene]
        numMinVal = np.min(sliceData.values.astype(float))
        numMaxVal = np.max(sliceData.values.astype(float))
        numRange = numMaxVal - numMinVal
        # shared bins (padded 5% either side) so the two histograms align
        arrayHistBins = np.linspace(start=numMinVal-0.05*numRange,
                                    stop=numMaxVal+0.05*numRange,
                                    num=30)
        handAxIn.hist(sliceData[listOtherSamples].values.astype(float),
                      bins=arrayHistBins,
                      zorder=4,
                      alpha=0.7,
                      color='0.6')
        handAxIn.set_xlim([numMinVal-0.05*numRange, numMaxVal+0.05*numRange])
        for handTick in handAxIn.yaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize*0.7)
        for handTick in handAxIn.xaxis.get_major_ticks():
            handTick.label.set_fontsize(Plot.numFontSize*0.7)
        handAx2.hist(sliceData[listSampleOfInt].values.astype(float),
                     bins=arrayHistBins,
                     zorder=5,
                     alpha=0.7,
                     color='#ec1c24')
        handAx2.set_xlim([numMinVal-0.05*numRange, numMaxVal+0.05*numRange])
        handAx2.tick_params(axis='y', labelsize=Plot.numFontSize*0.7, labelcolor='#ec1c24')
        # for handTick in handAx2.yaxis.get_major_ticks():
        #     handTick.label.set_fontsize(Plot.numFontSize*0.7)
        handAxIn.set_title(strGeneIn, fontsize=Plot.numFontSize*0.7)
        if flagLabelXAxis:
            handAxIn.set_xlabel('Abundance', fontsize=Plot.numFontSize*0.7)
        if flagLabelYAxis:
            handAxIn.set_ylabel('Frequency', fontsize=Plot.numFontSize*0.7)
        return flagResult

    def rnaseq_heatmap_and_annot(flagResult=False,
                                 handAxInHeatmap='undefined',
                                 handAxInHMCMap='undefined',
                                 handAxInAnnot='undefined'):
        """Draw the RNA-seq log-fold-change heatmap for the ordered gene list,
        its horizontal colour bar, and a boolean annotation matrix (selected
        GO terms, TF status, epithelial/mesenchymal signature membership).

        :param flagResult: pass-through flag, returned unchanged
        :param handAxInHeatmap: axes for the logFC heatmap
        :param handAxInHMCMap: axes for the heatmap colour bar
        :param handAxInAnnot: axes for the gene-annotation matrix
        :return: flagResult
        """
        dictOutRNASeqCond = {'SUM159:logFC':'SUM159',
                             'MDAMB231:logFC':'MDA-\nMB-231'}
        dictGOLabel = {'GO:0070160':'Tight junction',
                       'GO:0005913':'Adherens junction',
                       'GO:0005911':'Cell-cell junction'}
        listOutputSelGO = list(dictGOLabel.keys())
        listTFs = Process.transcription_factors()
        dictEpiMes = Process.tan2012_cell_line_genes()
        dictENSGToHGNC = Process.dict_gtf_ensg_to_hgnc()
        dfMergedRNA = Process.diff_expr_data()
        listDataGenes = dfMergedRNA.index.tolist()
        setDataGenes = set(listDataGenes)
        # genes with no HGNC mapping keep their ENSG ID as the display name
        for strGene in setDataGenes.difference(set(dictENSGToHGNC.keys())):
            dictENSGToHGNC[strGene] = strGene
        listFCOutCols = ['MDAMB231:logFC', 'SUM159:logFC']
        listOutputGeneOrder = Process.fig5_rnaseq_gene_lists()['HeatmapOrder']
        # symmetric colour scale around zero
        numMaxAbsFC = np.max(np.abs(
            np.ravel(dfMergedRNA[listFCOutCols].reindex(listOutputGeneOrder).values.astype(float))))
        handRNASeqHM = handAxInHeatmap.matshow(dfMergedRNA[listFCOutCols].reindex(listOutputGeneOrder),
                                               vmin=-numMaxAbsFC, vmax=numMaxAbsFC,
                                               cmap=plt.cm.PRGn, aspect='auto')
        handAxInHeatmap.set_xticks([])
        handAxInHeatmap.set_yticks([])
        for iGene in range(len(listOutputGeneOrder)):
            strENSG = listOutputGeneOrder[iGene]
            # NOTE(review): x == x is False only when x is NaN, so this reads
            # as a NaN guard that falls back to the ENSG ID; given the
            # mapping fill-in loop above it may never take the else branch —
            # confirm whether `strENSG in dictENSGToHGNC` was intended.
            if dictENSGToHGNC[strENSG] == dictENSGToHGNC[strENSG]:
                strGeneOut = dictENSGToHGNC[strENSG]
            else:
                strGeneOut = strENSG
            handAxInHeatmap.text(-0.7, iGene,
                                 strGeneOut,
                                 ha='right', va='center',
                                 fontsize=Plot.numFontSize*0.65,
                                 fontstyle='italic')
            # thin separators between gene rows
            if iGene < len(listOutputGeneOrder)-1:
                handAxInHeatmap.axhline(y=iGene+0.5,
                                        xmin=0.0, xmax=1.0,
                                        color='0.5', lw=0.25)
        for iCond in range(len(listFCOutCols)):
            handAxInHeatmap.text(iCond, -0.7,
                                 dictOutRNASeqCond[listFCOutCols[iCond]],
                                 ha='center', va='bottom',
                                 fontsize=Plot.numFontSize)
            if iCond < len(listFCOutCols)-1:
                handAxInHeatmap.axvline(x=iCond+0.5,
                                        ymin=0.0, ymax=1.0,
                                        color='0.5', lw=0.25)
        for strAxLoc in ['bottom', 'left', 'right', 'top']:
            handAxInHeatmap.spines[strAxLoc].set_linewidth(0.1)
        handSigColorBar = plt.colorbar(handRNASeqHM, cax=handAxInHMCMap,
                                       orientation='horizontal')
        handSigColorBar.ax.tick_params(width=0.5, length=2,
                                       labelsize=Plot.numFontSize * 0.8)
        arrayTickLoc = plt.MaxNLocator(3)
        handSigColorBar.locator = arrayTickLoc
        handSigColorBar.update_ticks()
        for strAxLoc in ['bottom', 'left', 'right', 'top']:
            handAxInHMCMap.spines[strAxLoc].set_linewidth(0.1)
        handAxInHeatmap.text(0.5,
                             len(listOutputGeneOrder)+4.5,
                             'RNA-seq log$_{2}$FC',
                             ha='center', va='bottom',
                             fontsize=Plot.numFontSize)
        dfGeneOntology = Process.go_rnaseq_diffexpr_genes()
        listOutputGeneOrderHGNC = [dictENSGToHGNC[strGene] for strGene in listOutputGeneOrder]
        dfGeneAnnot = dfGeneOntology[listOutputSelGO].reindex(listOutputGeneOrderHGNC)
        # NOTE(review): listTFs is the DataFrame returned by
        # Process.transcription_factors(), so `strGene in listTFs` tests the
        # COLUMN labels, and the comprehension iterates ENSG IDs while the
        # Series index uses HGNC symbols — this column is likely all-False;
        # confirm whether membership in listTFs['HGNC'] was intended.
        dfGeneAnnot['Transcription factor'] = pd.Series([strGene in listTFs for strGene in listOutputGeneOrder],
                                                        index=listOutputGeneOrderHGNC)
        dfGeneAnnot['Epithelial gene'] = pd.Series([strGene in dictEpiMes['epi_genes'] for strGene in listOutputGeneOrderHGNC],
                                                   index=listOutputGeneOrderHGNC)
        dfGeneAnnot['Mesenchymal gene'] = pd.Series([strGene in dictEpiMes['mes_genes'] for strGene in listOutputGeneOrderHGNC],
                                                    index=listOutputGeneOrderHGNC)
        listGeneAnnotCols = dfGeneAnnot.columns.tolist()
        # append human-readable labels to the GO-term column headers
        for iCol in range(len(listGeneAnnotCols)):
            if listGeneAnnotCols[iCol] in dictGOLabel.keys():
                listGeneAnnotCols[iCol] = listGeneAnnotCols[iCol] + '\n' + dictGOLabel[listGeneAnnotCols[iCol]]
        handAxInAnnot.matshow(np.nan_to_num(dfGeneAnnot.values.astype(float)),
                              cmap=plt.cm.Greys,
                              vmin=0, vmax=1,
                              aspect='auto'
                              )
        handAxInAnnot.set_xticks([])
        handAxInAnnot.set_yticks([])
        for iCol in range(len(listGeneAnnotCols)):
            handAxInAnnot.text(iCol-0.3, -1,
                               listGeneAnnotCols[iCol],
                               ha='left', va='bottom',
                               fontsize=Plot.numFontSize * 0.7,
                               rotation=70
                               )
            if iCol < len(listGeneAnnotCols)-1:
                handAxInAnnot.axvline(x=iCol+0.5,
                                      ymin=0.0, ymax=1.0,
                                      color='0.5', lw=0.25)
        for iRow in range(np.shape(dfGeneAnnot)[0]):
            if iRow < np.shape(dfGeneAnnot)[0]-1:
                handAxInAnnot.axhline(y=iRow+0.5,
                                      xmin=0.0, xmax=1.0,
                                      color='0.5', lw=0.25)
        return flagResult
class Plot:
    """Figure-level assembly: composes PlotFunc panels into output figures
    and writes them to disk."""
    # shared output location and styling constants
    strOutputLoc = PathDir.pathOutFolder
    listFileFormats = ['png', 'pdf']
    numFontSize = 7
    numScatterMarkerSize = 3

    def figure_five(flagResult=False):
        """Assemble Figure 5 (volcano plots, RNA-seq heatmap + GO annotation,
        ES/MS hexbin landscape, TCGA gene histograms) and save it as
        figure_5/Figure5.png and .pdf under Plot.strOutputLoc.

        :param flagResult: pass-through flag, returned unchanged
        :return: flagResult
        """
        tupleFigSize = (6.5, 9.5)
        # panel geometry (figure-fraction coordinates)
        numVolcanoHeight = 0.17
        numVolcanoWidth = 0.37
        numHexbinHeight = 0.33
        # scale hexbin width by the figure aspect ratio to keep it square
        numHexbinWidth = numHexbinHeight * (tupleFigSize[1] / tupleFigSize[0])
        numHeatMapPanelHeight = 0.43
        numCMapHeight = 0.0075
        # 3x2 grid in the lower right for the TCGA gene histograms
        arrayGridSpec = matplotlib.gridspec.GridSpec(
            nrows=3, ncols=2,
            left=0.65, right=0.95,
            bottom=0.05, top=0.38,
            hspace=0.50, wspace=0.65
        )
        numABYPos = 0.93
        numFig5YPos = 0.95
        dictPanelLoc = {'Volcano:MDA-MB-231':[0.09, 0.90-numVolcanoHeight, numVolcanoWidth, numVolcanoHeight],
                        'Volcano:SUM159':[0.09, 0.48, numVolcanoWidth, numVolcanoHeight],
                        'HeatMap:RNA-seq':[0.64, 0.47, 0.14, numHeatMapPanelHeight],
                        'HeatMap_cmap:RNA-seq':[0.66, 0.455, 0.10, numCMapHeight],
                        'HeatMap:RNA-seq_GO':[0.79, 0.47, 0.18, numHeatMapPanelHeight],
                        'Hexbin_Landscape':[0.07, 0.05, numHexbinWidth, numHexbinHeight]
                        }
        handFig = plt.figure(figsize=tupleFigSize)
        # # # # # # # # # # # # #
        # Volcano plots
        # create the axes and pass to the associated plotting function
        handAxMDAMB231 = handFig.add_axes(dictPanelLoc['Volcano:MDA-MB-231'])
        handAxSUM159 = handFig.add_axes(dictPanelLoc['Volcano:SUM159'])
        _ = PlotFunc.epi_mes_volcano(handAxInMDAMB231=handAxMDAMB231,
                                     handAxInSUM159=handAxSUM159)
        structAxPos = handAxMDAMB231.get_position()
        # panel letter 'a' and figure label
        handFig.text(structAxPos.x0-0.15*structAxPos.width,
                     numABYPos,
                     'a',
                     ha='left',
                     va='center',
                     fontsize=Plot.numFontSize*1.5,
                     fontweight='bold')
        handFig.text(structAxPos.x0-0.15*structAxPos.width,
                     numFig5YPos,
                     'Fig. 5',
                     ha='left',
                     va='center',
                     fontsize=Plot.numFontSize*1.5,
                     fontweight='bold')
        # # # # # # # # # # # # #
        # RNA-seq logFC & GO results
        handAxHMCMap = handFig.add_axes(dictPanelLoc['HeatMap_cmap:RNA-seq'])
        handAxHeatmap = handFig.add_axes(dictPanelLoc['HeatMap:RNA-seq'])
        handAxAnnot = handFig.add_axes(dictPanelLoc['HeatMap:RNA-seq_GO'])
        _ = PlotFunc.rnaseq_heatmap_and_annot(handAxInHeatmap=handAxHeatmap,
                                              handAxInHMCMap=handAxHMCMap,
                                              handAxInAnnot=handAxAnnot)
        structAxPos = handAxHeatmap.get_position()
        handFig.text(structAxPos.x0-0.5*structAxPos.width,
                     numABYPos,
                     'b',
                     ha='left',
                     va='center',
                     fontsize=Plot.numFontSize*1.5,
                     fontweight='bold')
        # # # # # # # # # # # # # # #
        # Hexbin landscape
        handAx = handFig.add_axes(dictPanelLoc['Hexbin_Landscape'])
        _ = PlotFunc.es_ms_landscape(handAxIn=handAx)
        # # # # # # # # # # # # # # #
        # Histograms
        listOutGenes = ['ZEB1', 'ESRP1',
                        'F11R', 'MAP7',
                        'CDS1', 'SH2D3A']
        # walk the 3x2 grid row-major; label y on the first column and x on
        # the last row only
        numOutGeneRow = 0
        numOutGeneCol = 0
        for iGene in range(len(listOutGenes)):
            strOutGene = listOutGenes[iGene]
            handAx = plt.subplot(arrayGridSpec[numOutGeneRow, numOutGeneCol])
            if numOutGeneCol == 0:
                flagLabelY = True
            else:
                flagLabelY = False
            if numOutGeneRow == 2:
                flagLabelX = True
            else:
                flagLabelX = False
            _ = PlotFunc.tcga_sel_gene_hist(handAxIn=handAx,
                                            strGeneIn=strOutGene,
                                            flagLabelYAxis=flagLabelY,
                                            flagLabelXAxis=flagLabelX)
            numOutGeneCol += 1
            if numOutGeneCol >= 2:
                numOutGeneRow += 1
                numOutGeneCol=0
        pathOut = os.path.join(Plot.strOutputLoc, 'figure_5')
        # NOTE(review): `ext=` is not a documented savefig keyword (format is
        # inferred from the filename); newer Matplotlib rejects unknown
        # kwargs — confirm the pinned Matplotlib version tolerates this.
        for strFormat in Plot.listFileFormats:
            handFig.savefig(os.path.join(pathOut, 'Figure5.'+strFormat),
                            ext=strFormat, dpi=300)
        plt.close(handFig)
        return flagResult

    def off_targets(flagResult=False):
        """For each cell line and differential-expression condition, draw a
        row of per-guide volcano plots highlighting predicted off-target
        genes (red) and ZEB1 (green), and save one figure per condition.

        :param flagResult: pass-through flag, returned unchanged
        :return: flagResult
        """
        numMaxXTicks = 4
        numMaxYTicks = 4
        dictENSGToHGNC = BiomartFunctions.IdentMappers.defineEnsemblGeneToHGNCSymbolDict()
        dictHGNCToENSG = dict(zip(dictENSGToHGNC.values(),dictENSGToHGNC.keys()))
        dfOffTargets = Process.off_targets()
        dfGuides = Process.guides()
        # guide identifiers formatted as '<sequence>_<PAM>'
        listUniqueGuides = [dfGuides['Sequence'].iloc[i] + '_' + dfGuides['PAM'].iloc[i] for i in range(np.shape(dfGuides)[0])]
        # one row of four panels (one per guide)
        arrayGridSpec = matplotlib.gridspec.GridSpec(nrows=1, ncols=4,
                                                     left=0.06, right=0.97,
                                                     bottom=0.13, top=0.90,
                                                     wspace=0.2)
        dfMerged = Process.diff_expr_data()
        listLogFCCols = [strCol for strCol in dfMerged.columns.tolist() if ':logFC' in strCol]
        # listAdjPValCols = [strCol for strCol in dfMerged.columns.tolist() if ':padj' in strCol]
        listAllDiffExprConds = [strCol.split(':logFC')[0] for strCol in listLogFCCols]
        # parse off-target binding scores; empty strings become 0.0
        listScoreStrings = dfOffTargets['Binds_score'].values.tolist()
        listScores = []
        for strScore in listScoreStrings:
            if len(strScore) > 0:
                # NOTE(review): np.float was removed in NumPy >= 1.24; this
                # needs float(strScore) on modern NumPy — confirm pinned
                # version.
                listScores.append(np.float(strScore))
            else:
                listScores.append(0.0)
        arrayScores = np.array(listScores)
        # rank off-targets from highest to lowest score (used by the
        # commented-out export below)
        arraySortedByScoreIndices = np.argsort(arrayScores)[::-1]
        # dfOut = dfOffTargets[['Guide_PAM', 'Binds_HGNC', 'Binds_score', 'Binds_numMisMatch']].reindex(arraySortedByScoreIndices)
        # listGenomicLoc = []
        # for iRow in arraySortedByScoreIndices:
        #     listGenomicLoc.append('chr' + '{}'.format(dfOffTargets['Binds_chr'].iloc[iRow]) + ':' +
        #                           '{}'.format(dfOffTargets['Binds_start'].iloc[iRow]) + '-' +
        #                           '{}'.format(dfOffTargets['Binds_end'].iloc[iRow]) +
        #                           ' (' + dfOffTargets['Binds_strand'].iloc[iRow] + ')')
        # dfOut['Position'] = pd.Series(listGenomicLoc, index=dfOut.index.tolist())
        #
        # listGeneFC231AllGuides = []
        # # listGeneFC231Guide4 = []
        # listGeneFC159AllGuides = []
        # # listGeneFC159Guide4 = []
        # for iRow in range(len(arraySortedByScoreIndices)):
        #     strGene = dfOut['Binds_HGNC'].iloc[iRow]
        #     if not strGene == 'NoGene':
        #         strGeneENSG = dictHGNCToENSG[strGene]
        #         if strGeneENSG in dfMerged.index.tolist():
        #             listGeneFC231AllGuides.append(dfMerged['MDAMB231:logFC'].loc[strGeneENSG])
        #             # listGeneFC231Guide4.append(dfMerged['MDAMB231-G4_vs_emptyVect:log2FoldChange'].loc[strGeneENSG])
        #             listGeneFC159AllGuides.append(dfMerged['SUM159:logFC'].loc[strGeneENSG])
        #             # listGeneFC159Guide4.append(dfMerged['SUM159-G4_vs_emptyVect:log2FoldChange'].loc[strGeneENSG])
        #         else:
        #             listGeneFC231AllGuides.append('-')
        #             # listGeneFC231Guide4.append('-')
        #             listGeneFC159AllGuides.append('-')
        #     else:
        #         listGeneFC231AllGuides.append('-')
        #         # listGeneFC231Guide4.append('-')
        #         listGeneFC159AllGuides.append('-')
        #         # listGeneFC159Guide4.append('-')
        # dfOut['MDA-MB-231:logFC:g4_vs_EVC'] = pd.Series(listGeneFC231Guide4, index=dfOut.index.tolist())
        # dfOut['MDA-MB-231:logFC:gAll_vs_EVC'] = pd.Series(listGeneFC231AllGuides, index=dfOut.index.tolist())
        # dfOut['SUM159:logFC:gAll_vs_EVC'] = pd.Series(listGeneFC159AllGuides, index=dfOut.index.tolist())
        # dfOut['SUM159:logFC:g4_vs_EVC'] = pd.Series(listGeneFC159Guide4, index=dfOut.index.tolist())
        # dfOut.to_csv(os.path.join(Plot.strOutputLoc, 'Off_targets_to_check.tsv'), sep='\t', header=True, index=False)
        for strCellLine in Process.listLines:
            listDiffExprConds = [strCol for strCol in listAllDiffExprConds if strCellLine in strCol]
            # symmetric x-limits shared across this cell line's conditions
            arrayFlatFC = np.ravel(np.nan_to_num(
                dfMerged[[strCol for strCol in listLogFCCols if strCellLine in strCol]].values.astype(float)))
            numMaxAbsVal = np.max(np.abs(arrayFlatFC))
            for strDiffExpr in listDiffExprConds:
                handFig = plt.figure()
                handFig.set_size_inches(w=9,h=3.5)
                strLogFC = strDiffExpr + ':logFC'
                strPVal = strDiffExpr + ':adj.P.Val'
                # only genes with both a fold change and an adjusted p-value
                arrayGeneToPlot = np.bitwise_and(dfMerged[strLogFC].notnull(),
                                                 dfMerged[strPVal].notnull())
                for iGuide in range(len(listUniqueGuides)):
                    listOffTargets = dfOffTargets['Binds_HGNC'][dfOffTargets['Guide_PAM'] == listUniqueGuides[iGuide]].tolist()
                    # drop intergenic hits and the intended target ZEB1
                    listOffTargetGenes = [strTarget
                                          for strTarget in listOffTargets
                                          if not np.bitwise_or(strTarget == 'NoGene', strTarget == 'ZEB1')]
                    listOffTargetGenesENSG = [dictHGNCToENSG[strGene] for strGene in listOffTargetGenes]
                    handAx = plt.subplot(arrayGridSpec[iGuide])
                    # background: all genes in gray
                    handAx.scatter(dfMerged[strLogFC].loc[arrayGeneToPlot].values.astype(float),
                                   -np.log10(dfMerged[strPVal].loc[arrayGeneToPlot].values.astype(float)),
                                   s=Plot.numScatterMarkerSize,
                                   c='0.5',
                                   alpha=0.2,
                                   edgecolors=None)
                    # predicted off-target genes outlined in red
                    handAx.scatter(dfMerged[strLogFC].loc[listOffTargetGenesENSG].values.astype(float),
                                   -np.log10(dfMerged[strPVal].loc[listOffTargetGenesENSG].values.astype(float)),
                                   s=Plot.numScatterMarkerSize,
                                   edgecolors='r',
                                   lw=1,
                                   c=None)
                    # the intended target ZEB1 outlined in green
                    handAx.scatter(dfMerged[strLogFC].loc[dictHGNCToENSG['ZEB1']].astype(float),
                                   -np.log10(dfMerged[strPVal].loc[dictHGNCToENSG['ZEB1']].astype(float)),
                                   s=Plot.numScatterMarkerSize,
                                   edgecolors=[0.0, 1.0, 0.0],
                                   lw=1,
                                   c=None)
                    handAx.set_xlim([-numMaxAbsVal*1.03, numMaxAbsVal*1.03])
                    handAx.set_xlabel('log$_{2}$FC', fontsize=Plot.numFontSize)
                    # NOTE(review): both locator assignments below target the
                    # x-axis; the second (numMaxYTicks) was presumably meant
                    # for handAx.yaxis — confirm intent.
                    arrayXTickLoc = plt.MaxNLocator(numMaxXTicks)
                    handAx.xaxis.set_major_locator(arrayXTickLoc)
                    arrayXTickLoc = plt.MaxNLocator(numMaxYTicks)
                    handAx.xaxis.set_major_locator(arrayXTickLoc)
                    if iGuide == 0:
                        handAx.set_ylabel('-log$_{10}$($p$-value)', fontsize=Plot.numFontSize)
                        for handTick in handAx.yaxis.get_major_ticks():
                            handTick.label.set_fontsize(Plot.numFontSize)
                    else:
                        handAx.set_ylabel('')
                        handAx.set_yticklabels([])
                    handAx.set_title(listUniqueGuides[iGuide], fontsize=Plot.numFontSize)
                    for handTick in handAx.xaxis.get_major_ticks():
                        handTick.label.set_fontsize(Plot.numFontSize)
                handFig.text(x=0.5, y=0.99, s=strDiffExpr,
                             ha='center', va='top', fontsize=Plot.numFontSize*1.3)
                for strExt in Plot.listFileFormats:
                    handFig.savefig(os.path.join(Plot.strOutputLoc,
                                                 'ZEB1_offTarg_'+strCellLine+'_'+strDiffExpr+'.'+strExt),
                                    ext=strExt, dpi=300)
                plt.close(handFig)
        return flagResult
# Script entry point: build and save Figure 5. The commented-out calls below
# are alternative pipeline stages kept for manual/interactive use.
# _ = Process.all_epi_mes_scores()
_ = Plot.figure_five()
# dfDiffExpr = Process.diff_expr_data()
# listCommonGenes = Process.common_rna_genes()
# dfTCGARNA = Process.tcga_brca()
# dfTCGAScores = Process.tcga_scores(flagPerformExtraction=True)
# dfCCLERNA = Process.ccle_brca()
# dfCCLEScores = Process.ccle_scores(flagPerformExtraction=True)
# dictBrCaLineSubtype = Process.ccle_brca_subtypes()
# dfLocalScores = Process.local_scores()
# _ = Process.go_rnaseq_diffexpr_genes()
# _ = Process.tcga_brca()
# _ = Plot.off_targets()
# _ = PlotFunc.es_ms_landscape()
# _ = Plot.es_ms_landscape(
#         strDataLoc=PathDir.pathProcResults,
#         strFileName='ES_vs_MS_landscape',
#         listQuantFileNames=Process.listQuantFiles,
#         listLocalCellLines = Process.listLines,
#         listOfListsConds=Process.listOfListsConds,
#         dictOfDictXYOffsets=dictOfDictOffsets)
# _ = Output.merged_results()
# _ = Plot.heatmap()
# _ = Plot.diff_expr_genes_vs_tcga()
# _ = Plot.sig_meth_vs_rna()
# _ = Plot.ppi()
# _ = Process.tcga_scores()
# dfTCGA = TCGAFunctions.PanCancer.extract_mess_rna()
| [
"pickle.dump",
"csv.reader",
"numpy.abs",
"numpy.isnan",
"numpy.argsort",
"numpy.shape",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.bitwise_or",
"os.path.join",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"os.path.dirname",
"networkx.find_cycle",
"networkx.topological_sort",
... | [((387, 415), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (402, 415), False, 'import os\n'), ((449, 477), 'os.path.normpath', 'os.path.normpath', (['dirCurrent'], {}), '(dirCurrent)\n', (465, 477), False, 'import os\n'), ((569, 607), 'os.path.join', 'os.path.join', (['pathOutFolder', 'strFolder'], {}), '(pathOutFolder, strFolder)\n', (581, 607), False, 'import os\n'), ((712, 752), 'os.path.join', 'os.path.join', (['pathProcRNAData', 'strFolder'], {}), '(pathProcRNAData, strFolder)\n', (724, 752), False, 'import os\n'), ((846, 882), 'os.path.join', 'os.path.join', (['pathRefData', 'strFolder'], {}), '(pathRefData, strFolder)\n', (858, 882), False, 'import os\n'), ((990, 1029), 'os.path.join', 'os.path.join', (['pathPublicData', 'strFolder'], {}), '(pathPublicData, strFolder)\n', (1002, 1029), False, 'import os\n'), ((3831, 3874), 'pandas.concat', 'pd.concat', (['listDFToMerge'], {'axis': '(1)', 'sort': '(True)'}), '(listDFToMerge, axis=1, sort=True)\n', (3840, 3874), True, 'import pandas as pd\n'), ((3964, 3978), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3976, 3978), True, 'import pandas as pd\n'), ((4104, 4151), 'os.path.join', 'os.path.join', (['PathDir.pathOutFolder', '"""figure_5"""'], {}), "(PathDir.pathOutFolder, 'figure_5')\n", (4116, 4151), False, 'import os\n'), ((10148, 10162), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10160, 10162), True, 'import pandas as pd\n'), ((10239, 10286), 'os.path.join', 'os.path.join', (['PathDir.pathOutFolder', '"""figure_5"""'], {}), "(PathDir.pathOutFolder, 'figure_5')\n", (10251, 10286), False, 'import os\n'), ((12584, 12598), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (12596, 12598), True, 'import pandas as pd\n'), ((12675, 12722), 'os.path.join', 'os.path.join', (['PathDir.pathOutFolder', '"""figure_5"""'], {}), "(PathDir.pathOutFolder, 'figure_5')\n", (12687, 12722), False, 'import os\n'), ((15367, 15414), 'os.path.join', 'os.path.join', 
(['PathDir.pathOutFolder', '"""figure_5"""'], {}), "(PathDir.pathOutFolder, 'figure_5')\n", (15379, 15414), False, 'import os\n'), ((15878, 15936), 'numpy.any', 'np.any', (['[flagScoreTCGA, flagScoreCCLE, flagScoreLocalData]'], {}), '([flagScoreTCGA, flagScoreCCLE, flagScoreLocalData])\n', (15884, 15936), True, 'import numpy as np\n'), ((21825, 21860), 'numpy.argsort', 'np.argsort', (['arrayProdRankAcrossCond'], {}), '(arrayProdRankAcrossCond)\n', (21835, 21860), True, 'import numpy as np\n'), ((24557, 24606), 'numpy.array', 'np.array', (['listIndicesForGuidesOfInt'], {'dtype': 'np.int'}), '(listIndicesForGuidesOfInt, dtype=np.int)\n', (24565, 24606), True, 'import numpy as np\n'), ((43890, 43967), 'pandas.DataFrame', 'pd.DataFrame', (["{'ENSG': listAllENSG, 'HGNC': listAllHGNC, 'Type': listAllType}"], {}), "({'ENSG': listAllENSG, 'HGNC': listAllHGNC, 'Type': listAllType})\n", (43902, 43967), True, 'import pandas as pd\n'), ((47652, 47680), 'numpy.min', 'np.min', (['[numMinES, numMinMS]'], {}), '([numMinES, numMinMS])\n', (47658, 47680), True, 'import numpy as np\n'), ((47703, 47731), 'numpy.max', 'np.max', (['[numMaxES, numMaxMS]'], {}), '([numMaxES, numMaxMS])\n', (47709, 47731), True, 'import numpy as np\n'), ((51688, 51717), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['numMaxXTicks'], {}), '(numMaxXTicks)\n', (51703, 51717), True, 'import matplotlib.pyplot as plt\n'), ((51800, 51829), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['numMaxYTicks'], {}), '(numMaxYTicks)\n', (51815, 51829), True, 'import matplotlib.pyplot as plt\n'), ((63980, 64129), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'bbox_to_anchor': '(1.25, 2.3)', 'fontsize': 'Plot.numFontSize', 'scatterpoints': '(1)', 'ncol': '(1)', 'facecolor': '"""white"""', 'framealpha': '(1.0)'}), "(loc='lower right', bbox_to_anchor=(1.25, 2.3), fontsize=Plot.\n numFontSize, scatterpoints=1, ncol=1, facecolor='white', framealpha=1.0)\n", (63990, 64129), True, 
'import matplotlib.pyplot as plt\n'), ((65568, 65660), 'numpy.linspace', 'np.linspace', ([], {'start': '(numMinVal - 0.05 * numRange)', 'stop': '(numMaxVal + 0.05 * numRange)', 'num': '(30)'}), '(start=numMinVal - 0.05 * numRange, stop=numMaxVal + 0.05 *\n numRange, num=30)\n', (65579, 65660), True, 'import numpy as np\n'), ((70010, 70082), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['handRNASeqHM'], {'cax': 'handAxInHMCMap', 'orientation': '"""horizontal"""'}), "(handRNASeqHM, cax=handAxInHMCMap, orientation='horizontal')\n", (70022, 70082), True, 'import matplotlib.pyplot as plt\n'), ((70283, 70301), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['(3)'], {}), '(3)\n', (70298, 70301), True, 'import matplotlib.pyplot as plt\n'), ((71042, 71146), 'pandas.Series', 'pd.Series', (['[(strGene in listTFs) for strGene in listOutputGeneOrder]'], {'index': 'listOutputGeneOrderHGNC'}), '([(strGene in listTFs) for strGene in listOutputGeneOrder], index=\n listOutputGeneOrderHGNC)\n', (71051, 71146), True, 'import pandas as pd\n'), ((71219, 71342), 'pandas.Series', 'pd.Series', (["[(strGene in dictEpiMes['epi_genes']) for strGene in listOutputGeneOrderHGNC]"], {'index': 'listOutputGeneOrderHGNC'}), "([(strGene in dictEpiMes['epi_genes']) for strGene in\n listOutputGeneOrderHGNC], index=listOutputGeneOrderHGNC)\n", (71228, 71342), True, 'import pandas as pd\n'), ((71417, 71540), 'pandas.Series', 'pd.Series', (["[(strGene in dictEpiMes['mes_genes']) for strGene in listOutputGeneOrderHGNC]"], {'index': 'listOutputGeneOrderHGNC'}), "([(strGene in dictEpiMes['mes_genes']) for strGene in\n listOutputGeneOrderHGNC], index=listOutputGeneOrderHGNC)\n", (71426, 71540), True, 'import pandas as pd\n'), ((73438, 73559), 'matplotlib.gridspec.GridSpec', 'matplotlib.gridspec.GridSpec', ([], {'nrows': '(3)', 'ncols': '(2)', 'left': '(0.65)', 'right': '(0.95)', 'bottom': '(0.05)', 'top': '(0.38)', 'hspace': '(0.5)', 'wspace': '(0.65)'}), '(nrows=3, ncols=2, left=0.65, right=0.95,\n 
bottom=0.05, top=0.38, hspace=0.5, wspace=0.65)\n', (73466, 73559), False, 'import matplotlib\n'), ((74260, 74292), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'tupleFigSize'}), '(figsize=tupleFigSize)\n', (74270, 74292), True, 'import matplotlib.pyplot as plt\n'), ((77588, 77631), 'os.path.join', 'os.path.join', (['Plot.strOutputLoc', '"""figure_5"""'], {}), "(Plot.strOutputLoc, 'figure_5')\n", (77600, 77631), False, 'import os\n'), ((77812, 77830), 'matplotlib.pyplot.close', 'plt.close', (['handFig'], {}), '(handFig)\n', (77821, 77830), True, 'import matplotlib.pyplot as plt\n'), ((78359, 78466), 'matplotlib.gridspec.GridSpec', 'matplotlib.gridspec.GridSpec', ([], {'nrows': '(1)', 'ncols': '(4)', 'left': '(0.06)', 'right': '(0.97)', 'bottom': '(0.13)', 'top': '(0.9)', 'wspace': '(0.2)'}), '(nrows=1, ncols=4, left=0.06, right=0.97,\n bottom=0.13, top=0.9, wspace=0.2)\n', (78387, 78466), False, 'import matplotlib\n'), ((79253, 79273), 'numpy.array', 'np.array', (['listScores'], {}), '(listScores)\n', (79261, 79273), True, 'import numpy as np\n'), ((1832, 1883), 'os.path.join', 'os.path.join', (['PathDir.pathProcRNAData', 'strQuantFile'], {}), '(PathDir.pathProcRNAData, strQuantFile)\n', (1844, 1883), False, 'import os\n'), ((18173, 18228), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', '"""sample_info.csv"""'], {}), "(PathDir.pathPublicData, 'sample_info.csv')\n", (18185, 18228), False, 'import os\n'), ((23267, 23322), 'os.path.join', 'os.path.join', (['PathDir.pathProcResults', 'strGuideFileName'], {}), '(PathDir.pathProcResults, strGuideFileName)\n', (23279, 23322), False, 'import os\n'), ((23587, 23605), 'numpy.shape', 'np.shape', (['dfGuides'], {}), '(dfGuides)\n', (23595, 23605), True, 'import numpy as np\n'), ((23968, 23988), 'csv.reader', 'csv.reader', (['handFile'], {}), '(handFile)\n', (23978, 23988), False, 'import csv\n'), ((24489, 24513), 'numpy.where', 'np.where', (['listIsGuideRow'], {}), '(listIsGuideRow)\n', (24497, 
24513), True, 'import numpy as np\n'), ((30700, 30770), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', '"""Thiery_generic_EMT_signatures.txt"""'], {}), "(PathDir.pathRefData, 'Thiery_generic_EMT_signatures.txt')\n", (30712, 30770), False, 'import os\n'), ((32477, 32547), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', '"""Thiery_generic_EMT_signatures.txt"""'], {}), "(PathDir.pathRefData, 'Thiery_generic_EMT_signatures.txt')\n", (32489, 32547), False, 'import os\n'), ((40609, 40702), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'arrayGOMembMatrix', 'index': 'listUniqueMessRNAs', 'columns': 'listUniqueGONums'}), '(data=arrayGOMembMatrix, index=listUniqueMessRNAs, columns=\n listUniqueGONums)\n', (40621, 40702), True, 'import pandas as pd\n'), ((43328, 43384), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', '"""Homo_sapiens_TF.txt"""'], {}), "(PathDir.pathRefData, 'Homo_sapiens_TF.txt')\n", (43340, 43384), False, 'import os\n'), ((43482, 43548), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', '"""Homo_sapiens_TF_cofactors.txt"""'], {}), "(PathDir.pathRefData, 'Homo_sapiens_TF_cofactors.txt')\n", (43494, 43548), False, 'import os\n'), ((48726, 48920), 'matplotlib.pyplot.scatter', 'plt.scatter', (["dfCCLEScores['Epithelial Score'].iloc[iCellLine]", "dfCCLEScores['Mesenchymal Score'].iloc[iCellLine]"], {'c': 'strColor', 'marker': '"""^"""', 's': '(25)', 'edgecolors': "['k']", 'zorder': 'numScatterZOrder'}), "(dfCCLEScores['Epithelial Score'].iloc[iCellLine], dfCCLEScores[\n 'Mesenchymal Score'].iloc[iCellLine], c=strColor, marker='^', s=25,\n edgecolors=['k'], zorder=numScatterZOrder)\n", (48737, 48920), True, 'import matplotlib.pyplot as plt\n'), ((64903, 64931), 'numpy.where', 'np.where', (['arraySampleIsOfInt'], {}), '(arraySampleIsOfInt)\n', (64911, 64931), True, 'import numpy as np\n'), ((65066, 65095), 'numpy.where', 'np.where', (['(~arraySampleIsOfInt)'], {}), '(~arraySampleIsOfInt)\n', (65074, 65095), True, 'import numpy 
as np\n'), ((76865, 76921), 'matplotlib.pyplot.subplot', 'plt.subplot', (['arrayGridSpec[numOutGeneRow, numOutGeneCol]'], {}), '(arrayGridSpec[numOutGeneRow, numOutGeneCol])\n', (76876, 76921), True, 'import matplotlib.pyplot as plt\n'), ((79311, 79334), 'numpy.argsort', 'np.argsort', (['arrayScores'], {}), '(arrayScores)\n', (79321, 79334), True, 'import numpy as np\n'), ((2342, 2392), 'os.path.join', 'os.path.join', (['PathDir.pathProcRNAData', 'strFileName'], {}), '(PathDir.pathProcRNAData, strFileName)\n', (2354, 2392), False, 'import os\n'), ((3468, 3534), 'numpy.bitwise_or', 'np.bitwise_or', (["(strCol == 'external_gene_name')", "(strCol == 'AveExpr')"], {}), "(strCol == 'external_gene_name', strCol == 'AveExpr')\n", (3481, 3534), True, 'import numpy as np\n'), ((4183, 4221), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (4195, 4221), False, 'import os\n'), ((6041, 6079), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (6053, 6079), False, 'import os\n'), ((6178, 6216), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (6190, 6216), False, 'import os\n'), ((6583, 6636), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strTempFileName'], {}), '(PathDir.pathPublicData, strTempFileName)\n', (6595, 6636), False, 'import os\n'), ((6827, 6900), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', '"""TCGA-CDR-SupplementalTableS1.xlsx"""'], {}), "(PathDir.pathPublicData, 'TCGA-CDR-SupplementalTableS1.xlsx')\n", (6839, 6900), False, 'import os\n'), ((7252, 7309), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strPanCanRNASeqFile'], {}), '(PathDir.pathPublicData, strPanCanRNASeqFile)\n', (7264, 7309), False, 'import os\n'), ((8089, 8146), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strPanCanRNASeqFile'], {}), '(PathDir.pathPublicData, strPanCanRNASeqFile)\n', 
(8101, 8146), False, 'import os\n'), ((8298, 8351), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strTempFileName'], {}), '(PathDir.pathPublicData, strTempFileName)\n', (8310, 8351), False, 'import os\n'), ((8407, 8460), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strTempFileName'], {}), '(PathDir.pathPublicData, strTempFileName)\n', (8419, 8460), False, 'import os\n'), ((8654, 8703), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strTempFile'], {}), '(PathDir.pathPublicData, strTempFile)\n', (8666, 8703), False, 'import os\n'), ((8883, 8938), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', '"""sample_info.csv"""'], {}), "(PathDir.pathPublicData, 'sample_info.csv')\n", (8895, 8938), False, 'import os\n'), ((9375, 9434), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', '"""CCLE_expression.csv"""'], {}), "(PathDir.pathPublicData, 'CCLE_expression.csv')\n", (9387, 9434), False, 'import os\n'), ((9766, 9815), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strTempFile'], {}), '(PathDir.pathPublicData, strTempFile)\n', (9778, 9815), False, 'import os\n'), ((9903, 9952), 'os.path.join', 'os.path.join', (['PathDir.pathPublicData', 'strTempFile'], {}), '(PathDir.pathPublicData, strTempFile)\n', (9915, 9952), False, 'import os\n'), ((10318, 10356), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (10330, 10356), False, 'import os\n'), ((12191, 12229), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (12203, 12229), False, 'import os\n'), ((12328, 12366), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (12340, 12366), False, 'import os\n'), ((12754, 12792), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (12766, 12792), False, 'import os\n'), ((14826, 14864), 'os.path.join', 
'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (14838, 14864), False, 'import os\n'), ((14963, 15001), 'os.path.join', 'os.path.join', (['pathOut', 'strTempFileName'], {}), '(pathOut, strTempFileName)\n', (14975, 15001), False, 'import os\n'), ((15475, 15526), 'os.path.join', 'os.path.join', (['pathOut', '"""TCGA-BRCA-EpiMesScores.tsv"""'], {}), "(pathOut, 'TCGA-BRCA-EpiMesScores.tsv')\n", (15487, 15526), False, 'import os\n'), ((15622, 15673), 'os.path.join', 'os.path.join', (['pathOut', '"""CCLE-BRCA-EpiMesScores.tsv"""'], {}), "(pathOut, 'CCLE-BRCA-EpiMesScores.tsv')\n", (15634, 15673), False, 'import os\n'), ((15774, 15825), 'os.path.join', 'os.path.join', (['pathOut', '"""LocalData-EpiMesScores.tsv"""'], {}), "(pathOut, 'LocalData-EpiMesScores.tsv')\n", (15786, 15825), False, 'import os\n'), ((21282, 21304), 'numpy.isnan', 'np.isnan', (['arrayAdjPVal'], {}), '(arrayAdjPVal)\n', (21290, 21304), True, 'import numpy as np\n'), ((23863, 23922), 'os.path.join', 'os.path.join', (['PathDir.pathProcResults', 'strOffTargetFileName'], {}), '(PathDir.pathProcResults, strOffTargetFileName)\n', (23875, 23922), False, 'import os\n'), ((27720, 27770), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strTempFilename'], {}), '(PathDir.pathRefData, strTempFilename)\n', (27732, 27770), False, 'import os\n'), ((27963, 28009), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strDataFile'], {}), '(PathDir.pathRefData, strDataFile)\n', (27975, 28009), False, 'import os\n'), ((29511, 29566), 'pandas.DataFrame', 'pd.DataFrame', (["{'ENSG': listGeneENSG, 'HGNC': listGenes}"], {}), "({'ENSG': listGeneENSG, 'HGNC': listGenes})\n", (29523, 29566), True, 'import pandas as pd\n'), ((29994, 30068), 'pickle.dump', 'pickle.dump', (['dictEnsGeneToHGNC', 'handFile'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(dictEnsGeneToHGNC, handFile, protocol=pickle.HIGHEST_PROTOCOL)\n', (30005, 30068), False, 'import pickle\n'), ((30213, 30234), 
'pickle.load', 'pickle.load', (['handFile'], {}), '(handFile)\n', (30224, 30234), False, 'import pickle\n'), ((34209, 34261), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (34221, 34261), False, 'import os\n'), ((35548, 35609), 'numpy.zeros', 'np.zeros', (['(numUniqueMessRNAs, numUniqueGONums)'], {'dtype': 'np.bool'}), '((numUniqueMessRNAs, numUniqueGONums), dtype=np.bool)\n', (35556, 35609), True, 'import numpy as np\n'), ((36453, 36546), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'arrayGOMembMatrix', 'index': 'listUniqueMessRNAs', 'columns': 'listUniqueGONums'}), '(data=arrayGOMembMatrix, index=listUniqueMessRNAs, columns=\n listUniqueGONums)\n', (36465, 36546), True, 'import pandas as pd\n'), ((37204, 37216), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (37214, 37216), True, 'import networkx as nx\n'), ((38554, 38582), 'networkx.find_cycle', 'nx.find_cycle', (['graphAnnotRel'], {}), '(graphAnnotRel)\n', (38567, 38582), True, 'import networkx as nx\n'), ((40890, 40942), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strOutputSaveFile'], {}), '(PathDir.strDataPath, strOutputSaveFile)\n', (40902, 40942), False, 'import os\n'), ((40989, 41041), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (41001, 41041), False, 'import os\n'), ((41818, 41870), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (41830, 41870), False, 'import os\n'), ((42520, 42544), 'pandas.isnull', 'pd.isnull', (['dfTargetGenes'], {}), '(dfTargetGenes)\n', (42529, 42544), True, 'import pandas as pd\n'), ((42812, 42864), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (42824, 42864), False, 'import os\n'), ((42956, 43008), 'os.path.join', 
'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (42968, 43008), False, 'import os\n'), ((48310, 48332), 'numpy.shape', 'np.shape', (['dfCCLEScores'], {}), '(dfCCLEScores)\n', (48318, 48332), True, 'import numpy as np\n'), ((49355, 49548), 'matplotlib.pyplot.scatter', 'plt.scatter', (["dfLocalScores['Epithelial Score'].loc[strSample]", "dfLocalScores['Mesenchymal Score'].loc[strSample]"], {'c': '"""g"""', 'marker': '"""^"""', 's': '(25)', 'edgecolors': "['k']", 'zorder': '(numScatterZOrder + 1)'}), "(dfLocalScores['Epithelial Score'].loc[strSample], dfLocalScores\n ['Mesenchymal Score'].loc[strSample], c='g', marker='^', s=25,\n edgecolors=['k'], zorder=numScatterZOrder + 1)\n", (49366, 49548), True, 'import matplotlib.pyplot as plt\n'), ((72695, 72716), 'numpy.shape', 'np.shape', (['dfGeneAnnot'], {}), '(dfGeneAnnot)\n', (72703, 72716), True, 'import numpy as np\n'), ((77707, 77752), 'os.path.join', 'os.path.join', (['pathOut', "('Figure5.' + strFormat)"], {}), "(pathOut, 'Figure5.' 
+ strFormat)\n", (77719, 77752), False, 'import os\n'), ((82200, 82219), 'numpy.abs', 'np.abs', (['arrayFlatFC'], {}), '(arrayFlatFC)\n', (82206, 82219), True, 'import numpy as np\n'), ((82298, 82310), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (82308, 82310), True, 'import matplotlib.pyplot as plt\n'), ((85958, 85976), 'matplotlib.pyplot.close', 'plt.close', (['handFig'], {}), '(handFig)\n', (85967, 85976), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3091), 'numpy.where', 'np.where', (['arrayHasNullStats'], {}), '(arrayHasNullStats)\n', (3072, 3091), True, 'import numpy as np\n'), ((3125, 3155), 'numpy.where', 'np.where', (['arrayHasNullDiffExpr'], {}), '(arrayHasNullDiffExpr)\n', (3133, 3155), True, 'import numpy as np\n'), ((5175, 5208), 'numpy.zeros', 'np.zeros', (['numSamples'], {'dtype': 'float'}), '(numSamples, dtype=float)\n', (5183, 5208), True, 'import numpy as np\n'), ((5247, 5280), 'numpy.zeros', 'np.zeros', (['numSamples'], {'dtype': 'float'}), '(numSamples, dtype=float)\n', (5255, 5280), True, 'import numpy as np\n'), ((11323, 11356), 'numpy.zeros', 'np.zeros', (['numSamples'], {'dtype': 'float'}), '(numSamples, dtype=float)\n', (11331, 11356), True, 'import numpy as np\n'), ((11395, 11428), 'numpy.zeros', 'np.zeros', (['numSamples'], {'dtype': 'float'}), '(numSamples, dtype=float)\n', (11403, 11428), True, 'import numpy as np\n'), ((13956, 13989), 'numpy.zeros', 'np.zeros', (['numSamples'], {'dtype': 'float'}), '(numSamples, dtype=float)\n', (13964, 13989), True, 'import numpy as np\n'), ((14028, 14061), 'numpy.zeros', 'np.zeros', (['numSamples'], {'dtype': 'float'}), '(numSamples, dtype=float)\n', (14036, 14061), True, 'import numpy as np\n'), ((20392, 20424), 'numpy.where', 'np.where', (['arrayIsSigInEitherLine'], {}), '(arrayIsSigInEitherLine)\n', (20400, 20424), True, 'import numpy as np\n'), ((25496, 25517), 'numpy.int', 'np.int', (['strMisMatches'], {}), '(strMisMatches)\n', (25502, 25517), True, 'import numpy as 
np\n'), ((28770, 28855), 'numpy.where', 'np.where', (['[(\'gene_name "\' in strDetails) for strDetails in listFirstGeneDetails]'], {}), '([(\'gene_name "\' in strDetails) for strDetails in listFirstGeneDetails]\n )\n', (28778, 28855), True, 'import numpy as np\n'), ((28884, 28962), 'numpy.where', 'np.where', (['[(\'gene_id "\' in strDetails) for strDetails in listFirstGeneDetails]'], {}), '([(\'gene_id "\' in strDetails) for strDetails in listFirstGeneDetails])\n', (28892, 28962), True, 'import numpy as np\n'), ((29907, 29957), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strTempFilename'], {}), '(PathDir.pathRefData, strTempFilename)\n', (29919, 29957), False, 'import os\n'), ((30106, 30156), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strTempFilename'], {}), '(PathDir.pathRefData, strTempFilename)\n', (30118, 30156), False, 'import os\n'), ((36644, 36700), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strTempMembMatrixFile'], {}), '(PathDir.strDataPath, strTempMembMatrixFile)\n', (36656, 36700), False, 'import os\n'), ((36863, 36919), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strTempMembMatrixFile'], {}), '(PathDir.strDataPath, strTempMembMatrixFile)\n', (36875, 36919), False, 'import os\n'), ((37030, 37081), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strInitGraphFile'], {}), '(PathDir.strDataPath, strInitGraphFile)\n', (37042, 37081), False, 'import os\n'), ((38082, 38133), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strInitGraphFile'], {}), '(PathDir.strDataPath, strInitGraphFile)\n', (38094, 38133), False, 'import os\n'), ((38201, 38252), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strInitGraphFile'], {}), '(PathDir.strDataPath, strInitGraphFile)\n', (38213, 38252), False, 'import os\n'), ((41318, 41370), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (41330, 41370), False, 'import 
os\n'), ((43816, 43831), 'numpy.shape', 'np.shape', (['dfTFs'], {}), '(dfTFs)\n', (43824, 43831), True, 'import numpy as np\n'), ((43848, 43869), 'numpy.shape', 'np.shape', (['dfCoFactors'], {}), '(dfCoFactors)\n', (43856, 43869), True, 'import numpy as np\n'), ((46203, 46236), 'numpy.where', 'np.where', (['arrayCCLELineHasNoScore'], {}), '(arrayCCLELineHasNoScore)\n', (46211, 46236), True, 'import numpy as np\n'), ((79154, 79172), 'numpy.float', 'np.float', (['strScore'], {}), '(strScore)\n', (79162, 79172), True, 'import numpy as np\n'), ((83182, 83216), 'matplotlib.pyplot.subplot', 'plt.subplot', (['arrayGridSpec[iGuide]'], {}), '(arrayGridSpec[iGuide])\n', (83193, 83216), True, 'import matplotlib.pyplot as plt\n'), ((84657, 84686), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['numMaxXTicks'], {}), '(numMaxXTicks)\n', (84672, 84686), True, 'import matplotlib.pyplot as plt\n'), ((84789, 84818), 'matplotlib.pyplot.MaxNLocator', 'plt.MaxNLocator', (['numMaxYTicks'], {}), '(numMaxYTicks)\n', (84804, 84818), True, 'import matplotlib.pyplot as plt\n'), ((35287, 35343), 'os.path.join', 'os.path.join', (['PathDir.strDataPath', 'strTempMembMatrixFile'], {}), '(PathDir.strDataPath, strTempMembMatrixFile)\n', (35299, 35343), False, 'import os\n'), ((36004, 36028), 'numpy.shape', 'np.shape', (['dfGeneOntology'], {}), '(dfGeneOntology)\n', (36012, 36028), True, 'import numpy as np\n'), ((39348, 39374), 'numpy.argsort', 'np.argsort', (['arrayNodeGenes'], {}), '(arrayNodeGenes)\n', (39358, 39374), True, 'import numpy as np\n'), ((39845, 39879), 'networkx.topological_sort', 'nx.topological_sort', (['graphAnnotRel'], {}), '(graphAnnotRel)\n', (39864, 39879), True, 'import networkx as nx\n'), ((40300, 40350), 'numpy.where', 'np.where', (['arrayGOMembMatrix[:, numNodeMatrixIndex]'], {}), '(arrayGOMembMatrix[:, numNodeMatrixIndex])\n', (40308, 40350), True, 'import numpy as np\n'), ((41221, 41273), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 
'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (41233, 41273), False, 'import os\n'), ((42702, 42726), 'numpy.where', 'np.where', (['(arrayGOObs > 0)'], {}), '(arrayGOObs > 0)\n', (42710, 42726), True, 'import numpy as np\n'), ((72744, 72765), 'numpy.shape', 'np.shape', (['dfGeneAnnot'], {}), '(dfGeneAnnot)\n', (72752, 72765), True, 'import numpy as np\n'), ((78310, 78328), 'numpy.shape', 'np.shape', (['dfGuides'], {}), '(dfGuides)\n', (78318, 78328), True, 'import numpy as np\n'), ((85747, 85848), 'os.path.join', 'os.path.join', (['Plot.strOutputLoc', "('ZEB1_offTarg_' + strCellLine + '_' + strDiffExpr + '.' + strExt)"], {}), "(Plot.strOutputLoc, 'ZEB1_offTarg_' + strCellLine + '_' +\n strDiffExpr + '.' + strExt)\n", (85759, 85848), False, 'import os\n'), ((41511, 41563), 'os.path.join', 'os.path.join', (['PathDir.pathRefData', 'strOutputSaveFile'], {}), '(PathDir.pathRefData, strOutputSaveFile)\n', (41523, 41563), False, 'import os\n'), ((35901, 35925), 'numpy.shape', 'np.shape', (['dfGeneOntology'], {}), '(dfGeneOntology)\n', (35909, 35925), True, 'import numpy as np\n'), ((82988, 83045), 'numpy.bitwise_or', 'np.bitwise_or', (["(strTarget == 'NoGene')", "(strTarget == 'ZEB1')"], {}), "(strTarget == 'NoGene', strTarget == 'ZEB1')\n", (83001, 83045), True, 'import numpy as np\n'), ((21488, 21510), 'numpy.log10', 'np.log10', (['arrayAdjPVal'], {}), '(arrayAdjPVal)\n', (21496, 21510), True, 'import numpy as np\n')] |
"""Refit the adaptive PCE surrogate of the Source model with new training samples.

Top-to-bottom script with side effects:
  1. Start parallel Veneer/Source model server instances.
  2. Pick 10 random columns from a previously selected optimal sample set
     and evaluate them through the full Source model.
  3. Load the previous iteration's training set, append the new
     samples/values, and refit the polynomial-chaos expansion (PCE).
  4. Persist the updated training set and PCE, restore the model's
     initial parameter values, and shut the model servers down.
"""
import numpy as np
import pandas as pd
from veneer.pest_runtime import *
from veneer.manage import start,kill_all_now
import pyapprox as pya
from pyapprox.utilities import total_degree_space_dimension
from funcs.read_data import file_settings, variables_prep
from funcs.modeling_funcs import vs_settings, \
modeling_settings, paralell_vs, obtain_initials, change_param_values
# Create the copy of models and veneer list
project_name = 'MW_BASE_RC10.rsproj'
veneer_name = 'vcmd45\\FlowMatters.Source.VeneerCmd.exe'
first_port=15003; num_copies = 2
_, things_to_record, _, _, _ = modeling_settings()
processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name)
vs_list = vs_settings(ports, things_to_record)
from run_source import run_source_fix_samples, run_source_lsq
# obtain the initial values of parameters
# (restored at the end so the model is left unchanged)
initial_values = obtain_initials(vs_list[0])
# select 10 samples from the optimal sample set
fpath = f'{file_settings()[0]}adaptive/rmse_iter3/'
# iteration number parsed from the folder name ('.../rmse_iter3/' -> 3)
num_iter = int(fpath[-2])
samples_import = np.loadtxt(f'{fpath}samples_selected.txt')
# choose 10 random columns (samples); rows 0:13 hold the parameter values
# (matches the train_sets[:13, :] slice below) -- presumably 13 model
# parameters; TODO confirm against the parameter file
rand_index = np.random.randint(0, samples_import.shape[1], 10)
samples_opt = samples_import[0:13, rand_index]
# evaluate the selected samples with the full Source model
vals_opt = run_source_fix_samples(vs_list, samples_opt)
# import PCE and update the surrogate with added training samples
import pickle
from pyapprox.approximate import approximate
# NOTE(review): pce_load is not used below -- the training data is re-read
# from train_samples.txt instead (see the commented-out lines)
pce_load = pickle.load(open(f'{file_settings()[0]}adaptive/rmse_iter{num_iter - 1}/pce-rmse-iter{num_iter - 1}.pkl', "rb"))
# update training samples
# train_samples = pce_load.variable_transformation.map_from_canonical_space(pce_load.samples)
# train_vals = pce_load.values
# previous training set layout: first 13 rows are samples, last row the values
train_sets = np.loadtxt(f'{file_settings()[0]}adaptive/rmse_iter{num_iter - 1}/train_samples.txt')
train_samples = train_sets[:13, :]
train_vals = train_sets[-1, :].reshape(train_samples.shape[1], 1)
# append the newly evaluated samples (columns) and values (rows)
train_samples = np.append(train_samples, samples_opt, axis=1)
train_vals = np.append(train_vals, vals_opt, axis=0)
# save samples and values together, values as the last row
train_set = np.append(train_samples, train_vals.T, axis=0)
np.savetxt(f'{fpath}train_samples.txt', train_set)
param_file = file_settings()[-1]
ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False)
# cross-validation folds: at most 10, bounded by the number of samples
nfolds = min(10, train_samples.shape[1])
solver_options = {'cv': nfolds}
# number of terms in a total-degree polynomial space of degree 3
nterms = total_degree_space_dimension(train_samples.shape[0], 3)
options = {'basis_type': 'expanding_basis', 'variable': ind_vars,
'verbosity': 0,
'options': {'max_num_init_terms': nterms,
'linear_solver_options': solver_options}}
# refit the polynomial-chaos surrogate on the augmented training set
approx_update = approximate(train_samples, train_vals, method='polynomial_chaos', options=options).approx
pickle.dump(approx_update, open(f'{fpath}pce-rmse-iter{num_iter}.pkl', "wb"))
# set the parameter values to initial values
for vs in vs_list:
    vs = change_param_values(vs, initial_values, fromList=True)
kill_all_now(processes)
| [
"funcs.modeling_funcs.modeling_settings",
"funcs.read_data.variables_prep",
"pyapprox.utilities.total_degree_space_dimension",
"funcs.read_data.file_settings",
"numpy.savetxt",
"pyapprox.approximate.approximate",
"run_source.run_source_fix_samples",
"numpy.append",
"numpy.random.randint",
"veneer.... | [((593, 612), 'funcs.modeling_funcs.modeling_settings', 'modeling_settings', ([], {}), '()\n', (610, 612), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((632, 694), 'funcs.modeling_funcs.paralell_vs', 'paralell_vs', (['first_port', 'num_copies', 'project_name', 'veneer_name'], {}), '(first_port, num_copies, project_name, veneer_name)\n', (643, 694), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((706, 742), 'funcs.modeling_funcs.vs_settings', 'vs_settings', (['ports', 'things_to_record'], {}), '(ports, things_to_record)\n', (717, 742), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((865, 892), 'funcs.modeling_funcs.obtain_initials', 'obtain_initials', (['vs_list[0]'], {}), '(vs_list[0])\n', (880, 892), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((1037, 1079), 'numpy.loadtxt', 'np.loadtxt', (['f"""{fpath}samples_selected.txt"""'], {}), "(f'{fpath}samples_selected.txt')\n", (1047, 1079), True, 'import numpy as np\n'), ((1093, 1142), 'numpy.random.randint', 'np.random.randint', (['(0)', 'samples_import.shape[1]', '(10)'], {}), '(0, samples_import.shape[1], 10)\n', (1110, 1142), True, 'import numpy as np\n'), ((1201, 1245), 'run_source.run_source_fix_samples', 'run_source_fix_samples', (['vs_list', 'samples_opt'], {}), '(vs_list, samples_opt)\n', (1223, 1245), False, 'from run_source import run_source_fix_samples, run_source_lsq\n'), ((1865, 1910), 'numpy.append', 'np.append', (['train_samples', 'samples_opt'], {'axis': '(1)'}), '(train_samples, samples_opt, axis=1)\n', (1874, 1910), True, 'import numpy as np\n'), ((1924, 1963), 'numpy.append', 'np.append', (['train_vals', 'vals_opt'], {'axis': '(0)'}), '(train_vals, vals_opt, 
axis=0)\n', (1933, 1963), True, 'import numpy as np\n'), ((1976, 2022), 'numpy.append', 'np.append', (['train_samples', 'train_vals.T'], {'axis': '(0)'}), '(train_samples, train_vals.T, axis=0)\n', (1985, 2022), True, 'import numpy as np\n'), ((2023, 2073), 'numpy.savetxt', 'np.savetxt', (['f"""{fpath}train_samples.txt"""', 'train_set'], {}), "(f'{fpath}train_samples.txt', train_set)\n", (2033, 2073), True, 'import numpy as np\n'), ((2129, 2195), 'funcs.read_data.variables_prep', 'variables_prep', (['param_file'], {'product_uniform': '"""uniform"""', 'dummy': '(False)'}), "(param_file, product_uniform='uniform', dummy=False)\n", (2143, 2195), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((2278, 2333), 'pyapprox.utilities.total_degree_space_dimension', 'total_degree_space_dimension', (['train_samples.shape[0]', '(3)'], {}), '(train_samples.shape[0], 3)\n', (2306, 2333), False, 'from pyapprox.utilities import total_degree_space_dimension\n'), ((2851, 2874), 'veneer.manage.kill_all_now', 'kill_all_now', (['processes'], {}), '(processes)\n', (2863, 2874), False, 'from veneer.manage import start, kill_all_now\n'), ((2088, 2103), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (2101, 2103), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((2554, 2641), 'pyapprox.approximate.approximate', 'approximate', (['train_samples', 'train_vals'], {'method': '"""polynomial_chaos"""', 'options': 'options'}), "(train_samples, train_vals, method='polynomial_chaos', options=\n options)\n", (2565, 2641), False, 'from pyapprox.approximate import approximate\n'), ((2795, 2849), 'funcs.modeling_funcs.change_param_values', 'change_param_values', (['vs', 'initial_values'], {'fromList': '(True)'}), '(vs, initial_values, fromList=True)\n', (2814, 2849), False, 'from funcs.modeling_funcs import vs_settings, modeling_settings, paralell_vs, obtain_initials, change_param_values\n'), ((953, 968), 'funcs.read_data.file_settings', 
'file_settings', ([], {}), '()\n', (966, 968), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((1675, 1690), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (1688, 1690), False, 'from funcs.read_data import file_settings, variables_prep\n'), ((1403, 1418), 'funcs.read_data.file_settings', 'file_settings', ([], {}), '()\n', (1416, 1418), False, 'from funcs.read_data import file_settings, variables_prep\n')] |
import numpy as np
import tensorflow as tf
def splitter(x):
    """Split one comma-separated record into (label, pixels).

    The first field is the label; every remaining field is a pixel value.
    All fields are parsed as float32.
    """
    values = np.array(x.split(',')).astype(np.float32)
    return values[0], values[1:]
def normalize(label, pixel):
    """Min-max scale the pixel array into the [0, 1] range.

    The label is passed through unchanged; pixels are cast to float32
    before scaling.
    """
    values = pixel.astype(np.float32)
    lo = np.min(values)
    hi = np.max(values)
    return label, (values - lo) / (hi - lo)
def decode(line, default_values):
    """Decode one CSV line via tf.decode_csv into (label, features).

    `default_values` supplies the per-column defaults (and hence types);
    the first parsed column is the label, the rest are the feature vector.
    """
    item = tf.decode_csv(line, default_values)
    return item[0], item[1:] | [
"numpy.min",
"numpy.max",
"tensorflow.decode_csv"
] | [((461, 496), 'tensorflow.decode_csv', 'tf.decode_csv', (['line', 'default_values'], {}), '(line, default_values)\n', (474, 496), True, 'import tensorflow as tf\n'), ((371, 384), 'numpy.min', 'np.min', (['pixel'], {}), '(pixel)\n', (377, 384), True, 'import numpy as np\n'), ((387, 400), 'numpy.max', 'np.max', (['pixel'], {}), '(pixel)\n', (393, 400), True, 'import numpy as np\n'), ((403, 416), 'numpy.min', 'np.min', (['pixel'], {}), '(pixel)\n', (409, 416), True, 'import numpy as np\n')] |
# import numpy as np
def rect_to_points(r):
    """Convert an (x, y, w, h) rect into its top-left and bottom-right corners."""
    x, y, w, h = r[0], r[1], r[2], r[3]
    return (x, y), (x + w, y + h)
def points_to_rect(p1, p2):
    """Convert top-left p1 and bottom-right p2 corners into an (x, y, w, h) rect."""
    width = p2[0] - p1[0]
    height = p2[1] - p1[1]
    return (p1[0], p1[1], width, height)
def union_rect(r1, r2):
    """Return the smallest (x, y, w, h) rect enclosing both r1 and r2."""
    (ax1, ay1), (ax2, ay2) = rect_to_points(r1)
    (bx1, by1), (bx2, by2) = rect_to_points(r2)
    top_left = (min(ax1, bx1), min(ay1, by1))
    bottom_right = (max(ax2, bx2), max(ay2, by2))
    return points_to_rect(top_left, bottom_right)
def intersection_rect(r1, r2, return_none=False):
    """Return the overlap of r1 and r2 as an (x, y, w, h) rect.

    When the rects do not overlap, returns (0, 0, 0, 0), or None when
    `return_none` is true.
    """
    (ax1, ay1), (ax2, ay2) = rect_to_points(r1)
    (bx1, by1), (bx2, by2) = rect_to_points(r2)
    # Separated along either the x or the y axis -> no overlap at all.
    if ax1 > bx2 or ax2 < bx1 or ay1 > by2 or ay2 < by1:
        return None if return_none else (0, 0, 0, 0)
    top_left = (max(ax1, bx1), max(ay1, by1))
    bottom_right = (min(ax2, bx2), min(ay2, by2))
    return points_to_rect(top_left, bottom_right)
def area_rect(r):
    """Area of an (x, y, w, h) rect: width times height."""
    width, height = r[2], r[3]
    return width * height
def iou_rect(r1, r2):
    """Intersection-over-union (Jaccard index) of two (x, y, w, h) rects.

    Returns a value in [0, 1]. Previously two zero-area rects produced a
    0/0 ZeroDivisionError; that degenerate case now returns 0.0.
    """
    ai = area_rect(intersection_rect(r1, r2))
    union = area_rect(r1) + area_rect(r2) - ai
    # Guard: both rects degenerate (zero area) -> union is 0, define IoU as 0.
    if union == 0:
        return 0.0
    return ai / union
if __name__ == '__main__':
    # Visual smoke test: for each rectangle pair, print the individual,
    # intersection and IoU areas, then draw the pair plus their
    # intersection and union for manual inspection.
    import numpy as np
    import cv2
    import u_opencv as ucv
    # Pairs of (x, y, w, h) rects covering partial overlap, containment,
    # swapped argument order, and fully disjoint cases.
    rects = [
        ( (10,10,200,200), (90,90,200,200) ),
        ( (90,90,200,200), (10,10,200,200) ),
        ( (90,10,200,200), (10,90,200,200) ),
        ( (10,90,200,200), (90,10,200,200) ),
        ( (100,100,200,50), (200,50,50,200) ),
        ( (200,50,50,200), (100,100,200,50) ),
        ( (100,100,200,200), (150,150,50,50) ),
        ( (150,150,50,50), (100,100,200,200) ),
        ( (100,100,50,50), (300,300,50,50) ),
        ( (300,300,50,50), (100,100,50,50) ),
    ]
    for i, (r1,r2) in enumerate(rects):
        print('rectangles',i)
        print('   area 1:', area_rect(r1))
        print('   area 2:', area_rect(r2))
        print('   area int:', area_rect(intersection_rect(r1,r2)))
        print('   area iou:', iou_rect(r1,r2))
        img = np.zeros((512,512,3),dtype=np.uint8)
        # Inputs in blue/green, intersection in magenta, union in cyan.
        ucv.rectangle( img, r1, ucv.color('darkblue'),thickness=3)
        ucv.rectangle( img, r2, ucv.color('darkgreen'),thickness=3)
        ucv.rectangle( img, intersection_rect(r1,r2), ucv.color('magenta'))
        ucv.rectangle( img, union_rect(r1,r2), ucv.color('cyan'))
        cv2.imshow( 'test', img)
        # Block until a keypress before moving on to the next pair.
        cv2.waitKey(0)
| [
"cv2.waitKey",
"cv2.imshow",
"numpy.zeros",
"u_opencv.color"
] | [((2111, 2150), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)'], {'dtype': 'np.uint8'}), '((512, 512, 3), dtype=np.uint8)\n', (2119, 2150), True, 'import numpy as np\n'), ((2435, 2458), 'cv2.imshow', 'cv2.imshow', (['"""test"""', 'img'], {}), "('test', img)\n", (2445, 2458), False, 'import cv2\n'), ((2469, 2483), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2480, 2483), False, 'import cv2\n'), ((2181, 2202), 'u_opencv.color', 'ucv.color', (['"""darkblue"""'], {}), "('darkblue')\n", (2190, 2202), True, 'import u_opencv as ucv\n'), ((2248, 2270), 'u_opencv.color', 'ucv.color', (['"""darkgreen"""'], {}), "('darkgreen')\n", (2257, 2270), True, 'import u_opencv as ucv\n'), ((2338, 2358), 'u_opencv.color', 'ucv.color', (['"""magenta"""'], {}), "('magenta')\n", (2347, 2358), True, 'import u_opencv as ucv\n'), ((2407, 2424), 'u_opencv.color', 'ucv.color', (['"""cyan"""'], {}), "('cyan')\n", (2416, 2424), True, 'import u_opencv as ucv\n')] |
#! /usr/bin/env python3
import numpy as np
from matplotlib import pyplot as plt
# Seed numpy's RNG from the local 'seed' file so each run is reproducible
# for a given seed value.
with open('seed', 'r') as file:
    np.random.seed(seed=int(file.read()))
# Flat array of 15*15 = 225 random 8-bit pixel values (0..255).
kernel = np.random.randint(256, size=(15**2))
# plt.imshow(kernel.reshape((15,15)))
# plt.show()
# Serialize each pixel as a fixed-width 8-bit binary string, one per line.
kernel_in = []
for pixel in kernel:
    kernel_in.append(np.binary_repr(pixel, width=8))
kernel_in = "\n".join(kernel_in)
# Write the test vector consumed by the (presumably hardware) kernel test.
with open('kernel_test.in', 'w') as file:
    file.write(kernel_in)
| [
"numpy.binary_repr",
"numpy.random.randint"
] | [((166, 202), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(15 ** 2)'}), '(256, size=15 ** 2)\n', (183, 202), True, 'import numpy as np\n'), ((314, 344), 'numpy.binary_repr', 'np.binary_repr', (['pixel'], {'width': '(8)'}), '(pixel, width=8)\n', (328, 344), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
import datetime
import numpy as np
from astropy.units import Quantity
from astropy.io import fits
__all__ = ['LogEnergyAxis',
'energy_bounds_equal_log_spacing',
'energy_bin_centers_log_spacing',
'np_to_pha',
]
def energy_bin_centers_log_spacing(energy_bounds):
    """Compute energy bin centers for log-spaced bins.

    Each center is the geometric mean of the two adjacent bin edges,
    i.e. ``center = sqrt(edge_lo * edge_hi)``.

    Parameters
    ----------
    energy_bounds : `~astropy.units.Quantity`
        Array of energy bin edges.

    Returns
    -------
    energy_center : `~astropy.units.Quantity`
        Array of energy bin centers (length ``len(energy_bounds) - 1``).
    """
    edges = energy_bounds.value
    centers = np.sqrt(edges[1:] * edges[:-1])
    return Quantity(centers, energy_bounds.unit)
def energy_bounds_equal_log_spacing(energy_band, bins=10):
    """Make an energy bounds array with equal spacing in log10(energy).

    Parameters
    ----------
    energy_band : `~astropy.units.Quantity`
        Tuple ``(energy_min, energy_max)``
    bins : int
        Number of bins

    Returns
    -------
    energy_bounds : `~astropy.units.Quantity`
        Energy bounds array (1-dim with length ``bins + 1``).
    """
    log_min, log_max = np.log10(energy_band.value)
    bounds = np.logspace(log_min, log_max, bins + 1)
    return Quantity(bounds, energy_band.unit)
class LogEnergyAxis(object):
    """Log10 energy axis.

    Defines a transformation between:

    * ``energy = 10 ** x``
    * ``x = log10(energy)``
    * ``pix`` in the range [0, ..., len(x)] via linear interpolation of the ``x`` array,
      e.g. ``pix=0`` corresponds to ``x[0]``
      and ``pix=0.3`` is ``0.5 * (0.3 * x[0] + 0.7 * x[1])``

    .. note::
        The `specutils.Spectrum1DLookupWCS <http://specutils.readthedocs.org/en/latest/api/specutils.wcs.specwcs.Spectrum1DLookupWCS.html>`__
        class is similar (only that it doesn't include the ``log`` transformation and the API is different.
        Also see this Astropy feature request: https://github.com/astropy/astropy/issues/2362

    Parameters
    ----------
    energy : `~astropy.units.Quantity`
        Energy array
    """
    def __init__(self, energy):
        self.energy = energy
        # Work in x = log10(energy); `pix` is simply the index array.
        self.x = np.log10(energy.value)
        self.pix = np.arange(len(self.x))

    def world2pix(self, energy):
        """Convert energy to (fractional) pixel coordinate."""
        # Convert `energy` to `x = log10(energy)` in this axis' unit.
        x = np.log10(energy.to(self.energy.unit).value)
        # Linear interpolation in `x`
        pix = np.interp(x, self.x, self.pix)
        return pix

    def pix2world(self, pix):
        """Convert (fractional) pixel coordinate back to energy."""
        # Interpolate in `x = log10(energy)`
        x = np.interp(pix, self.pix, self.x)
        # Convert `x` to `energy`
        energy = Quantity(10 ** x, self.energy.unit)
        return energy

    def closest_point(self, energy):
        """Index of the axis point closest to ``energy`` in log space."""
        x = np.log10(energy.value)
        # TODO: I'm not sure which is faster / better here?
        index = np.argmin(np.abs(self.x - x))
        # np.searchsorted(self.x, x)
        return index

    def bin_edges(self, energy):
        """Find the axis bin edges bracketing ``energy``.

        Works for scalar and array-valued ``energy``.

        Parameters
        ----------
        energy : `~astropy.units.Quantity` or array-like
            Energy (or energies) to locate on the axis.

        Returns
        -------
        pix1, pix2 : int or ndarray
            Index of the lower / upper bracketing axis point.
        energy1, energy2 : same type as ``self.energy`` elements
            Energies of the lower / upper bracketing axis point.
        """
        try:
            # Scalar case: index of the last axis energy that is <= `energy`.
            # Bug fix: this used to assign `pix`, leaving `pix1` unbound so
            # `pix2 = pix1 + 1` below raised NameError whenever this path ran.
            pix1 = np.where(energy >= self.energy)[0][-1]
        except ValueError:
            # Array case: the broadcast comparison above raises ValueError for
            # mismatched shapes, so locate each element by hand.
            # NOTE(review): an array `energy` with the same length as the axis
            # would take the scalar path above -- confirm callers never do that.
            pix1 = np.empty_like(energy, dtype=int)
            for ii in range(energy.size):
                pix1[ii] = np.where(energy[ii] >= self.energy)[0][-1]
        pix2 = pix1 + 1
        energy1 = self.energy[pix1]
        energy2 = self.energy[pix2]
        return pix1, pix2, energy1, energy2
def np_to_pha(channel, counts, exposure, dstart, dstop,
              dbase=None, stat_err=None, quality=None, syserr=None,
              obj_ra=0., obj_dec=0., obj_name='DUMMY', creator='DUMMY',
              version='v0.0.0', telescope='DUMMY', instrument='DUMMY', filter='NONE'):
    """Create PHA FITS table extension from numpy arrays.

    Parameters
    ----------
    channel : numpy 1D array int
        Channel numbers corresponding to ``counts``
    counts : numpy 1D array float
        Binned spectral data [counts]
    exposure : float
        Exposure [s]
    dstart : astropy.time.Time
        Observation start time.
    dstop : astropy.time.Time
        Observation stop time.
    dbase : astropy.time.Time
        Base date used for TSTART/TSTOP.
    stat_err : numpy 1D array float
        Statistical errors associated with ``counts`` [counts] (optional)
    quality : numpy 1D array integer
        Quality flags for the channels (optional)
    syserr : numpy 1D array float
        Fractional systematic error for the channel (optional)
    obj_ra/obj_dec : float
        Object RA/DEC J2000 [deg]

    Returns
    -------
    pha : `~astropy.io.fits.BinTableHDU`
        PHA FITS HDU

    Notes
    -----
    For more info on the PHA FITS file format see:
    http://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/summary/ogip_92_007_summary.html
    """
    # Mandatory PHA columns; optional ones are appended below when given.
    cols = [fits.Column(name='CHANNEL',
                        format='I',
                        array=channel,
                        unit='channel'),
            fits.Column(name='COUNTS',
                        format='1E',
                        array=counts,
                        unit='count')
            ]

    if stat_err is not None:
        cols.append(fits.Column(name='STAT_ERR',
                                format='1E',
                                array=stat_err,
                                unit='count'))

    if syserr is not None:
        cols.append(fits.Column(name='SYS_ERR',
                                format='E',
                                array=syserr))

    if quality is not None:
        cols.append(fits.Column(name='QUALITY',
                                format='I',
                                array=quality))

    # fits.new_table was deprecated in astropy 0.4 and removed in 1.0;
    # BinTableHDU.from_columns is the drop-in replacement.
    hdu = fits.BinTableHDU.from_columns(cols)
    header = hdu.header

    # OGIP/92-007 mandatory and recommended keywords.
    header['EXTNAME'] = 'SPECTRUM', 'name of this binary table extension'
    header['TELESCOP'] = telescope, 'Telescope (mission) name'
    header['INSTRUME'] = instrument, 'Instrument name'
    header['FILTER'] = filter, 'Instrument filter in use'
    header['EXPOSURE'] = exposure, 'Exposure time'

    header['BACKFILE'] = 'none', 'Background FITS file'
    header['CORRFILE'] = 'none', 'Correlation FITS file'
    header['RESPFILE'] = 'none', 'Redistribution matrix file (RMF)'
    header['ANCRFILE'] = 'none', 'Ancillary response file (ARF)'

    header['HDUCLASS'] = 'OGIP', 'Format conforms to OGIP/GSFC spectral standards'
    header['HDUCLAS1'] = 'SPECTRUM', 'Extension contains a spectrum'
    header['HDUVERS '] = '1.2.1', 'Version number of the format'

    # POISSERR must be True exactly when no explicit statistical errors are given.
    poisserr = False
    if stat_err is None:
        poisserr = True
    header['POISSERR'] = poisserr, 'Are Poisson Distribution errors assumed'

    header['CHANTYPE'] = 'PHA', 'Channels assigned by detector electronics'
    header['DETCHANS'] = len(channel), 'Total number of detector channels available'
    header['TLMIN1'] = channel[0], 'Lowest Legal channel number'
    header['TLMAX1'] = channel[-1], 'Highest Legal channel number'

    header['XFLT0001'] = 'none', 'XSPEC selection filter description'
    header['OBJECT'] = obj_name, 'OBJECT from the FIRST input file'
    header['RA-OBJ'] = obj_ra, 'RA of First input object'
    header['DEC-OBJ'] = obj_dec, 'DEC of First input object'
    header['EQUINOX'] = 2000.00, 'Equinox of the FIRST object'
    header['RADECSYS'] = 'FK5', 'Co-ordinate frame used for equinox'
    header['DATE-OBS'] = dstart.datetime.strftime('%Y-%m-%d'), 'EARLIEST observation date of files'
    header['TIME-OBS'] = dstart.datetime.strftime('%H:%M:%S'), 'EARLIEST time of all input files'
    header['DATE-END'] = dstop.datetime.strftime('%Y-%m-%d'), 'LATEST observation date of files'
    header['TIME-END'] = dstop.datetime.strftime('%H:%M:%S'), 'LATEST time of all input files'

    header['CREATOR'] = '{0} {1}'.format(creator, version), 'Program name that produced this file'

    header['HDUCLAS2'] = 'NET', 'Extension contains a bkgr substr. spec.'
    header['HDUCLAS3'] = 'COUNT', 'Extension contains counts'
    header['HDUCLAS4'] = 'TYPE:I', 'Single PHA file contained'
    header['HDUVERS1'] = '1.2.1', 'Obsolete - included for backwards compatibility'

    if syserr is None:
        header['SYS_ERR'] = 0, 'No systematic error was specified'

    header['GROUPING'] = 0, 'No grouping data has been specified'

    if quality is None:
        header['QUALITY '] = 0, 'No data quality information specified'

    header['AREASCAL'] = 1., 'Nominal effective area'
    header['BACKSCAL'] = 1., 'Background scale factor'
    header['CORRSCAL'] = 0., 'Correlation scale factor'

    header['FILENAME'] = 'several', 'Spectrum was produced from more than one file'
    header['ORIGIN'] = 'dummy', 'origin of fits file'
    header['DATE'] = datetime.datetime.today().strftime('%Y-%m-%d'), 'FITS file creation date (yyyy-mm-dd)'
    header['PHAVERSN'] = '1992a', 'OGIP memo number for file format'

    # Optional timing keywords relative to a reference epoch.
    if dbase is not None:
        header['TIMESYS'] = 'MJD', 'The time system is MJD'
        header['TIMEUNIT'] = 's', 'unit for TSTARTI/F and TSTOPI/F, TIMEZERO'
        header['MJDREF'] = dbase.mjd, 'MJD for reference time'
        header['TSTART'] = (dstart - dbase).sec, 'Observation start time [s]'
        header['TSTOP'] = (dstop - dbase).sec, 'Observation stop time [s]'

    return hdu
| [
"astropy.units.Quantity",
"numpy.abs",
"datetime.datetime.today",
"numpy.logspace",
"astropy.io.fits.new_table",
"numpy.empty_like",
"numpy.where",
"numpy.interp",
"astropy.io.fits.Column",
"numpy.log10",
"numpy.sqrt"
] | [((766, 803), 'numpy.sqrt', 'np.sqrt', (['(e_bounds[:-1] * e_bounds[1:])'], {}), '(e_bounds[:-1] * e_bounds[1:])\n', (773, 803), True, 'import numpy as np\n'), ((815, 853), 'astropy.units.Quantity', 'Quantity', (['e_center', 'energy_bounds.unit'], {}), '(e_center, energy_bounds.unit)\n', (823, 853), False, 'from astropy.units import Quantity\n'), ((1287, 1314), 'numpy.log10', 'np.log10', (['energy_band.value'], {}), '(energy_band.value)\n', (1295, 1314), True, 'import numpy as np\n'), ((1335, 1370), 'numpy.logspace', 'np.logspace', (['x_min', 'x_max', '(bins + 1)'], {}), '(x_min, x_max, bins + 1)\n', (1346, 1370), True, 'import numpy as np\n'), ((1391, 1432), 'astropy.units.Quantity', 'Quantity', (['energy_bounds', 'energy_band.unit'], {}), '(energy_bounds, energy_band.unit)\n', (1399, 1432), False, 'from astropy.units import Quantity\n'), ((6207, 6227), 'astropy.io.fits.new_table', 'fits.new_table', (['cols'], {}), '(cols)\n', (6221, 6227), False, 'from astropy.io import fits\n'), ((2343, 2365), 'numpy.log10', 'np.log10', (['energy.value'], {}), '(energy.value)\n', (2351, 2365), True, 'import numpy as np\n'), ((2631, 2661), 'numpy.interp', 'np.interp', (['x', 'self.x', 'self.pix'], {}), '(x, self.x, self.pix)\n', (2640, 2661), True, 'import numpy as np\n'), ((2809, 2841), 'numpy.interp', 'np.interp', (['pix', 'self.pix', 'self.x'], {}), '(pix, self.pix, self.x)\n', (2818, 2841), True, 'import numpy as np\n'), ((2894, 2929), 'astropy.units.Quantity', 'Quantity', (['(10 ** x)', 'self.energy.unit'], {}), '(10 ** x, self.energy.unit)\n', (2902, 2929), False, 'from astropy.units import Quantity\n'), ((3041, 3063), 'numpy.log10', 'np.log10', (['energy.value'], {}), '(energy.value)\n', (3049, 3063), True, 'import numpy as np\n'), ((5305, 5375), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""CHANNEL"""', 'format': '"""I"""', 'array': 'channel', 'unit': '"""channel"""'}), "(name='CHANNEL', format='I', array=channel, unit='channel')\n", (5316, 5375), False, 
'from astropy.io import fits\n'), ((5467, 5534), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""COUNTS"""', 'format': '"""1E"""', 'array': 'counts', 'unit': '"""count"""'}), "(name='COUNTS', format='1E', array=counts, unit='count')\n", (5478, 5534), False, 'from astropy.io import fits\n'), ((3150, 3168), 'numpy.abs', 'np.abs', (['(self.x - x)'], {}), '(self.x - x)\n', (3156, 3168), True, 'import numpy as np\n'), ((5677, 5748), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""STAT_ERR"""', 'format': '"""1E"""', 'array': 'stat_err', 'unit': '"""count"""'}), "(name='STAT_ERR', format='1E', array=stat_err, unit='count')\n", (5688, 5748), False, 'from astropy.io import fits\n'), ((5900, 5953), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""SYS_ERR"""', 'format': '"""E"""', 'array': 'syserr'}), "(name='SYS_ERR', format='E', array=syserr)\n", (5911, 5953), False, 'from astropy.io import fits\n'), ((6072, 6126), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': '"""QUALITY"""', 'format': '"""I"""', 'array': 'quality'}), "(name='QUALITY', format='I', array=quality)\n", (6083, 6126), False, 'from astropy.io import fits\n'), ((3550, 3582), 'numpy.empty_like', 'np.empty_like', (['energy'], {'dtype': 'int'}), '(energy, dtype=int)\n', (3563, 3582), True, 'import numpy as np\n'), ((9206, 9231), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9229, 9231), False, 'import datetime\n'), ((3430, 3461), 'numpy.where', 'np.where', (['(energy >= self.energy)'], {}), '(energy >= self.energy)\n', (3438, 3461), True, 'import numpy as np\n'), ((3713, 3748), 'numpy.where', 'np.where', (['(energy[ii] >= self.energy)'], {}), '(energy[ii] >= self.energy)\n', (3721, 3748), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.