code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Vector class for 1D heat problem
Note: values at two consecutive time points are grouped as pairs
"""
import numpy as np
from pymgrit.core.vector import Vector
class VectorHeat1D2Pts(Vector):
"""
Vector class for grouping values at two consecutive time points
"""
def __init__(self, size, dtau):
"""
Constructor.
One vector object contains values at two consecutive time points and spacing between these time points.
:param size: number of spatial degrees of freedom
:param dtau: time-step size within pair
"""
super().__init__()
self.size = size
self.dtau = dtau
self.values_first_time_point = np.zeros(size)
self.values_second_time_point = np.zeros(size)
def __add__(self, other):
"""
Addition of two vector objects (self and other)
:param other: vector object to be added to self
:return: sum of vector object self and input object other
"""
tmp = VectorHeat1D2Pts(self.size, self.dtau)
first_self, second_self, dtau_self = self.get_values()
first_other, second_other, dtau_other = other.get_values()
tmp.set_values(first_self + first_other, second_self + second_other, dtau_self)
return tmp
def __sub__(self, other):
"""
Subtraction of two vector objects (self and other)
:param other: vector object to be subtracted from self
:return: difference of vector object self and input object other
"""
tmp = VectorHeat1D2Pts(self.size, self.dtau)
first_self, second_self, dtau_self = self.get_values()
first_other, second_other, dtau_other = other.get_values()
tmp.set_values(first_self - first_other, second_self - second_other, dtau_self)
return tmp
def norm(self):
"""
Norm of a vector object
:return: 2-norm of vector object
"""
return np.linalg.norm(np.append(self.values_first_time_point, self.values_second_time_point))
def clone(self):
"""
Clone vector object
:return: vector object with zero values
"""
tmp = VectorHeat1D2Pts(self.size, self.dtau)
tmp.set_values(self.values_first_time_point, self.values_second_time_point, self.dtau)
return tmp
def clone_zero(self):
"""
Initialize vector object with zeros
:return: vector object with zero values
"""
return VectorHeat1D2Pts(self.size, self.dtau)
def clone_rand(self):
"""
Initialize vector object with random values
:return: vector object with random values
"""
tmp = VectorHeat1D2Pts(self.size, self.dtau)
tmp.set_values(np.random.rand(self.size), np.random.rand(self.size), self.dtau)
return tmp
def get_values(self):
"""
Get vector data
:return: tuple of values of member variables
"""
return self.values_first_time_point, self.values_second_time_point, self.dtau
def set_values(self, first_time_point, second_time_point, dtau):
"""
Set vector data
:param first_time_point: values for first time point
:param second_time_point: values for second time point
:param dtau: time-step size within pair
"""
self.values_first_time_point = first_time_point
self.values_second_time_point = second_time_point
self.dtau = dtau
def pack(self):
"""
Pack data
:return: values of vector object
"""
return np.array([self.values_first_time_point, self.values_second_time_point])
def unpack(self, values):
"""
Unpack and set data
:param values: values for vector object
"""
self.values_first_time_point = values[0]
self.values_second_time_point = values[1]
| [
"numpy.array",
"numpy.append",
"numpy.random.rand",
"numpy.zeros"
] | [((702, 716), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (710, 716), True, 'import numpy as np\n'), ((757, 771), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (765, 771), True, 'import numpy as np\n'), ((3625, 3696), 'numpy.array', 'np.array', (['[self.values_first_time_point, self.values_second_time_point]'], {}), '([self.values_first_time_point, self.values_second_time_point])\n', (3633, 3696), True, 'import numpy as np\n'), ((1986, 2056), 'numpy.append', 'np.append', (['self.values_first_time_point', 'self.values_second_time_point'], {}), '(self.values_first_time_point, self.values_second_time_point)\n', (1995, 2056), True, 'import numpy as np\n'), ((2776, 2801), 'numpy.random.rand', 'np.random.rand', (['self.size'], {}), '(self.size)\n', (2790, 2801), True, 'import numpy as np\n'), ((2803, 2828), 'numpy.random.rand', 'np.random.rand', (['self.size'], {}), '(self.size)\n', (2817, 2828), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
import csv
import codecs
import os
import glob
from collections import defaultdict
SPACE = " "
EMPTY = " "
INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '!':4, '-':5, ';':6, ':':7, '...':8, '':0}
PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?', 4:'!', 5:'-', 6:';', 7:':', 8:'...'}
REDUCED_PUNCTUATION_VOCABULARY = {0:SPACE, 1:',', 2:'.', 3:'?'}
REDUCED_INV_PUNCTUATION_CODES = {EMPTY:0, SPACE:0, ',':1, '.':2, '?':3, '':0}
EOS_PUNCTUATION_CODES = [2,3,4,5,6,7,8]
END = "<END>"
UNK = "<UNK>"
EMP = "<EMP>"
NA = "NA"
#PAUSE_FEATURE_NAME = 'pause_before'
#ALL_POSSIBLE_INPUT_FEATURES = {'word', 'pos', 'pause_before', 'speech_rate_norm', 'f0_mean', 'f0_range', 'i0_mean', 'i0_range'}
def pad(l, size, padding):
if size >= len(l):
return l + [padding] * abs((len(l)-size))
else:
return l[0:size]
def read_proscript(filename, add_end=False):
columns = defaultdict(list) # each value in each column is appended to a list
skip_columns = []
with open(filename) as f:
reader = csv.DictReader(f, delimiter='|') # read rows into a dictionary format
for row in reader: # read a row as {column1: value1, column2: value2,...}
for (k,v) in row.items(): # go over each column name and value
if not k in skip_columns:
if "word" in k or "punctuation" in k or "pos" in k:
columns[k].append(v) # append the value into the appropriate list
else:
try:
columns[k].append(float(v)) # real value
except ValueError:
skip_columns.append(k)
if add_end and not columns['word'][-1] == END:
for k in columns.keys():
if "word" in k or "pos" in k:
columns[k].append(END)
elif "punctuation" in k:
columns[k].append("")
else:
columns[k].append(0.0)
return columns
def checkArgument(argname, isFile=False, isDir=False, createDir=False):
if not argname:
return False
else:
if isFile and not os.path.isfile(argname):
return False
if isDir:
if not os.path.isdir(argname):
if createDir:
print("Creating directory %s"%(argname))
os.makedirs(argname)
else:
return False
return True
def iterable_to_dict(arr):
return dict((x.strip(), i) for (i, x) in enumerate(arr))
def read_vocabulary(file_name):
with codecs.open(file_name, 'r', 'utf-8') as f:
return iterable_to_dict(f.readlines())
def to_array(arr, dtype=np.int32):
# minibatch of 1 sequence as column
return np.array([arr], dtype=dtype).T
def create_pause_bins():
bins = np.arange(0, 1, 0.05)
bins = np.concatenate((bins, np.arange(1, 2, 0.1)))
bins = np.concatenate((bins, np.arange(2, 5, 0.2)))
bins = np.concatenate((bins, np.arange(5, 10, 0.5)))
bins = np.concatenate((bins, np.arange(10, 20, 1)))
return bins
def create_pause_bins9():
bins = np.array([ 0. , 0.25, 0.5 , 0.75, 1. , 2. , 3. , 4. , 5. ])
return bins
def create_pause_bins2():
return [0.0, 1.14]
def create_pause_bins3():
return [0.0, 0.2, 1.0]
def create_semitone_bins():
bins = np.arange(-20, -10, 1)
bins = np.concatenate((bins, np.arange(-10, -5, 0.5)))
bins = np.concatenate((bins, np.arange(-5, 0, 0.25)))
bins = np.concatenate((bins, np.arange(0, 5, 0.25)))
bins = np.concatenate((bins, np.arange(5, 10, 0.5)))
bins = np.concatenate((bins, np.arange(10, 20, 1)))
return bins
def levels_from_file(filename):
with open(filename) as f:
lst = [float(line.rstrip()) for line in f]
return lst
def get_level_maker(levels_file):
levels_list = levels_from_file(levels_file)
def get_level(value):
level = 0
for level_bin in levels_list:
if value > level_bin:
level +=1
else:
return level
return level
no_of_levels = len(levels_list) + 1
return get_level, no_of_levels
#OBSOLETE
def convert_value_to_level_sequence(value_sequence, bins):
levels = []
for value in value_sequence:
level = 0
for bin_no, bin_upper_limit in enumerate(bins):
if value > bin_upper_limit:
level += 1
else:
break
levels.append(level)
return levels
def reducePuncCode(puncCode):
if puncCode in [4, 5, 6, 7, 8]: #period
return 2
else:
return puncCode
def reducePunc(punc):
if punc and not punc.isspace():
puncCode = INV_PUNCTUATION_CODES[punc]
reducedPuncCode = reducePuncCode(puncCode)
return PUNCTUATION_VOCABULARY[reducedPuncCode]
else:
return punc
| [
"codecs.open",
"os.makedirs",
"os.path.isdir",
"csv.DictReader",
"collections.defaultdict",
"os.path.isfile",
"numpy.arange",
"numpy.array"
] | [((920, 937), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (931, 937), False, 'from collections import defaultdict\n'), ((2497, 2518), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.05)'], {}), '(0, 1, 0.05)\n', (2506, 2518), True, 'import numpy as np\n'), ((2780, 2837), 'numpy.array', 'np.array', (['[0.0, 0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0, 5.0]'], {}), '([0.0, 0.25, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0, 5.0])\n', (2788, 2837), True, 'import numpy as np\n'), ((3000, 3022), 'numpy.arange', 'np.arange', (['(-20)', '(-10)', '(1)'], {}), '(-20, -10, 1)\n', (3009, 3022), True, 'import numpy as np\n'), ((1046, 1078), 'csv.DictReader', 'csv.DictReader', (['f'], {'delimiter': '"""|"""'}), "(f, delimiter='|')\n", (1060, 1078), False, 'import csv\n'), ((2267, 2303), 'codecs.open', 'codecs.open', (['file_name', '"""r"""', '"""utf-8"""'], {}), "(file_name, 'r', 'utf-8')\n", (2278, 2303), False, 'import codecs\n'), ((2432, 2460), 'numpy.array', 'np.array', (['[arr]'], {'dtype': 'dtype'}), '([arr], dtype=dtype)\n', (2440, 2460), True, 'import numpy as np\n'), ((2549, 2569), 'numpy.arange', 'np.arange', (['(1)', '(2)', '(0.1)'], {}), '(1, 2, 0.1)\n', (2558, 2569), True, 'import numpy as np\n'), ((2602, 2622), 'numpy.arange', 'np.arange', (['(2)', '(5)', '(0.2)'], {}), '(2, 5, 0.2)\n', (2611, 2622), True, 'import numpy as np\n'), ((2655, 2676), 'numpy.arange', 'np.arange', (['(5)', '(10)', '(0.5)'], {}), '(5, 10, 0.5)\n', (2664, 2676), True, 'import numpy as np\n'), ((2709, 2729), 'numpy.arange', 'np.arange', (['(10)', '(20)', '(1)'], {}), '(10, 20, 1)\n', (2718, 2729), True, 'import numpy as np\n'), ((3053, 3076), 'numpy.arange', 'np.arange', (['(-10)', '(-5)', '(0.5)'], {}), '(-10, -5, 0.5)\n', (3062, 3076), True, 'import numpy as np\n'), ((3109, 3131), 'numpy.arange', 'np.arange', (['(-5)', '(0)', '(0.25)'], {}), '(-5, 0, 0.25)\n', (3118, 3131), True, 'import numpy as np\n'), ((3164, 3185), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(0.25)'], {}), 
'(0, 5, 0.25)\n', (3173, 3185), True, 'import numpy as np\n'), ((3218, 3239), 'numpy.arange', 'np.arange', (['(5)', '(10)', '(0.5)'], {}), '(5, 10, 0.5)\n', (3227, 3239), True, 'import numpy as np\n'), ((3272, 3292), 'numpy.arange', 'np.arange', (['(10)', '(20)', '(1)'], {}), '(10, 20, 1)\n', (3281, 3292), True, 'import numpy as np\n'), ((1924, 1947), 'os.path.isfile', 'os.path.isfile', (['argname'], {}), '(argname)\n', (1938, 1947), False, 'import os\n'), ((1987, 2009), 'os.path.isdir', 'os.path.isdir', (['argname'], {}), '(argname)\n', (2000, 2009), False, 'import os\n'), ((2080, 2100), 'os.makedirs', 'os.makedirs', (['argname'], {}), '(argname)\n', (2091, 2100), False, 'import os\n')] |
import os
import numpy as np
from keras import backend as K
from keras.losses import mean_absolute_error
import utils
from model import wdsr_b
def psnr(hr, sr, max_val=2):
mse = K.mean(K.square(hr - sr))
return 10.0 / np.log(10) * K.log(max_val ** 2 / mse)
def data_generator(path, batch_size=8, input_shape=96, scale=2):
'''data generator for fit_generator'''
fns = os.listdir(path)
n = len(fns)
i = 0
while True:
lrs, hrs = [], []
for b in range(batch_size):
if i == 0:
np.random.shuffle(fns)
fn = fns[i]
fn = os.path.join(path, fn)
lr, hr = utils.pair(fn, input_shape, scale)
lr = utils.normalization(lr)
hr = utils.normalization(hr)
lrs.append(lr)
hrs.append(hr)
i = (i + 1) % n
lrs = np.array(lrs)
hrs = np.array(hrs)
yield lrs, hrs
model = wdsr_b()
model.compile(optimizer='adam',
loss=mean_absolute_error, metrics=[psnr])
model.fit_generator(data_generator('./datasets/train/'),
steps_per_epoch=50,
epochs=1250)
| [
"utils.normalization",
"utils.pair",
"numpy.log",
"keras.backend.log",
"numpy.array",
"model.wdsr_b",
"keras.backend.square",
"os.path.join",
"os.listdir",
"numpy.random.shuffle"
] | [((946, 954), 'model.wdsr_b', 'wdsr_b', ([], {}), '()\n', (952, 954), False, 'from model import wdsr_b\n'), ((389, 405), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (399, 405), False, 'import os\n'), ((193, 210), 'keras.backend.square', 'K.square', (['(hr - sr)'], {}), '(hr - sr)\n', (201, 210), True, 'from keras import backend as K\n'), ((243, 268), 'keras.backend.log', 'K.log', (['(max_val ** 2 / mse)'], {}), '(max_val ** 2 / mse)\n', (248, 268), True, 'from keras import backend as K\n'), ((871, 884), 'numpy.array', 'np.array', (['lrs'], {}), '(lrs)\n', (879, 884), True, 'import numpy as np\n'), ((899, 912), 'numpy.array', 'np.array', (['hrs'], {}), '(hrs)\n', (907, 912), True, 'import numpy as np\n'), ((230, 240), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (236, 240), True, 'import numpy as np\n'), ((614, 636), 'os.path.join', 'os.path.join', (['path', 'fn'], {}), '(path, fn)\n', (626, 636), False, 'import os\n'), ((658, 692), 'utils.pair', 'utils.pair', (['fn', 'input_shape', 'scale'], {}), '(fn, input_shape, scale)\n', (668, 692), False, 'import utils\n'), ((710, 733), 'utils.normalization', 'utils.normalization', (['lr'], {}), '(lr)\n', (729, 733), False, 'import utils\n'), ((751, 774), 'utils.normalization', 'utils.normalization', (['hr'], {}), '(hr)\n', (770, 774), False, 'import utils\n'), ((550, 572), 'numpy.random.shuffle', 'np.random.shuffle', (['fns'], {}), '(fns)\n', (567, 572), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import sys
sys.path.append("../ar/")
import fastopc, time
import functionLib as lib
import micStream
nStrips = 16
lStrip = 64
client = fastopc.FastOPC('localhost:7890')
pixels = lib.Pixels(nStrips, lStrip, 0)
theoStrip = np.zeros([lStrip, 3])
stream = micStream.Stream(fps=40, nBuffers=4)
powerSmooth = lib.ExpFilter(val=0.05, alpha_rise=0.05, alpha_decay=0.05)
nColorWheel = 1500
colorWheel = lib.getColorWheel(nColorWheel)
frameCount = 0
while True:
success = stream.readAndCalc()
if success:
frameNumEff = np.mod(frameCount, nColorWheel)
power = np.sum(stream.freqSpectrum[10//5:300//5])
powerSmooth.update(power)
displayPower = int(122*power/powerSmooth.value)
theoStrip = displayPower * colorWheel[frameNumEff]
pixels.update(theoStrip, 0.9, 0.2)
#print(displayPower * colorWheel[frameNumEff])
client.putPixels(0, pixels.getArrayForDisplay())
frameCount+=1
| [
"sys.path.append",
"numpy.sum",
"numpy.zeros",
"functionLib.Pixels",
"numpy.mod",
"functionLib.ExpFilter",
"fastopc.FastOPC",
"micStream.Stream",
"functionLib.getColorWheel"
] | [((53, 78), 'sys.path.append', 'sys.path.append', (['"""../ar/"""'], {}), "('../ar/')\n", (68, 78), False, 'import sys\n'), ((178, 211), 'fastopc.FastOPC', 'fastopc.FastOPC', (['"""localhost:7890"""'], {}), "('localhost:7890')\n", (193, 211), False, 'import fastopc, time\n'), ((225, 255), 'functionLib.Pixels', 'lib.Pixels', (['nStrips', 'lStrip', '(0)'], {}), '(nStrips, lStrip, 0)\n', (235, 255), True, 'import functionLib as lib\n'), ((268, 289), 'numpy.zeros', 'np.zeros', (['[lStrip, 3]'], {}), '([lStrip, 3])\n', (276, 289), True, 'import numpy as np\n'), ((300, 336), 'micStream.Stream', 'micStream.Stream', ([], {'fps': '(40)', 'nBuffers': '(4)'}), '(fps=40, nBuffers=4)\n', (316, 336), False, 'import micStream\n'), ((352, 410), 'functionLib.ExpFilter', 'lib.ExpFilter', ([], {'val': '(0.05)', 'alpha_rise': '(0.05)', 'alpha_decay': '(0.05)'}), '(val=0.05, alpha_rise=0.05, alpha_decay=0.05)\n', (365, 410), True, 'import functionLib as lib\n'), ((443, 473), 'functionLib.getColorWheel', 'lib.getColorWheel', (['nColorWheel'], {}), '(nColorWheel)\n', (460, 473), True, 'import functionLib as lib\n'), ((575, 606), 'numpy.mod', 'np.mod', (['frameCount', 'nColorWheel'], {}), '(frameCount, nColorWheel)\n', (581, 606), True, 'import numpy as np\n'), ((623, 668), 'numpy.sum', 'np.sum', (['stream.freqSpectrum[10 // 5:300 // 5]'], {}), '(stream.freqSpectrum[10 // 5:300 // 5])\n', (629, 668), True, 'import numpy as np\n')] |
import tensorflow as tf
try:
# the endless shuffle of keras modules
import tensorflow.keras as keras
from tensorflow.keras import layers
print("Using TF-Keras version:", keras.__version__)
except ImportError:
import keras
import keras.layers as layers
print("Using Keras version:", keras.__version__)
import numpy as np
import math
# sphinx-doc hickup: a member named `call` seems to cause all kinds of sphinx-hickup
# error starting with non-existing line-12 docstrings, if automatic :member: doc
# is activated in index.rst.
class ResidualBlock(layers.Layer):
""" Residual Block layer for Keras
The residual block consists of two fully connected layers with units neurons
followed by two BatchNorms and ReLUs:
.. code-block:: none
# ┌──────────────────────────────────────────────────┐
# │ ┌─────┐ ┌──┐ ┌────┐ ┌─────┐ ┌──┐ ┌────┐ ▼
# ──┴─►│Dense│─►│BN│─►│ReLU│───►│Dense│─►│BN│─►│ReLU│─ + ─► highway=True
# └─────┘ └──┘ └────┘ └─────┘ └──┘ └────┘
#
# ┌──────────────────────────────────────────┐
# │ ┌─────┐ ┌──┐ ┌────┐ ┌─────┐ ┌──┐ ▼ ┌────┐
# ──┴─►│Dense│─►│BN│─►│ReLU│───►│Dense│─►│BN│─ + ─►│ReLU│─► highway=False
# └─────┘ └──┘ └────┘ └─────┘ └──┘ └────┘
The additive residual connection either bridges all layers (highway), or
connects just before the last ReLU.
:param units: Positive integer, number of hidden units.
:param highway: Boolean, whether to use highway connection or not.
"""
def __init__(self, units, highway=False, **kwargs):
self.units=units
self.highway=highway
super(ResidualBlock, self).__init__(**kwargs)
self.dense1 = layers.Dense(self.units)
self.bn1 = layers.BatchNormalization()
self.relu = layers.ReLU()
self.dense2 = layers.Dense(self.units)
self.bn2 = layers.BatchNormalization()
self.relu2 = layers.ReLU()
def get_config(self):
config = super().get_config()
config.update({
'units': self.units,
'highway': self.highway
})
return config
def call(self, inputs): # This member name kills sphinx's autodoc for members! Beware!
x=self.dense1(inputs)
x=self.bn1(x)
x=self.relu(x)
x=self.dense2(x)
x=self.bn2(x)
if self.highway:
x=self.relu2(x)
x=x+inputs
else:
x=x+inputs
x=self.relu2(x)
return x
class ResidualDense(layers.Layer):
""" Residual Dense layer for Keras
The residual dense layer consists of a fully connected layer followed by BatchNorm and ReLU:
.. code-block:: none
# ┌─────────────────────────┐
# │ ┌─────┐ ┌──┐ ┌────┐ ▼
# ──┴─►│Dense│─►│BN│─►│ReLU│─ + ─►
# └─────┘ └──┘ └────┘
:param units: Positive integer, number of hidden units.
:param regularizer: Positive float, regularization strength for the Dense layer.
"""
def __init__(self, units, regularizer=0, **kwargs):
self.units=units
self.regularizer=regularizer
super(ResidualDense, self).__init__(**kwargs)
if self.regularizer != 0:
self.dense1 = layers.Dense(self.units,
kernel_regularizer=keras.regularizers.l2(self.regularizer))
else:
self.dense1 = layers.Dense(self.units)
self.bn1 = layers.BatchNormalization()
self.relu = layers.ReLU()
def get_config(self):
config = super().get_config()
config.update({
'units': self.units,
'regularizer': self.regularizer
})
return config
def call(self, inputs):
x=self.dense1(inputs)
x=self.relu(x)
x=self.bn1(x)
x=x+inputs
return x
class ResidualDenseStack(layers.Layer):
""" Residual Dense layer for Keras
The residual dense layer stack consists of `layer_count` :class:`ResidualDense` layers.
.. code-block:: none
# ┌─────────── n ─────────────┐ n = layer_count repetitions
# ┌─────────────────────────┐
# │ ┌─────┐ ┌──┐ ┌────┐ ▼
# ──┴─►│Dense│─►│BN│─►│ReLU│─ + ─►
# └─────┘ └──┘ └────┘
:param units: Positive integer, number of hidden units.
:param layer_count: Positive integer, number of layer-blocks, each a `ResidualDense` block.
:param regularizer: Positive float, regularization strength for the Dense layer.
"""
def __init__(self, units, layer_count, regularizer=0, **kwargs):
self.units=units
self.layer_count=layer_count
self.regularizer=regularizer
super(ResidualDenseStack, self).__init__(**kwargs)
self.rd=[]
for _ in range(0, self.layer_count):
self.rd.append(ResidualDense(self.units, regularizer=self.regularizer))
def get_config(self):
config = super().get_config()
config.update({
'units': self.units,
'layers': self.layer_count,
'regularizer': self.regularizer
})
return config
def call(self, inputs):
x=self.rd[0](inputs)
for i in range(1, self.layer_count):
x=self.rd[i](x)
return x
class ParallelResidualDenseStacks(layers.Layer):
""" Parallel Residual Dense Stacks layer for Keras
The parallel residual dense layer stacks consist of `stacks` count parallel
:class:`ResidualDenseStack`, each of which consists of `layer_count` :class:`ResidualDense`
layers. The output of all parallel stacks is concatenated and scaled down to `units` units.
.. code-block:: none
# ┌─────────── n ─────────────┐ n = layer_count repetitions
# ┌─────────────────────────┐
# │ ┌─────┐ ┌──┐ ┌────┐ ▼ ┌──────┐
# ┌─────┴─►│Dense│─►│BN│─►│ReLU│─ + ─► │ │
# │ └─────┘ └──┘ └────┘ │ │
# │ │ │
# │ ┌─────────── n ─────────────┐ │ │
# │ ┌─────────────────────────┐ │ │
# │ │ ┌─────┐ ┌──┐ ┌────┐ ▼ │concat│ ┌─────┐ ┌────┐
# ├─────┴─►│Dense│─►│BN│─►│ReLU│─ + ─► │ │ ─►│Dense│─►│ReLU│─►
# ──┤ └─────┘ └──┘ └────┘ │ │ └─────┘ └────┘
# │ . │ │ scale down to
# │ . `stacks` reps │ │ `units`.
# │ . │ │
# │ ┌─────────── n ─────────────┐ │ │
# │ ┌─────────────────────────┐ │ │
# │ │ ┌─────┐ ┌──┐ ┌────┐ ▼ │ │
# └─────┴─►│Dense│─►│BN│─►│ReLU│─ + ─► │ │
# └─────┘ └──┘ └────┘ └──────┘
:param units: Positive integer, number of hidden units.
:param layer_count: Positive integer, number of layer-blocks, each a `ResidualDense` block.
:param stacks: Positive integer, number of parallel stacks.
:param regularizer: Positive float, regularization strength for the Dense layer.
"""
def __init__(self, units, layer_count, stacks, dispatch, regularizer=0, **kwargs):
super(ParallelResidualDenseStacks, self).__init__(**kwargs)
self.units=units
self.layer_count=layer_count
self.stacks=stacks
self.dispatch=dispatch
self.regularizer=regularizer
if self.dispatch is True:
self.scale = layers.Dense(units*stacks, activation=None)
else:
self.scale = layers.Dense(units, activation=None)
self.rds=[]
for _ in range(0, self.stacks):
self.rds.append(ResidualDenseStack(self.units, self.layer_count,
regularizer=self.regularizer))
self.rescale_relu = layers.ReLU()
self.concat = layers.Concatenate()
if self.regularizer != 0:
self.rescale = layers.Dense(self.units,
kernel_regularizer=keras.regularizers.l2(self.regularizer))
else:
self.rescale = layers.Dense(self.units)
def get_config(self):
config = super().get_config()
config.update({
'units': self.units,
'layers': self.layer_count,
'stacks': self.stacks,
'dispatch': self.dispatch,
'regularizer': self.regularizer
})
return config
def call(self, inputs):
xa=[]
# Scale up
x=self.scale(inputs)
for i in range(0, self.stacks):
if self.dispatch:
xa.append(self.rds[i](x[:,i*self.units:(i+1)*self.units]))
else:
xa.append(self.rds[i](x))
x=self.concat(xa)
x=self.rescale(x)
x=self.rescale_relu(x)
return x
class SelfAttention(layers.Layer):
""" Self-attention layer for Keras
The self-attention layer learns three matrices (key :math:`W_k`, query :math:`W_q`, value :math:`W_v`)
that provide context-information for the :math:`input`.
Input is mutiplied with all three matrices, then :math:`W_k` and :math:`W_q` are multiplied,
scaled down by :math:`\\sqrt{\\dim{input}[-1]}` and normalized, either by LayerNorm,
BatchNorm or Softmax or not at all. The result is then multiplied with :math:`W_v`, and, if hidden
dimension of the :math:`W_{x_i}` matrices is different from input units last dimension,
rescaled by a final dense matrix multiply. Output has same shape as input.
.. code-block:: none
#
# ┌──┐
# ┌► │Wk│───┐ ┌─────┐
# │ └──┘ │ │Scale│
# │ ┌──┐ × ─►│Norm │─┐ (opt.)
# ─┼─►│Wq│───┘ └─────┘ │ ┌─────┐
# │ └──┘ │ │Scale│──►
# │ ┌──┐ × ─►│Dense│
# └► │Wv│───────────────┘ └─────┘
# └──┘
#
:param units: Positive integer, number of hidden units. The matrices :math:`W_{x_i}` are of shape :math:`hs \\times hs`.
:param norm: either 'batchnorm', 'layernorm', 'softmax', or None
"""
def __init__(self, units=None, norm=None, **kwargs):
super(SelfAttention, self).__init__(**kwargs)
self.units = units
self.norm = norm
if self.norm=="layernorm":
self.norm = layers.LayerNormalization(axis=-1)
elif self.norm=="batchnorm":
self.norm = layers.BatchNormalization()
elif self.norm=="softmax":
self.norm = layers.Softmax()
elif self.norm==None or self.norm == "none":
self.norm = None
else:
raise ValueError("Unknown norm: {}".format(self.norm))
self.pm = layers.Permute((2,1))
def build(self, input_shape):
self.fact = math.sqrt(input_shape[-1])
if self.units is None:
dim2 = input_shape[-1]
else:
dim2 = self.units
self.scale = self.add_weight(shape=(dim2, input_shape[-1]),
initializer="random_normal", name='w1', trainable=True)
self.w_keys = self.add_weight(shape=(input_shape[-1], dim2),
initializer="random_normal", name='w2', trainable=True)
self.w_queries = self.add_weight(shape=(input_shape[-1], dim2),
initializer="random_normal", name='w3', trainable=True)
self.w_values = self.add_weight(shape=(input_shape[-1], dim2),
initializer="random_normal", name='w4', trainable=True)
def get_config(self):
config = super().get_config()
config.update({
'units': self.units,
'norm': self.norm
})
return config
def call(self, inputs):
vk = tf.matmul(inputs, self.w_keys)
vq = tf.matmul(inputs, self.w_queries)
vv = tf.matmul(inputs, self.w_values)
kq = tf.matmul(vk, vq, transpose_b=True)
kqs = kq/self.fact
if self.norm is not None:
sn = self.norm(kqs)
else:
sn = kqs
out = tf.matmul(sn, self.pm(vv), transpose_b=True)
if self.units is not None:
out = tf.matmul(out, self.scale)
return out
class MultiHeadSelfAttention(layers.Layer):
""" Multi-head self-attention layer for Keras
The multi-head self-attention layer concatenates the output of `heads` :class:`SelfAttention`
layers. Each of the self-attention layers has an additive residual connection.
If `mh_normalize` is True, the concatenated output is normalized.
After scaling down to the number of units, the output is then passed through a
ReLU and Dense layer again with residual connection.
Finally, optional normalization and a final optional ReLU is applied.
Output has same shape as input.
.. code-block:: none
# ┌──────────────┐
# │ ┌────────┐ ▼ ┌──────┐ ┌────┐
# ┌─┴─►│SelfAtt.│─ + ─►│ │ │ │
# │ └────────┘ │ │ │ │
# │ ┌──────────────┐ │ │ │ │ ┌───────────────────┐ ┌────┐
# ─┤ │ ┌────────┐ ▼ │ │ │Opt.│ ┌─────┐ │ ┌────┐ ┌─────┐ ▼ │Opt │
# ├─┴─►│SelfAtt.│─ + ─►│ │─►│Norm│─►│Scale│─┴─►│ReLU│─►│Dense│─ + ─►│Norm│─►
# │ └────────┘ │concat│ │ │ └─────┘ └────┘ └─────┘ └────┘
# │ . │ or │ │ │
# │ . head │ relu │ │ │
# │ . reps │ +add │ │ │
# │ ┌──────────────┐ │ │ │ │
# │ │ ┌────────┐ ▼ │ │ │ │
# └─┴─►│SelfAtt.│─ + ─►│ │ │ │
# └────────┘ └──────┘ └────┘
:param units: Positive integer `hs`, number of hidden units.
:param heads: Positive integer, number of self-attention heads.
:param mh_normalize: Boolean, whether to normalize the output of the multi-head self-attention.
:param norm: either 'batchnorm', 'layernorm, or 'softmax', the normalization used within each self-attention head.
:param join_heads_by_add: on true heads are added after additional relu-nonlin, instead of concatenated (original all-you-need).
"""
def __init__(self, heads, units=None, norm=None, mh_normalize=True,
final_relu=False, join_heads_by_add=False, **kwargs):
super(MultiHeadSelfAttention, self).__init__(**kwargs)
self.heads=heads
self.units = units
self.norm = norm
self.mh_normalize = mh_normalize
self.final_relu = final_relu
self.mhsa=[]
for _ in range(0,self.heads):
self.mhsa.append(SelfAttention(units=self.units, norm=self.norm))
self.join_heads_by_add = join_heads_by_add
if self.join_heads_by_add is False:
self.cc = layers.Concatenate(axis=1)
if self.mh_normalize is True:
self.ln1 = layers.LayerNormalization()
self.ln2 = layers.LayerNormalization()
self.relu1 = layers.ReLU()
self.relu2 = layers.ReLU()
self.pm = layers.Permute((2,1))
def build(self, input_shape):
if self.join_heads_by_add is False:
self.w_heads = self.add_weight(shape=(self.heads * input_shape[-1], input_shape[-1]),
initializer="random_normal", name='w5concat', trainable=True)
else:
self.w_heads = self.add_weight(shape=(input_shape[-1], input_shape[-1]),
initializer="random_normal", name='w5add', trainable=True)
self.lin = self.add_weight(shape=(input_shape[-1], input_shape[-1]),
initializer="random_normal", name='w6', trainable=True)
def get_config(self):
config = super().get_config()
config.update({
'heads': self.heads,
'units': self.units,
'norm': self.norm,
'mh_normalize': self.mh_normalize,
'final_relu': self.final_relu,
'join_heads_by_add': self.join_heads_by_add
})
return config
def call(self, inputs):
xa=[]
for i in range(0, self.heads):
xa.append(self.pm(self.mhsa[i](inputs)+inputs))
if self.join_heads_by_add is True:
for i in range(len(xa)):
if i==0:
x=self.relu2(xa[i])
else:
x=x+self.relu2(xa[i])
x=self.pm(x)
else:
x=self.pm(self.cc(xa))
if self.mh_normalize is True:
x = self.ln1(x)
xt = tf.matmul(x, self.w_heads)
x = self.relu1(xt)
x = tf.matmul(x, self.lin) + xt
if self.mh_normalize is True:
x = self.ln2(x)
return x
class PositionalEncoding(layers.Layer):
""" Positional encoding layer.
adds sinusoid of different frequencies to the input. Can be used to add sequence-information to input
data for transformers or attention layers.
:param amplitude: float, amplitude of the encoding, default=1.0.
:param trainable: boolean, whether the weights of the layer are trainable, default=False.
"""
def __init__(self, amplitude=1.0, trainable=False, **kwargs):
super(PositionalEncoding, self).__init__(**kwargs)
self.amplitude = amplitude
self.trainable = trainable
# positional encoding taken from: https://www.tensorflow.org/text/tutorials/transformer
@staticmethod
def _get_angles(pos, i, d_model):
angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))
return pos * angle_rates
def _positional_encoding(self, position, d_model):
angle_rads = PositionalEncoding._get_angles(np.arange(position)[:, np.newaxis],
np.arange(d_model)[np.newaxis, :],
d_model)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...] * self.amplitude
return tf.cast(pos_encoding, dtype=tf.float32)
def get_config(self):
config = super().get_config()
config.update({
'amplitude': self.amplitude,
'trainable': self.trainable,
})
return config
def build(self, input_shape):
self.pe = self._positional_encoding(input_shape[1], input_shape[2])
def call(self, inputs):
return tf.add(inputs, self.pe)
| [
"keras.regularizers.l2",
"math.sqrt",
"numpy.float32",
"tensorflow.add",
"keras.layers.LayerNormalization",
"keras.layers.Concatenate",
"tensorflow.matmul",
"tensorflow.cast",
"keras.layers.ReLU",
"keras.layers.Dense",
"numpy.sin",
"numpy.cos",
"keras.layers.Permute",
"numpy.arange",
"ke... | [((1789, 1813), 'keras.layers.Dense', 'layers.Dense', (['self.units'], {}), '(self.units)\n', (1801, 1813), True, 'import keras.layers as layers\n'), ((1833, 1860), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1858, 1860), True, 'import keras.layers as layers\n'), ((1881, 1894), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (1892, 1894), True, 'import keras.layers as layers\n'), ((1917, 1941), 'keras.layers.Dense', 'layers.Dense', (['self.units'], {}), '(self.units)\n', (1929, 1941), True, 'import keras.layers as layers\n'), ((1961, 1988), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (1986, 1988), True, 'import keras.layers as layers\n'), ((2010, 2023), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (2021, 2023), True, 'import keras.layers as layers\n'), ((3551, 3578), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3576, 3578), True, 'import keras.layers as layers\n'), ((3599, 3612), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (3610, 3612), True, 'import keras.layers as layers\n'), ((8286, 8299), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (8297, 8299), True, 'import keras.layers as layers\n'), ((8322, 8342), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (8340, 8342), True, 'import keras.layers as layers\n'), ((11215, 11237), 'keras.layers.Permute', 'layers.Permute', (['(2, 1)'], {}), '((2, 1))\n', (11229, 11237), True, 'import keras.layers as layers\n'), ((11292, 11318), 'math.sqrt', 'math.sqrt', (['input_shape[-1]'], {}), '(input_shape[-1])\n', (11301, 11318), False, 'import math\n'), ((12317, 12347), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w_keys'], {}), '(inputs, self.w_keys)\n', (12326, 12347), True, 'import tensorflow as tf\n'), ((12361, 12394), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w_queries'], {}), '(inputs, self.w_queries)\n', (12370, 12394), True, 'import 
tensorflow as tf\n'), ((12408, 12440), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.w_values'], {}), '(inputs, self.w_values)\n', (12417, 12440), True, 'import tensorflow as tf\n'), ((12454, 12489), 'tensorflow.matmul', 'tf.matmul', (['vk', 'vq'], {'transpose_b': '(True)'}), '(vk, vq, transpose_b=True)\n', (12463, 12489), True, 'import tensorflow as tf\n'), ((15583, 15596), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (15594, 15596), True, 'import keras.layers as layers\n'), ((15618, 15631), 'keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (15629, 15631), True, 'import keras.layers as layers\n'), ((15650, 15672), 'keras.layers.Permute', 'layers.Permute', (['(2, 1)'], {}), '((2, 1))\n', (15664, 15672), True, 'import keras.layers as layers\n'), ((17205, 17231), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.w_heads'], {}), '(x, self.w_heads)\n', (17214, 17231), True, 'import tensorflow as tf\n'), ((18622, 18649), 'numpy.sin', 'np.sin', (['angle_rads[:, 0::2]'], {}), '(angle_rads[:, 0::2])\n', (18628, 18649), True, 'import numpy as np\n'), ((18734, 18761), 'numpy.cos', 'np.cos', (['angle_rads[:, 1::2]'], {}), '(angle_rads[:, 1::2])\n', (18740, 18761), True, 'import numpy as np\n'), ((18845, 18884), 'tensorflow.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (18852, 18884), True, 'import tensorflow as tf\n'), ((19244, 19267), 'tensorflow.add', 'tf.add', (['inputs', 'self.pe'], {}), '(inputs, self.pe)\n', (19250, 19267), True, 'import tensorflow as tf\n'), ((3500, 3524), 'keras.layers.Dense', 'layers.Dense', (['self.units'], {}), '(self.units)\n', (3512, 3524), True, 'import keras.layers as layers\n'), ((7921, 7966), 'keras.layers.Dense', 'layers.Dense', (['(units * stacks)'], {'activation': 'None'}), '(units * stacks, activation=None)\n', (7933, 7966), True, 'import keras.layers as layers\n'), ((8004, 8040), 'keras.layers.Dense', 'layers.Dense', (['units'], {'activation': 'None'}), '(units, 
activation=None)\n', (8016, 8040), True, 'import keras.layers as layers\n'), ((8571, 8595), 'keras.layers.Dense', 'layers.Dense', (['self.units'], {}), '(self.units)\n', (8583, 8595), True, 'import keras.layers as layers\n'), ((10834, 10868), 'keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {'axis': '(-1)'}), '(axis=-1)\n', (10859, 10868), True, 'import keras.layers as layers\n'), ((12731, 12757), 'tensorflow.matmul', 'tf.matmul', (['out', 'self.scale'], {}), '(out, self.scale)\n', (12740, 12757), True, 'import tensorflow as tf\n'), ((15395, 15421), 'keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (15413, 15421), True, 'import keras.layers as layers\n'), ((15483, 15510), 'keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {}), '()\n', (15508, 15510), True, 'import keras.layers as layers\n'), ((15534, 15561), 'keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {}), '()\n', (15559, 15561), True, 'import keras.layers as layers\n'), ((17271, 17293), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.lin'], {}), '(x, self.lin)\n', (17280, 17293), True, 'import tensorflow as tf\n'), ((10930, 10957), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (10955, 10957), True, 'import keras.layers as layers\n'), ((18347, 18366), 'numpy.arange', 'np.arange', (['position'], {}), '(position)\n', (18356, 18366), True, 'import numpy as np\n'), ((18439, 18457), 'numpy.arange', 'np.arange', (['d_model'], {}), '(d_model)\n', (18448, 18457), True, 'import numpy as np\n'), ((3419, 3458), 'keras.regularizers.l2', 'keras.regularizers.l2', (['self.regularizer'], {}), '(self.regularizer)\n', (3440, 3458), False, 'import keras\n'), ((8489, 8528), 'keras.regularizers.l2', 'keras.regularizers.l2', (['self.regularizer'], {}), '(self.regularizer)\n', (8510, 8528), False, 'import keras\n'), ((11017, 11033), 'keras.layers.Softmax', 'layers.Softmax', ([], {}), '()\n', (11031, 
11033), True, 'import keras.layers as layers\n'), ((18185, 18204), 'numpy.float32', 'np.float32', (['d_model'], {}), '(d_model)\n', (18195, 18204), True, 'import numpy as np\n')] |
"""Functions that work on collections of shapes
"""
from __future__ import division, print_function
import numpy as np
from .convex import convex_area, convex_centroid
__all__ = ['recenter_polygon', 'centroid_for_shapes',
'centroid_for_uncomputed_shapes', 'recenter_system',
'rescale_and_recenter_system', 'rotate_polygon',
'rotate_system', 'mirror_polygon', 'mirror_system',
'find_concave_outline']
def recenter_polygon(vertices):
    """Returns a new convex polygon with centroid at (0,0)

    Args:
        vertices (list): list of (x,y) vertices of convex polygon

    Returns:
        A list just like the input with the recentered vertices (but possibly
        transformed into numpy arrays)
    """
    offset = convex_centroid(vertices)
    # Shift every vertex so the polygon's centroid lands on the origin
    return [np.array(vertex) - offset for vertex in vertices]
def centroid_for_shapes(centroids, areas=None):
    """Calculates the centroid for a set of shapes

    Requires pre-computed centroids and areas.

    Args:
        centroids (list): list of (x,y) centroids for each shape
        areas (list): list of areas (floats) for each shape (if not given,
            assumes they are all equal)

    Returns:
        The (x,y) position of the weighted centroid (as np.array)
    """
    if areas is None:
        areas = np.ones(len(centroids))
    weighted_sum = np.zeros(2)
    total_weight = 0.0
    for shape_centroid, shape_area in zip(centroids, areas):
        weighted_sum += np.array(shape_centroid) * shape_area
        total_weight += shape_area
    return np.array(weighted_sum / total_weight)
def centroid_for_uncomputed_shapes(shape_list):
    """Like centroid_for_shapes but calculates centroids & areas

    Args:
        shape_list (list): a list of list of vertices (one for each shape)

    Returns:
        The (x,y) position of the weighted centroid (as np.array)
    """
    shape_centroids = [convex_centroid(shape) for shape in shape_list]
    shape_areas = [convex_area(shape) for shape in shape_list]
    return centroid_for_shapes(shape_centroids, shape_areas)
def recenter_system(shape_list):
    """Recenters a set of shapes around the centroid of all of them

    Args:
        shape_list (list): a list of list of vertices (one for each shape)

    Returns:
        List of two items:
        * Similar format as input, but transformed so that calculating the
          centroid_for_uncomputed_shapes() on that list returns (0,0)
        * The grand centroid for the system in original coordinates
    """
    centroids = []
    areas = []
    local_shapes = []
    # Express every shape relative to its own centroid
    for shape in shape_list:
        shape_centroid = convex_centroid(shape)
        centroids.append(shape_centroid)
        areas.append(convex_area(shape))
        local_shapes.append([np.array(v) - shape_centroid for v in shape])
    # Grand centroid of the whole system, then per-shape offsets from it
    grand_centroid = centroid_for_shapes(centroids, areas)
    offsets = [c - grand_centroid for c in centroids]
    # Re-assemble each shape around its offset from the grand centroid
    recentered = [[vertex + offset for vertex in shape]
                  for shape, offset in zip(local_shapes, offsets)]
    return recentered, grand_centroid
def rescale_and_recenter_system(shape_list, total_area):
    """Recenters a set of shapes and resizes them to have a total fixed area

    Args:
        shape_list (list): a list of list of vertices (one for each shape)
        total_area (float): the area to fix the shapes to

    Returns:
        List of two items:
        * Similar format as input, but transformed so that calculating the
          `centroid_for_uncomputed_shapes()` on that list returns (0,0) and
          summing the areas gets to `total_area`
        * The grand centroid for the system in original coordinates
    """
    centroids = []
    areas = []
    local_shapes = []
    # Express every shape relative to its own centroid
    for shape in shape_list:
        shape_centroid = convex_centroid(shape)
        centroids.append(shape_centroid)
        areas.append(convex_area(shape))
        local_shapes.append([np.array(v) - shape_centroid for v in shape])
    # Grand centroid of the whole system, then per-shape offsets from it
    grand_centroid = centroid_for_shapes(centroids, areas)
    offsets = [c - grand_centroid for c in centroids]
    # Linear dimensions scale with the square root of the area ratio
    dim_scale = np.sqrt(total_area / sum(areas))
    rescaled = [[(vertex + offset) * dim_scale for vertex in shape]
                for shape, offset in zip(local_shapes, offsets)]
    return rescaled, grand_centroid
def rotate_polygon(vertices, angle, center_point=(0., 0.)):
    """Rotates a shape around a given point (the origin)

    Args:
        vertices (list): A list of (x,y) vertices
        angle (float): Angle in radians to rotate counterclockwise
        center_point ([float, float]): (x,y) point to rotate around

    Returns:
        A list of vertices rotated around the center point
    """
    # Default is an immutable tuple: a mutable list default would be shared
    # across calls (classic Python pitfall), even though this function does
    # not currently mutate it.
    origin = np.array(center_point)
    shifted = [np.array(v) - origin for v in vertices]
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    # Standard 2D counterclockwise rotation matrix
    rot_mat = np.array([[cos_a, -sin_a],
                        [sin_a, cos_a]])
    return [np.dot(rot_mat, v) + origin for v in shifted]
def rotate_system(shape_list, angle, center_point=None):
    """Rotates a set of shapes around a given point

    If no center point is given, assume the center of mass of the shape.

    Args:
        shape_list (list): A list of list of (x,y) vertices
        angle (float): Angle in radians to rotate counterclockwise
        center_point ([float, float]): (x,y) point to rotate around

    Returns:
        A new shape list with rotated vertices
    """
    pivot = center_point
    if pivot is None:
        pivot = centroid_for_uncomputed_shapes(shape_list)
    rotated = []
    for shape in shape_list:
        rotated.append(rotate_polygon(shape, angle, pivot))
    return rotated
def mirror_polygon(vertices, axes=(False, True), center_point=None):
    """Mirrors a polygon around an x or y line

    If center_point is None, mirror around the center of the shape.

    Args:
        vertices (list): A list of (x,y) vertices
        axes ([bool, bool]): Whether to mirror around the (x,y) axes
        center_point ([float, float]): (x,y) point to mirror around

    Returns:
        A new polygon with rotated vertices
    """
    if center_point is None:
        center_point = convex_centroid(vertices)
    cx, cy = center_point[0], center_point[1]
    sign_x = -1 if axes[0] else 1
    sign_y = -1 if axes[1] else 1
    mirrored = []
    for v in vertices:
        # Reflect each coordinate about the corresponding center line
        mirrored.append(np.array([sign_x * (v[0] - cx) + cx,
                                  sign_y * (v[1] - cy) + cy]))
    return mirrored
def mirror_system(shape_list, axes=(False, True), center_point=None):
    """Mirrors a polygon around an x or y line

    Mirrors around the center of the system if center_point is None.

    Args:
        shape_list (list): A list of list of (x,y) vertices
        axes ([bool, bool]): Whether to mirror around the (x,y) axes
        center_point ([float, float]): (x,y) point to mirror around

    Returns:
        A new shape list with rotated vertices
    """
    pivot = center_point
    if pivot is None:
        pivot = centroid_for_uncomputed_shapes(shape_list)
    return [mirror_polygon(shape, axes, pivot) for shape in shape_list]
def _point_equal(p1, p2):
return p1[0]==p2[0] and p1[1] == p2[1]
def _arr_eq(a1, a2):
    """True when every paired point of the two vertex sequences matches."""
    for q1, q2 in zip(a1, a2):
        if not _point_equal(q1, q2):
            return False
    return True
def find_concave_outline(shape_list):
    """Find the outline of a set of shapes

    Assuming all shapes have edges in common with other shapes where they touch,
    provides a set of vertices for drawing the outline

    Args:
        shape_list (list): A list of list of (x,y) vertices

    Returns:
        A list of ordered (x,y) vertices for drawing an outline
    """
    # Find the most lower-right point
    # NOTE(review): the comparison below actually selects the minimum-x vertex
    # (ties broken by minimum y), i.e. a lower-LEFT extreme; the comment above
    # appears stale — confirm intended corner.
    current_shape = shape_list[0]
    current_pt = current_shape[0]
    # test_idx points at the candidate next vertex on current_shape;
    # next_test_dir is the traversal direction (+1/-1) along that shape
    test_idx = 1
    next_test_dir = 1
    for s in shape_list:
        for i in range(len(s)):
            p = s[i]
            if ((p[0] < current_pt[0]) or
                (p[0] == current_pt[0] and p[1] < current_pt[1])):
                # Replace: found a more extreme starting vertex
                current_pt = p
                current_shape = s
                test_idx = (i+1) % len(s)
                next_test_dir = 1
    vertex_list = [current_pt]
    # Keep going until you reach back to the first point
    while not _point_equal(current_shape[test_idx], vertex_list[0]):
        # Iterate through all the shapes to try to find a matching edge
        checking = True
        for s in (s for s in shape_list if not _arr_eq(s, current_shape)):
            if checking: # Way to break out if match found
                for i in range(len(s)):
                    spt = s[i]
                    if _point_equal(current_pt, spt):
                        # Candidate shape shares the current vertex; see if it
                        # also shares the edge we were about to walk
                        spt_after = s[(i+1) % len(s)]
                        spt_before = s[(i-1) % len(s)]
                        test_pt = current_shape[test_idx]
                        if _point_equal(test_pt, spt_after):
                            # Shared (interior) edge: hop onto shape s and
                            # continue walking it in the reverse direction
                            test_idx = (i-1) % len(s)
                            next_test_dir = -1
                            current_shape = s
                            checking = False
                        elif _point_equal(test_pt, spt_before):
                            # Shared edge in the other orientation: hop onto
                            # shape s and walk it forward
                            test_idx = (i+1) % len(s)
                            next_test_dir = 1
                            current_shape = s
                            checking = False
        # Have you exhausted all shapes?
        if checking:
            # No neighbor shares this edge, so it is on the outline: emit the
            # vertex and advance along the current shape
            current_pt = current_shape[test_idx]
            vertex_list.append(current_pt)
            test_idx += next_test_dir
            test_idx %= len(current_shape)
    return vertex_list
| [
"numpy.zeros",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot",
"numpy.sqrt"
] | [((1354, 1365), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1362, 1365), True, 'import numpy as np\n'), ((1554, 1566), 'numpy.array', 'np.array', (['gc'], {}), '(gc)\n', (1562, 1566), True, 'import numpy as np\n'), ((4369, 4396), 'numpy.sqrt', 'np.sqrt', (['(total_area / tot_a)'], {}), '(total_area / tot_a)\n', (4376, 4396), True, 'import numpy as np\n'), ((5017, 5039), 'numpy.array', 'np.array', (['center_point'], {}), '(center_point)\n', (5025, 5039), True, 'import numpy as np\n'), ((849, 860), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (857, 860), True, 'import numpy as np\n'), ((6483, 6595), 'numpy.array', 'np.array', (['[xm * (v[0] - center_point[0]) + center_point[0], ym * (v[1] - center_point\n [1]) + center_point[1]]'], {}), '([xm * (v[0] - center_point[0]) + center_point[0], ym * (v[1] -\n center_point[1]) + center_point[1]])\n', (6491, 6595), True, 'import numpy as np\n'), ((1495, 1507), 'numpy.array', 'np.array', (['pc'], {}), '(pc)\n', (1503, 1507), True, 'import numpy as np\n'), ((5053, 5064), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (5061, 5064), True, 'import numpy as np\n'), ((5217, 5235), 'numpy.dot', 'np.dot', (['rot_mat', 'v'], {}), '(rot_mat, v)\n', (5223, 5235), True, 'import numpy as np\n'), ((5116, 5129), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5122, 5129), True, 'import numpy as np\n'), ((5173, 5186), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5179, 5186), True, 'import numpy as np\n'), ((5188, 5201), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5194, 5201), True, 'import numpy as np\n'), ((2742, 2753), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2750, 2753), True, 'import numpy as np\n'), ((4040, 4051), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (4048, 4051), True, 'import numpy as np\n'), ((5132, 5145), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5138, 5145), True, 'import numpy as np\n')] |
from sympy import Rational, Symbol, latex, UnevaluatedExpr
import sympy as sp
import numpy as np
u = lambda x : UnevaluatedExpr(x)
# Helper functions
def explain_add(a, b):
    """Return a sympy Matrix whose entries spell out the sum a[i,j] + b[i,j].

    Each entry is a Symbol named "(latex(a_ij) + latex(b_ij))" so the
    addition is shown rather than evaluated. Shapes must match.
    """
    assert np.shape(a) == np.shape(b)
    rows, columns = np.shape(a)
    entries = [[Symbol(f"({latex(u(a[i,j]))} + {latex(u(b[i,j]))})")
                for j in range(columns)]
               for i in range(rows)]
    return sp.Matrix(entries)
def symbolic_matrix(character, rows, columns):
    """Build a sympy Matrix of placeholder symbols named after *character*.

    Row vectors get column-indexed names, column vectors get row-indexed
    names, and general matrices get double-indexed names (all 1-based).
    """
    if rows == 1:
        # row vector: index by column only
        names = [[Symbol(f"{{{character}}}_{{{j+1}}}") for j in range(columns)] for i in range(rows)]
    elif columns == 1:
        # column vector: index by row only
        names = [[Symbol(f"{{{character}}}_{{{i+1}}}") for j in range(columns)] for i in range(rows)]
    else:
        names = [[Symbol(f"{{{character}}}_{{{i+1}, {j+1}}}") for j in range(columns)] for i in range(rows)]
    return sp.Matrix(names)
def explain_multiply(a, b):
    """Return a sympy Matrix spelling out the matrix product a @ b.

    Each result entry is a Symbol whose name is the unevaluated sum of
    products "a_ik \\cdot b_kj", rendered via latex.
    """
    # #rows in b == #columns in a
    assert np.shape(a)[1] == np.shape(b)[0]
    rows = np.shape(a)[0]
    columns = np.shape(b)[1]
    result = np.empty(shape=(rows, columns), dtype=object)
    for i in range(rows):
        row = a[i,:]
        for j in range(columns):
            column = b[:,j]
            # Raw f-string so "\cdot" is a literal backslash-c, not an
            # (invalid) escape sequence as in the original plain string.
            terms = [fr"{latex(u(x))} \cdot {latex(u(y))}" for x, y in zip(row, column)]
            # (A dead `s = Symbol("")` local was removed here.)
            # NOTE(review): `evaluate=False` is forwarded to Symbol exactly as
            # in the original; Symbol treats unknown kwargs as assumptions,
            # so it is kept for backward compatibility.
            result[i, j] = Symbol(" + ".join(terms), evaluate=False)
    return sp.Matrix(result)
"sympy.Symbol",
"numpy.empty",
"sympy.UnevaluatedExpr",
"sympy.Matrix",
"numpy.shape"
] | [((113, 131), 'sympy.UnevaluatedExpr', 'UnevaluatedExpr', (['x'], {}), '(x)\n', (128, 131), False, 'from sympy import Rational, Symbol, latex, UnevaluatedExpr\n'), ((234, 245), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (242, 245), True, 'import numpy as np\n'), ((1001, 1046), 'numpy.empty', 'np.empty', ([], {'shape': '(rows, columns)', 'dtype': 'object'}), '(shape=(rows, columns), dtype=object)\n', (1009, 1046), True, 'import numpy as np\n'), ((1412, 1429), 'sympy.Matrix', 'sp.Matrix', (['result'], {}), '(result)\n', (1421, 1429), True, 'import sympy as sp\n'), ((186, 197), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (194, 197), True, 'import numpy as np\n'), ((201, 212), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (209, 212), True, 'import numpy as np\n'), ((944, 955), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (952, 955), True, 'import numpy as np\n'), ((973, 984), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (981, 984), True, 'import numpy as np\n'), ((899, 910), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (907, 910), True, 'import numpy as np\n'), ((917, 928), 'numpy.shape', 'np.shape', (['b'], {}), '(b)\n', (925, 928), True, 'import numpy as np\n'), ((1302, 1312), 'sympy.Symbol', 'Symbol', (['""""""'], {}), "('')\n", (1308, 1312), False, 'from sympy import Rational, Symbol, latex, UnevaluatedExpr\n'), ((733, 780), 'sympy.Symbol', 'Symbol', (['f"""{{{character}}}_{{{i + 1}, {j + 1}}}"""'], {}), "(f'{{{character}}}_{{{i + 1}, {j + 1}}}')\n", (739, 780), False, 'from sympy import Rational, Symbol, latex, UnevaluatedExpr\n'), ((490, 528), 'sympy.Symbol', 'Symbol', (['f"""{{{character}}}_{{{j + 1}}}"""'], {}), "(f'{{{character}}}_{{{j + 1}}}')\n", (496, 528), False, 'from sympy import Rational, Symbol, latex, UnevaluatedExpr\n'), ((624, 662), 'sympy.Symbol', 'Symbol', (['f"""{{{character}}}_{{{i + 1}}}"""'], {}), "(f'{{{character}}}_{{{i + 1}}}')\n", (630, 662), False, 'from sympy import Rational, Symbol, latex, 
UnevaluatedExpr\n')] |
##
# Copyright 2018, <NAME>
# Licensed under MIT.
# Since: v1.0.0
##
import time
import cv2
import numpy as np
from src.common.package.config import application
from src.opencv.package.config import application as _application
from src.common.package.http import server as _server
from src.common.package.http.handler import Handler
from src.common.package.camera.capture import Capture as _capture
from src.common.package.frame.action import Action as _frame
from src.common.package.frame.draw import Draw as _draw
from src.opencv.package.opencv.opencv import OpenCV
# Constant
# Shared OpenCV helper used by every request; constructed once at import time
# (presumably loads the DNN face-detection model once — confirm in OpenCV class).
_opencv = OpenCV()
##
# StreamHandler class - inherit Handler
# This class provide handler for HTTP streaming
# Note: this class should override Handler.stream
##
class StreamHandler(Handler):
    """HTTP handler that streams camera frames as a multipart JPEG response.

    Each captured frame is run through the OpenCV DNN face detector,
    detections and a timestamp are drawn on it, and the JPEG-encoded frame
    is written to the client as one "--FRAME" multipart chunk.
    """

    def stream(self):
        """Capture frames forever and write them to the HTTP response.

        Overrides Handler.stream(); never returns under normal operation.
        """
        Handler.stream(self)
        print('[INFO] Overriding stream method...')

        # Initialise capture device (Pi camera or regular webcam)
        capture = _capture(src=application.CAPTURING_DEVICE,
                           use_pi_camera=application.USE_PI_CAMERA,
                           resolution=application.RESOLUTION,
                           frame_rate=application.FRAME_RATE)

        if application.USE_PI_CAMERA:
            print('[INFO] Warming up pi camera...')
        else:
            print('[INFO] Warming up camera...')
        time.sleep(2.0)

        print('[INFO] Start capturing...')
        while True:
            # Read a frame from capture
            frame = capture.read()

            # Down size frame to 50% (to increase performance on Raspberry Pi)
            # frame = _frame.scale(frame=frame, scale=0.5)

            # (Removed: an unused grayscale conversion of the frame was
            # computed here every iteration but never read — dead work.)

            # Get frame dimensions
            (height, width) = frame.shape[:2]

            # OpenCV DNN face detection (300x300 input, fixed channel means)
            detections = _opencv.dnn_face_detector(frame=frame,
                                                    scale_factor=1.0,
                                                    size=(300, 300),
                                                    mean=(104.0, 177.0, 123.0))

            # Up size frame to 50% (how the frame was before down sizing)
            # frame = _frame.scale(frame=frame, scale=2)

            # Draw a labelled rectangle for every sufficiently confident detection
            for i in range(0, detections.shape[2]):
                # Get confidence associated with the detection
                confidence = detections[0, 0, i, 2]

                # Filter weak detection
                if confidence < _application.CONFIDENCE:
                    continue

                # Detection boxes are normalised; scale them to pixel coordinates
                box = detections[0, 0, i, 3:7] * np.array([width,
                                                            height,
                                                            width,
                                                            height])
                (left, top, right, bottom) = box.astype('int')
                coordinates = {'left': left,
                               'top': top,
                               'right': right,
                               'bottom': bottom}

                text = "{:.2f}%".format(confidence * 100)
                frame = _draw.rectangle(frame=frame,
                                        coordinates=coordinates,
                                        text=text)

            # Write date time on the frame (bottom-right corner)
            frame = _draw.text(frame=frame,
                               coordinates={'left': application.WIDTH - 150, 'top': application.HEIGHT - 20},
                               text=time.strftime('%d/%m/%Y %H:%M:%S', time.localtime()),
                               font_color=(0, 0, 255))

            # Encode the frame as JPEG and emit one multipart chunk
            retval, buffer = cv2.imencode('.jpg', frame)

            # Write buffer to HTML Handler
            self.wfile.write(b'--FRAME\r\n')
            self.send_header('Content-Type', 'image/jpeg')
            self.send_header('Content-Length', len(buffer))
            self.end_headers()
            self.wfile.write(buffer)
            self.wfile.write(b'\r\n')
##
# Method main()
##
def main():
    """Start the HTTP streaming server and serve requests until interrupted.

    On any exception the server socket is closed (if it was created) and the
    error is printed.
    """
    server = None
    try:
        address = ('', application.HTTP_PORT)
        server = _server.Server(address, StreamHandler)
        print('[INFO] HTTP server started successfully at %s' % str(server.server_address))
        print('[INFO] Waiting for client to connect to port %s' % str(application.HTTP_PORT))
        server.serve_forever()
    except Exception as e:
        # Only touch the socket when construction succeeded; the original code
        # raised NameError (unbound `server`) if Server() itself failed,
        # masking the real error.
        if server is not None:
            server.socket.close()
            print('[INFO] HTTP server closed successfully.')
        print('[ERROR] Exception: %s' % str(e))
# Script entry point: run the streaming server when executed directly.
if __name__ == '__main__':
    main()
| [
"src.opencv.package.opencv.opencv.OpenCV",
"cv2.cvtColor",
"src.common.package.http.handler.Handler.stream",
"time.sleep",
"src.common.package.http.server.Server",
"src.common.package.frame.draw.Draw.rectangle",
"numpy.array",
"cv2.imencode",
"src.common.package.camera.capture.Capture",
"time.loca... | [((591, 599), 'src.opencv.package.opencv.opencv.OpenCV', 'OpenCV', ([], {}), '()\n', (597, 599), False, 'from src.opencv.package.opencv.opencv import OpenCV\n'), ((860, 880), 'src.common.package.http.handler.Handler.stream', 'Handler.stream', (['self'], {}), '(self)\n', (874, 880), False, 'from src.common.package.http.handler import Handler\n'), ((981, 1144), 'src.common.package.camera.capture.Capture', '_capture', ([], {'src': 'application.CAPTURING_DEVICE', 'use_pi_camera': 'application.USE_PI_CAMERA', 'resolution': 'application.RESOLUTION', 'frame_rate': 'application.FRAME_RATE'}), '(src=application.CAPTURING_DEVICE, use_pi_camera=application.\n USE_PI_CAMERA, resolution=application.RESOLUTION, frame_rate=\n application.FRAME_RATE)\n', (989, 1144), True, 'from src.common.package.camera.capture import Capture as _capture\n'), ((1379, 1394), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1389, 1394), False, 'import time\n'), ((4335, 4373), 'src.common.package.http.server.Server', '_server.Server', (['address', 'StreamHandler'], {}), '(address, StreamHandler)\n', (4349, 4373), True, 'from src.common.package.http import server as _server\n'), ((1772, 1811), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1784, 1811), False, 'import cv2\n'), ((3885, 3912), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (3897, 3912), False, 'import cv2\n'), ((3313, 3377), 'src.common.package.frame.draw.Draw.rectangle', '_draw.rectangle', ([], {'frame': 'frame', 'coordinates': 'coordinates', 'text': 'text'}), '(frame=frame, coordinates=coordinates, text=text)\n', (3328, 3377), True, 'from src.common.package.frame.draw import Draw as _draw\n'), ((2762, 2802), 'numpy.array', 'np.array', (['[width, height, width, height]'], {}), '([width, height, width, height])\n', (2770, 2802), True, 'import numpy as np\n'), ((3727, 3743), 'time.localtime', 'time.localtime', ([], {}), 
'()\n', (3741, 3743), False, 'import time\n')] |
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import argparse
import sys
import numpy as np
DATA_DIM = 16
try:
import itimer as it
now = it.itime
get_mops = it.itime_mops_now
except:
from timeit import default_timer
now = default_timer
get_mops = lambda t0, t1, n: (n / (t1 - t0),t1-t0)
######################################################
# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #
######################################################
# make xrange available in python 3
try:
xrange
except NameError:
xrange = range
###############################################
def gen_data_x(nopt, data_dim=DATA_DIM):
    """Uniform-random feature matrix of shape (nopt, data_dim) in [0, 1)."""
    return np.random.rand(nopt, data_dim)
def gen_data_y(nopt, classes_num=3):
    """Random integer class labels of length nopt, drawn from range(classes_num)."""
    return np.random.randint(classes_num, size=nopt)
##############################################
def run(name, alg, sizes=10, step=2, nopt=2**10):
    """Benchmark driver: times `alg` on a geometrically growing test set.

    Args:
        name: label printed with each measurement line.
        alg: callable `alg(x_train, y_train, x_test, k=...)` returning
            predictions (a k-NN style classifier).
        sizes: default number of growth steps (CLI `--steps` overrides).
        step: default growth factor per step (CLI `--step` overrides).
        nopt: default initial test-set size (CLI `--size` overrides).

    Side effects: parses sys.argv, prints progress, and writes
    `perf_output.csv` (size, MOPS) and `runtimes.csv` (size, seconds).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--steps', type=int, default=sizes,
                        help='Number of steps')
    parser.add_argument('--step', type=int, default=step,
                        help='Factor for each step')
    parser.add_argument('--size', type=int, default=nopt,
                        help='Initial data size')
    parser.add_argument('--repeat', type=int, default=100,
                        help='Iterations inside measured region')
    parser.add_argument('--text', default='', help='Print with each result')

    args = parser.parse_args()
    nopt = args.size
    repeat = args.repeat
    # Training set size is fixed; only the test set grows each step
    train_data_size = 2**10

    # buffering=1: line-buffered so results survive an interrupted run
    with open('perf_output.csv', 'w', 1) as fd, open("runtimes.csv", 'w', 1) as fd2:
        for _ in xrange(args.steps):
            print("TRAIN_DATA_SIZE: ", train_data_size)
            print("TEST_DATA_SIZE: ", nopt)
            x_train, y_train = gen_data_x(train_data_size), gen_data_y(train_data_size)
            x_test = gen_data_x(nopt)
            n_neighbors = 5
            print('ERF: {}: Size: {}'.format(name, nopt), end=' ', flush=True)
            sys.stdout.flush()
            predictions = alg(x_train, y_train, x_test, k=n_neighbors) # warmup
            t0 = now()
            for _ in xrange(repeat):
                predictions = alg(x_train, y_train, x_test, k=n_neighbors)
            # get_mops returns (operations-per-second, elapsed seconds); the
            # per-call rate is multiplied by `repeat` below to report total MOPS
            mops, time = get_mops(t0, now(), nopt)

            result_mops = mops * repeat
            print('MOPS:', result_mops, args.text)
            fd.write('{},{}\n'.format(nopt, result_mops))
            fd2.write('{},{}\n'.format(nopt, time))

            print("TIME: ", time)
            # Grow the problem and shrink the repeat count to bound wall time
            nopt *= args.step
            repeat = max(repeat - args.step, 1)
| [
"numpy.random.rand",
"numpy.random.randint",
"sys.stdout.flush",
"argparse.ArgumentParser"
] | [((2141, 2171), 'numpy.random.rand', 'np.random.rand', (['nopt', 'data_dim'], {}), '(nopt, data_dim)\n', (2155, 2171), True, 'import numpy as np\n'), ((2238, 2279), 'numpy.random.randint', 'np.random.randint', (['classes_num'], {'size': 'nopt'}), '(classes_num, size=nopt)\n', (2255, 2279), True, 'import numpy as np\n'), ((2409, 2434), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2432, 2434), False, 'import argparse\n'), ((3543, 3561), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3559, 3561), False, 'import sys\n')] |
#!/usr/bin/env python2
#Author: <NAME>
import itertools
import numpy as np
from operator import mul
from sklearn.linear_model import LinearRegression
if __name__ == '__main__':
    # NOTE: Python 2 script (see shebang): `raw_input` and the builtin
    # `reduce` used below do not exist unqualified in Python 3.
    # Read input: first line is "<num_features> <num_training_rows>"
    f, n = map(int, raw_input().split())
    X = []
    y = []
    # Each training line: f feature values followed by the target value
    for _ in range(n):
        line = raw_input().split()
        X.append([float(x) for x in line[:-1]])
        y.append([float(line[-1])])
    # Then the number of query rows, followed by the query feature rows
    q = int(raw_input())
    Xt = []
    for _ in range(q):
        Xt.append([float(x) for x in raw_input().split()])

    # Add new features as monomials of degree <= 3: for every ordered tuple of
    # 2 or 3 feature indices, append the product column to both X and Xt
    X = np.array(X)
    Xt = np.array(Xt)
    for i in range(2, 4):
        for var in itertools.product(range(f), repeat=i):
            X = np.hstack((X, reduce(mul, [X[:, j] for j in var]).reshape(-1, 1)))
            Xt = np.hstack((Xt, reduce(mul, [Xt[:, j] for j in var]).reshape(-1, 1)))

    # Use sklearn to fit on the augmented features and print one prediction per line
    for yt in LinearRegression().fit(X, y).predict(Xt):
        print(yt[0])
| [
"sklearn.linear_model.LinearRegression",
"numpy.array"
] | [((578, 589), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (586, 589), True, 'import numpy as np\n'), ((599, 611), 'numpy.array', 'np.array', (['Xt'], {}), '(Xt)\n', (607, 611), True, 'import numpy as np\n'), ((914, 932), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (930, 932), False, 'from sklearn.linear_model import LinearRegression\n')] |
import logging
import numpy as np
from fbpca import pca
from scipy.stats import norm
from .sparseutils import nonzero_mean, nonzero_std, find_zeroed_indices
log = logging.getLogger("ALRA")
def choose_k(X, k=100, pval_thresh=1e-10, noise_start=80, n_iter=2):
    """Heuristically choose the rank for the low-rank approximation of X.

    Computes the top-``k`` singular values with randomized SVD, models the
    gaps between consecutive singular values from index ``noise_start`` on
    as Gaussian noise, and returns the largest index whose gap is
    significantly larger than noise (p < ``pval_thresh``).

    Parameters
    ----------
    X : array_like
    k : int
        Number of singular values to inspect; must be <= min(X.shape).
    pval_thresh : float
        Significance threshold on the gap's normal pdf value.
    noise_start : int
        First singular-value gap treated as pure noise; must be <= k - 5.
    n_iter : int
        Power iterations for the randomized SVD.

    Returns
    -------
    int
        The chosen rank.

    Raises
    ------
    ValueError
        If the arguments are inconsistent or no gap is significant.
    """
    if k > min(X.shape):
        raise ValueError(
            f"`k` must be smaller than `min(N, M)`. Maximum value "
            f"can be {min(X.shape)} but `{k}` given"
        )
    if noise_start > k - 5:
        raise ValueError("At least 5 singular values must be considered noise.")

    U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)

    differences = np.diff(s)

    mean = np.mean(differences[noise_start - 1:])
    std = np.std(differences[noise_start - 1:], ddof=1)

    probabilities = norm.pdf(differences, loc=mean, scale=std)

    significant = np.argwhere(probabilities < pval_thresh)
    if significant.size == 0:
        # Previously `np.max` on the empty result raised an opaque
        # "zero-size array to reduction operation" ValueError here.
        raise ValueError(
            "No singular-value gap is significant at `pval_thresh`; "
            "try increasing `k` or relaxing the threshold."
        )
    k = np.max(significant) + 1

    return k
probabilities = norm.pdf(differences, loc=mean, scale=std)
k = np.max(np.argwhere(probabilities < pval_thresh)) + 1
return k
def ALRA(X, k=None, n_iter=10):
    """Adaptively-thresholded Low Rank Approximation.

    Imputes a (cells x genes) expression matrix by a rank-k SVD
    reconstruction, thresholds per-gene values that fall below the magnitude
    of the gene's most negative reconstructed value, rescales each gene to
    match the original nonzero mean/std, and restores original nonzero
    entries that the procedure zeroed out.

    Parameters
    ----------
    X: array_like
    k: int
        Rank of the approximation; chosen automatically via choose_k(X)
        when None.
    n_iter: int
        Power iterations for the randomized SVD.

    Returns
    -------
    np.array
    """
    if k is None:
        k = choose_k(X)
        log.info(f"No `k` given. Automatically determined `k={k}`.")

    # Compute the SVD and compute the rank-k reconstruction
    U, s, Va = pca(X, k=k, n_iter=n_iter, raw=True)
    X_rank_k = U * s @ Va

    # Masked array: "masked" entries play the role of zeros below
    X_rank_k = np.ma.masked_array(X_rank_k)

    # Find the absolute values of the minimum expression levels for each gene
    minimum_expressions = np.abs(np.min(X_rank_k, axis=0))

    # Zero out all expressions with values below the gene minimum value
    X_rank_k[X_rank_k <= minimum_expressions] = np.ma.masked

    # Rescale the expressions so the first two moments match the original matrix
    # (moments are computed over nonzero/unmasked entries only)
    X_mean, X_std = nonzero_mean(X, axis=0), nonzero_std(X, axis=0, ddof=1)
    X_rk_mean, X_rk_std = X_rank_k.mean(axis=0), X_rank_k.std(axis=0, ddof=1)

    scale = X_std / X_rk_std
    translate = -X_rk_mean * scale + X_mean

    # Skip genes whose std is undefined (e.g. all-masked or constant columns)
    scale_columns = ~np.isnan(X_std) & ~np.isnan(X_rk_std)
    X_rank_k[:, scale_columns] *= scale[scale_columns]
    X_rank_k[:, scale_columns] += translate[scale_columns]

    # Values can become negative during rescaling, so we zero those out
    X_rank_k[X_rank_k < 0] = np.ma.masked

    # Restore potentially zeroed out expression values which appeared in the
    # original expression matrix. Where both values are non-zero, prefer the
    # rank-k approximation
    zeroed_out_indices = find_zeroed_indices(X_rank_k, X)
    X_rank_k[zeroed_out_indices] = X[zeroed_out_indices]

    log.info(
        f"{len(zeroed_out_indices[0])} original expression values were "
        f"zeroed out during imputation and restored to original values."
    )

    # Replace remaining masked entries with literal zeros in the output
    X_rank_k = X_rank_k.filled(0)

    return X_rank_k
return X_rank_k
| [
"numpy.std",
"scipy.stats.norm.pdf",
"fbpca.pca",
"numpy.isnan",
"numpy.min",
"numpy.mean",
"numpy.diff",
"numpy.ma.masked_array",
"numpy.argwhere",
"logging.getLogger"
] | [((165, 190), 'logging.getLogger', 'logging.getLogger', (['"""ALRA"""'], {}), "('ALRA')\n", (182, 190), False, 'import logging\n'), ((570, 606), 'fbpca.pca', 'pca', (['X'], {'k': 'k', 'n_iter': 'n_iter', 'raw': '(True)'}), '(X, k=k, n_iter=n_iter, raw=True)\n', (573, 606), False, 'from fbpca import pca\n'), ((626, 636), 'numpy.diff', 'np.diff', (['s'], {}), '(s)\n', (633, 636), True, 'import numpy as np\n'), ((649, 687), 'numpy.mean', 'np.mean', (['differences[noise_start - 1:]'], {}), '(differences[noise_start - 1:])\n', (656, 687), True, 'import numpy as np\n'), ((699, 744), 'numpy.std', 'np.std', (['differences[noise_start - 1:]'], {'ddof': '(1)'}), '(differences[noise_start - 1:], ddof=1)\n', (705, 744), True, 'import numpy as np\n'), ((767, 809), 'scipy.stats.norm.pdf', 'norm.pdf', (['differences'], {'loc': 'mean', 'scale': 'std'}), '(differences, loc=mean, scale=std)\n', (775, 809), False, 'from scipy.stats import norm\n'), ((1284, 1320), 'fbpca.pca', 'pca', (['X'], {'k': 'k', 'n_iter': 'n_iter', 'raw': '(True)'}), '(X, k=k, n_iter=n_iter, raw=True)\n', (1287, 1320), False, 'from fbpca import pca\n'), ((1363, 1391), 'numpy.ma.masked_array', 'np.ma.masked_array', (['X_rank_k'], {}), '(X_rank_k)\n', (1381, 1391), True, 'import numpy as np\n'), ((1504, 1528), 'numpy.min', 'np.min', (['X_rank_k'], {'axis': '(0)'}), '(X_rank_k, axis=0)\n', (1510, 1528), True, 'import numpy as np\n'), ((826, 866), 'numpy.argwhere', 'np.argwhere', (['(probabilities < pval_thresh)'], {}), '(probabilities < pval_thresh)\n', (837, 866), True, 'import numpy as np\n'), ((1995, 2010), 'numpy.isnan', 'np.isnan', (['X_std'], {}), '(X_std)\n', (2003, 2010), True, 'import numpy as np\n'), ((2014, 2032), 'numpy.isnan', 'np.isnan', (['X_rk_std'], {}), '(X_rk_std)\n', (2022, 2032), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch import nn
from rlkit.policies.base import ExplorationPolicy, Policy
from rlkit.torch.core import eval_np
from rlkit.torch.distributions import TanhNormal
from rlkit.torch.networks import Mlp
from rlkit.torch.modules import Attention, preprocess_attention_input
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
class TanhGaussianPolicy(Mlp, ExplorationPolicy):
    """
    Gaussian policy squashed through tanh, parameterized by an MLP.

    Usage:
    ```
    policy = TanhGaussianPolicy(...)
    action, mean, log_std, _ = policy(obs)
    action, mean, log_std, _ = policy(obs, deterministic=True)
    action, mean, log_std, log_prob = policy(obs, return_log_prob=True)
    ```
    Here, mean and log_std are the mean and log_std of the Gaussian that is
    sampled from.
    If deterministic is True, action = tanh(mean).
    If return_log_prob is False (default), log_prob = None
    This is done because computing the log_prob can be a bit expensive.
    """
    def __init__(
            self,
            hidden_sizes,
            obs_dim,
            action_dim,
            std=None,
            init_w=1e-3,
            last_layer_init_w=None,
            last_layer_init_b=None,
            initial_log_std_offset=0,
            **kwargs
    ):
        # The Mlp base builds the obs_dim -> hidden_sizes -> action_dim
        # network that produces the Gaussian mean.
        super().__init__(
            hidden_sizes,
            input_size=obs_dim,
            output_size=action_dim,
            init_w=init_w,
            last_layer_init_w=last_layer_init_w,
            last_layer_init_b=last_layer_init_b,
            **kwargs
        )
        self.log_std = None
        self.std = std
        self.initial_log_std_offset = initial_log_std_offset
        if std is None:
            # std not fixed: learn a state-dependent log-std via an extra
            # linear head on top of the last hidden layer.
            last_hidden_size = obs_dim
            if len(hidden_sizes) > 0:
                last_hidden_size = hidden_sizes[-1]
            self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim)
            # Mirror the initialization options used for the mean head.
            if last_layer_init_w is None:
                self.last_fc_log_std.weight.data.uniform_(-init_w, init_w)
            else:
                last_layer_init_w(self.last_fc_log_std.weight)
            if last_layer_init_b is None:
                self.last_fc_log_std.bias.data.uniform_(-init_w, init_w)
            else:
                last_layer_init_b(self.last_fc_log_std.bias)
        else:
            # Fixed scalar std; it must fall inside the clamp range used
            # for the learned log-std so both paths are comparable.
            self.log_std = np.log(std)
            assert LOG_SIG_MIN <= self.log_std <= LOG_SIG_MAX
    def get_action(self, obs_np, deterministic=False):
        """Return one action (numpy) for a single observation, plus an
        empty agent-info dict (ExplorationPolicy API)."""
        with torch.no_grad():
            actions = self.get_actions(obs_np[None],
                                       deterministic=deterministic)
        return actions[0, :], {}
    def get_actions(self, obs_np, deterministic=False):
        """Batched numpy wrapper around forward(); returns only actions."""
        # forward() returns a tuple; index 0 is the action batch.
        return eval_np(self, obs_np, deterministic=deterministic)[0]
    def forward(
            self,
            obs,
            reparameterize=True,
            deterministic=False,
            return_log_prob=False,
    ):
        """
        :param obs: Observation
        :param deterministic: If True, do not sample
        :param return_log_prob: If True, return a sample and its log probability
        :return: tuple (action, mean, log_std, log_prob, entropy, std,
            mean_action_log_prob, pre_tanh_value); entries not requested
            are None.
        """
        if self.normalizer is not None:
            obs = self.normalizer.normalize(obs)
        h = obs
        for i, fc in enumerate(self.fcs):
            h = self.hidden_activation(fc(h))
        mean = self.last_fc(h)
        if self.std is None:
            # State-dependent std: offset, then clamp for numerical safety.
            log_std = self.last_fc_log_std(h) + self.initial_log_std_offset
            log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
            std = torch.exp(log_std)
        else:
            std = self.std
            log_std = self.log_std
        log_prob = None
        entropy = None
        mean_action_log_prob = None
        pre_tanh_value = None
        if deterministic:
            action = torch.tanh(mean)
        else:
            tanh_normal = TanhNormal(mean, std)
            if return_log_prob:
                # Keep the pre-tanh sample so log_prob can apply the
                # change-of-variables correction exactly.
                if reparameterize is True:
                    action, pre_tanh_value = tanh_normal.rsample(
                        return_pretanh_value=True
                    )
                else:
                    action, pre_tanh_value = tanh_normal.sample(
                        return_pretanh_value=True
                    )
                log_prob = tanh_normal.log_prob(
                    action,
                    pre_tanh_value=pre_tanh_value
                )
                # Sum over action dimensions -> one log-prob per sample.
                log_prob = log_prob.sum(dim=1, keepdim=True)
            else:
                # rsample() keeps gradients flowing through the sample
                # (reparameterization trick); sample() does not.
                if reparameterize is True:
                    action = tanh_normal.rsample()
                else:
                    action = tanh_normal.sample()
        return (
            action, mean, log_std, log_prob, entropy, std,
            mean_action_log_prob, pre_tanh_value,
        )
class MakeDeterministic(nn.Module, Policy):
    """Wrapper that makes a stochastic policy always act deterministically.

    Every ``get_action`` call is forwarded to the wrapped policy with
    ``deterministic=True``, so the action is the (squashed) mean instead
    of a sample.
    """
    def __init__(self, stochastic_policy):
        super().__init__()
        self.stochastic_policy = stochastic_policy
    def get_action(self, observation):
        """Delegate to the wrapped policy, forcing deterministic mode."""
        return self.stochastic_policy.get_action(
            observation, deterministic=True)
    def to(self, device):
        """Move both this wrapper and the wrapped policy to ``device``."""
        super().to(device)
        self.stochastic_policy.to(device)
class AttentionTanhGaussianPolicy(TanhGaussianPolicy):
    """
    Tanh-Gaussian policy whose observation is first processed by an
    attention module over per-object latents, conditioned on a goal latent.

    Usage:
    ```
    policy = TanhGaussianPolicy(...)
    action, mean, log_std, _ = policy(obs)
    action, mean, log_std, _ = policy(obs, deterministic=True)
    action, mean, log_std, log_prob = policy(obs, return_log_prob=True)
    ```
    Here, mean and log_std are the mean and log_std of the Gaussian that is
    sampled from.
    If deterministic is True, action = tanh(mean).
    If return_log_prob is False (default), log_prob = None
    This is done because computing the log_prob can be a bit expensive.
    """
    def __init__(self,
                 hidden_sizes,
                 embed_dim,
                 z_size,
                 max_objects,
                 z_goal_size,
                 action_dim,
                 std=None,
                 init_w=1e-3,
                 n_frames=None,
                 attention_kwargs=None,
                 **kwargs):
        # Avoid a mutable default argument for the kwargs dict.
        if attention_kwargs is None:
            attention_kwargs = {}
        attention = Attention(embed_dim, z_goal_size, z_size,
                              **attention_kwargs)
        # The MLP input is the attention output concatenated with the
        # goal embedding, hence output_dim + embed_dim.
        super().__init__(hidden_sizes,
                         attention.output_dim + attention.embed_dim,
                         action_dim,
                         std,
                         init_w,
                         **kwargs)
        self.z_goal_size = z_goal_size
        self.z_size = z_size
        self.n_frames = n_frames
        self.attention = attention
    def forward(self, obs,
                reparameterize=True,
                deterministic=False,
                return_log_prob=False):
        """
        :param obs: Observation
        :param deterministic: If True, do not sample
        :param return_log_prob: If True, return a sample and its log probability
        """
        # Split the flat observation into per-object latents x, goal latent
        # g, and the object count (layout defined by the helper).
        x, g, n_objects = preprocess_attention_input(obs,
                                                     self.z_size,
                                                     self.z_goal_size,
                                                     self.n_frames)
        goal_embedding = self.attention.embed(g)
        state_embedding = self.attention.embed(x)
        # Goal-conditioned attention pools the object embeddings into h.
        h = self.attention.forward(state_embedding, goal_embedding, n_objects)
        return super().forward(torch.cat((h, goal_embedding.squeeze(1)), dim=1),
                               reparameterize=reparameterize,
                               deterministic=deterministic,
                               return_log_prob=return_log_prob)
    def to(self, device):
        super().to(device)
        self.attention.to(device)
class DeepSetTanhGaussianPolicy(TanhGaussianPolicy):
    """
    Tanh-Gaussian policy that aggregates per-object latents with a
    permutation-invariant deep-set encoder (embed -> per-object MLP -> sum)
    instead of attention, then conditions on the goal embedding.
    """
    def __init__(self,
                 hidden_sizes,
                 key_query_size,  # unused
                 z_size,
                 max_objects,
                 z_goal_size,
                 value_size,  # unused
                 action_dim,
                 embed_dim,
                 aggregation_dim,
                 std=None,
                 init_w=1e-3,
                 n_frames=None,
                 **kwargs):
        # MLP input = summed per-object features + goal embedding.
        super().__init__(hidden_sizes,
                         aggregation_dim + embed_dim,
                         action_dim,
                         std,
                         init_w,
                         **kwargs)
        self.z_goal_size = z_goal_size
        self.z_size = z_size
        self.n_frames = n_frames
        # Shared embedding used for both objects and the goal.
        self.embedding = nn.Linear(z_size, embed_dim)
        # Per-object feature network applied before the sum-pooling.
        self.pre_aggregation = nn.Sequential(nn.Linear(embed_dim,
                                                       aggregation_dim),
                                             nn.ReLU(),
                                             nn.Linear(aggregation_dim,
                                                       aggregation_dim))
        # Re-initialize the linear layers with the caller-provided scheme,
        # if any, and zero all biases.
        for layer in (self.embedding,
                      self.pre_aggregation[0],
                      self.pre_aggregation[2]):
            if 'hidden_init' in kwargs:
                kwargs['hidden_init'](layer.weight)
            nn.init.zeros_(layer.bias)
    def forward(self, obs,
                reparameterize=True,
                deterministic=False,
                return_log_prob=False):
        """
        :param obs: Observation
        :param deterministic: If True, do not sample
        :param return_log_prob: If True, return a sample and its log probability
        """
        # Split the flat observation into per-object latents x, goal latent
        # g, and the object count (layout defined by the helper).
        x, g, n_objects = preprocess_attention_input(obs,
                                                     self.z_size,
                                                     self.z_goal_size,
                                                     self.n_frames)
        goal_embedding = self.embedding(g)
        state_embedding = self.embedding(x)
        # Sum over the object axis -> permutation-invariant set feature.
        h = self.pre_aggregation(state_embedding).sum(dim=1)
        return super().forward(torch.cat((h, goal_embedding.squeeze(1)), dim=1),
                               reparameterize=reparameterize,
                               deterministic=deterministic,
                               return_log_prob=return_log_prob)
    def to(self, device):
        # NOTE(review): relies on nn.Module.to moving the registered
        # submodules (embedding, pre_aggregation); unlike the attention
        # policy there is no explicit submodule .to call — confirm intended.
        super().to(device)
| [
"rlkit.torch.modules.Attention",
"rlkit.torch.distributions.TanhNormal",
"torch.nn.ReLU",
"numpy.log",
"torch.exp",
"rlkit.torch.core.eval_np",
"torch.clamp",
"torch.nn.init.zeros_",
"rlkit.torch.modules.preprocess_attention_input",
"torch.nn.Linear",
"torch.no_grad",
"torch.tanh"
] | [((6195, 6256), 'rlkit.torch.modules.Attention', 'Attention', (['embed_dim', 'z_goal_size', 'z_size'], {}), '(embed_dim, z_goal_size, z_size, **attention_kwargs)\n', (6204, 6256), False, 'from rlkit.torch.modules import Attention, preprocess_attention_input\n'), ((7027, 7104), 'rlkit.torch.modules.preprocess_attention_input', 'preprocess_attention_input', (['obs', 'self.z_size', 'self.z_goal_size', 'self.n_frames'], {}), '(obs, self.z_size, self.z_goal_size, self.n_frames)\n', (7053, 7104), False, 'from rlkit.torch.modules import Attention, preprocess_attention_input\n'), ((8638, 8666), 'torch.nn.Linear', 'nn.Linear', (['z_size', 'embed_dim'], {}), '(z_size, embed_dim)\n', (8647, 8666), False, 'from torch import nn\n'), ((9629, 9706), 'rlkit.torch.modules.preprocess_attention_input', 'preprocess_attention_input', (['obs', 'self.z_size', 'self.z_goal_size', 'self.n_frames'], {}), '(obs, self.z_size, self.z_goal_size, self.n_frames)\n', (9655, 9706), False, 'from rlkit.torch.modules import Attention, preprocess_attention_input\n'), ((1802, 1841), 'torch.nn.Linear', 'nn.Linear', (['last_hidden_size', 'action_dim'], {}), '(last_hidden_size, action_dim)\n', (1811, 1841), False, 'from torch import nn\n'), ((2277, 2288), 'numpy.log', 'np.log', (['std'], {}), '(std)\n', (2283, 2288), True, 'import numpy as np\n'), ((2420, 2435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2433, 2435), False, 'import torch\n'), ((2663, 2713), 'rlkit.torch.core.eval_np', 'eval_np', (['self', 'obs_np'], {'deterministic': 'deterministic'}), '(self, obs_np, deterministic=deterministic)\n', (2670, 2713), False, 'from rlkit.torch.core import eval_np\n'), ((3420, 3466), 'torch.clamp', 'torch.clamp', (['log_std', 'LOG_SIG_MIN', 'LOG_SIG_MAX'], {}), '(log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n', (3431, 3466), False, 'import torch\n'), ((3485, 3503), 'torch.exp', 'torch.exp', (['log_std'], {}), '(log_std)\n', (3494, 3503), False, 'import torch\n'), ((3741, 3757), 'torch.tanh', 'torch.tanh', 
(['mean'], {}), '(mean)\n', (3751, 3757), False, 'import torch\n'), ((3798, 3819), 'rlkit.torch.distributions.TanhNormal', 'TanhNormal', (['mean', 'std'], {}), '(mean, std)\n', (3808, 3819), False, 'from rlkit.torch.distributions import TanhNormal\n'), ((8712, 8749), 'torch.nn.Linear', 'nn.Linear', (['embed_dim', 'aggregation_dim'], {}), '(embed_dim, aggregation_dim)\n', (8721, 8749), False, 'from torch import nn\n'), ((8851, 8860), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8858, 8860), False, 'from torch import nn\n'), ((8907, 8950), 'torch.nn.Linear', 'nn.Linear', (['aggregation_dim', 'aggregation_dim'], {}), '(aggregation_dim, aggregation_dim)\n', (8916, 8950), False, 'from torch import nn\n'), ((9244, 9270), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['layer.bias'], {}), '(layer.bias)\n', (9258, 9270), False, 'from torch import nn\n')] |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier
def plotSensores():
    """Plot raw EMG channels and their averaged signal for letters A and D.

    Reads the 8-sensor EMG recordings and the precomputed averages for the
    two gestures and shows them in a 2x2 grid: column 0 = letter A,
    column 1 = letter D; row 0 = all sensors, row 1 = averaged values.
    """
    ### Letra A
    df_a = pd.read_csv("data/bernardo/bernardo-A-3-emg.csv")
    df_a_avg = np.genfromtxt("data/bernardo/bernardo-A-3-avg.csv", delimiter=',', dtype=float)
    df_a_avg = pd.DataFrame(df_a_avg)
    # Drop the leading (index/timestamp) column, keep the 8 sensor columns.
    df_a = df_a.drop(df_a.columns[[0]], axis=1)
    df_a.columns = ["sensor1", "sensor2", "sensor3", "sensor4", "sensor5", "sensor6", "sensor7", "sensor8"]
    ### Letra D
    df_b = pd.read_csv("data/bernardo/bernardo-D-3-emg.csv")
    df_b_avg = np.genfromtxt("data/bernardo/bernardo-D-3-avg.csv", delimiter=',', dtype=float)
    df_b_avg = pd.DataFrame(df_b_avg)
    df_b = df_b.drop(df_b.columns[[0]], axis=1)
    df_b.columns = ["sensor1", "sensor2", "sensor3", "sensor4", "sensor5", "sensor6", "sensor7", "sensor8"]
    # plt.subplots() creates its own figure; the previous extra plt.figure()
    # call only left an empty figure window behind.
    fig, axes = plt.subplots(figsize=(8, 8), nrows=2, ncols=2)
    ## Letra A
    df_a.plot(legend=False, ax=axes[0, 0])
    axes[0, 0].set_ylabel("Letra A - Todos os sensores")
    df_a_avg.plot(legend=False, ax=axes[1, 0])
    axes[1, 0].set_ylabel(u"Letra A - Valores Médios");
    ## Letra D
    df_b.plot(legend=False, ax=axes[0, 1])
    axes[0, 1].set_ylabel("Letra D - Todos os sensores")
    df_b_avg.plot(legend=False, ax=axes[1, 1])
    axes[1, 1].set_ylabel(u"Letra D - Valores Médios");
    for ax in axes:
        for bx in ax:
            # Passing labels positionally to set_xticks/set_yticks is
            # deprecated (removed in recent matplotlib); an empty tick
            # list already hides all labels.
            bx.set_xticks([])
            bx.set_yticks([])
    plt.show()
def plot_crossvalidation():
    """Bar chart of 10-fold cross-validation accuracy per feature group."""
    plt.figure()
    scores = pd.Series(
        [78, 78, 76, 62],
        index=["Grupo 1", "Grupo 2", "Grupo 3", "Grupo 4"],
    )
    axis = scores.plot(kind='bar', rot=0, title="10-Fold Cross-Validation")
    axis.grid(True, which='major', axis='y')
    axis.set_ylim(0, 100)
    plt.show()
def plotLeaveOneOut():
    """Bar chart of leave-one-subject-out accuracy per participant.

    The hard-coded scores were produced offline by training a
    RandomForestClassifier (n_estimators=150) on all subjects but one and
    scoring the held-out subject; that cross-validation loop has been
    removed and only the plotting remains.
    """
    featureMatrix = pd.read_csv("featureMatrix.csv")
    n_estimators = 150
    i = 5
    # Reference results per number of samples used per subject:
    # 15 - 0.33 0.40 0.47 0.73 0.73
    # 10 - 0.50 0.50 0.60 0.60 0.60
    #  5 - 0.80 0.60 0.40 1.00 0.80
    #  3 - 0.67 0.67 0.67 1.00 1.00
    plt.figure()
    scores_by_group = {
        'Grupo 1': [0.66, 0.66, 0.66, 1.0, 1.0],
        'Grupo 2': [0.8, 0.6, 0.4, 1.0, 0.8],
        'Grupo 3': [0.5, 0.5, 0.6, 0.6, 0.6],
        'Grupo 4': [0.33, 0.4, 0.46, 0.73, 0.73],
    }
    df = pd.DataFrame(scores_by_group)
    df.index = ["P1", "P2", "P3", "P4", "P5"]
    ax = df.plot(kind='bar', title=u"Validação por 'Leave One Subject Out'", rot=0)
    ax.set_ylim(0, 1.2)
    ax.grid(True, which='major', axis='y')
    plt.show()
"pandas.DataFrame",
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.genfromtxt",
"matplotlib.pyplot.figure",
"pandas.Series",
"matplotlib.pyplot.subplots"
] | [((260, 309), 'pandas.read_csv', 'pd.read_csv', (['"""data/bernardo/bernardo-A-3-emg.csv"""'], {}), "('data/bernardo/bernardo-A-3-emg.csv')\n", (271, 309), True, 'import pandas as pd\n'), ((326, 405), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/bernardo/bernardo-A-3-avg.csv"""'], {'delimiter': '""","""', 'dtype': 'float'}), "('data/bernardo/bernardo-A-3-avg.csv', delimiter=',', dtype=float)\n", (339, 405), True, 'import numpy as np\n'), ((421, 443), 'pandas.DataFrame', 'pd.DataFrame', (['df_a_avg'], {}), '(df_a_avg)\n', (433, 443), True, 'import pandas as pd\n'), ((628, 677), 'pandas.read_csv', 'pd.read_csv', (['"""data/bernardo/bernardo-D-3-emg.csv"""'], {}), "('data/bernardo/bernardo-D-3-emg.csv')\n", (639, 677), True, 'import pandas as pd\n'), ((694, 773), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/bernardo/bernardo-D-3-avg.csv"""'], {'delimiter': '""","""', 'dtype': 'float'}), "('data/bernardo/bernardo-D-3-avg.csv', delimiter=',', dtype=float)\n", (707, 773), True, 'import numpy as np\n'), ((789, 811), 'pandas.DataFrame', 'pd.DataFrame', (['df_b_avg'], {}), '(df_b_avg)\n', (801, 811), True, 'import pandas as pd\n'), ((975, 987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (985, 987), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1050), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)', 'nrows': '(2)', 'ncols': '(2)'}), '(figsize=(8, 8), nrows=2, ncols=2)\n', (1016, 1050), True, 'import matplotlib.pyplot as plt\n'), ((1597, 1607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1605, 1607), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1654), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1652, 1654), True, 'import matplotlib.pyplot as plt\n'), ((1664, 1743), 'pandas.Series', 'pd.Series', (['[78, 78, 76, 62]'], {'index': "['Grupo 1', 'Grupo 2', 'Grupo 3', 'Grupo 4']"}), "([78, 78, 76, 62], index=['Grupo 1', 'Grupo 2', 'Grupo 3', 'Grupo 4'])\n", (1673, 1743), True, 'import 
pandas as pd\n'), ((1880, 1890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1888, 1890), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1967), 'pandas.read_csv', 'pd.read_csv', (['"""featureMatrix.csv"""'], {}), "('featureMatrix.csv')\n", (1946, 1967), True, 'import pandas as pd\n'), ((3874, 3886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3884, 3886), True, 'import matplotlib.pyplot as plt\n'), ((4091, 4110), 'pandas.DataFrame', 'pd.DataFrame', (['mdict'], {}), '(mdict)\n', (4103, 4110), True, 'import pandas as pd\n'), ((4311, 4321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4319, 4321), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
import yaml
import numpy as np
def read_evals(fyml):
    """Load the 'evals' list from YAML file *fyml* as a numpy array."""
    with open(fyml, 'r') as f:
        doc = yaml.safe_load(f)
    return np.array(doc['evals'])
def main():
    """Compare eigenvalues from two backends and print the mismatches.

    Reads the ``evals`` arrays from matching YAML files under ``../fci``
    and ``../eri`` and prints the indices and differences of entries that
    disagree by more than ``--tol``.
    """
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('nup', type=int)
    parser.add_argument('ndn', type=int)
    parser.add_argument('--lam', type=float, default=1)
    parser.add_argument('--e2e', type=float, default=1)
    parser.add_argument('--tol', type=float, default=1e-12)
    parser.add_argument('--verbose', action='store_true')
    args = parser.parse_args()
    # Both backends name their output after the same parameter prefix.
    prefix = 'evals-l%f-e%f-nup%d-ndn%d' % (args.lam, args.e2e, args.nup, args.ndn)
    evals_fci = read_evals('../fci/%s.yml' % prefix)
    evals_eri = read_evals('../eri/%s.yml' % prefix)
    diff = evals_eri - evals_fci
    mismatched = abs(diff) > args.tol
    print(np.where(mismatched)[0])
    print(diff[mismatched])
if __name__ == '__main__':
main() # set no global variable
| [
"yaml.safe_load",
"numpy.where",
"numpy.array",
"argparse.ArgumentParser"
] | [((166, 181), 'numpy.array', 'np.array', (['elist'], {}), '(elist)\n', (174, 181), True, 'import numpy as np\n'), ((244, 260), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (258, 260), False, 'from argparse import ArgumentParser\n'), ((116, 133), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (130, 133), False, 'import yaml\n'), ((866, 879), 'numpy.where', 'np.where', (['sel'], {}), '(sel)\n', (874, 879), True, 'import numpy as np\n')] |
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext
from numba import types
from numba.core.typing import signature
from . import stubs
import numba_dppy.dpnp_glue as dpnp_lowering
from numba.core.extending import overload, register_jitable
import numpy as np
from numba_dppy import dpctl_functions
import numba_dppy
@register_jitable
def common_impl(a, b, out, dpnp_func, PRINT_DEBUG):
    """Run a dpnp init-style kernel and copy the result into `out`.

    Stages `b` (the fill value(s)) in USM shared memory, invokes
    `dpnp_func` over `a.size` elements, then copies the kernel output
    back into the host-side `out` array. `a` only supplies the size.
    """
    if a.size == 0:
        raise ValueError("Passed Empty array")
    sycl_queue = dpctl_functions.get_current_queue()
    # Stage the input in USM shared memory so the SYCL kernel can read it.
    b_usm = dpctl_functions.malloc_shared(b.size * b.itemsize, sycl_queue)
    dpctl_functions.queue_memcpy(sycl_queue, b_usm, b.ctypes, b.size * b.itemsize)
    out_usm = dpctl_functions.malloc_shared(out.size * out.itemsize, sycl_queue)
    dpnp_func(out_usm, b_usm, a.size)
    # Copy the kernel result back into the host-side numpy buffer.
    dpctl_functions.queue_memcpy(
        sycl_queue, out.ctypes, out_usm, out.size * out.itemsize
    )
    dpctl_functions.free_with_queue(b_usm, sycl_queue)
    dpctl_functions.free_with_queue(out_usm, sycl_queue)
    # Keep the arrays alive until the device work is done.
    dpnp_ext._dummy_liveness_func([a.size, out.size])
    if PRINT_DEBUG:
        print("dpnp implementation")
@register_jitable
def common_shape_impl(a, out, dpnp_func, PRINT_DEBUG):
    """Run a dpnp shape-aware kernel on `a` and copy the result into `out`.

    Stages `a` in USM shared memory and invokes `dpnp_func` with the
    array's shape pointer and rank (e.g. the trace kernel), then copies
    the kernel output back into the host-side `out` array.
    """
    if a.size == 0:
        raise ValueError("Passed Empty array")
    sycl_queue = dpctl_functions.get_current_queue()
    # Stage the input in USM shared memory so the SYCL kernel can read it.
    a_usm = dpctl_functions.malloc_shared(a.size * a.itemsize, sycl_queue)
    dpctl_functions.queue_memcpy(sycl_queue, a_usm, a.ctypes, a.size * a.itemsize)
    out_usm = dpctl_functions.malloc_shared(out.size * out.itemsize, sycl_queue)
    dpnp_func(a_usm, out_usm, a.shapeptr, a.ndim)
    # Copy the kernel result back into the host-side numpy buffer.
    dpctl_functions.queue_memcpy(
        sycl_queue, out.ctypes, out_usm, out.size * out.itemsize
    )
    dpctl_functions.free_with_queue(a_usm, sycl_queue)
    dpctl_functions.free_with_queue(out_usm, sycl_queue)
    # Keep the arrays alive until the device work is done.
    dpnp_ext._dummy_liveness_func([a.size, out.size])
    if PRINT_DEBUG:
        print("dpnp implementation")
@overload(stubs.dpnp.zeros_like)
def dpnp_zeros_like_impl(a, dtype=None):
    """Numba @overload: typing-time factory for dpnp zeros_like."""
    name = "zeros_like"
    dpnp_lowering.ensure_dpnp(name)
    ret_type = types.void
    """
    dpnp source:
    https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_common.cpp#L224
    Function declaration:
    void dpnp_initval_c(void* result1, void* value, size_t size)
    """
    # `dtype` is a numba type here; types.none means "inherit a's dtype".
    res_dtype = dtype
    if dtype == types.none:
        res_dtype = a.dtype
        name_dtype = res_dtype.name
    else:
        name_dtype = res_dtype.dtype.name
    sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)
    dpnp_func = dpnp_ext.dpnp_func("dpnp_" + name, [name_dtype, "NONE"], sig)
    PRINT_DEBUG = dpnp_lowering.DEBUG
    def dpnp_impl(a, dtype=None):
        # b holds the single fill value (0); the kernel broadcasts it.
        b = np.zeros(1, dtype=res_dtype)
        out = np.zeros(a.shape, dtype=res_dtype)
        common_impl(a, b, out, dpnp_func, PRINT_DEBUG)
        return out
    return dpnp_impl
@overload(stubs.dpnp.ones_like)
def dpnp_ones_like_impl(a, dtype=None):
    """Numba @overload: typing-time factory for dpnp ones_like."""
    name = "ones_like"
    dpnp_lowering.ensure_dpnp(name)
    ret_type = types.void
    """
    dpnp source:
    https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_common.cpp#L224
    Function declaration:
    void dpnp_initval_c(void* result1, void* value, size_t size)
    """
    # `dtype` is a numba type here; types.none means "inherit a's dtype".
    res_dtype = dtype
    if dtype == types.none:
        res_dtype = a.dtype
        name_dtype = res_dtype.name
    else:
        name_dtype = res_dtype.dtype.name
    sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)
    dpnp_func = dpnp_ext.dpnp_func("dpnp_" + name, [name_dtype, "NONE"], sig)
    PRINT_DEBUG = dpnp_lowering.DEBUG
    def dpnp_impl(a, dtype=None):
        # b holds the single fill value (1); the kernel broadcasts it.
        b = np.ones(1, dtype=res_dtype)
        out = np.ones(a.shape, dtype=res_dtype)
        common_impl(a, b, out, dpnp_func, PRINT_DEBUG)
        return out
    return dpnp_impl
@overload(stubs.dpnp.full_like)
def dpnp_full_like_impl(a, b):
    """Numba @overload: typing-time factory for dpnp full_like.

    `a` supplies the shape; `b` supplies the fill value and the dtype.
    """
    name = "full_like"
    dpnp_lowering.ensure_dpnp(name)
    ret_type = types.void
    """
    dpnp source:
    https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_common.cpp#L224
    Function declaration:
    void dpnp_initval_c(void* result1, void* value, size_t size)
    """
    sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)
    dpnp_func = dpnp_ext.dpnp_func("dpnp_" + name, [b.dtype.name, "NONE"], sig)
    res_dtype = b.dtype
    PRINT_DEBUG = dpnp_lowering.DEBUG
    def dpnp_impl(a, b):
        # out is only a buffer; the kernel overwrites it with b's value.
        out = np.ones(a.shape, dtype=res_dtype)
        common_impl(a, b, out, dpnp_func, PRINT_DEBUG)
        return out
    return dpnp_impl
# TODO: This implementation is incorrect
@overload(stubs.dpnp.full)
def dpnp_full_impl(a, b):
    """Numba @overload: typing-time factory for dpnp full.

    NOTE(review): flagged incorrect by the TODO above — the output shape
    is derived from `a.size` (flat), not `a`'s shape; confirm intended
    semantics before relying on this overload.
    """
    name = "full"
    dpnp_lowering.ensure_dpnp(name)
    ret_type = types.void
    """
    dpnp source:
    https://github.com/IntelPython/dpnp/blob/0.5.1/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp#L70
    Function declaration:
    void dpnp_full_c(void* array_in, void* result, const size_t size)
    """
    sig = signature(ret_type, types.voidptr, types.voidptr, types.intp)
    dpnp_func = dpnp_ext.dpnp_func("dpnp_" + name, [b.dtype.name, "NONE"], sig)
    res_dtype = b.dtype
    PRINT_DEBUG = dpnp_lowering.DEBUG
    def dpnp_impl(a, b):
        if a.size == 0:
            raise ValueError("Passed Empty array")
        sycl_queue = dpctl_functions.get_current_queue()
        # Stage the fill value in USM shared memory for the kernel.
        b_usm = dpctl_functions.malloc_shared(b.size * b.itemsize, sycl_queue)
        dpctl_functions.queue_memcpy(sycl_queue, b_usm, b.ctypes, b.size * b.itemsize)
        # arange is used purely as an output buffer; the kernel is
        # expected to overwrite every element.
        out = np.arange(a.size, dtype=res_dtype)
        out_usm = dpctl_functions.malloc_shared(out.size * out.itemsize, sycl_queue)
        dpnp_func(b_usm, out_usm, a.size)
        dpctl_functions.queue_memcpy(
            sycl_queue, out.ctypes, out_usm, out.size * out.itemsize
        )
        dpctl_functions.free_with_queue(b_usm, sycl_queue)
        dpctl_functions.free_with_queue(out_usm, sycl_queue)
        # Keep the arrays alive until the device work is done.
        dpnp_ext._dummy_liveness_func([a.size, out.size])
        if PRINT_DEBUG:
            print("dpnp implementation")
        return out
    return dpnp_impl
@overload(stubs.dpnp.trace)
def dpnp_trace_impl(a):
    """Numba @overload: typing-time factory for dpnp trace.

    Implemented as diagonal extraction followed by the shape-aware trace
    kernel, which sums over the last axis of the diagonal array.
    """
    name = "trace"
    dpnp_lowering.ensure_dpnp(name)
    ret_type = types.void
    """
    dpnp source:
    https://github.com/IntelPython/dpnp/blob/0.6.2/dpnp/backend/kernels/dpnp_krnl_arraycreation.cpp#L218
    Function declaration:
    void dpnp_trace_c(const void* array1_in, void* result1, const size_t* shape_, const size_t ndim)
    """
    sig = signature(ret_type, types.voidptr, types.voidptr, types.voidptr, types.intp)
    dpnp_func = dpnp_ext.dpnp_func("dpnp_" + name, [a.dtype.name, "NONE"], sig)
    PRINT_DEBUG = dpnp_lowering.DEBUG
    def dpnp_impl(a):
        diag_arr = numba_dppy.dpnp.diagonal(a, 0)
        # The kernel reduces over the diagonal's last axis.
        out = np.zeros(diag_arr.shape[:-1], dtype=a.dtype)
        common_shape_impl(diag_arr, out, dpnp_func, PRINT_DEBUG)
        return out
    return dpnp_impl
| [
"numba_dppy.dpctl_functions.malloc_shared",
"numba_dppy.dpnp_glue.dpnpimpl.dpnp_func",
"numba_dppy.dpctl_functions.get_current_queue",
"numpy.zeros",
"numba.core.typing.signature",
"numpy.ones",
"numba_dppy.dpctl_functions.queue_memcpy",
"numba_dppy.dpnp.diagonal",
"numpy.arange",
"numba_dppy.dpnp... | [((2525, 2556), 'numba.core.extending.overload', 'overload', (['stubs.dpnp.zeros_like'], {}), '(stubs.dpnp.zeros_like)\n', (2533, 2556), False, 'from numba.core.extending import overload, register_jitable\n'), ((3489, 3519), 'numba.core.extending.overload', 'overload', (['stubs.dpnp.ones_like'], {}), '(stubs.dpnp.ones_like)\n', (3497, 3519), False, 'from numba.core.extending import overload, register_jitable\n'), ((4448, 4478), 'numba.core.extending.overload', 'overload', (['stubs.dpnp.full_like'], {}), '(stubs.dpnp.full_like)\n', (4456, 4478), False, 'from numba.core.extending import overload, register_jitable\n'), ((5249, 5274), 'numba.core.extending.overload', 'overload', (['stubs.dpnp.full'], {}), '(stubs.dpnp.full)\n', (5257, 5274), False, 'from numba.core.extending import overload, register_jitable\n'), ((6744, 6770), 'numba.core.extending.overload', 'overload', (['stubs.dpnp.trace'], {}), '(stubs.dpnp.trace)\n', (6752, 6770), False, 'from numba.core.extending import overload, register_jitable\n'), ((1054, 1089), 'numba_dppy.dpctl_functions.get_current_queue', 'dpctl_functions.get_current_queue', ([], {}), '()\n', (1087, 1089), False, 'from numba_dppy import dpctl_functions\n'), ((1103, 1165), 'numba_dppy.dpctl_functions.malloc_shared', 'dpctl_functions.malloc_shared', (['(b.size * b.itemsize)', 'sycl_queue'], {}), '(b.size * b.itemsize, sycl_queue)\n', (1132, 1165), False, 'from numba_dppy import dpctl_functions\n'), ((1170, 1248), 'numba_dppy.dpctl_functions.queue_memcpy', 'dpctl_functions.queue_memcpy', (['sycl_queue', 'b_usm', 'b.ctypes', '(b.size * b.itemsize)'], {}), '(sycl_queue, b_usm, b.ctypes, b.size * b.itemsize)\n', (1198, 1248), False, 'from numba_dppy import dpctl_functions\n'), ((1264, 1330), 'numba_dppy.dpctl_functions.malloc_shared', 'dpctl_functions.malloc_shared', (['(out.size * out.itemsize)', 'sycl_queue'], {}), '(out.size * out.itemsize, sycl_queue)\n', (1293, 1330), False, 'from numba_dppy import 
dpctl_functions\n'), ((1375, 1465), 'numba_dppy.dpctl_functions.queue_memcpy', 'dpctl_functions.queue_memcpy', (['sycl_queue', 'out.ctypes', 'out_usm', '(out.size * out.itemsize)'], {}), '(sycl_queue, out.ctypes, out_usm, out.size *\n out.itemsize)\n', (1403, 1465), False, 'from numba_dppy import dpctl_functions\n'), ((1481, 1531), 'numba_dppy.dpctl_functions.free_with_queue', 'dpctl_functions.free_with_queue', (['b_usm', 'sycl_queue'], {}), '(b_usm, sycl_queue)\n', (1512, 1531), False, 'from numba_dppy import dpctl_functions\n'), ((1536, 1588), 'numba_dppy.dpctl_functions.free_with_queue', 'dpctl_functions.free_with_queue', (['out_usm', 'sycl_queue'], {}), '(out_usm, sycl_queue)\n', (1567, 1588), False, 'from numba_dppy import dpctl_functions\n'), ((1594, 1643), 'numba_dppy.dpnp_glue.dpnpimpl._dummy_liveness_func', 'dpnp_ext._dummy_liveness_func', (['[a.size, out.size]'], {}), '([a.size, out.size])\n', (1623, 1643), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((1862, 1897), 'numba_dppy.dpctl_functions.get_current_queue', 'dpctl_functions.get_current_queue', ([], {}), '()\n', (1895, 1897), False, 'from numba_dppy import dpctl_functions\n'), ((1911, 1973), 'numba_dppy.dpctl_functions.malloc_shared', 'dpctl_functions.malloc_shared', (['(a.size * a.itemsize)', 'sycl_queue'], {}), '(a.size * a.itemsize, sycl_queue)\n', (1940, 1973), False, 'from numba_dppy import dpctl_functions\n'), ((1978, 2056), 'numba_dppy.dpctl_functions.queue_memcpy', 'dpctl_functions.queue_memcpy', (['sycl_queue', 'a_usm', 'a.ctypes', '(a.size * a.itemsize)'], {}), '(sycl_queue, a_usm, a.ctypes, a.size * a.itemsize)\n', (2006, 2056), False, 'from numba_dppy import dpctl_functions\n'), ((2072, 2138), 'numba_dppy.dpctl_functions.malloc_shared', 'dpctl_functions.malloc_shared', (['(out.size * out.itemsize)', 'sycl_queue'], {}), '(out.size * out.itemsize, sycl_queue)\n', (2101, 2138), False, 'from numba_dppy import dpctl_functions\n'), ((2195, 2285), 
'numba_dppy.dpctl_functions.queue_memcpy', 'dpctl_functions.queue_memcpy', (['sycl_queue', 'out.ctypes', 'out_usm', '(out.size * out.itemsize)'], {}), '(sycl_queue, out.ctypes, out_usm, out.size *\n out.itemsize)\n', (2223, 2285), False, 'from numba_dppy import dpctl_functions\n'), ((2301, 2351), 'numba_dppy.dpctl_functions.free_with_queue', 'dpctl_functions.free_with_queue', (['a_usm', 'sycl_queue'], {}), '(a_usm, sycl_queue)\n', (2332, 2351), False, 'from numba_dppy import dpctl_functions\n'), ((2356, 2408), 'numba_dppy.dpctl_functions.free_with_queue', 'dpctl_functions.free_with_queue', (['out_usm', 'sycl_queue'], {}), '(out_usm, sycl_queue)\n', (2387, 2408), False, 'from numba_dppy import dpctl_functions\n'), ((2414, 2463), 'numba_dppy.dpnp_glue.dpnpimpl._dummy_liveness_func', 'dpnp_ext._dummy_liveness_func', (['[a.size, out.size]'], {}), '([a.size, out.size])\n', (2443, 2463), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((2626, 2657), 'numba_dppy.dpnp_glue.ensure_dpnp', 'dpnp_lowering.ensure_dpnp', (['name'], {}), '(name)\n', (2651, 2657), True, 'import numba_dppy.dpnp_glue as dpnp_lowering\n'), ((3086, 3147), 'numba.core.typing.signature', 'signature', (['ret_type', 'types.voidptr', 'types.voidptr', 'types.intp'], {}), '(ret_type, types.voidptr, types.voidptr, types.intp)\n', (3095, 3147), False, 'from numba.core.typing import signature\n'), ((3164, 3225), 'numba_dppy.dpnp_glue.dpnpimpl.dpnp_func', 'dpnp_ext.dpnp_func', (["('dpnp_' + name)", "[name_dtype, 'NONE']", 'sig'], {}), "('dpnp_' + name, [name_dtype, 'NONE'], sig)\n", (3182, 3225), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((3587, 3618), 'numba_dppy.dpnp_glue.ensure_dpnp', 'dpnp_lowering.ensure_dpnp', (['name'], {}), '(name)\n', (3612, 3618), True, 'import numba_dppy.dpnp_glue as dpnp_lowering\n'), ((4047, 4108), 'numba.core.typing.signature', 'signature', (['ret_type', 'types.voidptr', 'types.voidptr', 'types.intp'], {}), '(ret_type, types.voidptr, types.voidptr, 
types.intp)\n', (4056, 4108), False, 'from numba.core.typing import signature\n'), ((4125, 4186), 'numba_dppy.dpnp_glue.dpnpimpl.dpnp_func', 'dpnp_ext.dpnp_func', (["('dpnp_' + name)", "[name_dtype, 'NONE']", 'sig'], {}), "('dpnp_' + name, [name_dtype, 'NONE'], sig)\n", (4143, 4186), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((4537, 4568), 'numba_dppy.dpnp_glue.ensure_dpnp', 'dpnp_lowering.ensure_dpnp', (['name'], {}), '(name)\n', (4562, 4568), True, 'import numba_dppy.dpnp_glue as dpnp_lowering\n'), ((4830, 4891), 'numba.core.typing.signature', 'signature', (['ret_type', 'types.voidptr', 'types.voidptr', 'types.intp'], {}), '(ret_type, types.voidptr, types.voidptr, types.intp)\n', (4839, 4891), False, 'from numba.core.typing import signature\n'), ((4908, 4971), 'numba_dppy.dpnp_glue.dpnpimpl.dpnp_func', 'dpnp_ext.dpnp_func', (["('dpnp_' + name)", "[b.dtype.name, 'NONE']", 'sig'], {}), "('dpnp_' + name, [b.dtype.name, 'NONE'], sig)\n", (4926, 4971), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((5323, 5354), 'numba_dppy.dpnp_glue.ensure_dpnp', 'dpnp_lowering.ensure_dpnp', (['name'], {}), '(name)\n', (5348, 5354), True, 'import numba_dppy.dpnp_glue as dpnp_lowering\n'), ((5627, 5688), 'numba.core.typing.signature', 'signature', (['ret_type', 'types.voidptr', 'types.voidptr', 'types.intp'], {}), '(ret_type, types.voidptr, types.voidptr, types.intp)\n', (5636, 5688), False, 'from numba.core.typing import signature\n'), ((5705, 5768), 'numba_dppy.dpnp_glue.dpnpimpl.dpnp_func', 'dpnp_ext.dpnp_func', (["('dpnp_' + name)", "[b.dtype.name, 'NONE']", 'sig'], {}), "('dpnp_' + name, [b.dtype.name, 'NONE'], sig)\n", (5723, 5768), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((6818, 6849), 'numba_dppy.dpnp_glue.ensure_dpnp', 'dpnp_lowering.ensure_dpnp', (['name'], {}), '(name)\n', (6843, 6849), True, 'import numba_dppy.dpnp_glue as dpnp_lowering\n'), ((7154, 7230), 'numba.core.typing.signature', 'signature', (['ret_type', 
'types.voidptr', 'types.voidptr', 'types.voidptr', 'types.intp'], {}), '(ret_type, types.voidptr, types.voidptr, types.voidptr, types.intp)\n', (7163, 7230), False, 'from numba.core.typing import signature\n'), ((7247, 7310), 'numba_dppy.dpnp_glue.dpnpimpl.dpnp_func', 'dpnp_ext.dpnp_func', (["('dpnp_' + name)", "[a.dtype.name, 'NONE']", 'sig'], {}), "('dpnp_' + name, [a.dtype.name, 'NONE'], sig)\n", (7265, 7310), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((3312, 3340), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'res_dtype'}), '(1, dtype=res_dtype)\n', (3320, 3340), True, 'import numpy as np\n'), ((3355, 3389), 'numpy.zeros', 'np.zeros', (['a.shape'], {'dtype': 'res_dtype'}), '(a.shape, dtype=res_dtype)\n', (3363, 3389), True, 'import numpy as np\n'), ((4273, 4300), 'numpy.ones', 'np.ones', (['(1)'], {'dtype': 'res_dtype'}), '(1, dtype=res_dtype)\n', (4280, 4300), True, 'import numpy as np\n'), ((4315, 4348), 'numpy.ones', 'np.ones', (['a.shape'], {'dtype': 'res_dtype'}), '(a.shape, dtype=res_dtype)\n', (4322, 4348), True, 'import numpy as np\n'), ((5075, 5108), 'numpy.ones', 'np.ones', (['a.shape'], {'dtype': 'res_dtype'}), '(a.shape, dtype=res_dtype)\n', (5082, 5108), True, 'import numpy as np\n'), ((5955, 5990), 'numba_dppy.dpctl_functions.get_current_queue', 'dpctl_functions.get_current_queue', ([], {}), '()\n', (5988, 5990), False, 'from numba_dppy import dpctl_functions\n'), ((6008, 6070), 'numba_dppy.dpctl_functions.malloc_shared', 'dpctl_functions.malloc_shared', (['(b.size * b.itemsize)', 'sycl_queue'], {}), '(b.size * b.itemsize, sycl_queue)\n', (6037, 6070), False, 'from numba_dppy import dpctl_functions\n'), ((6079, 6157), 'numba_dppy.dpctl_functions.queue_memcpy', 'dpctl_functions.queue_memcpy', (['sycl_queue', 'b_usm', 'b.ctypes', '(b.size * b.itemsize)'], {}), '(sycl_queue, b_usm, b.ctypes, b.size * b.itemsize)\n', (6107, 6157), False, 'from numba_dppy import dpctl_functions\n'), ((6173, 6207), 'numpy.arange', 'np.arange', 
(['a.size'], {'dtype': 'res_dtype'}), '(a.size, dtype=res_dtype)\n', (6182, 6207), True, 'import numpy as np\n'), ((6226, 6292), 'numba_dppy.dpctl_functions.malloc_shared', 'dpctl_functions.malloc_shared', (['(out.size * out.itemsize)', 'sycl_queue'], {}), '(out.size * out.itemsize, sycl_queue)\n', (6255, 6292), False, 'from numba_dppy import dpctl_functions\n'), ((6345, 6435), 'numba_dppy.dpctl_functions.queue_memcpy', 'dpctl_functions.queue_memcpy', (['sycl_queue', 'out.ctypes', 'out_usm', '(out.size * out.itemsize)'], {}), '(sycl_queue, out.ctypes, out_usm, out.size *\n out.itemsize)\n', (6373, 6435), False, 'from numba_dppy import dpctl_functions\n'), ((6463, 6513), 'numba_dppy.dpctl_functions.free_with_queue', 'dpctl_functions.free_with_queue', (['b_usm', 'sycl_queue'], {}), '(b_usm, sycl_queue)\n', (6494, 6513), False, 'from numba_dppy import dpctl_functions\n'), ((6522, 6574), 'numba_dppy.dpctl_functions.free_with_queue', 'dpctl_functions.free_with_queue', (['out_usm', 'sycl_queue'], {}), '(out_usm, sycl_queue)\n', (6553, 6574), False, 'from numba_dppy import dpctl_functions\n'), ((6584, 6633), 'numba_dppy.dpnp_glue.dpnpimpl._dummy_liveness_func', 'dpnp_ext._dummy_liveness_func', (['[a.size, out.size]'], {}), '([a.size, out.size])\n', (6613, 6633), True, 'import numba_dppy.dpnp_glue.dpnpimpl as dpnp_ext\n'), ((7392, 7422), 'numba_dppy.dpnp.diagonal', 'numba_dppy.dpnp.diagonal', (['a', '(0)'], {}), '(a, 0)\n', (7416, 7422), False, 'import numba_dppy\n'), ((7437, 7481), 'numpy.zeros', 'np.zeros', (['diag_arr.shape[:-1]'], {'dtype': 'a.dtype'}), '(diag_arr.shape[:-1], dtype=a.dtype)\n', (7445, 7481), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..data_readers import KaldiMatrixIdentifier, KaldiARKReader, KaldiFrameIdentifier
from ..representation import CharacterRecognitionAnnotation, RegressionAnnotation
from ..config import PathField, BoolField, StringField
from ..utils import read_txt
from .format_converter import BaseFormatConverter, ConverterReturn
class KaldiSpeechRecognitionDataConverter(BaseFormatConverter):
    """Convert Kaldi ASR data (ARK feature files + a transcription file)
    into CharacterRecognitionAnnotation objects.

    Each annotation pairs an utterance identifier (pointing into an ARK
    file, optionally accompanied by an ivector ARK) with its ground-truth
    transcript text.
    """
    __provider__ = 'kaldi_asr_data'

    @classmethod
    def parameters(cls):
        """Extend base converter parameters with Kaldi-specific options."""
        params = super().parameters()
        params.update({
            'annotation_file': PathField(description='file with gt transcription'),
            'data_dir': PathField(description='directory with ark files', is_directory=True),
            'features_subset_file': PathField(description='file with list testing ark files', optional=True),
            'ivectors': BoolField(optional=True, default=False, description='include ivectors features')
        })
        return params

    def configure(self):
        """Read converter options from the validated configuration."""
        self.annotation_file = self.get_value_from_config('annotation_file')
        self.data_dir = self.get_value_from_config('data_dir')
        self.feat_list_file = self.get_value_from_config('features_subset_file')
        self.ivectors = self.get_value_from_config('ivectors')

    def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
        """Build annotations for every utterance that has a transcript.

        Returns a ConverterReturn whose annotations carry either a single
        KaldiMatrixIdentifier (features only) or a list of two identifiers
        (features + ivectors) per utterance.
        """
        ark_list = self.select_subset()
        transcripts = self.read_annotation()
        annotations = []
        for ark in ark_list:
            ivect = None
            # select_subset yields (features, ivector) tuples in ivector mode.
            if isinstance(ark, tuple):
                ark, ivect = ark
            utterances = KaldiARKReader.read_frames(ark)
            for utt in utterances:
                # Skip utterances with no ground-truth transcript.
                if utt not in transcripts:
                    continue
                identifier = (
                    KaldiMatrixIdentifier(ark.name, utt)
                    if not ivect else [KaldiMatrixIdentifier(ark.name, utt), KaldiMatrixIdentifier(ivect.name, utt)]
                )
                gt = transcripts[utt]
                annotations.append(CharacterRecognitionAnnotation(identifier, gt))
        return ConverterReturn(annotations, None, None)

    def select_subset(self):
        """Select the ARK files to convert.

        Without ivectors: either the files listed in the subset file or
        every *.ark in data_dir. With ivectors: pairs of
        (features ark, ivector ark), matched by the '_ivector' name suffix
        when no subset file is given.
        """
        if not self.ivectors:
            if self.feat_list_file:
                return [self.data_dir / ark for ark in read_txt(self.feat_list_file)]
            return list(self.data_dir.glob('*.ark'))
        if self.feat_list_file:
            # Each subset line holds "<features_ark> <ivector_ark>".
            return [
                (self.data_dir / ark.split(' ')[0], self.data_dir / ark.split(' ')[1])
                for ark in read_txt(self.feat_list_file)
            ]
        pairs = []
        for ivector_file in self.data_dir.glob("*_ivector*.ark"):
            # The matching feature file is the same name minus '_ivector'.
            feats_file = self.data_dir / ivector_file.name.replace('_ivector', '')
            if not feats_file.exists():
                continue
            pairs.append((feats_file, ivector_file))
        return pairs

    def read_annotation(self):
        """Parse the transcription file into {utterance_key: transcript}.

        Each line is "<utterance_key> <transcript text...>"; only the first
        space splits key from text.
        """
        trascript_dict = {}
        for line in read_txt(self.annotation_file):
            utterance_key, text = line.split(' ', 1)
            trascript_dict[utterance_key] = text
        return trascript_dict
class KaldiFeatureRegressionConverter(BaseFormatConverter):
    """Convert Kaldi feature files with reference score files into
    RegressionAnnotation objects.

    Inputs may be Kaldi ARK files or numpy .npz archives; each input
    (optionally with an ivector companion) is matched against a reference
    score file, per-utterance or per-frame (vector) granularity.
    """
    __provider__ = 'kaldi_feat_regression'

    @classmethod
    def parameters(cls):
        """Extend base converter parameters with regression-specific options."""
        params = super().parameters()
        params.update({
            'data_dir': PathField(description='directory with ark files', is_directory=True),
            'ref_data_dir': PathField(description='directory with ref data', is_directory=True, optional=True),
            'features_subset_file': PathField(description='file with list testing ark files', optional=True),
            'ivectors': BoolField(optional=True, default=False, description='include ivectors features'),
            'ref_file_suffix': StringField(optional=True, default='_kaldi_score'),
            'vectors_mode': BoolField(optional=True, default=True, description='Split data to vectors'),
            'utterance_name_agnostic': BoolField(
                optional=True, default=False, description='do not match names per utterance'
            ),
            'use_numpy_data': BoolField(
                optional=True, default=False, description='allow to search npz files instead of ark'
            )
        })
        return params

    def configure(self):
        """Read converter options; ref_data_dir defaults to data_dir."""
        self.data_dir = self.get_value_from_config('data_dir')
        self.feat_list_file = self.get_value_from_config('features_subset_file')
        self.ivectors = self.get_value_from_config('ivectors')
        self.ref_data_dir = self.get_value_from_config('ref_data_dir')
        if self.ref_data_dir is None:
            self.ref_data_dir = self.data_dir
        self.ref_file_suffix = self.get_value_from_config('ref_file_suffix')
        self.vectors_mode = self.get_value_from_config('vectors_mode')
        self.utt_agnostic = self.get_value_from_config('utterance_name_agnostic')
        # File extension to search for: ARK by default, npz when requested.
        self.file_ext = '.ark' if not self.get_value_from_config('use_numpy_data') else '.npz'

    def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
        """Build regression annotations for every selected file group.

        Each group is (input file(s)..., reference file); matching is
        either by utterance name or positional (agnostic mode).
        """
        ark_list = self.select_subset()
        annotation = []
        for files in ark_list:
            # Last path in the group is always the reference score file.
            input_files, ref_ark = files[:-1], files[-1]
            if not self.utt_agnostic:
                annotation = self._convert_utt_specific(input_files, ref_ark, annotation)
            else:
                annotation = self._convert_utt_agnostic(input_files, ref_ark, annotation)
        return ConverterReturn(annotation, None, None)

    def _convert_utt_agnostic(self, input_files, ref_ark, annotation):
        """Match inputs to references positionally (by index, not by name).

        NOTE(review): assumes each reader yields utterances in the same
        order across input and reference files — verify for the data used.
        """
        input_utts = []
        for in_file in input_files:
            input_utts.append(KaldiARKReader.read_frames(in_file) if in_file.suffix != '.npz' else np.load(in_file))
        # Per-input lists of utterance keys, aligned by position.
        utt_ids = [list(in_utt.keys()) for in_utt in input_utts]
        ref_scores = KaldiARKReader.read_frames(ref_ark) if ref_ark.suffix != '.npz' else np.load(ref_ark)
        for idx, (_, ref_matrix) in enumerate(ref_scores.items()):
            current_utts = [u[idx] for u in utt_ids]
            if self.vectors_mode:
                # One annotation per row (frame vector) of the reference matrix.
                for v_idx, ref_v in enumerate(ref_matrix):
                    identifier = [
                        KaldiFrameIdentifier(in_file.name, utt, v_idx)
                        if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt, v_idx)
                        for in_file, utt in zip(input_files, current_utts)
                    ]
                    # Single-input groups use a bare identifier, not a list.
                    if len(identifier) == 1:
                        identifier = identifier[0]
                    annotation.append(RegressionAnnotation(identifier, ref_v))
            else:
                identifier = [
                    KaldiMatrixIdentifier(in_file.name, utt)
                    if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt)
                    for in_file, utt in zip(input_files, current_utts)
                ]
                if len(identifier) == 1:
                    identifier = identifier[0]
                annotation.append(RegressionAnnotation(identifier, ref_matrix))
        return annotation

    def _convert_utt_specific(self, input_files, ref_ark, annotation):
        """Match inputs to references by utterance name.

        Utterance keys are taken from the first input file; references
        without a matching key in the inputs are skipped.
        """
        utterances = (
            KaldiARKReader.read_frames(input_files[0])
            if input_files[0].suffix != '.npz' else dict(np.load(input_files[0])))
        ref_scores = KaldiARKReader.read_frames(ref_ark) if ref_ark.suffix != '.npz' else dict(np.load(ref_ark))
        for utt, matrix in utterances.items():
            if utt not in ref_scores:
                continue
            ref_matrix = ref_scores[utt]
            if self.vectors_mode:
                # One annotation per row (frame vector) of the input matrix.
                for vector_id, _ in enumerate(matrix):
                    identifier = [
                        KaldiFrameIdentifier(in_file.name, utt, vector_id)
                        if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt, vector_id)
                        for in_file in input_files
                    ]
                    if len(identifier) == 1:
                        identifier = identifier[0]
                    ref_vector = ref_matrix[vector_id]
                    annotation.append(RegressionAnnotation(identifier, ref_vector))
            else:
                identifier = [KaldiMatrixIdentifier(in_file.name, utt)
                              if in_file.suffix != '.npz' else generate_numpy_identifier(in_file.name, utt)
                              for in_file in input_files]
                if len(identifier) == 1:
                    identifier = identifier[0]
                annotation.append(RegressionAnnotation(identifier, ref_matrix))
        return annotation

    def select_subset(self):
        """Select file groups to convert.

        Returns a list of tuples/lists where the last element is the
        reference file; inputs are listed from the subset file, or found
        by globbing and suffix matching.
        """
        if self.feat_list_file:
            subset = []
            for ark in read_txt(self.feat_list_file):
                # All columns but the last are inputs; the last is the reference.
                files = [self.data_dir / f for f in ark.split(' ')[:-1]]
                files.append(self.ref_data_dir / ark.split(' ')[-1])
                subset.append(files)
            return subset
        if not self.ivectors:
            pairs = []
            for ark_file in self.data_dir.glob('*{}'.format(self.file_ext)):
                # When inputs and refs share a directory, skip reference files.
                if self.data_dir == self.ref_data_dir and self.ref_file_suffix in ark_file.name:
                    continue
                ref_file = self.ref_data_dir / ark_file.name.replace(self.file_ext, self.ref_file_suffix+self.file_ext)
                pairs.append((ark_file, ref_file))
            return pairs
        triples = []
        for ivector_file in self.data_dir.glob("*_ivector{}".format(self.file_ext)):
            feats_file = self.data_dir / ivector_file.name.replace('_ivector', '')
            ref_file = self.ref_data_dir / ivector_file.name.replace('_ivector', self.ref_file_suffix)
            if not feats_file.exists() or not ref_file.exists():
                continue
            triples.append((feats_file, ivector_file, ref_file))
        return triples
def generate_numpy_identifier(file_name, array_id, idx=None):
    """Build an identifier string for an array stored in a numpy file.

    Format: "<array_id>[_<idx>]#<file_name>"; the index part is omitted
    when idx is None.
    """
    index_part = '' if idx is None else '_{}'.format(idx)
    return '{}{}#{}'.format(array_id, index_part, file_name)
| [
"numpy.load"
] | [((6591, 6607), 'numpy.load', 'np.load', (['ref_ark'], {}), '(ref_ark)\n', (6598, 6607), True, 'import numpy as np\n'), ((8030, 8053), 'numpy.load', 'np.load', (['input_files[0]'], {}), '(input_files[0])\n', (8037, 8053), True, 'import numpy as np\n'), ((8151, 8167), 'numpy.load', 'np.load', (['ref_ark'], {}), '(ref_ark)\n', (8158, 8167), True, 'import numpy as np\n'), ((6418, 6434), 'numpy.load', 'np.load', (['in_file'], {}), '(in_file)\n', (6425, 6434), True, 'import numpy as np\n')] |
import numpy as np
import os
import sys

# Make the local mindware package importable before the mindware imports below.
sys.path.append(os.getcwd())
from mindware.components.feature_engineering.transformations.preprocessor.text2vector import \
    Text2VectorTransformation
from mindware.components.feature_engineering.transformation_graph import DataNode
from mindware.components.utils.constants import *
from mindware.estimators import Classifier

# Toy dataset: one numerical, two text and one discrete column.
x = np.array([[1, 'I am good', 'I am right', 3], [2, 'He is good', 'He is ok', 4],
              [2.5, 'Everyone is good', 'Everyone is ok', 7], [1.3333, 'well', 'what', 5]])
y = np.array([0, 1, 0, 1])

# Text-to-vector preprocessor (instantiated for demonstration).
t2v = Text2VectorTransformation()
data = (x, y)
feature_type = [NUMERICAL, TEXT, TEXT, DISCRETE]
datanode = DataNode(data, feature_type)

# Train a random-forest-only classifier with a 20 s budget and print
# its predictions on the training data.
clf = Classifier(time_limit=20,
                 enable_meta_algorithm_selection=False,
                 include_algorithms=['random_forest'])
clf.fit(datanode, opt_strategy='combined')
print(clf.predict(datanode))
| [
"mindware.components.feature_engineering.transformation_graph.DataNode",
"os.getcwd",
"mindware.components.feature_engineering.transformations.preprocessor.text2vector.Text2VectorTransformation",
"mindware.estimators.Classifier",
"numpy.array"
] | [((376, 541), 'numpy.array', 'np.array', (["[[1, 'I am good', 'I am right', 3], [2, 'He is good', 'He is ok', 4], [2.5,\n 'Everyone is good', 'Everyone is ok', 7], [1.3333, 'well', 'what', 5]]"], {}), "([[1, 'I am good', 'I am right', 3], [2, 'He is good', 'He is ok', \n 4], [2.5, 'Everyone is good', 'Everyone is ok', 7], [1.3333, 'well',\n 'what', 5]])\n", (384, 541), True, 'import numpy as np\n'), ((551, 573), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (559, 573), True, 'import numpy as np\n'), ((581, 608), 'mindware.components.feature_engineering.transformations.preprocessor.text2vector.Text2VectorTransformation', 'Text2VectorTransformation', ([], {}), '()\n', (606, 608), False, 'from mindware.components.feature_engineering.transformations.preprocessor.text2vector import Text2VectorTransformation\n'), ((683, 711), 'mindware.components.feature_engineering.transformation_graph.DataNode', 'DataNode', (['data', 'feature_type'], {}), '(data, feature_type)\n', (691, 711), False, 'from mindware.components.feature_engineering.transformation_graph import DataNode\n'), ((719, 825), 'mindware.estimators.Classifier', 'Classifier', ([], {'time_limit': '(20)', 'enable_meta_algorithm_selection': '(False)', 'include_algorithms': "['random_forest']"}), "(time_limit=20, enable_meta_algorithm_selection=False,\n include_algorithms=['random_forest'])\n", (729, 825), False, 'from mindware.estimators import Classifier\n'), ((57, 68), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (66, 68), False, 'import os\n')] |
## @package Labelling_app
# Labelling app software developed with Grabcut
#
# @version 1
#
# Pontificia Universidad Javeriana
#
# Electronic Enginnering
#
# Developed by:
# - <NAME>
# Mail: <<EMAIL>>
# GitHub: andrearuizg
# - <NAME>
# Mail: <<EMAIL>>
# GitHub: PedroRuizCode
#
# With support of:
# - <NAME>
# Mail: <<EMAIL>>
# GitHub: calderonf
# - <NAME>
# Mail: <<EMAIL>>
# GitHub: JohnBetaCode
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage
from PyQt5.QtCore import Qt
import cv2
import numpy as np
from time import time
import random
## GUI class
class GUI(QMainWindow):
## The constructor
#
# Here you can configure the screen, buttons (rectangle, foreground,
# iteration, open file, new label, original image, segmented image,
# labelled image, previous, next, save, exit, reset), labels (video frame,
# label, show image), spin box (video frames), list (labels), image panel,
# checkbox (full screen, dark theme) and mouse events
# @param self The object pointer.
    def __init__(self):
        """Build the whole main window: buttons, labels, spin box, label
        list, image panel, checkboxes, and mouse-event bindings."""
        super().__init__()
        app.setStyle('Fusion')
        # Size the window to the primary screen, leaving 30 px for the bar.
        screen = app.primaryScreen()
        rect = screen.size()
        width = rect.width()
        height = rect.height() - 30
        self.setGeometry(10, 10, width, height)
        self.setWindowTitle("Kiwi & PUJ - Labelling software")
        self.setWindowIcon(QIcon("media/.icons/ICON.png"))
        # Segmentation tool buttons (disabled until a file is opened).
        self.b_rec = QPushButton(self)
        self.b_rec.setText('&Rectangle')
        self.b_rec.move(((width // 2) - 210), 15)
        self.b_rec.setEnabled(False)
        self.b_rec.setShortcut('Ctrl+r')
        self.b_rec.clicked.connect(self.rectangle_)
        self.b_bg = QPushButton(self)
        self.b_bg.setText('&Background')
        self.b_bg.move(((width // 2) - 105), 15)
        self.b_bg.setEnabled(False)
        self.b_bg.setShortcut('Ctrl+b')
        self.b_bg.clicked.connect(self.background_)
        self.b_fg = QPushButton(self)
        self.b_fg.setText('&Foreground')
        self.b_fg.move(width // 2, 15)
        self.b_fg.setEnabled(False)
        self.b_fg.setShortcut('Ctrl+f')
        self.b_fg.clicked.connect(self.foreground_)
        self.b_it = QPushButton(self)
        self.b_it.setText('&Iteration')
        self.b_it.move(((width // 2) + 105), 15)
        self.b_it.setEnabled(False)
        self.b_it.setShortcut('Ctrl+i')
        self.b_it.clicked.connect(self.iteration_)
        # File-open button, always enabled.
        f_open = QPushButton(self)
        f_open.setText('&Open file')
        f_open.setIcon(QIcon('media/.icons/file.png'))
        f_open.move(10, 15)
        f_open.setShortcut('Ctrl+o')
        f_open.clicked.connect(self.open_)
        # Video frame-step spin box.
        t1 = QLabel(self)
        t1.setText("Video frames")
        t1.move(10, height - 175)
        self.spin = QSpinBox(self)
        self.spin.move(10, height - 150)
        self.spin.setValue(30)
        self.spin.setRange(1, 999)
        self.spin.valueChanged.connect(self.sh_spin_val)
        # Label picker: labels are read one-per-line from /tmp/labels.txt.
        t1 = QLabel(self)
        t1.setText("Labels")
        t1.move(10, 90)
        self.b_new = QPushButton(self)
        self.b_new.setText('&New')
        self.b_new.setIcon(QIcon('media/.icons/new.png'))
        self.b_new.setEnabled(False)
        self.b_new.setShortcut('Ctrl+n')
        self.b_new.move(10, 120)
        self.b_new.clicked.connect(self.new_label)
        labels = open('/tmp/labels.txt', 'r').read()
        self.labels = list(labels.split("\n"))
        self.Label_n = QComboBox(self)
        # Last split element is the empty string after the trailing newline.
        for n in range(len(self.labels) - 1):
            self.Label_n.addItem(self.labels[n])
        self.Label_n.move(10, 150)
        self.Label_n.setEnabled(False)
        self.Label_n.activated[str].connect(self.sel_LN)
        # View-selection buttons (original / segmented / labelled image).
        t2 = QLabel(self)
        t2.setText("Show image")
        t2.move(10, height // 2)
        self.b_or = QPushButton(self)
        self.b_or.setText('Original')
        self.b_or.move(10, (height // 2) + 30)
        self.b_or.setEnabled(False)
        self.b_or.clicked.connect(self.b_or_)
        self.b_seg = QPushButton(self)
        self.b_seg.setText('Segmented')
        self.b_seg.move(10, (height // 2) + 60)
        self.b_seg.setEnabled(False)
        self.b_seg.clicked.connect(self.b_seg_)
        self.b_lab = QPushButton(self)
        self.b_lab.setText('Labels')
        self.b_lab.move(10, (height // 2) + 90)
        self.b_lab.setEnabled(False)
        self.b_lab.clicked.connect(self.b_lab_)
        # Navigation and save buttons.
        self.b_pre = QPushButton(self)
        self.b_pre.setText('Previous')
        self.b_pre.setIcon(QIcon('media/.icons/undo.png'))
        self.b_pre.move(10, height - 110)
        self.b_pre.setShortcut('Ctrl+Left')
        self.b_pre.setEnabled(False)
        self.b_pre.clicked.connect(self.b_pre_)
        self.b_nxt = QPushButton(self)
        self.b_nxt.setText('Next')
        self.b_nxt.setIcon(QIcon('media/.icons/redo.png'))
        self.b_nxt.move(10, height - 80)
        self.b_nxt.setShortcut('Ctrl+Right')
        self.b_nxt.setEnabled(False)
        self.b_nxt.clicked.connect(self.b_nxt_)
        self.b_sav = QPushButton(self)
        self.b_sav.setText('&SAVE')
        self.b_sav.setIcon(QIcon('media/.icons/save.png'))
        self.b_sav.move(10, height - 30)
        self.b_sav.setEnabled(False)
        self.b_sav.setShortcut('Ctrl+s')
        self.b_sav.clicked.connect(self.b_sav_)
        b_ext = QPushButton(self)
        b_ext.setText('EXIT')
        b_ext.setIcon(QIcon('media/.icons/exit.png'))
        b_ext.move(width - 110, height - 30)
        b_ext.clicked.connect(self.b_ext_)
        b_res = QPushButton(self)
        b_res.setText('RESET')
        b_res.move(width - 110, height - 80)
        b_res.clicked.connect(self.reset_)
        # Central image panel where pictures are drawn.
        self.image_1 = QLabel(self)
        self.image_1.resize(640, 480)
        self.image_1.move((width // 2) - 320, (height // 2) - 200)
        # Theme / screen-mode checkboxes.
        self.dark = QCheckBox(self)
        self.dark.setText('Dark theme')
        self.dark.setChecked(True)
        self.dark.move((width - 110), 15)
        self.dark.toggled.connect(self.dark_)
        self.fs = QCheckBox(self)
        self.fs.setText('Full Screen')
        self.fs.setChecked(True)
        self.fs.move((width - 110), 35)
        self.fs.toggled.connect(self.fullScreen_)
        self.fullScreen_()
        self.show()
        self.reset_()
        self.dark_()
        # Mouse events on the image panel drive rectangle/brush drawing.
        self.image_1.mousePressEvent = self.mouse_down
        self.image_1.mouseMoveEvent = self.mouse_move
        self.image_1.mouseReleaseEvent = self.mouse_up
## Label selection function
#
# Select the label of the segmented image, and created the labelled image
# file
# @param self The object pointer.
# @param text Label gave by the user
    def sel_LN(self, text):
        """Apply the selected label to the segmented region.

        Fills the segmentation contours with the label index (n + 1) in the
        single-channel mask img_out, and with the label's display colour in
        the visualization image img_label.

        :param text: label name chosen in the combo box.
        """
        for n in range(len(self.labels) - 1):
            if text == self.labels[n]:
                self.contour_()
                # NOTE(review): converting on every match is redundant after
                # the first call, but harmless (tuple(tuple) is a no-op copy).
                self.colors = tuple(self.colors)
                cv2.drawContours(self.img_out, self.contours, -1, n + 1,
                                 thickness=cv2.FILLED)
                cv2.drawContours(self.img_label, self.contours, -1,
                                 self.colors[n], thickness=cv2.FILLED)
## Contour function
#
# Determine the contour of the segmented image
# @param self The object pointer.
def contour_(self):
imgray = cv2.cvtColor(self.out, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 1, 255, 0)
self.contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
## New label button function
#
# Set enable flag true
# @param self The object pointer.
def new_label(self):
self.Label_n.setEnabled(True)
self.b_lab_()
## Original picture button
#
# Show original picture
# @param self The object pointer.
def b_or_(self):
self.showImage_(self.img_in)
## Segmented picture button
#
# Show segmented picture
# @param self The object pointer.
def b_seg_(self):
self.showImage_(self.out)
## Labelled picture button
#
# Show labelled picture
# @param self The object pointer.
def b_lab_(self):
self.showImage_(self.img_label)
## Spin value function
#
# Update video frame variable
# @param self The object pointer.
def sh_spin_val(self):
self.value_sp = self.spin.value()
## Previous image button function
#
# Disable buttons and show warnings
# @param self The object pointer.
    def b_pre_(self):
        """Go to the previous image (or previous video frame block).

        If the last segmentation was not saved yet (flag_save == 1), only
        warn the user instead of navigating.
        """
        if self.flag_save == 0:
            # Disable editing controls until a new segmentation is started.
            self.b_sav.setEnabled(False)
            self.b_bg.setEnabled(False)
            self.b_fg.setEnabled(False)
            self.b_it.setEnabled(False)
            self.b_new.setEnabled(False)
            self.Label_n.setEnabled(False)
            self.b_or.setEnabled(False)
            self.b_seg.setEnabled(False)
            self.b_lab.setEnabled(False)
            # NOTE(review): flag_save is known to be 0 here, so this branch
            # appears unreachable — confirm intent before removing.
            if self.flag_save == 1:
                self.show_alert()
            if self.file_vid == 0:
                self.file_num -= 1
                self.load()
            else:
                # Stepping back past a finished video re-enters it.
                if self.flag_vid == 1:
                    self.flag_vid = 0
                    self.file_num -= 2
                self.frame_act -= int(self.value_sp)
                self.load_vid()
        else:
            self.flag_save = 0
            self.show_alert()
## Next image button function
#
# Disable buttons and show warnings
# @param self The object pointer.
    def b_nxt_(self):
        """Go to the next image (or next video frame block).

        If the last segmentation was not saved yet (flag_save == 1), only
        warn the user instead of navigating.
        """
        if self.flag_save == 0:
            # Disable editing controls until a new segmentation is started.
            self.b_sav.setEnabled(False)
            self.b_bg.setEnabled(False)
            self.b_fg.setEnabled(False)
            self.b_it.setEnabled(False)
            self.b_new.setEnabled(False)
            self.Label_n.setEnabled(False)
            self.b_or.setEnabled(False)
            self.b_seg.setEnabled(False)
            self.b_lab.setEnabled(False)
            if self.file_vid == 0:
                self.file_num += 1
                self.load()
            else:
                # Advance by the configured frame step within the video.
                self.frame_act += int(self.value_sp)
                self.load_vid()
        else:
            self.flag_save = 0
            self.show_alert()
## Save button function
#
# Save files
# - for pictures only save the labelled mask
# - for videos save labelled mask and original frame
# @param self The object pointer.
def b_sav_(self):
str = (self.filename[self.file_num].split(".")[0]).split("/")[-1]
if self.file_vid == 0:
outfile = 'media/%s-mask.png' % (str)
outfile1 = 'media/%s.png' % (str)
else:
outfile = 'media/%s-frame-%s-mask.png' % (str, self.frame_act)
outfile1 = 'media/%s-frame-%s.png' % (str, self.frame_act)
original = '%s' % self.filename[self.file_num].split("/")[-1]
mask = '%s' % outfile.split("/")[-1]
tf = '%s' % (time() - self.ti)
self.d_time[self.frame_num, ...] = [original, mask, tf]
cv2.imwrite(outfile, self.img_out)
cv2.imwrite(outfile1, self.img_in)
self.frame_num += 1
self.flag_save = 0
self.flag_file = 1
## Exit button function
#
# Save time stamps csv and close app
# @param self The object pointer.
def b_ext_(self):
if self.flag_file == 1:
np.savetxt("media/timestamps.csv", self.d_time, delimiter=", ",
fmt='%s')
self.close()
QApplication.quit()
## Open button function
#
# Open file dialog window
# @param self The object pointer.
def open_(self):
self.filename, _ = QFileDialog.getOpenFileNames(None, 'Buscar Imagen',
'.', 'Image Files (*.png *.jpg *.jpeg *.bmp *.mp4)')
self.file_num = 0
self.frame_num = 1
self.flag_save = 0
self.flag_vid = 0
self.file_vid = 0
self.b_rec.setEnabled(True)
self.b_pre.setEnabled(True)
self.b_nxt.setEnabled(True)
self.load()
## Load function
#
# Open file in open cv
# @param self The object pointer.
    def load(self):
        """Load the current file (image or video) and refresh navigation.

        Images are resized to 640x480 and shown immediately; videos are
        opened and handed to load_vid(). Past the last file the app exits.
        """
        self.flag_save = 0
        if self.file_num < len(self.filename):
            if (self.filename[self.file_num].split(".")[-1] in
                    ['png', 'jpg', 'jpeg', 'bmp']):
                self.img_in = cv2.imread(self.filename[self.file_num],
                                         cv2.IMREAD_UNCHANGED)
                self.img_in = cv2.resize(self.img_in, (640, 480))
                self.img_copy = self.img_in.copy()
                # Fresh label mask (one channel, label index per pixel).
                self.img_out = np.zeros((480, 640), np.uint8)
                self.img_label = self.img_in.copy()
                self.showImage_(self.img_in)
            else:
                # Anything not in the image list is treated as a video.
                self.file_vid = 1
                self.vid = cv2.VideoCapture(self.filename[self.file_num])
                self.length = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
                self.frame_act = 1
                self.load_vid()
        else:
            self.b_ext_()
        # Previous/next availability at the list boundaries (images only).
        if ((self.file_num == 0) and (self.file_vid == 0)):
            self.b_pre.setEnabled(False)
        else:
            self.b_pre.setEnabled(True)
        if ((self.file_num == (len(self.filename) - 1))
                and (self.file_vid == 0)):
            self.b_nxt.setEnabled(False)
        else:
            self.b_nxt.setEnabled(True)
## Load video function
#
# Open video frames
# @param self The object pointer.
    def load_vid(self):
        """Load the current video frame (frame_act), or move on to the
        next file once the video is exhausted."""
        self.sh_spin_val()
        if self.vid.isOpened():
            if (self.frame_act <= self.length) and (self.frame_act > 0):
                # Seek to the requested frame (property 1 = POS_FRAMES).
                self.vid.set(1, self.frame_act)
                ret, self.img_in = self.vid.read()
                self.img_in = cv2.resize(self.img_in, (640, 480))
                self.img_copy = self.img_in.copy()
                # Fresh label mask (one channel, label index per pixel).
                self.img_out = np.zeros((480, 640), np.uint8)
                self.img_label = self.img_in.copy()
                self.showImage_(self.img_in)
            else:
                # Out of range: close the video and advance to the next file.
                self.flag_vid = 1
                self.vid.release()
                self.file_vid = 0
                self.file_num += 1
                self.load()
## Show image function
#
# Show picture in Pixmap
# @param self The object pointer.
# @param image Image to display.
def showImage_(self, image):
size = image.shape
step = image.size / size[0]
qformat = QImage.Format_Indexed8
if len(size) == 3:
if size[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(image, size[1], size[0], step, qformat)
img = img.rgbSwapped()
self.image_1.setPixmap(QPixmap.fromImage(img))
self.resize(self.image_1.pixmap().size())
## Rectangle button function
#
# Enable flags to draw rectangle in picture
# @param self The object pointer.
def rectangle_(self):
self.b_bg.setEnabled(True)
self.b_fg.setEnabled(True)
self.b_it.setEnabled(True)
self.flag_rect = True
self.flag_circle_fg = False
self.flag_circle_bg = False
self.ini_points = []
self.ti = time()
## Background button function
#
# Enable flags to draw the background
# @param self The object pointer.
def background_(self):
self.flag_rect = False
self.flag_circle_fg = False
self.flag_circle_bg = True
## Foreground button function
#
# Enable flags to draw the foreground
# @param self The object pointer.
def foreground_(self):
self.flag_rect = False
self.flag_circle_fg = True
self.flag_circle_bg = False
## Iteration button function
#
# Iteration to make the segmented image
# @param self The object pointer.
    def iteration_(self):
        """Run one GrabCut iteration with the current mask and show the
        resulting segmented image."""
        self.b_sav.setEnabled(True)
        self.b_new.setEnabled(True)
        self.b_or.setEnabled(True)
        self.b_seg.setEnabled(True)
        self.b_lab.setEnabled(True)
        self.flag_save = 1
        # Leave all drawing modes while GrabCut runs.
        self.flag_rect = False
        self.flag_circle_fg = False
        self.flag_circle_bg = False
        cv2.grabCut(self.img_in, self.mask, None, self.BGD_model,
                    self.FGD_model, 1, cv2.GC_INIT_WITH_MASK)
        # Mask values 1 (sure fg) and 3 (probable fg) count as foreground.
        comp = (self.mask == 1) | (self.mask == 3)
        self.m_out = np.where(comp, 1, 0).astype('uint8')
        self.out = cv2.bitwise_and(self.img_in, self.img_in, mask=self.m_out)
        self.showImage_(self.out)
## Dark theme function
#
# Set dark or white theme
# @param self The object pointer.
def dark_(self):
if self.dark.isChecked() is True:
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, Qt.white)
palette.setColor(QPalette.Base, QColor(25, 25, 25))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, Qt.white)
palette.setColor(QPalette.ToolTipText, Qt.white)
palette.setColor(QPalette.Text, Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, Qt.white)
palette.setColor(QPalette.BrightText, Qt.red)
palette.setColor(QPalette.Link, QColor(42, 130, 218))
palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
palette.setColor(QPalette.HighlightedText, Qt.black)
palette.setColor(QPalette.Disabled, QPalette.Base,
QColor(52, 52, 52))
palette.setColor(QPalette.Disabled, QPalette.Text,
QColor(57, 57, 57))
palette.setColor(QPalette.Disabled, QPalette.Button,
QColor(47, 47, 47))
palette.setColor(QPalette.Disabled, QPalette.ButtonText,
QColor(67, 67, 67))
palette.setColor(QPalette.Disabled, QPalette.Window,
QColor(49, 49, 49))
palette.setColor(QPalette.Disabled, QPalette.WindowText,
QColor(57, 57, 57))
self.setPalette(palette)
if self.dark.isChecked() is False:
palette = QPalette()
palette.setColor(QPalette.Window, QColor(239, 239, 239))
palette.setColor(QPalette.WindowText, Qt.black)
self.setPalette(palette)
## Show alert function
#
# Show alert when the labelled picture is not save
# @param self The object pointer.
def show_alert(self):
warning = QMessageBox(self)
warning.setIcon(QMessageBox.Warning)
warning.setText("Remember to save the results")
warning.setWindowTitle("Warning")
warning.exec_()
## Maximized function
#
# Maximized window
# @param self The object pointer.
    def maximized(self):
        """Show the main window maximized (windowed, not full screen)."""
        self.showMaximized()
## Full-screen function
#
# Full-screen window
# @param self The object pointer.
def fullScreen_(self):
if self.fs.isChecked() is True:
self.showFullScreen()
else:
self.showMaximized()
## Mouse move function
#
# Make the rectangle or circles when user is pressing the mouse
# @param self The object pointer.
# @param event The mouse event.
    def mouse_move(self, event):
        """Mouse-drag handler on the image panel.

        In rectangle mode, redraw the preview rectangle from the anchor to
        the cursor; in brush mode (while pressed), paint foreground (white,
        mask=1) or background (black, mask=0) strokes.
        """
        x = event.pos().x()
        y = event.pos().y()
        if self.flag_rect is True:
            # Redraw on a fresh copy so only one rectangle is visible.
            img_temp_m = self.img_in.copy()
            self.fin_points = [x, y]
            self.img_copy = cv2.rectangle(img_temp_m, tuple(self.ini_points),
                                          tuple(self.fin_points), (0, 0, 255),
                                          5)
        if ((self.flag_circle_fg is True) and (self.start is True)):
            cv2.circle(self.img_copy, (x, y), 3, (255, 255, 255), -1)
            cv2.circle(self.mask, (x, y), 5, 1, -1)
        if ((self.flag_circle_bg is True) and (self.start is True)):
            cv2.circle(self.img_copy, (x, y), 3, (0, 0, 0), -1)
            cv2.circle(self.mask, (x, y), 5, 0, -1)
        self.showImage_(self.img_copy)
## Mouse down function
#
# Make the initial points of the rectangle or start circles
# @param self The object pointer.
# @param event The mouse event.
def mouse_down(self, event):
x = event.pos().x()
y = event.pos().y()
if self.flag_rect is True:
self.ini_points = [x, y]
if ((self.flag_rect is False) and ((self.flag_circle_fg is True)
or (self.flag_circle_bg is True))):
self.start = True
## Mouse up function
#
# Make the final points of the rectangle or finish circles
# @param self The object pointer.
# @param event The mouse event.
def mouse_up(self, event):
x = event.pos().x()
y = event.pos().y()
if self.flag_rect is True:
img_temp = self.img_in.copy()
self.fin_points = [x, y]
self.img_copy = cv2.rectangle(img_temp, tuple(self.ini_points),
tuple(self.fin_points), (0, 0, 255),
5)
self.mask = np.zeros((480, 640), np.uint8)
self.mask = cv2.rectangle(self.mask, tuple(self.ini_points),
tuple(self.fin_points), 3, -1)
self.flag_rect = False
self.start = False
self.showImage_(self.img_copy)
## Reset function
#
# Reset app
# @param self The object pointer.
def reset_(self):
self.flag_file = 0
self.d_time = np.zeros((10000, 3), dtype='U255')
self.d_time[0, ...] = ['Img. Original', 'Img. Mask', 'Time (s)']
self.BGD_model = np.zeros((1, 65), np.float64)
self.FGD_model = np.zeros((1, 65), np.float64)
self.ini_points, self.fin_points = [], []
self.flag_rect = False
self.flag_circle_fg = False
self.flag_circle_bg = False
self.start = False
self.mask = np.zeros((640, 480), np.uint8)
img = cv2.imread('media/.icons/INTRO.png', 1)
img = cv2.resize(img, (640, 480))
self.colors = np.random.randint(20, 255, (len(self.labels) - 1, 3))
self.colors = []
for n in range(len(self.labels) - 1):
color = []
for _ in range(3):
color.append(random.randrange(0, 255))
self.colors.append(tuple(color))
self.showImage_(img)
## @var flag_file
# It takes 0 value when the user hasn't chosen a file
#
# It takes 1 value when the user choose a file
## @var b_rec
# Rectangle push button variable
## @var b_bg
# Background push button variable
## @var b_fg
# Foreground push button variable
## @var b_it
# Iteration push button variable
## @var spin
# Value of video frame variable at spin box
## @var b_new
# New push button variable
## @var labels
# List of labels that the user wrote on the labels.txt
## @var Label_n
# List of labels
## @var b_or
# Original image push button variable
## @var b_seg
# Segmented image push button variable
## @var b_lab
# Labelled image push button variable
## @var b_pre
# Previous image push button variable
## @var b_nxt
# Next image push button variable
## @var b_sav
# Save image push button variable
## @var image_1
# Image panel variable
## @var dark
# Dark theme checkbox variable
## @var fs
# Full screen checkbox variable
## @var colors
# Variable of random colors generated when the application begins or
# restart
## @var value_sp
# Value at video frame spin box
## @var flag_save
# It takes 0 value when the user hasn't saved a file
#
# It takes 1 value when the user save a file
## @var file_vid
# It takes 0 value when the file is an image
#
# It takes 1 value when the file is a video
## @var flag_vid
# Overflow when video frame is the first or the last
## @var d_time
# List with the data of timestamps.csv
## @var file_num
# Number of file that shows the image panel
## @var frame_num
# Number of frame
## @var img_in
# Input image or frame
## @var img_copy
# Copy of input image or frame
## @var img_out
# Output image
## @var img_label
# Output labelled image
## @var vid
# Video frame
## @var length
# Length of video frames
## @var frame_act
# Actual video frame
## @var flag_rect
# It takes 0 value when the user hasn't pressed rectangle push button
#
# It takes 1 value when the user press rectangle push button
## @var flag_circle_fg
# It takes 0 value when the user hasn't pressed foreground push button
#
# It takes 1 value when the user press foreground push button
## @var flag_circle_bg
# It takes 0 value when the user hasn't pressed background push button
#
# It takes 1 value when the user press background push button
## @var ini_points
# Initial coordinates of mouse at image panel after the rectangle push
# button was pressed
## @var ti
# Previous time. This variable is update after the rectangle push button
# was pressed
    ## @var mask
    # Output mask of the GrabCut algorithm. It can take 4 possible values:
    #
    # 0 - True background
    # 1 - True foreground
    # 2 - Possible background
    # 3 - Possible foreground
    ## @var m_out
    # Output mask. Values 0 and 2 become 0; values 1 and 3 become 1.
## @var out
# Out of segmented image
    ## @var fin_points
    # While the mouse is moving, it holds the current mouse coordinates
    #
    # When the mouse is released, it holds the last coordinates recorded
    # while the mouse was moving
    ## @var start
    # It takes 0 value when the user hasn't pressed the background or foreground
    # push button. It also takes 0 value when the user presses the background or
    # foreground push button and releases the mouse over the image panel
    #
    # It takes 1 value when the user presses the background or foreground push
    # button and presses the mouse over the image panel
## @var BGD_model
# Variable exclusive of Grabcut algorithm
## @var FGD_model
# Variable exclusive of Grabcut algorithm
# Script entry point: build the Qt application, show the GUI and run the
# event loop until the window closes.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = GUI()
    sys.exit(app.exec_())
| [
"cv2.bitwise_and",
"PyQt5.QtGui.QColor",
"PyQt5.QtGui.QPixmap.fromImage",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.savetxt",
"cv2.drawContours",
"cv2.resize",
"cv2.circle",
"PyQt5.QtGui.QPalette",
"PyQt5.QtGui.QImage",
"cv2.grabCut",
"PyQt5.QtGui.QIcon",
"cv2.threshold",
"numpy.zeros",
"t... | [((7423, 7465), 'cv2.cvtColor', 'cv2.cvtColor', (['self.out', 'cv2.COLOR_BGR2GRAY'], {}), '(self.out, cv2.COLOR_BGR2GRAY)\n', (7435, 7465), False, 'import cv2\n'), ((7488, 7520), 'cv2.threshold', 'cv2.threshold', (['imgray', '(1)', '(255)', '(0)'], {}), '(imgray, 1, 255, 0)\n', (7501, 7520), False, 'import cv2\n'), ((7556, 7620), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (7572, 7620), False, 'import cv2\n'), ((11155, 11189), 'cv2.imwrite', 'cv2.imwrite', (['outfile', 'self.img_out'], {}), '(outfile, self.img_out)\n', (11166, 11189), False, 'import cv2\n'), ((11198, 11232), 'cv2.imwrite', 'cv2.imwrite', (['outfile1', 'self.img_in'], {}), '(outfile1, self.img_in)\n', (11209, 11232), False, 'import cv2\n'), ((14816, 14862), 'PyQt5.QtGui.QImage', 'QImage', (['image', 'size[1]', 'size[0]', 'step', 'qformat'], {}), '(image, size[1], size[0], step, qformat)\n', (14822, 14862), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((15405, 15411), 'time.time', 'time', ([], {}), '()\n', (15409, 15411), False, 'from time import time\n'), ((16377, 16480), 'cv2.grabCut', 'cv2.grabCut', (['self.img_in', 'self.mask', 'None', 'self.BGD_model', 'self.FGD_model', '(1)', 'cv2.GC_INIT_WITH_MASK'], {}), '(self.img_in, self.mask, None, self.BGD_model, self.FGD_model, 1,\n cv2.GC_INIT_WITH_MASK)\n', (16388, 16480), False, 'import cv2\n'), ((16625, 16683), 'cv2.bitwise_and', 'cv2.bitwise_and', (['self.img_in', 'self.img_in'], {'mask': 'self.m_out'}), '(self.img_in, self.img_in, mask=self.m_out)\n', (16640, 16683), False, 'import cv2\n'), ((21975, 22009), 'numpy.zeros', 'np.zeros', (['(10000, 3)'], {'dtype': '"""U255"""'}), "((10000, 3), dtype='U255')\n", (21983, 22009), True, 'import numpy as np\n'), ((22108, 22137), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (22116, 22137), True, 'import numpy as 
np\n'), ((22163, 22192), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (22171, 22192), True, 'import numpy as np\n'), ((22393, 22423), 'numpy.zeros', 'np.zeros', (['(640, 480)', 'np.uint8'], {}), '((640, 480), np.uint8)\n', (22401, 22423), True, 'import numpy as np\n'), ((22438, 22477), 'cv2.imread', 'cv2.imread', (['"""media/.icons/INTRO.png"""', '(1)'], {}), "('media/.icons/INTRO.png', 1)\n", (22448, 22477), False, 'import cv2\n'), ((22492, 22519), 'cv2.resize', 'cv2.resize', (['img', '(640, 480)'], {}), '(img, (640, 480))\n', (22502, 22519), False, 'import cv2\n'), ((1474, 1504), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/ICON.png"""'], {}), "('media/.icons/ICON.png')\n", (1479, 1504), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((2622, 2652), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/file.png"""'], {}), "('media/.icons/file.png')\n", (2627, 2652), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((3240, 3269), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/new.png"""'], {}), "('media/.icons/new.png')\n", (3245, 3269), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((4628, 4658), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/undo.png"""'], {}), "('media/.icons/undo.png')\n", (4633, 4658), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((4933, 4963), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/redo.png"""'], {}), "('media/.icons/redo.png')\n", (4938, 4963), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((5239, 5269), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/save.png"""'], {}), "('media/.icons/save.png')\n", (5244, 5269), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((5525, 5555), 'PyQt5.QtGui.QIcon', 'QIcon', (['"""media/.icons/exit.png"""'], {}), "('media/.icons/exit.png')\n", (5530, 5555), False, 'from 
PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((11495, 11568), 'numpy.savetxt', 'np.savetxt', (['"""media/timestamps.csv"""', 'self.d_time'], {'delimiter': '""", """', 'fmt': '"""%s"""'}), "('media/timestamps.csv', self.d_time, delimiter=', ', fmt='%s')\n", (11505, 11568), True, 'import numpy as np\n'), ((14925, 14947), 'PyQt5.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['img'], {}), '(img)\n', (14942, 14947), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((16905, 16915), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (16913, 16915), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((18537, 18547), 'PyQt5.QtGui.QPalette', 'QPalette', ([], {}), '()\n', (18545, 18547), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((20131, 20188), 'cv2.circle', 'cv2.circle', (['self.img_copy', '(x, y)', '(3)', '(255, 255, 255)', '(-1)'], {}), '(self.img_copy, (x, y), 3, (255, 255, 255), -1)\n', (20141, 20188), False, 'import cv2\n'), ((20201, 20240), 'cv2.circle', 'cv2.circle', (['self.mask', '(x, y)', '(5)', '(1)', '(-1)'], {}), '(self.mask, (x, y), 5, 1, -1)\n', (20211, 20240), False, 'import cv2\n'), ((20322, 20373), 'cv2.circle', 'cv2.circle', (['self.img_copy', '(x, y)', '(3)', '(0, 0, 0)', '(-1)'], {}), '(self.img_copy, (x, y), 3, (0, 0, 0), -1)\n', (20332, 20373), False, 'import cv2\n'), ((20386, 20425), 'cv2.circle', 'cv2.circle', (['self.mask', '(x, y)', '(5)', '(0)', '(-1)'], {}), '(self.mask, (x, y), 5, 0, -1)\n', (20396, 20425), False, 'import cv2\n'), ((21546, 21576), 'numpy.zeros', 'np.zeros', (['(480, 640)', 'np.uint8'], {}), '((480, 640), np.uint8)\n', (21554, 21576), True, 'import numpy as np\n'), ((7011, 7089), 'cv2.drawContours', 'cv2.drawContours', (['self.img_out', 'self.contours', '(-1)', '(n + 1)'], {'thickness': 'cv2.FILLED'}), '(self.img_out, self.contours, -1, n + 1, thickness=cv2.FILLED)\n', (7027, 7089), False, 'import cv2\n'), ((7139, 
7232), 'cv2.drawContours', 'cv2.drawContours', (['self.img_label', 'self.contours', '(-1)', 'self.colors[n]'], {'thickness': 'cv2.FILLED'}), '(self.img_label, self.contours, -1, self.colors[n],\n thickness=cv2.FILLED)\n', (7155, 7232), False, 'import cv2\n'), ((11065, 11071), 'time.time', 'time', ([], {}), '()\n', (11069, 11071), False, 'from time import time\n'), ((12511, 12573), 'cv2.imread', 'cv2.imread', (['self.filename[self.file_num]', 'cv2.IMREAD_UNCHANGED'], {}), '(self.filename[self.file_num], cv2.IMREAD_UNCHANGED)\n', (12521, 12573), False, 'import cv2\n'), ((12646, 12681), 'cv2.resize', 'cv2.resize', (['self.img_in', '(640, 480)'], {}), '(self.img_in, (640, 480))\n', (12656, 12681), False, 'import cv2\n'), ((12764, 12794), 'numpy.zeros', 'np.zeros', (['(480, 640)', 'np.uint8'], {}), '((480, 640), np.uint8)\n', (12772, 12794), True, 'import numpy as np\n'), ((12971, 13017), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.filename[self.file_num]'], {}), '(self.filename[self.file_num])\n', (12987, 13017), False, 'import cv2\n'), ((13926, 13961), 'cv2.resize', 'cv2.resize', (['self.img_in', '(640, 480)'], {}), '(self.img_in, (640, 480))\n', (13936, 13961), False, 'import cv2\n'), ((14044, 14074), 'numpy.zeros', 'np.zeros', (['(480, 640)', 'np.uint8'], {}), '((480, 640), np.uint8)\n', (14052, 14074), True, 'import numpy as np\n'), ((16569, 16589), 'numpy.where', 'np.where', (['comp', '(1)', '(0)'], {}), '(comp, 1, 0)\n', (16577, 16589), True, 'import numpy as np\n'), ((16962, 16980), 'PyQt5.QtGui.QColor', 'QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (16968, 16980), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17086, 17104), 'PyQt5.QtGui.QColor', 'QColor', (['(25)', '(25)', '(25)'], {}), '(25, 25, 25)\n', (17092, 17104), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17159, 17177), 'PyQt5.QtGui.QColor', 'QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (17165, 17177), 
False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17401, 17419), 'PyQt5.QtGui.QColor', 'QColor', (['(53)', '(53)', '(53)'], {}), '(53, 53, 53)\n', (17407, 17419), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17583, 17603), 'PyQt5.QtGui.QColor', 'QColor', (['(42)', '(130)', '(218)'], {}), '(42, 130, 218)\n', (17589, 17603), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17654, 17674), 'PyQt5.QtGui.QColor', 'QColor', (['(42)', '(130)', '(218)'], {}), '(42, 130, 218)\n', (17660, 17674), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17834, 17852), 'PyQt5.QtGui.QColor', 'QColor', (['(52)', '(52)', '(52)'], {}), '(52, 52, 52)\n', (17840, 17852), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((17947, 17965), 'PyQt5.QtGui.QColor', 'QColor', (['(57)', '(57)', '(57)'], {}), '(57, 57, 57)\n', (17953, 17965), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((18062, 18080), 'PyQt5.QtGui.QColor', 'QColor', (['(47)', '(47)', '(47)'], {}), '(47, 47, 47)\n', (18068, 18080), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((18181, 18199), 'PyQt5.QtGui.QColor', 'QColor', (['(67)', '(67)', '(67)'], {}), '(67, 67, 67)\n', (18187, 18199), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((18296, 18314), 'PyQt5.QtGui.QColor', 'QColor', (['(49)', '(49)', '(49)'], {}), '(49, 49, 49)\n', (18302, 18314), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((18415, 18433), 'PyQt5.QtGui.QColor', 'QColor', (['(57)', '(57)', '(57)'], {}), '(57, 57, 57)\n', (18421, 18433), False, 'from PyQt5.QtGui import QIcon, QPalette, QColor, QPixmap, QImage\n'), ((18594, 18615), 'PyQt5.QtGui.QColor', 'QColor', (['(239)', '(239)', '(239)'], {}), '(239, 239, 239)\n', (18600, 18615), False, 'from PyQt5.QtGui import QIcon, QPalette, 
QColor, QPixmap, QImage\n'), ((22750, 22774), 'random.randrange', 'random.randrange', (['(0)', '(255)'], {}), '(0, 255)\n', (22766, 22774), False, 'import random\n')] |
# Copyright 2017 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from sklearn.decomposition import PCA
from scipy.stats import multivariate_normal as gaussian
from .optimizer import get_prior_params
from .optimizer import get_posterior_params
from .optimizer import get_posterior_predictive_params
from .optimizer import optimize_maximum_likelihood
from .optimizer import calc_scatter_matrices
def get_space_walk(from_space, to_space):
    """Yield (from, to) pairs of adjacent spaces stepping from_space -> to_space.

    Valid spaces, in chain order: 'U_model' <-> 'U' <-> 'X' <-> 'D'.
    """
    forward = ['U_model', 'U', 'X', 'D']
    assert from_space in forward and to_space in forward
    # Walk along the chain in whichever direction reaches to_space.
    if forward.index(to_space) < forward.index(from_space):
        order = forward[::-1]
    else:
        order = forward
    start = order.index(from_space)
    stop = order.index(to_space)
    return zip(order[start:stop], order[start + 1:stop + 1])
def transform_D_to_X(data, pca):
    """Map raw data 'D' into preprocessed 'X' (identity when pca is None)."""
    if pca is None:
        return data
    return pca.transform(data)
def transform_X_to_U(data, inv_A, m):
    """Map preprocessed 'X' into latent 'U': (X - m) @ inv(A).T, row-wise."""
    centered = data - m
    return np.matmul(centered, inv_A.T)
def transform_U_to_U_model(data, relevant_U_dims):
    """Project latent 'U' onto the model subspace by selecting relevant dims."""
    return np.take(data, relevant_U_dims, axis=-1)
def transform_U_model_to_U(data, relevant_U_dims, U_dimensionality):
    """Embed model-space vectors back into full latent 'U'; other dims are 0."""
    full_shape = data.shape[:-1] + (U_dimensionality,)
    U = np.zeros(full_shape)
    U[..., relevant_U_dims] = data
    return U
def transform_U_to_X(data, A, m):
    """Map latent 'U' back to preprocessed 'X': U @ A.T + m, row-wise."""
    return np.matmul(data, A.T) + m
def transform_X_to_D(data, pca):
    """Map preprocessed 'X' back to data space 'D' (identity when pca is None)."""
    if pca is not None:
        return pca.inverse_transform(data)
    return data
class Model:
    """PLDA model relating data space 'D' to the model space 'U_model'
    via the chain D <-> X (PCA) <-> U (latent) <-> U_model.

    All parameters are fitted at construction time via maximum likelihood
    (see the .optimizer helper functions).
    """
    def __init__(self, row_wise_data, labels, n_principal_components=None):
        """Fit the model on 2-D `row_wise_data` (one example per row), with
        one label per row. `n_principal_components` optionally fixes the
        PCA dimensionality; otherwise it is inferred in fit()."""
        assert len(row_wise_data.shape) == 2
        assert len(labels) == row_wise_data.shape[0]
        # Fitted attributes; all populated by self.fit() below.
        self.pca = None
        self.m = None
        self.A = None
        self.Psi = None
        self.relevant_U_dims = None
        self.inv_A = None
        self.prior_params = None
        self.posterior_params = None
        self.posterior_predictive_params = None
        self.fit(row_wise_data, labels, n_principal_components)
    def calc_logp_posterior(self, v_model, category):
        """Log density of `v_model` under the diagonal-Gaussian posterior
        of `category` (in model space)."""
        assert v_model.shape[-1] == self.get_dimensionality('U_model')
        mean = self.posterior_params[category]['mean']
        cov_diag = self.posterior_params[category]['cov_diag']
        return gaussian(mean, np.diag(cov_diag)).logpdf(v_model)
    def calc_logp_posterior_predictive(self, U_model, category):
        """Log density of `U_model` under the diagonal-Gaussian posterior
        predictive distribution of `category` (in model space)."""
        assert U_model.shape[-1] == self.get_dimensionality('U_model')
        mean = self.posterior_predictive_params[category]['mean']
        cov_diag = self.posterior_predictive_params[category]['cov_diag']
        return gaussian(mean, np.diag(cov_diag)).logpdf(U_model)
    def calc_logp_marginal_likelihood(self, U_model):
        """ Computes the log marginal likelihood on axis=-2. """
        assert U_model.shape[-1] == self.get_dimensionality('U_model')
        if len(U_model.shape) == 1:
            U_model = U_model[None, :]
        n = U_model.shape[-2]
        psi_diag = self.prior_params['cov_diag']
        n_psi_plus_eye = n * psi_diag + 1
        # Closed form for integrating the category mean out of a diagonal
        # Gaussian model: normalizing constant, data term, mean correction.
        log_constant = -.5 * n * np.log(2 * np.pi)
        log_constant += -.5 * np.log(n_psi_plus_eye)
        sum_of_squares = np.sum(U_model ** 2, axis=-2)
        log_exponent_1 = -.5 * sum_of_squares
        mean = U_model.mean(axis=-2)
        log_exponent_2 = .5 * (n ** 2 * psi_diag * mean ** 2)
        log_exponent_2 /= n_psi_plus_eye
        logp_ml = log_constant + log_exponent_1 + log_exponent_2
        # Dimensions are modeled as independent, so per-dim terms sum.
        logp_ml = np.sum(logp_ml, axis=-1)
        return logp_ml
    def calc_logp_prior(self, v_model):
        """Log density of `v_model` under the diagonal-Gaussian prior."""
        assert v_model.shape[-1] == self.get_dimensionality('U_model')
        mean = self.prior_params['mean']
        cov_diag = self.prior_params['cov_diag']
        return gaussian(mean, np.diag(cov_diag)).logpdf(v_model)
    def calc_same_diff_likelihood_ratio(self, U_model_p, U_model_g):
        """Log likelihood ratio that probe `U_model_p` and gallery
        `U_model_g` belong to the same category versus different ones."""
        assert U_model_p.shape[-1] == self.get_dimensionality('U_model')
        assert U_model_g.shape[-1] == self.get_dimensionality('U_model')
        U_model_same = np.concatenate([
            U_model_p,
            U_model_g,
        ])
        ll_same = self.calc_logp_marginal_likelihood(U_model_same)
        ll_p = self.calc_logp_marginal_likelihood(U_model_p)
        ll_g = self.calc_logp_marginal_likelihood(U_model_g)
        # log [ p(both together) / (p(probe alone) * p(gallery alone)) ]
        ll_same = ll_same - (ll_p + ll_g)
        return ll_same
    def fit(self, data, labels, n_principal_components=None):
        """Estimate PCA, the whitening transform, and the prior, posterior
        and posterior-predictive parameters from labelled data."""
        if n_principal_components is None:
            # Default the PCA dimensionality to the rank of the
            # within-class scatter matrix.
            S_b, S_w = calc_scatter_matrices(data, labels)
            matrix_rank = np.linalg.matrix_rank(S_w)
        else:
            matrix_rank = n_principal_components
        if matrix_rank != data.shape[-1]:
            self.pca = PCA(n_components=matrix_rank)
            self.pca.fit(data)
        X = self.transform(data, from_space='D', to_space='X')
        self.m, self.A, self.Psi, self.relevant_U_dims, self.inv_A = \
            optimize_maximum_likelihood(X, labels)
        U_model = self.transform(X, from_space='X', to_space='U_model')
        self.prior_params = \
            get_prior_params(self.Psi, self.relevant_U_dims)
        self.posterior_params = \
            get_posterior_params(U_model, labels, self.prior_params)
        self.posterior_predictive_params = \
            get_posterior_predictive_params(self.posterior_params)
    def get_dimensionality(self, space):
        """Number of dimensions of `space` ('U_model', 'U', 'X' or 'D');
        raises ValueError for any other name."""
        if space == 'U_model':
            return self.relevant_U_dims.shape[0]
        elif space == 'U':
            return self.A.shape[0]
        elif space == 'X':
            return self.A.shape[0]
        elif space == 'D':
            if self.pca is None:
                return self.m.shape[0]
            else:
                # NOTE(review): PCA.n_features_ was removed in newer
                # scikit-learn (replaced by n_features_in_) -- confirm the
                # pinned sklearn version still provides it.
                return self.pca.n_features_
        else:
            raise ValueError
    def transform(self, data, from_space, to_space):
        """ Potential_spaces: 'D' <---> 'X' <---> 'U' <---> 'U_model'.
        DESCRIPTION
         There are 6 basic transformations to move back and forth
          between the data space, 'D', and the model's space, 'U_model':
         1. From D to X.
            (i.e. from data space to preprocessed space)
            Uses the minimum number of components from
             Principal Components Analysis that
             captures 100% of the variance in the data.
         2. From X to U.
            (i.e. from preprocessed space to latent space)
            See the bottom of p.533 of Ioffe 2006.
         3. From U to U_model.
            (i.e. from latent space to the model space)
            See Fig 2 on p.537 of Ioffe 2006.
         4. From U_model to U.
            (i.e. from the model space to latent space)
         5. From U to X.
            (i.e. from the latent space to the preprocessed space)
         6. From X to D.
            (i.e. from the preprocessed space to the data space)
        """
        # Promote a single vector to a 1-row matrix.
        if len(data.shape) == 1:
            data = data[None, :]
        if from_space == 'D' and to_space == 'X':
            return transform_D_to_X(data, self.pca)
        elif from_space == 'X' and to_space == 'U':
            return transform_X_to_U(data, self.inv_A, self.m)
        elif from_space == 'U' and to_space == 'U_model':
            return transform_U_to_U_model(data, self.relevant_U_dims)
        elif from_space == 'U_model' and to_space == 'U':
            dim = self.get_dimensionality('U')
            return transform_U_model_to_U(data, self.relevant_U_dims, dim)
        elif from_space == 'U' and to_space == 'X':
            return transform_U_to_X(data, self.A, self.m)
        elif from_space == 'X' and to_space == 'D':
            return transform_X_to_D(data, self.pca)
        else:
            # Non-adjacent spaces: chain the pairwise transforms.
            transformed = data
            for space_1, space_2 in get_space_walk(from_space, to_space):
                transformed = self.transform(transformed, space_1, space_2)
            return transformed
| [
"numpy.sum",
"numpy.log",
"numpy.zeros",
"numpy.linalg.matrix_rank",
"sklearn.decomposition.PCA",
"numpy.matmul",
"numpy.diag",
"numpy.concatenate"
] | [((1802, 1830), 'numpy.matmul', 'np.matmul', (['(data - m)', 'inv_A.T'], {}), '(data - m, inv_A.T)\n', (1811, 1830), True, 'import numpy as np\n'), ((2051, 2066), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2059, 2066), True, 'import numpy as np\n'), ((2167, 2187), 'numpy.matmul', 'np.matmul', (['data', 'A.T'], {}), '(data, A.T)\n', (2176, 2187), True, 'import numpy as np\n'), ((3990, 4019), 'numpy.sum', 'np.sum', (['(U_model ** 2)'], {'axis': '(-2)'}), '(U_model ** 2, axis=-2)\n', (3996, 4019), True, 'import numpy as np\n'), ((4291, 4315), 'numpy.sum', 'np.sum', (['logp_ml'], {'axis': '(-1)'}), '(logp_ml, axis=-1)\n', (4297, 4315), True, 'import numpy as np\n'), ((4849, 4887), 'numpy.concatenate', 'np.concatenate', (['[U_model_p, U_model_g]'], {}), '([U_model_p, U_model_g])\n', (4863, 4887), True, 'import numpy as np\n'), ((3893, 3910), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3899, 3910), True, 'import numpy as np\n'), ((3941, 3963), 'numpy.log', 'np.log', (['n_psi_plus_eye'], {}), '(n_psi_plus_eye)\n', (3947, 3963), True, 'import numpy as np\n'), ((5369, 5395), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['S_w'], {}), '(S_w)\n', (5390, 5395), True, 'import numpy as np\n'), ((5526, 5555), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'matrix_rank'}), '(n_components=matrix_rank)\n', (5529, 5555), False, 'from sklearn.decomposition import PCA\n'), ((3091, 3108), 'numpy.diag', 'np.diag', (['cov_diag'], {}), '(cov_diag)\n', (3098, 3108), True, 'import numpy as np\n'), ((3435, 3452), 'numpy.diag', 'np.diag', (['cov_diag'], {}), '(cov_diag)\n', (3442, 3452), True, 'import numpy as np\n'), ((4574, 4591), 'numpy.diag', 'np.diag', (['cov_diag'], {}), '(cov_diag)\n', (4581, 4591), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import norm
import GPy
class Cluster(object):
    """Base hypothesis cluster: a flat prior over `n_arms` bandit arms.

    The base class only records (arm, reward) history and reports the
    prior mean/variance for every arm; subclasses override the estimate
    machinery.
    """

    def __init__(self, n_arms=8, mu_init=0.0, var_init=1.0):
        # (arm, reward) observation tuples, in the order received.
        self.history = list()
        self.n_arms = n_arms
        # Prior mean / variance reported for every arm.
        self.mu_init = mu_init
        self.var_init = var_init

    def update(self, a, r, messages=False):
        """Record reward `r` for arm `a`; the base class keeps no estimates."""
        self.history.append((a, r))

    def update_estimates(self, messages=False):
        """No-op hook; subclasses refit their internal estimates here."""
        pass

    def get_obs_log_prob(self, a, r):
        """(Log) probability of observing (a, r); constant in the base class."""
        return 1.0

    def deepcopy(self):
        """Return an independent copy preserving history AND prior parameters.

        Bug fix: the copy was previously built with the *default*
        mu_init/var_init, silently discarding this cluster's prior.
        """
        copy = Cluster(self.n_arms, self.mu_init, self.var_init)
        copy.history = [(a, r) for a, r in self.history]  # deep enough: tuples are immutable
        copy.update_estimates()
        return copy

    def get_mean(self, a):
        """Prior mean of arm `a`."""
        return self.mu_init

    def get_var(self, a):
        """Prior variance of arm `a`."""
        return self.var_init

    def get_mean_var(self, a):
        """(mean, variance) pair for arm `a`."""
        return self.get_mean(a), self.get_var(a)
class NoiseCluster(Cluster):
    """ each hypothesis cluster is defined by n independent bandits
    with the rewards in each normally distributed"""

    def __init__(self, n_arms=8, mu_init=0.0, var_init=1.0):
        Cluster.__init__(self, n_arms, mu_init, var_init)
        # Updated estimates: per-arm means and a shared reward variance.
        self.mus = np.zeros(n_arms) + mu_init
        self.var = var_init

    def update(self, a, r, messages=False):
        """Record the observation and refit the per-arm estimates."""
        self.history.append((a, r))
        self.update_estimates(messages)

    def update_estimates(self, messages=None):
        """Maximum-likelihood estimates of per-arm means and shared variance."""
        arm_tot_rewards = np.zeros(self.mus.shape)
        arm_counts = np.zeros(self.mus.shape)
        for a, r in self.history:
            arm_tot_rewards[a] += r
            arm_counts[a] += 1
        # Fold in the prior mean with the weight of one pseudo-observation.
        arm_tot_rewards += self.mu_init
        arm_counts += 1
        self.mus = arm_tot_rewards / arm_counts
        # With the prior pseudo-observation, the variance can be estimated
        # from as little as one real observation; fall back to the prior
        # variance until then.
        n = len(self.history)
        if n > 1:
            # Square of the sample standard deviation around per-arm means.
            self.var = np.sum([(self.mus[a] - r) ** 2 for a, r in self.history]) / (n - 1)
        else:
            self.var = self.var_init

    def get_obs_log_prob(self, a, r):
        """Gaussian log-likelihood of reward `r` for arm `a` under the fit."""
        return norm.logpdf(r, loc=self.mus[a], scale=np.sqrt(self.var))

    def get_mean(self, a):
        """Fitted mean of arm `a`."""
        return self.mus[a]

    def get_var(self, a):
        # Bug fix: report the *fitted* variance. The inherited Cluster.get_var
        # returned the prior var_init even after fitting, which disagreed with
        # get_obs_log_prob (uses self.var) and deepcopy (propagates self.var).
        return self.var

    def get_mean_var(self, a):
        """(mean, variance) pair for arm `a`."""
        return self.get_mean(a), self.get_var(a)

    def deepcopy(self):
        """ Returns a deep copy of the cluster. Used when augmenting hypotheses """
        copy = NoiseCluster(n_arms=len(self.mus), mu_init=self.mu_init, var_init=self.var)
        copy.history = [(a, r) for a, r in self.history]  # deep enough: tuples are immutable
        copy.update_estimates()
        return copy
class GPCluster(Cluster):
    """Cluster whose arm-value function is modelled with a GPy GP regression."""

    def __init__(self, n_arms=8, mu_init=0.0, var_init=1.0, kernel=None):
        Cluster.__init__(self, n_arms, mu_init, var_init)
        self.X = np.zeros((1, 0))
        self.y = np.zeros(0)
        self.inv_A = np.eye(1)
        self.m = None  # fitted GPy model; None until the first update
        self.y_offset = 0.0  # mean reward, subtracted before fitting
        if kernel is not None:
            self.kernel = kernel
        else:
            self.kernel = GPy.kern.Linear
        # Kwargs needed to rebuild an equivalent (empty) cluster in deepcopy().
        self.deepcopy_kwargs = dict(n_arms=n_arms, mu_init=mu_init, var_init=var_init, kernel=self.kernel)

    def update(self, a, r, messages=False):
        """Record the observation and refit the GP."""
        self.history.append((a, r))
        self.update_estimates(messages)

    def update_estimates(self, messages=False):
        """Refit the GP on the full (arm, reward) history."""
        n_obs = len(self.history)
        if n_obs > 0:
            X = np.reshape([a for a, _ in self.history], (n_obs, 1))
            y = np.reshape([r for _, r in self.history], (n_obs, 1))
            # Centre the targets so the GP models deviations from the mean.
            # (This assignment was previously duplicated; once is enough.)
            self.y_offset = np.mean(y)
            if type(self.kernel) == list:
                # Sum the listed kernels into one composite kernel.
                k = None
                for k0 in self.kernel:
                    if k is None:
                        k = k0(input_dim=1)
                    else:
                        k += k0(input_dim=1)
            else:
                k = self.kernel(input_dim=1)
            self.m = GPy.models.GPRegression(X, y - self.y_offset, k)
            self.m.optimize(messages=messages)

    def get_obs_log_prob(self, a, r):
        # NOTE(review): implicitly returns None before the first fit --
        # callers appear to update before querying; confirm.
        if self.m is not None:
            return self.m.log_predictive_density(np.array([[a]]), np.array([[r - self.y_offset]]))[0][0]

    def get_model_prob(self):
        """Log marginal likelihood of the fitted GP."""
        return self.m.log_likelihood()

    def get_mean_var(self, a):
        """Posterior mean and variance at arm `a` (prior values before fitting)."""
        if self.m is None:
            return self.mu_init, self.var_init
        mu, var = self.m.predict(np.array([[a]]))
        return mu + self.y_offset, var

    def get_mean(self, a):
        # Bug fix: get_mean_var already adds y_offset back; adding it again
        # here double-counted the offset.
        mu, _ = self.get_mean_var(a)
        return mu

    def get_var(self, a):
        """Posterior variance at arm `a`."""
        _, var = self.get_mean_var(a)
        return var

    def deepcopy(self):
        """ Returns a deep copy of the cluster. Used when augmenting hypotheses """
        copy = GPCluster(**self.deepcopy_kwargs)
        copy.history = [(a, r) for a, r in self.history]  # deep enough: tuples are immutable
        copy.update_estimates()
        return copy
def label_switch(assignments):
    """ utility function that ensures the 0th block is always in cluster 0, the 1st
    block is always in [0, 1], etc. Prevents a label switching problem"""
    # Map each label to a canonical id in order of first appearance.
    relabel = dict()
    canonical = []
    for k in assignments:
        if k not in relabel:
            relabel[k] = len(relabel)
        canonical.append(relabel[k])
    return canonical
class Hypothesis(object):
"""docstring for Hypothesis"""
def __init__(self, n_arms=8, mu_init=0.0, var_init=1.0, alpha=1.0, cluster_class=NoiseCluster, kernel=None):
"""
Parameters
----------
n_arms: int (default 8)
number of bandits in a block
mu_init: float (default 0.)
prior over the mean of the bandit arms
var_init: float (default 1.)
initial value for the sigma of the arm
alpha: float (default 1.)
concentration parameter of the prior
cluster_class: Cluster object (default NoiseCluster)
internal function to estimate value over arms
kernel: GPy kernel (None)
only valid with GPClusters, defaults to GPy.kern.linear for GPClusters
"""
self.cluster_assignments = list()
self.hypotheis_kwargs = dict(n_arms=n_arms, mu_init=mu_init, var_init=var_init, alpha=alpha,
cluster_class=cluster_class, kernel=kernel)
self.alpha = alpha
self.mu_init = mu_init
self.var_init = var_init
# initialize the clusters
self.clusters = dict()
# initialize the log prior probability
self.log_prior = 0.0
# initialize the log likelihood
self.log_likelihood = 0.0
# create a list of experiences for posterior calculations
self.experiences = list()
# when we create a cluster, use this class
self.cluster_class = cluster_class
self.cluster_kwargs = dict(n_arms=n_arms, mu_init=mu_init, var_init=var_init)
if kernel is not None:
self.cluster_kwargs['kernel'] = kernel
def update(self, block, arm, reward):
k = self.cluster_assignments[block]
cluster = self.clusters[k]
cluster.update(arm, reward)
self.clusters[k] = cluster
# Cache the experience for later
self.experiences.append(tuple([block, arm, reward]))
    def get_mean(self, block, arm):
        """
        Expected reward of `arm` for `block`.

        For a known block the assigned cluster's mean is returned. For a
        brand-new block (index == len(cluster_assignments)) the mean is a
        mixture over existing clusters weighted by the CRP prior, with
        weight alpha on a fresh cluster at the prior mean mu_init.

        :param block: block index (may be one past the last known block)
        :param arm: arm index
        :return: mu
        """
        # special case new blocks by integrating over the prior
        if block == len(self.cluster_assignments):
            # get the prior probability weights (cluster sizes, then alpha)
            w = [np.sum(np.array(self.cluster_assignments) == ii) for ii in set(self.cluster_assignments)]
            w.append(self.alpha)
            w /= np.sum(w)  # normalize the prior
            # NOTE(review): indexing self.clusters with range(len(w) - 1)
            # assumes cluster keys are the contiguous canonical labels
            # 0..K-1 (cf. label_switch) -- confirm.
            mu = w[-1] * self.mu_init
            for k in range(len(w) - 1):
                cluster = self.clusters[k]
                mu += w[k] * cluster.get_mean(arm)
            return mu
        cluster = self.clusters[self.cluster_assignments[block]]
        return cluster.get_mean(arm)
def get_var_prior(self, block, arm):
""" this is for the special case that is a new block that has not been seen. Function returns
the terms needed to calculate the variance over the full distribution."""
# get the prior probability weights
w = [np.sum(np.array(self.cluster_assignments) == ii) for ii in set(self.cluster_assignments)]
w.append(self.alpha)
w /= np.sum(w) # normalize the prior
# account for the possibility of a new cluster
var0 = w[-1] * self.var_init
var1 = w[-1] * (self.mu_init ** 2)
var2 = w[-1] * self.mu_init
for ii, cluster in enumerate(self.clusters.itervalues()):
# the variance calculation:
# var(f) = sum(w*sigma^2) + sum(w*mu^2) - (sum(w*mu))^2
mu_a, var = cluster.get_mean_var(arm)
var0 += w[ii] * cluster.get_var(arm)
var1 += w[ii] * (mu_a ** 2)
var2 += w[ii] * mu_a
return var0 + var1 - (var2 ** 2)
def get_var(self, block, a):
cluster = self.clusters[self.cluster_assignments[block]]
return cluster.get_var(a)
def update_log_likelihood(self):
self.log_likelihood = 0.0
for b, arm, rew in self.experiences:
cluster = self.clusters[self.cluster_assignments[b]]
self.log_likelihood += cluster.get_obs_log_prob(arm, rew)
def get_log_post(self):
return self.log_likelihood + self.log_prior
def get_obs_logprob(self, block, arm, r):
if block < len(self.cluster_assignments):
k = self.cluster_assignments[block]
cluster = self.clusters[k]
return cluster.get_obs_log_prob(arm, r)
else:
return norm.logpdf(r, loc=self.mu_init, scale=np.sqrt(self.var_init))
def update_crp_prior(self):
""" use the chinese restaurant process to calculate the prior probability of a given
set of assignments"""
if len(self.cluster_assignments) > 0:
k = max(self.cluster_assignments) + 1
else:
k = 1
n_k = np.zeros(k)
self.log_prior = 0.0
# b/c the CRP is exchangeable, it's easiest to just run through the process
for k0 in self.cluster_assignments:
if n_k[k0] == 0:
# log prob of a new cluster
self.log_prior += np.log(self.alpha / (np.sum(n_k) + self.alpha))
else:
# log prob of cluster reuse
self.log_prior += np.log(n_k[k0] / (np.sum(n_k) + self.alpha))
n_k[k0] += 1
def update_posterior(self):
self.update_log_likelihood()
self.update_crp_prior()
def augment_assignment(self, block, k):
"""
Parameters
----------
block: int
the block index
k: int
the cluster index
"""
# only augment new blocks, which are sequential for this data!
if block < len(self.cluster_assignments):
# return if this is not a new block of trials
return
# check if cluster "k" has already been assigned
if k not in self.cluster_assignments:
# if not, add a new reward cluster
self.clusters[k] = self.cluster_class(**self.cluster_kwargs)
self.cluster_assignments.append(k)
self.cluster_assignments = label_switch(self.cluster_assignments) # this is a check
self.update_crp_prior()
def deepcopy(self):
h_copy = Hypothesis(**self.hypotheis_kwargs)
h_copy.cluster_assignments = [k for k in self.cluster_assignments]
h_copy.clusters = {k: cluster.deepcopy() for k, cluster in self.clusters.iteritems()}
h_copy.experiences = [(b, a, r) for b, a, r in self.experiences]
h_copy.log_prior = self.log_prior
return h_copy
# make hypotheses hashable
def __hash__(self):
# define the hash key as a function of the
hash_key = ''
for k in self.cluster_assignments:
hash_key += str(k)
return hash(hash_key)
    def __eq__(self, other):
        # equality is delegated to the assignment-based hash (see __hash__)
        return self.__hash__() == other.__hash__()
| [
"numpy.sum",
"GPy.models.GPRegression",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"numpy.eye",
"numpy.sqrt"
] | [((1611, 1635), 'numpy.zeros', 'np.zeros', (['self.mus.shape'], {}), '(self.mus.shape)\n', (1619, 1635), True, 'import numpy as np\n'), ((1657, 1681), 'numpy.zeros', 'np.zeros', (['self.mus.shape'], {}), '(self.mus.shape)\n', (1665, 1681), True, 'import numpy as np\n'), ((3377, 3393), 'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (3385, 3393), True, 'import numpy as np\n'), ((3411, 3422), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (3419, 3422), True, 'import numpy as np\n'), ((3444, 3453), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (3450, 3453), True, 'import numpy as np\n'), ((9201, 9210), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (9207, 9210), True, 'import numpy as np\n'), ((10894, 10905), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (10902, 10905), True, 'import numpy as np\n'), ((1313, 1329), 'numpy.zeros', 'np.zeros', (['n_arms'], {}), '(n_arms)\n', (1321, 1329), True, 'import numpy as np\n'), ((3973, 4025), 'numpy.reshape', 'np.reshape', (['[a for a, _ in self.history]', '(n_obs, 1)'], {}), '([a for a, _ in self.history], (n_obs, 1))\n', (3983, 4025), True, 'import numpy as np\n'), ((4042, 4094), 'numpy.reshape', 'np.reshape', (['[r for _, r in self.history]', '(n_obs, 1)'], {}), '([r for _, r in self.history], (n_obs, 1))\n', (4052, 4094), True, 'import numpy as np\n'), ((4123, 4133), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4130, 4133), True, 'import numpy as np\n'), ((4162, 4172), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (4169, 4172), True, 'import numpy as np\n'), ((4513, 4561), 'GPy.models.GPRegression', 'GPy.models.GPRegression', (['X', '(y - self.y_offset)', 'k'], {}), '(X, y - self.y_offset, k)\n', (4536, 4561), False, 'import GPy\n'), ((4989, 5004), 'numpy.array', 'np.array', (['[[a]]'], {}), '([[a]])\n', (4997, 5004), True, 'import numpy as np\n'), ((8454, 8463), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (8460, 8463), True, 'import numpy as np\n'), ((2469, 2528), 'numpy.sum', 'np.sum', 
(['[((self.mus[a] - r) ** 2) for a, r in self.history]'], {}), '([((self.mus[a] - r) ** 2) for a, r in self.history])\n', (2475, 2528), True, 'import numpy as np\n'), ((2680, 2697), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (2687, 2697), True, 'import numpy as np\n'), ((9076, 9110), 'numpy.array', 'np.array', (['self.cluster_assignments'], {}), '(self.cluster_assignments)\n', (9084, 9110), True, 'import numpy as np\n'), ((10570, 10592), 'numpy.sqrt', 'np.sqrt', (['self.var_init'], {}), '(self.var_init)\n', (10577, 10592), True, 'import numpy as np\n'), ((4726, 4741), 'numpy.array', 'np.array', (['[[a]]'], {}), '([[a]])\n', (4734, 4741), True, 'import numpy as np\n'), ((4743, 4774), 'numpy.array', 'np.array', (['[[r - self.y_offset]]'], {}), '([[r - self.y_offset]])\n', (4751, 4774), True, 'import numpy as np\n'), ((8321, 8355), 'numpy.array', 'np.array', (['self.cluster_assignments'], {}), '(self.cluster_assignments)\n', (8329, 8355), True, 'import numpy as np\n'), ((11191, 11202), 'numpy.sum', 'np.sum', (['n_k'], {}), '(n_k)\n', (11197, 11202), True, 'import numpy as np\n'), ((11332, 11343), 'numpy.sum', 'np.sum', (['n_k'], {}), '(n_k)\n', (11338, 11343), True, 'import numpy as np\n')] |
import logging
import os
import numpy as np
from scipy.stats import spearmanr
from scipy.cluster.hierarchy import linkage, dendrogram
from scipy.special import comb
from pandas import DataFrame
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import tight_layout, figure, subplot, savefig, show, setp
import pandas as pd
import mpld3
from plottingscripts.plotting.scatter import plot_scatter_plot
from aslib_scenario.aslib_scenario import ASlibScenario
from asapy.utils.util_funcs import get_cdf_x_y
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "MIT"
__email__ = "<EMAIL>"
class PerformanceAnalysis(object):
    def __init__(self,
                 output_dn: str,
                 scenario: ASlibScenario):
        '''
        Constructor
        Arguments
        ---------
        output_dn:str
            output directory name
        scenario: ASlibScenario
            scenario providing performance/feature/runstatus data
        '''
        self.logger = logging.getLogger("Performance Analysis")
        self.scenario = scenario
        # all plots are written below <output_dn>/performance_plots
        self.output_dn = os.path.join(output_dn, "performance_plots")
        if not os.path.isdir(self.output_dn):
            os.mkdir(self.output_dn)
    def reduce_algos(self, max_algos: int):
        '''
            use a greedy forward search wrt VBS performance
            to shrink the set of algorithms to max_algos
            Arguments
            ---------
            max_algos: int
                maximal number of algorithms to be selected
            Returns
            -------
            typing.List[(str,float)]
                list of tuples (algorithm, performance); scores are for
                minimization (negated if the scenario maximizes)
        '''
        performance_data = self.scenario.performance_data
        # NOTE: this negates the scenario's DataFrame *in place* so the greedy
        # search can always minimize; it is flipped back at the end
        if self.scenario.maximize[0]:
            performance_data *= -1
        # start from the best single algorithm (lowest mean score)
        bsa = performance_data.mean(axis=0).idxmin()
        bsa_score = performance_data.mean(axis=0).min()
        selected_algos = [[bsa,bsa_score]]
        remaining_algos = set(self.scenario.algorithms)
        remaining_algos.remove(bsa)
        for i in range(1, max_algos):
            sels = [a[0] for a in selected_algos]
            # best_algo = [candidate, VBS score of current selection + candidate]
            best_algo = [
                None, self.__get_vbs(performance_data=performance_data[sels])]
            for algo in remaining_algos:
                sels_ = sels[:]
                sels_.append(algo)
                vbs = self.__get_vbs(
                    performance_data=performance_data[sels_])
                if vbs < best_algo[1]:
                    best_algo = [algo, vbs]
            # stop early when no candidate improves the VBS estimate
            if best_algo[0] is None:
                break
            selected_algos.append(best_algo)
            remaining_algos.remove(best_algo[0])
            self.logger.debug(best_algo)
        self.logger.warning("Because of algorithm filtering, we lost %f of VBS estimate." % (
            best_algo[1] - self.__get_vbs(performance_data=performance_data)))
        # undo the in-place negation from above
        if self.scenario.maximize[0]:
            performance_data *= -1
        return selected_algos
    def get_greedy_portfolio_constr(self):
        '''
        using greedy forward selection wrt to VBS optimization,
        iteratively build portfolio
        Returns
        -------
        str
            html table, one row per added algorithm with the resulting VBS score
        '''
        algo_scores = self.reduce_algos(max_algos=len(self.scenario.algorithms))
        algos = ["Adding %s" %(a[0]) for a in algo_scores]
        if self.scenario.maximize[0]:
            # reduce_algos works on negated data -> flip the scores back
            scores = ["%.2f" %(a[1]*-1) for a in algo_scores]
        else:
            scores = ["%.2f" %(a[1]) for a in algo_scores]
        df = DataFrame(data=scores, index=algos, columns=["VBS score"])
        return df.to_html()
def get_baselines(self):
'''
get baselines: best single algorithm and VBS
Returns
-------
str
html table with entries for bsa and vbs
'''
performance_data = self.scenario.performance_data
table_data = []
if self.scenario.maximize[0]:
maxis = performance_data.max(axis=1)
vbs_score = np.mean(performance_data.max(axis=1))
algo_perfs = performance_data.mean(axis=0)
best_indx = algo_perfs.idxmax()
bsa = algo_perfs[best_indx]
else:
vbs_score = np.mean(performance_data.min(axis=1))
algo_perfs = performance_data.mean(axis=0)
best_indx = algo_perfs.idxmin()
bsa = algo_perfs[best_indx]
unsolvable = int(np.sum(np.sum(self.scenario.runstatus_data.values == "ok", axis=1) == 0))
table_data.append(["Virtual Best Algorithm", vbs_score])
table_data.append(["Best Single Algorithm (%s)" % (best_indx), bsa])
table_data.append(["Instances not solved by any algorithm", unsolvable])
if self.scenario.performance_type[0] == "runtime":
n_inst = len(self.scenario.instances)
vbs_clean = (vbs_score*n_inst - 10*self.scenario.algorithm_cutoff_time*unsolvable) / (n_inst - unsolvable)
bsa_clean = (bsa*n_inst - 10*self.scenario.algorithm_cutoff_time*unsolvable) / (n_inst - unsolvable)
table_data.append(["VBS (w/o unsolved instances)", vbs_clean])
table_data.append(["Best Single Algorithm (w/o unsolved instances)", bsa_clean])
df = pd.DataFrame(data=list(map(lambda x: x[1], table_data)), index=list(
map(lambda x: x[0], table_data)), columns=[""])
return df.to_html(header=False)
    def scatter_plots(self, plot_log_perf: bool=False):
        '''
            generate scatter plots of all pairs of algorithms in the performance data of the scenario
            and save them in the output directory
            Arguments
            ---------
            plot_log_perf: bool
                plot perf on log scale
            Returns
            -------
            list of (algo_1, algo_2, file name) tuples, one per generated plot
        '''
        matplotlib.pyplot.close()
        self.logger.info("Plotting scatter plots........")
        plots = []
        self.algorithms = self.scenario.algorithms
        n_algos = len(self.scenario.algorithms)
        if self.scenario.performance_type[0] == "runtime":
            max_val = self.scenario.algorithm_cutoff_time
        else:
            max_val = self.scenario.performance_data.max().max()
        # one scatter plot per unordered pair of algorithms
        for i in range(n_algos):
            for j in range(i + 1, n_algos):
                algo_1 = self.scenario.algorithms[i]
                algo_2 = self.scenario.algorithms[j]
                y_i = self.scenario.performance_data[algo_1].values
                y_j = self.scenario.performance_data[algo_2].values
                matplotlib.pyplot.close()
                if self.scenario.performance_type[0] == "runtime":
                    # runtime scenarios use the plottingscripts helper
                    fig = plot_scatter_plot(x_data=y_i, y_data=y_j, max_val=max_val,
                                            labels=[algo_1, algo_2],
                                            metric=self.scenario.performance_type[0])
                else:
                    fig = figure(1, dpi=100)
                    ax1 = subplot(aspect='equal')
                    ax1.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
                    ax1.scatter(y_i, y_j, marker='x', c='black')
                    ax1.set_xlabel(algo_1, fontsize=20)
                    ax1.set_ylabel(algo_2, fontsize=20)
                    if plot_log_perf:
                        ax1.set_xscale("log")
                        ax1.set_yscale("log")
                    fig.tight_layout()
                # "/" would create sub-directories in the file name
                out_name = os.path.join(self.output_dn,
                                        "scatter_%s_%s.png" % (algo_1.replace("/", "_"), algo_2.replace("/", "_")))
                fig.savefig(out_name)
                plots.append((algo_1, algo_2, out_name))
        return plots
    def correlation_plot(self):
        '''
            generate correlation plot using spearman correlation coefficient and ward clustering
            Returns
            -------
            file name of saved heatmap plot (a dendrogram is saved as a side effect)
        '''
        matplotlib.pyplot.close()
        self.logger.info("Plotting correlation plots........")
        perf_data = self.scenario.performance_data.values
        algos = list(self.scenario.performance_data.columns)
        n_algos = len(algos)
        data = np.zeros((n_algos, n_algos)) + 1  # similarity
        # pairwise Spearman correlation between algorithm performance columns
        for i in range(n_algos):
            for j in range(i + 1, n_algos):
                y_i = np.array(perf_data[:, i],dtype=np.float64)
                y_j = np.array(perf_data[:, j],dtype=np.float64)
                # all-zero columns are constant -> add tiny random jitter so
                # spearmanr is well defined
                if np.sum(perf_data[:, i]) == 0:
                    y_i += np.random.rand(y_i.shape[0])*0.00001
                if np.sum(perf_data[:, j]) == 0:
                    y_j += np.random.rand(y_j.shape[0])*0.00001
                rho, p = spearmanr(y_i, y_j)
                data[i, j] = rho
                data[j, i] = rho
        link = linkage(data * -1, 'ward')  # input is distance -> * -1
        # plot clustering
        fig, ax = plt.subplots()
        dendrogram(link, labels=algos, orientation='right')
        out_plot = os.path.join(self.output_dn, "algo_clustering.png")
        plt.savefig(out_plot, format="png")
        matplotlib.pyplot.close()
        # replay the linkage merges to obtain the leaf order of the clustering
        sorted_algos = [[a] for a in algos]
        for l in link:
            new_cluster = sorted_algos[int(l[0])][:]
            new_cluster.extend(sorted_algos[int(l[1])][:])
            sorted_algos.append(new_cluster)
        sorted_algos = sorted_algos[-1]
        # resort data
        indx_list = []
        for a in algos:
            indx_list.append(sorted_algos.index(a))
        indx_list = np.argsort(indx_list)
        data = data[indx_list, :]
        data = data[:, indx_list]
        fig, ax = plt.subplots()
        heatmap = ax.pcolor(data, cmap=plt.cm.Blues)
        # put the major ticks at the middle of each cell
        ax.set_xticks(np.arange(data.shape[0]) + 0.5, minor=False)
        ax.set_yticks(np.arange(data.shape[1]) + 0.5, minor=False)
        plt.xlim(0, data.shape[0])
        plt.ylim(0, data.shape[0])
        # want a more natural, table-like display
        ax.invert_yaxis()
        ax.xaxis.tick_top()
        ax.set_xticklabels(sorted_algos, minor=False)
        ax.set_yticklabels(sorted_algos, minor=False)
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=90, fontsize=12, ha="left")
        labels = ax.get_yticklabels()
        plt.setp(labels, rotation=0, fontsize=12)
        fig.colorbar(heatmap)
        plt.tight_layout()
        out_plot = os.path.join(self.output_dn, "correlation_plot.png")
        plt.savefig(out_plot, format="png")
        return out_plot
    def get_contribution_values(self):
        '''
        contribution value computation: average performance, marginal
        contribution and Shapley values per algorithm, each rendered as a
        pie chart
        Returns
        ------
        list of file names of the saved pie plots (averages, marginales, shapleys)
        '''
        self.logger.info("Get contribution scores........")
        algorithms = self.scenario.algorithms
        instances = self.scenario.instances
        scenario = self.scenario
        n_insts = len(scenario.instances)
        performance_data = scenario.performance_data
        if self.scenario.maximize[0] == True:
            # Assume minimization
            performance_data = performance_data * -1
        max_perf = performance_data.max().max()
        is_time_scenario = self.scenario.performance_type[0] == "runtime"
        # metric for the Shapley computation: higher is better, so report
        # the gap to the cutoff (runtime) or to the worst score (quality)
        def metric(algo, inst):
            if is_time_scenario:
                perf = scenario.algorithm_cutoff_time - \
                    min(scenario.algorithm_cutoff_time,
                        performance_data[algo][inst])
                return perf
            else:
                return max_perf - performance_data[algo][inst]
        shapleys = self._get_VBS_Shap(instances, algorithms, metric)
        if self.scenario.maximize[0] == True:
            performance_data = performance_data * -1
        if self.scenario.maximize[0] == True:
            # marginal contribution code assumes: smaller is better
            self.scenario.performance_data = self.scenario.performance_data * - \
                1
        marginales = self._get_marginal_contribution()
        if self.scenario.maximize[0] == True:
            self.scenario.performance_data = self.scenario.performance_data * - \
                1
        averages = self._get_average_perf()
        out_fns = []
        # one pie chart per contribution measure
        for name, data_ in zip(["averages","marginales", "shapleys"], [averages, marginales, shapleys]):
            matplotlib.pyplot.close()
            fig = plt.figure()
            plt.axis('equal')
            ax = fig.gca()
            colormap = plt.cm.gist_ncar
            colors = [colormap(i) for i in np.linspace(
                0, 0.9, len(self.scenario.algorithms))]
            data_list = [data_[algo] for algo in algorithms]
            # automatically detect precision for legend
            mean_v = np.abs(np.mean(data_list))
            prec = 2
            while True:
                if round(mean_v, prec) > 0:
                    prec += 1
                    break
                prec += 1
            print_str = "%s (%.{}f)".format(prec)
            # rescale to fix pie plot issues
            sum_v = sum(data_.values())
            data_list = [v / sum_v for v in data_list]
            labels = [print_str % (algo, data_[algo]) for algo in algorithms]
            patches, texts = plt.pie(data_list, colors=colors)
            plt.legend(
                patches, labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
            plt.tight_layout()
            out_fn = os.path.join(
                self.output_dn, "contribution_%s_pie_plot.png" % (name))
            plt.savefig(out_fn, facecolor='w', edgecolor='w',
                        orientation='portrait', papertype=None, format=None,
                        transparent=False, pad_inches=0.02, bbox_inches='tight')
            out_fns.append(out_fn)
        return out_fns
def _get_average_perf(self):
'''
compute average performance of algorithms
'''
averages = {}
for algorithm in self.scenario.algorithms:
averages[algorithm] = self.scenario.performance_data[
algorithm].mean()
return averages
    def _get_marginal_contribution(self):
        '''
            compute marginal contribution of each algorithm:
            how much the VBS estimate degrades when the algorithm is removed
            (assumes smaller-is-better performance data)
        '''
        marginales = {}
        all_vbs = self.__get_vbs(self.scenario.performance_data)
        self.logger.info("VBS: %f" % (all_vbs))
        for algorithm in self.scenario.algorithms:
            # VBS over all algorithms except this one
            remaining_algos = list(
                set(self.scenario.algorithms).difference([algorithm]))
            perf_data = self.scenario.performance_data[remaining_algos]
            rem_vbs = self.__get_vbs(perf_data)
            marginales[algorithm] = rem_vbs - all_vbs
        return marginales
def __get_vbs(self, performance_data):
return np.mean(performance_data.min(axis=1))
    def _get_VBS_Shap(self, instances, algorithms, metric):
        '''
        instances - the instances to solve.
        algorithms - the set of available algorithms.
        metric - the performance metric from (algorithm,instance) to real number. Higher is better.
        Returns a dictionary from algorithms to their Shapley value, where the coalitional function is
        $$ v(S) = \frac{1}{|X|} \sum_{x\in X} \max_{s\in S} metric(s,x),$$
        where X is the set of instances. This is the "average VBS game" with respect to the given instances,
        algorithms and metric.
        __author__ = "<NAME> al"
        slight modification by me
        '''
        n = len(algorithms)
        m = len(instances)
        shapleys = {}
        # For each instance
        for instance in instances:
            # sort ascending by metric -> worst algorithm first
            instance_algorithms = sorted(
                algorithms, key=lambda a: metric(a, instance))
            # For each algorithm, from worst to best.
            for i in range(len(instance_algorithms)):
                ialgorithm = instance_algorithms[i]
                # The MC-rule is that you must have the algorithm and none of
                # the better ones.
                '''
                If x is the instance and s_1^x,...,s_n^x are sorted from worst, then the rule's pattern is:
                $$ p_i^x = s_i^x \wedge \bigwedge_{j=i+1}^n \overline{s}_j^x $$
                and its weight is
                $$ w_i^x = metric(s_i^x,x).$$
                '''
                pos = 1
                neg = n - i - 1
                metricvalue = metric(ialgorithm, instance)
                # normalised as fraction of instances
                value = 1 / float(m) * metricvalue
                # Calculate the rule Shapley values, and add them to the global
                # Shapley values.
                # Shapley value for positive literals in the rule.
                pos_shap = value / \
                    float(pos * comb(pos + neg, neg, exact=True))
                # Shapley value for negative literals in the rule.
                if neg > 0:
                    neg_shap = -value / \
                        float(neg * comb(pos + neg, pos, exact=True))
                else:
                    neg_shap = None
                # Update the Shapley value for the literals appearing in the
                # rule.
                for j in range(i, len(instance_algorithms)):
                    jalgorithm = instance_algorithms[j]
                    if jalgorithm not in shapleys:
                        shapleys[jalgorithm] = 0
                    if j == i:
                        # positive literal: the algorithm itself
                        shapleys[jalgorithm] += pos_shap
                    else:
                        # negative literal: one of the better algorithms
                        shapleys[jalgorithm] += neg_shap
        return shapleys
    def get_cdf_plots(self, plot_log_perf: bool=False):
        '''
            plot the cummulative distribution function of each algorithm
            Arguments
            ---------
            plot_log_perf: bool
                plot perf on log scale
            Returns
            -------
            file name of saved plot
        '''
        matplotlib.pyplot.close()
        self.logger.info("Plotting CDF plots........")
        from cycler import cycler
        gs = matplotlib.gridspec.GridSpec(1, 1)
        fig = plt.figure()
        ax1 = plt.subplot(gs[0:1, :])
        # one distinct color per algorithm
        colormap = plt.cm.gist_ncar
        fig.gca().set_prop_cycle(cycler('color', [
            colormap(i) for i in np.linspace(0, 0.9, len(self.scenario.algorithms))]))
        if self.scenario.performance_type[0] == "runtime":
            max_val = self.scenario.algorithm_cutoff_time
            # lower bound > 0 so the log x-axis stays valid
            min_val = max(0.0005, self.scenario.performance_data.min().min())
        else:
            max_val = self.scenario.performance_data.max().max()
            min_val = self.scenario.performance_data.min().min()
            if plot_log_perf:
                min_val = max(0.0005, min_val)
        for algorithm in self.scenario.algorithms:
            x, y = get_cdf_x_y(
                self.scenario.performance_data[str(algorithm)], max_val)
            x = np.array(x)
            y = np.array(y)
            # clamp values below the axis minimum so they remain visible
            x[x < min_val] = min_val
            ax1.step(x, y, label=algorithm)
        ax1.grid(
            True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
        ax1.set_xlabel(self.scenario.performance_measure[0])
        ax1.set_ylabel("P(x<X)")
        ax1.set_xlim([min_val, max_val])
        if self.scenario.performance_type[0] == "runtime" or plot_log_perf:
            ax1.set_xscale('log')
        ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        out_fn = os.path.join(self.output_dn, "cdf_plot.png")
        plt.savefig(out_fn, facecolor='w', edgecolor='w',
                    orientation='portrait', papertype=None, format=None,
                    transparent=False, pad_inches=0.02, bbox_inches='tight')
        return out_fn
    def get_violin_plots(self, plot_log_perf: bool=False):
        '''
            compute violin plots (fancy box plots) for each algorithm
            Arguments
            ---------
            plot_log_perf: bool
                plot perf on log scale
            Returns
            -------
            file name of saved plot
        '''
        matplotlib.pyplot.close()
        self.logger.info("Plotting vilion plots........")
        cutoff = self.scenario.algorithm_cutoff_time
        fig, ax = plt.subplots(nrows=1, ncols=1)
        all_data = self.scenario.performance_data.values
        # cap runtimes at the cutoff
        if self.scenario.performance_type[0] == "runtime":
            all_data[all_data > cutoff] = cutoff
        if self.scenario.performance_type[0] == "runtime" or plot_log_perf:
            all_data = np.log10(all_data)
            y_label = "log(%s)" % (self.scenario.performance_type[0])
        else:
            y_label = "%s" % (self.scenario.performance_type[0])
        n_points = all_data.shape[0]
        # one column of values per algorithm
        all_data = [all_data[:, i] for i in range(all_data.shape[1])]
        ax.violinplot(
            all_data, showmeans=False, showmedians=True, points=n_points)
        ax.yaxis.grid(True)
        ax.set_ylabel(y_label)
        plt.setp(ax, xticks=[y + 1 for y in range(len(all_data))],
                 xticklabels=self.scenario.performance_data.columns.values)
        labels = ax.get_xticklabels()
        plt.setp(labels, rotation=45, fontsize=12, ha="right")
        plt.tight_layout()
        out_fn = os.path.join(self.output_dn, "violin_plot.png")
        plt.savefig(out_fn)
        return out_fn
def get_box_plots(self, plot_log_perf: bool=False):
'''
compute violin plots (fancy box plots) for each algorithm
Arguments
---------
plot_log_perf: bool
plot perf on log scale
'''
matplotlib.pyplot.close()
self.logger.info("Plotting box plots........")
cutoff = self.scenario.algorithm_cutoff_time
fig, ax = plt.subplots(nrows=1, ncols=1)
all_data = self.scenario.performance_data.values
if self.scenario.performance_type[0] == "runtime":
all_data[all_data > cutoff] = cutoff
n_points = all_data.shape[0]
all_data = [all_data[:, i] for i in range(all_data.shape[1])]
ax.boxplot(all_data)
ax.yaxis.grid(True)
y_label = "%s" % (self.scenario.performance_type[0])
ax.set_ylabel(y_label)
if self.scenario.performance_type[0] == "runtime" or plot_log_perf:
ax.set_yscale('log')
plt.setp(ax, xticks=[y + 1 for y in range(len(all_data))],
xticklabels=self.scenario.performance_data.columns.values)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=45, fontsize=12, ha="right")
try:
plt.tight_layout()
except ValueError:
pass
out_fn = os.path.join(self.output_dn, "box_plot.png")
plt.savefig(out_fn)
return out_fn
    def get_bar_status_plot(self):
        '''
            get status distribution as stacked bar plot
            (one stacked bar per algorithm, one segment per runstatus)
            Returns
            -------
            file name of saved plot
        '''
        matplotlib.pyplot.close()
        self.logger.info("Plotting bar plots........")
        runstatus_data = self.scenario.runstatus_data
        width = 0.5
        stati = ["ok", "timeout", "memout", "not_applicable", "crash", "other"]
        # per-status counts per algorithm, normalized to frequencies
        count_stats = np.array(
            [runstatus_data[runstatus_data == status].count().values for status in stati])
        count_stats = count_stats / len(self.scenario.instances)
        colormap = plt.cm.gist_ncar
        cc = [colormap(i) for i in np.linspace(0, 0.9, len(stati))]
        bottom = np.zeros((len(self.scenario.algorithms)))
        ind = np.arange(len(self.scenario.algorithms)) + 0.5
        plots = []
        # stack the status frequencies on top of each other
        for id, status in enumerate(stati):
            plots.append(
                plt.bar(ind, count_stats[id, :], width, color=cc[id], bottom=bottom))
            bottom += count_stats[id, :]
        plt.ylabel('Frequency of runstatus')
        plt.xticks(
            ind + width / 2., list(runstatus_data.columns), rotation=45, ha="right")
        lgd = plt.legend(list(map(lambda x: x[0], plots)), stati, bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                         ncol=3, mode="expand", borderaxespad=0.)
        try:
            plt.tight_layout()
        except ValueError:
            pass
        out_fn = os.path.join(self.output_dn, "status_bar_plot.png")
        plt.savefig(out_fn, bbox_extra_artists=(lgd,), bbox_inches='tight')
        return out_fn
    def get_cd_diagram(self):
        '''
            computes critical distance plots with the orange package
            (Nemenyi test over the algorithms' average ranks)
            Returns
            -------
            file name of saved plot
        '''
        import Orange
        # orange allows unfortunately only 20 algorithms for cd diagrams
        MAX_ALGOS = 20
        if self.scenario.maximize[0] == True:
            # rank computation below assumes: smaller is better
            self.scenario.performance_data = self.scenario.performance_data * - \
                1
        matplotlib.pyplot.close()
        self.logger.info("Plotting critical distance plots........")
        # labels of each technique
        names = list(self.scenario.performance_data.columns)
        if len(names) > MAX_ALGOS:
            # sort algorithms by their average ranks
            names = list(self.scenario.performance_data.rank(
                axis=1).mean(axis=0).sort_values().index)
            names = names[:MAX_ALGOS]
            performance_data = self.scenario.performance_data[names]
        else:
            performance_data = self.scenario.performance_data
        avranks = performance_data.rank(axis=1).mean(
            axis=0).values  # average ranking of each technique
        number_of_datasets = len(self.scenario.instances)  # number of datasets
        cd = Orange.evaluation.compute_CD(
            avranks, number_of_datasets, alpha="0.05", test='nemenyi')
        out_fn = os.path.join(self.output_dn, "cd_diagram.png")
        Orange.evaluation.graph_ranks(
            avranks, names, cd=cd, width=12, textspace=2)
        plt.savefig(out_fn)
        # undo the in-place negation from above
        if self.scenario.maximize[0] == True:
            self.scenario.performance_data = self.scenario.performance_data * - \
                1
        return out_fn
    def get_footprints(self, eps=0.05):
        '''
            computes the algorithm footprint in feature space,
            i.e. all instances that can be solved within eps% of the VBS performance
            Arguments
            ---------
            eps: float
                eps% threshold to VBS performance
            Returns
            -------
            list of [algo, html file name, png file name] per algorithm
        '''
        self.logger.info("Plotting footprints........")
        # impute missing feature values with the column means
        self.scenario.feature_data = self.scenario.feature_data.fillna(
            self.scenario.feature_data.mean())
        # feature data
        features = self.scenario.feature_data.values
        # scale features
        ss = StandardScaler()
        features = ss.fit_transform(features)
        # feature reduction: pca down to 2 dimensions for plotting
        # TODO: use feature selection first to use only important features
        pca = PCA(n_components=2)
        features = pca.fit_transform(features)
        features = pd.DataFrame(
            data=features, index=self.scenario.feature_data.index)
        performance_data = self.scenario.performance_data
        if self.scenario.maximize[0] == False:
            vbs_perf = performance_data.min(axis=1)
        else:
            vbs_perf = performance_data.max(axis=1)
        algorithms = self.scenario.algorithms
        out_fns = []
        for algo in algorithms:
            out_fn = os.path.join(
                self.output_dn, "footprint_%s" % (algo.replace("/", "_")))
            algo_perf = performance_data[algo]
            # footprint: instances solved "ok" within eps% of the VBS score
            if self.scenario.maximize[0] == False:
                vbs_perfs = vbs_perf * (1 + eps)
                footprint = (algo_perf <= vbs_perfs) & (
                    self.scenario.runstatus_data[algo] == "ok")
            else:
                vbs_perfs = vbs_perf * (1 - eps)
                footprint = (algo_perf >= vbs_perfs) & (
                    self.scenario.runstatus_data[algo] == "ok")
            matplotlib.pyplot.close()
            fig = plt.figure()
            # instances outside the footprint in black
            non_insts = footprint[footprint == False].index.tolist()
            feature_not = features.loc[non_insts]
            scatter = plt.scatter(
                feature_not[0], feature_not[1], c="black", linewidths=0, alpha=0.5, s=60)
            tooltip = mpld3.plugins.PointHTMLTooltip(scatter, non_insts,
                                                   voffset=10, hoffset=10)
            mpld3.plugins.connect(fig, tooltip)
            # instances inside the footprint in red
            ok_insts = footprint[footprint == True].index.tolist()
            features_ok = features.loc[ok_insts]
            scatter = plt.scatter(
                features_ok[0], features_ok[1], c="red", linewidths=0, alpha=0.5, s=60)
            tooltip = mpld3.plugins.PointHTMLTooltip(scatter, ok_insts,
                                                   voffset=10, hoffset=10)
            mpld3.plugins.connect(fig, tooltip)
            plt.tight_layout()
            plt.savefig(out_fn + ".png", format="png")
            out_fns.append([algo, out_fn + ".html", out_fn + ".png"])
        return out_fns
    def instance_hardness(self, eps=0.05):
        '''
            plot instances in 2d PCA feature space
            and color them according to number of algorithms that are at most eps% away from oralce score
            Returns
            -------
            (html file name, png file name)
        '''
        matplotlib.pyplot.close()
        self.logger.info("Plotting Instance hardness........")
        # impute missing feature values with the column means
        self.scenario.feature_data = self.scenario.feature_data.fillna(
            self.scenario.feature_data.mean())
        # feature data
        features = self.scenario.feature_data.values
        insts = self.scenario.feature_data.index.tolist()
        # scale features
        ss = StandardScaler()
        features = ss.fit_transform(features)
        # feature reduction: pca down to 2 dimensions for plotting
        # TODO: use feature selection first to use only important features
        pca = PCA(n_components=2)
        features = pca.fit_transform(features)
        features = pd.DataFrame(
            data=features, index=self.scenario.feature_data.index)
        performance_data = self.scenario.performance_data
        if self.scenario.maximize[0] == False:
            vbs_perf = performance_data.min(axis=1)
        else:
            vbs_perf = performance_data.max(axis=1)
        algorithms = self.scenario.algorithms
        # per-instance count of algorithms within eps% of the oracle
        hardness_insts = pd.DataFrame(data=np.zeros((len(insts))), index=insts)
        for algo in algorithms:
            out_fn = os.path.join(self.output_dn, "footprint_%s.html" % (algo))
            algo_perf = performance_data[algo]
            if self.scenario.maximize[0] == False:
                vbs_perfs = vbs_perf * (1 + eps)
                footprint = (algo_perf <= vbs_perfs) & (
                    self.scenario.runstatus_data[algo] == "ok")
            else:
                vbs_perfs = vbs_perf * (1 - eps)
                footprint = (algo_perf >= vbs_perfs) & (
                    self.scenario.runstatus_data[algo] == "ok")
            hardness_insts.loc[footprint[footprint].index.tolist()] += 1
        fig = plt.figure()
        x = []
        y = []
        c = []
        insts_all = []
        # group instances by their hardness count for coloring
        for i in range(len(algorithms) + 1):
            insts = hardness_insts[
                (hardness_insts == float(i)).values].index.tolist()
            f = features.loc[insts]
            x.extend(f[0])
            y.extend(f[1])
            c.extend([i] * len(insts))
            insts_all.extend(insts)
        scatter = plt.scatter(x, y, c=c, vmin=1, vmax=len(
            algorithms), edgecolors="black", cmap=plt.cm.jet, linewidths=0, alpha=0.5, s=40)
        out_fn = os.path.join(self.output_dn, "instance_hardness")
        tooltip = mpld3.plugins.PointHTMLTooltip(scatter, insts_all,
                                               voffset=10, hoffset=10)
        mpld3.plugins.connect(fig, tooltip)
        # mpld3 does not support legends
        plt.colorbar(scatter)
        plt.savefig(out_fn+".png", bbox_inches='tight')
        return out_fn+".html", out_fn+".png"
| [
"os.mkdir",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"matplotlib.pyplot.bar",
"scipy.cluster.hierarchy.linkage",
"scipy.special.comb",
"logging.getLogger",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"os.path.join",... | [((304, 325), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (318, 325), False, 'import matplotlib\n'), ((1044, 1085), 'logging.getLogger', 'logging.getLogger', (['"""Performance Analysis"""'], {}), "('Performance Analysis')\n", (1061, 1085), False, 'import logging\n'), ((1144, 1188), 'os.path.join', 'os.path.join', (['output_dn', '"""performance_plots"""'], {}), "(output_dn, 'performance_plots')\n", (1156, 1188), False, 'import os\n'), ((3624, 3682), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'scores', 'index': 'algos', 'columns': "['VBS score']"}), "(data=scores, index=algos, columns=['VBS score'])\n", (3633, 3682), False, 'from pandas import DataFrame\n'), ((6007, 6032), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (6030, 6032), False, 'import matplotlib\n'), ((8186, 8211), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (8209, 8211), False, 'import matplotlib\n'), ((9048, 9074), 'scipy.cluster.hierarchy.linkage', 'linkage', (['(data * -1)', '"""ward"""'], {}), "(data * -1, 'ward')\n", (9055, 9074), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((9149, 9163), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9161, 9163), True, 'import matplotlib.pyplot as plt\n'), ((9172, 9223), 'scipy.cluster.hierarchy.dendrogram', 'dendrogram', (['link'], {'labels': 'algos', 'orientation': '"""right"""'}), "(link, labels=algos, orientation='right')\n", (9182, 9223), False, 'from scipy.cluster.hierarchy import linkage, dendrogram\n'), ((9243, 9294), 'os.path.join', 'os.path.join', (['self.output_dn', '"""algo_clustering.png"""'], {}), "(self.output_dn, 'algo_clustering.png')\n", (9255, 9294), False, 'import os\n'), ((9303, 9338), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_plot'], {'format': '"""png"""'}), "(out_plot, format='png')\n", (9314, 9338), True, 'import matplotlib.pyplot as plt\n'), ((9347, 9372), 'matplotlib.pyplot.close', 
'matplotlib.pyplot.close', ([], {}), '()\n', (9370, 9372), False, 'import matplotlib\n'), ((9781, 9802), 'numpy.argsort', 'np.argsort', (['indx_list'], {}), '(indx_list)\n', (9791, 9802), True, 'import numpy as np\n'), ((9890, 9904), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9902, 9904), True, 'import matplotlib.pyplot as plt\n'), ((10159, 10185), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'data.shape[0]'], {}), '(0, data.shape[0])\n', (10167, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10194, 10220), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', 'data.shape[0]'], {}), '(0, data.shape[0])\n', (10202, 10220), True, 'import matplotlib.pyplot as plt\n'), ((10481, 10534), 'matplotlib.pyplot.setp', 'plt.setp', (['labels'], {'rotation': '(90)', 'fontsize': '(12)', 'ha': '"""left"""'}), "(labels, rotation=90, fontsize=12, ha='left')\n", (10489, 10534), True, 'import matplotlib.pyplot as plt\n'), ((10581, 10622), 'matplotlib.pyplot.setp', 'plt.setp', (['labels'], {'rotation': '(0)', 'fontsize': '(12)'}), '(labels, rotation=0, fontsize=12)\n', (10589, 10622), True, 'import matplotlib.pyplot as plt\n'), ((10663, 10681), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10679, 10681), True, 'import matplotlib.pyplot as plt\n'), ((10702, 10754), 'os.path.join', 'os.path.join', (['self.output_dn', '"""correlation_plot.png"""'], {}), "(self.output_dn, 'correlation_plot.png')\n", (10714, 10754), False, 'import os\n'), ((10763, 10798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_plot'], {'format': '"""png"""'}), "(out_plot, format='png')\n", (10774, 10798), True, 'import matplotlib.pyplot as plt\n'), ((19187, 19212), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (19210, 19212), False, 'import matplotlib\n'), ((19317, 19351), 'matplotlib.gridspec.GridSpec', 'matplotlib.gridspec.GridSpec', (['(1)', '(1)'], {}), '(1, 1)\n', (19345, 19351), False, 'import matplotlib\n'), ((19367, 19379), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19377, 19379), True, 'import matplotlib.pyplot as plt\n'), ((19394, 19417), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:1, :]'], {}), '(gs[0:1, :])\n', (19405, 19417), True, 'import matplotlib.pyplot as plt\n'), ((20773, 20817), 'os.path.join', 'os.path.join', (['self.output_dn', '"""cdf_plot.png"""'], {}), "(self.output_dn, 'cdf_plot.png')\n", (20785, 20817), False, 'import os\n'), ((20827, 20994), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fn'], {'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'pad_inches': '(0.02)', 'bbox_inches': '"""tight"""'}), "(out_fn, facecolor='w', edgecolor='w', orientation='portrait',\n papertype=None, format=None, transparent=False, pad_inches=0.02,\n bbox_inches='tight')\n", (20838, 20994), True, 'import matplotlib.pyplot as plt\n'), ((21340, 21365), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (21363, 21365), False, 'import matplotlib\n'), ((21496, 21526), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (21508, 21526), True, 'import matplotlib.pyplot as plt\n'), ((22415, 22469), 'matplotlib.pyplot.setp', 'plt.setp', (['labels'], {'rotation': '(45)', 'fontsize': '(12)', 'ha': '"""right"""'}), "(labels, rotation=45, fontsize=12, ha='right')\n", (22423, 22469), True, 'import matplotlib.pyplot as plt\n'), ((22479, 22497), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22495, 22497), True, 'import matplotlib.pyplot as plt\n'), ((22516, 22563), 'os.path.join', 'os.path.join', (['self.output_dn', '"""violin_plot.png"""'], {}), "(self.output_dn, 'violin_plot.png')\n", (22528, 22563), False, 'import os\n'), ((22572, 22591), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fn'], {}), '(out_fn)\n', (22583, 22591), True, 'import matplotlib.pyplot as plt\n'), 
((22902, 22927), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (22925, 22927), False, 'import matplotlib\n'), ((23055, 23085), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (23067, 23085), True, 'import matplotlib.pyplot as plt\n'), ((23811, 23865), 'matplotlib.pyplot.setp', 'plt.setp', (['labels'], {'rotation': '(45)', 'fontsize': '(12)', 'ha': '"""right"""'}), "(labels, rotation=45, fontsize=12, ha='right')\n", (23819, 23865), True, 'import matplotlib.pyplot as plt\n'), ((23973, 24017), 'os.path.join', 'os.path.join', (['self.output_dn', '"""box_plot.png"""'], {}), "(self.output_dn, 'box_plot.png')\n", (23985, 24017), False, 'import os\n'), ((24026, 24045), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fn'], {}), '(out_fn)\n', (24037, 24045), True, 'import matplotlib.pyplot as plt\n'), ((24193, 24218), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (24216, 24218), False, 'import matplotlib\n'), ((25069, 25105), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency of runstatus"""'], {}), "('Frequency of runstatus')\n", (25079, 25105), True, 'import matplotlib.pyplot as plt\n'), ((25493, 25544), 'os.path.join', 'os.path.join', (['self.output_dn', '"""status_bar_plot.png"""'], {}), "(self.output_dn, 'status_bar_plot.png')\n", (25505, 25544), False, 'import os\n'), ((25553, 25620), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fn'], {'bbox_extra_artists': '(lgd,)', 'bbox_inches': '"""tight"""'}), "(out_fn, bbox_extra_artists=(lgd,), bbox_inches='tight')\n", (25564, 25620), True, 'import matplotlib.pyplot as plt\n'), ((26111, 26136), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (26134, 26136), False, 'import matplotlib\n'), ((26906, 26997), 'Orange.evaluation.compute_CD', 'Orange.evaluation.compute_CD', (['avranks', 'number_of_datasets'], {'alpha': '"""0.05"""', 'test': '"""nemenyi"""'}), "(avranks, 
number_of_datasets, alpha='0.05',\n test='nemenyi')\n", (26934, 26997), False, 'import Orange\n'), ((27024, 27070), 'os.path.join', 'os.path.join', (['self.output_dn', '"""cd_diagram.png"""'], {}), "(self.output_dn, 'cd_diagram.png')\n", (27036, 27070), False, 'import os\n'), ((27079, 27154), 'Orange.evaluation.graph_ranks', 'Orange.evaluation.graph_ranks', (['avranks', 'names'], {'cd': 'cd', 'width': '(12)', 'textspace': '(2)'}), '(avranks, names, cd=cd, width=12, textspace=2)\n', (27108, 27154), False, 'import Orange\n'), ((27176, 27195), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fn'], {}), '(out_fn)\n', (27187, 27195), True, 'import matplotlib.pyplot as plt\n'), ((28058, 28074), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (28072, 28074), False, 'from sklearn.preprocessing import StandardScaler\n'), ((28244, 28263), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (28247, 28263), False, 'from sklearn.decomposition import PCA\n'), ((28330, 28397), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'features', 'index': 'self.scenario.feature_data.index'}), '(data=features, index=self.scenario.feature_data.index)\n', (28342, 28397), True, 'import pandas as pd\n'), ((30724, 30749), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (30747, 30749), False, 'import matplotlib\n'), ((31107, 31123), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (31121, 31123), False, 'from sklearn.preprocessing import StandardScaler\n'), ((31293, 31312), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (31296, 31312), False, 'from sklearn.decomposition import PCA\n'), ((31379, 31446), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'features', 'index': 'self.scenario.feature_data.index'}), '(data=features, index=self.scenario.feature_data.index)\n', (31391, 31446), True, 'import pandas as pd\n'), ((32472, 32484), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32482, 32484), True, 'import matplotlib.pyplot as plt\n'), ((33039, 33088), 'os.path.join', 'os.path.join', (['self.output_dn', '"""instance_hardness"""'], {}), "(self.output_dn, 'instance_hardness')\n", (33051, 33088), False, 'import os\n'), ((33108, 33182), 'mpld3.plugins.PointHTMLTooltip', 'mpld3.plugins.PointHTMLTooltip', (['scatter', 'insts_all'], {'voffset': '(10)', 'hoffset': '(10)'}), '(scatter, insts_all, voffset=10, hoffset=10)\n', (33138, 33182), False, 'import mpld3\n'), ((33240, 33275), 'mpld3.plugins.connect', 'mpld3.plugins.connect', (['fig', 'tooltip'], {}), '(fig, tooltip)\n', (33261, 33275), False, 'import mpld3\n'), ((33375, 33396), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['scatter'], {}), '(scatter)\n', (33387, 33396), True, 'import matplotlib.pyplot as plt\n'), ((33405, 33454), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_fn + '.png')"], {'bbox_inches': '"""tight"""'}), "(out_fn + '.png', bbox_inches='tight')\n", (33416, 33454), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1233), 'os.path.isdir', 'os.path.isdir', (['self.output_dn'], {}), '(self.output_dn)\n', (1217, 1233), False, 'import os\n'), ((1247, 1271), 'os.mkdir', 'os.mkdir', (['self.output_dn'], {}), '(self.output_dn)\n', (1255, 1271), False, 'import os\n'), ((8441, 8469), 'numpy.zeros', 'np.zeros', (['(n_algos, n_algos)'], {}), '((n_algos, n_algos))\n', (8449, 8469), True, 'import numpy as np\n'), ((13219, 13244), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (13242, 13244), False, 'import matplotlib\n'), ((13263, 13275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13273, 13275), True, 'import matplotlib.pyplot as plt\n'), ((13288, 13305), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (13296, 13305), True, 'import matplotlib.pyplot as plt\n'), ((14123, 14156), 'matplotlib.pyplot.pie', 'plt.pie', (['data_list'], {'colors': 
'colors'}), '(data_list, colors=colors)\n', (14130, 14156), True, 'import matplotlib.pyplot as plt\n'), ((14169, 14248), 'matplotlib.pyplot.legend', 'plt.legend', (['patches', 'labels'], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(patches, labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)\n', (14179, 14248), True, 'import matplotlib.pyplot as plt\n'), ((14278, 14296), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14294, 14296), True, 'import matplotlib.pyplot as plt\n'), ((14319, 14386), 'os.path.join', 'os.path.join', (['self.output_dn', "('contribution_%s_pie_plot.png' % name)"], {}), "(self.output_dn, 'contribution_%s_pie_plot.png' % name)\n", (14331, 14386), False, 'import os\n'), ((14419, 14586), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out_fn'], {'facecolor': '"""w"""', 'edgecolor': '"""w"""', 'orientation': '"""portrait"""', 'papertype': 'None', 'format': 'None', 'transparent': '(False)', 'pad_inches': '(0.02)', 'bbox_inches': '"""tight"""'}), "(out_fn, facecolor='w', edgecolor='w', orientation='portrait',\n papertype=None, format=None, transparent=False, pad_inches=0.02,\n bbox_inches='tight')\n", (14430, 14586), True, 'import matplotlib.pyplot as plt\n'), ((20189, 20200), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (20197, 20200), True, 'import numpy as np\n'), ((20217, 20228), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (20225, 20228), True, 'import numpy as np\n'), ((21792, 21810), 'numpy.log10', 'np.log10', (['all_data'], {}), '(all_data)\n', (21800, 21810), True, 'import numpy as np\n'), ((23892, 23910), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23908, 23910), True, 'import matplotlib.pyplot as plt\n'), ((25413, 25431), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25429, 25431), True, 'import matplotlib.pyplot as plt\n'), ((29317, 29342), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', 
(29340, 29342), False, 'import matplotlib\n'), ((29361, 29373), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (29371, 29373), True, 'import matplotlib.pyplot as plt\n'), ((29516, 29606), 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature_not[0]', 'feature_not[1]'], {'c': '"""black"""', 'linewidths': '(0)', 'alpha': '(0.5)', 's': '(60)'}), "(feature_not[0], feature_not[1], c='black', linewidths=0, alpha=\n 0.5, s=60)\n", (29527, 29606), True, 'import matplotlib.pyplot as plt\n'), ((29642, 29716), 'mpld3.plugins.PointHTMLTooltip', 'mpld3.plugins.PointHTMLTooltip', (['scatter', 'non_insts'], {'voffset': '(10)', 'hoffset': '(10)'}), '(scatter, non_insts, voffset=10, hoffset=10)\n', (29672, 29716), False, 'import mpld3\n'), ((29782, 29817), 'mpld3.plugins.connect', 'mpld3.plugins.connect', (['fig', 'tooltip'], {}), '(fig, tooltip)\n', (29803, 29817), False, 'import mpld3\n'), ((29957, 30045), 'matplotlib.pyplot.scatter', 'plt.scatter', (['features_ok[0]', 'features_ok[1]'], {'c': '"""red"""', 'linewidths': '(0)', 'alpha': '(0.5)', 's': '(60)'}), "(features_ok[0], features_ok[1], c='red', linewidths=0, alpha=\n 0.5, s=60)\n", (29968, 30045), True, 'import matplotlib.pyplot as plt\n'), ((30081, 30154), 'mpld3.plugins.PointHTMLTooltip', 'mpld3.plugins.PointHTMLTooltip', (['scatter', 'ok_insts'], {'voffset': '(10)', 'hoffset': '(10)'}), '(scatter, ok_insts, voffset=10, hoffset=10)\n', (30111, 30154), False, 'import mpld3\n'), ((30220, 30255), 'mpld3.plugins.connect', 'mpld3.plugins.connect', (['fig', 'tooltip'], {}), '(fig, tooltip)\n', (30241, 30255), False, 'import mpld3\n'), ((30321, 30339), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30337, 30339), True, 'import matplotlib.pyplot as plt\n'), ((30352, 30394), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_fn + '.png')"], {'format': '"""png"""'}), "(out_fn + '.png', format='png')\n", (30363, 30394), True, 'import matplotlib.pyplot as plt\n'), ((31867, 31923), 
'os.path.join', 'os.path.join', (['self.output_dn', "('footprint_%s.html' % algo)"], {}), "(self.output_dn, 'footprint_%s.html' % algo)\n", (31879, 31923), False, 'import os\n'), ((6745, 6770), 'matplotlib.pyplot.close', 'matplotlib.pyplot.close', ([], {}), '()\n', (6768, 6770), False, 'import matplotlib\n'), ((8587, 8630), 'numpy.array', 'np.array', (['perf_data[:, i]'], {'dtype': 'np.float64'}), '(perf_data[:, i], dtype=np.float64)\n', (8595, 8630), True, 'import numpy as np\n'), ((8652, 8695), 'numpy.array', 'np.array', (['perf_data[:, j]'], {'dtype': 'np.float64'}), '(perf_data[:, j], dtype=np.float64)\n', (8660, 8695), True, 'import numpy as np\n'), ((8946, 8965), 'scipy.stats.spearmanr', 'spearmanr', (['y_i', 'y_j'], {}), '(y_i, y_j)\n', (8955, 8965), False, 'from scipy.stats import spearmanr\n'), ((10038, 10062), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (10047, 10062), True, 'import numpy as np\n'), ((10105, 10129), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (10114, 10129), True, 'import numpy as np\n'), ((13633, 13651), 'numpy.mean', 'np.mean', (['data_list'], {}), '(data_list)\n', (13640, 13651), True, 'import numpy as np\n'), ((24949, 25017), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'count_stats[id, :]', 'width'], {'color': 'cc[id]', 'bottom': 'bottom'}), '(ind, count_stats[id, :], width, color=cc[id], bottom=bottom)\n', (24956, 25017), True, 'import matplotlib.pyplot as plt\n'), ((4562, 4621), 'numpy.sum', 'np.sum', (["(self.scenario.runstatus_data.values == 'ok')"], {'axis': '(1)'}), "(self.scenario.runstatus_data.values == 'ok', axis=1)\n", (4568, 4621), True, 'import numpy as np\n'), ((6865, 6994), 'plottingscripts.plotting.scatter.plot_scatter_plot', 'plot_scatter_plot', ([], {'x_data': 'y_i', 'y_data': 'y_j', 'max_val': 'max_val', 'labels': '[algo_1, algo_2]', 'metric': 'self.scenario.performance_type[0]'}), '(x_data=y_i, y_data=y_j, max_val=max_val, labels=[algo_1,\n algo_2], 
metric=self.scenario.performance_type[0])\n', (6882, 6994), False, 'from plottingscripts.plotting.scatter import plot_scatter_plot\n'), ((7127, 7145), 'matplotlib.pyplot.figure', 'figure', (['(1)'], {'dpi': '(100)'}), '(1, dpi=100)\n', (7133, 7145), False, 'from matplotlib.pyplot import tight_layout, figure, subplot, savefig, show, setp\n'), ((7172, 7195), 'matplotlib.pyplot.subplot', 'subplot', ([], {'aspect': '"""equal"""'}), "(aspect='equal')\n", (7179, 7195), False, 'from matplotlib.pyplot import tight_layout, figure, subplot, savefig, show, setp\n'), ((8714, 8737), 'numpy.sum', 'np.sum', (['perf_data[:, i]'], {}), '(perf_data[:, i])\n', (8720, 8737), True, 'import numpy as np\n'), ((8827, 8850), 'numpy.sum', 'np.sum', (['perf_data[:, j]'], {}), '(perf_data[:, j])\n', (8833, 8850), True, 'import numpy as np\n'), ((8771, 8799), 'numpy.random.rand', 'np.random.rand', (['y_i.shape[0]'], {}), '(y_i.shape[0])\n', (8785, 8799), True, 'import numpy as np\n'), ((8884, 8912), 'numpy.random.rand', 'np.random.rand', (['y_j.shape[0]'], {}), '(y_j.shape[0])\n', (8898, 8912), True, 'import numpy as np\n'), ((18016, 18048), 'scipy.special.comb', 'comb', (['(pos + neg)', 'neg'], {'exact': '(True)'}), '(pos + neg, neg, exact=True)\n', (18020, 18048), False, 'from scipy.special import comb\n'), ((18223, 18255), 'scipy.special.comb', 'comb', (['(pos + neg)', 'pos'], {'exact': '(True)'}), '(pos + neg, pos, exact=True)\n', (18227, 18255), False, 'from scipy.special import comb\n')] |
""" Module to train and run LISRD-SIFT. """
import warnings
warnings.filterwarnings(action='once')
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as func
from .base_model import BaseModel, Mode
from .backbones.net_vlad import NetVLAD
from ..datasets.utils.homographies import warp_points
from ..utils.geometry_utils import keep_true_keypoints
class LisrdSiftModule(nn.Module):
    """
    Network computing SIFT descriptors and their NetVLAD meta descriptors.

    Two descriptor variances are handled: regular 'sift' and 'upright_sift'
    (SIFT with every keypoint orientation forced to 0). Each variance has
    its own NetVLAD head that aggregates the local descriptors falling in
    each image tile into one meta descriptor per tile.
    """
    def __init__(self, config, device):
        """
        Args:
            config: dict; this module reads the keys 'n_clusters',
                'meta_desc_dim', 'tile', 'n_kp' and 'desc_size'.
            device: torch device on which the meta descriptors are created.
        """
        super().__init__()
        self._config = config
        self._device = device
        self._variances = ['sift', 'upright_sift']
        # One NetVLAD aggregation head per descriptor variance.
        self.vlad_sift = NetVLAD(
            num_clusters=self._config['n_clusters'],
            dim=self._config['meta_desc_dim'])
        self.vlad_upright_sift = NetVLAD(
            num_clusters=self._config['n_clusters'],
            dim=self._config['meta_desc_dim'])
        self.vlad_layers = {'sift': self.vlad_sift,
                            'upright_sift': self.vlad_upright_sift}
    def forward(self, inputs, mode):
        """
        Detect SIFT keypoints and compute local + meta descriptors.

        Args:
            inputs: batch of RGB images as a (B, 3, H, W) tensor with values
                in [0, 1] (scaled by 255 and cast to uint8 below).
            mode: accepted for interface compatibility; not used in this body.

        Returns:
            Dict with 'keypoints', 'assignments' and, for each variance v,
            'v_desc' and 'v_meta_desc' entries.
        """
        outputs = self._get_sift_desc(inputs)
        self._compute_meta_descriptors(outputs)
        return outputs
    def _get_sift_desc(self, inputs):
        """
        Run OpenCV SIFT on every image of the batch.

        Returns a dict with:
            'keypoints': list of (N_i, 2) arrays of keypoints in (y, x) order,
            'assignments': list of (N_i,) int arrays mapping each keypoint to
                the index of the tile it falls into (row-major over a
                tile x tile grid),
            '<v>_desc': list of (N_i, desc_size) descriptor arrays per
                variance v. Images with no detection get one dummy keypoint,
                a zero assignment and all-ones descriptors instead.
        """
        # (B, 3, H, W) float tensor in [0, 1] -> (B, H, W, 3) uint8 images
        images = np.uint8(inputs.cpu().numpy().transpose(0, 2, 3, 1) * 255)
        descs = {v: [] for v in self._variances}
        keypoints = []
        assignments = []
        tile = self._config['tile']
        for img in images:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            img_size = np.array(img.shape[:2])
            tile_size = img_size / tile
            sift = cv2.xfeatures2d.SIFT_create(nfeatures=self._config['n_kp'],
                                               contrastThreshold=0.01)
            points = sift.detect(img, None)
            if len(points) == 0:  # No point detected
                keypoints.append(np.zeros((1, 2)))  # Dummy kp
                assignments.append(np.zeros(1, dtype=int))
                for v in self._variances:
                    descs[v].append(np.ones((1, self._config['desc_size'])))
                continue
            for v in self._variances:
                kp = points.copy()
                if v == 'upright_sift':
                    for k in kp:
                        k.angle = 0.  # Set all orientations to 0
                _, desc = sift.compute(img, kp)
                descs[v].append(desc)
            # cv2 keypoints are (x, y); store them in (y, x) convention
            points = [[k.pt[1], k.pt[0]] for k in points]
            keypoints.append(np.array(points))
            # For each keypoint, compute in which tile it lands up
            ass = np.clip(points // tile_size, 0, tile - 1)
            # Row-major tile index: ass[:, 0] is the tile row, ass[:, 1] the col
            ass = ass[:, 1] + tile * ass[:, 0]
            assignments.append(ass.astype(int))
        outputs = {'keypoints': keypoints, 'assignments': assignments}
        for v in self._variances:
            outputs[v + '_desc'] = descs[v]
        return outputs
    def _compute_meta_descriptor(self, assignments, descs, netvlad):
        """
        Aggregate the local descriptors of each tile into one meta descriptor.

        Args:
            assignments: list (len batch) of (N_i,) int arrays of tile indices.
            descs: list (len batch) of (N_i, desc_size) descriptor arrays.
            netvlad: the NetVLAD head used for the aggregation.

        Returns:
            Tensor of shape (B, n_tiles, meta_desc_dim * n_clusters); tiles
            with no keypoint get an all-ones dummy meta descriptor.
        """
        b_size = len(assignments)
        n_tiles = self._config['tile'] * self._config['tile']
        meta_descs = []
        for i in range(b_size):
            meta_desc = []
            for j in range(n_tiles):
                if np.sum(assignments[i] == j) == 0:  # no points in this tile
                    meta_desc.append(  # Dummy meta desc
                        torch.ones(self._config['meta_desc_dim']
                                   * self._config['n_clusters'],
                                   dtype=torch.float, device=self._device))
                    continue
                # Descriptors of the points in tile j, reshaped to
                # (1, desc_size, n_pts, 1) — presumably the NCHW-like layout
                # NetVLAD expects; confirm against the NetVLAD implementation.
                desc = descs[i][assignments[i] == j]
                desc = desc.reshape(1, self._config['desc_size'], -1, 1)
                desc = torch.tensor(desc, dtype=torch.float,
                                    device=self._device)
                meta_desc.append(netvlad.forward(desc).flatten())
            meta_desc = torch.stack(meta_desc, dim=0)
            meta_descs.append(meta_desc)
        return torch.stack(meta_descs, dim=0)
    def _compute_meta_descriptors(self, outputs):
        """
        For each kind of descriptor, compute a meta descriptor encoding
        a sub area of the total image.
        """
        for v in self._variances:
            outputs[v + '_meta_desc'] = self._compute_meta_descriptor(
                outputs['assignments'], outputs[v + '_desc'],
                self.vlad_layers[v])
class LisrdSift(BaseModel):
    """
    LISRD model built on top of handcrafted SIFT descriptors.

    Only the NetVLAD meta descriptors (which weight the 'sift' and
    'upright_sift' local descriptors) are learned; the local SIFT
    descriptors come from OpenCV and are fixed.
    """
    required_config_keys = []

    def __init__(self, dataset, config, device):
        self._device = device
        super().__init__(dataset, config, device)
        self._variances = ['sift', 'upright_sift']

    def _model(self, config):
        return LisrdSiftModule(config, self._device)

    def _forward(self, inputs, mode, config):
        """
        Run the network on one image (EXPORT mode) or on the 2-3 images of
        a training/eval pair, suffixing each output with the image index.
        """
        outputs = {}
        if mode == Mode.EXPORT:
            outputs['descriptors'] = {}
            outputs['meta_descriptors'] = {}
            with torch.no_grad():
                output = self._net.forward(inputs['image0'], mode)
                outputs['keypoints'] = output['keypoints']
                outputs['assignments'] = output['assignments']
                for v in self._variances:
                    outputs['descriptors'][v] = output[v + '_desc']
                    outputs['meta_descriptors'][v] = output[v + '_meta_desc']
        else:
            num_img = 3 if 'image2' in inputs else 2
            for i in range(num_img):
                n = str(i)
                output = self._net.forward(inputs['image' + n], mode)
                outputs['keypoints' + n] = output['keypoints']
                outputs['assignments' + n] = output['assignments']
                for v in self._variances:
                    outputs[v + '_desc' + n] = output[v + '_desc']
                    outputs[v + '_meta_desc' + n] = output[v + '_meta_desc']
        return outputs

    def _loss(self, outputs, inputs, config):
        # Loss for the meta descriptors only (the SIFT descriptors are fixed)
        return self._meta_descriptors_loss(outputs, inputs, config)

    def _common_keypoints(self, outputs, i, H_i, img_size):
        """
        Keep only the keypoints of batch item i that are visible in both
        images under the homography H_i.

        Returns:
            (kp0, idx0, kp1, idx1) with the filtered keypoints and their
            index masks, or None when one image has no common point.
        """
        kp0, idx0 = keep_true_keypoints(
            outputs['keypoints0'][i], H_i, img_size)
        kp1, idx1 = keep_true_keypoints(
            outputs['keypoints1'][i], np.linalg.inv(H_i), img_size)
        if (np.sum(idx0) == 0) or (np.sum(idx1) == 0):  # No common points
            return None
        return kp0, idx0, kp1, idx1

    def _weighted_desc_dist(self, outputs, i, idx0, idx1):
        """
        Pairwise descriptor distances between the two images of batch item i,
        with the per-variance distances blended by the softmaxed similarity
        of the corresponding meta descriptors.

        This is the computation shared by the training loss and the
        matching-score metric (previously duplicated in both).

        Returns:
            (n_kp0, n_kp1) torch tensor of weighted descriptor distances.
        """
        assignments0 = outputs['assignments0'][i][idx0]
        assignments1 = outputs['assignments1'][i][idx1]
        # Distance between all descriptor pairs, for each variance
        desc_dists = []
        for v in self._variances:
            desc0 = torch.tensor(outputs[v + '_desc0'][i][idx0],
                                 dtype=torch.float, device=self._device)
            desc1 = torch.tensor(outputs[v + '_desc1'][i][idx1],
                                 dtype=torch.float, device=self._device)
            desc_dists.append(torch.norm(
                desc0.unsqueeze(1) - desc1.unsqueeze(0), dim=2))
        desc_dists = torch.stack(desc_dists, dim=2)
        # Similarity of the meta descriptors of the tiles each point falls in
        meta_desc_sims = []
        for v in self._variances:
            meta_desc0 = func.normalize(
                outputs[v + '_meta_desc0'][i][assignments0], dim=1)
            meta_desc1 = func.normalize(
                outputs[v + '_meta_desc1'][i][assignments1], dim=1)
            meta_desc_sims.append(meta_desc0 @ meta_desc1.t())
        meta_desc_sims = func.softmax(
            torch.stack(meta_desc_sims, dim=2), dim=2)
        # Weight the per-variance distances by the meta descriptor similarity
        return torch.sum(desc_dists * meta_desc_sims, dim=2)

    def _meta_descriptors_loss(self, outputs, inputs, config):
        """
        Triplet-style margin loss on the meta-descriptor-weighted distances,
        averaged over the batch. Returns a zero (grad-enabled) loss as soon
        as one batch item has no keypoints in common between the two images.
        """
        H = inputs['homography'].detach().cpu().numpy()
        img_size = np.array(inputs['image0'].size()[2:4])
        losses = []
        for i in range(len(H)):
            common = self._common_keypoints(outputs, i, H[i], img_size)
            if common is None:  # No common points
                return torch.tensor(0, dtype=torch.float, device=self._device,
                                    requires_grad=True)
            kp0, idx0, kp1, idx1 = common
            desc_dist = self._weighted_desc_dist(outputs, i, idx0, idx1)
            # Ground-truth correspondences from the homography
            warped_kp0 = warp_points(kp0, H[i])
            points_dist = torch.tensor(np.linalg.norm(
                warped_kp0[:, None, :] - kp1[None, :, :], axis=2))
            wrong_matches = points_dist > self._config['correct_thresh']
            dist_mask = points_dist <= self._config['dist_thresh']
            # Positive loss: hardest correct match
            pos_desc_dist = desc_dist.clone()
            pos_desc_dist[wrong_matches] = 0.
            pos_dist = torch.max(pos_desc_dist, dim=1)[0]
            # Negative loss: hardest non-match outside the distance threshold
            desc_dist[dist_mask] = torch.tensor(np.inf)
            neg_dist = torch.min(desc_dist, dim=1)[0]
            losses.append(func.relu(config['margin']
                                    + pos_dist - neg_dist).mean())
        # Total loss
        return torch.stack(losses, dim=0).mean()

    def _matching_score(self, outputs, inputs, config):
        """
        Fraction of ground-truth matches retrieved by nearest-neighbor
        search on the weighted descriptor distances, averaged over the
        batch. Returns 0. as soon as one batch item has no common points.
        """
        H = inputs['homography'].detach().cpu().numpy()
        img_size = np.array(inputs['image0'].size()[2:4])
        matching_scores = []
        for i in range(len(H)):
            common = self._common_keypoints(outputs, i, H[i], img_size)
            if common is None:  # No common points
                return 0.
            kp0, idx0, kp1, idx1 = common
            desc_dist = self._weighted_desc_dist(outputs, i, idx0, idx1)
            desc_dist = desc_dist.detach().cpu().numpy()
            # Ground-truth matches from the homography
            warped_kp0 = warp_points(kp0, H[i])
            points_dist = np.linalg.norm(
                warped_kp0[:, None, :] - kp1[None, :, :], axis=2)
            best_matches = np.argmin(points_dist, axis=1)
            min_dist = points_dist[np.arange(len(points_dist)), best_matches]
            true_matches = min_dist < self._config['correct_thresh']
            # Percentage of correct nearest-neighbor matches
            closest = np.argmin(desc_dist, axis=1)
            m_score = (0. if np.sum(true_matches) == 0
                       else (closest == best_matches)[true_matches].mean())
            matching_scores.append(m_score)
        return np.stack(matching_scores, axis=0).mean()

    def _metrics(self, outputs, inputs, config):
        m_score = self._matching_score(outputs, inputs, config)
        return {'matching_score': m_score}

    def initialize_weights(self):
        """ Kaiming initialization of every Conv2d layer of the network. """
        def init_weights(m):
            if type(m) == nn.Conv2d:
                torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
                m.bias.data.fill_(0.01)

        self._net.apply(init_weights)
| [
"numpy.sum",
"numpy.ones",
"numpy.clip",
"numpy.argmin",
"numpy.linalg.norm",
"torch.no_grad",
"torch.nn.functional.normalize",
"torch.ones",
"torch.nn.init.kaiming_normal_",
"cv2.cvtColor",
"torch.nn.functional.relu",
"numpy.stack",
"numpy.linalg.inv",
"torch.max",
"torch.sum",
"torch... | [((61, 99), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""once"""'}), "(action='once')\n", (84, 99), False, 'import warnings\n'), ((100, 169), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""numpy.ufunc size changed"""'}), "('ignore', message='numpy.ufunc size changed')\n", (123, 169), False, 'import warnings\n'), ((4056, 4086), 'torch.stack', 'torch.stack', (['meta_descs'], {'dim': '(0)'}), '(meta_descs, dim=0)\n', (4067, 4086), False, 'import torch\n'), ((1522, 1559), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1534, 1559), False, 'import cv2\n'), ((1583, 1606), 'numpy.array', 'np.array', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (1591, 1606), True, 'import numpy as np\n'), ((1666, 1753), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {'nfeatures': "self._config['n_kp']", 'contrastThreshold': '(0.01)'}), "(nfeatures=self._config['n_kp'],\n contrastThreshold=0.01)\n", (1693, 1753), False, 'import cv2\n'), ((2669, 2710), 'numpy.clip', 'np.clip', (['(points // tile_size)', '(0)', '(tile - 1)'], {}), '(points // tile_size, 0, tile - 1)\n', (2676, 2710), True, 'import numpy as np\n'), ((3970, 3999), 'torch.stack', 'torch.stack', (['meta_desc'], {'dim': '(0)'}), '(meta_desc, dim=0)\n', (3981, 3999), False, 'import torch\n'), ((7622, 7652), 'torch.stack', 'torch.stack', (['desc_dists'], {'dim': '(2)'}), '(desc_dists, dim=2)\n', (7633, 7652), False, 'import torch\n'), ((8154, 8188), 'torch.stack', 'torch.stack', (['meta_desc_sims'], {'dim': '(2)'}), '(meta_desc_sims, dim=2)\n', (8165, 8188), False, 'import torch\n'), ((8265, 8300), 'torch.nn.functional.softmax', 'func.softmax', (['meta_desc_sims'], {'dim': '(2)'}), '(meta_desc_sims, dim=2)\n', (8277, 8300), True, 'import torch.nn.functional as func\n'), ((8325, 8370), 'torch.sum', 'torch.sum', (['(desc_dists * meta_desc_sims)'], {'dim': '(2)'}), '(desc_dists * 
meta_desc_sims, dim=2)\n', (8334, 8370), False, 'import torch\n'), ((8963, 8983), 'torch.tensor', 'torch.tensor', (['np.inf'], {}), '(np.inf)\n', (8975, 8983), False, 'import torch\n'), ((10655, 10685), 'torch.stack', 'torch.stack', (['desc_dists'], {'dim': '(2)'}), '(desc_dists, dim=2)\n', (10666, 10685), False, 'import torch\n'), ((11187, 11221), 'torch.stack', 'torch.stack', (['meta_desc_sims'], {'dim': '(2)'}), '(meta_desc_sims, dim=2)\n', (11198, 11221), False, 'import torch\n'), ((11298, 11333), 'torch.nn.functional.softmax', 'func.softmax', (['meta_desc_sims'], {'dim': '(2)'}), '(meta_desc_sims, dim=2)\n', (11310, 11333), True, 'import torch.nn.functional as func\n'), ((11358, 11403), 'torch.sum', 'torch.sum', (['(desc_dists * meta_desc_sims)'], {'dim': '(2)'}), '(desc_dists * meta_desc_sims, dim=2)\n', (11367, 11403), False, 'import torch\n'), ((11574, 11638), 'numpy.linalg.norm', 'np.linalg.norm', (['(warped_kp0[:, None, :] - kp1[None, :, :])'], {'axis': '(2)'}), '(warped_kp0[:, None, :] - kp1[None, :, :], axis=2)\n', (11588, 11638), True, 'import numpy as np\n'), ((11683, 11713), 'numpy.argmin', 'np.argmin', (['points_dist'], {'axis': '(1)'}), '(points_dist, axis=1)\n', (11692, 11713), True, 'import numpy as np\n'), ((11936, 11964), 'numpy.argmin', 'np.argmin', (['desc_dist'], {'axis': '(1)'}), '(desc_dist, axis=1)\n', (11945, 11964), True, 'import numpy as np\n'), ((2565, 2581), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (2573, 2581), True, 'import numpy as np\n'), ((3785, 3843), 'torch.tensor', 'torch.tensor', (['desc'], {'dtype': 'torch.float', 'device': 'self._device'}), '(desc, dtype=torch.float, device=self._device)\n', (3797, 3843), False, 'import torch\n'), ((5004, 5019), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5017, 5019), False, 'import torch\n'), ((6631, 6650), 'numpy.linalg.inv', 'np.linalg.inv', (['H[i]'], {}), '(H[i])\n', (6644, 6650), True, 'import numpy as np\n'), ((6764, 6839), 'torch.tensor', 'torch.tensor', 
(['(0)'], {'dtype': 'torch.float', 'device': 'self._device', 'requires_grad': '(True)'}), '(0, dtype=torch.float, device=self._device, requires_grad=True)\n', (6776, 6839), False, 'import torch\n'), ((7158, 7247), 'torch.tensor', 'torch.tensor', (["outputs[v + '_desc0'][i][idx0]"], {'dtype': 'torch.float', 'device': 'self._device'}), "(outputs[v + '_desc0'][i][idx0], dtype=torch.float, device=self\n ._device)\n", (7170, 7247), False, 'import torch\n'), ((7304, 7393), 'torch.tensor', 'torch.tensor', (["outputs[v + '_desc1'][i][idx1]"], {'dtype': 'torch.float', 'device': 'self._device'}), "(outputs[v + '_desc1'][i][idx1], dtype=torch.float, device=self\n ._device)\n", (7316, 7393), False, 'import torch\n'), ((7888, 7921), 'torch.nn.functional.normalize', 'func.normalize', (['meta_desc0'], {'dim': '(1)'}), '(meta_desc0, dim=1)\n', (7902, 7921), True, 'import torch.nn.functional as func\n'), ((8024, 8057), 'torch.nn.functional.normalize', 'func.normalize', (['meta_desc1'], {'dim': '(1)'}), '(meta_desc1, dim=1)\n', (8038, 8057), True, 'import torch.nn.functional as func\n'), ((8497, 8561), 'numpy.linalg.norm', 'np.linalg.norm', (['(warped_kp0[:, None, :] - kp1[None, :, :])'], {'axis': '(2)'}), '(warped_kp0[:, None, :] - kp1[None, :, :], axis=2)\n', (8511, 8561), True, 'import numpy as np\n'), ((8864, 8895), 'torch.max', 'torch.max', (['pos_desc_dist'], {'dim': '(1)'}), '(pos_desc_dist, dim=1)\n', (8873, 8895), False, 'import torch\n'), ((9007, 9034), 'torch.min', 'torch.min', (['desc_dist'], {'dim': '(1)'}), '(desc_dist, dim=1)\n', (9016, 9034), False, 'import torch\n'), ((9204, 9230), 'torch.stack', 'torch.stack', (['losses'], {'dim': '(0)'}), '(losses, dim=0)\n', (9215, 9230), False, 'import torch\n'), ((9773, 9792), 'numpy.linalg.inv', 'np.linalg.inv', (['H[i]'], {}), '(H[i])\n', (9786, 9792), True, 'import numpy as np\n'), ((10191, 10280), 'torch.tensor', 'torch.tensor', (["outputs[v + '_desc0'][i][idx0]"], {'dtype': 'torch.float', 'device': 'self._device'}), 
"(outputs[v + '_desc0'][i][idx0], dtype=torch.float, device=self\n ._device)\n", (10203, 10280), False, 'import torch\n'), ((10337, 10426), 'torch.tensor', 'torch.tensor', (["outputs[v + '_desc1'][i][idx1]"], {'dtype': 'torch.float', 'device': 'self._device'}), "(outputs[v + '_desc1'][i][idx1], dtype=torch.float, device=self\n ._device)\n", (10349, 10426), False, 'import torch\n'), ((10921, 10954), 'torch.nn.functional.normalize', 'func.normalize', (['meta_desc0'], {'dim': '(1)'}), '(meta_desc0, dim=1)\n', (10935, 10954), True, 'import torch.nn.functional as func\n'), ((11057, 11090), 'torch.nn.functional.normalize', 'func.normalize', (['meta_desc1'], {'dim': '(1)'}), '(meta_desc1, dim=1)\n', (11071, 11090), True, 'import torch.nn.functional as func\n'), ((12156, 12189), 'numpy.stack', 'np.stack', (['matching_scores'], {'axis': '(0)'}), '(matching_scores, axis=0)\n', (12164, 12189), True, 'import numpy as np\n'), ((12471, 12531), 'torch.nn.init.kaiming_normal_', 'torch.nn.init.kaiming_normal_', (['m.weight'], {'nonlinearity': '"""relu"""'}), "(m.weight, nonlinearity='relu')\n", (12500, 12531), False, 'import torch\n'), ((1929, 1945), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (1937, 1945), True, 'import numpy as np\n'), ((1994, 2016), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'int'}), '(1, dtype=int)\n', (2002, 2016), True, 'import numpy as np\n'), ((3284, 3311), 'numpy.sum', 'np.sum', (['(assignments[i] == j)'], {}), '(assignments[i] == j)\n', (3290, 3311), True, 'import numpy as np\n'), ((6678, 6690), 'numpy.sum', 'np.sum', (['idx0'], {}), '(idx0)\n', (6684, 6690), True, 'import numpy as np\n'), ((6701, 6713), 'numpy.sum', 'np.sum', (['idx1'], {}), '(idx1)\n', (6707, 6713), True, 'import numpy as np\n'), ((9820, 9832), 'numpy.sum', 'np.sum', (['idx0'], {}), '(idx0)\n', (9826, 9832), True, 'import numpy as np\n'), ((9843, 9855), 'numpy.sum', 'np.sum', (['idx1'], {}), '(idx1)\n', (9849, 9855), True, 'import numpy as np\n'), ((11994, 12014), 
'numpy.sum', 'np.sum', (['true_matches'], {}), '(true_matches)\n', (12000, 12014), True, 'import numpy as np\n'), ((2096, 2135), 'numpy.ones', 'np.ones', (["(1, self._config['desc_size'])"], {}), "((1, self._config['desc_size']))\n", (2103, 2135), True, 'import numpy as np\n'), ((3425, 3539), 'torch.ones', 'torch.ones', (["(self._config['meta_desc_dim'] * self._config['n_clusters'])"], {'dtype': 'torch.float', 'device': 'self._device'}), "(self._config['meta_desc_dim'] * self._config['n_clusters'],\n dtype=torch.float, device=self._device)\n", (3435, 3539), False, 'import torch\n'), ((9065, 9114), 'torch.nn.functional.relu', 'func.relu', (["(config['margin'] + pos_dist - neg_dist)"], {}), "(config['margin'] + pos_dist - neg_dist)\n", (9074, 9114), True, 'import torch.nn.functional as func\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
"""Utils for notebook."""
import sys
import os
import os.path as osp
import glob
from collections import OrderedDict
from collections.abc import Iterable
import json
import subprocess
import pickle as pkl
import logging
import h5py
import math
import operator
import pathlib
import pandas as pd
import moviepy.editor as mpy
from tqdm import tqdm
import proglog
import numpy as np
from scipy.special import softmax
import torch
# from omegaconf import OmegaConf
import hydra
from hydra.experimental import initialize as hydra_initialize, compose as hydra_compose
import matplotlib
from matplotlib import pylab
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
# from tqdm import tqdm
from tqdm.notebook import tqdm
# Make repo-root packages (external/, launch, datasets) importable from the
# notebooks/ directory this file is meant to be run from.
sys.path.append('..')
from external.rulstm.RULSTM.utils import topk_recall
from launch import subselect_dict_keys_diff
from datasets import epic_kitchens
# Resolve paths relative to this file so the notebook CWD doesn't matter.
CODE_DIR = str(pathlib.Path(__file__).parent.resolve() / '../')
OUTPUT_DIR = f'{CODE_DIR}/OUTPUTS/'
RESULTS_SAVE_DIR_PREFIX = 'results'  # This is the prefix, can have multiple, if >1 eval datasets
DATASET_EVAL_CFG_KEY = 'dataset_eval'
DATASET_EVAL_CFG_KEY_SUFFIX = ''
proglog.notebook()  # so moviepy uses notebook tqdm
SQRT2 = math.sqrt(2)
# Figure styling for paper-quality plots.
sns.set_style("whitegrid")
# NOTE(review): 'custom' is set here but overridden to 'stix' a few lines
# below, so the Bitstream Vera settings are effectively unused — confirm
# whether the 'custom' block can be removed.
rcParams['mathtext.fontset'] = 'custom'
rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
matplotlib.rc('axes', edgecolor='k')
matplotlib.rc('font', size=30)
def save_graph(fig, outfpath, root_dir='./', **kwargs):
    """Post-process a matplotlib figure and save it to root_dir/outfpath.

    Args:
        fig: Matplotlib figure to save.
        outfpath: Output path, relative to root_dir.
        root_dir: Directory the output path is resolved against.
        **kwargs: Forwarded to fig.savefig (e.g. dpi).
    """
    # Strip the top/right spines for a cleaner look before saving.
    sns.despine(top=True, right=True, left=False, bottom=False)
    dest = os.path.join(root_dir, outfpath)
    # Create the destination directory if needed.
    os.makedirs(osp.dirname(dest), exist_ok=True)
    fig.savefig(dest,
                bbox_inches='tight',
                transparent=True,
                pad_inches=0,
                **kwargs)
def allkeys(obj, keys=None):
    """Recursively find all leaf keys in an h5py file/group.

    Args:
        obj: An open ``h5py.File`` or ``h5py.Group``.
        keys: Unused; kept only for backward compatibility. (The previous
            signature used the mutable default ``keys=[]`` — a classic
            pitfall — and then immediately overwrote it, so the parameter
            never had any effect.)
    Returns:
        List of '/'-joined paths to every leaf dataset under ``obj``.
    """
    found = []
    for key in obj.keys():
        if isinstance(obj[key], h5py.Group):
            # Recurse into sub-groups, prefixing the group name.
            found += [f'{key}/{el}' for el in allkeys(obj[key])]
        else:
            found.append(key)
    return found
class EmptyResdirError(ValueError):
    """Raised when a results directory contains no .pth/.h5 result files."""
    pass
def gen_load_resfiles(resdir):
    """Yield a result dict loaded from each .pth (or legacy .h5) file in resdir.

    Args:
        resdir: Directory containing result files dumped during testing.
    Yields:
        dict mapping key -> numpy array (torch tensors are converted) or raw
        value, one dict per result file.
    Raises:
        EmptyResdirError: If no .pth or .h5 files are found in resdir.
    """
    resfiles = glob.glob(osp.join(resdir, '*.pth'))
    if len(resfiles) == 0:
        # Fall back to the older h5-based output format.
        resfiles = glob.glob(osp.join(resdir, '*.h5'))
    if len(resfiles) == 0:
        raise EmptyResdirError(f'Didnt find any resfiles in {resdir}')
    for resfile in resfiles:
        if resfile.endswith('.pth'):
            output_dict = {
                # Was `torch.torch.is_tensor` — that only worked because the
                # torch package re-exports itself; use the documented API.
                key: val.numpy() if torch.is_tensor(val) else val
                for key, val in torch.load(resfile).items()
            }
        else:
            output_dict = {}
            with h5py.File(resfile, 'r') as fin:
                for key in allkeys(fin):
                    try:
                        output_dict[key] = fin[key][()]
                    except AttributeError as err:
                        # Happens for the string keys... need to figure what
                        # to do here
                        logging.warning('Unable to load %s (%s)', key, err)
        yield output_dict
def read_results(conf_path, run_id=0, results_dir='results/'):
    """Load result files for a config/run and average repeats per sample.

    Multiple predictions for the same sample index (e.g. multiple clips or
    crops) are averaged. Returns a dict key -> (max_idx + 1, ...) array.
    """
    resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
    first = next(gen_load_resfiles(resdir))
    # TODO allow to read only certain keys, eg some times we only need logits
    # which would be faster to read
    res_per_layer = {key: OrderedDict() for key in first if key != 'epoch'}
    if not res_per_layer:
        raise ValueError('No logits found in the output. Note that code was '
                         'changed Aug 26 2020 that renames "output" to '
                         '"logits" etc. So might need to rerun testing.')
    logging.info('Reading from resfiles')
    # Bucket every prediction by its sample index, per key.
    for data in gen_load_resfiles(resdir):
        for i, idx in enumerate(data['idx']):
            idx = int(idx)
            for key, per_idx in res_per_layer.items():
                per_idx.setdefault(idx, []).append(data[key][i])
    # Mean over all the multiple predictions per key
    final_res = {}
    for key, per_idx in res_per_layer.items():
        if not per_idx:
            continue
        max_idx = max(per_idx.keys())
        key_output = np.zeros([max_idx + 1] + list(per_idx[0][0].shape))
        for idx, vals in per_idx.items():
            key_output[idx] = np.mean(np.stack(vals), axis=0)
        final_res[key] = key_output
    return final_res
def get_epoch_from_resdir(conf_path, run_id=0, results_dir='results/'):
    """Return the epoch recorded in the run's result files, or None."""
    resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
    first = next(gen_load_resfiles(resdir))
    if 'epoch' not in first:
        return None
    # All entries should match; min is a safe reduction.
    return np.min(first['epoch'])
def read_all_results(conf_path, run_id=0):
    """Read results from every `results*` directory for this config/run."""
    pattern = osp.join(OUTPUT_DIR, conf_path, str(run_id),
                       RESULTS_SAVE_DIR_PREFIX + '*')
    all_res = {}
    for resdir in glob.glob(pattern):
        dirname = osp.basename(resdir)
        all_res[dirname] = read_results(conf_path,
                                        run_id,
                                        results_dir=dirname)
    return all_res
def read_file_into_list(fpath):
    """Read cli overrides from file; return the per-run argument lists."""
    args_lst = []
    with open(fpath, 'r') as fin:
        for line in fin:
            # Drop trailing comments and whitespace; skip empty lines.
            stripped = line.split('#')[0].strip()
            if stripped:
                args_lst.append(stripped)
    # Importing this on the global scope does not work .. gives the
    # super(cls, self).. error
    # https://thomas-cokelaer.info/blog/2011/09/382/
    # Probably some issue with auto package reload in notebooks for py2.7
    # packages..
    from hydra._internal.core_plugins.basic_sweeper import BasicSweeper
    from hydra.core.override_parser.overrides_parser import OverridesParser
    sweeper = BasicSweeper(max_batch_size=None)
    parser = OverridesParser.create()
    overrides = parser.parse_overrides(args_lst)
    return sweeper.split_arguments(overrides, max_batch_size=None)[0]
def get_config(cfg_fpath, run_id=0):
    """Compose the hydra config(s) for an overrides file.

    Returns the config for `run_id`, or the full list when run_id is None.
    """
    overrides_all = read_file_into_list('../' + cfg_fpath)
    # https://github.com/facebookresearch/hydra/issues/716 should fix the issue
    # with interpolation not working in notebook etc.
    # However it can't handle ":" style custom interpolation, so need to
    # override those.
    cfg_all = []
    for overrides in overrides_all:
        overrides.append('cwd="../"')
        with hydra_initialize(config_path='../conf'):
            cfg_all.append(
                hydra_compose(config_name='config.yaml',
                              return_hydra_config=True,
                              overrides=overrides))
    return cfg_all if run_id is None else cfg_all[run_id]
def get_dataset(cfg_fpath,
                run_id=0,
                dataset_cfg_key=DATASET_EVAL_CFG_KEY,
                dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
    """Instantiate the (eval) dataset object described by a config/run."""
    cfg = get_config(cfg_fpath, run_id)
    sys.path.append('../')
    dataset_cfg = getattr(cfg, dataset_cfg_key + dataset_key_suffix)
    # frames_per_clip=1 since we only need labels/metadata, not video frames.
    return hydra.utils.instantiate(dataset_cfg,
                                   frames_per_clip=1,
                                   _recursive_=False)
def overlay_text(clip, texts):
    """
    Args:
        clip: Moviepy clip
        texts: List of 2 strings (corr to GT and pred) to overlay onto the clip
    Returns:
        CompositeVideoClip with the GT/pred annotation at the top-right,
        on a pink background when GT != prediction.
    """
    bg_color = 'white' if texts[0] == texts[1] else 'pink'
    # Build new strings rather than mutating the caller's `texts` list in
    # place (the previous version modified it, surprising callers that
    # reused the list).
    labeled = ['GT: ' + texts[0], 'Pred: ' + texts[1]]
    textclip = (mpy.TextClip(str(labeled), bg_color=bg_color).set_duration(
        clip.duration).set_pos(("right", "top")))
    return mpy.CompositeVideoClip([clip, textclip])
def compute_topk(predictions, labels, k, classes=None):
    """Top-k accuracy (percentage), optionally restricted to a class subset.

    Args:
        predictions (N, K)
        labels (N,)
        classes: (C', ): Set of classes to compute over. By default, uses
            all classes
    """
    if classes is None:
        classes = np.unique(labels)
    # Subselect items belonging to the requested classes. Converting to list
    # since classes are at times dict_values, which doesn't directly convert
    # to np.array.
    keep = np.isin(labels, list(classes))
    kept_preds = predictions[keep]
    kept_labels = labels[keep]
    # Indices of the k highest-scoring classes per row (order within the k
    # doesn't matter for accuracy).
    topk_idx = np.argpartition(kept_preds, -k, axis=-1)[:, -k:]
    hits = np.any(kept_labels[:, np.newaxis] == topk_idx, axis=-1)
    return np.mean(hits) * 100.0
def combine_verb_noun_preds(res_verb, res_noun):
    """Combine verb and noun logits into joint action scores.

    Args:
        res_verb (matrix with NxC1 dims)
        res_noun (matrix with NxC2 dims)
    Returns:
        res_action (matrix with Nx(C1 * C2) dims)
    """
    num_elts = res_verb.shape[0]
    # Normalize each head's logits into probabilities.
    verb_probs = softmax(res_verb, axis=-1)
    noun_probs = softmax(res_noun, axis=-1)
    # Per-row outer product gives the joint (verb, noun) grid; flatten it so
    # action_id = verb_id * C2 + noun_id.
    joint = np.einsum('ij,ik->ijk', verb_probs, noun_probs)
    return joint.reshape((num_elts, -1))
def compute_conf_mat(predictions, target):
    """Confusion matrix (C x C) with rows = target class, cols = predicted."""
    def _one_hot(indices, num_classes):
        onehot = torch.zeros(indices.shape[0],
                             num_classes,
                             *indices.shape[1:],
                             device=indices.device)
        # rgirdhar: When test on test set, there will be some data points
        # where we don't have the labels (negative ids) -- those are skipped.
        return onehot.scatter_(1, indices[indices >= 0].unsqueeze(1), 1)

    num_classes = predictions.shape[1]
    assert predictions.shape[0] == target.shape[0]
    with torch.no_grad():
        target_1hot_t = _one_hot(target, num_classes).transpose(0, 1).float()
        pred_labels = torch.argmax(predictions, dim=1).reshape(-1)
        pred_1hot = _one_hot(pred_labels, num_classes).float()
        return torch.matmul(target_1hot_t, pred_1hot)
def mean_class_accuracy(conf_mat):
    """Mean of per-class accuracies computed from a confusion matrix."""
    # Increase floating point precision similar to forecasting HOI
    conf_mat = conf_mat.type(torch.float64)
    # Epsilon guards against empty classes (zero row sums).
    per_cls_counts = conf_mat.sum(dim=1) + 1e-15
    per_cls_hits = conf_mat.diag()
    return (per_cls_hits / per_cls_counts).mean().item()
def compute_accuracy(predictions, labels, classes=None):
    """
    Compute top-1/top-5 accuracy, recall@5, and mean-class accuracy.

    Args:
        predictions: (B, C) logits
        labels: (B, )
        classes: OrderedDict[name (str), cls_id (int)]
    Returns:
        (top_1, top_5, recall@5, top1_meanclass, recall@5_per_class) where the
        first four are percentages and the last is a dict cls_id -> value
        (NaN per class when per-class AR is unavailable).
    """
    # This can happen when computing tail class accuracies and it's not
    # specified for the test set
    if predictions.size == 0:
        return [float('nan')] * 5
    labels = labels.astype(np.int64)
    if classes is not None:
        classes_to_keep = list(classes.values())
    else:
        classes_to_keep = range(max(labels) + 1)
    top_1 = compute_topk(predictions, labels, 1, classes=classes_to_keep)
    top_5 = compute_topk(predictions, labels, 5, classes=classes_to_keep)
    try:
        # Recall@5 from the RULSTM codebase (external.rulstm).
        ar_outputs = topk_recall(predictions,
                                 labels,
                                 k=5,
                                 classes=classes_to_keep)
        if isinstance(ar_outputs, tuple):
            # This happens if RULSTM code is modified to return per-class AR
            # values
            ar5, ar5_per_cls = ar_outputs
            ar5_per_cls = {k: v * 100.0 for k, v in ar5_per_cls.items()}
        else:
            ar5 = ar_outputs
            ar5_per_cls = {c: float('nan') for c in classes_to_keep}
    except ZeroDivisionError:
        # This happens when it can't find any true classes, the code
        # can't deal with that
        ar5 = float('nan')
        ar5_per_cls = {c: float('nan') for c in classes_to_keep}
    # Compute a mean class accuracy (used in EGTEA) -- accuracy per class and
    # then mean over the classes
    conf_mat = compute_conf_mat(torch.from_numpy(predictions),
                                torch.from_numpy(labels))
    # Make sure conf mat makes sense: top-1 recomputed from the confusion
    # matrix should agree with compute_topk (within tolerance).
    top_1_confmat = 100.0 * (conf_mat.diag()[classes_to_keep].sum() /
                             conf_mat[classes_to_keep].sum())
    if (not np.isnan(top_1) and not np.isnan(top_1_confmat)
            and not np.isclose(top_1, top_1_confmat, atol=1.0)):
        # Using a large atol margin cos conf_mat comp happens on GPUs and can
        # be non deterministic, so might not match sometimes..
        # Save the outputs for analysis
        with open('debug_acc.pkl', 'wb') as fout:
            pkl.dump(predictions, fout)
            pkl.dump(labels, fout)
            pkl.dump(conf_mat, fout)
        raise ValueError(f'top1 ({top_1}) doesnt match what I get from '
                         f'conf_mat ({top_1_confmat}). This could happen '
                         f'if the model predicts all 0s for some data points '
                         f'and hence argmax is not defined and behaves '
                         f'differently in numpy and torch '
                         f'(https://github.com/pytorch/pytorch/issues/14147)')
    top1_meancls = 100.0 * mean_class_accuracy(conf_mat)
    return top_1, top_5, ar5 * 100, top1_meancls, ar5_per_cls
def print_accuracies_epic(metrics: dict, prefix: str = ''):
    """Pretty-print the metrics dict produced by compute_accuracies_epic.

    Args:
        metrics: Dict with keys like 'vtop1', 'nrec5_ms', ... The EK100
            tail/unseen keys ('vrec5_tail' etc.) are optional.
        prefix: Tag printed in front of every line (e.g. the run name).
    """
    print(f"[{prefix}] Accuracies verb/noun/action: "
          f"{metrics['vtop1']:.1f} {metrics['vtop5']:.1f} "
          f"{metrics['ntop1']:.1f} {metrics['ntop5']:.1f} "
          f"{metrics['atop1']:.1f} {metrics['atop5']:.1f} ")
    print(f"[{prefix}] Mean class top-1 accuracies verb/noun/action: "
          f"{metrics['vtop1_meancls']:.1f} "
          f"{metrics['ntop1_meancls']:.1f} "
          f"{metrics['atop1_meancls']:.1f} ")
    print(f"[{prefix}] Recall@5 verb/noun/action: "
          f"{metrics['vrec5']:.1f} {metrics['nrec5']:.1f} "
          f"{metrics['arec5']:.1f} ")
    print(f"[{prefix}] Recall@5 many shot verb/noun/action: "
          f"{metrics['vrec5_ms']:.1f} {metrics['nrec5_ms']:.1f} "
          f"{metrics['arec5_ms']:.1f} ")
    if 'vrec5_tail' in metrics:
        # assuming the others for tail/unseen will be in there too, since
        # they are all computed at one place for ek100
        print(f"[{prefix}] Recall@5 tail verb/noun/action: "
              f"{metrics['vrec5_tail']:.1f} {metrics['nrec5_tail']:.1f} "
              f"{metrics['arec5_tail']:.1f} ")
        print(f"[{prefix}] Recall@5 unseen verb/noun/action: "
              f"{metrics['vrec5_unseen']:.1f} {metrics['nrec5_unseen']:.1f} "
              f"{metrics['arec5_unseen']:.1f} ")
def get_logits_from_results(results):
    """Extract logits from a results dict, handling old and new key layouts."""
    # Oldest format stored logits directly under 'logits'.
    if 'logits' in results:
        return results['logits']
    # Newer version, as of Nov 3 2020: keys namespaced as 'logits/<head>'.
    logits_keys = [key for key in results if key.startswith('logits/')]
    if len(logits_keys) == 1:
        return results[logits_keys[0]]
    # Multiple heads: return all of them, keyed by name.
    return {key: results[key] for key in logits_keys}
def get_epic_action_accuracy(run_info_verb, run_info_noun):
    """Compute & print EPIC accuracies from separate verb and noun runs.

    Action accuracies are derived implicitly by combining the verb and noun
    predictions.
    """
    # TODO also compute with many-shot classes for EPIC 55
    res_verb = get_logits_from_results(read_results(*run_info_verb))
    res_noun = get_logits_from_results(read_results(*run_info_noun))
    dataset_verb = get_dataset(*run_info_verb)
    vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
        res_verb, dataset_verb.df['verb_class'].values)
    dataset_noun = get_dataset(*run_info_noun)
    ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
        res_noun, dataset_noun.df['noun_class'].values)
    assert (len(dataset_verb.df) == len(res_verb) == len(dataset_noun.df) ==
            len(res_noun))
    res_action = combine_verb_noun_preds(res_verb, res_noun)
    # action_id = verb_id * num_nouns + noun_id, matching the flattening done
    # by combine_verb_noun_preds.
    true_action = (
        dataset_verb.df['verb_class'].values * len(dataset_noun.classes) +
        dataset_noun.df['noun_class'].values)
    atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
        res_action, true_action)
    nan = float('nan')
    print_accuracies_epic({
        'vtop1': vtop1, 'vtop5': vtop5, 'vrec5': vrec5,
        'vrec5_ms': nan,  # TODO
        'vtop1_meancls': vtop1_meancls, 'vrec5_per_cls': vrec5_per_cls,
        'ntop1': ntop1, 'ntop5': ntop5, 'nrec5': nrec5,
        'nrec5_ms': nan,  # TODO
        'ntop1_meancls': ntop1_meancls, 'nrec5_per_cls': nrec5_per_cls,
        'atop1': atop1, 'atop5': atop5, 'arec5': arec5,
        'arec5_ms': nan,  # TODO
        'atop1_meancls': atop1_meancls, 'arec5_per_cls': arec5_per_cls,
    })
def epic100_unseen_tail_eval(probs, dataset):
    """Compute EK100 recall@5 on the tail and unseen-participant subsets.

    Args:
        probs: contains 3 elements: predictions for verb, noun and action
        dataset: Dataset object providing `df` and `rulstm_annotation_dir`.
    Returns:
        dict with {v,n,a}rec5_tail and {v,n,a}rec5_unseen metrics.
    """
    # based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L379
    def _read_ids(fname):
        # One-line helper: read a single-column id csv as a Series.
        # NOTE(review): `squeeze=True` was removed in pandas 2.0; if pandas is
        # upgraded, replace with `.squeeze("columns")` on the result.
        return pd.read_csv(osp.join(dataset.rulstm_annotation_dir, fname),
                           names=['id'],
                           squeeze=True)

    unseen_participants_ids = _read_ids(
        'validation_unseen_participants_ids.csv')
    tail_verbs_ids = _read_ids('validation_tail_verbs_ids.csv')
    tail_nouns_ids = _read_ids('validation_tail_nouns_ids.csv')
    tail_actions_ids = _read_ids('validation_tail_actions_ids.csv')
    # Now based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L495
    unseen_bool_idx = dataset.df.narration_id.isin(
        unseen_participants_ids).values
    tail_verbs_bool_idx = dataset.df.narration_id.isin(tail_verbs_ids).values
    tail_nouns_bool_idx = dataset.df.narration_id.isin(tail_nouns_ids).values
    tail_actions_bool_idx = dataset.df.narration_id.isin(
        tail_actions_ids).values
    # Recall@5 restricted to the tail subsets.
    _, _, vrec5_tail, _, _ = compute_accuracy(
        probs[0][tail_verbs_bool_idx],
        dataset.df.verb_class.values[tail_verbs_bool_idx])
    _, _, nrec5_tail, _, _ = compute_accuracy(
        probs[1][tail_nouns_bool_idx],
        dataset.df.noun_class.values[tail_nouns_bool_idx])
    _, _, arec5_tail, _, _ = compute_accuracy(
        probs[2][tail_actions_bool_idx],
        dataset.df.action_class.values[tail_actions_bool_idx])
    # Recall@5 restricted to unseen participants.
    _, _, vrec5_unseen, _, _ = compute_accuracy(
        probs[0][unseen_bool_idx],
        dataset.df.verb_class.values[unseen_bool_idx])
    _, _, nrec5_unseen, _, _ = compute_accuracy(
        probs[1][unseen_bool_idx],
        dataset.df.noun_class.values[unseen_bool_idx])
    _, _, arec5_unseen, _, _ = compute_accuracy(
        probs[2][unseen_bool_idx],
        dataset.df.action_class.values[unseen_bool_idx])
    return dict(
        vrec5_tail=vrec5_tail,
        nrec5_tail=nrec5_tail,
        arec5_tail=arec5_tail,
        vrec5_unseen=vrec5_unseen,
        nrec5_unseen=nrec5_unseen,
        arec5_unseen=arec5_unseen,
    )
def compute_accuracies_epic(probs, dataset):
    """Compute all EPIC metrics: verb/noun/action, many-shot, EK100 extras.

    Args:
        probs: [verb_scores, noun_scores, action_scores] arrays.
        dataset: EPIC dataset object (provides labels and many-shot classes).
    Returns:
        dict of metric name -> value, consumable by print_accuracies_epic.
    """
    manyshot = dataset.classes_manyshot
    vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
        probs[0], dataset.df.verb_class.values)
    # Many-shot recall defaults to NaN when the class lists are absent.
    vrec5_ms, nrec5_ms, arec5_ms = float('nan'), float('nan'), float('nan')
    if 'verb' in manyshot:
        _, _, vrec5_ms, _, _ = compute_accuracy(
            probs[0],
            dataset.df.verb_class.values,
            classes=manyshot['verb'])
    ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
        probs[1], dataset.df.noun_class.values)
    if 'noun' in manyshot:
        _, _, nrec5_ms, _, _ = compute_accuracy(
            probs[1],
            dataset.df.noun_class.values,
            classes=manyshot['noun'])
    atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
        probs[2], dataset.df.action_class.values)
    if 'action' in manyshot:
        _, _, arec5_ms, _, _ = compute_accuracy(
            probs[2],
            dataset.df.action_class.values,
            classes=manyshot['action'])
    res = dict(vtop1=vtop1, vtop5=vtop5, vrec5=vrec5, vrec5_ms=vrec5_ms,
               vtop1_meancls=vtop1_meancls, vrec5_per_cls=vrec5_per_cls,
               ntop1=ntop1, ntop5=ntop5, nrec5=nrec5, nrec5_ms=nrec5_ms,
               ntop1_meancls=ntop1_meancls, nrec5_per_cls=nrec5_per_cls,
               atop1=atop1, atop5=atop5, arec5=arec5, arec5_ms=arec5_ms,
               atop1_meancls=atop1_meancls, arec5_per_cls=arec5_per_cls)
    if dataset.version == epic_kitchens.EPIC100_VERSION:
        # EK100 defines additional tail/unseen evaluation splits.
        res.update(epic100_unseen_tail_eval(probs, dataset))
    return res
def get_epic_marginalize_verb_noun(
        run_info, dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
    """Read a run's logits; derive verb/noun scores by marginalizing if needed.

    Returns (accuracies dict, [verb, noun, action] scores, dataset).
    """
    res_action = get_logits_from_results(
        read_results(*run_info, results_dir=f'results{dataset_key_suffix}'))
    dataset = get_dataset(*run_info, dataset_key_suffix=dataset_key_suffix)
    if isinstance(res_action, dict):
        # Model had separate verb/noun heads; use those logits directly.
        print(f'Found logits outputs for verb noun as well [{run_info}]')
        res_verb = res_action['logits/verb']
        res_noun = res_action['logits/noun']
        res_action = res_action['logits/action']
    else:
        # Marginalize action probabilities into the verb/noun spaces using
        # the mapping matrices stored on the dataset object.
        action_probs = softmax(res_action, axis=-1)
        res_verb = np.matmul(
            action_probs,
            dataset.class_mappings[('verb', 'action')].numpy())
        res_noun = np.matmul(
            action_probs,
            dataset.class_mappings[('noun', 'action')].numpy())
    accuracies = compute_accuracies_epic([res_verb, res_noun, res_action],
                                         dataset)
    # Returning the actual scores for actions instead of the probs. Found
    # better results with this, and Sener et al. ECCV'20 does the same.
    return accuracies, [res_verb, res_noun, res_action], dataset
def read_scores_from_pkl(pkl_fpath):
    """Read verb/noun/action score arrays from an ActionBanks-style pkl dump."""
    with open(pkl_fpath, 'rb') as fin:
        data = pkl.load(fin)
    return [data['verb_scores'], data['noun_scores'], data['action_scores']]
def load_json(fpath, verb_noun_to_action, nclasses):
    """
    Args:
        fpath: Path to the json
        verb_noun_to_action: Dict from (verb_id, noun_id) to action_id
        nclasses: A list of 3 elements, with the label space for verb/noun/act
    Returns: a dict with
        {uid1: score1, uid2: score2 ...}
    """
    assert len(nclasses) == 3, 'One for verb/noun/action'
    with open(fpath, 'r') as fin:
        preds = json.load(fin)
    all_res = []
    for space_id, space in enumerate(['verb', 'noun', 'action']):
        res = {}
        for uid, uid_preds in preds['results'].items():
            # Scores missing from the json default to 0; the json only keeps
            # the top ~100 entries which is plenty for top-5 style metrics.
            dense = np.zeros((nclasses[space_id], ))
            for raw_idx, score in uid_preds[space].items():
                if space == 'action':
                    # Action keys are "verb_id,noun_id" strings; map them to
                    # flat action ids via the provided lookup.
                    vn_pair = tuple(int(el) for el in raw_idx.split(','))
                    cls_idx = verb_noun_to_action[vn_pair]
                else:
                    cls_idx = int(raw_idx)
                dense[cls_idx] = score
            res[uid] = dense
        all_res.append(res)
    return all_res
def _concat_with_uids(scores, dataset, uid_key):
# Make a dict with the IDs from the dataset
# There will be 3 elements in scores -- verb, noun, action
return [
dict(
zip([str(el)
for el in dataset.df[uid_key].values], scores_per_space))
for scores_per_space in scores
]
def _normalize_scores(scores, p):
"""This brings the scores between 0 to 1, and normalizes by """
res = []
for scores_per_space in scores:
res.append({
uid: val / (np.linalg.norm(val, ord=p, axis=-1) + 0.000001)
for uid, val in scores_per_space.items()
})
return res
def _get_avg_norm_scores(scores, p):
"""Remove the UID keys etc, and then compute."""
scores = np.array([val for _, val in scores.items()])
return np.mean(np.linalg.norm(scores, ord=p, axis=-1), axis=0)
def get_epic_marginalize_late_fuse(
        run_infos,
        weights=1.0,
        dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX,
        uid_key='uid',
        eventual_fname='seen.json',
        normalize_before_combine=None):
    """
    Late-fuse predictions from multiple runs (weighted sum per uid).

    Args:
        run_infos: List of runs to fuse. Each element may be (a) precomputed
            score dicts, (b) a directory containing `eventual_fname` json,
            (c) a path to an ActionBanks .pkl dump, or (d) a
            (cfg_path, run_id) tuple readable by
            get_epic_marginalize_verb_noun.
        weights: Scalar (applied to all runs) or per-run list of fusion
            weights.
        eventual_fname: This is used to read prepackaged outputs from result
            files, and using the filename to know which file to look for
            when a directory is passed in as run info.
        normalize_before_combine: Set to non-None to normalize the features
            by that p-norm, and then combine. So the weights would have to be
            defined w.r.t normalized features.
    Returns:
        (accuracies dict, combined per-uid score dicts, last dataset read).
    """
    all_scores = []
    all_datasets = []
    for run_info_id, run_info in enumerate(run_infos):
        if isinstance(run_info[0], dict):
            # This is likely a pre-computed scores (so eg a nested
            # get_epic_marginalize.. function). So I just use the scores as is.
            scores = run_info
        elif os.path.isdir(run_info[0]):
            assert len(all_datasets) > 0, (
                'Need at least 1 datasets to be read before reading from json '
                'to figure the verb/noun -> action_id and '
                'to figure the total number of classes to gen feat vectors')
            scores = load_json(
                os.path.join(run_info[0], eventual_fname),
                all_datasets[-1].verb_noun_to_action,
                [list(el.values())[0].shape[-1] for el in all_scores[-1]])
        elif run_info[0].endswith('.pkl'):
            # This is the input used to read predictions from the action_banks
            # codebase, where I dump output into pkl and read here for late
            # fusion.
            scores = read_scores_from_pkl(run_info[0])
            assert len(
                all_datasets) > 0, 'At least one run_info must be passed in'
            scores = _concat_with_uids(scores, all_datasets[-1], uid_key)
        else:
            # Standard case: read logits from a (cfg, run_id) training run.
            accuracies, scores, dataset = get_epic_marginalize_verb_noun(
                run_info, dataset_key_suffix=dataset_key_suffix)
            scores = _concat_with_uids(scores, dataset, uid_key)
            print_accuracies_epic(accuracies, prefix=run_info)
            all_datasets.append(dataset)
        if normalize_before_combine is not None:
            scores = _normalize_scores(scores, p=normalize_before_combine)
        # Log the action-score scale so mismatched weights are easy to spot.
        logging.warning(
            'Adding scores from run_info %d with avg action L1 norm of %f',
            run_info_id, _get_avg_norm_scores(scores[-1], p=1))
        all_scores.append(scores)
    # Late fuse
    if isinstance(weights, float):
        weights = [weights] * len(run_infos)
    else:
        assert len(weights) == len(run_infos)
    # broadcastable_weights = np.array(weights)[:, np.newaxis, np.newaxis]
    # Combined scores by combining the corresponding score for each uid.
    combined = []
    for space_id in range(3):  # verb/noun/action
        scores_for_space = [scores[space_id] for scores in all_scores]
        # Take the union of all the UIDs we have score for
        total_uids = set.union(*[set(el.keys()) for el in scores_for_space])
        logging.warning('Combined UIDs: %d. UIDs in the runs %s',
                        len(total_uids),
                        [len(el.keys()) for el in scores_for_space])
        combined_for_space = {}
        for uid in total_uids:
            combined_for_space[uid] = []
            # Weighted sum over the runs that have a score for this uid.
            for run_id, scores_for_space_per_run in enumerate(
                    scores_for_space):
                if uid in scores_for_space_per_run:
                    combined_for_space[uid].append(
                        scores_for_space_per_run[uid] * weights[run_id])
            combined_for_space[uid] = np.sum(np.stack(combined_for_space[uid]),
                                             axis=0)
        combined.append(combined_for_space)
    # Now to compute accuracies, need to convert back to np arrays from dict.
    # Would only work for parts that are in the dataset
    combined_np = []
    for combined_for_space in combined:
        combined_np.append(
            np.array([
                combined_for_space[str(uid)]
                for uid in all_datasets[-1].df[uid_key].values
            ]))
    accuracies = compute_accuracies_epic(combined_np, all_datasets[-1])
    return accuracies, combined, all_datasets[-1]
def summarize_results(cfg_name, metric='arec5'):
    """
    Read all runs corr to cfg_name, and show the results in a human readable
    form with the config overrides (unique) that were active. It averages
    over runs too.
    """
    run_cfgs = read_file_into_list('../' + cfg_name)
    run_cfgs_hydra = get_config(cfg_name, run_id=None)
    # Convert to dicts
    run_cfgs = [(i, dict([el.split('=') for el in conf]))
                for i, conf in enumerate(run_cfgs)]
    # Keep only the stuff that changes across them
    run_cfgs = subselect_dict_keys_diff(run_cfgs)
    all_res = {}
    for (run_id, params), cfg_hydra in tqdm(zip(run_cfgs, run_cfgs_hydra),
                                            total=len(run_cfgs),
                                            desc='Loading results'):
        try:
            accuracies, _, _ = get_epic_marginalize_verb_noun(
                (cfg_name, run_id))
            epoch = get_epoch_from_resdir(cfg_name, run_id)
        except (EmptyResdirError, OSError):  # H5 didn't let it read
            continue
        if epoch != cfg_hydra.train.num_epochs:
            # This training has not finished
            continue
        # Runs that only differ by their 'run_id' override are repeats of the
        # same config; group them under one params_hash, keyed by run_id.
        run_id = 0
        if 'run_id' in params:
            run_id = int(params['run_id'])
            del params['run_id']
        params_hash = tuple(sorted(list(params.items())))
        if params_hash not in all_res:
            all_res[params_hash] = {}
        all_res[params_hash][run_id] = accuracies[metric]
    # Report mean/std of the metric across repeats for each unique config.
    for params_hash in all_res:
        run_ids, values = zip(*all_res[params_hash].items())
        print(f'{params_hash} [{run_ids}]: [{values}] '
              f'mean: {np.mean(values)}, std: {np.std(values)}')
def plot_per_cls_perf(run_infos_all: list,
                      names: list,
                      metrics: list = ['vrec5_per_cls', 'nrec5_per_cls'],
                      cls_types: list = ['verb', 'noun'],
                      show_topn: int = 10,
                      xticks_rotation: float = 0,
                      show_subset: callable = None,
                      outfpath: str = 'figs/improved/'):
    """
    Plot per-class recall@5 for multiple methods, keeping only the classes
    with the largest gain between the first and last method.

    Args:
        run_infos_all: [[(cfg, sweep_id), (cfg, sweep_id)...],
                        [(cfg, sweep_id), (cfg, sweep_id)...], ...]
        names: The name for each run_info group
        metrics: There will be 1 graph for each
        cls_types: Class namespace (e.g. verb/noun) matching each metric.
        show_topn: How many of the most-improved classes to plot.
        xticks_rotation: Rotation (degrees) for the class labels on x axis.
        show_subset: Optional predicate over class names; classes failing it
            are dropped before picking the top-N gains.
        outfpath: Directory where one PDF per cls_type is saved.
    """
    assert len(run_infos_all) == len(names)
    assert len(metrics) == len(cls_types)
    final_accs = {cls_type: [] for cls_type in cls_types}
    for i, run_infos in enumerate(tqdm(run_infos_all, desc='Reading acc')):
        for run_id, run_info in enumerate(run_infos):
            cfg_fpath, sweep_id = run_info
            all_accuracies, _, dataset = get_epic_marginalize_verb_noun(
                (cfg_fpath, sweep_id))
            for metric, cls_type in zip(metrics, cls_types):
                accuracies = all_accuracies[metric]
                assert isinstance(accuracies,
                                  dict), 'Supports per-class for now'
                classes = operator.attrgetter(f'{cls_type}_classes')(dataset)
                cls_id_to_name = {v: k for k, v in classes.items()}
                for cls_id, score in accuracies.items():
                    final_accs[cls_type].append({
                        'method': names[i],
                        'run_id': run_id,
                        'cls_name': cls_id_to_name[cls_id],
                        'accuracy': score,
                    })
    for cls_type in final_accs:
        accs = pd.DataFrame(final_accs[cls_type])
        # Print logs
        for method in names:
            for run_id in accs.run_id.unique():
                # Bug fix: combine both conditions in a single boolean mask.
                # The original chained indexing (accs[mask1][mask2]) applies
                # the second, full-length mask to an already-filtered frame,
                # which pandas only handles by reindexing (UserWarning) and
                # which breaks on non-aligned indices.
                this_acc = (accs[(accs.method == method)
                                 & (accs.run_id == run_id)].accuracy.mean())
                print(f'Check {method} {run_id}: {this_acc}')
        mean_acc_by_cls = accs.groupby(['method',
                                        'cls_name']).mean().reset_index()
        first_col = mean_acc_by_cls[mean_acc_by_cls.method == names[0]]
        last_col = mean_acc_by_cls[mean_acc_by_cls.method == names[-1]]
        merged = first_col[['cls_name', 'accuracy'
                            ]].merge(last_col[['cls_name', 'accuracy']],
                                     on='cls_name',
                                     how='outer',
                                     suffixes=['_first', '_last'])
        # get the largest gains
        gains = (merged['accuracy_last'] -
                 merged['accuracy_first']).sort_values()
        gained_labels = merged.loc[gains.index].cls_name.tolist()
        if show_subset is not None:
            gained_labels = [el for el in gained_labels if show_subset(el)]
        gained_labels = gained_labels[-show_topn:]
        accs_largegains = accs[accs.cls_name.isin(gained_labels)]
        fig = plt.figure(num=None,
                         figsize=(2 * len(gained_labels), 4),
                         dpi=300)
        ax = sns.barplot(x='cls_name',
                         y='accuracy',
                         hue='method',
                         data=accs_largegains,
                         order=gained_labels,
                         errwidth=1.0)
        ax.set_xlabel('Classes')
        ax.set_ylabel('Recall @ 5')
        ax.set_xticklabels(ax.get_xticklabels(),
                           rotation=xticks_rotation,
                           ha='center')
        plt.show()
        save_graph(fig, os.path.join(outfpath, cls_type + '.pdf'))
def get_struct_outputs_per_dataset(run_infos,
                                   weights,
                                   dataset_key_suffix,
                                   uid_key='uid',
                                   eventual_fname='seen.json',
                                   normalize_before_combine=None):
    """Build the challenge-submission dict for one dataset split.

    Late-fuses the given runs, then converts the per-UID verb/noun/action
    scores into the nested {uid: {verb/noun/action: {class: score}}}
    structure expected by the EPIC-Kitchens evaluation server.

    Args:
        run_infos: List of (cfg, sweep_id) run identifiers to fuse.
        weights: Per-run fusion weights, forwarded to the late-fuse helper.
        dataset_key_suffix: Suffix selecting the split ('' seen, '_s2' unseen).
        uid_key: Column name holding the segment UID (e.g. 'narration_id').
        eventual_fname: Output filename hint, forwarded to the fuse helper.
        normalize_before_combine: Optional normalization flag for fusion.

    Returns:
        Dict with 'version', 'challenge' and per-UID 'results'.
    """
    _, combined, dataset = get_epic_marginalize_late_fuse(
        run_infos,
        weights,
        dataset_key_suffix=dataset_key_suffix,
        uid_key=uid_key,
        eventual_fname=eventual_fname,
        normalize_before_combine=normalize_before_combine)
    results = {}
    # Now the following may not be true since if the run_info contains an
    # actual json, it might have more rows etc.
    # assert len(combined[0]) == len(dataset)
    # Invert the (verb, noun) -> action mapping so action IDs can be
    # rendered back as "verb,noun" strings for the submission format.
    action_to_verb_noun = {
        val: key
        for key, val in dataset.verb_noun_to_action.items()
    }
    for uid in tqdm(combined[0].keys(), desc='Computing res'):
        # combined[0]/[1]/[2] hold verb / noun / action scores per UID.
        verb_res = {f'{j}': val for j, val in enumerate(combined[0][uid])}
        noun_res = {f'{j}': val for j, val in enumerate(combined[1][uid])}
        # Keep only the 100 highest-scoring actions, sorted descending.
        top_100_actions = sorted(np.argpartition(combined[2][uid],
                                                -100)[-100:],
                                 key=lambda x: -combined[2][uid][x])
        action_res = {
            ','.join((str(el)
                      for el in action_to_verb_noun[j])): combined[2][uid][j]
            for j in top_100_actions
        }
        results[f'{uid}'] = {
            'verb': verb_res,
            'noun': noun_res,
            'action': action_res,
        }
    # Add in all the discarded dfs with uniform distribution
    if dataset.discarded_df is not None:
        for _, row in dataset.discarded_df.iterrows():
            if str(row[uid_key]) in results:
                continue
            # Dummy all-zero scores for segments the dataset discarded,
            # since the server expects every UID to be present.
            results[f'{row[uid_key]}'] = {
                'verb':
                {f'{j}': 0.0
                 for j in range(len(dataset.verb_classes))},
                'noun':
                {f'{j}': 0.0
                 for j in range(len(dataset.noun_classes))},
                'action': {f'0,{j}': 0.0
                           for j in range(100)},
            }
    output_dict = {
        'version': f'{dataset.version}',
        'challenge': dataset.challenge_type,
        'results': results
    }
    return output_dict
def package_results_for_submission(run_infos,
                                   weights,
                                   normalize_before_combine=None):
    """Generate seen/unseen submission JSONs and zip them for upload.

    Fuses the given runs for both splits, writes the two JSON files into
    the first run's output directory, and packages them into submit.zip.
    """
    split_results = {}
    for suffix, fname in (('', 'seen.json'), ('_s2', 'unseen.json')):
        split_results[fname] = get_struct_outputs_per_dataset(
            run_infos,
            weights,
            dataset_key_suffix=suffix,
            eventual_fname=fname,
            normalize_before_combine=normalize_before_combine)
    # write it out in the first run's output dir
    output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
                          'challenge')
    print(f'Saving outputs to {output_dir}')
    os.makedirs(output_dir, exist_ok=True)
    for fname, res in split_results.items():
        with open(osp.join(output_dir, fname), 'w') as fout:
            json.dump(res, fout, indent=4)
    subprocess.check_output(
        f'zip -j {output_dir}/submit.zip '
        f'{output_dir}/seen.json '
        f'{output_dir}/unseen.json ',
        shell=True)
def package_results_for_submission_ek100(run_infos, weights, sls=[1, 4, 4]):
    """Build and zip the EK100 test-set submission JSON.

    Args:
        run_infos: (cfg, sweep_id) run identifiers to fuse.
        weights: Per-run fusion weights.
        sls: Supervision-level annotations [pretrain, train-labels,
            train-data] required by the EK100 server.
    """
    res = get_struct_outputs_per_dataset(run_infos,
                                         weights,
                                         dataset_key_suffix='',
                                         uid_key='narration_id',
                                         eventual_fname='test.json')
    # Attach the supervision-level fields the server expects.
    for field, level in zip(('sls_pt', 'sls_tl', 'sls_td'), sls):
        res[field] = level
    # write it out in the first run's output dir
    output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
                          'challenge')
    print(f'Saving outputs to {output_dir}')
    os.makedirs(output_dir, exist_ok=True)
    with open(osp.join(output_dir, 'test.json'), 'w') as fout:
        json.dump(res, fout, indent=4)
    subprocess.check_output(
        f'zip -j {output_dir}/submit.zip '
        f'{output_dir}/test.json ',
        shell=True)
| [
"matplotlib.rc",
"hydra.experimental.compose",
"pickle.dump",
"torch.argmax",
"tqdm.notebook.tqdm",
"numpy.einsum",
"moviepy.editor.CompositeVideoClip",
"numpy.isnan",
"numpy.argpartition",
"numpy.isclose",
"pickle.load",
"numpy.linalg.norm",
"pathlib.Path",
"numpy.mean",
"torch.no_grad"... | [((806, 827), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (821, 827), False, 'import sys\n'), ((1230, 1248), 'proglog.notebook', 'proglog.notebook', ([], {}), '()\n', (1246, 1248), False, 'import proglog\n'), ((1291, 1303), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (1300, 1303), False, 'import math\n'), ((1304, 1330), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (1317, 1330), True, 'import seaborn as sns\n'), ((1605, 1641), 'matplotlib.rc', 'matplotlib.rc', (['"""axes"""'], {'edgecolor': '"""k"""'}), "('axes', edgecolor='k')\n", (1618, 1641), False, 'import matplotlib\n'), ((1642, 1672), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {'size': '(30)'}), "('font', size=30)\n", (1655, 1672), False, 'import matplotlib\n'), ((1774, 1833), 'seaborn.despine', 'sns.despine', ([], {'top': '(True)', 'right': '(True)', 'left': '(False)', 'bottom': '(False)'}), '(top=True, right=True, left=False, bottom=False)\n', (1785, 1833), True, 'import seaborn as sns\n'), ((1870, 1902), 'os.path.join', 'os.path.join', (['root_dir', 'outfpath'], {}), '(root_dir, outfpath)\n', (1882, 1902), False, 'import os\n'), ((4096, 4133), 'logging.info', 'logging.info', (['"""Reading from resfiles"""'], {}), "('Reading from resfiles')\n", (4108, 4133), False, 'import logging\n'), ((5230, 5251), 'numpy.min', 'np.min', (["data['epoch']"], {}), "(data['epoch'])\n", (5236, 5251), True, 'import numpy as np\n'), ((6464, 6497), 'hydra._internal.core_plugins.basic_sweeper.BasicSweeper', 'BasicSweeper', ([], {'max_batch_size': 'None'}), '(max_batch_size=None)\n', (6476, 6497), False, 'from hydra._internal.core_plugins.basic_sweeper import BasicSweeper\n'), ((6511, 6535), 'hydra.core.override_parser.overrides_parser.OverridesParser.create', 'OverridesParser.create', ([], {}), '()\n', (6533, 6535), False, 'from hydra.core.override_parser.overrides_parser import OverridesParser\n'), ((7714, 7736), 'sys.path.append', 
'sys.path.append', (['"""../"""'], {}), "('../')\n", (7729, 7736), False, 'import sys\n'), ((8397, 8437), 'moviepy.editor.CompositeVideoClip', 'mpy.CompositeVideoClip', (['[clip, textclip]'], {}), '([clip, textclip])\n', (8419, 8437), True, 'import moviepy.editor as mpy\n'), ((9527, 9553), 'scipy.special.softmax', 'softmax', (['res_verb'], {'axis': '(-1)'}), '(res_verb, axis=-1)\n', (9534, 9553), False, 'from scipy.special import softmax\n'), ((9569, 9595), 'scipy.special.softmax', 'softmax', (['res_noun'], {'axis': '(-1)'}), '(res_noun, axis=-1)\n', (9576, 9595), False, 'from scipy.special import softmax\n'), ((30880, 30914), 'launch.subselect_dict_keys_diff', 'subselect_dict_keys_diff', (['run_cfgs'], {}), '(run_cfgs)\n', (30904, 30914), False, 'from launch import subselect_dict_keys_diff\n'), ((39128, 39166), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (39139, 39166), False, 'import os\n'), ((39383, 39512), 'subprocess.check_output', 'subprocess.check_output', (['f"""zip -j {output_dir}/submit.zip {output_dir}/seen.json {output_dir}/unseen.json """'], {'shell': '(True)'}), "(\n f'zip -j {output_dir}/submit.zip {output_dir}/seen.json {output_dir}/unseen.json '\n , shell=True)\n", (39406, 39512), False, 'import subprocess\n'), ((40218, 40256), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (40229, 40256), False, 'import os\n'), ((40363, 40462), 'subprocess.check_output', 'subprocess.check_output', (['f"""zip -j {output_dir}/submit.zip {output_dir}/test.json """'], {'shell': '(True)'}), "(\n f'zip -j {output_dir}/submit.zip {output_dir}/test.json ', shell=True)\n", (40386, 40462), False, 'import subprocess\n'), ((1919, 1945), 'os.path.dirname', 'osp.dirname', (['final_oufpath'], {}), '(final_oufpath)\n', (1930, 1945), True, 'import os.path as osp\n'), ((2512, 2537), 'os.path.join', 'osp.join', (['resdir', '"""*.pth"""'], {}), "(resdir, '*.pth')\n", 
(2520, 2537), True, 'import os.path as osp\n'), ((3767, 3780), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3778, 3780), False, 'from collections import OrderedDict\n'), ((5491, 5511), 'os.path.basename', 'osp.basename', (['resdir'], {}), '(resdir)\n', (5503, 5511), True, 'import os.path as osp\n'), ((8709, 8726), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (8718, 8726), True, 'import numpy as np\n'), ((9034, 9075), 'numpy.argpartition', 'np.argpartition', (['predictions', '(-k)'], {'axis': '(-1)'}), '(predictions, -k, axis=-1)\n', (9049, 9075), True, 'import numpy as np\n'), ((9120, 9177), 'numpy.any', 'np.any', (['(labels[:, np.newaxis] == top_predictions)'], {'axis': '(-1)'}), '(labels[:, np.newaxis] == top_predictions, axis=-1)\n', (9126, 9177), True, 'import numpy as np\n'), ((9824, 9914), 'torch.zeros', 'torch.zeros', (['indices.shape[0]', 'num_classes', '*indices.shape[1:]'], {'device': 'indices.device'}), '(indices.shape[0], num_classes, *indices.shape[1:], device=\n indices.device)\n', (9835, 9914), False, 'import torch\n'), ((10285, 10300), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10298, 10300), False, 'import torch\n'), ((10435, 10467), 'torch.argmax', 'torch.argmax', (['predictions'], {'dim': '(1)'}), '(predictions, dim=1)\n', (10447, 10467), False, 'import torch\n'), ((10599, 10637), 'torch.matmul', 'torch.matmul', (['target_1hot_t', 'pred_1hot'], {}), '(target_1hot_t, pred_1hot)\n', (10611, 10637), False, 'import torch\n'), ((11670, 11732), 'external.rulstm.RULSTM.utils.topk_recall', 'topk_recall', (['predictions', 'labels'], {'k': '(5)', 'classes': 'classes_to_keep'}), '(predictions, labels, k=5, classes=classes_to_keep)\n', (11681, 11732), False, 'from external.rulstm.RULSTM.utils import topk_recall\n'), ((12564, 12593), 'torch.from_numpy', 'torch.from_numpy', (['predictions'], {}), '(predictions)\n', (12580, 12593), False, 'import torch\n'), ((12627, 12651), 'torch.from_numpy', 'torch.from_numpy', 
(['labels'], {}), '(labels)\n', (12643, 12651), False, 'import torch\n'), ((17584, 17669), 'os.path.join', 'osp.join', (['dataset.rulstm_annotation_dir', '"""validation_unseen_participants_ids.csv"""'], {}), "(dataset.rulstm_annotation_dir,\n 'validation_unseen_participants_ids.csv')\n", (17592, 17669), True, 'import os.path as osp\n'), ((17829, 17901), 'os.path.join', 'osp.join', (['dataset.rulstm_annotation_dir', '"""validation_tail_verbs_ids.csv"""'], {}), "(dataset.rulstm_annotation_dir, 'validation_tail_verbs_ids.csv')\n", (17837, 17901), True, 'import os.path as osp\n'), ((18072, 18144), 'os.path.join', 'osp.join', (['dataset.rulstm_annotation_dir', '"""validation_tail_nouns_ids.csv"""'], {}), "(dataset.rulstm_annotation_dir, 'validation_tail_nouns_ids.csv')\n", (18080, 18144), True, 'import os.path as osp\n'), ((18317, 18391), 'os.path.join', 'osp.join', (['dataset.rulstm_annotation_dir', '"""validation_tail_actions_ids.csv"""'], {}), "(dataset.rulstm_annotation_dir, 'validation_tail_actions_ids.csv')\n", (18325, 18391), True, 'import os.path as osp\n'), ((22573, 22601), 'scipy.special.softmax', 'softmax', (['res_action'], {'axis': '(-1)'}), '(res_action, axis=-1)\n', (22580, 22601), False, 'from scipy.special import softmax\n'), ((23487, 23500), 'pickle.load', 'pkl.load', (['fin'], {}), '(fin)\n', (23495, 23500), True, 'import pickle as pkl\n'), ((24031, 24045), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (24040, 24045), False, 'import json\n'), ((25887, 25925), 'numpy.linalg.norm', 'np.linalg.norm', (['scores'], {'ord': 'p', 'axis': '(-1)'}), '(scores, ord=p, axis=-1)\n', (25901, 25925), True, 'import numpy as np\n'), ((32895, 32934), 'tqdm.notebook.tqdm', 'tqdm', (['run_infos_all'], {'desc': '"""Reading acc"""'}), "(run_infos_all, desc='Reading acc')\n", (32899, 32934), False, 'from tqdm.notebook import tqdm\n'), ((33983, 34017), 'pandas.DataFrame', 'pd.DataFrame', (['final_accs[cls_type]'], {}), '(final_accs[cls_type])\n', (33995, 34017), True, 
'import pandas as pd\n'), ((35427, 35541), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""cls_name"""', 'y': '"""accuracy"""', 'hue': '"""method"""', 'data': 'accs_largegains', 'order': 'gained_labels', 'errwidth': '(1.0)'}), "(x='cls_name', y='accuracy', hue='method', data=accs_largegains,\n order=gained_labels, errwidth=1.0)\n", (35438, 35541), True, 'import seaborn as sns\n'), ((35882, 35892), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (35890, 35892), True, 'import matplotlib.pyplot as plt\n'), ((39238, 39271), 'json.dump', 'json.dump', (['res_s1', 'fout'], {'indent': '(4)'}), '(res_s1, fout, indent=4)\n', (39247, 39271), False, 'import json\n'), ((39345, 39378), 'json.dump', 'json.dump', (['res_s2', 'fout'], {'indent': '(4)'}), '(res_s2, fout, indent=4)\n', (39354, 39378), False, 'import json\n'), ((40328, 40358), 'json.dump', 'json.dump', (['res', 'fout'], {'indent': '(4)'}), '(res, fout, indent=4)\n', (40337, 40358), False, 'import json\n'), ((2595, 2619), 'os.path.join', 'osp.join', (['resdir', '"""*.h5"""'], {}), "(resdir, '*.h5')\n", (2603, 2619), True, 'import os.path as osp\n'), ((7170, 7209), 'hydra.experimental.initialize', 'hydra_initialize', ([], {'config_path': '"""../conf"""'}), "(config_path='../conf')\n", (7186, 7209), True, 'from hydra.experimental import initialize as hydra_initialize, compose as hydra_compose\n'), ((7229, 7320), 'hydra.experimental.compose', 'hydra_compose', ([], {'config_name': '"""config.yaml"""', 'return_hydra_config': '(True)', 'overrides': 'overrides'}), "(config_name='config.yaml', return_hydra_config=True,\n overrides=overrides)\n", (7242, 7320), True, 'from hydra.experimental import initialize as hydra_initialize, compose as hydra_compose\n'), ((9653, 9696), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'res_verb', 'res_noun'], {}), "('ij,ik->ijk', res_verb, res_noun)\n", (9662, 9696), True, 'import numpy as np\n'), ((12834, 12849), 'numpy.isnan', 'np.isnan', (['top_1'], {}), '(top_1)\n', (12842, 
12849), True, 'import numpy as np\n'), ((12858, 12881), 'numpy.isnan', 'np.isnan', (['top_1_confmat'], {}), '(top_1_confmat)\n', (12866, 12881), True, 'import numpy as np\n'), ((12902, 12944), 'numpy.isclose', 'np.isclose', (['top_1', 'top_1_confmat'], {'atol': '(1.0)'}), '(top_1, top_1_confmat, atol=1.0)\n', (12912, 12944), True, 'import numpy as np\n'), ((13190, 13217), 'pickle.dump', 'pkl.dump', (['predictions', 'fout'], {}), '(predictions, fout)\n', (13198, 13217), True, 'import pickle as pkl\n'), ((13230, 13252), 'pickle.dump', 'pkl.dump', (['labels', 'fout'], {}), '(labels, fout)\n', (13238, 13252), True, 'import pickle as pkl\n'), ((13265, 13289), 'pickle.dump', 'pkl.dump', (['conf_mat', 'fout'], {}), '(conf_mat, fout)\n', (13273, 13289), True, 'import pickle as pkl\n'), ((24475, 24499), 'numpy.zeros', 'np.zeros', (['(nclasses[j],)'], {}), '((nclasses[j],))\n', (24483, 24499), True, 'import numpy as np\n'), ((26929, 26955), 'os.path.isdir', 'os.path.isdir', (['run_info[0]'], {}), '(run_info[0])\n', (26942, 26955), False, 'import os\n'), ((35917, 35958), 'os.path.join', 'os.path.join', (['outfpath', "(cls_type + '.pdf')"], {}), "(outfpath, cls_type + '.pdf')\n", (35929, 35958), False, 'import os\n'), ((39181, 39214), 'os.path.join', 'osp.join', (['output_dir', '"""seen.json"""'], {}), "(output_dir, 'seen.json')\n", (39189, 39214), True, 'import os.path as osp\n'), ((39286, 39321), 'os.path.join', 'osp.join', (['output_dir', '"""unseen.json"""'], {}), "(output_dir, 'unseen.json')\n", (39294, 39321), True, 'import os.path as osp\n'), ((40271, 40304), 'os.path.join', 'osp.join', (['output_dir', '"""test.json"""'], {}), "(output_dir, 'test.json')\n", (40279, 40304), True, 'import os.path as osp\n'), ((3019, 3042), 'h5py.File', 'h5py.File', (['resfile', '"""r"""'], {}), "(resfile, 'r')\n", (3028, 3042), False, 'import h5py\n'), ((4845, 4878), 'numpy.stack', 'np.stack', (['res_per_layer[key][idx]'], {}), '(res_per_layer[key][idx])\n', (4853, 4878), True, 'import 
numpy as np\n'), ((29712, 29745), 'numpy.stack', 'np.stack', (['combined_for_space[uid]'], {}), '(combined_for_space[uid])\n', (29720, 29745), True, 'import numpy as np\n'), ((37094, 37133), 'numpy.argpartition', 'np.argpartition', (['combined[2][uid]', '(-100)'], {}), '(combined[2][uid], -100)\n', (37109, 37133), True, 'import numpy as np\n'), ((976, 998), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (988, 998), False, 'import pathlib\n'), ((2849, 2875), 'torch.torch.is_tensor', 'torch.torch.is_tensor', (['val'], {}), '(val)\n', (2870, 2875), False, 'import torch\n'), ((27266, 27307), 'os.path.join', 'os.path.join', (['run_info[0]', 'eventual_fname'], {}), '(run_info[0], eventual_fname)\n', (27278, 27307), False, 'import os\n'), ((32008, 32023), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (32015, 32023), True, 'import numpy as np\n'), ((32032, 32046), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (32038, 32046), True, 'import numpy as np\n'), ((33401, 33443), 'operator.attrgetter', 'operator.attrgetter', (['f"""{cls_type}_classes"""'], {}), "(f'{cls_type}_classes')\n", (33420, 33443), False, 'import operator\n'), ((25591, 25626), 'numpy.linalg.norm', 'np.linalg.norm', (['val'], {'ord': 'p', 'axis': '(-1)'}), '(val, ord=p, axis=-1)\n', (25605, 25626), True, 'import numpy as np\n'), ((2917, 2936), 'torch.load', 'torch.load', (['resfile'], {}), '(resfile)\n', (2927, 2936), False, 'import torch\n'), ((3361, 3412), 'logging.warning', 'logging.warning', (['"""Unable to load %s (%s)"""', 'key', 'err'], {}), "('Unable to load %s (%s)', key, err)\n", (3376, 3412), False, 'import logging\n')] |
# house_price.py
# predict price from AC, sq ft, style, nearest school
# PyTorch 1.7.0-CPU Anaconda3-2020.02 Python 3.7.6
# Windows 10
import numpy as np
import time
import torch as T
device = T.device("cpu") # apply to Tensor or Module
# -----------------------------------------------------------
class HouseDataset(T.utils.data.Dataset):
    # Each row of the source file:
    #  AC   sq ft   style    price   school
    #  -1  0.2500  0 1 0    0.5650  0 1 0
    #   1  0.1275  1 0 0    0.3710  0 0 1
    # air condition: -1 = no, +1 = yes
    # style: art_deco, bungalow, colonial (one-hot)
    # school: johnson, kennedy, lincoln (one-hot)
    # Column 5 (price) is the regression target; the rest are predictors.

    def __init__(self, src_file, m_rows=None):
        """Load up to m_rows tab-separated rows from src_file."""
        raw = np.loadtxt(src_file, max_rows=m_rows,
                         usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8], delimiter="\t",
                         comments="#", skiprows=0, dtype=np.float32)
        predictor_cols = [0, 1, 2, 3, 4, 6, 7, 8]  # everything but price
        x_np = raw[:, predictor_cols]
        y_np = raw[:, 5].reshape(-1, 1)  # 2-D required
        self.x_data = T.tensor(x_np, dtype=T.float32).to(device)
        self.y_data = T.tensor(y_np, dtype=T.float32).to(device)

    def __len__(self):
        return self.x_data.shape[0]

    def __getitem__(self, idx):
        # tuple of (predictors, price) matrices
        return (self.x_data[idx, :], self.y_data[idx, :])
# -----------------------------------------------------------
class Net(T.nn.Module):
    """8-(10-10)-1 fully-connected regression network for house price."""

    def __init__(self):
        super(Net, self).__init__()
        self.hid1 = T.nn.Linear(8, 10)  # 8-(10-10)-1
        self.hid2 = T.nn.Linear(10, 10)
        self.oupt = T.nn.Linear(10, 1)
        # Glorot-uniform weights, zero biases, layer by layer
        # (same RNG consumption order as initializing each explicitly).
        for layer in (self.hid1, self.hid2, self.oupt):
            T.nn.init.xavier_uniform_(layer.weight)
            T.nn.init.zeros_(layer.bias)

    def forward(self, x):
        z = T.relu(self.hid1(x))
        z = T.relu(self.hid2(z))
        return self.oupt(z)  # no activation: raw price
# -----------------------------------------------------------
def accuracy(model, ds, pct):
    """Fraction of items whose predicted price is within pct (relative)
    of the true price. Assumes model is already in eval() mode."""
    n_correct = 0
    n_wrong = 0
    for i in range(len(ds)):
        X, Y = ds[i]  # (predictors, target)
        with T.no_grad():
            oupt = model(X)  # computed price
        target = Y.item()
        # correct iff |prediction - target| < pct * |target|
        if np.abs(oupt.item() - target) < np.abs(pct * target):
            n_correct += 1
        else:
            n_wrong += 1
    return (n_correct * 1.0) / (n_correct + n_wrong)
# -----------------------------------------------------------
def accuracy_quick(model, dataset, pct):
    """Vectorized accuracy: fraction of all items predicted within pct
    (relative) of the true price, in a single forward pass.
    Assumes model is already in eval() mode."""
    n = len(dataset)
    X = dataset[0:n][0]  # all predictor values
    Y = dataset[0:n][1]  # all target prices
    with T.no_grad():
        oupt = model(X)  # all computed prices
    # within-tolerance mask: |pred - target| < pct * |target|
    within = T.abs(oupt - Y) < T.abs(pct * Y)
    return T.sum(within, dim=0).item() / n
# -----------------------------------------------------------
def baseline_acc(ds, pct):
  """Accuracy of a linear-regression baseline using just sq. feet.

  The baseline is y = 1.9559 * sqft + 0.0987 (coefficients fit by a
  separate program). An item counts as correct when the baseline
  prediction is within pct (relative) of the true price.

  Bug fix: the comparison now uses the computed baseline prediction `y`.
  The original referenced the undefined name `oupt` (copy-paste from
  accuracy()), which raised NameError on the first iteration.
  """
  n_correct = 0; n_wrong = 0
  for i in range(len(ds)):
    (X, Y) = ds[i]             # (predictors, target)
    x = X[1].item()            # sq feet predictor
    y = 1.9559 * x + 0.0987    # computed price
    abs_delta = np.abs(y - Y.item())
    max_allow = np.abs(pct * Y.item())
    if abs_delta < max_allow:
      n_correct +=1
    else:
      n_wrong += 1
  acc = (n_correct * 1.0) / (n_correct + n_wrong)
  return acc
# -----------------------------------------------------------
def main():
  """End-to-end demo: load data, train the 8-(10-10)-1 net with periodic
  checkpoints, evaluate accuracy, make one prediction, save the model."""
  # 0. get started
  print("\nBegin predict House price \n")
  T.manual_seed(4)  # representative results
  np.random.seed(4)
  # 1. create DataLoader objects
  print("Creating Houses Dataset objects ")
  train_file = ".\\Data\\houses_train.txt"
  train_ds = HouseDataset(train_file)  # all 200 rows
  test_file = ".\\Data\\houses_test.txt"
  test_ds = HouseDataset(test_file)  # all 40 rows
  bat_size = 10
  train_ldr = T.utils.data.DataLoader(train_ds,
    batch_size=bat_size, shuffle=True)
  # 2. create network
  net = Net().to(device)
  # 3. train model
  max_epochs = 500
  ep_log_interval = 50   # checkpoint/log every 50 epochs
  lrn_rate = 0.005
  loss_func = T.nn.MSELoss()
  # optimizer = T.optim.SGD(net.parameters(), lr=lrn_rate)
  optimizer = T.optim.Adam(net.parameters(), lr=lrn_rate)
  print("\nbat_size = %3d " % bat_size)
  print("loss = " + str(loss_func))
  print("optimizer = Adam")
  print("max_epochs = %3d " % max_epochs)
  print("lrn_rate = %0.3f " % lrn_rate)
  print("\nStarting training with saved checkpoints")
  net.train()  # set mode
  for epoch in range(0, max_epochs):
    T.manual_seed(1+epoch)  # recovery reproducibility
    epoch_loss = 0  # for one full epoch
    for (batch_idx, batch) in enumerate(train_ldr):
      (X, Y) = batch  # (predictors, targets)
      optimizer.zero_grad()  # prepare gradients
      oupt = net(X)  # predicted prices
      loss_val = loss_func(oupt, Y)  # avg per item in batch
      epoch_loss += loss_val.item()  # accumulate avgs
      loss_val.backward()  # compute gradients
      optimizer.step()  # update wts
    if epoch % ep_log_interval == 0:
      print("epoch = %4d loss = %0.4f" % \
       (epoch, epoch_loss))
      # save checkpoint (timestamped so reruns do not clobber old ones)
      dt = time.strftime("%Y_%m_%d-%H_%M_%S")
      fn = ".\\Log\\" + str(dt) + str("-") + \
       str(epoch) + "_checkpoint.pt"
      # epoch + net + optimizer state is enough to resume training
      info_dict = {
        'epoch' : epoch,
        'net_state' : net.state_dict(),
        'optimizer_state' : optimizer.state_dict()
      }
      T.save(info_dict, fn)
  print("Done ")
  # 4. evaluate model accuracy
  print("\nComputing model accuracy")
  net.eval()
  acc_train = accuracy(net, train_ds, 0.10)
  print("Accuracy (within 0.10) on train data = %0.4f" % \
    acc_train)
  acc_test = accuracy(net, test_ds, 0.10)
  print("Accuracy (within 0.10) on test data = %0.4f" % \
    acc_test)
  # base_acc_train = baseline_acc(train_ds, 0.10)
  # print("%0.4f" % base_acc_train) # 0.7000
  # base_acc_test = baseline_acc(test_ds, 0.10)
  # print("%0.4f" % base_acc_test) # 0.7000
  # 5. make a prediction
  print("\nPredicting price for AC=no, sqft=2300, ")
  print(" style=colonial, school=kennedy: ")
  # predictors in dataset order: AC, sqft(norm), style 1-hot, school 1-hot
  unk = np.array([[-1, 0.2300, 0,0,1, 0,1,0]],
    dtype=np.float32)
  unk = T.tensor(unk, dtype=T.float32).to(device)
  with T.no_grad():
    pred_price = net(unk)
  pred_price = pred_price.item()  # scalar
  # target was normalized by 1,000,000; un-scale for display
  str_price = \
    "${:,.2f}".format(pred_price * 1000000)
  print(str_price)
  # 6. save final model (state_dict approach)
  print("\nSaving trained model state")
  fn = ".\\Models\\houses_model.pth"
  T.save(net.state_dict(), fn)
  # saved_model = Net()
  # saved_model.load_state_dict(T.load(fn))
  # use saved_model to make prediction(s)
  print("\nEnd House price demo")
# Script entry point: run the full train/evaluate/predict demo.
if __name__ == "__main__":
  main()
| [
"torch.nn.MSELoss",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.manual_seed",
"torch.nn.init.xavier_uniform_",
"time.strftime",
"torch.save",
"torch.nn.Linear",
"torch.nn.init.zeros_",
"numpy.array",
"numpy.loadtxt",
"torch.device",
"torch.no_grad",
"torch.abs... | [((196, 211), 'torch.device', 'T.device', (['"""cpu"""'], {}), "('cpu')\n", (204, 211), True, 'import torch as T\n'), ((2825, 2839), 'torch.abs', 'T.abs', (['(pct * Y)'], {}), '(pct * Y)\n', (2830, 2839), True, 'import torch as T\n'), ((2881, 2896), 'torch.abs', 'T.abs', (['(oupt - Y)'], {}), '(oupt - Y)\n', (2886, 2896), True, 'import torch as T\n'), ((3821, 3837), 'torch.manual_seed', 'T.manual_seed', (['(4)'], {}), '(4)\n', (3834, 3837), True, 'import torch as T\n'), ((3867, 3884), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (3881, 3884), True, 'import numpy as np\n'), ((4186, 4254), 'torch.utils.data.DataLoader', 'T.utils.data.DataLoader', (['train_ds'], {'batch_size': 'bat_size', 'shuffle': '(True)'}), '(train_ds, batch_size=bat_size, shuffle=True)\n', (4209, 4254), True, 'import torch as T\n'), ((4403, 4417), 'torch.nn.MSELoss', 'T.nn.MSELoss', ([], {}), '()\n', (4415, 4417), True, 'import torch as T\n'), ((6492, 6550), 'numpy.array', 'np.array', (['[[-1, 0.23, 0, 0, 1, 0, 1, 0]]'], {'dtype': 'np.float32'}), '([[-1, 0.23, 0, 0, 1, 0, 1, 0]], dtype=np.float32)\n', (6500, 6550), True, 'import numpy as np\n'), ((633, 771), 'numpy.loadtxt', 'np.loadtxt', (['src_file'], {'max_rows': 'm_rows', 'usecols': '[0, 1, 2, 3, 4, 5, 6, 7, 8]', 'delimiter': '"""\t"""', 'comments': '"""#"""', 'skiprows': '(0)', 'dtype': 'np.float32'}), "(src_file, max_rows=m_rows, usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8],\n delimiter='\\t', comments='#', skiprows=0, dtype=np.float32)\n", (643, 771), True, 'import numpy as np\n'), ((1430, 1448), 'torch.nn.Linear', 'T.nn.Linear', (['(8)', '(10)'], {}), '(8, 10)\n', (1441, 1448), True, 'import torch as T\n'), ((1480, 1499), 'torch.nn.Linear', 'T.nn.Linear', (['(10)', '(10)'], {}), '(10, 10)\n', (1491, 1499), True, 'import torch as T\n'), ((1516, 1534), 'torch.nn.Linear', 'T.nn.Linear', (['(10)', '(1)'], {}), '(10, 1)\n', (1527, 1534), True, 'import torch as T\n'), ((1540, 1583), 'torch.nn.init.xavier_uniform_', 
'T.nn.init.xavier_uniform_', (['self.hid1.weight'], {}), '(self.hid1.weight)\n', (1565, 1583), True, 'import torch as T\n'), ((1588, 1620), 'torch.nn.init.zeros_', 'T.nn.init.zeros_', (['self.hid1.bias'], {}), '(self.hid1.bias)\n', (1604, 1620), True, 'import torch as T\n'), ((1625, 1668), 'torch.nn.init.xavier_uniform_', 'T.nn.init.xavier_uniform_', (['self.hid2.weight'], {}), '(self.hid2.weight)\n', (1650, 1668), True, 'import torch as T\n'), ((1673, 1705), 'torch.nn.init.zeros_', 'T.nn.init.zeros_', (['self.hid2.bias'], {}), '(self.hid2.bias)\n', (1689, 1705), True, 'import torch as T\n'), ((1710, 1753), 'torch.nn.init.xavier_uniform_', 'T.nn.init.xavier_uniform_', (['self.oupt.weight'], {}), '(self.oupt.weight)\n', (1735, 1753), True, 'import torch as T\n'), ((1758, 1790), 'torch.nn.init.zeros_', 'T.nn.init.zeros_', (['self.oupt.bias'], {}), '(self.oupt.bias)\n', (1774, 1790), True, 'import torch as T\n'), ((2749, 2760), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (2758, 2760), True, 'import torch as T\n'), ((4844, 4868), 'torch.manual_seed', 'T.manual_seed', (['(1 + epoch)'], {}), '(1 + epoch)\n', (4857, 4868), True, 'import torch as T\n'), ((6614, 6625), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (6623, 6625), True, 'import torch as T\n'), ((2214, 2225), 'torch.no_grad', 'T.no_grad', ([], {}), '()\n', (2223, 2225), True, 'import torch as T\n'), ((5533, 5567), 'time.strftime', 'time.strftime', (['"""%Y_%m_%d-%H_%M_%S"""'], {}), "('%Y_%m_%d-%H_%M_%S')\n", (5546, 5567), False, 'import time\n'), ((5805, 5826), 'torch.save', 'T.save', (['info_dict', 'fn'], {}), '(info_dict, fn)\n', (5811, 5826), True, 'import torch as T\n'), ((6563, 6593), 'torch.tensor', 'T.tensor', (['unk'], {'dtype': 'T.float32'}), '(unk, dtype=T.float32)\n', (6571, 6593), True, 'import torch as T\n'), ((932, 964), 'torch.tensor', 'T.tensor', (['tmp_x'], {'dtype': 'T.float32'}), '(tmp_x, dtype=T.float32)\n', (940, 964), True, 'import torch as T\n'), ((1002, 1034), 'torch.tensor', 
'T.tensor', (['tmp_y'], {'dtype': 'T.float32'}), '(tmp_y, dtype=T.float32)\n', (1010, 1034), True, 'import torch as T\n'), ((2991, 3012), 'torch.sum', 'T.sum', (['results'], {'dim': '(0)'}), '(results, dim=0)\n', (2996, 3012), True, 'import torch as T\n')] |
# -*- coding: utf-8 -*-
"""data_augmentation.py
# Notebook: Generate Training Dataset
In this Notebook, we want to simulate training dataset from the real world dataset. There are two steps in making such data:
* 1) Create pair of trajectories from the original set
* 2) Create label per pair of trajectories
# Required packages
"""
import numpy as np
from numpy import save, load
import pandas as pd
import matplotlib.pyplot as plt
import glob
"""# Define functions"""
def read_data(path, format, n, min_len):
data_list=[]
c=0
for file in glob.glob(path+format):
#print(file)
try:
df = pd.read_csv(file, header=None, sep=',|;')
except:
df = pd.DataFrame ()
if ((len(df)>min_len)):
data_list.append(df)
c = c + 1
if c >= n:
break
dataset=pd.concat(data_list)
dataset.columns=['id', 'date', 'lng', 'lat']
dataset=dataset.reset_index(drop=True)
print ('Step1: Read files')
return dataset
def linux_TS(df):
df.date=pd.to_datetime(df.date)
df['TS'] = (pd.DatetimeIndex(df.date-pd.Timedelta('02:00:00')).astype(np.int64) // 10**9)
return df
def min_drop_func(df, min_len):
data_list=[]
ids=np.unique(df.id)
for i in ids:
temp=df[df.id==i]
if len (temp) > min_len:
data_list.append(temp)
data_list = pd.concat(data_list)
data_list=data_list.reset_index(drop=True)
return data_list
def data_window(df, win_size, i):
temp=df.loc[i:i+win_size,].copy()
return temp
def gap_finder(df, max_gap):
    """Check a window for time gaps.

    :param df: window with a ``TS`` (Unix seconds) column
    :param max_gap: maximum tolerated difference between consecutive timestamps
    :return: (gap_flag, sorted_window) where gap_flag is 1 iff the window is gap-free
    """
    ordered = df.sort_values('TS')
    largest = np.max(np.diff(ordered.TS))
    if largest < max_gap:
        gap_flag = 1
    else:
        gap_flag = 0
        print('There is a gap, excluded from data...')
    return gap_flag, ordered
def drop_indx(df_d, r):
    """Zero out ``lng_1``/``lat_1`` on a random fraction *r* of the rows.

    Simulates dropped GPS points in the pair trajectory; the input frame
    is not modified.
    """
    n_drop = int(r * len(df_d))
    chosen = np.random.choice(df_d.index, n_drop, replace=False)
    out = df_d.copy()
    out.loc[chosen, ['lng_1', 'lat_1']] = 0
    return out
def add_offset(df):
    """Shift the pair trajectory by a constant per-axis offset.

    The offset equals the variance of each coordinate column (despite the
    original 'noise' naming, it is the same constant for every row).

    :return: (df, var_lng, var_lat) — the mutated frame and both offsets
    """
    off_lng = np.var(df.lng_1)
    off_lat = np.var(df.lat_1)
    df['lng_1'] = df.lng_1 + off_lng
    df['lat_1'] = df.lat_1 + off_lat
    return df, off_lng, off_lat
def second_traj(df1, df2):
    """Convert the original and simulated trajectories to plain numpy arrays.

    :return: (original, simulated) arrays with rows (id, lng, lat) and
        (id, lng_1, lat_1) respectively
    """
    original = np.array(df1[['id', 'lng', 'lat']])
    simulated = np.array(df2[['id', 'lng_1', 'lat_1']])
    return original, simulated
def label_maker(var_lng, var_lat, r):
    """Similarity label for a pair: drop ratio scaled by the total offset."""
    return r * (var_lng + var_lat)
def shift_time(df, s):
    """Create the pair columns by shifting the original track *s* steps in time.

    Adds ``lng_1``/``lat_1`` as shifted copies of ``lng``/``lat`` (NaN where
    the shift has no source row) and returns the mutated frame.
    """
    for src, dst in (('lng', 'lng_1'), ('lat', 'lat_1')):
        df[dst] = df[src].shift(s)
    return df
def make_pairs(org_df, win_size, max_gap=1000000000):
    """Simulate labelled trajectory pairs from the original dataset.

    For every id and every window of win_size + 1 points, a second trajectory
    is derived from the original by shifting it in time, adding a constant
    offset and randomly dropping points; the label encodes the distortion.

    :param org_df: DataFrame with columns ['id', 'date', 'lng', 'lat', 'TS']
    :param win_size: window length (each pair covers win_size + 1 points)
    :param max_gap: maximum allowed gap (seconds) between consecutive points;
        windows with a larger gap are skipped. Fix: this was read from a
        module-level global; it is now an explicit parameter whose default
        matches the script-level value, so existing calls are unchanged.
    :return: (x1_list, x2_list, y_list) as float numpy arrays
    """
    print('Start making trajectory pairs ...')
    y_list = []
    x1_list = []
    x2_list = []
    r = [0.1, 0.2, 0.4, 0.6, 0.7, 0.8]
    shifts = [-2, -1, 0, 1, 2]
    ids = np.unique(org_df.id)
    # Iterate over trajectory ids
    for idd in ids:
        temp = org_df[org_df.id == idd]
        temp = temp.reset_index(drop=True)
        # Non-overlapping windows within each id
        for i in range(0, len(temp) - win_size, win_size + 1):
            for ri in r:
                for s in shifts:
                    # window of consecutive points
                    temp_0 = data_window(temp, win_size, i)
                    # skip windows with a time gap
                    gap_flag, tempx = gap_finder(temp_0, max_gap)
                    if gap_flag:
                        # 2nd trajectory: shift in time, add offset, drop points
                        temps = shift_time(tempx, s)
                        temp_1, var_lng, var_lat = add_offset(temps)
                        temp_2 = drop_indx(temp_1, ri)
                        # store the trajectory pair
                        df1_list, df2_list = second_traj(temp_0, temp_2)
                        x1_list.append(df1_list)
                        x2_list.append(df2_list)
                        # label from the applied distortion
                        label = label_maker(var_lng, var_lat, ri)
                        y_list.append(label)
    x1_list = np.array(x1_list, dtype=float)
    x2_list = np.array(x2_list, dtype=float)
    y_list = np.array(y_list, dtype=float)
    print('Step2: Trajectory pair is created.')
    return x1_list, x2_list, y_list
def plot_example(x1_list, x2_list, n):
    """Plot sample *n*: the original trajectory, its simulated pair and the dropped points.

    :param x1_list: array of original trajectories, rows of (id, lng, lat)
    :param x2_list: array of simulated pairs, rows of (id, lng_1, lat_1);
        dropped points are encoded as zeros
    :param n: index of the sample to plot
    :return: 0
    """
    fig, axis = plt.subplots(figsize=(10, 10))
    plt.plot([i[1] for i in x1_list[n]], [i[2] for i in x1_list[n]], label='Original trajectory')
    plt.plot([i[1] for i in x2_list[n] if i[1] != 0], [i[2] for i in x2_list[n] if i[2] != 0],
             label='Trajectory pair')
    # Fix: previously every dropped point was scattered twice and each carried
    # the 'Dropped points' label, duplicating the legend entry per point.
    # Now each point is drawn once and only the first carries the label.
    dropped_labelled = False
    for indx, point in enumerate(x2_list[n]):
        if point[1] == 0:
            plt.scatter(x1_list[n][indx][1], x1_list[n][indx][2], c='red',
                        label=None if dropped_labelled else 'Dropped points')
            dropped_labelled = True
    # Make legend
    plt.legend(loc=4)
    # Hide values on X and Y axis
    plt.xticks([])
    plt.yticks([])
    # Set labels for axis
    plt.xlabel('Longitude')
    plt.ylabel('Latitude')
    plt.title('Simulating trajecory pair')
    return 0
"""# Processing the data"""
# Initial variables
path='/home/nasrim/data/taxi_log_2008_by_id'
format='*.txt'
min_len=3
win_size=29
max_gap=1000000000
# Functions
# Read data files
dataset_0 = read_data(path, format, n=5, min_len=3)
# Drop short trajectories
dataset_1 = min_drop_func(dataset_0, min_len)
# Add Linux Timestamp
dataset_2 = linux_TS (dataset_1)
# Make pairs and so on
x1_list, x2_list, y_list = make_pairs(dataset_2, win_size)
# Save datafiles
save('x1_list.npy',x1_list)
save('x2_list.npy',x2_list)
save('y_list.npy',y_list)
# Plot an example, uncomment if you want to see how the 226th sample is look like.
# plot_example( x1_list, x2_list, n=226)
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"glob.glob",
"numpy.unique",
"pandas.DataFrame",
"matplotlib.pyplot.yticks",
"numpy.max",
"pandas.Timedelta",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"numpy.var",
"pandas.concat",
"numpy.save",
"matplotlib.pyplot.legend",
"p... | [((5274, 5302), 'numpy.save', 'save', (['"""x1_list.npy"""', 'x1_list'], {}), "('x1_list.npy', x1_list)\n", (5278, 5302), False, 'from numpy import save, load\n'), ((5302, 5330), 'numpy.save', 'save', (['"""x2_list.npy"""', 'x2_list'], {}), "('x2_list.npy', x2_list)\n", (5306, 5330), False, 'from numpy import save, load\n'), ((5330, 5356), 'numpy.save', 'save', (['"""y_list.npy"""', 'y_list'], {}), "('y_list.npy', y_list)\n", (5334, 5356), False, 'from numpy import save, load\n'), ((556, 580), 'glob.glob', 'glob.glob', (['(path + format)'], {}), '(path + format)\n', (565, 580), False, 'import glob\n'), ((811, 831), 'pandas.concat', 'pd.concat', (['data_list'], {}), '(data_list)\n', (820, 831), True, 'import pandas as pd\n'), ((997, 1020), 'pandas.to_datetime', 'pd.to_datetime', (['df.date'], {}), '(df.date)\n', (1011, 1020), True, 'import pandas as pd\n'), ((1181, 1197), 'numpy.unique', 'np.unique', (['df.id'], {}), '(df.id)\n', (1190, 1197), True, 'import numpy as np\n'), ((1308, 1328), 'pandas.concat', 'pd.concat', (['data_list'], {}), '(data_list)\n', (1317, 1328), True, 'import pandas as pd\n'), ((1556, 1572), 'numpy.diff', 'np.diff', (['temp.TS'], {}), '(temp.TS)\n', (1563, 1572), True, 'import numpy as np\n'), ((1585, 1601), 'numpy.max', 'np.max', (['dif_time'], {}), '(dif_time)\n', (1591, 1601), True, 'import numpy as np\n'), ((2017, 2033), 'numpy.var', 'np.var', (['df.lng_1'], {}), '(df.lng_1)\n', (2023, 2033), True, 'import numpy as np\n'), ((2044, 2060), 'numpy.var', 'np.var', (['df.lat_1'], {}), '(df.lat_1)\n', (2050, 2060), True, 'import numpy as np\n'), ((2343, 2378), 'numpy.array', 'np.array', (["df1[['id', 'lng', 'lat']]"], {}), "(df1[['id', 'lng', 'lat']])\n", (2351, 2378), True, 'import numpy as np\n'), ((2390, 2429), 'numpy.array', 'np.array', (["df2[['id', 'lng_1', 'lat_1']]"], {}), "(df2[['id', 'lng_1', 'lat_1']])\n", (2398, 2429), True, 'import numpy as np\n'), ((2830, 2850), 'numpy.unique', 'np.unique', (['org_df.id'], {}), 
'(org_df.id)\n', (2839, 2850), True, 'import numpy as np\n'), ((3860, 3890), 'numpy.array', 'np.array', (['x1_list'], {'dtype': 'float'}), '(x1_list, dtype=float)\n', (3868, 3890), True, 'import numpy as np\n'), ((3903, 3933), 'numpy.array', 'np.array', (['x2_list'], {'dtype': 'float'}), '(x2_list, dtype=float)\n', (3911, 3933), True, 'import numpy as np\n'), ((3945, 3974), 'numpy.array', 'np.array', (['y_list'], {'dtype': 'float'}), '(y_list, dtype=float)\n', (3953, 3974), True, 'import numpy as np\n'), ((4113, 4143), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4125, 4143), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4246), 'matplotlib.pyplot.plot', 'plt.plot', (['[i[1] for i in x1_list[n]]', '[i[2] for i in x1_list[n]]'], {'label': '"""Original trajectory"""'}), "([i[1] for i in x1_list[n]], [i[2] for i in x1_list[n]], label=\n 'Original trajectory')\n", (4156, 4246), True, 'import matplotlib.pyplot as plt\n'), ((4245, 4364), 'matplotlib.pyplot.plot', 'plt.plot', (['[i[1] for i in x2_list[n] if i[1] != 0]', '[i[2] for i in x2_list[n] if i[2] != 0]'], {'label': '"""Trajectory pair"""'}), "([i[1] for i in x2_list[n] if i[1] != 0], [i[2] for i in x2_list[n] if\n i[2] != 0], label='Trajectory pair')\n", (4253, 4364), True, 'import matplotlib.pyplot as plt\n'), ((4488, 4579), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x1_list[n][indx][1]', 'x1_list[n][indx][2]'], {'c': '"""red"""', 'label': '"""Dropped points"""'}), "(x1_list[n][indx][1], x1_list[n][indx][2], c='red', label=\n 'Dropped points')\n", (4499, 4579), True, 'import matplotlib.pyplot as plt\n'), ((4594, 4611), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (4604, 4611), True, 'import matplotlib.pyplot as plt\n'), ((4647, 4661), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4657, 4661), True, 'import matplotlib.pyplot as plt\n'), ((4664, 4678), 'matplotlib.pyplot.yticks', 'plt.yticks', 
(['[]'], {}), '([])\n', (4674, 4678), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4729), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Longitude"""'], {}), "('Longitude')\n", (4716, 4729), True, 'import matplotlib.pyplot as plt\n'), ((4732, 4754), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {}), "('Latitude')\n", (4742, 4754), True, 'import matplotlib.pyplot as plt\n'), ((4758, 4796), 'matplotlib.pyplot.title', 'plt.title', (['"""Simulating trajecory pair"""'], {}), "('Simulating trajecory pair')\n", (4767, 4796), True, 'import matplotlib.pyplot as plt\n'), ((617, 658), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'sep': '""",|;"""'}), "(file, header=None, sep=',|;')\n", (628, 658), True, 'import pandas as pd\n'), ((4422, 4484), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x1_list[n][indx][1]', 'x1_list[n][indx][2]'], {'c': '"""red"""'}), "(x1_list[n][indx][1], x1_list[n][indx][2], c='red')\n", (4433, 4484), True, 'import matplotlib.pyplot as plt\n'), ((684, 698), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (696, 698), True, 'import pandas as pd\n'), ((1060, 1084), 'pandas.Timedelta', 'pd.Timedelta', (['"""02:00:00"""'], {}), "('02:00:00')\n", (1072, 1084), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
import os
import shutil
import glob
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from importlib import import_module
import ast
from utils.logger import _logger
from utils.dataset import SimpleIterDataset
# Command-line interface for training/evaluation; the resulting `args`
# namespace is consumed by main() and (as a module global) by save_root/save_awk.
parser = argparse.ArgumentParser()
# -- task mode --
parser.add_argument('--regression-mode', action='store_true', default=False,
                    help='run in regression mode if this flag is set; otherwise run in classification mode')
# -- data input / loading --
parser.add_argument('-c', '--data-config', type=str, default='data/ak15_points_pf_sv_v0.yaml',
                    help='data config YAML file')
parser.add_argument('-i', '--data-train', nargs='*', default=[],
                    help='training files')
parser.add_argument('-t', '--data-test', nargs='*', default=[],
                    help='testing files')
parser.add_argument('--data-fraction', type=float, default=1,
                    help='fraction of events to load from each file; for training, the events are randomly selected for each epoch')
parser.add_argument('--file-fraction', type=float, default=1,
                    help='fraction of files to load; for training, the files are randomly selected for each epoch')
parser.add_argument('--fetch-by-files', action='store_true', default=False,
                    help='When enabled, will load all events from a small number (set by ``--fetch-step``) of files for each data fetching. '
                         'Otherwise (default), load a small fraction of events from all files each time, which helps reduce variations in the sample composition.')
parser.add_argument('--fetch-step', type=float, default=0.01,
                    help='fraction of events to load each time from every file (when ``--fetch-by-files`` is disabled); '
                         'Or: number of files to load each time (when ``--fetch-by-files`` is enabled). Shuffling & sampling is done within these events, so set a large enough value.')
parser.add_argument('--train-val-split', type=float, default=0.8,
                    help='training/validation split fraction')
parser.add_argument('--demo', action='store_true', default=False,
                    help='quickly test the setup by running over only a small number of events')
parser.add_argument('--lr-finder', type=str, default=None,
                    help='run learning rate finder instead of the actual training; format: ``start_lr, end_lr, num_iters``')
# -- model / network --
parser.add_argument('-n', '--network-config', type=str, default='networks/particle_net_pfcand_sv.py',
                    help='network architecture configuration file; the path must be relative to the current dir')
parser.add_argument('--network-option', nargs=2, action='append', default=[],
                    help='options to pass to the model class constructor, e.g., `--network-option use_counts False`')
parser.add_argument('-m', '--model-prefix', type=str, default='test_output/model_name',
                    help='path to save or load the model; for training, this will be used as a prefix; for testing, this should be the full path including extension')
# -- optimization --
parser.add_argument('--num-epochs', type=int, default=20,
                    help='number of epochs')
parser.add_argument('--optimizer', type=str, default='ranger', choices=['adam', 'ranger'], # TODO: add more
                    help='optimizer for the training')
parser.add_argument('--load-epoch', type=int, default=None,
                    help='used to resume interrupted training, load model and optimizer state saved in the `epoch-%d_state.pt` and `epoch-%d_optimizer.pt` files')
parser.add_argument('--start-lr', type=float, default=5e-3,
                    help='start learning rate')
parser.add_argument('--lr-steps', type=str, default='10,20',
                    help='steps to reduce the lr; currently only used when setting `--optimizer` to adam')
parser.add_argument('--batch-size', type=int, default=128,
                    help='batch size')
parser.add_argument('--use-amp', action='store_true', default=False,
                    help='use mixed precision training (fp16); NOT WORKING YET')
# -- runtime / prediction / export --
parser.add_argument('--gpus', type=str, default='0',
                    help='device for the training/testing; to use CPU, set to empty string (''); to use multiple gpu, set it as a comma separated list, e.g., `1,2,3,4`')
parser.add_argument('--num-workers', type=int, default=1,
                    help='number of threads to load the dataset; memory consumption and disk access load increases (~linearly) with this numbers')
parser.add_argument('--predict', action='store_true', default=False,
                    help='run prediction instead of training')
parser.add_argument('--predict-output', type=str,
                    help='path to save the prediction output, support `.root` and `.awkd` format')
parser.add_argument('--export-onnx', type=str, default=None,
                    help='export the PyTorch model to ONNX model and save it at the given path (path must ends w/ .onnx); '
                         'needs to set `--data-config`, `--network-config`, and `--model-prefix` (requires the full model path)')
parser.add_argument('--io-test', action='store_true', default=False,
                    help='test throughput of the dataloader')
def train_load(args):
    """
    Loads the training data.
    :param args:
    :return: train_loader, val_loader, data_config, train_inputs
    """
    filelist = []
    for pattern in args.data_train:
        filelist.extend(glob.glob(pattern))
    filelist = sorted(filelist)
    # shuffle the file order (seed np.random beforehand for reproducibility)
    np.random.shuffle(filelist)
    if args.demo:
        # demo mode: restrict to a handful of files and lighter sampling
        filelist = filelist[:20]
        _logger.info(filelist)
        args.data_fraction = 0.1
        args.fetch_step = 0.002
    num_workers = min(args.num_workers, int(len(filelist) * args.file_fraction))
    dataset_opts = dict(for_training=True,
                        file_fraction=args.file_fraction,
                        fetch_by_files=args.fetch_by_files,
                        fetch_step=args.fetch_step)
    train_data = SimpleIterDataset(filelist, args.data_config,
                                  load_range_and_fraction=((0, args.train_val_split), args.data_fraction),
                                  **dataset_opts)
    val_data = SimpleIterDataset(filelist, args.data_config,
                                load_range_and_fraction=((args.train_val_split, 1), args.data_fraction),
                                **dataset_opts)
    loader_opts = dict(num_workers=num_workers, batch_size=args.batch_size,
                       drop_last=True, pin_memory=True)
    train_loader = DataLoader(train_data, **loader_opts)
    val_loader = DataLoader(val_data, **loader_opts)
    data_config = train_data.config
    train_input_names = train_data.config.input_names
    train_label_names = train_data.config.label_names
    return train_loader, val_loader, data_config, train_input_names, train_label_names
def test_load(args):
    """
    Loads the test data.
    :param args:
    :return: test_loader, data_config
    """
    files = []
    for pattern in args.data_test:
        files.extend(glob.glob(pattern))
    files.sort()
    test_data = SimpleIterDataset(files, args.data_config, for_training=False,
                                 load_range_and_fraction=((0, 1), args.data_fraction),
                                 fetch_by_files=True, fetch_step=1)
    test_loader = DataLoader(test_data, num_workers=min(args.num_workers, len(files)),
                             batch_size=args.batch_size, drop_last=False, pin_memory=True)
    return test_loader, test_data.config
def onnx(args, model, data_config, model_info):
    """
    Saving model as ONNX.
    :param args:
    :param model:
    :param data_config:
    :param model_info:
    :return:
    """
    assert (args.export_onnx.endswith('.onnx'))
    model_path = args.model_prefix
    _logger.info('Exporting model %s to ONNX' % model_path)
    # export is done on CPU in eval mode from the saved state_dict
    state = torch.load(model_path, map_location='cpu')
    model.load_state_dict(state)
    model = model.cpu()
    model.eval()
    os.makedirs(os.path.dirname(args.export_onnx), exist_ok=True)
    # dummy inputs with the configured shapes drive the tracing
    inputs = tuple(torch.ones(model_info['input_shapes'][name], dtype=torch.float32)
                   for name in model_info['input_names'])
    torch.onnx.export(model, inputs, args.export_onnx,
                      input_names=model_info['input_names'],
                      output_names=model_info['output_names'],
                      dynamic_axes=model_info.get('dynamic_axes', None),
                      opset_version=11)
    _logger.info('ONNX model saved to %s', args.export_onnx)
    # ship the preprocessing parameters next to the ONNX file
    preprocessing_json = os.path.join(os.path.dirname(args.export_onnx), 'preprocess.json')
    data_config.export_json(preprocessing_json)
    _logger.info('Preprocessing parameters saved to %s', preprocessing_json)
def optim(args, model):
    """
    Optimizer and scheduler. Could try CosineAnnealing
    :param args:
    :param model:
    :return:
    """
    scheduler = None
    if args.optimizer == 'adam':
        opt = torch.optim.Adam(model.parameters(), lr=args.start_lr)
        if args.lr_finder is None:
            milestones = [int(step) for step in args.lr_steps.split(',')]
            scheduler = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=milestones, gamma=0.1)
    else:
        from utils.nn.optimizer.ranger import Ranger
        opt = Ranger(model.parameters(), lr=args.start_lr)
        if args.lr_finder is None:
            # decay the lr over the last ~30% of the epochs down to 1% of start_lr
            decay_epochs = max(1, int(args.num_epochs * 0.3))
            decay_rate = 0.01 ** (1.0 / decay_epochs)
            first_decay = args.num_epochs - decay_epochs
            scheduler = torch.optim.lr_scheduler.MultiStepLR(
                opt, milestones=list(range(first_decay, args.num_epochs)), gamma=decay_rate)
    return opt, scheduler
def model_setup(args, data_config):
    """
    Loads the model
    :param args:
    :param data_config:
    :return: model, model_info, network_module, network_options
    """
    # 'networks/foo.py' -> importable module path 'networks.foo'
    module_path = args.network_config.replace('.py', '').replace('/', '.')
    network_module = import_module(module_path)
    network_options = {}
    for key, value in args.network_option:
        network_options[key] = ast.literal_eval(value)
    if args.export_onnx:
        network_options['for_inference'] = True
    if args.use_amp:
        network_options['use_amp'] = True
    model, model_info = network_module.get_model(data_config, **network_options)
    _logger.info(model)
    return model, model_info, network_module, network_options
def iotest(args, data_loader):
    """
    Io test
    :param args:
    :param data_loader:
    :return:
    """
    from tqdm.auto import tqdm
    from collections import defaultdict
    from utils.data.tools import _concat
    _logger.info('Start running IO test')
    monitor_info = defaultdict(list)
    # iterate the whole loader once, only collecting the monitoring payload Z
    for X, y, Z in tqdm(data_loader):
        for key, val in Z.items():
            monitor_info[key].append(val.cpu().numpy())
    monitor_info = {key: _concat(vals) for key, vals in monitor_info.items()}
    if monitor_info:
        monitor_output_path = 'weaver_monitor_info.pkl'
        import pickle
        with open(monitor_output_path, 'wb') as f:
            pickle.dump(monitor_info, f)
        _logger.info('Monitor info written to %s' % monitor_output_path)
def save_root(data_config, scores, labels, observers):
    """
    Saves as .root
    :param data_config:
    :param scores:
    :param labels
    :param observers
    :return:
    """
    from utils.data.fileio import _write_root
    # NOTE: reads the module-level `args` for regression_mode / predict_output
    output = {}
    if args.regression_mode:
        output[data_config.label_names[0]] = labels[data_config.label_names[0]]
        output['output'] = scores
    else:
        for idx, label_name in enumerate(data_config.label_value):
            output[label_name] = (labels[data_config.label_names[0]] == idx)
            output['score_' + label_name] = scores[:, idx]

    def _add_1d(mapping, skip=None):
        # copy 1-d arrays into the output; warn on and skip anything else
        for key, val in mapping.items():
            if key == skip:
                continue
            if val.ndim > 1:
                _logger.warning('Ignoring %s, not a 1d array.', key)
                continue
            output[key] = val

    _add_1d(labels, skip=data_config.label_names[0])
    _add_1d(observers)
    _write_root(args.predict_output, output)
def save_awk(scores, labels, observers):
    """
    Saves as .awkd
    :param scores:
    :param labels:
    :param observers:
    :return:
    """
    from utils.data.tools import awkward
    output = {'scores': scores}
    output.update(labels)
    output.update(observers)
    # If one array name is a prefix of another, rename it with a unique
    # numeric suffix before writing.
    name_remap = {}
    names = list(output)
    for i, first in enumerate(names):
        for second in names[i + 1:]:
            if first.startswith(second):
                name_remap[second] = '%s_%d' % (second, len(name_remap))
            if second.startswith(first):
                name_remap[first] = '%s_%d' % (first, len(name_remap))
    _logger.info('Renamed the following variables in the output file: %s', str(name_remap))
    output = {name_remap.get(key, key): val for key, val in output.items()}
    awkward.save(args.predict_output, output, mode='w')
def main(args):
    """Entry point: dispatch to IO-test, ONNX export, training or prediction
    based on the parsed command-line arguments."""
    _logger.info(args)
    if args.file_fraction < 1:
        _logger.warning('Use of `file-fraction` is not recommended in general -- prefer using `data-fraction` instead.')
    # classification/regression mode: pick the matching train/evaluate helpers
    if args.regression_mode:
        _logger.info('Running in regression mode')
        from utils.nn.tools import train_regression as train
        from utils.nn.tools import evaluate_regression as evaluate
    else:
        _logger.info('Running in classification mode')
        from utils.nn.tools import train_classification as train
        from utils.nn.tools import evaluate_classification as evaluate
    # training/testing mode
    training_mode = not args.predict
    # device: first listed GPU id, or CPU when --gpus is empty
    if args.gpus:
        gpus = [int(i) for i in args.gpus.split(',')]
        dev = torch.device(gpus[0])
    else:
        gpus = None
        dev = torch.device('cpu')
    # load data
    if training_mode:
        train_loader, val_loader, data_config, train_input_names, train_label_names = train_load(args)
    else:
        test_loader, data_config = test_load(args)
    if args.io_test:
        data_loader = train_loader if training_mode else test_loader
        iotest(args, data_loader)
        return
    model, model_info, network_module, network_options = model_setup(args, data_config)
    # export to ONNX
    if args.export_onnx:
        onnx(args, model, data_config, model_info)
        return
    # note: we should always save/load the state_dict of the original model, not the one wrapped by nn.DataParallel
    # so we do not convert it to nn.DataParallel now
    model = model.to(dev)
    if training_mode:
        # loss function: taken from the network module when it defines one
        try:
            loss_func = network_module.get_loss(data_config, **network_options)
            _logger.info('Using loss function %s with options %s' % (loss_func, network_options))
        except AttributeError:
            loss_func = torch.nn.CrossEntropyLoss()
            _logger.warning('Loss function not defined in %s. Will use `torch.nn.CrossEntropyLoss()` by default.',
                            args.network_config)
        # optimizer & learning rate
        opt, scheduler = optim(args, model)
        # load previous training and resume if `--load-epoch` is set
        if args.load_epoch is not None:
            _logger.info('Resume training from epoch %d' % args.load_epoch)
            model_state = torch.load(args.model_prefix + '_epoch-%d_state.pt' % args.load_epoch, map_location=dev)
            model.load_state_dict(model_state)
            opt_state = torch.load(args.model_prefix + '_epoch-%d_optimizer.pt' % args.load_epoch, map_location=dev)
            opt.load_state_dict(opt_state)
        # multi-gpu
        if gpus is not None and len(gpus) > 1:
            # model becomes `torch.nn.DataParallel` w/ model.module being the original `torch.nn.Module`
            model = torch.nn.DataParallel(model, device_ids=gpus)
        model = model.to(dev)
        # lr finder: keep it after all other setups
        if args.lr_finder is not None:
            start_lr, end_lr, num_iter = args.lr_finder.replace(' ', '').split(',')
            from utils.lr_finder import LRFinder
            lr_finder = LRFinder(model, opt, loss_func, device=dev, input_names=train_input_names,
                                 label_names=train_label_names)
            lr_finder.range_test(train_loader, start_lr=float(start_lr), end_lr=float(end_lr), num_iter=int(num_iter))
            lr_finder.plot(output='lr_finder.png')  # to inspect the loss-learning rate graph
            return
        if args.use_amp:
            from torch.cuda.amp import GradScaler
            scaler = GradScaler()
        else:
            scaler = None
        # training loop: train, checkpoint every epoch, validate, track the best
        best_valid_metric = np.inf if args.regression_mode else 0
        for epoch in range(args.num_epochs):
            if args.load_epoch is not None:
                # skip epochs already completed before the resume point
                if epoch <= args.load_epoch:
                    continue
            print('-' * 50)
            _logger.info('Epoch #%d training' % epoch)
            train(model, loss_func, opt, scheduler, train_loader, dev, grad_scaler=scaler)
            if args.model_prefix:
                dirname = os.path.dirname(args.model_prefix)
                if dirname and not os.path.exists(dirname):
                    os.makedirs(dirname)
                state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
                torch.save(state_dict, args.model_prefix + '_epoch-%d_state.pt' % epoch)
                torch.save(opt.state_dict(), args.model_prefix + '_epoch-%d_optimizer.pt' % epoch)
            _logger.info('Epoch #%d validating' % epoch)
            valid_metric = evaluate(model, val_loader, dev, loss_func=loss_func)
            # lower is better for regression (loss), higher for classification (accuracy-like)
            is_best_epoch = (valid_metric < best_valid_metric) if args.regression_mode else (valid_metric > best_valid_metric)
            if is_best_epoch:
                best_valid_metric = valid_metric
                if args.model_prefix:
                    shutil.copy2(args.model_prefix + '_epoch-%d_state.pt' % epoch, args.model_prefix + '_best_epoch_state.pt')
                    torch.save(model, args.model_prefix + '_best_epoch_full.pt')
            _logger.info('Epoch #%d: Current validation metric: %.5f (best: %.5f)' % (epoch, valid_metric, best_valid_metric))
    else:
        # run prediction
        if args.model_prefix.endswith('.onnx'):
            _logger.info('Loading model %s for eval' % args.model_prefix)
            from utils.nn.tools import evaluate_onnx
            test_metric, scores, labels, observers = evaluate_onnx(args.model_prefix, test_loader)
        else:
            model_path = args.model_prefix if args.model_prefix.endswith('.pt') else args.model_prefix + '_best_epoch_state.pt'
            _logger.info('Loading model %s for eval' % model_path)
            model.load_state_dict(torch.load(model_path, map_location=dev))
            if gpus is not None and len(gpus) > 1:
                model = torch.nn.DataParallel(model, device_ids=gpus)
                model = model.to(dev)
            test_metric, scores, labels, observers = evaluate(model, test_loader, dev, for_training=False)
        _logger.info('Test metric %.5f' % test_metric)
        if args.predict_output:
            os.makedirs(os.path.dirname(args.predict_output), exist_ok=True)
            if args.predict_output.endswith('.root'):
                save_root(data_config, scores, labels, observers)
            else:
                save_awk(scores, labels, observers)
            _logger.info('Written output to %s' % args.predict_output)
if __name__ == '__main__':
    # Script entry point; the module-level `args` bound here is also read
    # directly by save_root/save_awk.
    args = parser.parse_args()
    # maybe add args validation, or use @click instead
    main(args)
| [
"pickle.dump",
"argparse.ArgumentParser",
"collections.defaultdict",
"torch.device",
"glob.glob",
"utils.data.fileio._write_root",
"torch.ones",
"torch.utils.data.DataLoader",
"utils.data.tools.awkward.save",
"torch.load",
"os.path.dirname",
"os.path.exists",
"utils.data.tools._concat",
"u... | [((282, 307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (305, 307), False, 'import argparse\n'), ((5540, 5567), 'numpy.random.shuffle', 'np.random.shuffle', (['filelist'], {}), '(filelist)\n', (5557, 5567), True, 'import numpy as np\n'), ((5813, 6060), 'utils.dataset.SimpleIterDataset', 'SimpleIterDataset', (['filelist', 'args.data_config'], {'for_training': '(True)', 'load_range_and_fraction': '((0, args.train_val_split), args.data_fraction)', 'file_fraction': 'args.file_fraction', 'fetch_by_files': 'args.fetch_by_files', 'fetch_step': 'args.fetch_step'}), '(filelist, args.data_config, for_training=True,\n load_range_and_fraction=((0, args.train_val_split), args.data_fraction),\n file_fraction=args.file_fraction, fetch_by_files=args.fetch_by_files,\n fetch_step=args.fetch_step)\n', (5830, 6060), False, 'from utils.dataset import SimpleIterDataset\n'), ((6169, 6416), 'utils.dataset.SimpleIterDataset', 'SimpleIterDataset', (['filelist', 'args.data_config'], {'for_training': '(True)', 'load_range_and_fraction': '((args.train_val_split, 1), args.data_fraction)', 'file_fraction': 'args.file_fraction', 'fetch_by_files': 'args.fetch_by_files', 'fetch_step': 'args.fetch_step'}), '(filelist, args.data_config, for_training=True,\n load_range_and_fraction=((args.train_val_split, 1), args.data_fraction),\n file_fraction=args.file_fraction, fetch_by_files=args.fetch_by_files,\n fetch_step=args.fetch_step)\n', (6186, 6416), False, 'from utils.dataset import SimpleIterDataset\n'), ((6523, 6635), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'num_workers': 'num_workers', 'batch_size': 'args.batch_size', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(train_data, num_workers=num_workers, batch_size=args.batch_size,\n drop_last=True, pin_memory=True)\n', (6533, 6635), False, 'from torch.utils.data import DataLoader\n'), ((6679, 6789), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'num_workers': 'num_workers', 
'batch_size': 'args.batch_size', 'drop_last': '(True)', 'pin_memory': '(True)'}), '(val_data, num_workers=num_workers, batch_size=args.batch_size,\n drop_last=True, pin_memory=True)\n', (6689, 6789), False, 'from torch.utils.data import DataLoader\n'), ((7307, 7470), 'utils.dataset.SimpleIterDataset', 'SimpleIterDataset', (['filelist', 'args.data_config'], {'for_training': '(False)', 'load_range_and_fraction': '((0, 1), args.data_fraction)', 'fetch_by_files': '(True)', 'fetch_step': '(1)'}), '(filelist, args.data_config, for_training=False,\n load_range_and_fraction=((0, 1), args.data_fraction), fetch_by_files=\n True, fetch_step=1)\n', (7324, 7470), False, 'from utils.dataset import SimpleIterDataset\n'), ((7548, 7660), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'num_workers': 'num_workers', 'batch_size': 'args.batch_size', 'drop_last': '(False)', 'pin_memory': '(True)'}), '(test_data, num_workers=num_workers, batch_size=args.batch_size,\n drop_last=False, pin_memory=True)\n', (7558, 7660), False, 'from torch.utils.data import DataLoader\n'), ((8031, 8086), 'utils.logger._logger.info', '_logger.info', (["('Exporting model %s to ONNX' % model_path)"], {}), "('Exporting model %s to ONNX' % model_path)\n", (8043, 8086), False, 'from utils.logger import _logger\n'), ((8688, 8744), 'utils.logger._logger.info', '_logger.info', (['"""ONNX model saved to %s"""', 'args.export_onnx'], {}), "('ONNX model saved to %s', args.export_onnx)\n", (8700, 8744), False, 'from utils.logger import _logger\n'), ((8890, 8962), 'utils.logger._logger.info', '_logger.info', (['"""Preprocessing parameters saved to %s"""', 'preprocessing_json'], {}), "('Preprocessing parameters saved to %s', preprocessing_json)\n", (8902, 8962), False, 'from utils.logger import _logger\n'), ((10489, 10508), 'utils.logger._logger.info', '_logger.info', (['model'], {}), '(model)\n', (10501, 10508), False, 'from utils.logger import _logger\n'), ((10802, 10839), 'utils.logger._logger.info', 
'_logger.info', (['"""Start running IO test"""'], {}), "('Start running IO test')\n", (10814, 10839), False, 'from utils.logger import _logger\n'), ((10859, 10876), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10870, 10876), False, 'from collections import defaultdict\n'), ((10897, 10914), 'tqdm.auto.tqdm', 'tqdm', (['data_loader'], {}), '(data_loader)\n', (10901, 10914), False, 'from tqdm.auto import tqdm\n'), ((12330, 12370), 'utils.data.fileio._write_root', '_write_root', (['args.predict_output', 'output'], {}), '(args.predict_output, output)\n', (12341, 12370), False, 'from utils.data.fileio import _write_root\n'), ((13254, 13305), 'utils.data.tools.awkward.save', 'awkward.save', (['args.predict_output', 'output'], {'mode': '"""w"""'}), "(args.predict_output, output, mode='w')\n", (13266, 13305), False, 'from utils.data.tools import awkward\n'), ((13328, 13346), 'utils.logger._logger.info', '_logger.info', (['args'], {}), '(args)\n', (13340, 13346), False, 'from utils.logger import _logger\n'), ((5627, 5649), 'utils.logger._logger.info', '_logger.info', (['filelist'], {}), '(filelist)\n', (5639, 5649), False, 'from utils.logger import _logger\n'), ((8113, 8155), 'torch.load', 'torch.load', (['model_path'], {'map_location': '"""cpu"""'}), "(model_path, map_location='cpu')\n", (8123, 8155), False, 'import torch\n'), ((8215, 8248), 'os.path.dirname', 'os.path.dirname', (['args.export_onnx'], {}), '(args.export_onnx)\n', (8230, 8248), False, 'import os\n'), ((8784, 8817), 'os.path.dirname', 'os.path.dirname', (['args.export_onnx'], {}), '(args.export_onnx)\n', (8799, 8817), False, 'import os\n'), ((10215, 10234), 'ast.literal_eval', 'ast.literal_eval', (['v'], {}), '(v)\n', (10231, 10234), False, 'import ast\n'), ((11022, 11032), 'utils.data.tools._concat', '_concat', (['v'], {}), '(v)\n', (11029, 11032), False, 'from utils.data.tools import _concat\n'), ((11266, 11330), 'utils.logger._logger.info', '_logger.info', (["('Monitor info 
written to %s' % monitor_output_path)"], {}), "('Monitor info written to %s' % monitor_output_path)\n", (11278, 11330), False, 'from utils.logger import _logger\n'), ((13387, 13509), 'utils.logger._logger.warning', '_logger.warning', (['"""Use of `file-fraction` is not recommended in general -- prefer using `data-fraction` instead."""'], {}), "(\n 'Use of `file-fraction` is not recommended in general -- prefer using `data-fraction` instead.'\n )\n", (13402, 13509), False, 'from utils.logger import _logger\n'), ((13575, 13617), 'utils.logger._logger.info', '_logger.info', (['"""Running in regression mode"""'], {}), "('Running in regression mode')\n", (13587, 13617), False, 'from utils.logger import _logger\n'), ((13764, 13810), 'utils.logger._logger.info', '_logger.info', (['"""Running in classification mode"""'], {}), "('Running in classification mode')\n", (13776, 13810), False, 'from utils.logger import _logger\n'), ((14113, 14134), 'torch.device', 'torch.device', (['gpus[0]'], {}), '(gpus[0])\n', (14125, 14134), False, 'import torch\n'), ((14179, 14198), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (14191, 14198), False, 'import torch\n'), ((19570, 19616), 'utils.logger._logger.info', '_logger.info', (["('Test metric %.5f' % test_metric)"], {}), "('Test metric %.5f' % test_metric)\n", (19582, 19616), False, 'from utils.logger import _logger\n'), ((8293, 8355), 'torch.ones', 'torch.ones', (["model_info['input_shapes'][k]"], {'dtype': 'torch.float32'}), "(model_info['input_shapes'][k], dtype=torch.float32)\n", (8303, 8355), False, 'import torch\n'), ((9356, 9429), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['opt'], {'milestones': 'lr_steps', 'gamma': '(0.1)'}), '(opt, milestones=lr_steps, gamma=0.1)\n', (9392, 9429), False, 'import torch\n'), ((11229, 11257), 'pickle.dump', 'pickle.dump', (['monitor_info', 'f'], {}), '(monitor_info, f)\n', (11240, 11257), False, 'import pickle\n'), ((12068, 12118), 
'utils.logger._logger.warning', '_logger.warning', (['"""Ignoring %s, not a 1d array."""', 'k'], {}), "('Ignoring %s, not a 1d array.', k)\n", (12083, 12118), False, 'from utils.logger import _logger\n'), ((12232, 12282), 'utils.logger._logger.warning', '_logger.warning', (['"""Ignoring %s, not a 1d array."""', 'k'], {}), "('Ignoring %s, not a 1d array.', k)\n", (12247, 12282), False, 'from utils.logger import _logger\n'), ((15092, 15181), 'utils.logger._logger.info', '_logger.info', (["('Using loss function %s with options %s' % (loss_func, network_options))"], {}), "('Using loss function %s with options %s' % (loss_func,\n network_options))\n", (15104, 15181), False, 'from utils.logger import _logger\n'), ((15628, 15691), 'utils.logger._logger.info', '_logger.info', (["('Resume training from epoch %d' % args.load_epoch)"], {}), "('Resume training from epoch %d' % args.load_epoch)\n", (15640, 15691), False, 'from utils.logger import _logger\n'), ((15718, 15810), 'torch.load', 'torch.load', (["(args.model_prefix + '_epoch-%d_state.pt' % args.load_epoch)"], {'map_location': 'dev'}), "(args.model_prefix + '_epoch-%d_state.pt' % args.load_epoch,\n map_location=dev)\n", (15728, 15810), False, 'import torch\n'), ((15878, 15974), 'torch.load', 'torch.load', (["(args.model_prefix + '_epoch-%d_optimizer.pt' % args.load_epoch)"], {'map_location': 'dev'}), "(args.model_prefix + '_epoch-%d_optimizer.pt' % args.load_epoch,\n map_location=dev)\n", (15888, 15974), False, 'import torch\n'), ((16207, 16252), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'gpus'}), '(model, device_ids=gpus)\n', (16228, 16252), False, 'import torch\n'), ((16532, 16641), 'utils.lr_finder.LRFinder', 'LRFinder', (['model', 'opt', 'loss_func'], {'device': 'dev', 'input_names': 'train_input_names', 'label_names': 'train_label_names'}), '(model, opt, loss_func, device=dev, input_names=train_input_names,\n label_names=train_label_names)\n', (16540, 16641), False, 'from 
utils.lr_finder import LRFinder\n'), ((17000, 17012), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (17010, 17012), False, 'from torch.cuda.amp import GradScaler\n'), ((17347, 17389), 'utils.logger._logger.info', '_logger.info', (["('Epoch #%d training' % epoch)"], {}), "('Epoch #%d training' % epoch)\n", (17359, 17389), False, 'from utils.logger import _logger\n'), ((17402, 17480), 'utils.nn.tools.train_classification', 'train', (['model', 'loss_func', 'opt', 'scheduler', 'train_loader', 'dev'], {'grad_scaler': 'scaler'}), '(model, loss_func, opt, scheduler, train_loader, dev, grad_scaler=scaler)\n', (17407, 17480), True, 'from utils.nn.tools import train_classification as train\n'), ((18001, 18045), 'utils.logger._logger.info', '_logger.info', (["('Epoch #%d validating' % epoch)"], {}), "('Epoch #%d validating' % epoch)\n", (18013, 18045), False, 'from utils.logger import _logger\n'), ((18073, 18126), 'utils.nn.tools.evaluate_classification', 'evaluate', (['model', 'val_loader', 'dev'], {'loss_func': 'loss_func'}), '(model, val_loader, dev, loss_func=loss_func)\n', (18081, 18126), True, 'from utils.nn.tools import evaluate_classification as evaluate\n'), ((18591, 18710), 'utils.logger._logger.info', '_logger.info', (["('Epoch #%d: Current validation metric: %.5f (best: %.5f)' % (epoch,\n valid_metric, best_valid_metric))"], {}), "('Epoch #%d: Current validation metric: %.5f (best: %.5f)' % (\n epoch, valid_metric, best_valid_metric))\n", (18603, 18710), False, 'from utils.logger import _logger\n'), ((18801, 18862), 'utils.logger._logger.info', '_logger.info', (["('Loading model %s for eval' % args.model_prefix)"], {}), "('Loading model %s for eval' % args.model_prefix)\n", (18813, 18862), False, 'from utils.logger import _logger\n'), ((18969, 19014), 'utils.nn.tools.evaluate_onnx', 'evaluate_onnx', (['args.model_prefix', 'test_loader'], {}), '(args.model_prefix, test_loader)\n', (18982, 19014), False, 'from utils.nn.tools import evaluate_onnx\n'), 
((19169, 19223), 'utils.logger._logger.info', '_logger.info', (["('Loading model %s for eval' % model_path)"], {}), "('Loading model %s for eval' % model_path)\n", (19181, 19223), False, 'from utils.logger import _logger\n'), ((19508, 19561), 'utils.nn.tools.evaluate_classification', 'evaluate', (['model', 'test_loader', 'dev'], {'for_training': '(False)'}), '(model, test_loader, dev, for_training=False)\n', (19516, 19561), True, 'from utils.nn.tools import evaluate_classification as evaluate\n'), ((19929, 19987), 'utils.logger._logger.info', '_logger.info', (["('Written output to %s' % args.predict_output)"], {}), "('Written output to %s' % args.predict_output)\n", (19941, 19987), False, 'from utils.logger import _logger\n'), ((5467, 5479), 'glob.glob', 'glob.glob', (['f'], {}), '(f)\n', (5476, 5479), False, 'import glob\n'), ((7192, 7204), 'glob.glob', 'glob.glob', (['f'], {}), '(f)\n', (7201, 7204), False, 'import glob\n'), ((15233, 15260), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (15258, 15260), False, 'import torch\n'), ((15273, 15406), 'utils.logger._logger.warning', '_logger.warning', (['"""Loss function not defined in %s. Will use `torch.nn.CrossEntropyLoss()` by default."""', 'args.network_config'], {}), "(\n 'Loss function not defined in %s. 
Will use `torch.nn.CrossEntropyLoss()` by default.'\n , args.network_config)\n", (15288, 15406), False, 'from utils.logger import _logger\n'), ((17541, 17575), 'os.path.dirname', 'os.path.dirname', (['args.model_prefix'], {}), '(args.model_prefix)\n', (17556, 17575), False, 'import os\n'), ((17816, 17888), 'torch.save', 'torch.save', (['state_dict', "(args.model_prefix + '_epoch-%d_state.pt' % epoch)"], {}), "(state_dict, args.model_prefix + '_epoch-%d_state.pt' % epoch)\n", (17826, 17888), False, 'import torch\n'), ((19258, 19298), 'torch.load', 'torch.load', (['model_path'], {'map_location': 'dev'}), '(model_path, map_location=dev)\n', (19268, 19298), False, 'import torch\n'), ((19375, 19420), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'gpus'}), '(model, device_ids=gpus)\n', (19396, 19420), False, 'import torch\n'), ((19674, 19710), 'os.path.dirname', 'os.path.dirname', (['args.predict_output'], {}), '(args.predict_output)\n', (19689, 19710), False, 'import os\n'), ((17656, 17676), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (17667, 17676), False, 'import os\n'), ((18391, 18502), 'shutil.copy2', 'shutil.copy2', (["(args.model_prefix + '_epoch-%d_state.pt' % epoch)", "(args.model_prefix + '_best_epoch_state.pt')"], {}), "(args.model_prefix + '_epoch-%d_state.pt' % epoch, args.\n model_prefix + '_best_epoch_state.pt')\n", (18403, 18502), False, 'import shutil\n'), ((18518, 18578), 'torch.save', 'torch.save', (['model', "(args.model_prefix + '_best_epoch_full.pt')"], {}), "(model, args.model_prefix + '_best_epoch_full.pt')\n", (18528, 18578), False, 'import torch\n'), ((17611, 17634), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (17625, 17634), False, 'import os\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dataset distributed sampler."""
from __future__ import division
import math
import numpy as np
import mindspore.nn as nn
from mindspore.common import initializer as init, set_seed
from src.model import DeepID
from src.loss import cal_acc
set_seed(1)
np.random.seed(1)
class DistributedSampler:
    """Partition dataset indices across replicas for distributed training.

    Each rank receives ``ceil(dataset_size / num_replicas)`` indices; the
    shared ordering is padded with its own head so it divides evenly.
    """
    def __init__(self, dataset_size, num_replicas=None, rank=None, shuffle=False):
        if num_replicas is None:
            print("***********Setting world_size to 1 since it is not passed in ******************")
            num_replicas = 1
        if rank is None:
            print("***********Setting rank to 0 since it is not passed in ******************")
            rank = 0
        self.dataset_size = dataset_size
        self.num_replicas = num_replicas
        self.epoch = 0
        self.rank = rank
        # Per-rank sample count, rounded up so every rank gets the same amount.
        self.num_samples = int(math.ceil(dataset_size * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
    def __iter__(self):
        # Reshuffle once per epoch; seeding with the epoch keeps ranks in sync.
        if self.shuffle:
            order = np.random.RandomState(seed=self.epoch).permutation(self.dataset_size)
            order = order.tolist()
            self.epoch += 1
        else:
            order = list(range(self.dataset_size))
        # Pad with leading indices so the list length is an exact multiple.
        order += order[:(self.total_size - len(order))]
        assert len(order) == self.total_size
        # Each rank takes a strided slice of the shared ordering.
        order = order[self.rank: self.total_size: self.num_replicas]
        assert len(order) == self.num_samples
        return iter(order)
    def __len__(self):
        return self.num_samples
def init_weights(net, init_type='normal', init_gain=0.02):
    """
    Initialize network weights.
    Parameters:
        net (Cell): Network to be initialized
        init_type (str): One of: normal | xavier | KaimingUniform | constant | truncatedNormal.
        init_gain (float): Gain factor for normal and xavier.
    """
    # Lazily-evaluated initializer value for each supported scheme;
    # lambdas defer construction until the scheme is actually selected.
    conv_schemes = {
        'normal': lambda: init.Normal(init_gain),
        'xavier': lambda: init.XavierUniform(init_gain),
        'KaimingUniform': lambda: init.HeUniform(init_gain),
        'constant': lambda: 0.001,
        'truncatedNormal': lambda: init.TruncatedNormal(),
    }
    for _, cell in net.cells_and_names():
        if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose)):
            if init_type not in conv_schemes:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            cell.weight.set_data(init.initializer(conv_schemes[init_type](), cell.weight.shape))
        elif isinstance(cell, nn.GroupNorm):
            # Standard affine-norm init: unit scale, zero shift.
            cell.gamma.set_data(init.initializer('ones', cell.gamma.shape))
            cell.beta.set_data(init.initializer('zeros', cell.beta.shape))
def print_network(model, name):
    """Print the model, its name and the number of trainable parameters."""
    total_params = sum(np.prod(p.shape) for p in model.trainable_params())
    print(model)
    print(name)
    print('The number of parameters: {}'.format(total_params))
def get_network(args, num_class, feature=False):
    """Build a DeepID network, initialize its weights and print a summary.

    Parameters:
        args: config object; only ``args.input_dim`` is read here.
        num_class (int): number of output classes.
        feature (bool): forwarded to the DeepID constructor.
    Returns:
        the initialized DeepID network.
    """
    network = DeepID(args.input_dim, num_class, feature)
    init_weights(network, 'truncatedNormal', math.sqrt(5))
    print_network(network, 'DeepID')
    return network
def eval_func(net, img, label):
    """Run a forward pass and return the accuracy against `label`."""
    return cal_acc(net(img), label)
| [
"src.loss.cal_acc",
"numpy.random.seed",
"mindspore.common.initializer.XavierUniform",
"math.sqrt",
"math.ceil",
"mindspore.common.initializer.HeUniform",
"numpy.random.RandomState",
"mindspore.common.set_seed",
"mindspore.common.initializer.TruncatedNormal",
"mindspore.common.initializer.initiali... | [((911, 922), 'mindspore.common.set_seed', 'set_seed', (['(1)'], {}), '(1)\n', (919, 922), False, 'from mindspore.common import initializer as init, set_seed\n'), ((923, 940), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (937, 940), True, 'import numpy as np\n'), ((4175, 4217), 'src.model.DeepID', 'DeepID', (['args.input_dim', 'num_class', 'feature'], {}), '(args.input_dim, num_class, feature)\n', (4181, 4217), False, 'from src.model import DeepID\n'), ((4399, 4422), 'src.loss.cal_acc', 'cal_acc', (['predict', 'label'], {}), '(predict, label)\n', (4406, 4422), False, 'from src.loss import cal_acc\n'), ((3937, 3953), 'numpy.prod', 'np.prod', (['p.shape'], {}), '(p.shape)\n', (3944, 3953), True, 'import numpy as np\n'), ((4263, 4275), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (4272, 4275), False, 'import math\n'), ((1549, 1598), 'math.ceil', 'math.ceil', (['(dataset_size * 1.0 / self.num_replicas)'], {}), '(dataset_size * 1.0 / self.num_replicas)\n', (1558, 1598), False, 'import math\n'), ((1799, 1837), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'self.epoch'}), '(seed=self.epoch)\n', (1820, 1837), True, 'import numpy as np\n'), ((3658, 3700), 'mindspore.common.initializer.initializer', 'init.initializer', (['"""ones"""', 'cell.gamma.shape'], {}), "('ones', cell.gamma.shape)\n", (3674, 3700), True, 'from mindspore.common import initializer as init, set_seed\n'), ((3733, 3775), 'mindspore.common.initializer.initializer', 'init.initializer', (['"""zeros"""', 'cell.beta.shape'], {}), "('zeros', cell.beta.shape)\n", (3749, 3775), True, 'from mindspore.common import initializer as init, set_seed\n'), ((2852, 2874), 'mindspore.common.initializer.Normal', 'init.Normal', (['init_gain'], {}), '(init_gain)\n', (2863, 2874), True, 'from mindspore.common import initializer as init, set_seed\n'), ((2990, 3019), 'mindspore.common.initializer.XavierUniform', 'init.XavierUniform', 
(['init_gain'], {}), '(init_gain)\n', (3008, 3019), True, 'from mindspore.common import initializer as init, set_seed\n'), ((3143, 3168), 'mindspore.common.initializer.HeUniform', 'init.HeUniform', (['init_gain'], {}), '(init_gain)\n', (3157, 3168), True, 'from mindspore.common import initializer as init, set_seed\n'), ((3269, 3311), 'mindspore.common.initializer.initializer', 'init.initializer', (['(0.001)', 'cell.weight.shape'], {}), '(0.001, cell.weight.shape)\n', (3285, 3311), True, 'from mindspore.common import initializer as init, set_seed\n'), ((3416, 3438), 'mindspore.common.initializer.TruncatedNormal', 'init.TruncatedNormal', ([], {}), '()\n', (3436, 3438), True, 'from mindspore.common import initializer as init, set_seed\n')] |
"""
░░░░░░ ░░░░░░ ░░░░░░░░ ░░░░░ ░░░░░░░░ ░░░░░░░ ░░░░░░ ░░░░░░
▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒ ▒▒
▒▒▒▒▒▒ ▒▒ ▒▒ ▒▒ ▒▒▒▒▒▒▒ ▒▒ ▒▒▒▒▒ ▒▒▒▒▒ ▒▒ ▒▒
▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓ ▓▓
██ ██ ██████ ██ ██ ██ ██ ███████ ██████ ██████
* How to rotate a 3D vector about an axis in Python *
Rotate a vector v about axis by taking the component of v perpendicular to axis,
rotating it theta in the plane perpendicular to axis, then add the component of v parallel to axis.
Let a be a unit vector along an axis axis. Then a = axis/norm(axis).
Let A = I x a, the cross product of a with an identity matrix I.
Then exp(theta * A) — the matrix exponential of theta*A — is the rotation matrix.
Finally, dotting the rotation matrix with the vector will rotate the vector.
references:
rotation
https://www.kite.com/python/answers/how-to-rotate-a-3d-vector-about-an-axis-in-python
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html
matplotlib 3D
https://stackoverflow.com/questions/62433465/how-to-plot-3d-point-clouds-from-an-npy-file
https://stackoverflow.com/questions/8130823/set-matplotlib-3d-plot-aspect-ratio
"""
import numpy as np
from scipy.spatial.transform import Rotation
import matplotlib.pyplot as plt
import os
def plot_3D(pointcloud):
    """Scatter-plot an (N, 3) point cloud with axes scaled to the data extents."""
    xs, ys, zs = pointcloud[:, 0], pointcloud[:, 1], pointcloud[:, 2]
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(projection='3d')
    # Match the box proportions to the data ranges so the cloud is undistorted.
    ax.set_box_aspect((np.ptp(xs), np.ptp(ys), np.ptp(zs)))
    img = ax.scatter(xs, ys, zs, s=1)  # , c=t_low, cmap=plt.hot())
    fig.colorbar(img)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.show()
def rotate3d(points3d, rotation_axis, rotation_degrees):
    """Rotate an (N, 3) array of points about an axis through the origin.

    Parameters:
        points3d (ndarray): points of shape (N, 3).
        rotation_axis (ndarray): rotation axis of shape (3,); its norm scales
            the angle, so pass a unit vector for an exact rotation by
            `rotation_degrees`.
        rotation_degrees (float): rotation angle in degrees.

    Returns:
        ndarray: rotated points with shape (N, 3), dtype float.
    """
    rotation_radians = np.radians(rotation_degrees)
    rotation = Rotation.from_rotvec(rotation_radians * rotation_axis)
    # Rotation.apply is vectorized over the first axis, so a single call
    # replaces the original per-point Python loop (same results, O(1) calls).
    return rotation.apply(points3d)
# Build a 3D point cloud by sweeping 2D profile slices around the up-axis (Z).
# 3D rotation about up-axis (Z)
rotation_axis = np.array([0, 0, 1])
angular_resolution = 1 # in degrees
# index all csv files (recursively, under ./csv)
filepaths = list()
for root, dirs, files in os.walk("csv", topdown=False):
    for file in files:
        if os.path.splitext(file)[1] == ".csv":
            filepath = os.path.join(root, file)
            filepaths.append(filepath)
# init result object
# NOTE(review): this seeds the cloud with a spurious point at the origin,
# which ends up in the final plot — TODO confirm whether that is intended.
pointcloud = np.zeros((1, 3))
angle = 0
for filepath in filepaths:
    # Each CSV holds one semicolon-separated 2D profile (X;Y per row).
    points2d = np.loadtxt(filepath, delimiter=";")
    # insert 3rd dimension before column 1 (Y=0) so 2D-Y becomes 3D-Z (profile now vertical)
    points3d = np.insert(points2d, 1, values=0, axis=1)
    # NOTE(review): the full-revolution sweep below is commented out; instead
    # each successive file is rotated one `angular_resolution` step further.
    #for angle in range(0, 360, angular_resolution):
    rotated_points3d = rotate3d(points3d, rotation_axis, angle)
    # np.append copies the whole array each iteration (quadratic for many files).
    pointcloud = np.append(pointcloud, rotated_points3d, axis=0)
    angle += angular_resolution
plot_3D(pointcloud)
| [
"numpy.radians",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.ptp",
"os.walk",
"numpy.zeros",
"numpy.insert",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.loadtxt",
"os.path.splitext",
"scipy.spatial.transform.Rotation.from_rotvec"
] | [((2247, 2266), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2255, 2266), True, 'import numpy as np\n'), ((2372, 2401), 'os.walk', 'os.walk', (['"""csv"""'], {'topdown': '(False)'}), "('csv', topdown=False)\n", (2379, 2401), False, 'import os\n'), ((2597, 2613), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (2605, 2613), True, 'import numpy as np\n'), ((1461, 1488), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1471, 1488), True, 'import matplotlib.pyplot as plt\n'), ((1754, 1764), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1762, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1872, 1900), 'numpy.radians', 'np.radians', (['rotation_degrees'], {}), '(rotation_degrees)\n', (1882, 1900), True, 'import numpy as np\n'), ((1971, 2008), 'scipy.spatial.transform.Rotation.from_rotvec', 'Rotation.from_rotvec', (['rotation_vector'], {}), '(rotation_vector)\n', (1991, 2008), False, 'from scipy.spatial.transform import Rotation\n'), ((2667, 2702), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {'delimiter': '""";"""'}), "(filepath, delimiter=';')\n", (2677, 2702), True, 'import numpy as np\n'), ((2799, 2839), 'numpy.insert', 'np.insert', (['points2d', '(1)'], {'values': '(0)', 'axis': '(1)'}), '(points2d, 1, values=0, axis=1)\n', (2808, 2839), True, 'import numpy as np\n'), ((2975, 3022), 'numpy.append', 'np.append', (['pointcloud', 'rotated_points3d'], {'axis': '(0)'}), '(pointcloud, rotated_points3d, axis=0)\n', (2984, 3022), True, 'import numpy as np\n'), ((1554, 1564), 'numpy.ptp', 'np.ptp', (['xs'], {}), '(xs)\n', (1560, 1564), True, 'import numpy as np\n'), ((1566, 1576), 'numpy.ptp', 'np.ptp', (['ys'], {}), '(ys)\n', (1572, 1576), True, 'import numpy as np\n'), ((1578, 1588), 'numpy.ptp', 'np.ptp', (['zs'], {}), '(zs)\n', (1584, 1588), True, 'import numpy as np\n'), ((2497, 2521), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2509, 2521), 
False, 'import os\n'), ((2437, 2459), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (2453, 2459), False, 'import os\n')] |
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
def display_mask(mask):
    """Map a 2D integer label mask to an RGB image.

    Parameters:
        mask (ndarray): 2D array of class indices in [0, 19].

    Returns:
        ndarray: float array of shape (H, W, 3) holding the RGB colour of
        each label.

    Raises:
        ValueError: if `mask` is not 2-dimensional.
    """
    # Colour palette indexed by class id. (The original code built a second,
    # dead palette first; only this one was ever used.)
    dict_col = np.array(
        [
            [0,0,0], #black
            [0,255,0],#green
            [255,0,0],#red
            [0,0,255],#blue
            [0,255,255],#cyan
            [255,255,255],#white
            [96,96,96], #grey
            [255,255,0],#yellow
            [237,127,16],#orange
            [102,0,153],#purple
            [88,41,0], #brown
            [253,108,158],#pink
            [128,0,0],#maroon
            [255,0,255],
            [255,0,127],
            [0,128,255],
            [0,102,51],#17
            [192,192,192],
            [128,128,0],
            [84, 151, 120]
        ]
    )
    # The original "try: len(mask.shape)==2 except AssertionError" was a no-op
    # (a bare expression never raises AssertionError); validate explicitly.
    if mask.ndim != 2:
        raise ValueError('Mask\'s shape is not 2')
    # Vectorized palette lookup. This also fixes the original double loop,
    # which iterated mask.shape[0] over BOTH axes and crashed (or read out of
    # bounds) on non-square masks. astype(float) keeps the original dtype.
    return dict_col[mask].astype(float)
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array nb : the first image of the batch is displayed
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
if len(image_numpy.shape)!=2: # it is an image
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: transpose and scaling
else : # it is a mask
image_numpy = image_numpy.astype(np.uint8)
image_numpy = display_mask(image_numpy)
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Calculate and print the mean of average absolute(gradients)

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- the name of the network
    """
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is None:
            continue  # skip parameters that received no gradient
        mean += torch.mean(torch.abs(param.grad.data))
        count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- stretch one dimension by this factor before saving
    """
    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape
    # Stretch one side depending on which way the ratio deviates from 1.
    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    elif aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        stats = (np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat))
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % stats)
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    # Anything that is not a non-string list is treated as a single path.
    if isinstance(paths, str) or not isinstance(paths, list):
        mkdir(paths)
    else:
        for path in paths:
            mkdir(path)
def mkdir(path):
    """create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern; it also
    # surfaces an error (instead of silently skipping) if `path` exists
    # but is a regular file rather than a directory.
    os.makedirs(path, exist_ok=True)
def gaussian(in_tensor, stddev):
    """Return `in_tensor` plus zero-mean Gaussian noise with the given stddev."""
    noise = torch.normal(0, stddev, size=in_tensor.size()).to(in_tensor.device)
    return in_tensor + noise
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
| [
"os.makedirs",
"numpy.median",
"numpy.std",
"numpy.zeros",
"os.path.exists",
"numpy.transpose",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.tile",
"numpy.max",
"PIL.Image.fromarray",
"torch.abs"
] | [((193, 349), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255], [255, 255, 255], [96, 96,\n 96], [253, 96, 96], [255, 255, 0], [237, 127, 16], [102, 0, 153]]'], {}), '([[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255], [255, 255, 255],\n [96, 96, 96], [253, 96, 96], [255, 255, 0], [237, 127, 16], [102, 0, 153]])\n', (201, 349), True, 'import numpy as np\n'), ((486, 805), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255], [0, 255, 255], [255, 255,\n 255], [96, 96, 96], [255, 255, 0], [237, 127, 16], [102, 0, 153], [88, \n 41, 0], [253, 108, 158], [128, 0, 0], [255, 0, 255], [255, 0, 127], [0,\n 128, 255], [0, 102, 51], [192, 192, 192], [128, 128, 0], [84, 151, 120]]'], {}), '([[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255], [0, 255, 255],\n [255, 255, 255], [96, 96, 96], [255, 255, 0], [237, 127, 16], [102, 0, \n 153], [88, 41, 0], [253, 108, 158], [128, 0, 0], [255, 0, 255], [255, 0,\n 127], [0, 128, 255], [0, 102, 51], [192, 192, 192], [128, 128, 0], [84,\n 151, 120]])\n', (494, 805), True, 'import numpy as np\n'), ((1230, 1273), 'numpy.zeros', 'np.zeros', (['(mask.shape[0], mask.shape[1], 3)'], {}), '((mask.shape[0], mask.shape[1], 3))\n', (1238, 1273), True, 'import numpy as np\n'), ((3386, 3414), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (3401, 3414), False, 'from PIL import Image\n'), ((4678, 4698), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4692, 4698), False, 'import os\n'), ((4708, 4725), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4719, 4725), False, 'import os\n'), ((2174, 2205), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (2181, 2205), True, 'import numpy as np\n'), ((3002, 3028), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (3011, 3028), False, 'import torch\n'), ((4162, 4172), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (4169, 
4172), True, 'import numpy as np\n'), ((4174, 4183), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (4180, 4183), True, 'import numpy as np\n'), ((4185, 4194), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4191, 4194), True, 'import numpy as np\n'), ((4196, 4208), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (4205, 4208), True, 'import numpy as np\n'), ((4210, 4219), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (4216, 4219), True, 'import numpy as np\n'), ((2288, 2324), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (2300, 2324), True, 'import numpy as np\n')] |
import unittest, os
import json
import numpy as np
from math import factorial
from overcooked_ai_py.mdp.actions import Action, Direction
from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS
from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC
from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
from overcooked_ai_py.planning.planners import MediumLevelActionManager, NO_COUNTERS_PARAMS, MotionPlanner
from overcooked_ai_py.utils import save_pickle, load_pickle, iterate_over_json_files_in_dir, load_from_json, save_as_json
from utils import TESTING_DATA_DIR, generate_serialized_trajectory
START_ORDER_LIST = ["any"]
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
def comb(n, k):
    """Binomial coefficient "n choose k" (returned as a float)."""
    denominator = factorial(n - k) * factorial(k)
    return factorial(n) / denominator
class TestRecipe(unittest.TestCase):
def setUp(self):
Recipe.configure({})
self.r1 = Recipe([Recipe.ONION, Recipe.ONION, Recipe.ONION])
self.r2 = Recipe([Recipe.ONION, Recipe.ONION, Recipe.ONION])
self.r3 = Recipe([Recipe.ONION, Recipe.TOMATO])
self.r4 = Recipe([Recipe.ONION, Recipe.TOMATO])
self.r5 = Recipe([Recipe.TOMATO, Recipe.ONION])
self.r6 = Recipe([Recipe.ONION, Recipe.ONION])
self.recipes = [self.r1, self.r2, self.r3, self.r4, self.r5, self.r6]
def tearDown(self):
Recipe.configure({})
def test_eq(self):
self.assertEqual(self.r1, self.r2, "Failed basic equality check")
self.assertNotEqual(self.r1, self.r3, "Failed Basic inequality check")
self.assertNotEqual(self.r1, self.r6, "Failed inequality check with all one ingredient")
self.assertEqual(self.r3, self.r4, "Failed basic equality check")
self.assertEqual(self.r4, self.r5, "Failed ordered equality check")
def test_caching(self):
self.assertIs(self.r1, self.r2)
self.assertIs(self.r3, self.r4)
self.assertIs(self.r4, self.r5)
self.assertFalse(self.r6 is self.r1, "different recipes cached to same value")
def test_value(self):
# TODO
for recipe in self.recipes:
self.assertEqual(recipe.value, 20)
def test_time(self):
# TODO
for recipe in self.recipes:
self.assertEqual(recipe.time, 20)
def test_all_recipes(self):
for recipe in self.recipes:
self.assertTrue(recipe in Recipe.ALL_RECIPES)
self.assertEqual(len(Recipe.ALL_RECIPES), self._expected_num_recipes(len(Recipe.ALL_INGREDIENTS), Recipe.MAX_NUM_INGREDIENTS))
Recipe.configure({ "max_num_ingredients" : 4 })
self.assertEqual(len(Recipe.ALL_RECIPES), self._expected_num_recipes(len(Recipe.ALL_INGREDIENTS), 4))
def test_invalid_input(self):
self.assertRaises(ValueError, Recipe, [Recipe.ONION, Recipe.TOMATO, "carrot"])
self.assertRaises(ValueError, Recipe, [Recipe.ONION]*4)
self.assertRaises(ValueError, Recipe, [])
self.assertRaises(ValueError, Recipe, "invalid argument")
def test_recipes_generation(self):
self.assertRaises(AssertionError, Recipe.generate_random_recipes, max_size=Recipe.MAX_NUM_INGREDIENTS+1)
self.assertRaises(AssertionError, Recipe.generate_random_recipes, min_size=0)
self.assertRaises(AssertionError, Recipe.generate_random_recipes, min_size=3, max_size=2)
self.assertRaises(AssertionError, Recipe.generate_random_recipes, ingredients=["onion", "tomato", "fake_ingredient"])
self.assertRaises(AssertionError, Recipe.generate_random_recipes, n=99999)
self.assertEqual(len(Recipe.generate_random_recipes(n=3)), 3)
self.assertEqual(len(Recipe.generate_random_recipes(n=99, unique=False)), 99)
two_sized_recipes = [Recipe(["onion", "onion"]), Recipe(["onion", "tomato"]), Recipe(["tomato", "tomato"])]
for _ in range(100):
self.assertCountEqual(two_sized_recipes, Recipe.generate_random_recipes(n=3, min_size=2, max_size=2, ingredients=["onion", "tomato"]))
only_onions_recipes = [Recipe(["onion", "onion"]), Recipe(["onion", "onion", "onion"])]
for _ in range(100):
self.assertCountEqual(only_onions_recipes, Recipe.generate_random_recipes(n=2, min_size=2, max_size=3, ingredients=["onion"]))
self.assertCountEqual(only_onions_recipes, set([Recipe.generate_random_recipes(n=1, recipes=only_onions_recipes)[0] for _ in range(100)])) # false positives rate for this test is 1/10^99
def _expected_num_recipes(self, num_ingredients, max_len):
    """Count of non-empty ingredient multisets of size <= max_len: C(n + k, n) - 1.

    Stars-and-bars: choosing a multiset of at most max_len items from
    num_ingredients types (including the empty one) gives C(n + k, n)
    combinations; subtract 1 to exclude the empty recipe.
    """
    total_including_empty = comb(num_ingredients + max_len, num_ingredients)
    return total_including_empty - 1
class TestSoupState(unittest.TestCase):
    """Unit tests for the SoupState lifecycle: idle -> cooking -> ready.

    Fixtures:
        s1 -- empty, idle soup
        s2 -- full (2 onions + 1 tomato), idle soup
        s3 -- cooking soup (1 onion, cooking_tick=1)
        s4 -- finished soup (2 tomatoes)
    """
    def setUp(self):
        # Reset Recipe to its default configuration so class-level state is fresh per test
        Recipe.configure({})
        self.s1 = SoupState.get_soup((0, 0), num_onions=0, num_tomatoes=0)
        self.s2 = SoupState.get_soup((0, 1), num_onions=2, num_tomatoes=1)
        self.s3 = SoupState.get_soup((1, 1), num_onions=1, num_tomatoes=0, cooking_tick=1)
        self.s4 = SoupState.get_soup((1, 0), num_onions=0, num_tomatoes=2, finished=True)

    def test_position(self):
        """Moving a soup moves every contained ingredient along with it."""
        new_pos = (2, 0)
        self.s4.position = new_pos
        for ingredient in self.s4._ingredients:
            self.assertEqual(new_pos, ingredient.position)
        self.assertEqual(new_pos, self.s4.position)

    def test_is_cooking(self):
        self.assertFalse(self.s1.is_cooking)
        self.assertFalse(self.s2.is_cooking)
        self.assertTrue(self.s3.is_cooking)
        self.assertFalse(self.s4.is_cooking)

    def test_is_ready(self):
        self.assertFalse(self.s1.is_ready)
        self.assertFalse(self.s2.is_ready)
        self.assertFalse(self.s3.is_ready)
        self.assertTrue(self.s4.is_ready)

    def test_is_idle(self):
        self.assertTrue(self.s1.is_idle)
        self.assertTrue(self.s2.is_idle)
        self.assertFalse(self.s3.is_idle)
        self.assertFalse(self.s4.is_idle)

    def test_is_full(self):
        self.assertFalse(self.s1.is_full)
        self.assertTrue(self.s2.is_full)
        self.assertTrue(self.s3.is_full)
        self.assertTrue(self.s4.is_full)

    def test_cooking(self):
        """Full lifecycle: add ingredients, begin cooking, tick cook_time times until ready."""
        self.s1.add_ingredient_from_str(Recipe.ONION)
        self.s1.add_ingredient_from_str(Recipe.TOMATO)
        self.assertTrue(self.s1.is_idle)
        self.assertFalse(self.s1.is_cooking)
        self.assertFalse(self.s1.is_full)
        self.s1.begin_cooking()
        # begin_cooking flips the soup to full + cooking, no longer idle
        self.assertFalse(self.s1.is_idle)
        self.assertTrue(self.s1.is_full)
        self.assertTrue(self.s1.is_cooking)
        for _ in range(self.s1.cook_time):
            self.s1.cook()
        self.assertFalse(self.s1.is_cooking)
        self.assertFalse(self.s1.is_idle)
        self.assertTrue(self.s1.is_full)
        self.assertTrue(self.s1.is_ready)

    def test_attributes(self):
        self.assertListEqual(self.s1.ingredients, [])
        self.assertListEqual(self.s2.ingredients, [Recipe.ONION, Recipe.ONION, Recipe.TOMATO])
        self.assertListEqual(self.s3.ingredients, [Recipe.ONION])
        self.assertListEqual(self.s4.ingredients, [Recipe.TOMATO, Recipe.TOMATO])
        # Accessing .recipe on a soup that hasn't started cooking is an error.
        # (assertRaises replaces the original try/except-ValueError/fail boilerplate;
        # the pass/fail contract is identical.)
        with self.assertRaises(ValueError):
            self.s1.recipe
        with self.assertRaises(ValueError):
            self.s2.recipe
        self.assertEqual(self.s3.recipe, Recipe([Recipe.ONION]))
        self.assertEqual(self.s4.recipe, Recipe([Recipe.TOMATO, Recipe.TOMATO]))

    def test_invalid_ops(self):
        # Cannot cook an empty soup
        self.assertRaises(ValueError, self.s1.begin_cooking)
        # Must call 'begin_cooking' before cooking a soup
        self.assertRaises(ValueError, self.s2.cook)
        # Cannot cook a done soup
        self.assertRaises(ValueError, self.s4.cook)
        # Cannot begin cooking a soup that is already cooking
        self.assertRaises(ValueError, self.s3.begin_cooking)
        # Cannot begin cooking a soup that is already done
        self.assertRaises(ValueError, self.s4.begin_cooking)
        # Cannot add ingredients to a soup that is cooking
        self.assertRaises(ValueError, self.s3.add_ingredient_from_str, Recipe.ONION)
        # Cannot add ingredients to a soup that is ready
        self.assertRaises(ValueError, self.s4.add_ingredient_from_str, Recipe.ONION)
        # Cannot remove an ingredient from a soup that is ready
        self.assertRaises(ValueError, self.s4.pop_ingredient)
        # Cannot remove an ingredient from a soup that is cooking
        self.assertRaises(ValueError, self.s3.pop_ingredient)
        # Cannot remove an ingredient from a soup that is empty
        self.assertRaises(ValueError, self.s1.pop_ingredient)
class TestDirection(unittest.TestCase):
    """Checks that the Direction <-> index lookup tables form a consistent bijection."""

    def test_direction_number_conversion(self):
        directions = Direction.ALL_DIRECTIONS
        seen_indices = []
        for d in Direction.ALL_DIRECTIONS:
            idx = Direction.DIRECTION_TO_INDEX[d]
            # Round-trip: the index must map back to the same direction
            self.assertEqual(d, Direction.INDEX_TO_DIRECTION[idx])
            seen_indices.append(idx)
        num_directions = len(directions)
        # All directions are pairwise distinct ...
        self.assertEqual(len(set(directions)), num_directions)
        # ... and their indices are exactly 0 .. num_directions - 1
        self.assertEqual(set(seen_indices), set(range(num_directions)))
class TestGridworld(unittest.TestCase):
    """Tests for OvercookedGridworld: construction validation, state transitions,
    serialization round-trips, and the shaped potential function."""
    # TODO: write more smaller targeted tests to be loaded from jsons

    def setUp(self):
        self.base_mdp = OvercookedGridworld.from_layout_name("mdp_test")

    def test_constructor_invalid_inputs(self):
        """Malformed grids must be rejected at construction time."""
        # Height and width must be at least 3.
        with self.assertRaises(AssertionError):
            mdp = OvercookedGridworld.from_grid(['X', 'X', 'X'])
        with self.assertRaises(AssertionError):
            mdp = OvercookedGridworld.from_grid([['X', 'X', 'X']])
        with self.assertRaises(AssertionError):
            # Borders must be present.
            mdp = OvercookedGridworld.from_grid(['XOSX',
                                                'P  D',
                                                ' 21 '])
        with self.assertRaises(AssertionError):
            # The grid can't be ragged.
            mdp = OvercookedGridworld.from_grid(['XXPXX',
                                                'O 2XX',
                                                'X1 3 X',
                                                'XDXSXX'])
        with self.assertRaises(AssertionError):
            # The agents must be numbered 1 and 2.
            mdp = OvercookedGridworld.from_grid(['XXPXX',
                                                'O  3O',
                                                'X1  X',
                                                'XDXSX'])
        with self.assertRaises(AssertionError):
            # The agents must be numbered 1 and 2.
            mdp = OvercookedGridworld.from_grid(['XXPXX',
                                                'O  1O',
                                                'X1  X',
                                                'XDXSX'])
        with self.assertRaises(AssertionError):
            # B is not a valid element.
            mdp = OvercookedGridworld.from_grid(['XBPXX',
                                                'O  2O',
                                                'X1  X',
                                                'XDXSX'])

    def test_start_positions(self):
        """Standard start state matches the serialized expected state."""
        actual_start_state = self.base_mdp.get_standard_start_state()
        expected_state_path = os.path.join(TESTING_DATA_DIR, "test_start_positions", "expected.json")
        # NOTE: Uncomment the following line if expected start state deliberately changed
        # save_as_json(actual_start_state.to_dict(), expected_state_path)
        expected_start_state = OvercookedState.from_dict(load_from_json(expected_state_path))
        self.assertEqual(actual_start_state, expected_start_state, '\n' + str(actual_start_state) + '\n' + str(expected_start_state))

    def test_file_constructor(self):
        """Loading a named layout produces the expected start state."""
        mdp = OvercookedGridworld.from_layout_name('corridor')
        expected_start_state = OvercookedState(
            [PlayerState((3, 1), Direction.NORTH), PlayerState((10, 1), Direction.NORTH)], {},
            all_orders=[{ "ingredients" : ["onion", "onion", "onion"]}])
        actual_start_state = mdp.get_standard_start_state()
        self.assertEqual(actual_start_state, expected_start_state, '\n' + str(actual_start_state) + '\n' + str(expected_start_state))

    def test_actions(self):
        """get_actions rejects invalid states and returns the full action sets otherwise."""
        bad_state = OvercookedState(
            [PlayerState((0, 0), Direction.NORTH), PlayerState((3, 1), Direction.NORTH)], {})
        with self.assertRaises(AssertionError):
            self.base_mdp.get_actions(bad_state)
        self.assertEqual(self.base_mdp.get_actions(self.base_mdp.get_standard_start_state()),
                         [Action.ALL_ACTIONS, Action.ALL_ACTIONS])

    def test_from_dict(self):
        """A raw state dict round-trips through OvercookedState.from_dict without error."""
        state_dict = {"players": [{"position": [2, 1], "orientation": [0, -1], "held_object": None }, {"position": [1, 1], "orientation": [0, -1], "held_object": None }], "objects": [{"name": "onion", "position": [1, 0], "state": None }], "order_list": None }
        state = OvercookedState.from_dict(state_dict)

    def test_transitions_and_environment(self):
        """Environment stepping agrees with the MDP transition function and with stored expectations."""
        bad_state = OvercookedState(
            [P((0, 0), s), P((3, 1), s)], {})
        with self.assertRaises(AssertionError):
            self.base_mdp.get_state_transition(bad_state, stay)

        env = OvercookedEnv.from_mdp(self.base_mdp)

        def check_transition(action, expected_path, recompute=False):
            # Compute actual values
            state = env.state
            pred_state, _ = self.base_mdp.get_state_transition(state, action)
            new_state, sparse_reward, _, _ = env.step(action)
            self.assertEqual(pred_state, new_state, '\n' + str(pred_state) + '\n' + str(new_state))

            # Recompute expected values if desired
            if recompute:
                actual = {
                    "state" : pred_state.to_dict(),
                    "reward" : sparse_reward
                }
                save_as_json(actual, expected_path)

            # Compute expected values
            expected = load_from_json(expected_path)
            expected_state = OvercookedState.from_dict(expected['state'])
            expected_reward = expected['reward']

            # Make sure everything lines up (note __eq__ is transitive)
            self.assertTrue(pred_state.time_independent_equal(expected_state), '\n' + str(pred_state) + '\n' + str(expected_state))
            self.assertEqual(sparse_reward, expected_reward)

        expected_path = os.path.join(TESTING_DATA_DIR, "test_transitions_and_environments", "expected.json")
        # NOTE: set 'recompute=True' if deliberately updating state dynamics
        check_transition([n, e], expected_path, recompute=False)

    def test_mdp_dynamics(self):
        traj_path = os.path.join(TESTING_DATA_DIR, 'test_mdp_dynamics', 'expected.json')
        # NOTE: uncomment the following line to recompute trajectories if MDP dynamics were deliberately updated
        # generate_serialized_trajectory(self.base_mdp, traj_path)
        # FIX(review): the call above used to run unconditionally, regenerating the
        # expected trajectory on every test run and making the check below vacuous.
        # Commented out per its own NOTE -- assumes expected.json ships with the
        # test data (as for the other expected.json fixtures in this file); verify.
        test_trajectory = AgentEvaluator.load_traj_from_json(traj_path)
        AgentEvaluator.check_trajectories(test_trajectory, from_json=True)

    def test_mdp_serialization(self):
        """State to_dict/from_dict must be exact inverses along random trajectories."""
        # Where to store serialized states -- will be overwritten each timestep
        dummy_path = os.path.join(TESTING_DATA_DIR, 'test_mdp_serialization', 'dummy.json')

        # Get starting seed and random agent pair
        seed = 47
        random_pair = AgentPair(RandomAgent(all_actions=True), RandomAgent(all_actions=True))

        # Run rollouts with different seeds until sparse reward is achieved
        sparse_reward = 0
        while sparse_reward <= 0:
            np.random.seed(seed)
            state = self.base_mdp.get_standard_start_state()
            for _ in range(1500):
                # Ensure serialization and deserializations are inverses
                reconstructed_state = OvercookedState.from_dict(load_from_json(save_as_json(state.to_dict(), dummy_path)))
                self.assertEqual(state, reconstructed_state, "\nState: \t\t\t{}\nReconstructed State: \t{}".format(state, reconstructed_state))

                # Advance state
                joint_action, _ = zip(*random_pair.joint_action(state))
                state, infos = self.base_mdp.get_state_transition(state, joint_action)
                sparse_reward += sum(infos['sparse_reward_by_agent'])
            seed += 1

    def test_four_player_mdp(self):
        try:
            OvercookedGridworld.from_layout_name("multiplayer_schelling")
        except AssertionError as e:
            print("Loading > 2 player map failed with error:", e)

    def test_potential_function(self):
        """The shaped potential must increase with useful progress and decrease on mistakes.

        Walks a scripted two-player episode through pickups, potting, cooking,
        delivery, and failure cases, checking monotonicity of the potential at
        each milestone.
        """
        mp = MotionPlanner(self.base_mdp)
        state = self.base_mdp.get_standard_start_state()
        val0 = self.base_mdp.potential_function(state, mp)

        # Pick up onion
        print("pick up onion")
        print(self.base_mdp.state_string(state))
        print("potential: ", self.base_mdp.potential_function(state, mp))
        actions = [Direction.EAST, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val1 = self.base_mdp.potential_function(state, mp)

        # Pick up tomato
        print("pick up tomtato")
        actions = [Direction.WEST, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val2 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val0, val1, "Picking up onion should increase potential")
        self.assertLess(val1, val2, "Picking up tomato should increase potential")

        # Pot tomato
        print("pot tomato")
        actions = [Direction.EAST, Direction.NORTH, Action.INTERACT, Direction.WEST]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val3 = self.base_mdp.potential_function(state, mp)

        # Pot onion
        print("pot onion")
        actions = [Direction.WEST, Direction.NORTH, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val4 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val2, val3, "Potting tomato should increase potential")
        self.assertLess(val3, val4, "Potting onion should increase potential")

        ## Repeat on second pot ##

        # Pick up onion
        print("pick up onion")
        state, _ = self.base_mdp.get_state_transition(state, [Action.INTERACT, Action.STAY])
        val5 = self.base_mdp.potential_function(state, mp)
        print(self.base_mdp.state_string(state))
        print("potential: ", self.base_mdp.potential_function(state, mp))

        # Pick up tomato
        print("pick up tomato")
        actions = [Direction.SOUTH, Direction.EAST, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val6 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val4, val5, "Picking up onion should increase potential")
        self.assertLess(val5, val6, "Picking up tomato should increase potential")

        # Pot onion
        print("pot onion")
        actions = [Direction.SOUTH, Direction.EAST, Direction.SOUTH, Action.INTERACT, Direction.WEST]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val7 = self.base_mdp.potential_function(state, mp)

        # Pot tomato
        print("pot tomato")
        actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val8 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val6, val7, "Potting onion should increase potential")
        self.assertLess(val7, val8, "Potting tomato should increase potential")

        ## Useless pickups ##

        # pickup tomato
        print("pickup tomato")
        actions = [Action.INTERACT, Direction.NORTH]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val9 = self.base_mdp.potential_function(state, mp)

        # pickup tomato
        print("pickup tomato")
        actions = [Direction.EAST, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val10 = self.base_mdp.potential_function(state, mp)

        self.assertLessEqual(val9, val8, "Extraneous pickup should not increase potential")
        self.assertLessEqual(val10, val8, "Extraneous pickup should not increase potential")

        ## Catastrophic soup failure ##

        # pot tomato
        print("pot catastrophic tomato")
        actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val11 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val11, val10, "Catastrophic potting should decrease potential")

        ## Bonus soup creation

        # pick up onion
        print("pick up onion")
        actions = [Direction.NORTH, Action.INTERACT, Direction.WEST, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val12 = self.base_mdp.potential_function(state, mp)

        # pot onion
        print("pot onion")
        actions = [Direction.EAST, Direction.NORTH, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val13 = self.base_mdp.potential_function(state, mp)

        # Cook soup
        print("cook soup")
        actions = [Action.INTERACT, Direction.WEST]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val14 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val11, val12, "Useful onion pickup should increase potential")
        self.assertLess(val12, val13, "Potting useful onion should increase potential")
        self.assertLess(val13, val14, "Cooking optimal soup should increase potential")

        ## Soup pickup ##

        # Pick up dish
        print("pick up dish")
        actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val15 = self.base_mdp.potential_function(state, mp)

        # Move towards pot
        print("move towards pot")
        actions = [Direction.EAST, Direction.NORTH]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val16 = self.base_mdp.potential_function(state, mp)

        # Pickup soup
        print("pickup soup")
        state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, Action.INTERACT])
        print(self.base_mdp.state_string(state))
        print("potential: ", self.base_mdp.potential_function(state, mp))
        val17 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val14, val15, "Useful dish pickups should increase potential")
        self.assertLess(val15, val16, "Moving towards soup with dish should increase potential")
        self.assertLess(val16, val17, "Picking up soup should increase potential")

        ## Removing failed soup from pot

        # move towards failed soup
        print("move torwards failed soup")
        actions = [Direction.SOUTH, Direction.EAST, Direction.SOUTH]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val18 = self.base_mdp.potential_function(state, mp)

        # Cook failed soup
        actions = [Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val19 = self.base_mdp.potential_function(state, mp)

        # Pickup dish
        print("pickup dish")
        actions = [Direction.WEST, Direction.SOUTH, Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val20 = self.base_mdp.potential_function(state, mp)

        # Move towards soup
        print("move towards soup")
        actions = [Direction.EAST, Direction.SOUTH]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val21 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val17, val18, "Moving towards failed soup should increase potential")
        self.assertLess(val18, val19, "Cooking failed soup should increase potential")
        self.assertLess(val19, val20, "Dish pickup for failed soup is still useful")
        self.assertLess(val20, val21, "Moving towars pertinant pot with dish should increase potential")

        ## Deliver failed soup ##

        # Pickup failed soup
        actions = [Action.INTERACT]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val22 = self.base_mdp.potential_function(state, mp)

        # Move towards serving area
        print("move towards servering area")
        actions = [Direction.EAST, Direction.SOUTH]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [action, Action.STAY])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val23 = self.base_mdp.potential_function(state, mp)

        # Move away from serving area
        print("move away from serving area")
        state, _ = self.base_mdp.get_state_transition(state, [Direction.NORTH, Action.STAY])
        print(self.base_mdp.state_string(state))
        print("potential: ", self.base_mdp.potential_function(state, mp))
        val24 = self.base_mdp.potential_function(state, mp)

        self.assertLess(val21, val22, "Picking up failed soup should increase potential")
        self.assertAlmostEqual(val23, val22, delta=0.2, msg="Moving to serve failed soup doesn't change potential much")
        self.assertAlmostEqual(val23, val24, delta=0.2, msg="Moving away from serving area with failed soup doesn't change much")

        ## Deliver successful soup ##

        # Move towards serving area
        print("move towards serving area")
        actions = [Direction.SOUTH, Direction.EAST, Direction.SOUTH]
        for action in actions:
            state, _ = self.base_mdp.get_state_transition(state, [Action.STAY, action])
            print(self.base_mdp.state_string(state))
            print("potential: ", self.base_mdp.potential_function(state, mp))
        val25 = self.base_mdp.potential_function(state, mp)

        # Deliver soup
        print("deliver successful soup")
        state, rewards = self.base_mdp.get_state_transition(state, [Action.STAY, Action.INTERACT])
        print(self.base_mdp.state_string(state))
        print("potential: ", self.base_mdp.potential_function(state, mp))

        self.assertLess(val24, val25, "Moving towards serving area with valid soup increases potential")
        self.assertEqual(sum(rewards['sparse_reward_by_agent']), 50, "Soup was not properly devivered, probably an error with MDP logic")
def random_joint_action():
    """Return a uniformly random joint action (one action per player) as a 2-tuple."""
    num_actions = len(Action.ALL_ACTIONS)
    # Draw both player indices in a single numpy call
    idx_pair = np.random.randint(low=0, high=num_actions, size=2)
    return tuple(Action.INDEX_TO_ACTION[idx] for idx in idx_pair)
class TestFeaturizations(unittest.TestCase):
    """Regression tests pinning state featurizations against stored pickled expectations."""

    def setUp(self):
        self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
        self.mlam = MediumLevelActionManager.from_pickle_or_compute(self.base_mdp, NO_COUNTERS_PARAMS, force_compute=True)
        self.env = OvercookedEnv.from_mdp(self.base_mdp, **DEFAULT_ENV_PARAMS)
        self.rnd_agent_pair = AgentPair(GreedyHumanModel(self.mlam), GreedyHumanModel(self.mlam))
        np.random.seed(0)

    def test_lossless_state_featurization_shape(self):
        """Encoded observation shape must match the advertised encoding shape."""
        start_state = self.base_mdp.get_standard_start_state()
        obs = self.base_mdp.lossless_state_encoding(start_state)[0]
        expected_shape = self.base_mdp.lossless_state_encoding_shape
        self.assertTrue(np.array_equal(obs.shape, expected_shape), "{} vs {}".format(obs.shape, expected_shape))

    def test_state_featurization_shape(self):
        """Featurized observation shape must match the advertised featurization shape."""
        start_state = self.base_mdp.get_standard_start_state()
        obs = self.base_mdp.featurize_state(start_state, self.mlam)[0]
        expected_shape = self.base_mdp.featurize_state_shape
        self.assertTrue(np.array_equal(obs.shape, expected_shape), "{} vs {}".format(obs.shape, expected_shape))

    def test_lossless_state_featurization(self):
        """Lossless encodings of rollout states must match the stored expectations."""
        trajs = self.env.get_rollouts(self.rnd_agent_pair, num_games=5)
        featurized_observations = [
            [self.base_mdp.lossless_state_encoding(state) for state in ep_states]
            for ep_states in trajs["ep_states"]
        ]
        pickle_path = os.path.join(TESTING_DATA_DIR, "test_lossless_state_featurization", "expected")
        # NOTE: If the featurizations are updated intentionally, you can overwrite the expected
        # featurizations by uncommenting the following line:
        # save_pickle(featurized_observations, pickle_path)
        expected_featurization = load_pickle(pickle_path)
        self.assertTrue(np.array_equal(expected_featurization, featurized_observations))

    def test_state_featurization(self):
        """Hand-crafted featurizations of rollout states must match the stored expectations."""
        trajs = self.env.get_rollouts(self.rnd_agent_pair, num_games=5)
        featurized_observations = [
            [self.base_mdp.featurize_state(state, self.mlam) for state in ep_states]
            for ep_states in trajs["ep_states"]
        ]
        pickle_path = os.path.join(TESTING_DATA_DIR, "test_state_featurization", 'expected')
        # NOTE: If the featurizations are updated intentionally, you can overwrite the expected
        # featurizations by uncommenting the following line:
        # save_pickle(featurized_observations, pickle_path)
        expected_featurization = load_pickle(pickle_path)
        self.assertTrue(np.array_equal(expected_featurization, featurized_observations))
class TestOvercookedEnvironment(unittest.TestCase):
    """Tests for OvercookedEnv: construction, stepping, rollouts, and random layout generation."""

    def setUp(self):
        self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
        self.env = OvercookedEnv.from_mdp(self.base_mdp, **DEFAULT_ENV_PARAMS)
        self.rnd_agent_pair = AgentPair(FixedPlanAgent([stay, w, w]), FixedPlanAgent([stay, e, e]))
        np.random.seed(0)

    def test_constructor(self):
        try:
            OvercookedEnv.from_mdp(self.base_mdp, horizon=10)
        except Exception as e:
            self.fail("Failed to instantiate OvercookedEnv:\n{}".format(e))

        # Unknown env params must be rejected
        with self.assertRaises(TypeError):
            OvercookedEnv.from_mdp(self.base_mdp, **{"invalid_env_param": None})

    def test_step_fn(self):
        for _ in range(10):
            joint_action = random_joint_action()
            self.env.step(joint_action)

    def test_execute_plan(self):
        action_plan = [random_joint_action() for _ in range(10)]
        self.env.execute_plan(self.base_mdp.get_standard_start_state(), action_plan)

    def test_run_agents(self):
        start_state = self.env.state
        self.env.run_agents(self.rnd_agent_pair)
        self.assertNotEqual(self.env.state, start_state)

    def test_rollouts(self):
        try:
            self.env.get_rollouts(self.rnd_agent_pair, 3)
        except Exception as e:
            # BUG FIX: the original called e.with_traceback() with no argument, which
            # itself raises TypeError and masks the real failure (with_traceback
            # requires exactly one tb argument). Print the real traceback instead.
            import traceback
            traceback.print_exc()
            self.fail("Failed to get rollouts from environment:\n{}".format(e))

    def test_one_player_env(self):
        mdp = OvercookedGridworld.from_layout_name("cramped_room_single")
        env = OvercookedEnv.from_mdp(mdp, horizon=12)
        a0 = FixedPlanAgent([stay, w, w, e, e, n, e, interact, w, n, interact])
        ag = AgentGroup(a0)
        env.run_agents(ag, display=False)
        self.assertEqual(
            env.state.players_pos_and_or,
            (((2, 1), (0, -1)),)
        )

    def test_four_player_env_fixed(self):
        mdp = OvercookedGridworld.from_layout_name("multiplayer_schelling")
        assert mdp.num_players == 4
        env = OvercookedEnv.from_mdp(mdp, horizon=16)
        a0 = FixedPlanAgent([stay, w, w])
        a1 = FixedPlanAgent([stay, stay, e, e, n, n, n, e, interact, n, n, w, w, w, n, interact, e])
        a2 = FixedPlanAgent([stay, w, interact, n, n, e, e, e, n, e, n, interact, w])
        a3 = FixedPlanAgent([e, interact, n, n, w, w, w, n, interact, e, s])
        ag = AgentGroup(a0, a1, a2, a3)
        env.run_agents(ag, display=False)
        self.assertEqual(
            env.state.players_pos_and_or,
            (((1, 1), (-1, 0)), ((3, 1), (0, -1)), ((2, 1), (-1, 0)), ((4, 2), (0, 1)))
        )

    def test_display(self):
        mdp0 = OvercookedGridworld.from_layout_name("cramped_room")
        mdp_fn = lambda _ignored: mdp0
        env = OvercookedEnv(mdp_fn, horizon=20)
        env.get_rollouts(self.rnd_agent_pair, 1, display=True)

    def test_display_phi(self):
        mdp0 = OvercookedGridworld.from_layout_name("cramped_room")
        mdp_fn = lambda _ignored: mdp0
        env = OvercookedEnv(mdp_fn, horizon=20)
        env.get_rollouts(self.rnd_agent_pair, 1, display=True, display_phi=True)

    def test_multiple_mdp_env(self):
        mdp0 = OvercookedGridworld.from_layout_name("cramped_room")
        mdp1 = OvercookedGridworld.from_layout_name("counter_circuit")
        mdp_fn = lambda _ignored: np.random.choice([mdp0, mdp1])

        # Default env
        env = OvercookedEnv(mdp_fn, horizon=100)
        env.get_rollouts(self.rnd_agent_pair, 5)

    def test_starting_position_randomization(self):
        self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
        start_state_fn = self.base_mdp.get_random_start_state_fn(random_start_pos=True, rnd_obj_prob_thresh=0.0)
        env = OvercookedEnv.from_mdp(self.base_mdp, start_state_fn)
        start_state = env.state.players_pos_and_or
        for _ in range(3):
            env.reset()
            curr_terrain = env.state.players_pos_and_or
            self.assertFalse(np.array_equal(start_state, curr_terrain))

    def test_starting_obj_randomization(self):
        self.base_mdp = OvercookedGridworld.from_layout_name("cramped_room")
        start_state_fn = self.base_mdp.get_random_start_state_fn(random_start_pos=False, rnd_obj_prob_thresh=0.8)
        env = OvercookedEnv.from_mdp(self.base_mdp, start_state_fn)
        start_state = env.state.all_objects_list
        for _ in range(3):
            env.reset()
            curr_terrain = env.state.all_objects_list
            self.assertFalse(np.array_equal(start_state, curr_terrain))

    def test_failing_rnd_layout(self):
        with self.assertRaises(TypeError):
            mdp_gen_params = {"None": None}
            mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(**mdp_gen_params)
            OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)

    def test_random_layout(self):
        mdp_gen_params = {"inner_shape": (5, 4),
                          "prop_empty": 0.8,
                          "prop_feats": 0.2,
                          "start_all_orders" : [
                              { "ingredients" : ["onion", "onion", "onion"]}
                          ],
                          "recipe_values" : [20],
                          "recipe_times" : [20],
                          "display": False
                          }
        mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params, outer_shape=(5, 4))
        env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
        start_terrain = env.mdp.terrain_mtx

        # Procedurally generated layouts should differ across resets
        for _ in range(3):
            env.reset()
            curr_terrain = env.mdp.terrain_mtx
            self.assertFalse(np.array_equal(start_terrain, curr_terrain))

        # A fixed layout name yields the same terrain on every reset
        mdp_gen_params = {"layout_name": 'cramped_room'}
        mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params)
        env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)

        layouts_seen = []
        for _ in range(5):
            layouts_seen.append(env.mdp.terrain_mtx)
            env.reset()
        all_same_layout = all([np.array_equal(env.mdp.terrain_mtx, terrain) for terrain in layouts_seen])
        self.assertTrue(all_same_layout)

        # Switching the layout name must produce terrain different from
        # the cramped_room terrains already accumulated in layouts_seen
        mdp_gen_params = {"layout_name": 'asymmetric_advantages'}
        mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params)
        env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
        for _ in range(5):
            layouts_seen.append(env.mdp.terrain_mtx)
            env.reset()
        all_same_layout = all([np.array_equal(env.mdp.terrain_mtx, terrain) for terrain in layouts_seen])
        self.assertFalse(all_same_layout)

    def test_random_layout_feature_types(self):
        mandatory_features = {POT, DISH_DISPENSER, SERVING_LOC}
        optional_features = {ONION_DISPENSER, TOMATO_DISPENSER}
        optional_features_combinations = [{ONION_DISPENSER, TOMATO_DISPENSER}, {ONION_DISPENSER}, {TOMATO_DISPENSER}]
        for optional_features_combo in optional_features_combinations:
            left_out_optional_features = optional_features - optional_features_combo
            used_features = list(optional_features_combo | mandatory_features)
            mdp_gen_params = {"prop_feats": 0.9,
                              "feature_types": used_features,
                              "prop_empty": 0.1,
                              "inner_shape": (6, 5),
                              "display": False,
                              "start_all_orders" : [
                                  { "ingredients" : ["onion", "onion", "onion"]}
                              ]}
            mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params, outer_shape=(6, 5))
            env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
            for _ in range(10):
                env.reset()
                curr_terrain = env.mdp.terrain_mtx
                terrain_features = set.union(*(set(line) for line in curr_terrain))
                self.assertTrue(all(elem in terrain_features for elem in used_features)) # all used_features are actually used
                if left_out_optional_features:
                    self.assertFalse(any(elem in terrain_features for elem in left_out_optional_features)) # all left_out optional_features are not used

    def test_random_layout_generated_recipes(self):
        only_onions_recipes = [Recipe(["onion", "onion"]), Recipe(["onion", "onion", "onion"])]
        only_onions_dict_recipes = [r.to_dict() for r in only_onions_recipes]

        # checking if recipes are generated from mdp_params
        mdp_gen_params = {"generate_all_orders": {"n":2, "ingredients": ["onion"], "min_size":2, "max_size":3},
                          "prop_feats": 0.9,
                          "prop_empty": 0.1,
                          "inner_shape": (6, 5),
                          "display": False}
        mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params, outer_shape=(6, 5))
        env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
        for _ in range(10):
            env.reset()
            self.assertCountEqual(env.mdp.start_all_orders, only_onions_dict_recipes)
            self.assertEqual(len(env.mdp.start_bonus_orders), 0)

        # checking if bonus_orders is subset of all_orders even if not specified
        mdp_gen_params = {"generate_all_orders": {"n":2, "ingredients": ["onion"], "min_size":2, "max_size":3},
                          "generate_bonus_orders": {"n":1, "min_size":2, "max_size":3},
                          "prop_feats": 0.9,
                          "prop_empty": 0.1,
                          "inner_shape": (6, 5),
                          "display": False}
        mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params, outer_shape=(6,5))
        env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
        for _ in range(10):
            env.reset()
            self.assertCountEqual(env.mdp.start_all_orders, only_onions_dict_recipes)
            self.assertEqual(len(env.mdp.start_bonus_orders), 1)
            self.assertTrue(env.mdp.start_bonus_orders[0] in only_onions_dict_recipes)

        # checking if after reset there are new recipes generated
        mdp_gen_params = {"generate_all_orders": {"n":3, "min_size":2, "max_size":3},
                          "prop_feats": 0.9,
                          "prop_empty": 0.1,
                          "inner_shape": (6, 5),
                          "display": False,
                          "feature_types": [POT, DISH_DISPENSER, SERVING_LOC, ONION_DISPENSER, TOMATO_DISPENSER]
                          }
        mdp_fn = LayoutGenerator.mdp_gen_fn_from_dict(mdp_gen_params, outer_shape=(6,5))
        env = OvercookedEnv(mdp_fn, **DEFAULT_ENV_PARAMS)
        generated_recipes_strings = set()
        for _ in range(20):
            env.reset()
            generated_recipes_strings |= {json.dumps(o, sort_keys=True) for o in env.mdp.start_all_orders}
        # More than one 3-recipe batch must appear across 20 resets
        self.assertTrue(len(generated_recipes_strings) > 3)
class TestGymEnvironment(unittest.TestCase):
    """Minimal fixture for gym-wrapped Overcooked tests."""

    def setUp(self):
        """Create a cramped_room environment plus a pair of do-nothing agents.

        Attributes set here are read by the individual test methods:
        ``base_mdp``, ``env``, and ``rnd_agent_pair``.
        """
        layout = "cramped_room"
        self.base_mdp = OvercookedGridworld.from_layout_name(layout)
        self.env = OvercookedEnv.from_mdp(self.base_mdp, **DEFAULT_ENV_PARAMS)
        # Agents with empty fixed plans: they never take an action.
        self.rnd_agent_pair = AgentPair(*(FixedPlanAgent([]) for _ in range(2)))
        # Seed NumPy last (as the original did) so test bodies start
        # from a known RNG state regardless of what the constructors drew.
        np.random.seed(0)

    # TODO: write more tests here
# Allow running this test module directly: unittest discovers and runs
# every TestCase defined above.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.random.seed",
"overcooked_ai_py.agents.benchmarking.AgentEvaluator.load_traj_from_json",
"json.dumps",
"numpy.random.randint",
"overcooked_ai_py.planning.planners.MediumLevelActionManager.from_pickle_or_compute",
"os.path.join",
"overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout... | [((31722, 31772), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'num_actions', 'size': '(2)'}), '(low=0, high=num_actions, size=2)\n', (31739, 31772), True, 'import numpy as np\n'), ((45837, 45852), 'unittest.main', 'unittest.main', ([], {}), '()\n', (45850, 45852), False, 'import unittest, os\n'), ((1169, 1181), 'math.factorial', 'factorial', (['n'], {}), '(n)\n', (1178, 1181), False, 'from math import factorial\n'), ((1287, 1307), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.configure', 'Recipe.configure', (['{}'], {}), '({})\n', (1303, 1307), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1326, 1376), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.ONION, Recipe.ONION, Recipe.ONION]'], {}), '([Recipe.ONION, Recipe.ONION, Recipe.ONION])\n', (1332, 1376), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1395, 1445), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.ONION, Recipe.ONION, Recipe.ONION]'], {}), '([Recipe.ONION, Recipe.ONION, Recipe.ONION])\n', (1401, 1445), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1464, 1501), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.ONION, Recipe.TOMATO]'], {}), '([Recipe.ONION, Recipe.TOMATO])\n', (1470, 1501), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1520, 1557), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.ONION, Recipe.TOMATO]'], {}), '([Recipe.ONION, Recipe.TOMATO])\n', (1526, 1557), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, 
OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1576, 1613), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.TOMATO, Recipe.ONION]'], {}), '([Recipe.TOMATO, Recipe.ONION])\n', (1582, 1613), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1632, 1668), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.ONION, Recipe.ONION]'], {}), '([Recipe.ONION, Recipe.ONION])\n', (1638, 1668), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((1781, 1801), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.configure', 'Recipe.configure', (['{}'], {}), '({})\n', (1797, 1801), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((2984, 3028), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.configure', 'Recipe.configure', (["{'max_num_ingredients': 4}"], {}), "({'max_num_ingredients': 4})\n", (3000, 3028), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((5118, 5138), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.configure', 'Recipe.configure', (['{}'], {}), '({})\n', (5134, 5138), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((5157, 5213), 'overcooked_ai_py.mdp.overcooked_mdp.SoupState.get_soup', 'SoupState.get_soup', (['(0, 0)'], {'num_onions': '(0)', 'num_tomatoes': '(0)'}), '((0, 0), num_onions=0, num_tomatoes=0)\n', (5175, 5213), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((5232, 5288), 'overcooked_ai_py.mdp.overcooked_mdp.SoupState.get_soup', 'SoupState.get_soup', 
(['(0, 1)'], {'num_onions': '(2)', 'num_tomatoes': '(1)'}), '((0, 1), num_onions=2, num_tomatoes=1)\n', (5250, 5288), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((5307, 5379), 'overcooked_ai_py.mdp.overcooked_mdp.SoupState.get_soup', 'SoupState.get_soup', (['(1, 1)'], {'num_onions': '(1)', 'num_tomatoes': '(0)', 'cooking_tick': '(1)'}), '((1, 1), num_onions=1, num_tomatoes=0, cooking_tick=1)\n', (5325, 5379), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((5398, 5469), 'overcooked_ai_py.mdp.overcooked_mdp.SoupState.get_soup', 'SoupState.get_soup', (['(1, 0)'], {'num_onions': '(0)', 'num_tomatoes': '(2)', 'finished': '(True)'}), '((1, 0), num_onions=0, num_tomatoes=2, finished=True)\n', (5416, 5469), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((10379, 10427), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""mdp_test"""'], {}), "('mdp_test')\n", (10415, 10427), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((12461, 12532), 'os.path.join', 'os.path.join', (['TESTING_DATA_DIR', '"""test_start_positions"""', '"""expected.json"""'], {}), "(TESTING_DATA_DIR, 'test_start_positions', 'expected.json')\n", (12473, 12532), False, 'import unittest, os\n'), ((12979, 13027), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""corridor"""'], {}), "('corridor')\n", (13015, 13027), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((14164, 14201), 
'overcooked_ai_py.mdp.overcooked_mdp.OvercookedState.from_dict', 'OvercookedState.from_dict', (['state_dict'], {}), '(state_dict)\n', (14189, 14201), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((14463, 14500), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp'], {}), '(self.base_mdp)\n', (14485, 14500), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((15668, 15756), 'os.path.join', 'os.path.join', (['TESTING_DATA_DIR', '"""test_transitions_and_environments"""', '"""expected.json"""'], {}), "(TESTING_DATA_DIR, 'test_transitions_and_environments',\n 'expected.json')\n", (15680, 15756), False, 'import unittest, os\n'), ((15950, 16018), 'os.path.join', 'os.path.join', (['TESTING_DATA_DIR', '"""test_mdp_dynamics"""', '"""expected.json"""'], {}), "(TESTING_DATA_DIR, 'test_mdp_dynamics', 'expected.json')\n", (15962, 16018), False, 'import unittest, os\n'), ((16141, 16197), 'utils.generate_serialized_trajectory', 'generate_serialized_trajectory', (['self.base_mdp', 'traj_path'], {}), '(self.base_mdp, traj_path)\n', (16171, 16197), False, 'from utils import TESTING_DATA_DIR, generate_serialized_trajectory\n'), ((16225, 16270), 'overcooked_ai_py.agents.benchmarking.AgentEvaluator.load_traj_from_json', 'AgentEvaluator.load_traj_from_json', (['traj_path'], {}), '(traj_path)\n', (16259, 16270), False, 'from overcooked_ai_py.agents.benchmarking import AgentEvaluator\n'), ((16279, 16345), 'overcooked_ai_py.agents.benchmarking.AgentEvaluator.check_trajectories', 'AgentEvaluator.check_trajectories', (['test_trajectory'], {'from_json': '(True)'}), '(test_trajectory, from_json=True)\n', (16312, 16345), False, 'from overcooked_ai_py.agents.benchmarking import AgentEvaluator\n'), ((16486, 16556), 'os.path.join', 'os.path.join', (['TESTING_DATA_DIR', '"""test_mdp_serialization"""', 
'"""dummy.json"""'], {}), "(TESTING_DATA_DIR, 'test_mdp_serialization', 'dummy.json')\n", (16498, 16556), False, 'import unittest, os\n'), ((17888, 17916), 'overcooked_ai_py.planning.planners.MotionPlanner', 'MotionPlanner', (['self.base_mdp'], {}), '(self.base_mdp)\n', (17901, 17916), False, 'from overcooked_ai_py.planning.planners import MediumLevelActionManager, NO_COUNTERS_PARAMS, MotionPlanner\n'), ((31942, 31994), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (31978, 31994), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((32015, 32121), 'overcooked_ai_py.planning.planners.MediumLevelActionManager.from_pickle_or_compute', 'MediumLevelActionManager.from_pickle_or_compute', (['self.base_mdp', 'NO_COUNTERS_PARAMS'], {'force_compute': '(True)'}), '(self.base_mdp,\n NO_COUNTERS_PARAMS, force_compute=True)\n', (32062, 32121), False, 'from overcooked_ai_py.planning.planners import MediumLevelActionManager, NO_COUNTERS_PARAMS, MotionPlanner\n'), ((32137, 32196), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp'], {}), '(self.base_mdp, **DEFAULT_ENV_PARAMS)\n', (32159, 32196), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((32303, 32320), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (32317, 32320), True, 'import numpy as np\n'), ((33270, 33349), 'os.path.join', 'os.path.join', (['TESTING_DATA_DIR', '"""test_lossless_state_featurization"""', '"""expected"""'], {}), "(TESTING_DATA_DIR, 'test_lossless_state_featurization', 'expected')\n", (33282, 33349), False, 'import unittest, os\n'), ((33601, 33625), 'overcooked_ai_py.utils.load_pickle', 'load_pickle', (['pickle_path'], {}), '(pickle_path)\n', (33612, 33625), False, 'from 
overcooked_ai_py.utils import save_pickle, load_pickle, iterate_over_json_files_in_dir, load_from_json, save_as_json\n'), ((33995, 34065), 'os.path.join', 'os.path.join', (['TESTING_DATA_DIR', '"""test_state_featurization"""', '"""expected"""'], {}), "(TESTING_DATA_DIR, 'test_state_featurization', 'expected')\n", (34007, 34065), False, 'import unittest, os\n'), ((34316, 34340), 'overcooked_ai_py.utils.load_pickle', 'load_pickle', (['pickle_path'], {}), '(pickle_path)\n', (34327, 34340), False, 'from overcooked_ai_py.utils import save_pickle, load_pickle, iterate_over_json_files_in_dir, load_from_json, save_as_json\n'), ((34530, 34582), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (34566, 34582), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((34602, 34661), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp'], {}), '(self.base_mdp, **DEFAULT_ENV_PARAMS)\n', (34624, 34661), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((34770, 34787), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (34784, 34787), True, 'import numpy as np\n'), ((35933, 35992), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room_single"""'], {}), "('cramped_room_single')\n", (35969, 35992), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((36007, 36046), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['mdp'], {'horizon': '(12)'}), '(mdp, horizon=12)\n', (36029, 36046), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, 
DEFAULT_ENV_PARAMS\n'), ((36060, 36126), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[stay, w, w, e, e, n, e, interact, w, n, interact]'], {}), '([stay, w, w, e, e, n, e, interact, w, n, interact])\n', (36074, 36126), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((36140, 36154), 'overcooked_ai_py.agents.agent.AgentGroup', 'AgentGroup', (['a0'], {}), '(a0)\n', (36150, 36154), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((36365, 36426), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""multiplayer_schelling"""'], {}), "('multiplayer_schelling')\n", (36401, 36426), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((36477, 36516), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['mdp'], {'horizon': '(16)'}), '(mdp, horizon=16)\n', (36499, 36516), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((36530, 36558), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[stay, w, w]'], {}), '([stay, w, w])\n', (36544, 36558), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((36572, 36663), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[stay, stay, e, e, n, n, n, e, interact, n, n, w, w, w, n, interact, e]'], {}), '([stay, stay, e, e, n, n, n, e, interact, n, n, w, w, w, n,\n interact, e])\n', (36586, 36663), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((36673, 36745), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[stay, w, interact, 
n, n, e, e, e, n, e, n, interact, w]'], {}), '([stay, w, interact, n, n, e, e, e, n, e, n, interact, w])\n', (36687, 36745), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((36759, 36822), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[e, interact, n, n, w, w, w, n, interact, e, s]'], {}), '([e, interact, n, n, w, w, w, n, interact, e, s])\n', (36773, 36822), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((36836, 36862), 'overcooked_ai_py.agents.agent.AgentGroup', 'AgentGroup', (['a0', 'a1', 'a2', 'a3'], {}), '(a0, a1, a2, a3)\n', (36846, 36862), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((37115, 37167), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (37151, 37167), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((37221, 37254), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {'horizon': '(20)'}), '(mdp_fn, horizon=20)\n', (37234, 37254), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((37366, 37418), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (37402, 37418), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((37472, 37505), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {'horizon': '(20)'}), '(mdp_fn, horizon=20)\n', (37485, 37505), False, 'from 
overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((37640, 37692), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (37676, 37692), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((37708, 37763), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""counter_circuit"""'], {}), "('counter_circuit')\n", (37744, 37763), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((37874, 37908), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {'horizon': '(100)'}), '(mdp_fn, horizon=100)\n', (37887, 37908), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((38035, 38087), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (38071, 38087), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((38215, 38268), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp', 'start_state_fn'], {}), '(self.base_mdp, start_state_fn)\n', (38237, 38268), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((38571, 38623), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (38607, 38623), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, 
SoupState, Recipe\n'), ((38752, 38805), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp', 'start_state_fn'], {}), '(self.base_mdp, start_state_fn)\n', (38774, 38805), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((39823, 39895), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {'outer_shape': '(5, 4)'}), '(mdp_gen_params, outer_shape=(5, 4))\n', (39859, 39895), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((39910, 39953), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (39923, 39953), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((40246, 40298), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {}), '(mdp_gen_params)\n', (40282, 40298), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((40313, 40356), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (40326, 40356), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((40728, 40780), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {}), '(mdp_gen_params)\n', (40764, 40780), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((40795, 40838), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', 
(['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (40808, 40838), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((43315, 43387), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {'outer_shape': '(6, 5)'}), '(mdp_gen_params, outer_shape=(6, 5))\n', (43351, 43387), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((43402, 43445), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (43415, 43445), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((44130, 44202), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {'outer_shape': '(6, 5)'}), '(mdp_gen_params, outer_shape=(6, 5))\n', (44166, 44202), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((44216, 44259), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (44229, 44259), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((45032, 45104), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {'outer_shape': '(6, 5)'}), '(mdp_gen_params, outer_shape=(6, 5))\n', (45068, 45104), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((45118, 45161), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (45131, 45161), 
False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((45532, 45584), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""cramped_room"""'], {}), "('cramped_room')\n", (45568, 45584), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((45604, 45663), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp'], {}), '(self.base_mdp, **DEFAULT_ENV_PARAMS)\n', (45626, 45663), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((45752, 45769), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (45766, 45769), True, 'import numpy as np\n'), ((1185, 1201), 'math.factorial', 'factorial', (['(n - k)'], {}), '(n - k)\n', (1194, 1201), False, 'from math import factorial\n'), ((1204, 1216), 'math.factorial', 'factorial', (['k'], {}), '(k)\n', (1213, 1216), False, 'from math import factorial\n'), ((4178, 4204), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['onion', 'onion']"], {}), "(['onion', 'onion'])\n", (4184, 4204), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4206, 4233), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['onion', 'tomato']"], {}), "(['onion', 'tomato'])\n", (4212, 4233), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4235, 4263), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['tomato', 'tomato']"], {}), "(['tomato', 'tomato'])\n", (4241, 4263), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4473, 4499), 
'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['onion', 'onion']"], {}), "(['onion', 'onion'])\n", (4479, 4499), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4501, 4536), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['onion', 'onion', 'onion']"], {}), "(['onion', 'onion', 'onion'])\n", (4507, 4536), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((8111, 8133), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.ONION]'], {}), '([Recipe.ONION])\n', (8117, 8133), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((8176, 8214), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (['[Recipe.TOMATO, Recipe.TOMATO]'], {}), '([Recipe.TOMATO, Recipe.TOMATO])\n', (8182, 8214), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((10590, 10636), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["['X', 'X', 'X']"], {}), "(['X', 'X', 'X'])\n", (10619, 10636), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((10703, 10751), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["[['X', 'X', 'X']]"], {}), "([['X', 'X', 'X']])\n", (10732, 10751), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((10857, 10912), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["['XOSX', 'P D', ' 21 ']"], {}), "(['XOSX', 'P D', ' 21 '])\n", 
(10886, 10912), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((11118, 11188), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["['XXPXX', 'O 2XX', 'X1 3 X', 'XDXSXX']"], {}), "(['XXPXX', 'O 2XX', 'X1 3 X', 'XDXSXX'])\n", (11147, 11188), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((11454, 11521), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["['XXPXX', 'O 3O', 'X1 X', 'XDXSX']"], {}), "(['XXPXX', 'O 3O', 'X1 X', 'XDXSX'])\n", (11483, 11521), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((11787, 11854), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["['XXPXX', 'O 1O', 'X1 X', 'XDXSX']"], {}), "(['XXPXX', 'O 1O', 'X1 X', 'XDXSX'])\n", (11816, 11854), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((12109, 12176), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_grid', 'OvercookedGridworld.from_grid', (["['XBPXX', 'O 2O', 'X1 X', 'XDXSX']"], {}), "(['XBPXX', 'O 2O', 'X1 X', 'XDXSX'])\n", (12138, 12176), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((12756, 12791), 'overcooked_ai_py.utils.load_from_json', 'load_from_json', (['expected_state_path'], {}), '(expected_state_path)\n', (12770, 12791), False, 'from overcooked_ai_py.utils import save_pickle, load_pickle, iterate_over_json_files_in_dir, load_from_json, save_as_json\n'), ((15212, 15241), 'overcooked_ai_py.utils.load_from_json', 'load_from_json', 
(['expected_path'], {}), '(expected_path)\n', (15226, 15241), False, 'from overcooked_ai_py.utils import save_pickle, load_pickle, iterate_over_json_files_in_dir, load_from_json, save_as_json\n'), ((15271, 15315), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedState.from_dict', 'OvercookedState.from_dict', (["expected['state']"], {}), "(expected['state'])\n", (15296, 15315), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((16658, 16687), 'overcooked_ai_py.agents.agent.RandomAgent', 'RandomAgent', ([], {'all_actions': '(True)'}), '(all_actions=True)\n', (16669, 16687), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((16689, 16718), 'overcooked_ai_py.agents.agent.RandomAgent', 'RandomAgent', ([], {'all_actions': '(True)'}), '(all_actions=True)\n', (16700, 16718), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((16869, 16889), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16883, 16889), True, 'import numpy as np\n'), ((17671, 17732), 'overcooked_ai_py.mdp.overcooked_mdp.OvercookedGridworld.from_layout_name', 'OvercookedGridworld.from_layout_name', (['"""multiplayer_schelling"""'], {}), "('multiplayer_schelling')\n", (17707, 17732), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((32237, 32264), 'overcooked_ai_py.agents.agent.GreedyHumanModel', 'GreedyHumanModel', (['self.mlam'], {}), '(self.mlam)\n', (32253, 32264), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((32266, 32293), 'overcooked_ai_py.agents.agent.GreedyHumanModel', 'GreedyHumanModel', (['self.mlam'], {}), '(self.mlam)\n', (32282, 32293), False, 'from 
overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((32512, 32582), 'numpy.array_equal', 'np.array_equal', (['obs.shape', 'self.base_mdp.lossless_state_encoding_shape'], {}), '(obs.shape, self.base_mdp.lossless_state_encoding_shape)\n', (32526, 32582), True, 'import numpy as np\n'), ((32844, 32906), 'numpy.array_equal', 'np.array_equal', (['obs.shape', 'self.base_mdp.featurize_state_shape'], {}), '(obs.shape, self.base_mdp.featurize_state_shape)\n', (32858, 32906), True, 'import numpy as np\n'), ((33650, 33713), 'numpy.array_equal', 'np.array_equal', (['expected_featurization', 'featurized_observations'], {}), '(expected_featurization, featurized_observations)\n', (33664, 33713), True, 'import numpy as np\n'), ((34365, 34428), 'numpy.array_equal', 'np.array_equal', (['expected_featurization', 'featurized_observations'], {}), '(expected_featurization, featurized_observations)\n', (34379, 34428), True, 'import numpy as np\n'), ((34702, 34730), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[stay, w, w]'], {}), '([stay, w, w])\n', (34716, 34730), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((34732, 34760), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[stay, e, e]'], {}), '([stay, e, e])\n', (34746, 34760), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((34846, 34895), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp'], {'horizon': '(10)'}), '(self.base_mdp, horizon=10)\n', (34868, 34895), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((35059, 35127), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv.from_mdp', 'OvercookedEnv.from_mdp', (['self.base_mdp'], {}), "(self.base_mdp, **{'invalid_env_param': 
None})\n", (35081, 35127), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((37798, 37828), 'numpy.random.choice', 'np.random.choice', (['[mdp0, mdp1]'], {}), '([mdp0, mdp1])\n', (37814, 37828), True, 'import numpy as np\n'), ((39180, 39234), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', ([], {}), '(**mdp_gen_params)\n', (39216, 39234), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((39247, 39290), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (39260, 39290), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((42066, 42138), 'overcooked_ai_py.mdp.layout_generator.LayoutGenerator.mdp_gen_fn_from_dict', 'LayoutGenerator.mdp_gen_fn_from_dict', (['mdp_gen_params'], {'outer_shape': '(6, 5)'}), '(mdp_gen_params, outer_shape=(6, 5))\n', (42102, 42138), False, 'from overcooked_ai_py.mdp.layout_generator import LayoutGenerator, ONION_DISPENSER, TOMATO_DISPENSER, POT, DISH_DISPENSER, SERVING_LOC\n'), ((42157, 42200), 'overcooked_ai_py.mdp.overcooked_env.OvercookedEnv', 'OvercookedEnv', (['mdp_fn'], {}), '(mdp_fn, **DEFAULT_ENV_PARAMS)\n', (42170, 42200), False, 'from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv, DEFAULT_ENV_PARAMS\n'), ((42807, 42833), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['onion', 'onion']"], {}), "(['onion', 'onion'])\n", (42813, 42833), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((42835, 42870), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe', 'Recipe', (["['onion', 'onion', 'onion']"], {}), "(['onion', 'onion', 'onion'])\n", (42841, 42870), False, 'from overcooked_ai_py.mdp.overcooked_mdp 
import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((45704, 45722), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[]'], {}), '([])\n', (45718, 45722), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((45724, 45742), 'overcooked_ai_py.agents.agent.FixedPlanAgent', 'FixedPlanAgent', (['[]'], {}), '([])\n', (45738, 45742), False, 'from overcooked_ai_py.agents.agent import AgentGroup, AgentPair, GreedyHumanModel, FixedPlanAgent, RandomAgent\n'), ((4021, 4056), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.generate_random_recipes', 'Recipe.generate_random_recipes', ([], {'n': '(3)'}), '(n=3)\n', (4051, 4056), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4091, 4141), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.generate_random_recipes', 'Recipe.generate_random_recipes', ([], {'n': '(99)', 'unique': '(False)'}), '(n=99, unique=False)\n', (4121, 4141), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4347, 4444), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.generate_random_recipes', 'Recipe.generate_random_recipes', ([], {'n': '(3)', 'min_size': '(2)', 'max_size': '(2)', 'ingredients': "['onion', 'tomato']"}), "(n=3, min_size=2, max_size=2, ingredients=[\n 'onion', 'tomato'])\n", (4377, 4444), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((4622, 4709), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.generate_random_recipes', 'Recipe.generate_random_recipes', ([], {'n': '(2)', 'min_size': '(2)', 'max_size': '(3)', 'ingredients': "['onion']"}), "(n=2, min_size=2, max_size=3, ingredients=[\n 'onion'])\n", (4652, 4709), False, 'from 
overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((13089, 13125), 'overcooked_ai_py.mdp.overcooked_mdp.PlayerState', 'PlayerState', (['(3, 1)', 'Direction.NORTH'], {}), '((3, 1), Direction.NORTH)\n', (13100, 13125), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((13127, 13164), 'overcooked_ai_py.mdp.overcooked_mdp.PlayerState', 'PlayerState', (['(10, 1)', 'Direction.NORTH'], {}), '((10, 1), Direction.NORTH)\n', (13138, 13164), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((13517, 13553), 'overcooked_ai_py.mdp.overcooked_mdp.PlayerState', 'PlayerState', (['(0, 0)', 'Direction.NORTH'], {}), '((0, 0), Direction.NORTH)\n', (13528, 13553), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((13555, 13591), 'overcooked_ai_py.mdp.overcooked_mdp.PlayerState', 'PlayerState', (['(3, 1)', 'Direction.NORTH'], {}), '((3, 1), Direction.NORTH)\n', (13566, 13591), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n'), ((15114, 15149), 'overcooked_ai_py.utils.save_as_json', 'save_as_json', (['actual', 'expected_path'], {}), '(actual, expected_path)\n', (15126, 15149), False, 'from overcooked_ai_py.utils import save_pickle, load_pickle, iterate_over_json_files_in_dir, load_from_json, save_as_json\n'), ((38456, 38497), 'numpy.array_equal', 'np.array_equal', (['start_state', 'curr_terrain'], {}), '(start_state, curr_terrain)\n', (38470, 38497), True, 'import numpy as np\n'), ((38989, 39030), 'numpy.array_equal', 'np.array_equal', (['start_state', 'curr_terrain'], {}), '(start_state, curr_terrain)\n', (39003, 39030), True, 'import numpy 
as np\n'), ((40126, 40169), 'numpy.array_equal', 'np.array_equal', (['start_terrain', 'curr_terrain'], {}), '(start_terrain, curr_terrain)\n', (40140, 40169), True, 'import numpy as np\n'), ((40528, 40572), 'numpy.array_equal', 'np.array_equal', (['env.mdp.terrain_mtx', 'terrain'], {}), '(env.mdp.terrain_mtx, terrain)\n', (40542, 40572), True, 'import numpy as np\n'), ((40975, 41019), 'numpy.array_equal', 'np.array_equal', (['env.mdp.terrain_mtx', 'terrain'], {}), '(env.mdp.terrain_mtx, terrain)\n', (40989, 41019), True, 'import numpy as np\n'), ((45298, 45327), 'json.dumps', 'json.dumps', (['o'], {'sort_keys': '(True)'}), '(o, sort_keys=True)\n', (45308, 45327), False, 'import json\n'), ((4771, 4835), 'overcooked_ai_py.mdp.overcooked_mdp.Recipe.generate_random_recipes', 'Recipe.generate_random_recipes', ([], {'n': '(1)', 'recipes': 'only_onions_recipes'}), '(n=1, recipes=only_onions_recipes)\n', (4801, 4835), False, 'from overcooked_ai_py.mdp.overcooked_mdp import PlayerState, OvercookedGridworld, OvercookedState, ObjectState, SoupState, Recipe\n')] |
"""
This script is used to generate a mock catalog from MDPL2 Rockstar data.
The main procedures of this script are:
1. Load and organize all raw data and mock observation parameters.
Seperate the data into 'host' information and 'galaxy'
information.
2. Iterate through all galaxies and assign them to their hosts.
Galaxies which are gravitationally bound to their hosts are
designated as the 'true members' of that host.
3. Pad the galaxy data by creating copies of the galaxies which are
near edges of the box. Keep track of the galaxies and their
respective copies.
4. Reassign true members to hosts which are close to the edges of
the box. The padded galaxies might be closer to the host center
than the originals.
5. Cut cylinders
a. Rotate the box randomly. All x,y,z and vx,vy,vz data of hosts
and galaxies are transformed by a rotation matrix R. The
following steps will run in parallel for each rotated box.
b. Initialize a kd-tree along the (rotated) x,y positions of
galaxies.
c. Iterate through all hosts and identify all galaxies within
        [aperture] of the host center along x,y positions.
d. For each of these galaxies, calculate relative LOS velocity
to the cluster.
e. Cut off relative velocity of the members by v_cut. The
galaxies which fall within the velocity cut are part of the
contaminated catalog.
f. Calculate relative LOS velocity for all true members of the
host. These galaxies are part of the pure catalog.
6. Organize output and save data
"""
print('Importing modules...')
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ IMPORTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os
import sys
import numpy as np
import pandas as pd
import pickle
import time
import multiprocessing as mp
from scipy.spatial import KDTree
from collections import OrderedDict, defaultdict
from tools.catalog import Catalog
import mock_tools
from mock_tools import *
import tracemalloc
# For running: fixed RNG seed (reproducible rotations) and worker count
np.random.seed(44323)
n_proc = 9
# For cache-ing intermediate data products
# save_cache: write intermediates after preprocessing; load_cache: resume from them
save_cache = True
load_cache = True
# Optional CLI argument selects one rot_assign shard (see save_intermediates)
if len(sys.argv)==2:
    load_ind = int(sys.argv[1])
else:
    load_ind = None
print('Load index:', load_ind)
# For debugging: debug shrinks the sample sizes; tree_pool parallelizes KDTree builds
debug = False
tree_pool = False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PARAMETERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@func_stats
def build_config():
    """Populate the global `config` OrderedDict with all mock-catalog
    parameters: input/output paths, host/galaxy mass cuts, cylinder-cut
    size, rotation-boosting settings, and the simulation cosmology.

    The commented-out entries are presets for previously generated
    catalogs (UniverseMachine, Uchuu, Illustris); exactly one preset
    (catalog_name/host_file/gal_file/z plus matching cosmology) should
    be active at a time.
    """
    global config
    config = OrderedDict([
        ('wdir', '/hildafs/home/mho1/scratch/halo_cnn'),
        ('in_folder', 'data_raw'),
        ('out_folder', 'data_mocks'),
        # # UM Macc cut with z=0.022->0.0231
        # ('catalog_name', 'Rockstar_UM_z=0.0.022-0231_Macc1e11-large-fix'),
        # ('host_file', 'cosmosim/MDPL2_Rockstar_snap:124.csv'),
        # ('gal_file', None),  # 'um_mdpl2_hearin/sfr_catalog_0.978100.npy'),
        # ('z', 0.0231),  # coma
        # # # UM SM cut with z=0.022->0.0231
        # ('catalog_name', 'Rockstar_UM_z=0.0.022-0231_SM9.5-large-fix'),
        # ('host_file', 'cosmosim/MDPL2_Rockstar_snap:124.csv'),
        # ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.978100.npy'),
        # ('z', 0.0231),  # coma
        # # Uchuu with z=0.022->0.0231 and Macc>=1e11
        # ('catalog_name', 'Uchuu_z=0.022-0.231_VmaxP2.0-large-oldcorrect'),
        # ('host_file', 'uchuu/Uchuu_snap:049_m200c1e11.csv'),
        # ('gal_file', None),
        # ('z', 0.0231),  # coma
        # Magneticum with z=0.066340191 and m>=1e10.5
        ('catalog_name', 'Magneticum_z=0.066_m1e11-large'),
        ('host_file', 'magneticum/magn_snap136_cluster_fix+Dhayaa.csv'),
        ('gal_file', 'magneticum/magn_snap136_galaxies_fix.csv'),
        ('z', 0.066340191),
        # # IllustriusTNG with z=0. and M*>=9.5
        # ('catalog_name', 'Illustrius_z=0_M*9.5-large'),
        # ('host_file', 'illustrius/TNG300-1_DES_Data_Halos_z0.csv'),
        # ('gal_file', 'illustrius/TNG300-1_DES_Data_Galaxies_z0.csv'),
        # ('z', 0.0),
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Selection cuts: minimum host M200c and minimum galaxy mass
        # (interpreted through gal_mass_def); clusters below min_richness
        # members are dropped at cut time.
        ('host_min_mass', 10**13.5),  # 14 # 13.5
        ('gal_min_mass', 10**9.5),  # 10**10.5), #11.3112), # (10.5)),# 10**12), #10**11), (9.5)), #
        ('gal_mass_def', 'm'),  # 'Macc' #'Vmax_Mpeak' #'M*'), # 'obs_sm')
        ('min_richness', 10),
        ('cut_size', 'large'),  # maps to aperture/vcut in handle_parameters
        ('volume', 352.**3),  # 1000.**3), # (Mpc/h)^3 # 2000.**3 # # 302.6**3),
        # Rotation boosting: target number density per dlogm mass bin,
        # applied to hosts above boost_minmass
        ('dn_dlogm', 10.**-5.),  # -5.2
        ('dlogm', 0.01),
        ('boost_minmass', 10**13.5),
        ('min_rotations', 3),
        ('max_rotations', 500),
        ('rot_template', True),
        ('split_rot', 1),  # >1 shards rot_assign for multiple runs
        # MDPL
        # ('cosmo', {#'H_0': 100., # [km/s/(Mpc/h)]
        #            'h': 0.6777,
        #            'Omega_m': 0.307115,
        #            'Omega_l': 0.692885,
        #            'c': 299792.458  # [km/s]
        #            }),
        # # Uchuu
        # ('cosmo', {#'H_0': 100., # [km/s/(Mpc/h)]
        #            'h': 0.6774, # not used
        #            'Omega_m': 0.3089,
        #            'Omega_l': 0.6911,
        #            'c': 299792.458  # [km/s]
        #            }),
        # Magneticum
        ('cosmo', {#'H_0': 100., # [km/s/(Mpc/h)]
                   'h': 0.704, # not used
                   'Omega_m': 0.272,
                   'Omega_l': 0.728,
                   'c': 299792.458 # [km/s]
                   }),
        # Subsampling factors (<1: fraction; >1: absolute count); see
        # load_and_preprocess
        ('samp_hosts', 1),
        ('samp_gals', 1)
    ])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PARAMETER HANDLING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@func_stats
def handle_parameters():
    """Derive secondary parameters from `config` and prime mock_tools.

    Shrinks samples in debug mode, maps `cut_size` to the cylinder
    aperture/velocity cut, builds fast interpolators for the
    redshift<->comoving-distance relation, and precomputes the cluster
    comoving distance / Hubble velocity at the fixed observation z.

    :return: the (updated) global config dict.
    """
    global debug, config, mock_tools
    if debug:
        # Tiny samples and no rotation boosting for quick iteration
        print('>'*10, 'DEBUGGING', '<'*10)
        config['samp_hosts'] = 100
        config['samp_gals'] = 0.01
        config['dn_dlogm'] = 0
    print('\n~~~~~ GENERATING ' + config['catalog_name'] + ' ~~~~~~')
    for key in config.keys():
        print(key + ' : ' + str(config[key]))
    if config['cut_size'] == 'small':
        config['aperture'] = 1.1  # the radial aperture in comoving Mpc/h
        config['vcut'] = 1570.  # maximum velocity is vmean +/- vcut in km/s
    elif config['cut_size'] == 'medium':
        config['aperture'] = 1.6  # Mpc/h
        config['vcut'] = 2200.  # km/s
    elif config['cut_size'] == 'large':
        config['aperture'] = 2.3  # Mpc/h
        config['vcut'] = 3785.  # km/s
    else:
        raise Exception('Invalid cut_size')
    """
    Since d(z) has no simple, analytic form, but is smooth and continuous,
    we will sample it at many z and use scipy's interpolation function to
    reduce calculation time. Note, scipy interp is faster than numpy's
    """
    z_samp = np.arange(0, 5, 0.0001)
    d_samp = [d_from_z(i, config['cosmo']) for i in z_samp]
    # d(z) interpolation: Comoving distance from redshift
    mock_tools.d_from_z_sci = interp1d(z_samp, d_samp)
    # z(d) interpolation: Redshift from comoving distance
    mock_tools.z_from_d_sci = interp1d(d_samp, z_samp)
    """
    We'll assume that each host (cluster) is centered at z=config['z'].
    Therefore, comoving distances and Hubble recession velocities of the
    hosts are constant.
    """
    mock_tools.d_clu = d_from_z(config['z'], config['cosmo'])
    mock_tools.vH_clu = vH_from_z(config['z'], config['cosmo'])
    return config
# ~~~~~~~~~~~~~~~~~~~~~~~~ LOADING, PREPROCESSING DATA ~~~~~~~~~~~~~~~~~~~~~~~~
@func_stats
def load_and_preprocess():
    """Load raw host/galaxy tables and apply selection + subsampling.

    Fills the globals `host_data` (distinct halos: upId == -1, M200c
    above host_min_mass) and `gal_data` (subhalos: upId != -1, mass
    above gal_min_mass). When no separate galaxy file is configured, the
    host file serves both roles. Both frames end with a fresh 0..n-1
    RangeIndex, which later code relies on (label == position).
    """
    global config, host_data, gal_data
    print('\nLoading data...')
    host_data = load_raw(os.path.join(config['wdir'], config['in_folder'],
                                      config['host_file']))
    # only changes columns if they're misnamed
    host_data = host_data.rename(columns={'upid':'upId','id':'rockstarId'}, copy=False)
    if config['gal_file'] is None:
        gal_data = host_data
    else:
        gal_data = load_raw(os.path.join(config['wdir'], config['in_folder'],
                                         config['gal_file']))
        gal_data = gal_data.rename(columns={'upid': 'upId', 'mpeak': 'Macc',
                                            'mvir': 'Mvir'}, copy=False)
    # Cluster, galaxy constraints
    print('\nFinding hosts, galaxies...')
    host_data = host_data[(host_data['upId'] == -1) &
                          (host_data['M200c'] >= config['host_min_mass'])]
    gal_data = gal_data[(gal_data['upId'] != -1) &
                        (gal_data[config['gal_mass_def']] >= config['gal_min_mass'])]
    # Subsample: values < 1 are fractions, values > 1 are absolute counts
    if config['samp_hosts'] < 1:
        host_data = host_data.sample(int(config['samp_hosts'] * len(host_data)), replace=False)
    elif config['samp_hosts'] > 1:
        host_data = host_data.sample(int(config['samp_hosts']), replace=False)
    if config['samp_gals'] < 1:
        gal_data = gal_data.sample(int(config['samp_gals'] * len(gal_data)), replace=False)
    elif config['samp_gals'] > 1:
        gal_data = gal_data.sample(int(config['samp_gals']), replace=False)
    host_data = host_data.reset_index(drop=True)
    gal_data = gal_data.reset_index(drop=True)
    print('host_data length: ' + str(len(host_data)))
    print('gal_data length: ' + str(len(gal_data)))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIND TRUE MEMBERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@func_stats
def find_true_members():
    """Group galaxy indices by their host halo id.

    Populates the global `host_members` mapping of host rockstarId (the
    galaxy's `upId`) to the list of positional indices into `gal_data`
    of its gravitationally-bound ("true") member galaxies.
    """
    global gal_data, host_members
    print('\nAssigning true members...')
    # Relates host rockstarid to indices of member galaxies.
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []);
    # save_intermediates() still converts to a plain dict before pickling.
    host_members = defaultdict(list)
    # Pull the column once: per-row .loc lookups are full DataFrame
    # indexing calls and dominate this hot loop. gal_data has a fresh
    # RangeIndex (reset in load_and_preprocess), so position == label.
    upids = gal_data['upId'].values
    for i in range(len(upids)):
        if i % (10**5) == 0:
            print(str(int(i / (10**5))) + ' / ' +
                  str(int(len(gal_data) / (10**5))))
        # Assign to host
        host_members[upids[i]].append(i)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PADDING DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def in_pad_region(key):
    """Return indices of galaxies lying in the padding region `key`.

    `key` is a 3-tuple over the (x, y, z) axes: True selects galaxies
    near the low edge (their copies will be shifted up one box length),
    False selects those near the high edge, None leaves that axis
    unconstrained. Reads only globals, so it is safe to dispatch to a
    multiprocessing pool.
    """
    global config, gal_data
    axes = ('x', 'y', 'z')
    # One column of the mask per axis; unconstrained axes stay all-ones
    mask = np.ones(shape=(len(gal_data), len(axes)))
    for ax_idx, flag in enumerate(key):
        if flag is True:
            mask[:, ax_idx] = gal_data[axes[ax_idx]] < config['pad_size']
        elif flag is False:
            mask[:, ax_idx] = gal_data[axes[ax_idx]] > (config['box_length'] - config['pad_size'])
    # A galaxy is in the region only if every axis constraint holds
    in_region = np.sum(mask, axis=1) == len(axes)
    return np.argwhere(in_region).flatten()
def reassign_true(host_i):
    """Re-evaluate true members of host `host_i` against padded copies.

    For each member farther from the host center than the virial radius,
    check whether one of its padded (periodic) copies is closer; if so,
    substitute the copy's index. Returns the (possibly updated) member
    index list for the host.
    """
    global host_data, gal_data, host_members, pad_gal_copies
    host_pos = host_data.iloc[host_i][['x', 'y', 'z']]
    host_mem = host_members[host_data.at[host_i, 'rockstarId']]
    for j in range(len(host_mem)):
        # 3D separation between the original member and the host center
        true_memb_dist = np.sqrt(np.sum(
            (gal_data.iloc[host_mem[j]][['x', 'y', 'z']] - host_pos)**2))
        # Only consider a swap when the member lies outside Rvir
        # (Rvir is in kpc/h, positions in Mpc/h) and copies exist
        if (true_memb_dist > host_data.at[host_i, 'Rvir'] / 1000.) & \
                (len(pad_gal_copies[host_mem[j]]) > 0):
            # Closest padded copy of this member
            close_gal = np.sqrt(np.sum(
                (gal_data.iloc[pad_gal_copies[host_mem[j]]][['x', 'y', 'z']] -
                 host_pos)**2, axis=1)).idxmin()
            if np.sqrt(
                np.sum((gal_data.iloc[close_gal][['x', 'y', 'z']]
                        - host_pos)**2)) < true_memb_dist:
                host_mem[j] = close_gal
    return host_mem
@func_stats
def pad():
    """Pad `gal_data` with periodic copies of galaxies near box edges.

    Computes the pad width from the velocity cut plus maximum peculiar
    velocities, enumerates the 26 edge/corner regions, copies the
    galaxies in each region shifted by +/- one box length, and records
    original->copy index relations in `pad_gal_copies`.
    """
    global config, gal_data, host_members, pad_gal_copies
    """
    to_pad is a dictionary which relates tuples to an array of galaxy
    indices. The tuples have length 3 and correspond to the x,y,z axes,
    respectively. For a given axis n, if the value within the tuple is
    'True', the galaxies within the related array will have their 'n'-axis
    added one box_length when padding. If the tuple value is 'False', the
    galaxies within the related array will have their 'n'-axis subtracted
    one box_length when padding. If 'None' appears, the 'n'-axis is left
    alone while padding. I did this to automate the padding process and
    make it O(n).
    """
    print('\nDetermining padding regions...')
    max_gal_v = np.sqrt((gal_data['vx']**2 + gal_data['vy']**2 +
                         gal_data['vz']**2).max())
    max_clu_v = np.sqrt((host_data['vx']**2 + host_data['vy']**2 +
                         host_data['vz']**2).max())
    # max possible distance in the non-relativistic case
    config['pad_size'] = 1.0 * (config['vcut'] + max_gal_v + max_clu_v) / H_from_z(config['z'], config['cosmo'])
    config['box_length'] = config['volume']**(1./3)
    print('pad_size/box_length: ' + str(config['pad_size'] / config['box_length']))
    # All 3^3 - 1 combinations of per-axis shift directions
    pad_regions = []
    pad_directions = (None, False, True)
    for i in pad_directions:
        for j in pad_directions:
            for k in pad_directions:
                pad_regions.append((i, j, k))
    _ = pad_regions.remove((None, None, None))
    axes = ('x', 'y', 'z')
    with mp.Pool(processes=n_proc) as pool:
        # each process checks a different padding region
        to_pad_ind = pool.map(in_pad_region, pad_regions, chunksize=1)
    print('Padding regions: ')
    for i in range(len(pad_regions)):
        s = ''
        for j in range(len(pad_regions[i])):
            if pad_regions[i][j] is True:
                s += axes[j] + '+ '
            elif pad_regions[i][j] is False:
                s += axes[j] + '- '
        print(s + ' : ' + str(len(to_pad_ind[i])))
    # Grow gal_data once with zero rows, then fill region by region
    num_padded = np.sum([len(to_pad_ind[i]) for i in range(len(to_pad_ind))])
    c = len(gal_data)
    gal_data = gal_data.append(pd.DataFrame(
        np.zeros(shape=(num_padded, gal_data.shape[1])),
        columns=gal_data.columns), ignore_index=True)
    # Relates indices of original galaxies to their padded copy indices
    pad_gal_copies = defaultdict(lambda: [])
    print('\nPadding...')
    for i in range(len(pad_regions)):
        print(str(i + 1) + ' / ' + str(len(pad_regions)))
        c_end = c + len(to_pad_ind[i])
        gal_data.values[c: c_end, :] = gal_data.iloc[to_pad_ind[i]].values
        for j in range(len(to_pad_ind[i])):
            pad_gal_copies[to_pad_ind[i][j]].append(c + j)
        # Shift the copies by one box length along each constrained axis
        for j in range(len(pad_regions[i])):
            if pad_regions[i][j] is True:
                gal_data.loc[c: c_end - 1, axes[j]] += config['box_length']
            elif pad_regions[i][j] is False:
                gal_data.loc[c: c_end - 1, axes[j]] -= config['box_length']
        c = c_end
    print('Padded gal_data length: ' + str(len(gal_data)))
    """
    Host-galaxy relationships might take place over a periodic boundary.
    Therefore, we must check if any padded copies of galaxies are closer to
    their respective hosts than the original galaxies
    """
    print('\nReassigning true members to account for padding...')
    # NOTE(review): the reassignment step below is disabled; padded
    # copies are never swapped into host_members. Confirm this is
    # intentional before relying on cross-boundary memberships.
    # with mp.Pool(processes=n_proc) as pool:
    #     # each process checks a different padding region
    #     reassignments = pool.map(reassign_true, range(len(host_data)))
    # for i in range(len(host_data)):
    #     if i % (10**4) == 0:
    #         print(str(int(i / (10**4))) + ' / ' +
    #               str(int(len(host_data) / (10**4))))
    #     host_members[host_data.at[i, 'rockstarId']] = reassignments[i]
# ~~~~~~~~~~~~~~~~~~~~~~~~~ REDUCING DATASET ~~~~~~~~~~~~~~~~~~~~~~~~~~
@func_stats
def reduce():
    """Drop bookkeeping columns that are not needed downstream.

    Trims `host_data` of merger-tree identifiers; `gal_data` currently
    keeps every column (its drop list is empty).
    """
    global host_data, gal_data
    print('\nReducing dataset...')  # Dropping unnecessary data fields
    host_drop = ['row_id', 'upId', 'pId', 'descId', 'breadthFirstId', 'scale']
    gal_drop = []
    # Select the surviving columns rather than mutating in place
    host_keep = [col for col in host_data.columns.values if col not in host_drop]
    gal_keep = [col for col in gal_data.columns.values if col not in gal_drop]
    host_data = host_data[host_keep]
    gal_data = gal_data[gal_keep]
# ~~~~~~~~~~~~~~~~~~~~ CALCULATING BOOSTED ROTATION ANGLES ~~~~~~~~~~~~~~~~~~~~
@func_stats
def get_rot_assign():
    """Assign rotation angles to hosts, boosting rare (massive) hosts.

    Using a sliding window in log-mass, each host above boost_minmass
    gets enough rotations that its effective number density reaches
    config['dn_dlogm'], clamped to [min_rotations, max_rotations]. The
    result is the global `rot_assign`: a boolean DataFrame indexed by
    host with one column per (theta, phi, process_id) rotation id. Large
    rotation columns are then split across process ids so no single
    cut_mock call handles more than ~critical_num clusters.
    """
    global config, host_data, rot_assign
    print('Calculating boosted rotation angles...')
    host_rots = pd.DataFrame(0, index=host_data.index,
                             columns=['logM', 'num_rot'])
    host_rots.loc[:, 'logM'] = np.log10(host_data['M200c'])
    host_rots.loc[:, 'num_rot'] = 0
    host_rots = host_rots.sort_values(by='logM')
    start = np.argwhere(host_rots['logM'].values > np.log10(config['boost_minmass'])).min()
    # window brackets hosts within +/- dlogm/2 of the current logM;
    # both edges only ever move forward through the sorted list
    window = [0, 1]
    out = np.zeros(len(host_rots), dtype=int)
    for i in range(start, len(host_rots)):
        if i%1e4==0: print(i, '/', len(host_rots))
        while (host_rots['logM'].iloc[window[0]] < host_rots['logM'].iloc[i]
               - config['dlogm'] / 2.):
            window[0] += 1
        while (window[1] < len(host_rots)):
            if(host_rots['logM'].iloc[window[1]] >= host_rots['logM'].iloc[i]
               + config['dlogm'] / 2.):
                break
            window[1] += 1
        # local number density per dlogm; boost factor is its ratio to target
        i_dn_dlogm = (window[1] - window[0]) / config['volume'] / config['dlogm']
        out[i] = int(config['dn_dlogm'] / i_dn_dlogm) + 1
    host_rots['num_rot'] = out
    host_rots.iloc[np.argwhere(host_rots['num_rot'].values < config['min_rotations']),
                   host_rots.columns.get_loc('num_rot')] = config['min_rotations']
    host_rots.iloc[np.argwhere(host_rots['num_rot'].values > config['max_rotations']),
                   host_rots.columns.get_loc('num_rot')] = config['max_rotations']
    # Every host gets the three axis-aligned base rotations
    rot_assign = pd.DataFrame(
        True, index=host_rots.index,
        columns=[(0, 0, 0), (np.pi / 2, 0, 0), (np.pi / 2, np.pi / 2, 0)])
    if config['rot_template']:
        ang_template = fibonacci_sphere(max(host_rots['num_rot']) - config['min_rotations'])
    for n_rot in host_rots['num_rot'].unique():
        ang_list = fibonacci_sphere(n_rot - config['min_rotations'])
        if (len(ang_list)>0) and config['rot_template']:
            ang_list = match_angles(ang_list, ang_template)
        for ang in ang_list:
            # NOTE(review): this membership test is against rot_assign.index
            # (host labels), not rot_assign.columns (angle ids) — looks like
            # it should be `.columns`; confirm intended behavior.
            if (*ang, 0) in rot_assign.index:
                rot_assign[(*ang, 0)] = (rot_assign[(*ang, 0)]) | (host_rots['num_rot'] == n_rot)
            else:
                rot_assign[(*ang, 0)] = host_rots['num_rot'] == n_rot
    print('# Rotations:' + str(host_rots['num_rot'].unique()))
    # ~~~~~~~~~~~ DIVIDE ROTATION CALCULATIONS TO MULTIPLE PROCESSES ~~~~~~~~~~~~~~
    print('Dividing rotation angles over multiple processes...')
    # it takes about a 1000 (UM) or 7000 (Uchuu) kdtree queries to equal one kdtree generation
    if len(rot_assign.columns) < n_proc:
        critical_num = len(host_data)/(n_proc*config['split_rot'])
    else:
        critical_num = max(1000, len(host_data)/(n_proc*config['split_rot']))
    for i in range(len(rot_assign.columns)): # note range iterator is constant through loop
        ang = rot_assign.columns[i]
        num_in_rot = np.sum(rot_assign[ang])
        if num_in_rot > critical_num:
            # Split this column's clusters uniformly over tot_processes new ids
            tot_processes = int(num_in_rot/critical_num)+1
            ind_processes = np.argwhere(rot_assign[ang].values).flatten()
            proc_assign = np.random.choice(tot_processes, size=num_in_rot, replace=True)
            for j in range(tot_processes):
                rot_assign[(*(ang[:2]), j)] = False
                # NOTE(review): chained indexing (`df[col].iloc[...] = True`)
                # may not write back on newer pandas — verify on upgrade.
                rot_assign[(*(ang[:2]), j)].iloc[ind_processes[proc_assign==j]] = True
    print(critical_num)
    print(rot_assign.sum(axis=0))
# ~~~~~~~~~~~~~~~~~~~ SAVE AND LOAD INTERMEDIATES ~~~~~~~~~~~~~~~~~~~
@func_stats
def save_intermediates():
    """Cache preprocessing products to '<catalog_name>.cache/'.

    Writes config, the true-member map (converted to a plain dict so it
    pickles), the host/galaxy tables, and rot_assign. With split_rot > 1,
    rot_assign is instead sharded column-wise into roughly equal-work
    'rot_assign_<i>.p' files for separate runs (selected via load_ind).
    """
    global config, host_data, gal_data, host_members, rot_assign
    cache_dir = os.path.join(config['wdir'],
                             config['out_folder'],
                             config['catalog_name'] + '.cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    with open(os.path.join(cache_dir, 'cache.p'), 'wb') as f:
        pickle.dump(config, f)
    with open(os.path.join(cache_dir, 'host_members.p'), 'wb') as f:
        pickle.dump(dict(host_members), f)
    host_data.to_pickle(os.path.join(cache_dir, 'host_data.p'))
    gal_data.to_pickle(os.path.join(cache_dir, 'gal_data.p'))
    if config['split_rot']<=1:
        rot_assign.to_pickle(os.path.join(cache_dir, 'rot_assign.p'))
    else:
        print('\nDiving over multiple rot-assigns...')
        # Greedily accumulate whole columns until each shard holds about
        # 1/split_rot of the total cluster-rotation workload
        col_count = rot_assign.sum(axis=0)
        avg = np.sum(col_count)/config['split_rot']
        print('Average:', avg)
        j0=0
        ji=0
        j=0
        for i in range(config['split_rot']-1):
            ang_list = []
            while ji < (i+1)*avg:
                ang_list.append(j)
                ji += col_count.iloc[j]
                j+=1
            print('#Clusters:', (ji-j0), '; #Rotations:', len(ang_list))
            j0=ji
            x = rot_assign.iloc[:, ang_list]
            x.to_pickle(os.path.join(cache_dir, f'rot_assign_{i}.p'))
        # Remaining columns form the last shard
        x = rot_assign.iloc[:, j:]
        x.to_pickle(os.path.join(cache_dir, f'rot_assign_{i+1}.p'))
@func_stats
def load_intermediates():
    """Restore cached intermediates from '<catalog_name>.cache/'.

    Loads config, the true-member map, the host/galaxy tables, and
    rot_assign. When the global `load_ind` is set, only the matching
    rot_assign shard (written by save_intermediates with split_rot > 1)
    is loaded.
    """
    global config, host_data, gal_data, host_members, rot_assign, load_ind
    cache_dir = os.path.join(config['wdir'],
                             config['out_folder'],
                             config['catalog_name'] + '.cache')
    with open(os.path.join(cache_dir, 'cache.p'), 'rb') as f:
        config = pickle.load(f)
    with open(os.path.join(cache_dir, 'host_members.p'), 'rb') as f:
        host_members = pickle.load(f)
    host_data = pd.read_pickle(os.path.join(cache_dir, 'host_data.p'))
    gal_data = pd.read_pickle(os.path.join(cache_dir, 'gal_data.p'))
    # `is None` (identity) instead of `== None`: equality comparison with
    # None is the non-idiomatic form and is fragile against objects that
    # overload `==` elementwise (e.g. numpy/pandas values).
    if load_ind is None:
        rot_assign = pd.read_pickle(os.path.join(cache_dir, 'rot_assign.p'))
    else:
        rot_assign = pd.read_pickle(os.path.join(cache_dir, f'rot_assign_{load_ind}.p'))
# ~~~~~~~~~~~~~~~~~~~~~~~~ GENERATE PROJECTED KDTREES ~~~~~~~~~~~~~~~~~~~~~~~~
def grow_tree(angle):
    """Build a 2D KDTree over galaxy positions projected along `angle`.

    Rotates the global galaxy positions by the line-of-sight rotation
    matrix for (theta, phi) and indexes the resulting x,y plane.
    """
    rotation = rot_matrix_LOS(*angle)
    rotated_pos = gal_data[['x', 'y', 'z']].values @ rotation.T
    # leafsize 10**3.5 matches the empirically tuned value used elsewhere
    return KDTree(rotated_pos[:, :2], leafsize=int(10**3.5))
@func_stats
def generate_forest(ang_list):
    """Construct one projected KDTree per rotation angle.

    Fills the global `trees` dict mapping each (theta, phi) angle in
    `ang_list` to its KDTree. Builds in a worker pool when the global
    `tree_pool` flag is set, otherwise serially.
    """
    global rot_assign, trees, tree_pool
    print('Growing KDTrees...')
    print('Total rotations:', len(ang_list))
    if tree_pool:
        with mp.Pool(processes=n_proc) as pool:
            grown = pool.map(grow_tree, ang_list, chunksize=1)
    else:
        grown = [grow_tree(ang) for ang in ang_list]
    trees = {ang: tree for ang, tree in zip(ang_list, grown)}
# ~~~~~~~~~~~~~~~~~~~ CALCULATING ROTATED MOCK OBSERVATIONS ~~~~~~~~~~~~~~~~~~~
def cut_mock(angle_id=None):
    """Cut pure and contaminated mock catalogs for one rotation id.

    :param angle_id: (theta, phi, process_id) tuple selecting a column
        of the global `rot_assign`; the hosts flagged in that column are
        processed here.
    :return: (pure_catalog, contam_catalog) Catalog pair for this
        rotation.
    """
    global config, host_data, gal_data, host_members, rot_assign
    """
    Given a set of rotation angles, rotate catalog. Iterate through clusters.
    Cut a long cylinder through the entire sim around projected cluster center
    (called pillars). Calculate all vlos. Create pure, contaminated catalogs.
    This is wrapped in a function so it can be parallelized.
    I removed virial true members (true members are <Rproj) after an
    earlier commit (hash redacted in this copy).
    """
    angle = angle_id[:2]
    # ~~~~~ rotate galaxy-host positions+velocities ~~~~~
    R = rot_matrix_LOS(*angle)
    cl_list = rot_assign.index.values[rot_assign[angle_id]] # cluster indices to be cut
    # print(angle_id, len(cl_list))
    gal_rot = pd.DataFrame(
        np.concatenate((np.matmul(gal_data[['x', 'y', 'z']].values, R.T),
                        np.matmul(gal_data[['vx', 'vy', 'vz']].values, R.T)),
                       axis=1), columns=['x', 'y', 'z', 'vx', 'vy', 'vz'])
    host_rot = pd.DataFrame(
        np.concatenate((np.matmul(host_data[['x', 'y', 'z']].values, R.T),
                        np.matmul(host_data[['vx', 'vy', 'vz']].values, R.T)),
                       axis=1), columns=['x', 'y', 'z', 'vx', 'vy', 'vz'])
    # print('calc', angle_id)
    # ~~~~~ initialize projected KDTree ~~~~~
    # 10**3.5 found to be optimal empirically
    gal_tree_proj = trees[angle] # KDTree(gal_rot[['x', 'y']], leafsize=int(10**3.5))
    # ~~~~~ initialize catalogs ~~~~~
    # empty catalog files
    pure_catalog = Catalog(prop=host_data, gal=[])
    contam_catalog = Catalog(prop=host_data, gal=[])
    # record which clusters were successfully measured
    pure_ind = []
    contam_ind = []
    # output member galaxy information
    gal_prop = [(config['gal_mass_def'], '<f8'), ('Mvir', '<f8'), ('Macc', '<f8')] #
    if config['gal_file'] is not None:
        gal_prop = [('obs_sm', '<f8'), ('obs_sfr', '<f8'), ('id', '<i8')]
    else:
        gal_prop += [('rockstarId', '<i8')]# , ('Vmax', '<f8'), ('Vpeak', '<f8')]
    # NOTE(review): this line unconditionally overrides the gal_prop list
    # built just above — presumably a deliberate override for the
    # Magneticum columns ('m', 'id'); confirm before reusing this script
    # with the UM/Uchuu presets.
    gal_prop = [('m', '<f8'), ('id', '<i8')]
    dtype_gal = [('xproj', '<f8'), ('yproj', '<f8'), ('zproj', '<f8'),
                 ('Rproj', '<f8'), ('vlos', '<f8'),
                 ('memb_halo', '?'),
                 *gal_prop
                 ]
    # ~~~ iterate through hosts, assign member galaxies, calculate observables
    mem_proj_list = gal_tree_proj.query_ball_point(
        host_rot.loc[cl_list, ['x', 'y']], config['aperture'])
    for i in range(len(cl_list)): # for each cluster
        ind = cl_list[i]
        if i%1000==0:
            print(angle_id, 'ind', ind)
        # Grab galaxy members as assigned by halo algo
        try:
            mem_true_ind = host_members[host_data.loc[ind, 'rockstarId']]
        except KeyError:
            print('error:',ind)
            continue
        # Sample KDTree to find potential projected members
        mem_proj_ind = mem_proj_list[i]
        # calculate relative velocities of all potential projected members
        v_rel_proj = cal_v_rel(host_rot.loc[ind, 'z'],
                               host_rot.loc[ind, 'vz'],
                               gal_rot.loc[mem_proj_ind, 'z'],
                               gal_rot.loc[mem_proj_ind, 'vz'],
                               config['cosmo'])
        # restrict projected member list to those with |v_los|<v_cut
        v_rel_proj = v_rel_proj[np.abs(v_rel_proj) < config['vcut']]
        mem_proj_ind = v_rel_proj.index.to_series()
        # contaminated catalog: everything inside the projected cylinder
        if len(mem_proj_ind) > config['min_richness']:
            # initialize array of output member galaxy information
            clu_gals = np.zeros(shape=(len(mem_proj_ind)),
                                dtype=dtype_gal)
            # assign relevant information (positions relative to host center)
            clu_gals[['xproj', 'yproj', 'zproj']] = (
                gal_rot.loc[mem_proj_ind, ['x', 'y', 'z']]
                - host_rot.loc[ind, ['x', 'y', 'z']]).to_records(index=False)
            clu_gals['Rproj'] = np.sqrt(
                clu_gals['xproj']**2 + clu_gals['yproj']**2)
            clu_gals['vlos'] = v_rel_proj
            clu_gals['memb_halo'] = [x in mem_true_ind for x in mem_proj_ind]
            clu_gals[[p[0] for p in gal_prop]] = gal_data.loc[
                mem_proj_ind, [p[0] for p in gal_prop]].to_records(index=False)
            # append output galaxy information to catalog
            contam_catalog.gal.append(clu_gals)
            contam_ind.append(ind)
        # pure catalog: only the gravitationally-bound true members
        if len(mem_true_ind) > config['min_richness']:
            # calculate v_los for true members which may not fall in projected
            # cylinder
            mem_truenotproj_ind = [
                j for j in mem_true_ind if j not in mem_proj_ind]
            v_rel_truenotproj = cal_v_rel(host_rot.loc[ind, 'z'],
                                          host_rot.loc[ind, 'vz'],
                                          gal_rot.loc[
                                              mem_truenotproj_ind, 'z'],
                                          gal_rot.loc[
                                              mem_truenotproj_ind, 'vz'],
                                          config['cosmo'])
            # combine cylinder and out-of-cylinder velocities, ordered by
            # the true-member index list
            v_rel_true = v_rel_proj.append(v_rel_truenotproj)[mem_true_ind]
            # initialize array of output member galaxy information
            clu_gals = np.zeros(shape=(len(mem_true_ind)),
                                dtype=dtype_gal)
            # assign relevant information
            clu_gals[['xproj', 'yproj', 'zproj']] = (
                gal_rot.loc[mem_true_ind, ['x', 'y', 'z']]
                - host_rot.loc[ind, ['x', 'y', 'z']]).to_records(index=False)
            clu_gals['Rproj'] = np.sqrt(
                clu_gals['xproj']**2 + clu_gals['yproj']**2)
            clu_gals['vlos'] = v_rel_true
            clu_gals[[p[0] for p in gal_prop]] = gal_data.loc[
                mem_true_ind, [p[0] for p in gal_prop]].to_records(index=False)
            clu_gals['memb_halo'] = True
            # append output galaxy information to catalog
            pure_catalog.gal.append(clu_gals)
            pure_ind.append(ind)
    # restrict output catalogs to only correctly calculated clusters (e.g.
    # above min_richness)
    pure_catalog.prop = pure_catalog.prop.iloc[pure_ind]
    contam_catalog.prop = contam_catalog.prop.iloc[contam_ind]
    # delete unnecessary data
    del gal_rot
    del host_rot
    # print('end', angle_id)
    return (pure_catalog, contam_catalog)
@func_stats
def generate_catalogs(ang_ids):
    """Run cut_mock for every rotation id in a worker pool.

    :param ang_ids: iterable of (theta, phi, process_id) rotation ids.
    :return: list of (pure_catalog, contam_catalog) pairs, in order.
    """
    print(f'Cutting mock clusters with {n_proc} processes...')
    with mp.Pool(processes=n_proc) as pool:
        results = pool.map(cut_mock, ang_ids, chunksize=1)
    return list(results)
@func_stats
def consolidate_catalogs(catalogs, ang_seq):
    """Merge per-rotation catalog pairs into two combined catalogs.

    Preallocates the combined property tables, copies each rotation's
    cluster rows in, tags every row with its rotation angles
    (rot_theta, rot_phi), and computes summary statistics (Ngal, sigv).

    :param catalogs: list of (pure_catalog, contam_catalog) pairs, one
        per rotation id, as returned by cut_mock.
    :param ang_seq: sequence of rotation ids aligned with `catalogs`.
    :return: (pure_catalog, contam_catalog) combined Catalog objects.
    """
    # Total cluster counts, used to preallocate the combined tables
    pure_len = 0
    contam_len = 0
    for i in range(len(catalogs)):
        pure_len += len(catalogs[i][0].prop)
        contam_len += len(catalogs[i][1].prop)
    print('\nCombining rotation catalogs...')
    # Initialize catalogs
    cat_par = OrderedDict([
        ('catalog_name', config['catalog_name']),
        ('z', config['z']),
        ('min_mass', config['host_min_mass']),
        ('min_richness', config['min_richness']),
        ('aperture', config['aperture']),
        ('vcut', config['vcut']),
        ('cosmo', config['cosmo']),
        ('config', config)
    ])
    contam_catalog = Catalog(
        par=cat_par.copy(),
        prop=pd.DataFrame(np.zeros(
            shape=(contam_len, catalogs[0][1].prop.shape[1] + 2)),
            columns=np.append(catalogs[0][1].prop.columns.values, ['rot_theta', 'rot_phi'])),
        gal=[None] * contam_len)
    # The pure catalog applies no cylinder cut, so these do not apply
    cat_par['aperture'] = None
    cat_par['vcut'] = None
    pure_catalog = Catalog(
        par=cat_par.copy(),
        prop=pd.DataFrame(np.zeros(
            shape=(pure_len, catalogs[0][0].prop.shape[1] + 2)),
            columns=np.append(host_data.columns.values, ['rot_theta', 'rot_phi'])),
        gal=[None] * pure_len)
    pure_c = 0
    contam_c = 0
    for i in range(len(catalogs)):
        ang = ang_seq[i]
        pure, contam = catalogs[i]
        pure_c_end = pure_c + len(pure.prop)
        contam_c_end = contam_c + len(contam.prop)
        pure_catalog.prop.iloc[pure_c:pure_c_end, :-2] = pure.prop.values
        # BUG FIX: the pure catalog's rotation labels were previously
        # written at the *contaminated* row range (contam_c:contam_c_end),
        # mislabeling rot_theta/rot_phi whenever the two catalogs differ
        # in length. Also use end-exclusive iloc instead of the previous
        # inclusive .loc slice, which overwrote one row of the next chunk.
        pure_catalog.prop.iloc[pure_c:pure_c_end, -2:] = ang[:2]
        pure_catalog.gal[pure_c:pure_c_end] = pure.gal
        contam_catalog.prop.iloc[contam_c:contam_c_end, :-2] = contam.prop.values
        contam_catalog.prop.iloc[contam_c:contam_c_end, -2:] = ang[:2]
        contam_catalog.gal[contam_c:contam_c_end] = contam.gal
        pure_c = pure_c_end
        contam_c = contam_c_end
    pure_catalog.gal = np.array(pure_catalog.gal)
    contam_catalog.gal = np.array(contam_catalog.gal)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ STATS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('Calculating additional statistics...')
    # Ngal: cluster richness after the cut
    pure_catalog.prop['Ngal'] = [len(x) for x in pure_catalog.gal]
    contam_catalog.prop['Ngal'] = [len(x) for x in contam_catalog.gal]
    # sigv: unbiased standard deviation of member line-of-sight velocities
    gapper = lambda x: np.sqrt(np.sum((x-np.mean(x))**2)/(len(x)-1))
    pure_catalog.prop['sigv'] = [gapper(x['vlos']) for x in pure_catalog.gal]
    contam_catalog.prop['sigv'] = [gapper(x['vlos']) for x in contam_catalog.gal]
    return pure_catalog, contam_catalog
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SAVE DATA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@func_stats
def save_data(pure_catalog, contam_catalog):
    """Write the pure and contaminated catalogs to disk.

    Skips saving entirely in debug mode. When the global `load_ind` is
    set (sharded rot_assign run), outputs go into the catalog's .cache
    directory with the shard index in the filename; otherwise into the
    main output folder.
    """
    global config, debug, load_ind
    if debug:
        return
    print('Saving...')
    out_dir = os.path.join(config['wdir'], config['out_folder'])
    if load_ind is None:
        pure_path = os.path.join(out_dir,
                                 config['catalog_name'] + '_pure.p')
        contam_path = os.path.join(out_dir,
                                   config['catalog_name'] + '_contam.p')
    else:
        cache_dir = os.path.join(out_dir, config['catalog_name'] + '.cache')
        pure_path = os.path.join(cache_dir,
                                 config['catalog_name'] + f'_pure_{load_ind}.p')
        contam_path = os.path.join(cache_dir,
                                   config['catalog_name'] + f'_contam_{load_ind}.p')
    pure_catalog.save(pure_path)
    contam_catalog.save(contam_path)
# Module-level state shared by the pipeline steps; reset before running.
config, host_data, gal_data, host_members, rot_assign, trees, pad_gal_copies = [None]*7

if __name__=='__main__':
    t0 = time.time()
    build_config()
    # Run the full preprocessing pipeline unless it can be restored from cache.
    if (not load_cache) or (save_cache and load_cache):
        handle_parameters()
        load_and_preprocess()
        find_true_members()
        pad()
        reduce()
        get_rot_assign()
        if save_cache and not debug:
            save_intermediates()
    if load_cache:
        load_intermediates()
        if not save_cache:
            handle_parameters()
    # Unique (theta, phi) rotation angles present in the rotation assignment.
    ang_list = np.unique([x[:2] for x in rot_assign.columns.values], axis=0)
    ang_list = [tuple(x) for x in ang_list]
    ang_seq = []
    # Fix: use `and` / `is not None` instead of bitwise `&` and `!= None`.
    if len(ang_list) > 5 and load_ind is not None:
        # Many rotations: process them in chunks of three to bound memory usage.
        print('Segmenting ang_list...')
        catalogs = []
        for i in range(0, len(ang_list), 3):
            print(i, '/', len(ang_list))
            generate_forest(ang_list[i:i + 3])
            ang_ids = [x for x in rot_assign.columns.values if tuple(x[:2]) in trees]
            catalogs += generate_catalogs(ang_ids)
            ang_seq += ang_ids
    else:
        generate_forest(ang_list)
        catalogs = generate_catalogs(rot_assign.columns.values)
        ang_seq = rot_assign.columns.values
    pure_catalog, contam_catalog = consolidate_catalogs(catalogs, ang_seq)
    save_data(pure_catalog, contam_catalog)
    print('\nAll done!')
    print('Total Runtime: ' + str((time.time() - t0) / 60.) + ' minutes')
"""
# ~~~~ Previously run mocks ~~~~
# # UM with z=0.117
# ('catalog_name', 'Rockstar_UM_z=0.117'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:120_v3.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.895100.npy'),
# ('z', 0.117),
# # UM with z=0.000
# ('catalog_name', 'Rockstar_UM_z=0.000'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:125.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_1.000000.npy'),
# ('z', 0.),
# # UM with z=0.022->0.0231
# ('catalog_name', 'Rockstar_UM_z=0.0.022-0231_SMCut=9.5'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:124.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.978100.npy'),
# ('z', 0.0231), # coma
# # UM with z=0.394
# ('catalog_name', 'Rockstar_UM_z=0.394'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:110.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.717300.npy'),
# ('z', 0.394),
# # UM with z=0.045
# ('catalog_name', 'Rockstar_UM_z=0.045'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:123.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.956700.npy'),
# ('z', 0.045),
# # UM with z=0.194
# ('catalog_name', 'Rockstar_UM_z=0.194'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:117.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.837600.npy'),
# ('z', 0.194),
# # UM with z=0.248
# ('catalog_name', 'Rockstar_UM_z=0.248'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:115.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.801300.npy'),
# ('z', 0.248),
# # UM with z=0.304
# ('catalog_name', 'Rockstar_UM_z=0.304'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:113.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.766600.npy'),
# ('z', 0.304),
# UM with z=0.364
# ('catalog_name', 'Rockstar_UM_z=0.364'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:111.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.733300.npy'),
# ('z', 0.364),
# # UM with z=0.425
# ('catalog_name', 'Rockstar_UM_z=0.425'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:109.csv'),
# ('gal_file', 'um_mdpl2_hearin/sfr_catalog_0.701600.npy'),
# ('z', 0.425),
# MD with z=0.117
# ('catalog_name', 'Rockstar_MD_z=0.117'),
# ('host_file', 'cosmosim/MDPL2_Rockstar_snap:120_v3.csv'),
# ('gal_file', None),
# ('z', 0.117),
# BigMDPL with z=0.1131
# ('catalog_name', 'Rockstar_BigMDPL_z=0.1131'),
# ('host_file', 'cosmosim/BigMDPL_Rockstar_snap:73.csv'),
# ('gal_file', None),
# ('z', 0.1131),
# Uchuu with z=0.000
# ('catalog_name', 'Uchuu_z=0.000_bigvol'),
# ('host_file', 'uchuu/Uchuu_snap:050_m200c1e11'),
# ('gal_file', None),
# ('z', 0.),
# Uchuu with z=0.0231
# ('catalog_name', 'Uchuu_z=0.0231_bigvol'),
# ('host_file', 'uchuu/Uchuu_snap:050_m200c1e11'),
# ('gal_file', None),
# ('z', 0.0231), # coma
# Uchuu with z=0.000->0.0231 and Macc>=1e11
# ('catalog_name', 'Uchuu_z=0.0-0.231_Macc1e11'),
# ('host_file', 'uchuu/Uchuu_z=0.0_Maccg=11.csv'),
# ('gal_file', None),
# ('z', 0.0231), # coma
"""
| [
"pickle.dump",
"numpy.random.seed",
"numpy.sum",
"numpy.abs",
"tools.catalog.Catalog",
"collections.defaultdict",
"pickle.load",
"numpy.arange",
"numpy.mean",
"os.path.join",
"numpy.unique",
"pandas.DataFrame",
"numpy.append",
"numpy.random.choice",
"numpy.log10",
"multiprocessing.Pool... | [((2110, 2131), 'numpy.random.seed', 'np.random.seed', (['(44323)'], {}), '(44323)\n', (2124, 2131), True, 'import numpy as np\n'), ((2528, 3323), 'collections.OrderedDict', 'OrderedDict', (["[('wdir', '/hildafs/home/mho1/scratch/halo_cnn'), ('in_folder', 'data_raw'),\n ('out_folder', 'data_mocks'), ('catalog_name',\n 'Magneticum_z=0.066_m1e11-large'), ('host_file',\n 'magneticum/magn_snap136_cluster_fix+Dhayaa.csv'), ('gal_file',\n 'magneticum/magn_snap136_galaxies_fix.csv'), ('z', 0.066340191), (\n 'host_min_mass', 10 ** 13.5), ('gal_min_mass', 10 ** 9.5), (\n 'gal_mass_def', 'm'), ('min_richness', 10), ('cut_size', 'large'), (\n 'volume', 352.0 ** 3), ('dn_dlogm', 10.0 ** -5.0), ('dlogm', 0.01), (\n 'boost_minmass', 10 ** 13.5), ('min_rotations', 3), ('max_rotations', \n 500), ('rot_template', True), ('split_rot', 1), ('cosmo', {'h': 0.704,\n 'Omega_m': 0.272, 'Omega_l': 0.728, 'c': 299792.458}), ('samp_hosts', 1\n ), ('samp_gals', 1)]"], {}), "([('wdir', '/hildafs/home/mho1/scratch/halo_cnn'), ('in_folder',\n 'data_raw'), ('out_folder', 'data_mocks'), ('catalog_name',\n 'Magneticum_z=0.066_m1e11-large'), ('host_file',\n 'magneticum/magn_snap136_cluster_fix+Dhayaa.csv'), ('gal_file',\n 'magneticum/magn_snap136_galaxies_fix.csv'), ('z', 0.066340191), (\n 'host_min_mass', 10 ** 13.5), ('gal_min_mass', 10 ** 9.5), (\n 'gal_mass_def', 'm'), ('min_richness', 10), ('cut_size', 'large'), (\n 'volume', 352.0 ** 3), ('dn_dlogm', 10.0 ** -5.0), ('dlogm', 0.01), (\n 'boost_minmass', 10 ** 13.5), ('min_rotations', 3), ('max_rotations', \n 500), ('rot_template', True), ('split_rot', 1), ('cosmo', {'h': 0.704,\n 'Omega_m': 0.272, 'Omega_l': 0.728, 'c': 299792.458}), ('samp_hosts', 1\n ), ('samp_gals', 1)])\n", (2539, 3323), False, 'from collections import OrderedDict, defaultdict\n'), ((7068, 7091), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(0.0001)'], {}), '(0, 5, 0.0001)\n', (7077, 7091), True, 'import numpy as np\n'), ((9877, 9901), 
'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (9888, 9901), False, 'from collections import OrderedDict, defaultdict\n'), ((14049, 14073), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (14060, 14073), False, 'from collections import OrderedDict, defaultdict\n'), ((16233, 16300), 'pandas.DataFrame', 'pd.DataFrame', (['(0)'], {'index': 'host_data.index', 'columns': "['logM', 'num_rot']"}), "(0, index=host_data.index, columns=['logM', 'num_rot'])\n", (16245, 16300), True, 'import pandas as pd\n'), ((16362, 16390), 'numpy.log10', 'np.log10', (["host_data['M200c']"], {}), "(host_data['M200c'])\n", (16370, 16390), True, 'import numpy as np\n'), ((17628, 17740), 'pandas.DataFrame', 'pd.DataFrame', (['(True)'], {'index': 'host_rots.index', 'columns': '[(0, 0, 0), (np.pi / 2, 0, 0), (np.pi / 2, np.pi / 2, 0)]'}), '(True, index=host_rots.index, columns=[(0, 0, 0), (np.pi / 2, 0,\n 0), (np.pi / 2, np.pi / 2, 0)])\n', (17640, 17740), True, 'import pandas as pd\n'), ((19778, 19867), 'os.path.join', 'os.path.join', (["config['wdir']", "config['out_folder']", "(config['catalog_name'] + '.cache')"], {}), "(config['wdir'], config['out_folder'], config['catalog_name'] +\n '.cache')\n", (19790, 19867), False, 'import os\n'), ((21383, 21472), 'os.path.join', 'os.path.join', (["config['wdir']", "config['out_folder']", "(config['catalog_name'] + '.cache')"], {}), "(config['wdir'], config['out_folder'], config['catalog_name'] +\n '.cache')\n", (21395, 21472), False, 'import os\n'), ((22234, 22282), 'numpy.matmul', 'np.matmul', (["gal_data[['x', 'y', 'z']].values", 'R.T'], {}), "(gal_data[['x', 'y', 'z']].values, R.T)\n", (22243, 22282), True, 'import numpy as np\n'), ((24410, 24441), 'tools.catalog.Catalog', 'Catalog', ([], {'prop': 'host_data', 'gal': '[]'}), '(prop=host_data, gal=[])\n', (24417, 24441), False, 'from tools.catalog import Catalog\n'), ((24463, 24494), 'tools.catalog.Catalog', 'Catalog', ([], 
{'prop': 'host_data', 'gal': '[]'}), '(prop=host_data, gal=[])\n', (24470, 24494), False, 'from tools.catalog import Catalog\n'), ((29938, 30216), 'collections.OrderedDict', 'OrderedDict', (["[('catalog_name', config['catalog_name']), ('z', config['z']), ('min_mass',\n config['host_min_mass']), ('min_richness', config['min_richness']), (\n 'aperture', config['aperture']), ('vcut', config['vcut']), ('cosmo',\n config['cosmo']), ('config', config)]"], {}), "([('catalog_name', config['catalog_name']), ('z', config['z']),\n ('min_mass', config['host_min_mass']), ('min_richness', config[\n 'min_richness']), ('aperture', config['aperture']), ('vcut', config[\n 'vcut']), ('cosmo', config['cosmo']), ('config', config)])\n", (29949, 30216), False, 'from collections import OrderedDict, defaultdict\n'), ((31942, 31968), 'numpy.array', 'np.array', (['pure_catalog.gal'], {}), '(pure_catalog.gal)\n', (31950, 31968), True, 'import numpy as np\n'), ((31994, 32022), 'numpy.array', 'np.array', (['contam_catalog.gal'], {}), '(contam_catalog.gal)\n', (32002, 32022), True, 'import numpy as np\n'), ((34015, 34026), 'time.time', 'time.time', ([], {}), '()\n', (34024, 34026), False, 'import time\n'), ((34487, 34548), 'numpy.unique', 'np.unique', (['[x[:2] for x in rot_assign.columns.values]'], {'axis': '(0)'}), '([x[:2] for x in rot_assign.columns.values], axis=0)\n', (34496, 34548), True, 'import numpy as np\n'), ((7937, 8007), 'os.path.join', 'os.path.join', (["config['wdir']", "config['in_folder']", "config['host_file']"], {}), "(config['wdir'], config['in_folder'], config['host_file'])\n", (7949, 8007), False, 'import os\n'), ((13192, 13217), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'n_proc'}), '(processes=n_proc)\n', (13199, 13217), True, 'import multiprocessing as mp\n'), ((19039, 19062), 'numpy.sum', 'np.sum', (['rot_assign[ang]'], {}), '(rot_assign[ang])\n', (19045, 19062), True, 'import numpy as np\n'), ((19938, 19962), 'os.path.isdir', 'os.path.isdir', 
(['cache_dir'], {}), '(cache_dir)\n', (19951, 19962), False, 'import os\n'), ((19972, 19994), 'os.makedirs', 'os.makedirs', (['cache_dir'], {}), '(cache_dir)\n', (19983, 19994), False, 'import os\n'), ((20070, 20092), 'pickle.dump', 'pickle.dump', (['config', 'f'], {}), '(config, f)\n', (20081, 20092), False, 'import pickle\n'), ((20234, 20272), 'os.path.join', 'os.path.join', (['cache_dir', '"""host_data.p"""'], {}), "(cache_dir, 'host_data.p')\n", (20246, 20272), False, 'import os\n'), ((20297, 20334), 'os.path.join', 'os.path.join', (['cache_dir', '"""gal_data.p"""'], {}), "(cache_dir, 'gal_data.p')\n", (20309, 20334), False, 'import os\n'), ((21611, 21625), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (21622, 21625), False, 'import pickle\n'), ((21718, 21732), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (21729, 21732), False, 'import pickle\n'), ((21769, 21807), 'os.path.join', 'os.path.join', (['cache_dir', '"""host_data.p"""'], {}), "(cache_dir, 'host_data.p')\n", (21781, 21807), False, 'import os\n'), ((21839, 21876), 'os.path.join', 'os.path.join', (['cache_dir', '"""gal_data.p"""'], {}), "(cache_dir, 'gal_data.p')\n", (21851, 21876), False, 'import os\n'), ((29492, 29517), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'n_proc'}), '(processes=n_proc)\n', (29499, 29517), True, 'import multiprocessing as mp\n'), ((8285, 8354), 'os.path.join', 'os.path.join', (["config['wdir']", "config['in_folder']", "config['gal_file']"], {}), "(config['wdir'], config['in_folder'], config['gal_file'])\n", (8297, 8354), False, 'import os\n'), ((11020, 11089), 'numpy.sum', 'np.sum', (["((gal_data.iloc[host_mem[j]][['x', 'y', 'z']] - host_pos) ** 2)"], {}), "((gal_data.iloc[host_mem[j]][['x', 'y', 'z']] - host_pos) ** 2)\n", (11026, 11089), True, 'import numpy as np\n'), ((13852, 13899), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_padded, gal_data.shape[1])'}), '(shape=(num_padded, gal_data.shape[1]))\n', (13860, 13899), True, 'import numpy as np\n'), 
((17289, 17355), 'numpy.argwhere', 'np.argwhere', (["(host_rots['num_rot'].values < config['min_rotations'])"], {}), "(host_rots['num_rot'].values < config['min_rotations'])\n", (17300, 17355), True, 'import numpy as np\n'), ((17459, 17525), 'numpy.argwhere', 'np.argwhere', (["(host_rots['num_rot'].values > config['max_rotations'])"], {}), "(host_rots['num_rot'].values > config['max_rotations'])\n", (17470, 17525), True, 'import numpy as np\n'), ((19263, 19325), 'numpy.random.choice', 'np.random.choice', (['tot_processes'], {'size': 'num_in_rot', 'replace': '(True)'}), '(tot_processes, size=num_in_rot, replace=True)\n', (19279, 19325), True, 'import numpy as np\n'), ((20014, 20048), 'os.path.join', 'os.path.join', (['cache_dir', '"""cache.p"""'], {}), "(cache_dir, 'cache.p')\n", (20026, 20048), False, 'import os\n'), ((20107, 20148), 'os.path.join', 'os.path.join', (['cache_dir', '"""host_members.p"""'], {}), "(cache_dir, 'host_members.p')\n", (20119, 20148), False, 'import os\n'), ((20401, 20440), 'os.path.join', 'os.path.join', (['cache_dir', '"""rot_assign.p"""'], {}), "(cache_dir, 'rot_assign.p')\n", (20413, 20440), False, 'import os\n'), ((20564, 20581), 'numpy.sum', 'np.sum', (['col_count'], {}), '(col_count)\n', (20570, 20581), True, 'import numpy as np\n'), ((21192, 21240), 'os.path.join', 'os.path.join', (['cache_dir', 'f"""rot_assign_{i + 1}.p"""'], {}), "(cache_dir, f'rot_assign_{i + 1}.p')\n", (21204, 21240), False, 'import os\n'), ((21546, 21580), 'os.path.join', 'os.path.join', (['cache_dir', '"""cache.p"""'], {}), "(cache_dir, 'cache.p')\n", (21558, 21580), False, 'import os\n'), ((21640, 21681), 'os.path.join', 'os.path.join', (['cache_dir', '"""host_members.p"""'], {}), "(cache_dir, 'host_members.p')\n", (21652, 21681), False, 'import os\n'), ((21942, 21981), 'os.path.join', 'os.path.join', (['cache_dir', '"""rot_assign.p"""'], {}), "(cache_dir, 'rot_assign.p')\n", (21954, 21981), False, 'import os\n'), ((22029, 22080), 'os.path.join', 
'os.path.join', (['cache_dir', 'f"""rot_assign_{load_ind}.p"""'], {}), "(cache_dir, f'rot_assign_{load_ind}.p')\n", (22041, 22080), False, 'import os\n'), ((22550, 22575), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'n_proc'}), '(processes=n_proc)\n', (22557, 22575), True, 'import multiprocessing as mp\n'), ((26945, 27001), 'numpy.sqrt', 'np.sqrt', (["(clu_gals['xproj'] ** 2 + clu_gals['yproj'] ** 2)"], {}), "(clu_gals['xproj'] ** 2 + clu_gals['yproj'] ** 2)\n", (26952, 27001), True, 'import numpy as np\n'), ((28576, 28632), 'numpy.sqrt', 'np.sqrt', (["(clu_gals['xproj'] ** 2 + clu_gals['yproj'] ** 2)"], {}), "(clu_gals['xproj'] ** 2 + clu_gals['yproj'] ** 2)\n", (28583, 28632), True, 'import numpy as np\n'), ((21082, 21126), 'os.path.join', 'os.path.join', (['cache_dir', 'f"""rot_assign_{i}.p"""'], {}), "(cache_dir, f'rot_assign_{i}.p')\n", (21094, 21126), False, 'import os\n'), ((23656, 23704), 'numpy.matmul', 'np.matmul', (["gal_data[['x', 'y', 'z']].values", 'R.T'], {}), "(gal_data[['x', 'y', 'z']].values, R.T)\n", (23665, 23704), True, 'import numpy as np\n'), ((23730, 23781), 'numpy.matmul', 'np.matmul', (["gal_data[['vx', 'vy', 'vz']].values", 'R.T'], {}), "(gal_data[['vx', 'vy', 'vz']].values, R.T)\n", (23739, 23781), True, 'import numpy as np\n'), ((23912, 23961), 'numpy.matmul', 'np.matmul', (["host_data[['x', 'y', 'z']].values", 'R.T'], {}), "(host_data[['x', 'y', 'z']].values, R.T)\n", (23921, 23961), True, 'import numpy as np\n'), ((23987, 24039), 'numpy.matmul', 'np.matmul', (["host_data[['vx', 'vy', 'vz']].values", 'R.T'], {}), "(host_data[['vx', 'vy', 'vz']].values, R.T)\n", (23996, 24039), True, 'import numpy as np\n'), ((26327, 26345), 'numpy.abs', 'np.abs', (['v_rel_proj'], {}), '(v_rel_proj)\n', (26333, 26345), True, 'import numpy as np\n'), ((30378, 30440), 'numpy.zeros', 'np.zeros', ([], {'shape': '(contam_len, catalogs[0][1].prop.shape[1] + 2)'}), '(shape=(contam_len, catalogs[0][1].prop.shape[1] + 2))\n', (30386, 30440), True, 
'import numpy as np\n'), ((30724, 30784), 'numpy.zeros', 'np.zeros', ([], {'shape': '(pure_len, catalogs[0][0].prop.shape[1] + 2)'}), '(shape=(pure_len, catalogs[0][0].prop.shape[1] + 2))\n', (30732, 30784), True, 'import numpy as np\n'), ((32887, 32977), 'os.path.join', 'os.path.join', (["config['wdir']", "config['out_folder']", "(config['catalog_name'] + '_pure.p')"], {}), "(config['wdir'], config['out_folder'], config['catalog_name'] +\n '_pure.p')\n", (32899, 32977), False, 'import os\n'), ((33093, 33185), 'os.path.join', 'os.path.join', (["config['wdir']", "config['out_folder']", "(config['catalog_name'] + '_contam.p')"], {}), "(config['wdir'], config['out_folder'], config['catalog_name'] +\n '_contam.p')\n", (33105, 33185), False, 'import os\n'), ((33317, 33454), 'os.path.join', 'os.path.join', (["config['wdir']", "config['out_folder']", "(config['catalog_name'] + '.cache')", "(config['catalog_name'] + f'_pure_{load_ind}.p')"], {}), "(config['wdir'], config['out_folder'], config['catalog_name'] +\n '.cache', config['catalog_name'] + f'_pure_{load_ind}.p')\n", (33329, 33454), False, 'import os\n'), ((33613, 33752), 'os.path.join', 'os.path.join', (["config['wdir']", "config['out_folder']", "(config['catalog_name'] + '.cache')", "(config['catalog_name'] + f'_contam_{load_ind}.p')"], {}), "(config['wdir'], config['out_folder'], config['catalog_name'] +\n '.cache', config['catalog_name'] + f'_contam_{load_ind}.p')\n", (33625, 33752), False, 'import os\n'), ((10694, 10713), 'numpy.sum', 'np.sum', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (10700, 10713), True, 'import numpy as np\n'), ((11433, 11500), 'numpy.sum', 'np.sum', (["((gal_data.iloc[close_gal][['x', 'y', 'z']] - host_pos) ** 2)"], {}), "((gal_data.iloc[close_gal][['x', 'y', 'z']] - host_pos) ** 2)\n", (11439, 11500), True, 'import numpy as np\n'), ((16529, 16562), 'numpy.log10', 'np.log10', (["config['boost_minmass']"], {}), "(config['boost_minmass'])\n", (16537, 16562), True, 'import numpy as 
np\n'), ((19190, 19225), 'numpy.argwhere', 'np.argwhere', (['rot_assign[ang].values'], {}), '(rot_assign[ang].values)\n', (19201, 19225), True, 'import numpy as np\n'), ((30475, 30546), 'numpy.append', 'np.append', (['catalogs[0][1].prop.columns.values', "['rot_theta', 'rot_phi']"], {}), "(catalogs[0][1].prop.columns.values, ['rot_theta', 'rot_phi'])\n", (30484, 30546), True, 'import numpy as np\n'), ((30819, 30880), 'numpy.append', 'np.append', (['host_data.columns.values', "['rot_theta', 'rot_phi']"], {}), "(host_data.columns.values, ['rot_theta', 'rot_phi'])\n", (30828, 30880), True, 'import numpy as np\n'), ((11256, 11353), 'numpy.sum', 'np.sum', (["((gal_data.iloc[pad_gal_copies[host_mem[j]]][['x', 'y', 'z']] - host_pos) ** 2)"], {'axis': '(1)'}), "((gal_data.iloc[pad_gal_copies[host_mem[j]]][['x', 'y', 'z']] -\n host_pos) ** 2, axis=1)\n", (11262, 11353), True, 'import numpy as np\n'), ((32362, 32372), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (32369, 32372), True, 'import numpy as np\n'), ((35387, 35398), 'time.time', 'time.time', ([], {}), '()\n', (35396, 35398), False, 'import time\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import requests
import numpy.testing as npt
from astropy.tests.helper import pytest
from astropy.table import Table
import astropy.units as u
from ...utils.testing_tools import MockResponse
from ... import nist
DATA_FILES = {'lines': 'nist_out.html'}
def data_path(filename):
    """Return the absolute path of *filename* inside the test ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_get(request):
    """Fixture that monkeypatches ``requests.get`` to return canned NIST HTML.

    Uses ``request.getfixturevalue`` — the modern spelling of
    ``getfuncargvalue``, which was deprecated and removed in pytest 4.
    """
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(requests, 'get', get_mockreturn)
    return mp
def get_mockreturn(url, params=None, timeout=10, **kwargs):
    """Stand-in for ``requests.get`` that serves the cached NIST lines page.

    The signature mirrors ``requests.get``; ``url``, ``params`` and
    ``timeout`` are accepted but ignored. The file handle is now closed
    deterministically via a context manager (the original leaked it).
    """
    filename = data_path(DATA_FILES['lines'])
    with open(filename, 'r') as infile:
        content = infile.read()
    return MockResponse(content, **kwargs)
def test_parse_wavelength():
    """Wavelength bounds should parse to Angstrom values and the matching unit code."""
    wmin, wmax, unit_code = nist.core._parse_wavelength(4000 * u.AA, 7000 * u.AA)
    npt.assert_approx_equal(wmin, 4000, significant=4)
    npt.assert_approx_equal(wmax, 7000, significant=4)
    assert unit_code == nist.core.Nist.unit_code['Angstrom']
def test_query_async(patch_get):
    """Exercise the async query, first as payload-only, then as a full request."""
    payload = nist.core.Nist.query_async(4000 * u.nm, 7000 * u.nm, "H I",
                                           get_query_payload=True)
    assert payload['spectra'] == "H I"
    assert payload['unit'] == nist.core.Nist.unit_code['nm']
    response = nist.core.Nist.query_async(4000 * u.nm, 7000 * u.nm, "H I")
    assert response is not None
def test_query(patch_get):
    """The synchronous query should parse the mocked response into a Table."""
    table = nist.core.Nist.query(4000 * u.nm, 7000 * u.nm, "H I")
    assert isinstance(table, Table)
| [
"numpy.testing.assert_approx_equal",
"os.path.dirname",
"os.path.join"
] | [((429, 461), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (441, 461), False, 'import os\n'), ((922, 974), 'numpy.testing.assert_approx_equal', 'npt.assert_approx_equal', (['minwav', '(4000)'], {'significant': '(4)'}), '(minwav, 4000, significant=4)\n', (945, 974), True, 'import numpy.testing as npt\n'), ((979, 1031), 'numpy.testing.assert_approx_equal', 'npt.assert_approx_equal', (['maxwav', '(7000)'], {'significant': '(4)'}), '(maxwav, 7000, significant=4)\n', (1002, 1031), True, 'import numpy.testing as npt\n'), ((383, 408), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (398, 408), False, 'import os\n')] |
from collections import OrderedDict
from itertools import count
from warnings import warn
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
from tqdm import tqdm
from pycircstat import event_series as es
import pycircstat as circ
from .utils.data import peakdet
from .utils.locking import find_significant_peaks, find_best_locking, vector_strength_at
from . import colordict
import numpy as np
import pandas as pd
import seaborn as sns
import sympy
from scipy import optimize, stats, signal
from matplotlib.ticker import PercentFormatter
import datajoint as dj
from .data import Runs, Cells, BaseEOD, Baseline
schema = dj.schema('efish_analysis', locals())
class PlotableSpectrum:
    """Mixin that renders a vector-strength spectrum with annotated locking peaks."""

    def plot(self, ax, restrictions, f_max=2000, ncol=None):
        """
        Plot the vector-strength spectrum selected by ``restrictions`` onto ``ax``.

        Significant peaks are marked with per-category markers/colors and
        labeled with the symbolic combination of stimulus frequency, EOD
        frequency, baseline rate, and beat frequency that explains them.

        :param ax: matplotlib axis to draw on
        :param restrictions: datajoint restriction selecting the runs to plot
        :param f_max: maximal frequency (Hz) shown on the x-axis
        :param ncol: number of legend columns (defaults to one per entry)
        """
        sns.set_context('paper')
        markers = [(4, 0, 90), '^', 'D', 's', 'o']
        # sympy symbols used to build the human-readable peak labels
        # (raw string: '\D' is an invalid escape in a plain string literal)
        stim, eod, baseline, beat = sympy.symbols(r'f_s, EODf, f_b, \Delta')

        def label_order(x):
            # fixed legend ordering: EODf, stimulus, beat, everything else
            # (hoisted out of the loop; the original defined it per-iteration
            # and would raise NameError if no peak group existed)
            if 'EODf' in x[0]:
                return 0
            elif 'stimulus' in x[0]:
                return 1
            elif 'Delta' in x[0]:
                return 2
            else:
                return 3

        for fos in ((self * Runs()).proj() & restrictions).fetch(as_dict=True):
            # pick the significant-peak relation matching the concrete subclass
            if isinstance(self, FirstOrderSpikeSpectra):
                peaks = (FirstOrderSignificantPeaks() * restrictions & fos)
            elif isinstance(self, SecondOrderSpikeSpectra):
                if isinstance(restrictions, dict):
                    peaks = (SecondOrderSignificantPeaks() & restrictions & fos)
                else:
                    peaks = (SecondOrderSignificantPeaks() * restrictions & fos)
            else:
                raise Exception("Mother class unknown!")

            f, v, alpha, cell, run = (self & fos).fetch1('frequencies', 'vector_strengths', 'critical_value',
                                                         'cell_id', 'run_id')
            # insert refined vector strengths at the significant peak frequencies
            peak_f, peak_v = peaks.fetch('frequency', 'vector_strength')
            f = np.hstack((f, peak_f))
            v = np.hstack((v, peak_v))
            idx = np.argsort(f)
            f, v = f[idx], v[idx]

            # only take finite vector strengths at frequencies within [0, f_max]
            idx = (f >= 0) & (f <= f_max) & ~np.isnan(v)
            # silver band: significance threshold; black: the spectrum itself
            ax.fill_between(f[idx], 0 * f[idx], 0 * f[idx] + alpha, lw=0, color='silver')
            ax.fill_between(f[idx], 0 * f[idx], v[idx], lw=0, color='k')

            ax.set_xlabel('frequency [Hz]')
            ax.set_ylabel('vector strength')
            ax.set_ylim((0, 1.))
            ax.set_xlim((0, f_max))
            ax.set_yticks([0, .25, .5, .75, 1.0])

            df = pd.DataFrame(peaks.fetch())
            # peak "order" = sum of absolute coefficients of its first three
            # columns (stimulus/eod/baseline). `.iloc` replaces the pandas
            # `.ix` accessor, which was deprecated and removed.
            df['on'] = np.abs(df.iloc[:, :3]).sum(axis=1)
            df = df[df.frequency > 0]

            for freq, freq_group in df.groupby('frequency'):  # get all combinations that have the same frequency
                freq_group = freq_group[
                    freq_group.on == freq_group.on.min()]  # take the ones that have the lowest factors

                for i, (cs, ce, cb, freq, vs) in freq_group[
                    ['stimulus_coeff', 'eod_coeff', 'baseline_coeff', 'frequency', 'vector_strength']].iterrows():
                    terms = []
                    if 0 <= freq <= f_max:
                        term = cs * stim + ce * eod + cb * baseline
                        # rewrite opposite-sign stimulus/eod contributions as a beat term
                        if (cs < 0 and ce > 0) or (cs > 0 and ce < 0):
                            coeff = np.sign(ce) * min(abs(cs), abs(ce))
                            term = term + coeff * (stim - eod) - coeff * beat
                        terms.append(sympy.latex(term.simplify()))
                    term = ' = '.join(terms)

                    fontsize = ax.xaxis.get_ticklabels()[0].get_fontsize()

                    # use different colors and labels depending on the frequency
                    if cs != 0 and ce == 0 and cb == 0:
                        ax.plot(freq, vs, 'k', mfc=colordict['stimulus'],
                                label='$f_s$={:.0f} Hz'.format(freq, ) if cs == 1 else None, marker=markers[0],
                                linestyle='None')
                    elif cs == 0 and ce != 0 and cb == 0:
                        ax.plot(freq, vs, 'k', mfc=colordict['eod'],
                                label='EODf = {:.0f} Hz'.format(freq) if ce == 1 else None,
                                marker=markers[1],
                                linestyle='None')
                    elif cs == 0 and ce == 0 and cb != 0:
                        ax.plot(freq, vs, 'k', mfc=colordict['baseline'],
                                label='baseline firing = {:.0f} Hz'.format(freq) if cb == 1 else None,
                                marker=markers[2],
                                linestyle='None')
                    elif cs == 1 and ce == -1 and cb == 0:
                        ax.plot(freq, vs, 'k', mfc=colordict['delta_f'],
                                label=r'$\Delta f$=%.0f Hz' % freq,
                                marker=markers[3],
                                linestyle='None')
                    else:
                        ax.plot(freq, vs, 'k', mfc=colordict['combinations'], label='combinations', marker=markers[4],
                                linestyle='None')

                    # cosmetic cleanup of the LaTeX label before drawing it
                    term = term.replace('1.0 ', ' ')
                    term = term.replace('.0 ', ' ')
                    term = term.replace('EODf', '\\mathdefault{EODf}')
                    term = term.replace('\\Delta', '\\Delta f')
                    ax.text(freq - 20, vs + 0.05, r'${}$'.format(term),
                            fontsize=fontsize, rotation=90, ha='left', va='bottom')

        handles, labels = ax.get_legend_handles_labels()
        by_label = OrderedDict(sorted(zip(labels, handles), key=label_order))
        ax.legend(by_label.values(), by_label.keys(), bbox_to_anchor=(1, 1.3), frameon=False,
                  ncol=len(by_label) if ncol is None else ncol)
@schema
class CoincidenceTolerance(dj.Lookup):
    # Lookup table holding the tolerance (in seconds) within which an EOD
    # peak and a stimulus peak are considered coincident for trial alignment.
    definition = """
    # Coincidence tolerance of EOD and stimulus phase in s
    coincidence_idx         : int
    ---
    tol                     : double
    """

    contents = [(0, 0.0001), ]
@schema
class SpectraParameters(dj.Lookup):
    # Lookup table of spectrum-analysis settings; currently just the upper
    # frequency cutoff used when computing vector-strength spectra.
    definition = """
    spectra_setting     : tinyint   # index of the setting
    ---
    f_max       : float     # maximal frequency considered
    """

    contents = [(0, 2000)]
@schema
class TrialAlign(dj.Computed):
    # Computes, per trial, a reference time t0 at which an EOD peak and a
    # stimulus peak coincide (within CoincidenceTolerance), so that spike
    # trains from different trials can be aligned to a common phase.
    definition = """
    # computes a time point where the EOD and the stimulus coincide
    -> Runs                         # each trial has an alignmnt point
    -> CoincidenceTolerance         # tolerance of alignment
    ---
    """

    class Alignment(dj.Part):
        definition = """
        -> master
        -> Runs.SpikeTimes
        -> Runs.GlobalEODPeaksTroughs
        -> Runs.GlobalEFieldPeaksTroughs
        ---
        t0                          : double # time where the trial will be aligned to
        """

    @property
    def key_source(self):
        # restrict to unmodulated runs without harmonics
        return Runs() * CoincidenceTolerance() & dict(am=0, n_harmonics=0)  # TODO: is n_harmonics=0 necessary, what's with the newly recorded cells?

    def _make_tuples(self, key):
        """Find, for each trial, the earliest EOD peak with a coinciding stimulus peak.

        Trials where no EOD/stimulus peak pair lies within the tolerance are
        skipped with a warning (they get no Alignment entry).
        """
        print('Populating', key)
        tol = CoincidenceTolerance().fetch1('tol')
        samplingrate = (Runs() & key).fetch1('samplingrate')
        trials = Runs.GlobalEODPeaksTroughs() * \
                 Runs.GlobalEFieldPeaksTroughs().proj(stim_peaks='peaks') * \
                 Runs.SpikeTimes() & key
        self.insert1(key)
        for trial_key in tqdm(trials.fetch.keys()):
            ep, sp = (trials & trial_key).fetch1('peaks', 'stim_peaks')
            # keep EOD peaks whose nearest stimulus peak is within tolerance
            # (tol is in seconds, peaks in samples); convert result to seconds
            p0 = ep[np.abs(sp[:, None] - ep[None, :]).min(axis=0) <= tol * samplingrate] / samplingrate
            if len(p0) == 0:
                warn('Could not find an alignment within given tolerance of {}s. Skipping!'.format(tol))
                continue
            else:
                self.Alignment().insert1(dict(trial_key, **key, t0=p0.min()), ignore_extra_fields=True)

    def load_trials(self, restriction):
        """
        Loads aligned trials.

        :param restriction: restriction on Runs.SpikeTimes() * TrialAlign()
        :returns: aligned trials; spike times are in seconds
        """
        trials = Runs.SpikeTimes() * TrialAlign.Alignment() & restriction
        # spike times are stored in ms; convert to s and subtract the trial's t0
        return [s / 1000 - t0 for s, t0 in zip(*trials.fetch('times', 't0'))]

    def plot(self, ax, restriction):
        """Raster plot of the aligned trials (one row of dots per trial)."""
        trials = self.load_trials(restriction)
        for i, trial in enumerate(trials):
            ax.plot(trial, 0 * trial + i, '.k', ms=1)

        ax.set_ylabel('trial no')
        ax.set_xlabel('time [s]')

    def plot_traces(self, ax, restriction):
        """Overlay the first 10 ms of the aligned global EOD/EField traces."""
        sampling_rate = (Runs() & restriction).fetch('samplingrate')
        sampling_rate = np.unique(sampling_rate)
        assert len(sampling_rate) == 1, 'Sampling rate must be unique by restriction'
        sampling_rate = sampling_rate[0]

        trials = Runs.GlobalEOD() * Runs.GlobalEField() * TrialAlign.Alignment() & restriction
        t = np.arange(0, 0.01, 1 / sampling_rate)
        n = len(t)
        # NOTE(review): fetch order is ('global_efield', 'global_voltage'), so
        # `geod` holds the efield trace and `gef` the voltage trace — the
        # variable names look swapped relative to the fetch; confirm which
        # trace each color is meant to show.
        for geod, gef, t0 in zip(*trials.fetch('global_efield', 'global_voltage', 't0')):
            ax.plot(t - t0, geod[:n], '-', color='dodgerblue', lw=.1)
            ax.plot(t - t0, gef[:n], '-', color='k', lw=.1)
@schema
class FirstOrderSpikeSpectra(dj.Computed, PlotableSpectrum):
    definition = """
    # table that holds 1st order vector strength spectra
    -> Runs                         # each run has a spectrum
    -> SpectraParameters
    ---
    frequencies             : longblob # frequencies at which the spectra are computed
    vector_strengths        : longblob # vector strengths at those frequencies
    critical_value          : float # critical value for significance with alpha=0.001
    """

    @property
    def key_source(self):
        # only unmodulated runs (am=0) that have a trial alignment
        return Runs() * SpectraParameters() & TrialAlign() & dict(am=0)

    @staticmethod
    def compute_1st_order_spectrum(aggregated_spikes, sampling_rate, duration, alpha=0.001, f_max=2000):
        """
        Computes the 1st order amplitude spectrum of the spike train (i.e. the vector strength spectrum
        of the aggregated spikes).

        :param aggregated_spikes: all spike times over all trials
        :param sampling_rate: sampling rate of the spikes
        :param duration: trial duration in seconds (sets the frequency resolution)
        :param alpha: significance level for the boundary against non-locking
        :param f_max: maximal absolute frequency (Hz) retained in the spectrum
        :returns: the frequencies for the vector strength spectrum, the spectrum, and the threshold against non-locking
        """
        if len(aggregated_spikes) < 2:
            # too few spikes for a meaningful spectrum
            return np.array([0]), np.array([0]), 0,
        f = np.fft.fftfreq(int(duration * sampling_rate), 1 / sampling_rate)
        f = f[(f >= -f_max) & (f <= f_max)]
        v = es.direct_vector_strength_spectrum(aggregated_spikes, f)
        # Rayleigh-type threshold against the null hypothesis of no locking
        threshold = np.sqrt(- np.log(alpha) / len(aggregated_spikes))
        return f, v, threshold

    def _make_tuples(self, key):
        # Compute and insert the first-order spectrum for one run.
        print('Processing', key['cell_id'], 'run', key['run_id'], )
        samplingrate, duration = (Runs() & key).fetch1('samplingrate', 'duration')
        f_max = (SpectraParameters() & key).fetch1('f_max')

        aggregated_spikes = TrialAlign().load_trials(key)
        if len(aggregated_spikes) > 0:
            aggregated_spikes = np.hstack(aggregated_spikes)
        else:
            warn("Trial align returned no spikes! Continuing")
            return
        key['frequencies'], key['vector_strengths'], key['critical_value'] = \
            self.compute_1st_order_spectrum(aggregated_spikes, samplingrate, duration, alpha=0.001, f_max=f_max)
        vs = key['vector_strengths']
        # NaN vector strengths are treated as "no locking"
        vs[np.isnan(vs)] = 0
        self.insert1(key)
@schema
class FirstOrderSignificantPeaks(dj.Computed):
    definition = """
    # hold significant peaks in spektra
    stimulus_coeff          : int   # how many multiples of the stimulus
    eod_coeff               : int   # how many multiples of the eod
    baseline_coeff          : int   # how many multiples of the baseline firing rate
    refined                 : int   # whether the search was refined or not
    ->FirstOrderSpikeSpectra
    ---
    frequency               : double # frequency at which there is significant locking
    vector_strength         : double # vector strength at that frequency
    tolerance               : double # tolerance within which a peak was accepted
    """

    def _make_tuples(self, key):
        # Refined search for significant locking at combinations of stimulus,
        # EOD, and baseline frequency; duplicate peaks at the same primary key
        # are tagged with successively decreasing negative 'refined' values.
        double_peaks = -1
        data = (FirstOrderSpikeSpectra() & key).fetch1()
        run = (Runs() & key).fetch1()
        cell = (Cells() & key).fetch1()

        spikes = np.hstack(TrialAlign().load_trials(key))

        # base frequencies whose multiples/combinations are scanned for locking
        interesting_frequencies = {'stimulus_coeff': run['eod'] + run['delta_f'], 'eod_coeff': run['eod'],
                                   'baseline_coeff': cell['baseline']}
        f_max = (SpectraParameters() & key).fetch1('f_max')
        sas = find_significant_peaks(spikes, data['frequencies'], data['vector_strengths'],
                                     interesting_frequencies, data['critical_value'], upper_cutoff=f_max)
        for s in sas:
            s.update(key)

            try:
                self.insert1(s)
            except dj.DataJointError:  # sometimes one peak has two peaks nearby
                print("Found double peak")
                s['refined'] = double_peaks
                self.insert1(s)
                double_peaks -= 1
@schema
class SecondOrderSpikeSpectra(dj.Computed, PlotableSpectrum):
    """2nd-order vector strength spectra (trial-averaged) per run."""
    definition = """
    # table that holds 2nd order vector strength spectra
    -> Runs # each run has a spectrum
    -> SpectraParameters
    ---
    frequencies : longblob # frequencies at which the spectra are computed
    vector_strengths : longblob # vector strengths at those frequencies
    critical_value : float # critical value for significance with alpha=0.001
    """
    @property
    def key_source(self):
        # restrict to runs without amplitude modulation
        return Runs() * SpectraParameters() & dict(am=0)
    @staticmethod
    def compute_2nd_order_spectrum(spikes, t, sampling_rate, alpha=0.001, method='poisson', f_max=2000):
        """
        Computes the 2nd order amplitude spectrum of the spike train (i.e. the vector strength
        spectra of the single trials, averaged over trials).
        :param spikes: list of spike trains from the single trials
        :param t: numpy.array of time points
        :param sampling_rate: sampling rate of the spikes
        :param alpha: significance level for the boundary against non-locking
        :param method: method to compute the confidence interval (poisson or gauss)
        :param f_max: maximal absolute frequency retained in the returned spectrum
        :returns: the frequencies for the vector strength spectrum, the spectrum, and the threshold against non-locking
        """
        # compute 99% confidence interval for Null distribution of 2nd order spectra (no locking)
        spikes_per_trial = list(map(len, spikes))
        # per-trial spectra; all trials share the same frequency grid
        freqs, vs_spectra = zip(*[es.vector_strength_spectrum(sp, sampling_rate, time=t) for sp in spikes])
        freqs = freqs[0]
        m_ampl = np.mean(vs_spectra, axis=0)
        if method == 'poisson':
            # null distribution of the trial-averaged vector strength assuming
            # Poisson spike counts; mu and s are its first two moments obtained
            # by numerical integration over r
            poiss_rate = np.mean(spikes_per_trial)
            r = np.linspace(0, 2, 10000)
            dr = r[1] - r[0]
            mu = np.sum(2 * poiss_rate * r ** 2 * np.exp(poiss_rate * np.exp(-r ** 2) - poiss_rate - r ** 2) / (
                1 - np.exp(-poiss_rate))) * dr
            s = np.sum(2 * poiss_rate * r ** 3 * np.exp(poiss_rate * np.exp(-r ** 2) - poiss_rate - r ** 2) / (
                1 - np.exp(-poiss_rate))) * dr
            s2 = np.sqrt(s - mu ** 2.)
            y = stats.norm.ppf(1 - alpha, loc=mu,
                               scale=s2 / np.sqrt(len(spikes_per_trial)))  # use central limit theorem
        elif method == 'gauss':
            # Gaussian approximation of the null distribution per trial
            n = np.asarray(spikes_per_trial)
            mu = np.sqrt(np.pi) / 2. * np.mean(1. / np.sqrt(n))
            N = len(spikes_per_trial)
            s = np.sqrt(np.mean(1. / n - np.pi / 4. / n) / N)
            y = stats.norm.ppf(1 - alpha, loc=mu, scale=s)
        else:
            raise ValueError("Method %s not known" % (method,))
        # clip the spectrum to [-f_max, f_max]
        idx = (freqs >= -f_max) & (freqs <= f_max)
        return freqs[idx], m_ampl[idx], y
    def _make_tuples(self, key):
        """Populate one 2nd-order spectrum for the run identified by *key*."""
        print('Processing {cell_id} run {run_id}'.format(**key))
        dat = (Runs() & key).fetch1(as_dict=True)
        dt = 1 / dat['samplingrate']
        t = np.arange(0, dat['duration'], dt)
        st = (Runs.SpikeTimes() & key).fetch(as_dict=True)
        st = [s['times'] / 1000 for s in st if len(s) > 0]  # convert to s and drop empty trials
        f_max = (SpectraParameters() & key).fetch1('f_max')
        key['frequencies'], key['vector_strengths'], key['critical_value'] = \
            SecondOrderSpikeSpectra.compute_2nd_order_spectrum(st, t, 1 / dt, alpha=0.001, method='poisson',
                                                               f_max=f_max)
        self.insert1(key)
@schema
class SecondOrderSignificantPeaks(dj.Computed):
    """Significant locking peaks extracted from the 2nd-order spike spectra."""
    definition = """
    # hold significant peaks in spektra
    stimulus_coeff : int # how many multiples of the stimulus
    eod_coeff : int # how many multiples of the eod
    baseline_coeff : int # how many multiples of the baseline firing rate
    refined : int # whether the search was refined or not
    ->SecondOrderSpikeSpectra
    ---
    frequency : double # frequency at which there is significant locking
    vector_strength : double # vector strength at that frequency
    tolerance : double # tolerance within which a peak was accepted
    """
    def _make_tuples(self, key):
        """Search the 2nd-order spectrum for significant peaks at integer
        combinations of stimulus, EOD, and baseline frequency and insert them.
        Near-duplicate peaks get negative ``refined`` values to keep the
        primary key unique.
        """
        double_peaks = -1  # negative counter to disambiguate duplicate peaks
        data = (SecondOrderSpikeSpectra() & key).fetch1()
        run = (Runs() & key).fetch1()
        cell = (Cells() & key).fetch1()
        st = (Runs.SpikeTimes() & key).fetch(as_dict=True)
        spikes = [s['times'] / 1000 for s in st]  # convert to s
        # base frequencies whose integer multiples are tested for locking
        interesting_frequencies = {'stimulus_coeff': run['eod'] + run['delta_f'], 'eod_coeff': run['eod'],
                                   'baseline_coeff': cell['baseline']}
        f_max = (SpectraParameters() & key).fetch1('f_max')
        sas = find_significant_peaks(spikes, data['frequencies'], data['vector_strengths'],
                                     interesting_frequencies, data['critical_value'], upper_cutoff=f_max)
        for s in sas:
            s.update(key)
            try:
                self.insert1(s)
            except dj.DataJointError:  # sometimes one peak has two peaks nearby
                print("Found double peak")
                s['refined'] = double_peaks
                self.insert1(s)
                double_peaks -= 1
@schema
class SamplingPointsPerBin(dj.Lookup):
    """Lookup of histogram bin sizes, expressed in sampling points per bin."""
    definition = """
    # sampling points per bin
    n : int # sampling points per bin
    ---
    """
    # available bin sizes: 2, 4, and 8 sampling points per bin
    contents = [(2,), (4,), (8,)]
@schema
class PhaseLockingHistogram(dj.Computed):
    """Phase histograms of spikes relative to the period of a significantly
    locked frequency (either the stimulus or the EOD)."""
    definition = """
    # phase locking histogram at significant peaks
    -> FirstOrderSignificantPeaks
    ---
    locking_frequency : double # frequency for which the locking is computed
    peak_frequency : double # frequency as determined by the peaks of the electric field
    spikes : longblob # union of spike times over trials relative to period of locking frequency
    vector_strength : double # vector strength computed from the spikes for sanity checking
    """
    class Histograms(dj.Part):
        definition = """
        ->PhaseLockingHistogram
        ->SamplingPointsPerBin
        ---
        bin_width_radians : double # bin width in radians
        bin_width_time : double # bin width in time
        histogram : longblob # vector of counts
        """
    @property
    def key_source(self):
        # only refined peaks that are pure stimulus- or pure EOD-locking
        # (no baseline multiples, no mixed coefficients)
        return FirstOrderSignificantPeaks() \
               & 'baseline_coeff=0' \
               & '((stimulus_coeff=1 and eod_coeff=0) or (stimulus_coeff=0 and eod_coeff=1))' \
               & 'refined=1'
    def _make_tuples(self, key):
        """Fold aligned spike times by the locking period, store the phases,
        and insert one histogram per configured bin size."""
        key_sub = dict(key)
        delta_f, eod, samplingrate = (Runs() & key).fetch1('delta_f', 'eod', 'samplingrate')
        locking_frequency = (FirstOrderSignificantPeaks() & key).fetch1('frequency')
        if key['eod_coeff'] > 0:
            # convert spikes to s and center on first peak of eod
            # times, peaks = (Runs.SpikeTimes() * LocalEODPeaksTroughs() & key).fetch('times', 'peaks')
            peaks = (Runs.GlobalEODPeaksTroughs() & key).fetch('peaks')
            #
            # spikes = np.hstack([s / 1000 - p[0] / samplingrate for s, p in zip(times, peaks)])
        else:
            # # convert spikes to s and center on first peak of stimulus
            # times, peaks = (Runs.SpikeTimes() * GlobalEFieldPeaksTroughs() & key).fetch('times', 'peaks')
            peaks = (Runs.GlobalEFieldPeaksTroughs() & key).fetch('peaks')
            # spikes = np.hstack([s / 1000 - p[0] / samplingrate for s, p in zip(times, peaks)])
        spikes = np.hstack(TrialAlign().load_trials(key))
        # empirical frequency from the mean inter-peak interval of the field
        key['peak_frequency'] = samplingrate / np.mean([np.diff(p).mean() for p in peaks])
        key['locking_frequency'] = locking_frequency
        cycle = 1 / locking_frequency
        spikes %= cycle  # fold spike times into one locking period
        key['spikes'] = spikes / cycle * 2 * np.pi  # phases in radians
        key['vector_strength'] = 1 - circ.var(key['spikes'])
        self.insert1(key)
        histograms = self.Histograms()
        # NOTE(review): iterates the legacy datajoint fetch object directly --
        # confirm this matches the pinned datajoint version
        for n in SamplingPointsPerBin().fetch:
            n = int(n[0])
            bin_width_time = n / samplingrate
            bin_width_radians = bin_width_time / cycle * np.pi * 2
            bins = np.arange(0, cycle + bin_width_time, bin_width_time)
            key_sub['n'] = n
            key_sub['histogram'], _ = np.histogram(spikes, bins=bins)
            key_sub['bin_width_time'] = bin_width_time
            key_sub['bin_width_radians'] = bin_width_radians
            histograms.insert1(key_sub)
    def violin_plot(self, ax, restrictions, palette):
        """Plot phase distributions per delta_f as split violins
        (EOD locking vs stimulus locking) on *ax*."""
        runs = Runs() * self & restrictions
        if len(runs) == 0:
            return
        df = pd.concat([pd.DataFrame(item) for item in runs.fetch(as_dict=True)])
        # BUG FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
        # .loc is the equivalent boolean/label indexer and works on old pandas too.
        df.loc[df.stimulus_coeff == 1, 'type'] = 'stimulus'
        df.loc[df.eod_coeff == 1, 'type'] = 'EOD'
        delta_fs = np.unique(runs.fetch('delta_f'))
        delta_fs = delta_fs[np.argsort(-delta_fs)]  # descending delta_f on the axis
        sns.violinplot(data=df, y='delta_f', x='spikes', hue='type', split=True, ax=ax, hue_order=['EOD', 'stimulus'],
                       order=delta_fs, palette=palette, cut=0, inner=None, linewidth=.5,
                       orient='h', bw=.05)
@schema
class EODStimulusPSTSpikes(dj.Computed):
    """PST spikes around time points where stimulus and EOD are in phase,
    computed at the difference frequency of both."""
    definition = """
    # PSTH of Stimulus and EOD at the difference frequency of both
    -> FirstOrderSignificantPeaks
    -> CoincidenceTolerance
    cycle_idx : int # index of the cycle
    ---
    stimulus_frequency : double
    eod_frequency : double
    window_half_size : double # spikes will be extracted around +- this size around in phase points of stimulus and eod
    vector_strength_eod : double # vector strength of EOD
    vector_strength_stimulus : double # vector strength of stimulus
    spikes : longblob # spikes in that window
    efield : longblob # stimulus + eod
    """
    @property
    def key_source(self):
        # refined, stimulus-only locking peaks of p-unit cells
        constr = dict(stimulus_coeff=1, baseline_coeff=0, eod_coeff=0, refined=1)
        cell_type = Cells() & dict(cell_type='p-unit')
        return FirstOrderSignificantPeaks() * CoincidenceTolerance() & cell_type & constr
    def _make_tuples(self, key):
        """Extract spike windows around points where EOD and stimulus peaks
        coincide (within the coincidence tolerance) and insert one tuple per
        extracted cycle."""
        # key_sub = dict(key)
        print('Populating', key, flush=True)
        delta_f, eod, samplingrate, duration = (Runs() & key).fetch1('delta_f', 'eod', 'samplingrate', 'duration')
        runs_stim = Runs() * FirstOrderSignificantPeaks() & key
        runs_eod = Runs() * FirstOrderSignificantPeaks() & dict(key, stimulus_coeff=0, eod_coeff=1)
        if len(runs_eod) > 0:
            # duration = runs_eod.fetch1('duration')
            tol = (CoincidenceTolerance() & key).fetch1('tol')
            eod_period = 1 / runs_eod.fetch1('frequency')
            whs = 10 * eod_period
            times, peaks, epeaks, global_eod = \
                (Runs.SpikeTimes() * Runs.GlobalEODPeaksTroughs() * Runs.LocalEOD() \
                 * Runs.GlobalEFieldPeaksTroughs().proj(epeaks='peaks') \
                 & key).fetch('times', 'peaks', 'epeaks', 'local_efield')
            # in-phase points: EOD peaks whose nearest e-field peak is within tol (in s)
            p0 = [peaks[i][
                      np.abs(epeaks[i][:, None] - peaks[i][None, :]).min(axis=0) <= tol * samplingrate] / samplingrate
                  for i in range(len(peaks))]
            # NOTE(review): rebinding `eod` here shadows the EOD frequency fetched above
            spikes, eod, field = [], [], []
            t = np.linspace(0, duration, duration * samplingrate, endpoint=False)
            sampl_times = np.linspace(-whs, whs, 1000)
            for train, eftrain, in_phase in zip(times, global_eod, p0):
                train = np.asarray(train) / 1000  # convert to seconds
                for phase in in_phase:
                    # spikes within +- whs of the in-phase point, re-centered on it
                    chunk = train[(train >= phase - whs) & (train <= phase + whs)] - phase
                    if len(chunk) > 0:
                        spikes.append(chunk)
                        field.append(np.interp(sampl_times + phase, t, eftrain))
            key['eod_frequency'] = runs_eod.fetch1('frequency')
            key['vector_strength_eod'] = runs_eod.fetch1('vector_strength')
            key['stimulus_frequency'] = runs_stim.fetch1('frequency')
            key['vector_strength_stimulus'] = runs_stim.fetch1('vector_strength')
            key['window_half_size'] = whs
            for cycle_idx, train, ef in zip(count(), spikes, field):
                key['spikes'] = train
                key['cycle_idx'] = cycle_idx
                key['efield'] = ef
                self.insert1(key)
    def plot(self, ax, restrictions, coincidence=0.0001, repeats=200):
        """Raster + histogram plot of the PST spikes, grouped by delta_f.

        :param ax: matplotlib axis to draw on
        :param restrictions: datajoint restriction selecting the tuples to plot
        :param coincidence: coincidence tolerance used to select tuples
        :param repeats: maximal number of trials shown per delta_f group
        """
        rel = self * CoincidenceTolerance() * Runs().proj('delta_f') & restrictions & dict(tol=coincidence)
        df = pd.DataFrame(rel.fetch())
        # sort groups by |delta_f| and sign so the y-axis is ordered
        df['adelta_f'] = np.abs(df.delta_f)
        df['sdelta_f'] = np.sign(df.delta_f)
        df.sort_values(['adelta_f', 'sdelta_f'], inplace=True)
        t, e, pe = (BaseEOD() & restrictions).fetch1('time', 'eod_ampl', 'max_idx')
        eod = (Runs() & restrictions).fetch('eod').mean()
        if len(df) > 0:
            whs = df.window_half_size.mean()
            cycles = int(whs * eod) * 2
            db = 2 * whs / 400
            # coarse bins at one EOD period, fine bins at db
            bins = np.arange(-whs, whs + db, 1/np.mean(df['eod_frequency']))
            bins2 = np.arange(-whs, whs + db, db)
            # g = np.exp(-np.linspace(-whs, whs, len(bins) - 1) ** 2 / 2 / (whs / 25) ** 2)
            g = np.exp(-np.linspace(-whs, whs, len(bins) - 1) ** 2 / 2 / (whs / 100) ** 2)
            print('Low pass kernel sigma=', whs / 25)
            bin_centers = 0.5 * (bins[1:] + bins[:-1])
            bin_centers2 = 0.5 * (bins2[1:] + bins2[:-1])
            y = [0]  # running y-offsets between delta_f groups
            yticks = []
            i = 0
            for (adf, sdf), dgr in df.groupby(['adelta_f', 'sdelta_f'], sort=True):
                delta_f = adf * sdf
                yticks.append(delta_f)
                n_trials = min(repeats, len(dgr.spikes))
                dgr = dgr[:n_trials]
                h, bin_edges = np.histogram(np.hstack(dgr.spikes), bins=bins)
                h2, bin_edges2 = np.histogram(np.hstack(dgr.spikes), bins=bins2)
                for sp in dgr.spikes:
                    ax.scatter(sp, 0 * sp + i, s=0.1, zorder=-10, c='k', edgecolors='none')
                    i += 1
                y.append(i)
                # scale both histograms to the height of this raster group
                h = (h * (y[-1] - y[-2]) / h.max())
                h2 = (h2 * (y[-1] - y[-2]) / h2.max())
                ax.bar(bin_centers, h, bottom=y[-2], color="xkcd:powder blue", zorder=-20, lw=0.2, width=1/np.mean(df['eod_frequency']) ,edgecolor='black')
                ax.bar(bin_centers2, h2, bottom=y[-2], color="xkcd:steel blue", zorder=-20, width=0.0001, edgecolor='none')
            if BaseEOD() & restrictions:
                # overlay the cleaned baseline EOD waveform above the rasters
                t, e, pe = (BaseEOD() & restrictions).fetch1('time', 'eod_ampl', 'max_idx')
                t = t / 1000
                pe_t = t[pe]
                t = t - pe_t[cycles // 2]
                fr, to = pe[0], pe[cycles]
                t, e = t[fr:to], e[fr:to]
                e = Baseline.clean_signal(e, eod, t[1] - t[0])
                dy = 0.15 * (y[-1] - y[0])
                e = (e - e.min()) / (e.max() - e.min()) * dy
                ax.plot(t, e + y[-1]+80, lw=2, color='steelblue', zorder=-15, label='EOD')
                y.append(y[-1] + 1.2 * dy)
            y = np.asarray(y)
            ax.set_xlim((-whs, whs))
            ax.set_xticks([-whs, -whs / 2, 0, whs / 2, whs])
            ax.set_xticklabels([-10, -5, 0, 5, 10])
            ax.set_ylabel(r'$\Delta f$ [Hz]')
            ax.tick_params(axis='y', length=3, width=1, which='major')
            ax.set_ylim(y[[0, -1]])
            y = y[:-1]
            ax.set_yticks(0.5 * (y[1:] + y[:-1]))
            ax.set_yticklabels(['%.0f' % yt for yt in yticks])
    def plot_single(self, ax, restrictions, coincidence=0.0001, repeats=20):
        """Raster + PSTH plot for a single condition, with the averaged
        e-field and its AM envelope overlaid.

        :param ax: matplotlib axis to draw on
        :param restrictions: datajoint restriction selecting the tuples to plot
        :param coincidence: coincidence tolerance used to select tuples
        :param repeats: maximal number of trials shown
        """
        rel = self * CoincidenceTolerance() * Runs().proj('delta_f', 'contrast') & restrictions & dict(tol=coincidence)
        df = pd.DataFrame(rel.fetch())
        samplingrate, eod = (Runs() & restrictions).fetch1('samplingrate', 'eod')
        if len(df) > 0:
            whs = df.window_half_size.mean()
            db = 1 / eod
            bins = np.arange(-whs, whs + db, db)
            bin_centers = 0.5 * (bins[1:] + bins[:-1])
            # --- histogram
            h, _ = np.histogram(np.hstack(df.spikes), bins=bins)
            f_max = h.max() / db / len(df.spikes)  # peak firing rate for the scale bar
            h = h.astype(np.float64)
            h *= repeats / h.max() / 2
            ax.bar(bin_centers, h, align='center', width=db, color='lightgray', zorder=-20, lw=0, label='PSTH', edgecolor='black')
            # vertical scale bar calibrated to 450 Hz
            ax.plot(bin_centers[0] * np.ones(2), [repeats // 8, h.max() * 450 / f_max + repeats // 8], '-',
                    color='darkslategray',
                    lw=3, solid_capstyle='butt')
            ax.text(bin_centers[0] + db / 4, repeats / 5, '450 Hz')
            # y = np.asarray(y)
            if len(df) > repeats:
                df = df[:repeats]
            for offset, sp in zip(count(start=repeats // 2 + 1), df.spikes):
                # ax.plot(sp, 0 * sp + offset, '.k', mfc='k', ms=2, zorder=-10, rasterized=False,
                #         label='spikes' if offset == repeats // 2 + 1 else None)
                ax.vlines(sp, 0 * sp + offset, 0 * sp + offset + 1, 'k', zorder=-10, rasterized=False,
                          label='spikes' if offset == repeats // 2 + 1 else None)
                offset += 1
            # normalized trial-averaged e-field and spline fits to its envelope
            norm = lambda x: (x - x.min()) / (x.max() - x.min())
            avg_efield = norm(np.mean(df.efield, axis=0)) * repeats / 2
            t = np.linspace(-whs, whs, len(avg_efield), endpoint=False)
            high, hidx, low, lidx = peakdet(avg_efield, delta=0.01)
            fh = InterpolatedUnivariateSpline(t[hidx], high, k=3)
            fl = InterpolatedUnivariateSpline(t[lidx], low, k=3)
            ax.plot(t, avg_efield + offset, lw=2, color=colordict['stimulus'], zorder=-15,
                    label='stimulus + EOD')
            ax.plot(t, fh(t) + offset, lw=2, color=colordict['delta_f'], zorder=-15, label='AM')
            ax.plot(t, fl(t) + offset, lw=2, color=colordict['delta_f'], zorder=-15)
            ax.set_xlim((-whs, whs))
            ax.set_xticks([-whs, -whs / 2, 0, whs / 2, whs])
            ax.set_xticklabels([-10, -5, 0, 5, 10])
            ax.tick_params(axis='y', length=0, width=0, which='major')
            ax.set_yticks([])
            # ax.set_yticklabels(['%.0f' % yt for yt in yticks])
            ax.set_ylim((0, 2.4 * repeats))
@schema
class Decoding(dj.Computed):
    """Refined beat and stimulus frequencies per run, with per-trial vector
    strengths stored in the Beat and Stimulus part tables."""
    definition = """
    # locking by decoding time
    -> Runs
    ---
    beat : float # refined beat frequency
    stimulus : float # refined stimulus frequency
    """
    class Beat(dj.Part):
        definition = """
        -> Decoding
        -> Runs.SpikeTimes
        ---
        crit_beat=null : float # critical value for beat locking
        vs_beat=null : float # vector strength for full trial
        """
    class Stimulus(dj.Part):
        definition = """
        -> Decoding
        -> Runs.SpikeTimes
        ---
        crit_stimulus=null : float # critical value for stimulus locking
        vs_stimulus=null : float # vector strength for full trial
        """
    @property
    def key_source(self):
        # only p-unit cells
        return Runs() * Cells() & dict(cell_type='p-unit')
    def _make_tuples(self, key):
        """Refine delta_f and stimulus frequency on all spikes of the run,
        then compute per-trial vector strengths at the refined frequencies."""
        print('Processing', key['cell_id'], 'run', key['run_id'], )
        dat = (Runs() & key).fetch(as_dict=True)[0]
        spike_times, trial_ids = (Runs.SpikeTimes() & key).fetch('times', 'trial_id')
        spike_times = [s / 1000 for s in spike_times]  # convert to s
        # refine delta f locking on all spikes
        delta_f = find_best_locking(spike_times, [dat['delta_f']], tol=3)[0][0]
        stimulus_frequency = find_best_locking(spike_times, [dat['delta_f'] + dat['eod']], tol=3)[0][0]
        self.insert1(dict(key, beat=delta_f, stimulus=stimulus_frequency))
        stim = self.Stimulus()
        beat = self.Beat()
        for key['trial_id'], trial in zip(trial_ids, spike_times):
            v, c = vector_strength_at(stimulus_frequency, trial, alpha=0.001)
            if np.isinf(c):
                # BUG FIX: np.NaN alias was removed in NumPy 2.0; np.nan is the
                # canonical spelling and identical on older versions.
                c = np.nan
            stim.insert1(dict(key, vs_stimulus=v, crit_stimulus=c))
            v, c = vector_strength_at(delta_f, trial, alpha=0.001)
            if np.isinf(c):
                c = np.nan
            beat.insert1(dict(key, vs_beat=v, crit_beat=c))
@schema
class BaselineSpikeJitter(dj.Computed):
    """Circular statistics of baseline spike phases within one EOD period."""
    definition = """
    # circular variance and mean of spike times within an EOD period
    -> Baseline
    ---
    base_var : double # circular variance
    base_std : double # circular std
    base_mean : double # circular mean
    """
    @property
    def key_source(self):
        # p-unit cells with detected local EOD peaks
        return Baseline() & Baseline.LocalEODPeaksTroughs() & dict(cell_type='p-unit')
    def _make_tuples(self, key):
        """Fold baseline spikes into one EOD period, convert to phase, and
        insert circular variance/mean/std."""
        print('Processing', key['cell_id'])
        sampling_rate, eod = (Baseline() & key).fetch1('samplingrate', 'eod')
        dt = 1. / sampling_rate
        trials = Baseline.LocalEODPeaksTroughs() * Baseline.SpikeTimes() & key
        # spikes in s, re-referenced to the first EOD peak of each trial
        aggregated_spikes = np.hstack([s / 1000 - p[0] * dt for s, p in zip(*trials.fetch('times', 'peaks'))])
        aggregated_spikes %= 1 / eod
        aggregated_spikes *= eod * 2 * np.pi  # normalize to 2*pi
        key['base_var'], key['base_mean'], key['base_std'] = \
            circ.var(aggregated_spikes), circ.mean(aggregated_spikes), circ.std(aggregated_spikes)
        self.insert1(key)
@schema
class StimulusSpikeJitter(dj.Computed):
    """Circular statistics of spike phases within one EOD period during stimulation."""
    definition = """
    # circular variance and std of spike times within an EOD period during stimulation
    -> Runs
    ---
    stim_var : double # circular variance
    stim_std : double # circular std
    stim_mean : double # circular mean
    """
    @property
    def key_source(self):
        # only runs for which aligned trials exist
        return Runs() & TrialAlign()
    def _make_tuples(self, key):
        """Fold aligned spikes into one EOD period and insert circular
        variance/mean/std. Uses the refined EOD frequency from the 2nd-order
        spectrum when available, otherwise the recorded EOD frequency.
        """
        print('Processing', key['cell_id'], 'run', key['run_id'])
        if SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0, refined=1):
            eod, vs = (SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0,
                                                            refined=1)).fetch1('frequency', 'vector_strength')
        elif SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0, refined=0):
            eod, vs = (SecondOrderSignificantPeaks() & dict(key, eod_coeff=1, stimulus_coeff=0, baseline_coeff=0,
                                                            refined=0)).fetch1('frequency', 'vector_strength')
        else:
            # fall back to the recorded EOD frequency
            eod = (Runs() & key).fetch1('eod')
        aggregated_spikes = TrialAlign().load_trials(key)
        if len(aggregated_spikes) == 0:
            warn('TrialAlign returned no spikes. Skipping')
            return
        else:
            aggregated_spikes = np.hstack(aggregated_spikes)
        aggregated_spikes %= 1 / eod  # fold into one EOD period
        aggregated_spikes *= eod * 2 * np.pi  # normalize to 2*pi
        # NOTE(review): with fewer than 2 spikes, key is inserted without the
        # stim_* fields -- confirm this is intended
        if len(aggregated_spikes) > 1:
            key['stim_var'], key['stim_mean'], key['stim_std'] = \
                circ.var(aggregated_spikes), circ.mean(aggregated_spikes), circ.std(aggregated_spikes)
        self.insert1(key)
| [
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"numpy.argsort",
"numpy.histogram",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"pycircstat.std",
"numpy.interp",
"numpy.unique",
"pandas.DataFrame",
"pycircstat.mean",
"scipy.interpolate.InterpolatedUnivariateSpline",
"pycircstat.var",
"pycircs... | [((795, 819), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (810, 819), True, 'import seaborn as sns\n'), ((1071, 1111), 'sympy.symbols', 'sympy.symbols', (['"""f_s, EODf, f_b, \\\\Delta"""'], {}), "('f_s, EODf, f_b, \\\\Delta')\n", (1084, 1111), False, 'import sympy\n'), ((9243, 9267), 'numpy.unique', 'np.unique', (['sampling_rate'], {}), '(sampling_rate)\n', (9252, 9267), True, 'import numpy as np\n'), ((9505, 9542), 'numpy.arange', 'np.arange', (['(0)', '(0.01)', '(1 / sampling_rate)'], {}), '(0, 0.01, 1 / sampling_rate)\n', (9514, 9542), True, 'import numpy as np\n'), ((11239, 11295), 'pycircstat.event_series.direct_vector_strength_spectrum', 'es.direct_vector_strength_spectrum', (['aggregated_spikes', 'f'], {}), '(aggregated_spikes, f)\n', (11273, 11295), True, 'from pycircstat import event_series as es\n'), ((15555, 15582), 'numpy.mean', 'np.mean', (['vs_spectra'], {'axis': '(0)'}), '(vs_spectra, axis=0)\n', (15562, 15582), True, 'import numpy as np\n'), ((16926, 16959), 'numpy.arange', 'np.arange', (['(0)', "dat['duration']", 'dt'], {}), "(0, dat['duration'], dt)\n", (16935, 16959), True, 'import numpy as np\n'), ((23033, 23240), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'df', 'y': '"""delta_f"""', 'x': '"""spikes"""', 'hue': '"""type"""', 'split': '(True)', 'ax': 'ax', 'hue_order': "['EOD', 'stimulus']", 'order': 'delta_fs', 'palette': 'palette', 'cut': '(0)', 'inner': 'None', 'linewidth': '(0.5)', 'orient': '"""h"""', 'bw': '(0.05)'}), "(data=df, y='delta_f', x='spikes', hue='type', split=True, ax\n =ax, hue_order=['EOD', 'stimulus'], order=delta_fs, palette=palette,\n cut=0, inner=None, linewidth=0.5, orient='h', bw=0.05)\n", (23047, 23240), True, 'import seaborn as sns\n'), ((26807, 26825), 'numpy.abs', 'np.abs', (['df.delta_f'], {}), '(df.delta_f)\n', (26813, 26825), True, 'import numpy as np\n'), ((26851, 26870), 'numpy.sign', 'np.sign', (['df.delta_f'], {}), '(df.delta_f)\n', (26858, 26870), 
True, 'import numpy as np\n'), ((2020, 2042), 'numpy.hstack', 'np.hstack', (['(f, peak_f)'], {}), '((f, peak_f))\n', (2029, 2042), True, 'import numpy as np\n'), ((2059, 2081), 'numpy.hstack', 'np.hstack', (['(v, peak_v)'], {}), '((v, peak_v))\n', (2068, 2081), True, 'import numpy as np\n'), ((2100, 2113), 'numpy.argsort', 'np.argsort', (['f'], {}), '(f)\n', (2110, 2113), True, 'import numpy as np\n'), ((11772, 11800), 'numpy.hstack', 'np.hstack', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (11781, 11800), True, 'import numpy as np\n'), ((11827, 11877), 'warnings.warn', 'warn', (['"""Trial align returned no spikes! Continuing"""'], {}), "('Trial align returned no spikes! Continuing')\n", (11831, 11877), False, 'from warnings import warn\n'), ((12138, 12150), 'numpy.isnan', 'np.isnan', (['vs'], {}), '(vs)\n', (12146, 12150), True, 'import numpy as np\n'), ((15641, 15666), 'numpy.mean', 'np.mean', (['spikes_per_trial'], {}), '(spikes_per_trial)\n', (15648, 15666), True, 'import numpy as np\n'), ((15683, 15707), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(10000)'], {}), '(0, 2, 10000)\n', (15694, 15707), True, 'import numpy as np\n'), ((16081, 16103), 'numpy.sqrt', 'np.sqrt', (['(s - mu ** 2.0)'], {}), '(s - mu ** 2.0)\n', (16088, 16103), True, 'import numpy as np\n'), ((21980, 22003), 'pycircstat.var', 'circ.var', (["key['spikes']"], {}), "(key['spikes'])\n", (21988, 22003), True, 'import pycircstat as circ\n'), ((22276, 22328), 'numpy.arange', 'np.arange', (['(0)', '(cycle + bin_width_time)', 'bin_width_time'], {}), '(0, cycle + bin_width_time, bin_width_time)\n', (22285, 22328), True, 'import numpy as np\n'), ((22396, 22427), 'numpy.histogram', 'np.histogram', (['spikes'], {'bins': 'bins'}), '(spikes, bins=bins)\n', (22408, 22427), True, 'import numpy as np\n'), ((23001, 23022), 'numpy.argsort', 'np.argsort', (['(-delta_fs)'], {}), '(-delta_fs)\n', (23011, 23022), True, 'import numpy as np\n'), ((25446, 25511), 'numpy.linspace', 'np.linspace', 
(['(0)', 'duration', '(duration * samplingrate)'], {'endpoint': '(False)'}), '(0, duration, duration * samplingrate, endpoint=False)\n', (25457, 25511), True, 'import numpy as np\n'), ((25538, 25566), 'numpy.linspace', 'np.linspace', (['(-whs)', 'whs', '(1000)'], {}), '(-whs, whs, 1000)\n', (25549, 25566), True, 'import numpy as np\n'), ((27313, 27342), 'numpy.arange', 'np.arange', (['(-whs)', '(whs + db)', 'db'], {}), '(-whs, whs + db, db)\n', (27322, 27342), True, 'import numpy as np\n'), ((29403, 29416), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (29413, 29416), True, 'import numpy as np\n'), ((30292, 30321), 'numpy.arange', 'np.arange', (['(-whs)', '(whs + db)', 'db'], {}), '(-whs, whs + db, db)\n', (30301, 30321), True, 'import numpy as np\n'), ((31864, 31912), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['t[hidx]', 'high'], {'k': '(3)'}), '(t[hidx], high, k=3)\n', (31892, 31912), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((31930, 31977), 'scipy.interpolate.InterpolatedUnivariateSpline', 'InterpolatedUnivariateSpline', (['t[lidx]', 'low'], {'k': '(3)'}), '(t[lidx], low, k=3)\n', (31958, 31977), False, 'from scipy.interpolate import InterpolatedUnivariateSpline\n'), ((34412, 34423), 'numpy.isinf', 'np.isinf', (['c'], {}), '(c)\n', (34420, 34423), True, 'import numpy as np\n'), ((34602, 34613), 'numpy.isinf', 'np.isinf', (['c'], {}), '(c)\n', (34610, 34613), True, 'import numpy as np\n'), ((35706, 35733), 'pycircstat.var', 'circ.var', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (35714, 35733), True, 'import pycircstat as circ\n'), ((35735, 35763), 'pycircstat.mean', 'circ.mean', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (35744, 35763), True, 'import pycircstat as circ\n'), ((35765, 35792), 'pycircstat.std', 'circ.std', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (35773, 35792), True, 'import pycircstat as circ\n'), ((37178, 37225), 'warnings.warn', 
'warn', (['"""TrialAlign returned no spikes. Skipping"""'], {}), "('TrialAlign returned no spikes. Skipping')\n", (37182, 37225), False, 'from warnings import warn\n'), ((37291, 37319), 'numpy.hstack', 'np.hstack', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (37300, 37319), True, 'import numpy as np\n'), ((11073, 11086), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (11081, 11086), True, 'import numpy as np\n'), ((11088, 11101), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (11096, 11101), True, 'import numpy as np\n'), ((16305, 16333), 'numpy.asarray', 'np.asarray', (['spikes_per_trial'], {}), '(spikes_per_trial)\n', (16315, 16333), True, 'import numpy as np\n'), ((16514, 16556), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['(1 - alpha)'], {'loc': 'mu', 'scale': 's'}), '(1 - alpha, loc=mu, scale=s)\n', (16528, 16556), False, 'from scipy import optimize, stats, signal\n'), ((22755, 22773), 'pandas.DataFrame', 'pd.DataFrame', (['item'], {}), '(item)\n', (22767, 22773), True, 'import pandas as pd\n'), ((26386, 26393), 'itertools.count', 'count', ([], {}), '()\n', (26391, 26393), False, 'from itertools import count\n'), ((30438, 30458), 'numpy.hstack', 'np.hstack', (['df.spikes'], {}), '(df.spikes)\n', (30447, 30458), True, 'import numpy as np\n'), ((31133, 31162), 'itertools.count', 'count', ([], {'start': '(repeats // 2 + 1)'}), '(start=repeats // 2 + 1)\n', (31138, 31162), False, 'from itertools import count\n'), ((37546, 37573), 'pycircstat.var', 'circ.var', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (37554, 37573), True, 'import pycircstat as circ\n'), ((37575, 37603), 'pycircstat.mean', 'circ.mean', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (37584, 37603), True, 'import pycircstat as circ\n'), ((37605, 37632), 'pycircstat.std', 'circ.std', (['aggregated_spikes'], {}), '(aggregated_spikes)\n', (37613, 37632), True, 'import pycircstat as circ\n'), ((2250, 2261), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (2258, 
2261), True, 'import numpy as np\n'), ((2703, 2723), 'numpy.abs', 'np.abs', (['df.ix[:, :3]'], {}), '(df.ix[:, :3])\n', (2709, 2723), True, 'import numpy as np\n'), ((11326, 11339), 'numpy.log', 'np.log', (['alpha'], {}), '(alpha)\n', (11332, 11339), True, 'import numpy as np\n'), ((15439, 15493), 'pycircstat.event_series.vector_strength_spectrum', 'es.vector_strength_spectrum', (['sp', 'sampling_rate'], {'time': 't'}), '(sp, sampling_rate, time=t)\n', (15466, 15493), True, 'from pycircstat import event_series as es\n'), ((25664, 25681), 'numpy.asarray', 'np.asarray', (['train'], {}), '(train)\n', (25674, 25681), True, 'import numpy as np\n'), ((27263, 27291), 'numpy.mean', 'np.mean', (["df['eod_frequency']"], {}), "(df['eod_frequency'])\n", (27270, 27291), True, 'import numpy as np\n'), ((28071, 28092), 'numpy.hstack', 'np.hstack', (['dgr.spikes'], {}), '(dgr.spikes)\n', (28080, 28092), True, 'import numpy as np\n'), ((28151, 28172), 'numpy.hstack', 'np.hstack', (['dgr.spikes'], {}), '(dgr.spikes)\n', (28160, 28172), True, 'import numpy as np\n'), ((30766, 30776), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (30773, 30776), True, 'import numpy as np\n'), ((16351, 16365), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (16358, 16365), True, 'import numpy as np\n'), ((16460, 16494), 'numpy.mean', 'np.mean', (['(1.0 / n - np.pi / 4.0 / n)'], {}), '(1.0 / n - np.pi / 4.0 / n)\n', (16467, 16494), True, 'import numpy as np\n'), ((31665, 31691), 'numpy.mean', 'np.mean', (['df.efield'], {'axis': '(0)'}), '(df.efield, axis=0)\n', (31672, 31691), True, 'import numpy as np\n'), ((15874, 15893), 'numpy.exp', 'np.exp', (['(-poiss_rate)'], {}), '(-poiss_rate)\n', (15880, 15893), True, 'import numpy as np\n'), ((16037, 16056), 'numpy.exp', 'np.exp', (['(-poiss_rate)'], {}), '(-poiss_rate)\n', (16043, 16056), True, 'import numpy as np\n'), ((16386, 16396), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (16393, 16396), True, 'import numpy as np\n'), ((21740, 21750), 
'numpy.diff', 'np.diff', (['p'], {}), '(p)\n', (21747, 21750), True, 'import numpy as np\n'), ((25962, 26004), 'numpy.interp', 'np.interp', (['(sampl_times + phase)', 't', 'eftrain'], {}), '(sampl_times + phase, t, eftrain)\n', (25971, 26004), True, 'import numpy as np\n'), ((28593, 28621), 'numpy.mean', 'np.mean', (["df['eod_frequency']"], {}), "(df['eod_frequency'])\n", (28600, 28621), True, 'import numpy as np\n'), ((3782, 3793), 'numpy.sign', 'np.sign', (['ce'], {}), '(ce)\n', (3789, 3793), True, 'import numpy as np\n'), ((8103, 8136), 'numpy.abs', 'np.abs', (['(sp[:, None] - ep[None, :])'], {}), '(sp[:, None] - ep[None, :])\n', (8109, 8136), True, 'import numpy as np\n'), ((25242, 25288), 'numpy.abs', 'np.abs', (['(epeaks[i][:, None] - peaks[i][None, :])'], {}), '(epeaks[i][:, None] - peaks[i][None, :])\n', (25248, 25288), True, 'import numpy as np\n'), ((15807, 15822), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (15813, 15822), True, 'import numpy as np\n'), ((15970, 15985), 'numpy.exp', 'np.exp', (['(-r ** 2)'], {}), '(-r ** 2)\n', (15976, 15985), True, 'import numpy as np\n')] |
import subprocess
from subprocess import call
import pandas as pd
import os
import shutil
import gzip
import json
import sys
import mygene
import pickle
def uncompress_gzip(file_name, new_name=None, delete=True):
    """Decompress a gzip file to disk.

    :param file_name: path of the gzip-compressed input file
    :param new_name: path of the decompressed output; defaults to file_name
        without its '.gz' suffix
    :param delete: if True, remove the compressed input file afterwards
    """
    if new_name is None:
        # BUG FIX: file_name.strip('.gz') stripped any leading/trailing '.', 'g',
        # 'z' characters (e.g. 'gzip_log.gz' -> 'ip_log'); only drop the suffix.
        new_name = file_name[:-len('.gz')] if file_name.endswith('.gz') else file_name
    # Stream the decompressed bytes instead of loading the whole file into
    # memory, and use context managers so handles are closed even on error.
    with gzip.open(file_name, 'rb') as in_file, open(new_name, 'wb') as out_file:
        shutil.copyfileobj(in_file, out_file)
    if delete:
        os.remove(file_name)
def execute(comando, doitlive=False, input_to_use=None, verbose=True):
    """Run a command (no shell) and return its stdout.

    :param comando: command string; split on single spaces into an argument list
    :param doitlive: if True, read and echo the output as the process runs
        (this branch returns None, mirroring the original behavior)
    :param input_to_use: optional string fed to the process' stdin
    :param verbose: if True, print the captured output
    :returns: stdout with the trailing newline stripped (doitlive=False), else None
    :raises subprocess.CalledProcessError: if the doitlive process exits non-zero
    """
    # result = subprocess.run(['ls', '-l'], stdout=subprocess.PIPE)
    comando = comando.split(' ')
    if doitlive:
        popen = subprocess.Popen(comando, stdout=subprocess.PIPE, universal_newlines=True)
        to_return = popen.stdout.read()
        for line in to_return:
            if verbose:  # I see no reason to doitlive and have it be not verbose, but to each their own.
                print(line, end='')
        popen.stdout.close()
        return_code = popen.wait()
        if return_code:
            raise subprocess.CalledProcessError(return_code, comando)
    else:
        if input_to_use is not None:
            # BUG FIX: was input_to_use.ecode('utf-8'), which raised
            # AttributeError whenever stdin input was supplied.
            input_to_use = input_to_use.encode('utf-8')
        result = subprocess.run(comando, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=input_to_use)
        to_return = result.stdout.decode('utf-8')
        if verbose:
            print(to_return)
        return to_return.strip('\n')
def make_sample_information_file(name, manifest_df, name_id_dict):
# pwd = execute('pwd', doitlive=False, verbose=False)
# destination = os.path.join(pwd, 'unused_files')
# if os.path.isdir(destination):
# shutil.rmtree(destination)
# name = 'TCGA_' + dataset_name + '.txt'
file = open(name, 'w')
file.write('File\tClass\tSample_Name\n')
ignored_files = []
ignored_twice = []
ignored_flag = False
for f in manifest_df['filename']:
# TODO: add a filter here for the types of samples we want. I am using all "0x"and "1x" samples...
# but presumably we only want "01" and "11" but then we should remove those from the directory (moved them to "unused")
if name_id_dict[f][13:15] == '01':
# file.write('\t'.join([name_id_dict[f]+'.htseq',class_dict[name_id_dict[f][17:19]] ,name_id_dict[f]]))
file.write('\t'.join([name_id_dict[f]+'.htseq', 'Tumor', name_id_dict[f]]))
file.write('\n')
# print(name_id_dict[f])
elif name_id_dict[f][13:15] == '03':
print('\tNot ignoring file named "{}" because sample is tagged as'
' "Primary Blood Derived Cancer - Peripheral Blood"'
'(i.e., sample id = {}), this is usually expected for LAML.'
.format(name_id_dict[f], name_id_dict[f][13:15]))
# file.write('\t'.join([name_id_dict[f]+'.htseq',class_dict[name_id_dict[f][17:19]] ,name_id_dict[f]]))
file.write('\t'.join([name_id_dict[f]+'.htseq', 'Tumor', name_id_dict[f]]))
file.write('\n')
# print(name_id_dict[f])
elif name_id_dict[f][13:15] == '11':
file.write('\t'.join([name_id_dict[f]+'.htseq', 'Normal', name_id_dict[f]]))
file.write('\n')
# print(name_id_dict[f])
else:
print('\tIgnoring file named "{}" because sample is neither Primary Tumor nor Matched Normal tissue '
'(i.e., sample id = {}).'.format(name_id_dict[f], name_id_dict[f][13:15]))
# Move from raw_count_files to unused_files
if name_id_dict[f] not in ignored_files:
ignored_flag = True
ignored_files.append(name_id_dict[f]) #keeping a list of the files which have been deleted prevent double deletion.
pwd = execute('pwd', doitlive=False, verbose=False)
destination = os.path.join(pwd, 'unused_files')
# if os.path.isdir(destination):
# shutil.rmtree(destination)
if not os.path.isdir(destination):
os.mkdir(destination)
# Move the downloaded files to a folder
source = os.path.join(pwd, 'raw_count_files', name_id_dict[f]+'.htseq.counts')
print("source", source)
print("destination", destination)
# try:
shutil.move(source, os.path.join(destination, name_id_dict[f]+'.htseq.counts'))
# except shutil.Error:
# shutil.move(source, destination)
# shutil.rmtree(os.path.join(pwd, 'raw_count_files')) # Remove those files/folders from current directory
# print(f)
# print(name_id_dict[f]+'.htseq.counts')
else:
print("This sample has been removed already only one sample with the same ID is allowed. "
"Consider setting 'long IDs' to 'True'"
"[as of 2018-07-06 this feature is yet to be implemented]")
ignored_twice.append(name_id_dict[f])
file.close()
if ignored_flag:
print("The following files were ignored due to having the same ID as other sample:")
print(ignored_twice)
return
def remove_duplicate_genes(df):
"""
TCGA has two duplicated genes RGS5 and POLR2J4.
Rather than getting them manually, we'll check for all duplicated genes.
"""
try:
new_ix = df.index.droplevel(1).values
df.index = new_ix
except AttributeError:
print("Dataframe only has one index, that's alright.")
except IndexError:
print("Using Pandas>1.0; Dataframe only has one index, that's alright.")
except ValueError:
print("Dataframe only has one index but it thinks it's multi-indexed, that's weird but alright.")
# print(df)
# print("")
# print(df.columns)
# print('getting first row')
# print(df.ix[1,:])
# print('getting first column')
# print(df.ix[:, 0].ix[:, 0])
# print('index')
# print(df.index)
# print(df.index.values)
s = pd.Series(df['Name'])
# print(s)
import numpy as np
repeated = s[s.duplicated()]
repeated = np.unique(repeated.values)
# print("---")
# print(s)
# print("@@@@@@@@@@")
# print("Repeated are")
# print(repeated)
print(f"There were {len(repeated)} repeated genes.")
print("Note that mygene.info's ENSEMBL ID to HUGO ID are not a 1 to 1 mapping, hence the replication.")
df.set_index('Name', inplace=True)
# print(df.head())
# print(df.loc['TSPAN6',:])
print(repeated)
for gene in repeated:
# if gene == 'RF00019':
# # print(gene)
# temp = df.loc[gene, :]
# df.drop(gene, axis=0, inplace=True)
# # print('before:')
# # print(temp)
# # print('after max:')
# temp_max = temp.max(axis=0, numeric_only=True)
# # print(temp_max)
# # temp_max.insert(0, 'ENSEMBL ID', 'Multiple ENSEMBL IDs')
# temp_max = pd.concat([pd.Series(['Multiple ENSEMBL IDs']), temp_max])
# # print(temp_max)
# columns = temp_max.values
# # columns = columns[-1:] + columns[:-1] # Moving the ENSEMBL IDs first
# # print(columns)
#
# df.loc[gene] = columns
# # print('after collapse')
# # print(df.loc[gene])
# # exit(192)
# temp = df.loc[gene,:]
# df.drop(gene, axis=0, inplace=True)
# df.loc[gene] = temp.max(axis=0)
# if gene == 'RF00019':
temp = df.loc[gene,:]
df.drop(gene, axis=0, inplace=True)
temp_max = temp.max(axis=0, numeric_only=True)
temp_max = pd.concat([pd.Series(['Multiple ENSEMBL IDs']), temp_max])
df.loc[gene] = temp_max.values
return df
def make_gct(file_list, translate_bool, file_name, cls_bool):
"""
This function makes a GCT file by concatenating all the files present in file_list
"""
df_gct = None
if translate_bool:
print('translation done with mygene.info')
# get sample names
sample_list = []
# sample_list.append("GID") # 2018-02-07 Changing this to Name
sample_list.append("Name")
# sample_list.append("NAME") # 2018-02-07 Changing this to Description to conform to other GenePattern GCT files
sample_list.append("Description")
#CLS file generation
#01 - 09 tumor, 10 - 19 normal, 20 - 29 control
cls_list = []
removed_for_repetition = 0
# add data from every file in list to dataframe if exists
for file in file_list:
if os.path.exists(file):
# get sample name
splited = file.split('/')
# # splited = splited[len(splited) - 1].split('.')[0][:19] # After 15 IDs become redundant
splited = splited[len(splited) - 1].split('.')[0][:15]
if splited not in sample_list:
sample_list.append(splited)
cls_list.append(splited[-2:])
# read in file
df_curr = pd.read_table(file, header=None)
df_curr.columns = ['ENSEMBL ID', 'Counts']
# if first file, get gene translations and ensembl ids
if df_gct is None:
df_gct = df_curr.copy()
df_curr.drop(df_curr.columns[1, ], axis=1, inplace=True)
df_curr[df_curr.columns[0]] = df_curr[df_curr.columns[0]].apply(lambda x: x.split(".")[0])
if translate_bool:
print("Translating genes now")
df_curr[df_curr.columns[0]] = df_curr[df_curr.columns[0]].apply(lambda x: translate(x))
df_curr.columns = ['Name']
df_gct = pd.concat([df_curr, df_gct], axis=1)
# otherwise just concatenate
else:
# get counts column and concatenate
df_curr.drop(df_curr.columns[0,], axis=1, inplace=True)
df_gct = pd.concat([df_gct, df_curr], axis=1)
else:
removed_for_repetition += 1
print("{} samples were not included due to repetition, only one sample per unique ID is being used.".format(
removed_for_repetition))
# remove last 5 rows, which are not genes
df_gct = df_gct[:-5]
# remove repeated genes
df_gct = remove_duplicate_genes(df_gct)
# start writing gct file
f = open(str(file_name+".gct"), "w")
# headers
f.write("#1.2")
# # The next two lines add enough tabs, removed for now on 2018-02-07
# for i in range(len(sample_list)):
# f.write('\t')
f.write('\n')
f.write(str(len(df_gct)) + "\t" + str((len(sample_list) - 2)))
# # The next two lines add enough tabs, removed for now on 2018-02-07
# for i in range(len(sample_list) - 2):
# f.write('\t')
f.write('\n')
# sample names
f.write('\t'.join(sample_list))
# # The following lines do the same but add one extra tab
# for i in range(len(sample_list)):
# f.write(sample_list[i])
# print(sample_list[i])
# f.write('\t')
f.write('\n')
# dataframe
df_gct.to_csv(f, sep='\t', index=True, header=False)
f.close()
if cls_bool:
print("cls_list= ", cls_list)
# start writing cls file
f = open(str(file_name+".cls"), "w")
types = set(cls_list)
# headers
f.write(str(len(cls_list)))
f.write(' ')
f.write(str(len(types)))
f.write(' 1\n#')
type_dict = {}
type_count = 0
for t in types:
type_dict[t] = type_count
type_count += 1
f.write(' ')
f.write(t)
print(t)
f.write('\n')
for val in cls_list[:-1]:
f.write(str(type_dict[val]))
f.write(' ')
f.write(str(type_dict[cls_list[-1]]))
f.close()
mg = mygene.MyGeneInfo()
try:
with open('TCGA_ENSEMBL2HUGO_dictionary.p', 'rb') as handle:
ENSEMBL2HUGO = pickle.load(handle)
except FileNotFoundError:
try:
print("Local version of dictionary not found, trying to explicitly add the PWD")
pwd = os.path.dirname(os.path.realpath(__file__))
print(pwd)
with open(pwd+'/TCGA_ENSEMBL2HUGO_dictionary.p', 'rb') as handle:
ENSEMBL2HUGO = pickle.load(handle)
except FileNotFoundError:
print("Local version of dictionary not found again, trying the docker container version")
with open('/usr/local/bin/TCGAImporter/TCGA_ENSEMBL2HUGO_dictionary.p', 'rb') as handle:
ENSEMBL2HUGO = pickle.load(handle)
def translate(ESNG):
hugo_id = ESNG
try:
hugo_id = ENSEMBL2HUGO[ESNG]
except KeyError:
try:
hugo_id = mg.getgene(ESNG)['symbol']
except TypeError:
hugo_id = ESNG
return hugo_id
class_dict = {
'01': 'Tumor',
'02': 'Tumor',
'03': 'Tumor',
'04': 'Tumor',
'05': 'Tumor',
'06': 'Tumor',
'07': 'Tumor',
'08': 'Tumor',
'09': 'Tumor',
'10': 'Normal',
'11': 'Normal',
'12': 'Normal',
'13': 'Normal',
'14': 'Normal',
'15': 'Normal',
'16': 'Normal',
'17': 'Normal',
'18': 'Normal',
'19': 'Normal',
}
| [
"subprocess.run",
"os.remove",
"subprocess.Popen",
"gzip.open",
"os.mkdir",
"os.path.isdir",
"mygene.MyGeneInfo",
"os.path.realpath",
"os.path.exists",
"subprocess.CalledProcessError",
"pickle.load",
"pandas.Series",
"pandas.read_table",
"os.path.join",
"pandas.concat",
"numpy.unique"
... | [((12382, 12401), 'mygene.MyGeneInfo', 'mygene.MyGeneInfo', ([], {}), '()\n', (12399, 12401), False, 'import mygene\n'), ((288, 314), 'gzip.open', 'gzip.open', (['file_name', '"""rb"""'], {}), "(file_name, 'rb')\n", (297, 314), False, 'import gzip\n'), ((6425, 6446), 'pandas.Series', 'pd.Series', (["df['Name']"], {}), "(df['Name'])\n", (6434, 6446), True, 'import pandas as pd\n'), ((6533, 6559), 'numpy.unique', 'np.unique', (['repeated.values'], {}), '(repeated.values)\n', (6542, 6559), True, 'import numpy as np\n'), ((543, 563), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (552, 563), False, 'import os\n'), ((772, 846), 'subprocess.Popen', 'subprocess.Popen', (['comando'], {'stdout': 'subprocess.PIPE', 'universal_newlines': '(True)'}), '(comando, stdout=subprocess.PIPE, universal_newlines=True)\n', (788, 846), False, 'import subprocess\n'), ((1338, 1433), 'subprocess.run', 'subprocess.run', (['comando'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'input': 'input_to_use'}), '(comando, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n input=input_to_use)\n', (1352, 1433), False, 'import subprocess\n'), ((9012, 9032), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (9026, 9032), False, 'import os\n'), ((12495, 12514), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (12506, 12514), False, 'import pickle\n'), ((1167, 1218), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', (['return_code', 'comando'], {}), '(return_code, comando)\n', (1196, 1218), False, 'import subprocess\n'), ((8121, 8156), 'pandas.Series', 'pd.Series', (["['Multiple ENSEMBL IDs']"], {}), "(['Multiple ENSEMBL IDs'])\n", (8130, 8156), True, 'import pandas as pd\n'), ((9467, 9499), 'pandas.read_table', 'pd.read_table', (['file'], {'header': 'None'}), '(file, header=None)\n', (9480, 9499), True, 'import pandas as pd\n'), ((12669, 12695), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', 
(12685, 12695), False, 'import os\n'), ((12817, 12836), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (12828, 12836), False, 'import pickle\n'), ((10181, 10217), 'pandas.concat', 'pd.concat', (['[df_curr, df_gct]'], {'axis': '(1)'}), '([df_curr, df_gct], axis=1)\n', (10190, 10217), True, 'import pandas as pd\n'), ((10447, 10483), 'pandas.concat', 'pd.concat', (['[df_gct, df_curr]'], {'axis': '(1)'}), '([df_gct, df_curr], axis=1)\n', (10456, 10483), True, 'import pandas as pd\n'), ((13089, 13108), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (13100, 13108), False, 'import pickle\n'), ((4114, 4147), 'os.path.join', 'os.path.join', (['pwd', '"""unused_files"""'], {}), "(pwd, 'unused_files')\n", (4126, 4147), False, 'import os\n'), ((4444, 4515), 'os.path.join', 'os.path.join', (['pwd', '"""raw_count_files"""', "(name_id_dict[f] + '.htseq.counts')"], {}), "(pwd, 'raw_count_files', name_id_dict[f] + '.htseq.counts')\n", (4456, 4515), False, 'import os\n'), ((4281, 4307), 'os.path.isdir', 'os.path.isdir', (['destination'], {}), '(destination)\n', (4294, 4307), False, 'import os\n'), ((4333, 4354), 'os.mkdir', 'os.mkdir', (['destination'], {}), '(destination)\n', (4341, 4354), False, 'import os\n'), ((4679, 4739), 'os.path.join', 'os.path.join', (['destination', "(name_id_dict[f] + '.htseq.counts')"], {}), "(destination, name_id_dict[f] + '.htseq.counts')\n", (4691, 4739), False, 'import os\n')] |
""" Illustris Simulation: Public Data Release.
lhalotree.py: File I/O related to the LHaloTree merger tree files. """
import numpy as np
import h5py
from groupcat import gcPath
from util import partTypeNum
def treePath(basePath,chunkNum=0):
""" Return absolute path to a LHaloTree HDF5 file (modify as needed). """
filePath = basePath + '/trees/treedata/' + 'trees_sf1_135.' + str(chunkNum) + '.hdf5'
return filePath
def treeOffsets(basePath, snapNum, id):
""" Handle offset loading for a LHaloTree merger tree cutout. """
# load groupcat chunk offsets from header of first file
with h5py.File(gcPath(basePath,snapNum),'r') as f:
groupFileOffsets = f['Header'].attrs['FileOffsets_Subhalo']
# calculate target groups file chunk which contains this id
groupFileOffsets = int(id) - groupFileOffsets
fileNum = np.max( np.where(groupFileOffsets >= 0) )
groupOffset = groupFileOffsets[fileNum]
with h5py.File(gcPath(basePath,snapNum,fileNum),'r') as f:
# load the merger tree offsets of this subgroup
TreeFile = f['Offsets']['Subhalo_LHaloTreeFile'][groupOffset]
TreeIndex = f['Offsets']['Subhalo_LHaloTreeIndex'][groupOffset]
TreeNum = f['Offsets']['Subhalo_LHaloTreeNum'][groupOffset]
return TreeFile,TreeIndex,TreeNum
def singleNodeFlat(conn, index, data_in, data_out, count, onlyMPB):
""" Recursive helper function: Add a single tree node. """
data_out[count] = data_in[index]
count += 1
count = recProgenitorFlat(conn,index,data_in,data_out,count,onlyMPB)
return count
def recProgenitorFlat(conn, start_index, data_in, data_out, count, onlyMPB):
""" Recursive helper function: Flatten out the unordered LHaloTree, one data field at a time. """
firstProg = conn["FirstProgenitor"][start_index]
if firstProg < 0:
return count
# depth-ordered traversal (down mpb)
count = singleNodeFlat(conn,firstProg,data_in,data_out,count,onlyMPB)
# explore breadth
if not onlyMPB:
nextProg = conn["NextProgenitor"][firstProg]
while nextProg >= 0:
count = singleNodeFlat(conn,nextProg,data_in,data_out,count,onlyMPB)
nextProg = conn["NextProgenitor"][nextProg]
firstProg = conn["FirstProgenitor"][firstProg]
return count
def loadTree(basePath, snapNum, id, fields=None, onlyMPB=False):
""" Load portion of LHaloTree, for a given subhalo, re-arranging into a flat format. """
TreeFile,TreeIndex,TreeNum = treeOffsets(basePath, snapNum, id)
# config
gName = 'Tree' + str(TreeNum) # group name containing this subhalo
nRows = None # we do not know in advance the size of the tree
# make sure fields is not a single element
if isinstance(fields, basestring):
fields = [fields]
fTree = h5py.File(treePath(basePath,TreeFile),'r')
# if no fields requested, return everything
if not fields:
fields = fTree[gName].keys()
# verify existence of requested fields
for field in fields:
if field not in fTree[gName].keys():
raise Exception('Error: Requested field '+field+' not in tree.')
# load connectivity for this entire TreeX group
connFields = ['FirstProgenitor','NextProgenitor']
conn = {}
for field in connFields:
conn[field] = fTree[gName][field][:]
# determine sub-tree size with dummy walk
dummy = np.zeros( conn['FirstProgenitor'].shape, dtype='int32' )
nRows = singleNodeFlat(conn, TreeIndex, dummy, dummy, 0, onlyMPB)
result = {}
result['count'] = nRows
# walk through connectivity, one data field at a time
for field in fields:
# load field for entire tree? doing so is much faster than randomly accessing the disk
# during walk, assuming that the sub-tree is a large fraction of the full tree, and that
# the sub-tree is large in the absolute sense. the decision is heuristic, and can be
# modified (if you have the tree on a fast SSD, could disable the full load).
if nRows < 1000: # and float(nRows)/len(result['FirstProgenitor']) > 0.1
# do not load, walk with single disk reads
full_data = fTree[gName][field]
else:
# pre-load all, walk in-memory
full_data = fTree[gName][field][:]
# allocate the data array in the sub-tree
dtype = fTree[gName][field].dtype
shape = list(fTree[gName][field].shape)
shape[0] = nRows
data = np.zeros(shape, dtype=dtype)
# walk the tree, depth-first
count = singleNodeFlat(conn, TreeIndex, full_data, data, 0, onlyMPB)
# save field
result[field] = data
fTree.close()
# only a single field? then return the array instead of a single item dict
if len(fields) == 1:
return result[fields[0]]
return result
| [
"groupcat.gcPath",
"numpy.where",
"numpy.zeros"
] | [((3634, 3688), 'numpy.zeros', 'np.zeros', (["conn['FirstProgenitor'].shape"], {'dtype': '"""int32"""'}), "(conn['FirstProgenitor'].shape, dtype='int32')\n", (3642, 3688), True, 'import numpy as np\n'), ((899, 930), 'numpy.where', 'np.where', (['(groupFileOffsets >= 0)'], {}), '(groupFileOffsets >= 0)\n', (907, 930), True, 'import numpy as np\n'), ((4794, 4822), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4802, 4822), True, 'import numpy as np\n'), ((649, 674), 'groupcat.gcPath', 'gcPath', (['basePath', 'snapNum'], {}), '(basePath, snapNum)\n', (655, 674), False, 'from groupcat import gcPath\n'), ((1004, 1038), 'groupcat.gcPath', 'gcPath', (['basePath', 'snapNum', 'fileNum'], {}), '(basePath, snapNum, fileNum)\n', (1010, 1038), False, 'from groupcat import gcPath\n')] |
#!/usr/bin/python
# removeQuadOff_ENVI.py
# Author: <NAME>
# All rights reserved
def residuals(p, y, xs):
err = y;
for i in range(0, len(xs)):
err = err - p[i] * xs[i];
# for i in range(0, len(xs)):
# err = err - p[i] * xs;
return err;
def peval(xs, p):
value = p[0] * xs[0];
for i in range(1, len(xs)):
value = value + p[i] * xs[i];
# for i in range(1, len(xs)):
# value = value + p[i] * xs;
return value;
def removeQuadOff_ENVI(input_image_path):
import numpy;
import pylab;
import re;
import scipy;
import scipy.optimize;
import subprocess;
ramp_removed_image_path = "ramp_removed_" + input_image_path[input_image_path.rfind("/") + 1 : input_image_path.rfind(".")] + ".img";
assert not os.path.exists(ramp_removed_image_path), "\n***** " + ramp_removed_image_path + " already exists, exiting...\n";
cmd = "\ngdalinfo " + input_image_path + "\n";
pipe = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout;
info = pipe.read();
pipe.close();
size = info[re.search("Size is \d+, \d+", info).start(0) + 8 : re.search("Size is \d+, \d+", info).end(0)];
width, length = size.split(",");
width = width.strip();
length = length.strip();
if info.find("ENVI") < 0:
out_path = input_image_path[ input_image_path.rfind("/") + 1 : input_image_path.rfind(".")] + ".img";
cmd = "\ngdalwarp -of ENVI -srcnodata \"nan\" -dstnodata \"nan\" " + input_image_path + " " + out_path + "\n";
subprocess.call(cmd, shell=True);
input_image_path = out_path;
infile = open(input_image_path, "rb");
indat = pylab.fromfile(infile,pylab.float32,-1).reshape(int(length), int(width));
#indat = pylab.fromfile(infile,pylab.float32,-1).reshape(int(width) * int(length), -1);
infile.close();
x = scipy.arange(0, int(length));
y = scipy.arange(0, int(width));
x_grid, y_grid = numpy.meshgrid(x, y);
indices = numpy.arange(0, int(width) * int(length));
mx = scipy.asarray(x_grid).reshape(-1);
my = scipy.asarray(y_grid).reshape(-1);
d = scipy.asarray(indat).reshape(-1);
nonan_ids = indices[scipy.logical_not(numpy.isnan(d))];
mx = mx[nonan_ids];
my = my[nonan_ids];
d = d[nonan_ids];
init_mx = scipy.asarray(x_grid).reshape(-1)[nonan_ids];
init_my = scipy.asarray(y_grid).reshape(-1)[nonan_ids];
#ramp_removed = scipy.asarray(indat).reshape(-1)[nonan_ids];
ramp_removed = scipy.zeros(int(length) * int(width))[nonan_ids];
init_m_ones = scipy.ones(int(length) * int(width))[nonan_ids];
# init_xs = [init_m_ones, init_mx, init_my, scipy.multiply(init_mx,init_my), scipy.power(init_mx,2), scipy.power(init_my,2)];
init_xs = [init_m_ones, init_mx, init_my];
p0 = scipy.zeros(len(init_xs));
p = scipy.zeros(len(init_xs));
for i in scipy.arange(0, 1):
m_ones = scipy.ones(scipy.size(mx));
# xs = [m_ones, mx, my, scipy.multiply(mx,my), scipy.power(mx,2), scipy.power(my,2)];
xs = [m_ones, mx, my];
G = scipy.vstack(xs).T;
# print(mx);
# print(scipy.size(d), scipy.size(xs));
plsq = scipy.optimize.leastsq(residuals, p0, args = (d, xs));
res = d - peval(xs, plsq[0]);
mod = plsq[0];
p = p + mod;
# print(plsq[0]);
synth = G * scipy.matrix(mod).T;
cutoff = res.std(axis=0,ddof=1);
#print(cutoff);
indices = numpy.arange(0, numpy.size(mx));
good_ids = indices[abs(res) <= cutoff];
# plt.figure(i + 2);
# plt.plot(mx,d,'b.',label='alloff');
# plt.plot(mx[good_ids],synth[good_ids],'.',label='fit',color='lightgreen');
# plt.plot(mx[bad_ids],d[bad_ids],'r.',label='cull #' + str(i + 1));
# plt.legend();
mx = mx[good_ids];
my = my[good_ids];
d = res[good_ids];
# ramp_removed = scipy.asarray(ramp_removed - peval(init_xs, plsq[0]));
ramp_removed = scipy.asarray(ramp_removed + peval(init_xs, plsq[0]));
d = scipy.asarray(indat).reshape(-1);
for i in range(0, scipy.size(nonan_ids)):
d[nonan_ids[i]] = ramp_removed[i];
ramp_removed = d.reshape(int(length), int(width));
# import matplotlib;
# matplotlib.pyplot.imshow(scipy.array(indat),interpolation='nearest',origin='lower');
# matplotlib.pyplot.show();
outfile = open(ramp_removed_image_path, "wb");
outoff = scipy.matrix(ramp_removed, scipy.float32);
outoff.tofile(outfile);
outfile.close();
if __name__ == "__main__":
import os;
import sys;
assert len(sys.argv) > 1, "\n***** ERROR: removeQuadOff_ENVI.py requires one argument, " + str(len(sys.argv) - 1) + " given\n";
assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
removeQuadOff_ENVI(sys.argv[1]);
exit();
| [
"pylab.fromfile",
"subprocess.Popen",
"numpy.meshgrid",
"scipy.arange",
"numpy.size",
"os.path.exists",
"numpy.isnan",
"scipy.optimize.leastsq",
"scipy.matrix",
"scipy.vstack",
"scipy.asarray",
"subprocess.call",
"re.search",
"scipy.size"
] | [((1832, 1852), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1846, 1852), False, 'import numpy\n'), ((2721, 2739), 'scipy.arange', 'scipy.arange', (['(0)', '(1)'], {}), '(0, 1)\n', (2733, 2739), False, 'import scipy\n'), ((4139, 4180), 'scipy.matrix', 'scipy.matrix', (['ramp_removed', 'scipy.float32'], {}), '(ramp_removed, scipy.float32)\n', (4151, 4180), False, 'import scipy\n'), ((4424, 4451), 'os.path.exists', 'os.path.exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (4438, 4451), False, 'import os\n'), ((728, 767), 'os.path.exists', 'os.path.exists', (['ramp_removed_image_path'], {}), '(ramp_removed_image_path)\n', (742, 767), False, 'import os\n'), ((899, 956), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE'}), '(cmd, shell=True, stdout=subprocess.PIPE)\n', (915, 956), False, 'import subprocess\n'), ((1446, 1478), 'subprocess.call', 'subprocess.call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (1461, 1478), False, 'import subprocess\n'), ((3002, 3053), 'scipy.optimize.leastsq', 'scipy.optimize.leastsq', (['residuals', 'p0'], {'args': '(d, xs)'}), '(residuals, p0, args=(d, xs))\n', (3024, 3053), False, 'import scipy\n'), ((3826, 3847), 'scipy.size', 'scipy.size', (['nonan_ids'], {}), '(nonan_ids)\n', (3836, 3847), False, 'import scipy\n'), ((1562, 1603), 'pylab.fromfile', 'pylab.fromfile', (['infile', 'pylab.float32', '(-1)'], {}), '(infile, pylab.float32, -1)\n', (1576, 1603), False, 'import pylab\n'), ((1916, 1937), 'scipy.asarray', 'scipy.asarray', (['x_grid'], {}), '(x_grid)\n', (1929, 1937), False, 'import scipy\n'), ((1957, 1978), 'scipy.asarray', 'scipy.asarray', (['y_grid'], {}), '(y_grid)\n', (1970, 1978), False, 'import scipy\n'), ((1998, 2018), 'scipy.asarray', 'scipy.asarray', (['indat'], {}), '(indat)\n', (2011, 2018), False, 'import scipy\n'), ((2073, 2087), 'numpy.isnan', 'numpy.isnan', (['d'], {}), '(d)\n', (2084, 2087), False, 'import numpy\n'), ((2764, 
2778), 'scipy.size', 'scipy.size', (['mx'], {}), '(mx)\n', (2774, 2778), False, 'import scipy\n'), ((2914, 2930), 'scipy.vstack', 'scipy.vstack', (['xs'], {}), '(xs)\n', (2926, 2930), False, 'import scipy\n'), ((3268, 3282), 'numpy.size', 'numpy.size', (['mx'], {}), '(mx)\n', (3278, 3282), False, 'import numpy\n'), ((3772, 3792), 'scipy.asarray', 'scipy.asarray', (['indat'], {}), '(indat)\n', (3785, 3792), False, 'import scipy\n'), ((2171, 2192), 'scipy.asarray', 'scipy.asarray', (['x_grid'], {}), '(x_grid)\n', (2184, 2192), False, 'import scipy\n'), ((2233, 2254), 'scipy.asarray', 'scipy.asarray', (['y_grid'], {}), '(y_grid)\n', (2246, 2254), False, 'import scipy\n'), ((3164, 3181), 'scipy.matrix', 'scipy.matrix', (['mod'], {}), '(mod)\n', (3176, 3181), False, 'import scipy\n'), ((1066, 1103), 're.search', 're.search', (['"""Size is \\\\d+, \\\\d+"""', 'info'], {}), "('Size is \\\\d+, \\\\d+', info)\n", (1075, 1103), False, 'import re\n'), ((1015, 1052), 're.search', 're.search', (['"""Size is \\\\d+, \\\\d+"""', 'info'], {}), "('Size is \\\\d+, \\\\d+', info)\n", (1024, 1052), False, 'import re\n')] |
import numpy as np
import pandas as pd
import networkx as nx
def read_lines(folder, file_name):
'''parse the output from the SchNet evalutation
1) a list of the coordinates of each cluster (returns 1st)
['O 1.922131 -1.3179458 -2.891314\n', 'H 1.9396669 -1.812519 -2.0610204\n', ...]
2) the actual energy (returns 2nd)
3) the predicted energy (returns 3rd)'''
# Load the output file and separate into energies, coordinates, and number of atoms
coordinates_list=[]; energies_list=[]; atoms_list=[]
file = open(file_name)
for line in file:
if 'Predicted' in line:
energies_list.append(line)
elif 'O' in line or 'H' in line:
coordinates_list.append(line)
else:
atoms_list.append(int(line))
# Divide the structure list into clusters
start=0; cluster_list=[]
for i, atoms in enumerate(atoms_list):
cluster_list.append([x for x in coordinates_list[start:start+atoms]])
start += atoms
return cluster_list#, [float(str.split(x)[1]) for x in energies_list], [float(str.split(x)[3]) for x in energies_list]
def read_lines_base(file_name):
'''parse the database xyz files
1) a list of the coordinates of each cluster (returns 1st)
['O 1.922131 -1.3179458 -2.891314\n', 'H 1.9396669 -1.812519 -2.0610204\n', ...]
2) the energy (returns 2nd)'''
with open(file_name) as f:
n_atoms = f.readline() #atoms per cluster
n_lines = 2 + int(n_atoms) #lines per cluster (#atoms + energy + coords)
with open(file_name) as f:
lines = f.readlines()
energies = np.array(lines[1::n_lines],dtype='float32')
structure_list, energy_list = [], []
for n in range(int(energies.shape[0])):
structure_list.append(lines[n_lines*(n+1)-n_lines:n_lines*(n+1)])
structure_list[n][1]=float(structure_list[n][1]) #energy in float
energy_list.append(float(structure_list[n][1]))
return structure_list, energy_list
def adjacency_list(coord_list):
'''Creates adjacency list to form graph in graph_analytics.
Structure list must be ordered such that the two covalently
bound hydrogens directly follow their oxygen.
Definition of a hydrogen bond obtained from
https://aip.scitation.org/doi/10.1063/1.2742385'''
s = pd.Series(coord_list)
dfx=s.str.split(expand=True)
coords=[str.split(x) for x in coord_list]
#delete atom label
for j in coords:
del j[0]
cov_bonds, h_bonds, labels = [],[],[]
for i, row in dfx.iterrows():
labels.append([str(i+1),row[0]])
q_1_2=[]
for i in range(len(dfx)):
if s[i].split()[0]=='O':
cov_bonds.append([str(i+1),str(i+2),'covalent'])
cov_bonds.append([str(i+1),str(i+3),'covalent'])
h1=np.array(s[i+1].split()[1:],dtype='float64')
h2=np.array(s[i+2].split()[1:],dtype='float64')
o=np.array(s[i].split()[1:],dtype='float64')
q_1_2.append([h1-o, h2-o])
v_list=[np.cross(q1,q2) for (q1,q2) in q_1_2]
for idx, v in enumerate(v_list):
for index, both_roh in enumerate(q_1_2):
for h_index, roh in enumerate(both_roh):
indexO=((idx+1)*3)-2
indexH=((index+1)*3)-2+(h_index+1)
o_hbond = s[indexO-1].split()
try:
h_hbond= s[indexH-1].split() #will enumerate past the list if you let it
except KeyError:
continue
dist = np.linalg.norm(np.array(o_hbond[1:],dtype='float64')-np.array(h_hbond[1:],dtype='float64'))
if (dist>1) & (dist<2.8):
angle = np.arccos(np.dot(roh, v)/(np.linalg.norm(roh)*np.linalg.norm(v)))*(180.0/np.pi)
if angle > 90.0:
angle=180.0-angle
N = np.exp(-np.linalg.norm(dist)/0.343)*(7.1-(0.05*angle)+(0.00021*(angle**2)))
if N >=0.007:#0.0085:
h_bonds.append([str(indexO),str(indexH),'hydrogen'])
return labels, cov_bonds, h_bonds, coords
def load_graph(struct):
'''loads the graph for a single structure and returns the graph'''
l,c,h,coords=adjacency_list(np.array(struct))
node_labels = dict()
for i in range(len(l)):
node_labels[l[i][0]] = l[i][1]
edges=c+h
graph = nx.Graph()
for k,v in node_labels.items():
graph.add_node(k, label=v, coords=coords[int(k)-1])
for triple in edges:
atom1 = [float(x) for x in coords[int(triple[0])-1]]
atom2 = [float(x) for x in coords[int(triple[1])-1]]
distance = np.linalg.norm(np.array(atom2)-np.array(atom1))
graph.add_edge(triple[0], triple[1], label=triple[2], weight=distance)
return graph, node_labels, edges
| [
"numpy.cross",
"networkx.Graph",
"numpy.array",
"pandas.Series",
"numpy.linalg.norm",
"numpy.dot"
] | [((1690, 1734), 'numpy.array', 'np.array', (['lines[1::n_lines]'], {'dtype': '"""float32"""'}), "(lines[1::n_lines], dtype='float32')\n", (1698, 1734), True, 'import numpy as np\n'), ((2399, 2420), 'pandas.Series', 'pd.Series', (['coord_list'], {}), '(coord_list)\n', (2408, 2420), True, 'import pandas as pd\n'), ((4473, 4483), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4481, 4483), True, 'import networkx as nx\n'), ((3110, 3126), 'numpy.cross', 'np.cross', (['q1', 'q2'], {}), '(q1, q2)\n', (3118, 3126), True, 'import numpy as np\n'), ((4337, 4353), 'numpy.array', 'np.array', (['struct'], {}), '(struct)\n', (4345, 4353), True, 'import numpy as np\n'), ((4769, 4784), 'numpy.array', 'np.array', (['atom2'], {}), '(atom2)\n', (4777, 4784), True, 'import numpy as np\n'), ((4785, 4800), 'numpy.array', 'np.array', (['atom1'], {}), '(atom1)\n', (4793, 4800), True, 'import numpy as np\n'), ((3636, 3674), 'numpy.array', 'np.array', (['o_hbond[1:]'], {'dtype': '"""float64"""'}), "(o_hbond[1:], dtype='float64')\n", (3644, 3674), True, 'import numpy as np\n'), ((3674, 3712), 'numpy.array', 'np.array', (['h_hbond[1:]'], {'dtype': '"""float64"""'}), "(h_hbond[1:], dtype='float64')\n", (3682, 3712), True, 'import numpy as np\n'), ((3793, 3807), 'numpy.dot', 'np.dot', (['roh', 'v'], {}), '(roh, v)\n', (3799, 3807), True, 'import numpy as np\n'), ((3809, 3828), 'numpy.linalg.norm', 'np.linalg.norm', (['roh'], {}), '(roh)\n', (3823, 3828), True, 'import numpy as np\n'), ((3829, 3846), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (3843, 3846), True, 'import numpy as np\n'), ((3974, 3994), 'numpy.linalg.norm', 'np.linalg.norm', (['dist'], {}), '(dist)\n', (3988, 3994), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches

# Anchor point and extents of the tagged rectangle (mm); the negative
# y-extent makes the rectangle grow downwards from the anchor.
x_start = 40
y_start = 80
x_extent = 50
y_extent = -40

fig = plt.figure()
fig.set_size_inches(5.5, 2.5)
ax = fig.add_subplot(111)
ax.set_xlim([0., 100.])
ax.set_ylim([0., 100.])
ax.set_xticks(np.arange(0, 101, 25))
ax.set_yticks(np.arange(0, 101, 25))
ax.set_xlabel("width [mm]")
ax.set_ylabel("height [mm]")

# Shaded tagged region with a dashed outline.
tagged_rect = patches.Rectangle(
    (x_start, y_start), x_extent, y_extent,
    alpha=0.5, facecolor="silver", edgecolor='k', lw=0.75, ls='--')
ax.add_patch(tagged_rect)

# Extent arrows along x and y, both anchored at the start position.
arrow_style = {"facecolor": "silver", "connectionstyle": "arc3", "arrowstyle": "->", "lw": 1.5}
ax.annotate('', xy=(x_start + x_extent, y_start), xytext=(x_start, y_start),
            arrowprops=dict(arrow_style))
ax.annotate('', xy=(x_start, y_start + y_extent), xytext=(x_start, y_start),
            arrowprops=dict(arrow_style))

# Callout pointing at the anchor, plus a red marker on it.
ax.annotate('position (%i, %i)' % (x_start, y_start), xy=(x_start, y_start), xytext=(5, 95),
            arrowprops=dict(facecolor='silver', connectionstyle="arc3", arrowstyle="->"))
ax.scatter(x_start, y_start, marker=".", s=40, color='r')

# Labels for the two extents and the region itself.
ax.text(55, y_start + 5, "x-extent (%i)" % x_extent)
ax.text(x_start - 5, 75, "y-extent (%i)" % y_extent, rotation=90)
ax.text(x_start + x_extent/2, y_start + y_extent/2, "tagged region", ha="center", va="center")

for side in ("top", "right"):
    ax.spines[side].set_visible(False)
fig.subplots_adjust(bottom=0.175, left=0.15, right=0.95, top=0.95)
fig.savefig("../images/2d_tag.png")
| [
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.patches.Rectangle"
] | [((151, 163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (161, 163), True, 'import matplotlib.pyplot as plt\n'), ((408, 533), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x_start, y_start)', 'x_extent', 'y_extent'], {'alpha': '(0.5)', 'facecolor': '"""silver"""', 'edgecolor': '"""k"""', 'lw': '(0.75)', 'ls': '"""--"""'}), "((x_start, y_start), x_extent, y_extent, alpha=0.5,\n facecolor='silver', edgecolor='k', lw=0.75, ls='--')\n", (425, 533), True, 'import matplotlib.patches as patches\n'), ((283, 304), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(25)'], {}), '(0, 101, 25)\n', (292, 304), True, 'import numpy as np\n'), ((320, 341), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(25)'], {}), '(0, 101, 25)\n', (329, 341), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
# In[3]:
from sn_random_numbers import sn_random_numbers
# In[5]:
from simulation_class import simulation_class
# In[22]:
class geometric_brownian_motion(simulation_class):
    """Simulation class generating paths of a geometric Brownian motion.

    Uses the risk-neutral Black-Scholes-Merton dynamics with the short
    rate taken from the market environment's discount curve.
    """

    def __init__(self, name, mar_env, corr=False):
        super(geometric_brownian_motion, self).__init__(name, mar_env, corr)

    def update(self, initial_value=None, volatility=None, final_date=None):
        """Overwrite selected model parameters; invalidates cached paths."""
        if initial_value is not None:
            self.initial_value = initial_value
        if volatility is not None:
            self.volatility = volatility
        if final_date is not None:
            self.final_date = final_date
        # Any parameter change makes previously simulated paths stale.
        self.instrument_values = None

    def generate_path(self, fixed_seed=False, day_count=365):
        """Simulate price paths over self.time_grid and cache them."""
        if self.time_grid is None:
            self.generate_time_grid()
        n_steps = len(self.time_grid)
        n_paths = self.paths
        paths = np.zeros((n_steps, n_paths))
        paths[0] = self.initial_value
        if self.correlated:
            # Correlated case: shared random numbers provided externally.
            rand = self.random_numbers
        else:
            rand = sn_random_numbers((1, n_steps, n_paths), fixed_seed=fixed_seed)
        short_rate = self.discount_curve.short_rate
        for t in range(1, n_steps):
            if self.correlated:
                # Impose correlation via the Cholesky factor, then pick
                # this asset's row of shocks.
                ran = np.dot(self.cholesky_matrix, rand[:, t, :])
                ran = ran[self.rn_set]
            else:
                ran = rand[t]
            dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count
            drift = (short_rate - 0.5 * self.volatility ** 2) * dt
            diffusion = self.volatility * np.sqrt(dt) * ran
            paths[t] = paths[t - 1] * np.exp(drift + diffusion)
        self.instrument_values = paths
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| [
"numpy.dot",
"numpy.zeros",
"sn_random_numbers.sn_random_numbers",
"numpy.sqrt"
] | [((929, 945), 'numpy.zeros', 'np.zeros', (['(M, I)'], {}), '((M, I))\n', (937, 945), True, 'import numpy as np\n'), ((1030, 1081), 'sn_random_numbers.sn_random_numbers', 'sn_random_numbers', (['(1, M, I)'], {'fixed_seed': 'fixed_seed'}), '((1, M, I), fixed_seed=fixed_seed)\n', (1047, 1081), False, 'from sn_random_numbers import sn_random_numbers\n'), ((1338, 1381), 'numpy.dot', 'np.dot', (['self.cholesky_matrix', 'rand[:, t, :]'], {}), '(self.cholesky_matrix, rand[:, t, :])\n', (1344, 1381), True, 'import numpy as np\n'), ((1632, 1643), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (1639, 1643), True, 'import numpy as np\n')] |
# libraries and directory
import pandas as pd
import numpy as np
import os


def min_max_scale(data, feature_range=(-1.0, 1.0)):
    """Column-wise min-max scale *data* into *feature_range*.

    Args:
        data: 2D array-like of numbers (rows = samples, columns = features).
        feature_range: (low, high) target interval, default (-1, 1).

    Returns:
        numpy.ndarray of floats with every column scaled into the range.

    Constant columns (max == min) are mapped to the lower bound instead of
    producing NaN/inf from a zero division.
    """
    data = np.asarray(data, dtype=float)
    col_min = data.min(axis=0)
    span = data.max(axis=0) - col_min
    # Guard constant columns: a zero span would otherwise divide by zero.
    safe_span = np.where(span == 0, 1.0, span)
    std = (data - col_min) / safe_span
    low, high = feature_range
    return std * (high - low) + low


if __name__ == "__main__":
    directory = "C:/Users/"  # complete directory
    os.chdir(directory)
    # load data
    dataset = pd.read_csv("data.txt", header=None)
    # scale data so that values fall between -1 and 1
    X = min_max_scale(dataset.to_numpy(dtype=float))
    # save scaled data
    np.savetxt("scaled_data.txt", X, delimiter=",")
"pandas.read_csv",
"numpy.savetxt",
"os.chdir",
"pandas.DataFrame.to_numpy"
] | [((129, 148), 'os.chdir', 'os.chdir', (['directory'], {}), '(directory)\n', (137, 148), False, 'import os\n'), ((177, 213), 'pandas.read_csv', 'pd.read_csv', (['"""data.txt"""'], {'header': 'None'}), "('data.txt', header=None)\n", (188, 213), True, 'import pandas as pd\n'), ((276, 348), 'pandas.DataFrame.to_numpy', 'pd.DataFrame.to_numpy', (['dataset.loc[:, 0:dataset.shape[1]]'], {'dtype': '"""float"""'}), "(dataset.loc[:, 0:dataset.shape[1]], dtype='float')\n", (297, 348), True, 'import pandas as pd\n'), ((471, 518), 'numpy.savetxt', 'np.savetxt', (['"""scaled_data.txt"""', 'X'], {'delimiter': '""","""'}), "('scaled_data.txt', X, delimiter=',')\n", (481, 518), True, 'import numpy as np\n')] |
import os
from tqdm import tqdm
import numpy as np
import mercantile
import torch
import torch.backends.cudnn
from torch.utils.data import DataLoader
from robosat_pink.core import load_config, load_module, check_classes, check_channels, make_palette, web_ui, Logs
from robosat_pink.tiles import tile_label_to_file, tile_translate_to_file, tiles_from_csv
def add_parser(subparser, formatter_class):
    """Register the ``predict`` sub-command with all its CLI arguments."""
    parser = subparser.add_parser(
        "predict", help="Predict masks, from given inputs and an already trained model", formatter_class=formatter_class
    )

    group_in = parser.add_argument_group("Inputs")
    group_in.add_argument("dataset", type=str, help="predict dataset directory path [required]")
    group_in.add_argument("--checkpoint", type=str, required=True, help="path to the trained model to use [required]")
    group_in.add_argument("--config", type=str, help="path to config file [required]")
    group_in.add_argument("--cover", type=str, help="path to csv tiles cover file, to filter tiles to predict [optional]")

    group_out = parser.add_argument_group("Outputs")
    group_out.add_argument("--passes", type=str, default="both", choices=["first", "second", "both"],
                           help="Predict passes [default: both]")
    group_out.add_argument("out", type=str, help="output directory path [required]")

    group_dl = parser.add_argument_group("Data Loaders")
    group_dl.add_argument("--workers", type=int, help="number of workers to load images [default: batch size]")
    group_dl.add_argument("--bs", type=int, help="if set override config file batch size value for data loader")

    group_ui = parser.add_argument_group("Web UI")
    group_ui.add_argument("--web_ui_base_url", type=str, help="alternate Web UI base URL")
    group_ui.add_argument("--web_ui_template", type=str, help="alternate Web UI template path")
    group_ui.add_argument("--no_web_ui", action="store_true", help="desactivate Web UI output")

    parser.set_defaults(func=main)
def predict(config, cover, args, palette, chkpt, nn, device, mode):
    """Run one prediction pass over the dataset and write mask tiles.

    ``mode`` selects the first pass ("predict") or the shifted second pass
    ("predict_translate").  Returns the list of processed tiles.
    """
    assert mode in ["predict", "predict_translate"], "Predict unknown mode"

    # Instantiate the loader class named in the checkpoint.
    loader_name = chkpt["loader"]
    loader_module = load_module("robosat_pink.loaders.{}".format(loader_name.lower()))
    dataset = getattr(loader_module, loader_name)(config, chkpt["shape_in"][1:3], args.dataset, cover, mode=mode)
    loader = DataLoader(dataset, batch_size=args.bs, num_workers=args.workers)
    assert len(loader), "Empty predict dataset directory. Check your path."

    tiled = []
    for images, tiles in tqdm(loader, desc="Eval", unit="batch", ascii=True):
        images = images.to(device)
        # Per-pixel class probabilities for the whole batch.
        probs = torch.nn.functional.softmax(nn(images), dim=1).data.cpu().numpy()
        for tile, prob in zip(tiles, probs):
            x, y, z = map(int, tile)
            # Threshold the non-background channel(s) into a uint8 mask.
            mask = np.around(prob[1:, :, :]).astype(np.uint8).squeeze()
            if mode == "predict":
                tile_label_to_file(args.out, mercantile.Tile(x, y, z), palette, mask)
            if mode == "predict_translate":
                tile_translate_to_file(args.out, mercantile.Tile(x, y, z), palette, mask, config["model"]["ms"])
            tiled.append(mercantile.Tile(x, y, z))
    return tiled
def main(args):
    """Entry point of the ``predict`` sub-command.

    Loads the config and checkpoint, selects CUDA when available, runs one
    or two prediction passes over the dataset, and optionally emits a
    Leaflet-based Web UI over the produced tiles.
    """
    config = load_config(args.config)
    check_channels(config)
    check_classes(config)
    palette = make_palette([classe["color"] for classe in config["classes"]])

    # Batch size: the CLI flag wins, otherwise fall back to the config file.
    if not args.bs:
        try:
            args.bs = config["model"]["bs"]
        except:  # NOTE(review): bare except also hides non-KeyError failures
            pass
    assert args.bs, "For rsp predict, model/bs must be set either in config file, or pass trought parameter --bs"

    # Default the number of loader workers to the batch size.
    args.workers = args.bs if not args.workers else args.workers

    # Optional CSV tile cover restricting which tiles are predicted.
    cover = [tile for tile in tiles_from_csv(os.path.expanduser(args.cover))] if args.cover else None

    log = Logs(os.path.join(args.out, "log"))

    if torch.cuda.is_available():
        log.log("RoboSat.pink - predict on {} GPUs, with {} workers".format(torch.cuda.device_count(), args.workers))
        log.log("(Torch:{} Cuda:{} CudNN:{})".format(torch.__version__, torch.version.cuda, torch.backends.cudnn.version()))
        device = torch.device("cuda")
        # cuDNN autotuning is safe here: input shapes are constant across batches.
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = True
    else:
        log.log("RoboSat.pink - predict on CPU, with {} workers".format(args.workers))
        log.log("")
        log.log("============================================================")
        log.log("WARNING: Are you -really- sure about not predicting on GPU ?")
        log.log("============================================================")
        log.log("")
        device = torch.device("cpu")

    # Rebuild the model class named in the checkpoint and load its weights.
    chkpt = torch.load(args.checkpoint, map_location=device)
    model_module = load_module("robosat_pink.models.{}".format(chkpt["nn"].lower()))
    nn = getattr(model_module, chkpt["nn"])(chkpt["shape_in"], chkpt["shape_out"]).to(device)
    nn = torch.nn.DataParallel(nn)
    nn.load_state_dict(chkpt["state_dict"])
    nn.eval()

    log.log("Model {} - UUID: {}".format(chkpt["nn"], chkpt["uuid"]))

    with torch.no_grad():  # don't track tensors with autograd during prediction
        tiled = []
        if args.passes in ["first", "both"]:
            log.log("== Predict First Pass ==")
            tiled = predict(config, cover, args, palette, chkpt, nn, device, "predict")
        if args.passes in ["second", "both"]:
            log.log("== Predict Second Pass ==")
            predict(config, cover, args, palette, chkpt, nn, device, "predict_translate")

    # Only first-pass tiles feed the Web UI index.
    if not args.no_web_ui and tiled:
        template = "leaflet.html" if not args.web_ui_template else args.web_ui_template
        base_url = args.web_ui_base_url if args.web_ui_base_url else "."
        web_ui(args.out, base_url, tiled, tiled, "png", template)
| [
"os.path.expanduser",
"tqdm.tqdm",
"mercantile.Tile",
"robosat_pink.core.load_config",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.load",
"robosat_pink.core.make_palette",
"robosat_pink.core.check_classes",
"torch.cuda.device_count",
"numpy.around",
"torch.cuda.is_available",
"rob... | [((2290, 2362), 'torch.utils.data.DataLoader', 'DataLoader', (['loader_predict'], {'batch_size': 'args.bs', 'num_workers': 'args.workers'}), '(loader_predict, batch_size=args.bs, num_workers=args.workers)\n', (2300, 2362), False, 'from torch.utils.data import DataLoader\n'), ((2480, 2531), 'tqdm.tqdm', 'tqdm', (['loader'], {'desc': '"""Eval"""', 'unit': '"""batch"""', 'ascii': '(True)'}), "(loader, desc='Eval', unit='batch', ascii=True)\n", (2484, 2531), False, 'from tqdm import tqdm\n'), ((3166, 3190), 'robosat_pink.core.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (3177, 3190), False, 'from robosat_pink.core import load_config, load_module, check_classes, check_channels, make_palette, web_ui, Logs\n'), ((3195, 3217), 'robosat_pink.core.check_channels', 'check_channels', (['config'], {}), '(config)\n', (3209, 3217), False, 'from robosat_pink.core import load_config, load_module, check_classes, check_channels, make_palette, web_ui, Logs\n'), ((3222, 3243), 'robosat_pink.core.check_classes', 'check_classes', (['config'], {}), '(config)\n', (3235, 3243), False, 'from robosat_pink.core import load_config, load_module, check_classes, check_channels, make_palette, web_ui, Logs\n'), ((3258, 3321), 'robosat_pink.core.make_palette', 'make_palette', (["[classe['color'] for classe in config['classes']]"], {}), "([classe['color'] for classe in config['classes']])\n", (3270, 3321), False, 'from robosat_pink.core import load_config, load_module, check_classes, check_channels, make_palette, web_ui, Logs\n'), ((3769, 3794), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3792, 3794), False, 'import torch\n'), ((4594, 4642), 'torch.load', 'torch.load', (['args.checkpoint'], {'map_location': 'device'}), '(args.checkpoint, map_location=device)\n', (4604, 4642), False, 'import torch\n'), ((4831, 4856), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['nn'], {}), '(nn)\n', (4852, 4856), False, 'import torch\n'), ((3730, 
3759), 'os.path.join', 'os.path.join', (['args.out', '"""log"""'], {}), "(args.out, 'log')\n", (3742, 3759), False, 'import os\n'), ((4056, 4076), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4068, 4076), False, 'import torch\n'), ((4561, 4580), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4573, 4580), False, 'import torch\n'), ((4996, 5011), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5009, 5011), False, 'import torch\n'), ((5662, 5719), 'robosat_pink.core.web_ui', 'web_ui', (['args.out', 'base_url', 'tiled', 'tiled', '"""png"""', 'template'], {}), "(args.out, base_url, tiled, tiled, 'png', template)\n", (5668, 5719), False, 'from robosat_pink.core import load_config, load_module, check_classes, check_channels, make_palette, web_ui, Logs\n'), ((3091, 3115), 'mercantile.Tile', 'mercantile.Tile', (['x', 'y', 'z'], {}), '(x, y, z)\n', (3106, 3115), False, 'import mercantile\n'), ((3872, 3897), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3895, 3897), False, 'import torch\n'), ((4006, 4036), 'torch.backends.cudnn.version', 'torch.backends.cudnn.version', ([], {}), '()\n', (4034, 4036), False, 'import torch\n'), ((2868, 2892), 'mercantile.Tile', 'mercantile.Tile', (['x', 'y', 'z'], {}), '(x, y, z)\n', (2883, 2892), False, 'import mercantile\n'), ((3002, 3026), 'mercantile.Tile', 'mercantile.Tile', (['x', 'y', 'z'], {}), '(x, y, z)\n', (3017, 3026), False, 'import mercantile\n'), ((3657, 3687), 'os.path.expanduser', 'os.path.expanduser', (['args.cover'], {}), '(args.cover)\n', (3675, 3687), False, 'import os\n'), ((2736, 2761), 'numpy.around', 'np.around', (['prob[1:, :, :]'], {}), '(prob[1:, :, :])\n', (2745, 2761), True, 'import numpy as np\n')] |
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1' # Limit numpy threads to 1
import argparse
import csv
import matplotlib
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from src.anomaly_detection import *
from src.dataloader import load_weighted_edgelist
from src.egonet_extractor import EgonetFeatureExtractor
def write_scores_to_file(nodes, filename):
    """Write a TSV file with one ``<name><TAB><score>`` row per node.

    Args:
        nodes (list): List of ``(node_id, data_dict)`` tuples where the data
            dict carries ``'name'`` and ``'score'`` keys.
        filename (str): Output file name.
    """
    # newline='' is required by the csv module: without it the writer's own
    # '\r\n' row terminator gets doubled to '\r\r\n' on Windows.  The context
    # manager also guarantees the handle is closed (the original leaked it).
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f, delimiter='\t')
        for node in nodes:
            writer.writerow([node[1]['name'], node[1]['score']])
def get_scores_from_ids(graph, node_ids):
    """Look up the anomaly score attribute for a list of node ids.

    Args:
        graph (nx.Graph): Graph whose nodes carry a 'score' attribute
        node_ids (list): List of node ids

    Returns:
        list: Anomaly score for every id, in the same order
    """
    scores = []
    for node_id in node_ids:
        attributes = graph.nodes(data=True)[node_id]
        scores.append(attributes['score'])
    return scores
def plot_node_and_scores(model, X, Y, node_ids, scores, nodes, xlabel, ylabel, path, no_anomalies=5):
    """Plot results
    Hovering mechanic of plot inspired by https://stackoverflow.com/questions/7908636/how-to-add-hovering-annotations-in-matplotlib
    Args:
        model (LinearRegression): Fitted linear regression model
        X (list): Sorted list of X features based on score
        Y (list): Sorted list of Y features based on score
        node_ids (list): Sorted List of node ids based on score
        scores (list): Sorted list of anomaly scores
        nodes (list): List of NetworkX nodes
        xlabel (str): X label name
        ylabel (str): Y label name
        path (str): Output path for plot (".pdf" is appended)
        no_anomalies (int): Number of top anomalies to plot as red dots
    """
    # Power-law fit parameters in log-log space: slope alpha, intercept beta.
    alpha = model.coef_[0][0]
    beta = model.intercept_[0]
    # NOTE(review): this slice keeps no_anomalies + 1 ids — one more than the
    # docstring promises; confirm whether the +1 is intended.
    top_scores_node_ids = node_ids[:no_anomalies+1]
    fig,ax = plt.subplots()
    fig.set_size_inches(14, 10, forward=True)
    # Plot points
    sc = plt.scatter(X, Y, cmap='viridis', c=scores, s=2, norm=matplotlib.colors.LogNorm())
    # Colour top k anomalies
    if no_anomalies > 0:
        X_top, Y_top = zip(*[(x, y) for x, y, node in zip(X, Y, node_ids) if node in top_scores_node_ids])
        plt.scatter(X_top, Y_top, s=5, color='red')
    # Plot power law line
    X = np.sort(np.array(X))
    plt.plot(X, np.exp(model.predict(np.expand_dims(np.log(X), axis=-1))), label=f"y = {alpha:.2f}x + ({beta:.2f})", color='red')
    # Enable annotation of nodes
    annot = ax.annotate("", xy=(0,0), xytext=(20,20),textcoords="offset points",
                    bbox=dict(boxstyle="round", fc="w"),
                    arrowprops=dict(arrowstyle="->"))
    annot.set_visible(False)
    # Create hover function
    def update_annot(ind):
        # Move the tooltip to the hovered point and fill it with the
        # rank/score/node info of every point under the cursor.
        pos = sc.get_offsets()[ind["ind"][0]]
        annot.xy = pos
        text = ""
        for n in ind["ind"]:
            text += f"Rank: {n} Score: {scores[n]}\n {nodes[n]}\n"
        annot.set_text(text)
        annot.get_bbox_patch().set_alpha(0.4)
    def hover(event):
        # Show the tooltip while the cursor is over a point, hide otherwise.
        vis = annot.get_visible()
        if event.inaxes == ax:
            cont, ind = sc.contains(event)
            if cont:
                update_annot(ind)
                annot.set_visible(True)
                fig.canvas.draw_idle()
            else:
                if vis:
                    annot.set_visible(False)
                    fig.canvas.draw_idle()
    fig.canvas.mpl_connect("motion_notify_event", hover)
    # Save and show figure (log-log axes suit the power-law fit)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.yscale("log")
    plt.xscale("log")
    plt.xlim(left=1)
    plt.ylim(bottom=1)
    plt.legend()
    plt.colorbar(sc, pad=0.01)
    plt.savefig(f"{path}.pdf")
    plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Perform OddBall anomaly detection")
parser.add_argument("--graph", "-g", type=str, required=True,
help="Path to graph file")
parser.add_argument("--out", "-o", type=str,
help="output folder", default="./out")
parser.add_argument("--processes", "-p", type=int, default=None,
help="Number of processes to use")
parser.add_argument('--lof', "-l", action="store_true", default=False,
help='Use LOF. If left out LOF is not used.')
parser.add_argument('--anomaly_type', "-a", type=str, required=True, choices=["sc", "hv", "de"],
help='Anomaly Type. sc:star_or_clique. hv:heavy_vicinity. de:dominant_edge.')
parser.add_argument('--id_names', type=str, nargs='*', default=None,
help='Path to labels for first edge list column in bipartite graph. If graph is unipartite, only the first file is used to match IDs')
parser.add_argument('--group', type=int, default=-1,
help="For bipartite graphs left/right group have id 0 and 1 respectively. -1 = analyze as unipartite")
parser.add_argument('--force_features', action="store_true", default=False,
help='Force feature extraction')
parser.add_argument("--no_anomalies", "-n", type=int, default=5,
help="Number of top anomalies to plot")
parser.add_argument("--no_neighbors", "-k", type=int, default=5000,
help="Number of neighbors to use for LOF")
args = parser.parse_args()
# Set up some files and folders
dataset_name = os.path.splitext(os.path.basename(args.graph))[0]
output_folder = os.path.join(args.out, dataset_name)
output_file = os.path.join(output_folder, f"{dataset_name}_oddball_{args.anomaly_type}_lof={args.lof}_group={args.group}")
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
feature_graph_path = os.path.join(output_folder, f"{dataset_name}_features.pkl")
# Check if feature file exists. If not, extract features
if not args.force_features and os.path.exists(feature_graph_path):
graph = nx.read_gpickle(feature_graph_path)
else:
extractor = EgonetFeatureExtractor()
graph = load_weighted_edgelist(args.graph, args.id_names)
features = extractor.get_feature_vector(graph, args.processes)
nx.write_gpickle(graph, feature_graph_path)
# Choose anomaly detection method
if not args.lof:
print("No LOF score considered")
if args.anomaly_type == "sc":
print("Detecting Star/Cliques")
detector = StarCliqueAnomalyDetection(graph)
xlabel = "# nodes"
ylabel = "# edges"
elif args.anomaly_type == "hv":
print("Detecting HeavyVicinities")
detector = HeavyVicinityAnomalyDetection(graph)
xlabel = "# edges"
ylabel = "Total weight"
elif args.anomaly_type == "de":
print("Detecting DominantEdges")
detector = DominantEdgeAnomalyDetection(graph)
xlabel = "Total weight"
ylabel = "Eigenvalue"
else:
print("Using LOF score")
if args.anomaly_type == "sc":
print("Detecting Star/Cliques")
detector = StarCliqueLOFAnomalyDetection(graph, args.no_neighbors, args.processes)
xlabel = "# nodes"
ylabel = "# edges"
elif args.anomaly_type == "hv":
print("Detecting HeavyVicinities")
detector = HeavyVicinityLOFAnomalyDetection(graph, args.no_neighbors, args.processes)
xlabel = "# edges"
ylabel = "Total weight"
elif args.anomaly_type == "de":
print("Detecting DominantEdges")
detector = DominantEdgeLOFAnomalyDetection(graph, args.no_neighbors, args.processes)
xlabel = "Total weight"
ylabel = "Eigenvalue"
# Run anomaly detection
model, X, Y, node_ids = detector.detect_anomalies(args.group)
# Sort everything based on total score
node_ids, X, Y = zip(*[(sc, xs, ys) for (sc, xs, ys) in sorted(zip(node_ids, X, Y), reverse=True, key=lambda triplet : detector.graph.nodes(data=True)[triplet[0]]['score'])])
scores = get_scores_from_ids(detector.graph, node_ids)
nodes = [(node_id, detector.graph.nodes()[node_id]) for node_id in node_ids]
# Write score results to tsv
print(f"saving as{output_file}")
write_scores_to_file(nodes, output_file + ".txt")
# Write result graph to file
nx.write_gpickle(detector.graph, f"{output_file}_out.pkl")
# Round floats for readability
for node in nodes:
for key, value in node[1].items():
if isinstance(node[1][key], float):
node[1][key] = round(node[1][key], 2)
# Plot results
plot_node_and_scores(model, X, Y, node_ids, scores, nodes, xlabel, ylabel, output_file, args.no_anomalies)
| [
"matplotlib.pyplot.yscale",
"argparse.ArgumentParser",
"networkx.write_gpickle",
"matplotlib.colors.LogNorm",
"os.path.join",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"src.dataloader.load_weighted_edgelist",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"csv.writer",
"matplotl... | [((1822, 1836), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1834, 1836), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3220), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (3212, 3220), True, 'import matplotlib.pyplot as plt\n'), ((3222, 3240), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (3232, 3240), True, 'import matplotlib.pyplot as plt\n'), ((3242, 3259), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (3252, 3259), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3278), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (3271, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3280, 3296), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(1)'}), '(left=1)\n', (3288, 3296), True, 'import matplotlib.pyplot as plt\n'), ((3298, 3316), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(1)'}), '(bottom=1)\n', (3306, 3316), True, 'import matplotlib.pyplot as plt\n'), ((3318, 3330), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3328, 3330), True, 'import matplotlib.pyplot as plt\n'), ((3332, 3358), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {'pad': '(0.01)'}), '(sc, pad=0.01)\n', (3344, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3361, 3387), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{path}.pdf"""'], {}), "(f'{path}.pdf')\n", (3372, 3387), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3397, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3441, 3513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform OddBall anomaly detection"""'}), "(description='Perform OddBall anomaly detection')\n", (3464, 3513), False, 'import argparse\n'), ((4986, 5022), 'os.path.join', 'os.path.join', (['args.out', 'dataset_name'], {}), '(args.out, dataset_name)\n', (4998, 
5022), False, 'import os\n'), ((5038, 5155), 'os.path.join', 'os.path.join', (['output_folder', 'f"""{dataset_name}_oddball_{args.anomaly_type}_lof={args.lof}_group={args.group}"""'], {}), "(output_folder,\n f'{dataset_name}_oddball_{args.anomaly_type}_lof={args.lof}_group={args.group}'\n )\n", (5050, 5155), False, 'import os\n'), ((5238, 5297), 'os.path.join', 'os.path.join', (['output_folder', 'f"""{dataset_name}_features.pkl"""'], {}), "(output_folder, f'{dataset_name}_features.pkl')\n", (5250, 5297), False, 'import os\n'), ((7520, 7578), 'networkx.write_gpickle', 'nx.write_gpickle', (['detector.graph', 'f"""{output_file}_out.pkl"""'], {}), "(detector.graph, f'{output_file}_out.pkl')\n", (7536, 7578), True, 'import networkx as nx\n'), ((586, 615), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (596, 615), False, 'import csv\n'), ((2137, 2180), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_top', 'Y_top'], {'s': '(5)', 'color': '"""red"""'}), "(X_top, Y_top, s=5, color='red')\n", (2148, 2180), True, 'import matplotlib.pyplot as plt\n'), ((2218, 2229), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2226, 2229), True, 'import numpy as np\n'), ((5156, 5184), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (5169, 5184), False, 'import os\n'), ((5188, 5214), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (5199, 5214), False, 'import os\n'), ((5389, 5423), 'os.path.exists', 'os.path.exists', (['feature_graph_path'], {}), '(feature_graph_path)\n', (5403, 5423), False, 'import os\n'), ((5435, 5470), 'networkx.read_gpickle', 'nx.read_gpickle', (['feature_graph_path'], {}), '(feature_graph_path)\n', (5450, 5470), True, 'import networkx as nx\n'), ((5492, 5516), 'src.egonet_extractor.EgonetFeatureExtractor', 'EgonetFeatureExtractor', ([], {}), '()\n', (5514, 5516), False, 'from src.egonet_extractor import EgonetFeatureExtractor\n'), ((5527, 5576), 
'src.dataloader.load_weighted_edgelist', 'load_weighted_edgelist', (['args.graph', 'args.id_names'], {}), '(args.graph, args.id_names)\n', (5549, 5576), False, 'from src.dataloader import load_weighted_edgelist\n'), ((5644, 5687), 'networkx.write_gpickle', 'nx.write_gpickle', (['graph', 'feature_graph_path'], {}), '(graph, feature_graph_path)\n', (5660, 5687), True, 'import networkx as nx\n'), ((1956, 1983), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {}), '()\n', (1981, 1983), False, 'import matplotlib\n'), ((4935, 4963), 'os.path.basename', 'os.path.basename', (['args.graph'], {}), '(args.graph)\n', (4951, 4963), False, 'import os\n'), ((2280, 2289), 'numpy.log', 'np.log', (['X'], {}), '(X)\n', (2286, 2289), True, 'import numpy as np\n')] |
"""
SCRIPT TO CONVERT AND WRITE CNS/X-PLOR PARAM AND TOP FILES
FROM BOSS ZMATRIX
Created on Mon Feb 15 15:40:05 2016
@author: <NAME> <EMAIL>
@author: <NAME>
REQUIREMENTS:
BOSS (need to set BOSSdir in bashrc and cshrc)
Preferably Anaconda python with following modules
pandas
argparse
numpy
"""
from LigParGen.BOSSReader import bossPdbAtom2Element,bossElement2Mass,ucomb,tor_cent
import pickle
import pandas as pd
import numpy as np
def retDihedImp(df):
    """Format X-PLOR IMPRoper lines for one torsion record.

    ``df`` is a dict-like row with keys 'NAME' (dash-separated atom types)
    and Fourier coefficients 'V1'..'V4'.  Each non-zero coefficient yields
    one line; if all four sum to zero a single zero-valued V2 placeholder
    line is emitted.  Phase is 0.0 for odd and 180.0 for even periodicity.
    """
    lines = []
    name = df['NAME'].replace('-', ' ')
    coeffs = [df['V%d' % pot] for pot in range(1, 5)]
    template = 'IMPRoper %s %4.5f %d %4.5f \n'
    if np.sum(coeffs) != 0.0:
        for pot, coeff in enumerate(coeffs, start=1):
            if coeff != 0.0:
                lines.append(template % (name, coeff, pot, 180.00 * abs(pot % 2 - 1)))
    else:
        # All-zero record: emit a placeholder with the (zero) V2 term.
        lines.append(template % (name, coeffs[1], 2, 180.00))
    return lines
def retDihed(df):
    """Format an X-PLOR DIHEdral (MULT 4) entry for one torsion record.

    Emits the header line carrying the V1 term followed by three
    continuation lines for V2..V4.  Phase is 0.0 for odd and 180.0 for
    even periodicities.
    """
    name = df['NAME'].replace('-', ' ')
    entry = ['DIHEdral %s MULT 4 %4.3f %d %4.2f \n' % (name, df['V1'], 1, 0.00)]
    for pot in range(2, 5):
        phase = 180.00 * abs(pot % 2 - 1)
        entry.append('     %4.3f %d %4.2f \n' % (df['V' + str(pot)], pot, phase))
    return entry
def Boss2CharmmRTF(num2typ2symb, Qs, resid, bnd_df, angs,props,imps):
    """Write a CNS/X-PLOR topology (.top) file for residue *resid*.

    Args:
        num2typ2symb: per-atom records; index 0 is the PDB atom name and
            index 2 the atom type symbol.
        Qs: per-atom nonbonded records; index 1 (charge) is written into
            the ATOM lines.
        resid (str): residue name; output goes to ``<resid>.top``.
        bnd_df (pandas.DataFrame): bond table with cl1/cl2 atom indices
            and a UR column used for improper centering.
        angs, props: unused here; kept for call-signature compatibility.
        imps (pandas.DataFrame): improper torsion quadruples (I/J/K/L).

    Returns:
        None; the topology file is written as a side effect.
    """
    charges = [float(Qs[i][1]) for i in range(len(Qs))]  # NOTE(review): computed but never used
    rtf = open(resid + '.top', 'w+')
    rtf.write('Remarks generated XPLOR-TOP file for Bonvin group (by <NAME>)\n')
    rtf.write('\nset echo=false end\n')
    rtf.write('\nautogenerate angles=True dihedrals=True end\n')
    rtf.write('{ atomType mass }\n')
    # One MASS line per atom type; the element is inferred from the PDB name.
    Mass = ['MASS %s %3.4f \n' % (num2typ2symb[i][2], bossElement2Mass(bossPdbAtom2Element(num2typ2symb[i][0]))) for i in range(len(Qs))]
    for i in range(len(Mass)):
        rtf.write('%s' % Mass[i])
    rtf.write('\nRESIdue %5s\n' % (resid))
    rtf.write('\nGROUP\n')
    rtf.write('\n{ atomName atomType Charge } \n')
    for i in range(len(Qs)):
        rtf.write('ATOM %6s TYPE= %6s CHARGE= %8s END\n' % (num2typ2symb[i][0], num2typ2symb[i][2], Qs[i][1]))
    rtf.write('\n{ Bonds: atomName1 atomName2 } \n')
    for (x, y) in zip(bnd_df.cl1, bnd_df.cl2):
        rtf.write('BOND %s %s \n' % (num2typ2symb[x][0], num2typ2symb[y][0]))
    # Doubled bond list so tor_cent can match either bond direction.
    bndlist = list(bnd_df.UR) + list(bnd_df.UR)
    imp_list = []
    for i,dat in imps.iterrows():
        # tor_cent reorders the quadruple so the central atom comes first.
        ndata =tor_cent([dat.I, dat.J, dat.K, dat.L], bndlist)
        sdata = [num2typ2symb[j][0] for j in ndata]
        imp_list.append('-'.join(sdata))
    rtf.write('\n{ Improper Dihedrals: aName1 aName2 aName3 aName4 }\n')
    for i in imp_list:
        rtf.write('IMPRoper %s \n' % (i.replace("-", " ")))
    rtf.write('\nEND {RESIdue UNK}\n')
    rtf.write('\nset echo=true end\n')
    rtf.close()
    return None
def Boss2CharmmPRM(resid, num2typ2symb, Qs, bnd_df, ang_df, tor_df):
    """Write a CNS/X-PLOR parameter (.param) file for residue *resid*.

    Args:
        resid (str): residue name; output goes to ``<resid>.param``.
        num2typ2symb: per-atom records; index 2 is the atom type symbol.
        Qs: per-atom nonbonded records [name, charge, sigma, epsilon].
        bnd_df (pandas.DataFrame): bonds (cl1, cl2, KIJ, RIJ).
        ang_df (pandas.DataFrame): angles (cl1, cl2, cl3, K, R).
        tor_df (pandas.DataFrame): torsions (NAME, TY, V1..V4).

    Returns:
        None; the parameter file is written as a side effect.
    """
    #### COLLECTING NONBONDING PART #######
    prm = open(resid + '.param', 'w+')
    prm.write('Remarks generated XPLOR-TOP file for Bonvin group (by <NAME>)\n')
    prm.write('\nset echo=false end \n')
    prm.write('\n{ Bonds: atomType1 atomType2 kb r0 } \n')
    for i in bnd_df.index:
        prm.write('BOND %6s %6s %8.1f %8.4f \n' % (num2typ2symb[bnd_df.cl1[i]][
            2], num2typ2symb[bnd_df.cl2[i]][2], bnd_df.KIJ[i], bnd_df.RIJ[i]))
    prm.write('\n{ Angles: aType1 aType2 aType3 kt t0 }\n')
    for i in ang_df.index:
        prm.write('ANGLe %5s %5s %5s %8.1f %8.2f \n' % (num2typ2symb[ang_df.cl1[i]][2], num2typ2symb[
            ang_df.cl2[i]][2], num2typ2symb[ang_df.cl3[i]][2], ang_df.K[i], ang_df.R[i]))
    prm.write('\n{ Proper Dihedrals: aType1 aType2 aType3 aType4 kt period phase } \n')
    if len(tor_df.index) > 0:
        tor_df = tor_df.drop_duplicates(['NAME', 'TY'])
        pro_df = tor_df[tor_df.TY == 'Proper']
        for i in list(pro_df.index):
            # DataFrame.ix was removed in pandas 1.0; .loc is the label-based
            # drop-in replacement for these index labels.
            ndf = pro_df.loc[i]
            pro_out = retDihed(ndf.to_dict())
            for i in range(4):
                prm.write('%s' % pro_out[i])
    prm.write(
        'DIHEdral X X X X MULT 1 0.000 1 0.00 ! WILD CARD FOR MISSING TORSION PARAMETERS\n')
    prm.write('\n{ Improper Dihedrals: aType1 aType2 aType3 aType4 kt period phase }\n')
    imp_df = tor_df[tor_df.TY == 'Improper']
    for i in list(imp_df.index):
        ndf = tor_df.loc[i]
        imp_out = retDihedImp(ndf.to_dict())
        for i in range(len(imp_out)):
            prm.write('%s' % imp_out[i])
    prm.write(
        'IMPRoper X X X X 0.000 1 0.00 ! WILD CARD FOR MISSING IMPROPER PARAMETERS \n')
    prm.write(
        '\n{ Nonbonded: Type Emin sigma; (1-4): Emin/2 sigma }\n')
    # NONBonded: full and 1-4 (epsilon halved) Lennard-Jones parameters.
    Qlines = ['NONBonded %5s %11.6f %11.6f %11.6f %11.6f \n' %
              (num2typ2symb[i][2], float(Qs[i][3]), float(Qs[i][2]), float(Qs[i][3]) * 0.50,float(Qs[i][2])) for i in range(len(Qs))]
    for i in range(len(Qlines)):
        prm.write('%s' % Qlines[i])
    prm.write('\nset echo=true end \n')
    prm.close()
    return None
def Boss2CharmmTorsion(bnd_df, num2opls, st_no, molecule_data, num2typ2symb):
    """Build the dihedral (torsion) parameter table from BOSS output.

    Parameters
    ----------
    bnd_df : pandas.DataFrame
        Bond table (needs column ``UR``) used to classify torsions.
    num2opls : dict
        Atom number -> OPLS type (kept for interface compatibility).
    st_no : int
        Starting atom number offset subtracted from Z-matrix indices.
    molecule_data : object
        BOSS reader object exposing ``MolData`` with 'TORSIONS', 'ATOMS'
        and 'ADD_DIHED' entries.
    num2typ2symb : dict
        Atom number -> list whose [0] is the atom name and [2] the type symbol.

    Returns
    -------
    pandas.DataFrame
        One row per dihedral with Fourier coefficients V1-V4, atom indices
        I-L, 'Proper'/'Improper' classification and naming columns.
    """
    dhd = []
    for line in molecule_data.MolData['TORSIONS']:
        dhd.append([float(l) for l in line])
    # Komm = Vopls / 2
    dhd = np.array(dhd) / 2.0
    dhd_df = pd.DataFrame(dhd, columns=['V1', 'V2', 'V3', 'V4'])
    ats = []
    for line in molecule_data.MolData['ATOMS'][3:]:
        dt = [line.split()[0], line.split()[4],
              line.split()[6], line.split()[8]]
        ats.append([int(d) for d in dt])
    for line in molecule_data.MolData['ADD_DIHED']:
        ats.append([int(l) for l in line])
    assert len(ats) == len(
        dhd), 'Number of Dihedral angles in Zmatrix and Out file dont match'
    ats = np.array(ats) - st_no
    # Clip negative atom numbers (dummy atoms) to zero
    ats[ats < 0] = 0
    at_df = pd.DataFrame(ats, columns=['I', 'J', 'K', 'L'])
    # join_axes was removed in pandas 0.25; both frames share the same
    # RangeIndex, so concatenating and reindexing on at_df is equivalent
    final_df = pd.concat([dhd_df, at_df], axis=1).reindex(at_df.index)
    bndlist = list(bnd_df.UR) + (list(bnd_df.UR))
    # A torsion is 'Proper' when its four atoms form a chain of 3 bonds
    final_df['TY'] = ['Proper' if ucomb(list([final_df.I[n], final_df.J[n], final_df.K[
        n], final_df.L[n]]), bndlist) == 3 else 'Improper' for n in range(len(final_df.I))]
    final_df['TI'] = [num2typ2symb[j][2] for j in final_df.I]
    final_df['TJ'] = [num2typ2symb[j][2] for j in final_df.J]
    final_df['TK'] = [num2typ2symb[j][2] for j in final_df.K]
    final_df['TL'] = [num2typ2symb[j][2] for j in final_df.L]
    final_df['SYMB'] = [' '.join([num2typ2symb[final_df.I[i]][0], num2typ2symb[final_df.J[i]][
        0], num2typ2symb[final_df.K[i]][0], num2typ2symb[final_df.L[i]][0]]) for i in final_df.index]
    if len(final_df.index) > 0:
        final_df['NAME'] = final_df.TI + '-' + final_df.TJ + \
            '-' + final_df.TK + '-' + final_df.TL
    return final_df
def boss2CharmmBond(molecule_data, st_no):
    """Build the bond parameter table from BOSS output.

    Parameters
    ----------
    molecule_data : object
        BOSS reader object; ``MolData['BONDS']`` is a dict of columns
        including 'cl1' and 'cl2' (atom numbers). NOTE: the atom-number
        columns are renumbered in place (side effect on the caller's dict).
    st_no : int
        Starting atom number offset subtracted from the bond indices.

    Returns
    -------
    pandas.DataFrame
        Bond table with unique forward/reverse pairing keys 'UF' and 'UR'
        (Cantor-style pairing of the two atom numbers).
    """
    bdat = molecule_data.MolData['BONDS']
    # Renumber atoms relative to st_no, clipping negatives (dummy atoms) to 0
    bdat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl1']]
    bdat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in bdat['cl2']]
    bnd_df = pd.DataFrame(bdat)
    bnd_df['UF'] = ((bnd_df.cl1 + bnd_df.cl2) *
                    (bnd_df.cl1 + bnd_df.cl2 + 1) * 0.5) + bnd_df.cl2
    bnd_df['UR'] = ((bnd_df.cl1 + bnd_df.cl2) *
                    (bnd_df.cl1 + bnd_df.cl2 + 1) * 0.5) + bnd_df.cl1
    # NOTE(review): the original also built an hb_df via a positional-axis
    # drop (removed in pandas 2.0) and never used it; dead code removed.
    return bnd_df
def boss2CharmmAngle(anglefile, num2opls, st_no, num2typ2symb):
    """Build the angle parameter table from BOSS output.

    Parameters
    ----------
    anglefile : dict
        Angle columns including 'cl1', 'cl2', 'cl3' (atom numbers), 'K'
        (force constant) and 'R' (equilibrium angle). NOTE: atom-number
        columns are renumbered in place (side effect on the caller's dict).
    num2opls : dict
        Atom number -> OPLS type (kept for interface compatibility; the
        original computed a 'TY' column from it that was immediately
        overwritten, so that dead computation has been removed).
    st_no : int
        Starting atom number offset subtracted from the angle indices.
    num2typ2symb : dict
        Atom number -> list whose element [2] is the type symbol.

    Returns
    -------
    pandas.DataFrame
        Angles with K > 0, plus type columns 'TI', 'TJ', 'TK' and a
        space-joined 'TY' triple.
    """
    adat = anglefile
    # Renumber atoms relative to st_no, clipping negatives to 0
    adat['cl1'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl1']]
    adat['cl2'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl2']]
    adat['cl3'] = [x - st_no if not x - st_no < 0 else 0 for x in adat['cl3']]
    ang_df = pd.DataFrame(adat)
    # Keep only angles with a non-zero force constant
    ang_df = ang_df[ang_df.K > 0]
    ang_df['TI'] = [num2typ2symb[ang_df.cl1[i]][2] for i in ang_df.index]
    ang_df['TJ'] = [num2typ2symb[ang_df.cl2[i]][2] for i in ang_df.index]
    ang_df['TK'] = [num2typ2symb[ang_df.cl3[i]][2] for i in ang_df.index]
    ang_df['TY'] = np.array([i + ' ' + j + ' ' + k
                             for i, j, k in zip(ang_df.TI, ang_df.TJ, ang_df.TK)])
    return ang_df
def bossData(molecule_data):
    """Collect atom typing information from a BOSS reader object.

    Parameters
    ----------
    molecule_data : object
        BOSS reader object exposing ``MolData`` with 'ATOMS' and 'Q_LJ'.

    Returns
    -------
    tuple
        (types, Qs, num2opls, st_no, num2typ2symb) where ``types`` pairs
        atom names with 'opls_'-prefixed type ids, ``Qs`` is the raw
        charge/LJ table, ``num2opls`` maps atom number to OPLS type,
        ``st_no`` is the fixed starting atom offset (3), and
        ``num2typ2symb`` maps atom number to
        [name, opls type, symbol, element, mass, opls type].
    """
    ats_file = molecule_data.MolData['ATOMS']
    types = [[line.split()[1], 'opls_' + line.split()[2]] for line in ats_file]
    st_no = 3
    Qs = molecule_data.MolData['Q_LJ']
    assert len(Qs) == len(types), 'Please check the at_info and Q_LJ_dat files'
    num2opls = {idx: row[0] for idx, row in enumerate(Qs)}
    num2typ2symb = {idx: types[idx] for idx in range(len(Qs))}
    for idx in range(len(Qs)):
        entry = num2typ2symb[idx]
        # 4-char symbol: element letter(s) + last 3 chars of the opls id
        symbol = bossPdbAtom2Element(entry[0]) + entry[1][-3:]
        entry.append(symbol[0:4])
        entry.append(bossPdbAtom2Element(entry[0]))
        entry.append(bossElement2Mass(entry[3]))
        entry.append(Qs[idx][0])
    return (types, Qs, num2opls, st_no, num2typ2symb)
def Boss2Charmm(resid, molecule_data):
    """Drive the BOSS -> XPLOR/CNS conversion for one residue.

    Collects typing, bond, angle and torsion data, then writes the
    topology (via Boss2CharmmRTF) and parameter (via Boss2CharmmPRM) files.

    Parameters
    ----------
    resid : str
        Residue name used for the output file prefix.
    molecule_data : object
        BOSS reader object exposing ``MolData``.

    Returns
    -------
    None
    """
    types, Qs, num2opls, st_no, num2typ2symb = bossData(molecule_data)
    bond_data = boss2CharmmBond(molecule_data, st_no)
    angle_data = boss2CharmmAngle(
        molecule_data.MolData['ANGLES'], num2opls, st_no, num2typ2symb)
    torsion_data = Boss2CharmmTorsion(
        bond_data, num2opls, st_no, molecule_data, num2typ2symb)
    proper_symbols = list(torsion_data[torsion_data.TY == 'Proper']['SYMB'])
    improper_rows = torsion_data[torsion_data.TY == 'Improper']
    Boss2CharmmRTF(num2typ2symb, Qs, resid, bond_data,
                   list(angle_data['TY']), proper_symbols, improper_rows)
    Boss2CharmmPRM(resid, num2typ2symb, Qs, bond_data, angle_data, torsion_data)
    return None
def mainBOSS2XPLOR(resid, clu=False):
    """Entry point: load a pickled molecule and write XPLOR/CNS files.

    Parameters
    ----------
    resid : str
        Residue name; the pickled BOSS reader object is read from
        ``<resid>.p`` and outputs are written as ``<resid>.top`` /
        ``<resid>.param``.
    clu : bool, optional
        Kept for interface compatibility (unused here).

    Returns
    -------
    None
    """
    # SECURITY NOTE: pickle.load executes arbitrary code from the file;
    # the .p file is assumed to be trusted (produced by this package).
    # Context manager fixes the leaked file handle of the original.
    with open(resid + ".p", "rb") as pfile:
        mol = pickle.load(pfile)
    Boss2Charmm(resid, mol)
    return None
| [
"pandas.DataFrame",
"LigParGen.BOSSReader.tor_cent",
"numpy.array",
"LigParGen.BOSSReader.bossPdbAtom2Element",
"LigParGen.BOSSReader.bossElement2Mass",
"pandas.concat"
] | [((5333, 5346), 'numpy.array', 'np.array', (['dhd'], {}), '(dhd)\n', (5341, 5346), True, 'import numpy as np\n'), ((5437, 5488), 'pandas.DataFrame', 'pd.DataFrame', (['dhd'], {'columns': "['V1', 'V2', 'V3', 'V4']"}), "(dhd, columns=['V1', 'V2', 'V3', 'V4'])\n", (5449, 5488), True, 'import pandas as pd\n'), ((6094, 6141), 'pandas.DataFrame', 'pd.DataFrame', (['ats'], {'columns': "['I', 'J', 'K', 'L']"}), "(ats, columns=['I', 'J', 'K', 'L'])\n", (6106, 6141), True, 'import pandas as pd\n'), ((6157, 6216), 'pandas.concat', 'pd.concat', (['[dhd_df, at_df]'], {'axis': '(1)', 'join_axes': '[at_df.index]'}), '([dhd_df, at_df], axis=1, join_axes=[at_df.index])\n', (6166, 6216), True, 'import pandas as pd\n'), ((7317, 7335), 'pandas.DataFrame', 'pd.DataFrame', (['bdat'], {}), '(bdat)\n', (7329, 7335), True, 'import pandas as pd\n'), ((8066, 8084), 'pandas.DataFrame', 'pd.DataFrame', (['adat'], {}), '(adat)\n', (8078, 8084), True, 'import pandas as pd\n'), ((2517, 2564), 'LigParGen.BOSSReader.tor_cent', 'tor_cent', (['[dat.I, dat.J, dat.K, dat.L]', 'bndlist'], {}), '([dat.I, dat.J, dat.K, dat.L], bndlist)\n', (2525, 2564), False, 'from LigParGen.BOSSReader import bossPdbAtom2Element, bossElement2Mass, ucomb, tor_cent\n'), ((5933, 5946), 'numpy.array', 'np.array', (['ats'], {}), '(ats)\n', (5941, 5946), True, 'import numpy as np\n'), ((9216, 9255), 'LigParGen.BOSSReader.bossPdbAtom2Element', 'bossPdbAtom2Element', (['num2typ2symb[i][0]'], {}), '(num2typ2symb[i][0])\n', (9235, 9255), False, 'from LigParGen.BOSSReader import bossPdbAtom2Element, bossElement2Mass, ucomb, tor_cent\n'), ((9354, 9393), 'LigParGen.BOSSReader.bossPdbAtom2Element', 'bossPdbAtom2Element', (['num2typ2symb[i][0]'], {}), '(num2typ2symb[i][0])\n', (9373, 9393), False, 'from LigParGen.BOSSReader import bossPdbAtom2Element, bossElement2Mass, ucomb, tor_cent\n'), ((9426, 9462), 'LigParGen.BOSSReader.bossElement2Mass', 'bossElement2Mass', (['num2typ2symb[i][3]'], {}), '(num2typ2symb[i][3])\n', (9442, 
9462), False, 'from LigParGen.BOSSReader import bossPdbAtom2Element, bossElement2Mass, ucomb, tor_cent\n'), ((1828, 1867), 'LigParGen.BOSSReader.bossPdbAtom2Element', 'bossPdbAtom2Element', (['num2typ2symb[i][0]'], {}), '(num2typ2symb[i][0])\n', (1847, 1867), False, 'from LigParGen.BOSSReader import bossPdbAtom2Element, bossElement2Mass, ucomb, tor_cent\n')] |
"""Module containing AOI loss calculation methods"""
from pvfactors.config import DISTANCE_TOLERANCE
from pvfactors.geometry.timeseries import (
TsPointCoords, TsSurface, TsLineCoords)
from pvfactors import PVFactorsError
import pvlib
from pvlib.tools import cosd
import numpy as np
class AOIMethods:
    """Class containing methods related to calculating AOI losses for
    :py:class:`~pvfactors.geometry.pvarray.OrderedPVArray` objects."""

    def __init__(self, faoi_fn_front, faoi_fn_back, n_integral_sections=300):
        """Instantiate class with faoi function and number of sections to use
        to calculate integrals of view factors with faoi losses

        Parameters
        ----------
        faoi_fn_front : function
            Function which takes a list (or numpy array) of incidence angles
            measured from the surface horizontal
            (with values from 0 to 180 deg) and returns the fAOI values for
            the front side of PV rows
        faoi_fn_back : function
            Function which takes a list (or numpy array) of incidence angles
            measured from the surface horizontal
            (with values from 0 to 180 deg) and returns the fAOI values for
            the back side of PV rows
        n_integral_sections : int, optional
            Number of integral divisions of the 0 to 180 deg interval
            to use for the fAOI loss integral (default = 300)
        """
        # Check that faoi fns were passed
        faoi_fns_ok = callable(faoi_fn_front) and callable(faoi_fn_back)
        if not faoi_fns_ok:
            raise PVFactorsError("The faoi_fn passed to the AOI methods are "
                                 "not callable. Please check the fAOI "
                                 "functions again")
        self.faoi_fn_front = faoi_fn_front
        self.faoi_fn_back = faoi_fn_back
        self.n_integral_sections = n_integral_sections
        # The following will be updated at fitting time
        self.interval = None
        self.aoi_angles_low = None
        self.aoi_angles_high = None
        self.integrand_front = None
        self.integrand_back = None

    def fit(self, n_timestamps):
        """Fit the AOI methods to timeseries inputs: create all the necessary
        integration attributes.

        Parameters
        ----------
        n_timestamps : int
            Number of simulation timestamps
        """
        # Will use x values at the middle of the integral sections
        aoi_angles = np.linspace(0., 180., num=self.n_integral_sections + 1)
        # Assumes that at least 2 aoi angle values, otherwise what's the point
        self.interval = aoi_angles[1] - aoi_angles[0]
        # Get integral intervals' low, high, and middle points
        aoi_angles_low = aoi_angles[:-1]
        aoi_angles_high = aoi_angles_low + self.interval
        aoi_angles_middle = aoi_angles_low + self.interval / 2.
        # Calculate faoi values using middle points of integral intervals
        faoi_front = self.faoi_fn_front(aoi_angles_middle)
        faoi_back = self.faoi_fn_back(aoi_angles_middle)
        # Calculate small view factor values for each section
        vf_values = self._vf(aoi_angles_low, aoi_angles_high)
        # Multiply to get integrand
        integrand_front = faoi_front * vf_values
        integrand_back = faoi_back * vf_values
        # Replicate these values for all timestamps such that shapes
        # become: [n_timestamps, n_integral_sections]
        self.aoi_angles_low = np.tile(aoi_angles_low, (n_timestamps, 1))
        self.aoi_angles_high = np.tile(aoi_angles_high, (n_timestamps, 1))
        self.integrand_front = np.tile(integrand_front, (n_timestamps, 1))
        self.integrand_back = np.tile(integrand_back, (n_timestamps, 1))

    def vf_aoi_pvrow_to_sky(self, ts_pvrows, ts_ground, tilted_to_left,
                            vf_matrix):
        """Calculate the view factors between timeseries PV row surface and sky
        while accounting for AOI losses,
        and assign values to the passed view factor matrix using
        the surface indices.

        Parameters
        ----------
        ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
            List of timeseries PV rows in the PV array
        ts_ground : :py:class:`~pvfactors.geometry.timeseries.TsGround`
            Timeseries ground of the PV array
        tilted_to_left : list of bool
            Flags indicating when the PV rows are strictly tilted to the left
        vf_matrix : np.ndarray
            View factor matrix to update during calculation. Should have 3
            dimensions as follows: [n_surfaces, n_surfaces, n_timesteps]
        """
        # By convention the sky occupies the last index of the matrix
        sky_index = vf_matrix.shape[0] - 1
        # --- Build list of dummy sky surfaces
        # create sky left open area
        pt_1 = TsPointCoords(ts_ground.x_min * np.ones_like(tilted_to_left),
                              ts_ground.y_ground * np.ones_like(tilted_to_left))
        pt_2 = ts_pvrows[0].highest_point
        sky_left = TsSurface(TsLineCoords(pt_1, pt_2))
        # create sky right open area
        pt_1 = TsPointCoords(ts_ground.x_max * np.ones_like(tilted_to_left),
                              ts_ground.y_ground * np.ones_like(tilted_to_left))
        pt_2 = ts_pvrows[-1].highest_point
        sky_right = TsSurface(TsLineCoords(pt_2, pt_1))
        # Add sky surfaces in-between PV rows
        dummy_sky_surfaces = [sky_left]
        for idx_pvrow, ts_pvrow in enumerate(ts_pvrows[:-1]):
            right_ts_pvrow = ts_pvrows[idx_pvrow + 1]
            pt_1 = ts_pvrow.highest_point
            pt_2 = right_ts_pvrow.highest_point
            sky_surface = TsSurface(TsLineCoords(pt_1, pt_2))
            dummy_sky_surfaces.append(sky_surface)
        # Add sky right open area
        dummy_sky_surfaces.append(sky_right)

        # Now calculate vf_aoi for all PV row surfaces to sky
        for idx_pvrow, ts_pvrow in enumerate(ts_pvrows):
            # Get dummy sky surfaces on either side of this PV row
            sky_left = dummy_sky_surfaces[idx_pvrow]
            sky_right = dummy_sky_surfaces[idx_pvrow + 1]
            # Calculate vf_aoi for surfaces in PV row
            # front side
            front = ts_pvrow.front
            for front_surf in front.all_ts_surfaces:
                vf_aoi_left = self._vf_aoi_surface_to_surface(
                    front_surf, sky_left, is_back=False)
                vf_aoi_right = self._vf_aoi_surface_to_surface(
                    front_surf, sky_right, is_back=False)
                # When tilted to the left, the front side sees the left sky
                vf_aoi = np.where(tilted_to_left, vf_aoi_left, vf_aoi_right)
                vf_matrix[front_surf.index, sky_index, :] = vf_aoi
            # back side
            back = ts_pvrow.back
            for back_surf in back.all_ts_surfaces:
                vf_aoi_left = self._vf_aoi_surface_to_surface(
                    back_surf, sky_left, is_back=True)
                vf_aoi_right = self._vf_aoi_surface_to_surface(
                    back_surf, sky_right, is_back=True)
                # Mirror of the front side: back sees the opposite sky area
                vf_aoi = np.where(tilted_to_left, vf_aoi_right, vf_aoi_left)
                vf_matrix[back_surf.index, sky_index, :] = vf_aoi

    def vf_aoi_pvrow_to_pvrow(self, ts_pvrows, tilted_to_left, vf_matrix):
        """Calculate the view factors between timeseries PV row surfaces
        while accounting for AOI losses,
        and assign values to the passed view factor matrix using
        the surface indices.

        Parameters
        ----------
        ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
            List of timeseries PV rows in the PV array
        tilted_to_left : list of bool
            Flags indicating when the PV rows are strictly tilted to the left
        vf_matrix : np.ndarray
            View factor matrix to update during calculation. Should have 3
            dimensions as follows: [n_surfaces, n_surfaces, n_timesteps]
        """
        # Only adjacent PV row pairs can see each other
        for idx_pvrow, ts_pvrow in enumerate(ts_pvrows[:-1]):
            # Get the next pv row
            right_ts_pvrow = ts_pvrows[idx_pvrow + 1]
            # front side of this row sees the back side of the next row
            front = ts_pvrow.front
            for surf_i in front.all_ts_surfaces:
                i = surf_i.index
                for surf_j in right_ts_pvrow.back.all_ts_surfaces:
                    j = surf_j.index
                    # vf aoi from i to j
                    vf_i_to_j = self._vf_aoi_surface_to_surface(
                        surf_i, surf_j, is_back=False)
                    # front-to-next-back only possible when tilted right
                    vf_i_to_j = np.where(tilted_to_left, 0., vf_i_to_j)
                    # vf aoi from j to i
                    vf_j_to_i = self._vf_aoi_surface_to_surface(
                        surf_j, surf_i, is_back=True)
                    vf_j_to_i = np.where(tilted_to_left, 0., vf_j_to_i)
                    # save results
                    vf_matrix[i, j, :] = vf_i_to_j
                    vf_matrix[j, i, :] = vf_j_to_i
            # back side of this row sees the front side of the next row
            back = ts_pvrow.back
            for surf_i in back.all_ts_surfaces:
                i = surf_i.index
                for surf_j in right_ts_pvrow.front.all_ts_surfaces:
                    j = surf_j.index
                    # vf aoi from i to j
                    vf_i_to_j = self._vf_aoi_surface_to_surface(
                        surf_i, surf_j, is_back=True)
                    # back-to-next-front only possible when tilted left
                    vf_i_to_j = np.where(tilted_to_left, vf_i_to_j, 0.)
                    # vf aoi from j to i
                    vf_j_to_i = self._vf_aoi_surface_to_surface(
                        surf_j, surf_i, is_back=False)
                    vf_j_to_i = np.where(tilted_to_left, vf_j_to_i, 0.)
                    # save results
                    vf_matrix[i, j, :] = vf_i_to_j
                    vf_matrix[j, i, :] = vf_j_to_i

    def vf_aoi_pvrow_to_gnd(self, ts_pvrows, ts_ground, tilted_to_left,
                            vf_aoi_matrix):
        """Calculate the view factors between timeseries PV row and ground
        surfaces while accounting for non-diffuse AOI losses,
        and assign it to the passed view factor aoi matrix using
        the surface indices.

        Notes
        -----
        This assumes that the PV row surfaces are infinitesimal (very small)

        Parameters
        ----------
        ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
            List of timeseries PV rows in the PV array
        ts_ground : :py:class:`~pvfactors.geometry.timeseries.TsGround`
            Timeseries ground of the PV array
        tilted_to_left : list of bool
            Flags indicating when the PV rows are strictly tilted to the left
        vf_aoi_matrix : np.ndarray
            View factor aoi matrix to update during calculation. Should have 3
            dimensions as follows: [n_surfaces, n_surfaces, n_timesteps]
        """
        n_pvrows = len(ts_pvrows)
        for idx_pvrow, ts_pvrow in enumerate(ts_pvrows):
            # Separate gnd surfaces depending on side of the row's cut point
            left_gnd_surfaces = ts_ground.ts_surfaces_side_of_cut_point(
                'left', idx_pvrow)
            right_gnd_surfaces = ts_ground.ts_surfaces_side_of_cut_point(
                'right', idx_pvrow)
            # Front side
            front = ts_pvrow.front
            for pvrow_surf in front.all_ts_surfaces:
                ts_length = pvrow_surf.length
                i = pvrow_surf.index
                for gnd_surf in left_gnd_surfaces:
                    j = gnd_surf.index
                    vf_pvrow_to_gnd = (
                        self._vf_aoi_pvrow_surf_to_gnd_surf_obstruction(
                            pvrow_surf, idx_pvrow, n_pvrows,
                            tilted_to_left, ts_pvrows, gnd_surf, ts_length,
                            is_back=False, is_left=True))
                    vf_aoi_matrix[i, j, :] = vf_pvrow_to_gnd
                for gnd_surf in right_gnd_surfaces:
                    j = gnd_surf.index
                    vf_pvrow_to_gnd = (
                        self._vf_aoi_pvrow_surf_to_gnd_surf_obstruction(
                            pvrow_surf, idx_pvrow, n_pvrows,
                            tilted_to_left, ts_pvrows, gnd_surf, ts_length,
                            is_back=False, is_left=False))
                    vf_aoi_matrix[i, j, :] = vf_pvrow_to_gnd
            # Back side
            back = ts_pvrow.back
            for pvrow_surf in back.all_ts_surfaces:
                ts_length = pvrow_surf.length
                i = pvrow_surf.index
                for gnd_surf in left_gnd_surfaces:
                    j = gnd_surf.index
                    vf_pvrow_to_gnd = (
                        self._vf_aoi_pvrow_surf_to_gnd_surf_obstruction(
                            pvrow_surf, idx_pvrow, n_pvrows,
                            tilted_to_left, ts_pvrows, gnd_surf, ts_length,
                            is_back=True, is_left=True))
                    vf_aoi_matrix[i, j, :] = vf_pvrow_to_gnd
                for gnd_surf in right_gnd_surfaces:
                    j = gnd_surf.index
                    vf_pvrow_to_gnd = (
                        self._vf_aoi_pvrow_surf_to_gnd_surf_obstruction(
                            pvrow_surf, idx_pvrow, n_pvrows,
                            tilted_to_left, ts_pvrows, gnd_surf, ts_length,
                            is_back=True, is_left=False))
                    vf_aoi_matrix[i, j, :] = vf_pvrow_to_gnd

    def _vf_aoi_surface_to_surface(self, surf_1, surf_2, is_back=True):
        """Calculate view factor, while accounting from AOI losses, from
        surface 1 to surface 2.

        Notes
        -----
        This assumes that surf_1 is infinitesimal (very small)

        Parameters
        ----------
        surf_1 : :py:class:`~pvfactors.geometry.timeseries.TsSurface`
            Infinitesimal surface from which to calculate view factor with
            AOI losses
        surf_2 : :py:class:`~pvfactors.geometry.timeseries.TsSurface`
            Surface to which the view factor with AOI losses should be
            calculated
        is_back : bool
            Flag specifying whether pv row surface is on back or front side
            of PV row (Default = True)

        Returns
        -------
        vf_aoi : np.ndarray
            View factors with aoi losses from surface 1 to surface 2,
            dimension is [n_timesteps]
        """
        # skip calculation if either surface is empty (always zero length)
        skip = surf_1.is_empty or surf_2.is_empty
        if skip:
            vf_aoi = np.zeros_like(surf_2.length)
        else:
            # Get surface 1 params
            u_vector = surf_1.u_vector
            centroid = surf_1.centroid
            # Calculate AOI angles to both boundary points of surface 2
            aoi_angles_1 = self._calculate_aoi_angles(u_vector, centroid,
                                                      surf_2.b1)
            aoi_angles_2 = self._calculate_aoi_angles(u_vector, centroid,
                                                      surf_2.b2)
            low_aoi_angles = np.where(aoi_angles_1 < aoi_angles_2, aoi_angles_1,
                                      aoi_angles_2)
            high_aoi_angles = np.where(aoi_angles_1 < aoi_angles_2, aoi_angles_2,
                                       aoi_angles_1)
            # Calculate vf_aoi
            vf_aoi_raw = self._calculate_vf_aoi_wedge_level(
                low_aoi_angles, high_aoi_angles, is_back=is_back)
            # Should be zero where either of the surfaces have zero length
            vf_aoi = np.where((surf_1.length < DISTANCE_TOLERANCE)
                              | (surf_2.length < DISTANCE_TOLERANCE), 0.,
                              vf_aoi_raw)
        return vf_aoi

    def _vf_aoi_pvrow_surf_to_gnd_surf_obstruction(
            self, pvrow_surf, pvrow_idx, n_pvrows, tilted_to_left, ts_pvrows,
            gnd_surf, ts_length, is_back=True, is_left=True):
        """Calculate view factors from timeseries PV row surface to a
        timeseries ground surface, accounting for AOI losses.
        This will return the calculated view
        factors from the PV row surface to the ground surface.

        Notes
        -----
        This assumes that the PV row surfaces are infinitesimal (very small)

        Parameters
        ----------
        pvrow_surf : :py:class:`~pvfactors.geometry.timeseries.TsSurface`
            Timeseries PV row surface to use for calculation
        pvrow_idx : int
            Index of the timeseries PV row on the which the pvrow_surf is
        n_pvrows : int
            Number of timeseries PV rows in the PV array, and therefore number
            of shadows they cast on the ground
        tilted_to_left : list of bool
            Flags indicating when the PV rows are strictly tilted to the left
        ts_pvrows : list of :py:class:`~pvfactors.geometry.timeseries.TsPVRow`
            List of timeseries PV rows in the PV array
        gnd_surf : :py:class:`~pvfactors.geometry.timeseries.TsSurface`
            Timeseries ground surface to use for calculation
        ts_length : np.ndarray
            Length (width) of the timeseries PV row surface [m]
        is_back : bool
            Flag specifying whether pv row surface is on back or front side
            of PV row (Default = True)
        is_left : bool
            Flag specifying whether gnd surface is left of pv row cut point or
            not (Default = True)

        Returns
        -------
        vf_aoi_pvrow_to_gnd_surf : np.ndarray
            View factors aoi from timeseries PV row surface to timeseries
            ground surface, dimension is [n_timesteps]
        """
        # skip calculation if either surface is empty (always zero length)
        skip = pvrow_surf.is_empty or gnd_surf.is_empty
        if skip:
            vf_aoi = np.zeros_like(gnd_surf.length)
        else:
            centroid = pvrow_surf.centroid
            u_vector = pvrow_surf.u_vector
            # Edge rows have no neighbor on the ground-surface side
            no_obstruction = (is_left & (pvrow_idx == 0)) \
                or ((not is_left) & (pvrow_idx == n_pvrows - 1))
            if no_obstruction:
                # There is no obstruction to the ground surface
                aoi_angles_1 = self._calculate_aoi_angles(u_vector, centroid,
                                                          gnd_surf.b1)
                aoi_angles_2 = self._calculate_aoi_angles(u_vector, centroid,
                                                          gnd_surf.b2)
            else:
                # Get lowest point of obstructing point
                idx_obstructing_pvrow = (pvrow_idx - 1 if is_left
                                         else pvrow_idx + 1)
                pt_obstr = ts_pvrows[idx_obstructing_pvrow
                                     ].full_pvrow_coords.lowest_point
                # Adjust angle seen when there is obstruction
                aoi_angles_1 = self._calculate_aoi_angles_w_obstruction(
                    u_vector, centroid, gnd_surf.b1, pt_obstr, is_left)
                aoi_angles_2 = self._calculate_aoi_angles_w_obstruction(
                    u_vector, centroid, gnd_surf.b2, pt_obstr, is_left)

            low_aoi_angles = np.where(aoi_angles_1 < aoi_angles_2,
                                      aoi_angles_1, aoi_angles_2)
            high_aoi_angles = np.where(aoi_angles_1 < aoi_angles_2,
                                       aoi_angles_2, aoi_angles_1)
            vf_aoi_raw = self._calculate_vf_aoi_wedge_level(
                low_aoi_angles, high_aoi_angles, is_back=is_back)
            # Should be zero where either of the surfaces have zero length
            vf_aoi_raw = np.where((ts_length < DISTANCE_TOLERANCE)
                                  | (gnd_surf.length < DISTANCE_TOLERANCE), 0.,
                                  vf_aoi_raw)
            # Final result depends on whether front or back surface
            if is_left:
                vf_aoi = (np.where(tilted_to_left, 0., vf_aoi_raw) if is_back
                          else np.where(tilted_to_left, vf_aoi_raw, 0.))
            else:
                vf_aoi = (np.where(tilted_to_left, vf_aoi_raw, 0.) if is_back
                          else np.where(tilted_to_left, 0., vf_aoi_raw))

        return vf_aoi

    def _calculate_aoi_angles_w_obstruction(
            self, u_vector, centroid, point_gnd, point_obstr,
            gnd_surf_is_left):
        """Calculate AOI angles for a PV row surface of the
        :py:class:`~pvfactors.geometry.pvarray.OrderedPVArray` that sees
        a ground surface, while being potentially obstructed by another
        PV row

        Parameters
        ----------
        u_vector : np.ndarray
            Direction vector of the surface for which to calculate AOI angles
        centroid : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Centroid point of PV row surface for which to calculate AOI angles
        point_gnd : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Point of ground surface that will determine AOI angle
        point_obstr : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Potentially obstructing point for the view aoi angle calculation
        gnd_surf_is_left : bool
            Flag specifying whether ground surface is left of PV row's cut
            point or not

        Returns
        -------
        np.ndarray
            AOI angles formed by remote point and centroid on surface,
            measured against surface direction vector, accounting for
            potential obstruction [degrees]
        """
        if point_obstr is None:
            # There is no obstruction
            point = point_gnd
        else:
            # Determine if there is obstruction by using the angles made by
            # specific strings with the x-axis
            alpha_pv = self._angle_with_x_axis(point_gnd, centroid)
            alpha_ob = self._angle_with_x_axis(point_gnd, point_obstr)
            if gnd_surf_is_left:
                is_obstructing = alpha_pv > alpha_ob
            else:
                is_obstructing = alpha_pv < alpha_ob
            # Use the obstructing point instead of the ground point when
            # the line of sight is blocked
            x = np.where(is_obstructing, point_obstr.x, point_gnd.x)
            y = np.where(is_obstructing, point_obstr.y, point_gnd.y)
            point = TsPointCoords(x, y)

        aoi_angles = self._calculate_aoi_angles(u_vector, centroid, point)
        return aoi_angles

    def _calculate_vf_aoi_wedge_level(self, low_angles, high_angles,
                                      is_back=True):
        """Calculate faoi modified view factors for a wedge defined by
        low and high angles.

        Parameters
        ----------
        low_angles : np.ndarray
            Low AOI angles (between 0 and 180 deg), length = n_timestamps
        high_angles : np.ndarray
            High AOI angles (between 0 and 180 deg), length = n_timestamps.
            Should be bigger than ``low_angles``
        is_back : bool
            Flag specifying whether pv row surface is on back or front side
            of PV row (Default = True)

        Returns
        -------
        np.ndarray
            faoi modified view factors for wedge
            shape = (n_timestamps, )
        """
        # Calculate integrand: all d_vf_aoi values
        faoi_integrand = self._calculate_vfaoi_integrand(
            low_angles, high_angles, is_back=is_back)
        # Total vf_aoi will be sum of all smaller d_vf_aoi values
        total_vf_aoi = faoi_integrand.sum(axis=1)
        # Make sure vf is counted as zero if the wedge is super small
        total_vf_aoi = np.where(
            np.abs(high_angles - low_angles) < DISTANCE_TOLERANCE, 0.,
            total_vf_aoi)

        return total_vf_aoi

    def _calculate_vfaoi_integrand(self, low_angles, high_angles,
                                   is_back=True):
        """
        Calculate the timeseries view factors with aoi loss integrand
        given the low and high angles that define the surface.

        Parameters
        ----------
        low_angles : np.ndarray
            Low AOI angles (between 0 and 180 deg), length = n_timestamps
        high_angles : np.ndarray
            High AOI angles (between 0 and 180 deg), length = n_timestamps.
            Should be bigger than ``low_angles``
        is_back : bool
            Flag specifying whether pv row surface is on back or front side
            of PV row (Default = True)

        Returns
        -------
        np.ndarray
            vf_aoi integrand values for all timestamps
            shape = (n_timestamps, n_integral_sections)
        """
        # Turn into dimension: [n_timestamps, n_integral_sections]
        low_angles_mat = np.tile(low_angles, (self.n_integral_sections, 1)).T
        high_angles_mat = np.tile(high_angles, (self.n_integral_sections, 1)).T
        # Filter out integrand values outside of range
        include_integral_section = ((low_angles_mat <= self.aoi_angles_high) &
                                    (high_angles_mat > self.aoi_angles_low))
        # The integrand values are different for front and back sides
        if is_back:
            faoi_integrand = np.where(include_integral_section,
                                      self.integrand_back, 0.)
        else:
            faoi_integrand = np.where(include_integral_section,
                                      self.integrand_front, 0.)

        return faoi_integrand

    @staticmethod
    def _calculate_aoi_angles(u_vector, centroid, point):
        """Calculate AOI angles from direction vector of surface,
        centroid point of that surface, and point from another surface

        Parameters
        ----------
        u_vector : np.ndarray
            Direction vector of the surface for which to calculate AOI angles
        centroid : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Centroid point of surface for which to calculate AOI angles
        point : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Point of remote surface that will determine AOI angle

        Returns
        -------
        np.ndarray
            AOI angles formed by remote point and centroid on surface,
            measured against surface direction vector [degrees]
        """
        v_vector = np.array([point.x - centroid.x, point.y - centroid.y])
        dot_product = u_vector[0, :] * v_vector[0, :] \
            + u_vector[1, :] * v_vector[1, :]
        u_norm = np.linalg.norm(u_vector, axis=0)
        v_norm = np.linalg.norm(v_vector, axis=0)
        cos_theta = dot_product / (u_norm * v_norm)
        # because of round off errors, cos_theta can be slightly > 1,
        # or slightly < -1, so clip it
        cos_theta = np.clip(cos_theta, -1., 1.)
        aoi_angles = np.rad2deg(np.arccos(cos_theta))
        return aoi_angles

    @staticmethod
    def _vf(aoi_1, aoi_2):
        """Calculate view factor from infinitesimal surface to infinite band.

        See illustration: http://www.thermalradiation.net/sectionb/B-71.html
        Here we're using angles measured from the horizontal

        Parameters
        ----------
        aoi_1 : np.ndarray
            Lower angles defining the infinite band
        aoi_2 : np.ndarray
            Higher angles defining the infinite band

        Returns
        -------
        np.ndarray
            View factors from infinitesimal surface to infinite strip
        """
        return 0.5 * np.abs(cosd(aoi_1) - cosd(aoi_2))

    @staticmethod
    def _angle_with_x_axis(pt_1, pt_2):
        """Angle with x-axis of vector going from pt_1 to pt_2

        Parameters
        ----------
        pt_1 : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Timeseries point coordinates of point 1
        pt_2 : :py:class:`~pvfactors.geometry.timeseries.TsPointCoords`
            Timeseries point coordinates of point 2

        Returns
        -------
        np.ndarray
            Angle between vector pt_1->pt_2 and x-axis
        """
        return np.arctan2(pt_2.y - pt_1.y, pt_2.x - pt_1.x)

    def rho_from_faoi_fn(self, is_back):
        """Calculate global average reflectivity from faoi function
        for either side of the PV row (requires calculating view factors)

        Parameters
        ----------
        is_back : bool
            Flag specifying whether to use front or back faoi function

        Returns
        -------
        rho_average : float
            Global average reflectivity value of surface
        """
        # NOTE(review): this overwrites self.interval as a side effect,
        # mirroring the setup done in fit() — confirm that is intended
        # Will use x values at the middle of the integral sections
        aoi_angles = np.linspace(0., 180., num=self.n_integral_sections + 1)
        # Assumes that at least 2 aoi angle values, otherwise what's the point
        self.interval = aoi_angles[1] - aoi_angles[0]
        # Get integral intervals' low, high, and middle points
        aoi_angles_low = aoi_angles[:-1]
        aoi_angles_high = aoi_angles_low + self.interval
        aoi_angles_middle = aoi_angles_low + self.interval / 2.
        # Calculate faoi values using middle points of integral intervals
        if is_back:
            faoi_values = self.faoi_fn_back(aoi_angles_middle)
        else:
            faoi_values = self.faoi_fn_front(aoi_angles_middle)
        # Calculate small view factor values for each section
        vf_values = self._vf(aoi_angles_low, aoi_angles_high)
        # Multiply to get integrand
        integrand_values = faoi_values * vf_values
        # Reflectivity = 1 - total absorbed fraction over the hemisphere
        return (1. - integrand_values.sum())
def faoi_fn_from_pvlib_sandia(pvmodule_name):
    """Generate a faoi function from a pvlib sandia PV module name

    Parameters
    ----------
    pvmodule_name : str
        Name of PV module in pvlib Sandia module database

    Returns
    -------
    faoi_function
        Function that returns positive loss values for numeric inputs
        between 0 and 180 degrees.
    """
    # Get Sandia module database from pvlib
    sandia_modules = pvlib.pvsystem.retrieve_sam('SandiaMod')
    # Grab pv module sandia coeffs from database
    pvmodule = sandia_modules[pvmodule_name]

    def fn(angles):
        """fAOI loss function: calculate how much light is absorbed at given
        incidence angles

        Parameters
        ----------
        angles : np.ndarray or list
            Angles measured from surface horizontal, from 0 to 180 deg

        Returns
        -------
        np.ndarray
            fAOI values
        """
        angles = np.array(angles) if isinstance(angles, list) else angles
        # Transform the inputs for the SAPM function: SAPM expects angles
        # measured from surface normal (0-90 deg)
        angles = np.where(angles >= 90, angles - 90, 90. - angles)
        # Use pvlib sapm aoi loss method
        return pvlib.iam.sapm(angles, pvmodule, upper=1.)

    return fn
| [
"pvfactors.PVFactorsError",
"numpy.arctan2",
"numpy.zeros_like",
"pvfactors.geometry.timeseries.TsPointCoords",
"numpy.ones_like",
"numpy.abs",
"pvlib.pvsystem.retrieve_sam",
"numpy.clip",
"numpy.where",
"numpy.array",
"numpy.tile",
"numpy.linspace",
"numpy.linalg.norm",
"pvlib.iam.sapm",
... | [((29991, 30031), 'pvlib.pvsystem.retrieve_sam', 'pvlib.pvsystem.retrieve_sam', (['"""SandiaMod"""'], {}), "('SandiaMod')\n", (30018, 30031), False, 'import pvlib\n'), ((2506, 2563), 'numpy.linspace', 'np.linspace', (['(0.0)', '(180.0)'], {'num': '(self.n_integral_sections + 1)'}), '(0.0, 180.0, num=self.n_integral_sections + 1)\n', (2517, 2563), True, 'import numpy as np\n'), ((3523, 3565), 'numpy.tile', 'np.tile', (['aoi_angles_low', '(n_timestamps, 1)'], {}), '(aoi_angles_low, (n_timestamps, 1))\n', (3530, 3565), True, 'import numpy as np\n'), ((3597, 3640), 'numpy.tile', 'np.tile', (['aoi_angles_high', '(n_timestamps, 1)'], {}), '(aoi_angles_high, (n_timestamps, 1))\n', (3604, 3640), True, 'import numpy as np\n'), ((3672, 3715), 'numpy.tile', 'np.tile', (['integrand_front', '(n_timestamps, 1)'], {}), '(integrand_front, (n_timestamps, 1))\n', (3679, 3715), True, 'import numpy as np\n'), ((3746, 3788), 'numpy.tile', 'np.tile', (['integrand_back', '(n_timestamps, 1)'], {}), '(integrand_back, (n_timestamps, 1))\n', (3753, 3788), True, 'import numpy as np\n'), ((26318, 26372), 'numpy.array', 'np.array', (['[point.x - centroid.x, point.y - centroid.y]'], {}), '([point.x - centroid.x, point.y - centroid.y])\n', (26326, 26372), True, 'import numpy as np\n'), ((26492, 26524), 'numpy.linalg.norm', 'np.linalg.norm', (['u_vector'], {'axis': '(0)'}), '(u_vector, axis=0)\n', (26506, 26524), True, 'import numpy as np\n'), ((26542, 26574), 'numpy.linalg.norm', 'np.linalg.norm', (['v_vector'], {'axis': '(0)'}), '(v_vector, axis=0)\n', (26556, 26574), True, 'import numpy as np\n'), ((26756, 26785), 'numpy.clip', 'np.clip', (['cos_theta', '(-1.0)', '(1.0)'], {}), '(cos_theta, -1.0, 1.0)\n', (26763, 26785), True, 'import numpy as np\n'), ((28058, 28102), 'numpy.arctan2', 'np.arctan2', (['(pt_2.y - pt_1.y)', '(pt_2.x - pt_1.x)'], {}), '(pt_2.y - pt_1.y, pt_2.x - pt_1.x)\n', (28068, 28102), True, 'import numpy as np\n'), ((28637, 28694), 'numpy.linspace', 'np.linspace', 
(['(0.0)', '(180.0)'], {'num': '(self.n_integral_sections + 1)'}), '(0.0, 180.0, num=self.n_integral_sections + 1)\n', (28648, 28694), True, 'import numpy as np\n'), ((30626, 30676), 'numpy.where', 'np.where', (['(angles >= 90)', '(angles - 90)', '(90.0 - angles)'], {}), '(angles >= 90, angles - 90, 90.0 - angles)\n', (30634, 30676), True, 'import numpy as np\n'), ((30732, 30775), 'pvlib.iam.sapm', 'pvlib.iam.sapm', (['angles', 'pvmodule'], {'upper': '(1.0)'}), '(angles, pvmodule, upper=1.0)\n', (30746, 30775), False, 'import pvlib\n'), ((1602, 1723), 'pvfactors.PVFactorsError', 'PVFactorsError', (['"""The faoi_fn passed to the AOI methods are not callable. Please check the fAOI functions again"""'], {}), "(\n 'The faoi_fn passed to the AOI methods are not callable. Please check the fAOI functions again'\n )\n", (1616, 1723), False, 'from pvfactors import PVFactorsError\n'), ((5069, 5093), 'pvfactors.geometry.timeseries.TsLineCoords', 'TsLineCoords', (['pt_1', 'pt_2'], {}), '(pt_1, pt_2)\n', (5081, 5093), False, 'from pvfactors.geometry.timeseries import TsPointCoords, TsSurface, TsLineCoords\n'), ((5362, 5386), 'pvfactors.geometry.timeseries.TsLineCoords', 'TsLineCoords', (['pt_2', 'pt_1'], {}), '(pt_2, pt_1)\n', (5374, 5386), False, 'from pvfactors.geometry.timeseries import TsPointCoords, TsSurface, TsLineCoords\n'), ((14562, 14590), 'numpy.zeros_like', 'np.zeros_like', (['surf_2.length'], {}), '(surf_2.length)\n', (14575, 14590), True, 'import numpy as np\n'), ((15060, 15125), 'numpy.where', 'np.where', (['(aoi_angles_1 < aoi_angles_2)', 'aoi_angles_1', 'aoi_angles_2'], {}), '(aoi_angles_1 < aoi_angles_2, aoi_angles_1, aoi_angles_2)\n', (15068, 15125), True, 'import numpy as np\n'), ((15194, 15259), 'numpy.where', 'np.where', (['(aoi_angles_1 < aoi_angles_2)', 'aoi_angles_2', 'aoi_angles_1'], {}), '(aoi_angles_1 < aoi_angles_2, aoi_angles_2, aoi_angles_1)\n', (15202, 15259), True, 'import numpy as np\n'), ((15553, 15659), 'numpy.where', 'np.where', 
(['((surf_1.length < DISTANCE_TOLERANCE) | (surf_2.length < DISTANCE_TOLERANCE))', '(0.0)', 'vf_aoi_raw'], {}), '((surf_1.length < DISTANCE_TOLERANCE) | (surf_2.length <\n DISTANCE_TOLERANCE), 0.0, vf_aoi_raw)\n', (15561, 15659), True, 'import numpy as np\n'), ((17845, 17875), 'numpy.zeros_like', 'np.zeros_like', (['gnd_surf.length'], {}), '(gnd_surf.length)\n', (17858, 17875), True, 'import numpy as np\n'), ((19206, 19271), 'numpy.where', 'np.where', (['(aoi_angles_1 < aoi_angles_2)', 'aoi_angles_1', 'aoi_angles_2'], {}), '(aoi_angles_1 < aoi_angles_2, aoi_angles_1, aoi_angles_2)\n', (19214, 19271), True, 'import numpy as np\n'), ((19340, 19405), 'numpy.where', 'np.where', (['(aoi_angles_1 < aoi_angles_2)', 'aoi_angles_2', 'aoi_angles_1'], {}), '(aoi_angles_1 < aoi_angles_2, aoi_angles_2, aoi_angles_1)\n', (19348, 19405), True, 'import numpy as np\n'), ((19672, 19776), 'numpy.where', 'np.where', (['((ts_length < DISTANCE_TOLERANCE) | (gnd_surf.length < DISTANCE_TOLERANCE))', '(0.0)', 'vf_aoi_raw'], {}), '((ts_length < DISTANCE_TOLERANCE) | (gnd_surf.length <\n DISTANCE_TOLERANCE), 0.0, vf_aoi_raw)\n', (19680, 19776), True, 'import numpy as np\n'), ((22161, 22213), 'numpy.where', 'np.where', (['is_obstructing', 'point_obstr.x', 'point_gnd.x'], {}), '(is_obstructing, point_obstr.x, point_gnd.x)\n', (22169, 22213), True, 'import numpy as np\n'), ((22230, 22282), 'numpy.where', 'np.where', (['is_obstructing', 'point_obstr.y', 'point_gnd.y'], {}), '(is_obstructing, point_obstr.y, point_gnd.y)\n', (22238, 22282), True, 'import numpy as np\n'), ((22303, 22322), 'pvfactors.geometry.timeseries.TsPointCoords', 'TsPointCoords', (['x', 'y'], {}), '(x, y)\n', (22316, 22322), False, 'from pvfactors.geometry.timeseries import TsPointCoords, TsSurface, TsLineCoords\n'), ((24717, 24767), 'numpy.tile', 'np.tile', (['low_angles', '(self.n_integral_sections, 1)'], {}), '(low_angles, (self.n_integral_sections, 1))\n', (24724, 24767), True, 'import numpy as np\n'), ((24796, 24847), 
'numpy.tile', 'np.tile', (['high_angles', '(self.n_integral_sections, 1)'], {}), '(high_angles, (self.n_integral_sections, 1))\n', (24803, 24847), True, 'import numpy as np\n'), ((25181, 25241), 'numpy.where', 'np.where', (['include_integral_section', 'self.integrand_back', '(0.0)'], {}), '(include_integral_section, self.integrand_back, 0.0)\n', (25189, 25241), True, 'import numpy as np\n'), ((25322, 25383), 'numpy.where', 'np.where', (['include_integral_section', 'self.integrand_front', '(0.0)'], {}), '(include_integral_section, self.integrand_front, 0.0)\n', (25330, 25383), True, 'import numpy as np\n'), ((26816, 26836), 'numpy.arccos', 'np.arccos', (['cos_theta'], {}), '(cos_theta)\n', (26825, 26836), True, 'import numpy as np\n'), ((30499, 30515), 'numpy.array', 'np.array', (['angles'], {}), '(angles)\n', (30507, 30515), True, 'import numpy as np\n'), ((4888, 4916), 'numpy.ones_like', 'np.ones_like', (['tilted_to_left'], {}), '(tilted_to_left)\n', (4900, 4916), True, 'import numpy as np\n'), ((4968, 4996), 'numpy.ones_like', 'np.ones_like', (['tilted_to_left'], {}), '(tilted_to_left)\n', (4980, 4996), True, 'import numpy as np\n'), ((5179, 5207), 'numpy.ones_like', 'np.ones_like', (['tilted_to_left'], {}), '(tilted_to_left)\n', (5191, 5207), True, 'import numpy as np\n'), ((5259, 5287), 'numpy.ones_like', 'np.ones_like', (['tilted_to_left'], {}), '(tilted_to_left)\n', (5271, 5287), True, 'import numpy as np\n'), ((5716, 5740), 'pvfactors.geometry.timeseries.TsLineCoords', 'TsLineCoords', (['pt_1', 'pt_2'], {}), '(pt_1, pt_2)\n', (5728, 5740), False, 'from pvfactors.geometry.timeseries import TsPointCoords, TsSurface, TsLineCoords\n'), ((6574, 6625), 'numpy.where', 'np.where', (['tilted_to_left', 'vf_aoi_left', 'vf_aoi_right'], {}), '(tilted_to_left, vf_aoi_left, vf_aoi_right)\n', (6582, 6625), True, 'import numpy as np\n'), ((7064, 7115), 'numpy.where', 'np.where', (['tilted_to_left', 'vf_aoi_right', 'vf_aoi_left'], {}), '(tilted_to_left, vf_aoi_right, 
vf_aoi_left)\n', (7072, 7115), True, 'import numpy as np\n'), ((23633, 23665), 'numpy.abs', 'np.abs', (['(high_angles - low_angles)'], {}), '(high_angles - low_angles)\n', (23639, 23665), True, 'import numpy as np\n'), ((8535, 8575), 'numpy.where', 'np.where', (['tilted_to_left', '(0.0)', 'vf_i_to_j'], {}), '(tilted_to_left, 0.0, vf_i_to_j)\n', (8543, 8575), True, 'import numpy as np\n'), ((8767, 8807), 'numpy.where', 'np.where', (['tilted_to_left', '(0.0)', 'vf_j_to_i'], {}), '(tilted_to_left, 0.0, vf_j_to_i)\n', (8775, 8807), True, 'import numpy as np\n'), ((9379, 9419), 'numpy.where', 'np.where', (['tilted_to_left', 'vf_i_to_j', '(0.0)'], {}), '(tilted_to_left, vf_i_to_j, 0.0)\n', (9387, 9419), True, 'import numpy as np\n'), ((9612, 9652), 'numpy.where', 'np.where', (['tilted_to_left', 'vf_j_to_i', '(0.0)'], {}), '(tilted_to_left, vf_j_to_i, 0.0)\n', (9620, 9652), True, 'import numpy as np\n'), ((19959, 20000), 'numpy.where', 'np.where', (['tilted_to_left', '(0.0)', 'vf_aoi_raw'], {}), '(tilted_to_left, 0.0, vf_aoi_raw)\n', (19967, 20000), True, 'import numpy as np\n'), ((20042, 20083), 'numpy.where', 'np.where', (['tilted_to_left', 'vf_aoi_raw', '(0.0)'], {}), '(tilted_to_left, vf_aoi_raw, 0.0)\n', (20050, 20083), True, 'import numpy as np\n'), ((20128, 20169), 'numpy.where', 'np.where', (['tilted_to_left', 'vf_aoi_raw', '(0.0)'], {}), '(tilted_to_left, vf_aoi_raw, 0.0)\n', (20136, 20169), True, 'import numpy as np\n'), ((20211, 20252), 'numpy.where', 'np.where', (['tilted_to_left', '(0.0)', 'vf_aoi_raw'], {}), '(tilted_to_left, 0.0, vf_aoi_raw)\n', (20219, 20252), True, 'import numpy as np\n'), ((27488, 27499), 'pvlib.tools.cosd', 'cosd', (['aoi_1'], {}), '(aoi_1)\n', (27492, 27499), False, 'from pvlib.tools import cosd\n'), ((27502, 27513), 'pvlib.tools.cosd', 'cosd', (['aoi_2'], {}), '(aoi_2)\n', (27506, 27513), False, 'from pvlib.tools import cosd\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import codecs
import warnings
import argparse
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(LOCAL_PATH, '..', '..'))
import yaml
import functools
import numpy as np
from paddle.inference import create_predictor, PrecisionType
from paddle.inference import Config as PredictConfig
import medicalseg.transforms as T
from medicalseg.cvlibs import manager
from medicalseg.utils import get_sys_env, logger, get_image_list
from medicalseg.utils.visualize import get_pseudo_color_map
from medicalseg.cvlibs import Config
from medicalseg.core import evaluate
from medicalseg.utils import get_sys_env, logger, config_check, utils
from tools import HUnorm, resample
from tools import Prep
from medicalseg.core.infer_window import sliding_window_inference
import paddle
def parse_args():
    """Build and parse the command-line arguments of the inference script."""
    parser = argparse.ArgumentParser(description='Test')
    add = parser.add_argument  # shorthand for registering options

    # Required inputs: config file, trained model and image source.
    add("--config", dest="cfg", help="The config file.",
        default=None, type=str, required=True)
    add('--model_path', dest='model_path', help='The path of model ',
        type=str, default=None, required=True)
    add('--image_path', dest='image_path',
        help='The directory or path or file list of the images to be predicted.',
        type=str, default=None, required=True)

    # Batching and output location.
    add('--batch_size', dest='batch_size',
        help='Mini batch size of one gpu or cpu.', type=int, default=1)
    add('--save_dir', dest='save_dir',
        help='The directory for saving the predict result.',
        type=str, default='./output')

    # Device and acceleration settings.
    add('--device', choices=['cpu', 'gpu'], default="gpu",
        help="Select which device to inference, defaults to gpu.")
    add('--use_trt', default=False, type=eval, choices=[True, False],
        help='Whether to use Nvidia TensorRT to accelerate prediction.')
    add("--precision", default="fp32", type=str,
        choices=["fp32", "fp16", "int8"], help='The tensorrt precision.')
    add('--enable_auto_tune', default=False, type=eval, choices=[True, False],
        help='Whether to enable tuned dynamic shape. We uses some images to collect '
        'the dynamic shape for trt sub graph, which avoids setting dynamic shape manually.'
        )
    add('--auto_tuned_shape_file', type=str, default="auto_tune_tmp.pbtxt",
        help='The temp file to save tuned dynamic shape.')
    add('--cpu_threads', default=10, type=int,
        help='Number of threads to predict when using cpu.')
    add('--enable_mkldnn', default=False, type=eval, choices=[True, False],
        help='Enable to use mkldnn to speed up when using cpu.')

    # Benchmarking and output post-processing.
    add("--benchmark", type=eval, default=False,
        help="Whether to log some information about environment, model, configuration and performance."
        )
    add("--model_name", default="", type=str,
        help='When `--benchmark` is True, the specified model name is displayed.'
        )
    add('--with_argmax', dest='with_argmax',
        help='Perform argmax operation on the predict result.',
        action='store_true')
    add('--print_detail', default=True, type=eval, choices=[True, False],
        help='Print GLOG information of Paddle Inference.')

    return parser.parse_args()
class ModelLike_infer:
    """Wrap a Paddle-Inference predictor so it can be called like a model."""

    def __init__(self, input_handle, output_handle, predictor):
        """Keep the input/output handles and the predictor for later calls."""
        self.input_handle = input_handle
        self.output_handle = output_handle
        self.predictor = predictor

    def infer_likemodel(self, input_handle, output_handle, predictor, data):
        """Run one forward pass and return the output as a paddle tensor."""
        input_handle.reshape(data.shape)
        input_handle.copy_from_cpu(data.numpy())
        predictor.run()
        raw_output = output_handle.copy_to_cpu()
        return paddle.to_tensor(raw_output)

    def infer_model(self, data):
        """Mimic a model call: return a 1-tuple holding the prediction."""
        prediction = self.infer_likemodel(
            self.input_handle, self.output_handle, self.predictor, data)
        return (prediction, )
class Predictor:
    """
    Run sliding-window inference with a trained MedicalSeg model.

    The usage and docs of paddle inference, please refer to
    https://paddleinference.paddlepaddle.org.cn/product_introduction/summary.html
    """

    def __init__(self, args):
        """Build the model from the config file and load trained weights.

        :param args: parsed command-line arguments (see parse_args()).
        """
        self.args = args
        cfg = Config(args.cfg)
        model = cfg.model
        if args.model_path:
            utils.load_entire_model(model, args.model_path)
            logger.info('Loaded trained params of model successfully')
        self.model = model

        # Device-specific initialization (currently just logging).
        if args.device == 'cpu':
            self._init_cpu_config()
        else:
            self._init_gpu_config()

        # Optional benchmarking: collect per-stage timings with auto_log.
        if hasattr(args, 'benchmark') and args.benchmark:
            import auto_log
            pid = os.getpid()
            self.autolog = auto_log.AutoLogger(
                model_name=args.model_name,
                model_precision=args.precision,
                batch_size=args.batch_size,
                data_shape="dynamic",
                save_path=None,
                inference_config=None,
                pids=pid,
                process_name=None,
                gpu_ids=0,
                time_keys=[
                    'preprocess_time', 'inference_time', 'postprocess_time'
                ],
                warmup=0,
                logger=logger)

    def _init_base_config(self):
        """Initialize the basic Paddle-Inference config.

        NOTE(review): relies on self.cfg, which __init__ never sets -- this
        method is currently dead code and would raise AttributeError if called.
        """
        self.pred_cfg = PredictConfig(self.cfg.model, self.cfg.params)
        if not self.args.print_detail:
            self.pred_cfg.disable_glog_info()
        self.pred_cfg.enable_memory_optim()
        self.pred_cfg.switch_ir_optim(True)

    def _init_cpu_config(self):
        """Init the config for x86 cpu."""
        logger.info("Use CPU")

    def _init_gpu_config(self):
        """Init the config for nvidia gpu."""
        logger.info("Use GPU")

    def run(self, imgs_path):
        """Run sliding-window inference over the given images and save results.

        :param imgs_path: a single image path or a list/tuple of paths.
        """
        if not isinstance(imgs_path, (list, tuple)):
            imgs_path = [imgs_path]
        args = self.args
        if not os.path.exists(args.save_dir):
            os.makedirs(args.save_dir)
        for i in range(0, len(imgs_path), args.batch_size):
            if args.benchmark:
                self.autolog.times.start()
            # Load and transform the whole mini batch.
            data = np.array([
                self._preprocess(p) for p in imgs_path[i:i + args.batch_size]
            ])
            data = paddle.to_tensor(data)
            if args.benchmark:
                self.autolog.times.stamp()
            results = sliding_window_inference(data, (128, 128, 128), 1,
                                               self.model)
            results = paddle.to_tensor(results)
            if args.benchmark:
                self.autolog.times.stamp()
            results = self._postprocess(results)
            if args.benchmark:
                self.autolog.times.end(stamp=True)
            self._save_npy(results, imgs_path[i:i + args.batch_size])
        logger.info("Finish")

    def _preprocess(self, img):
        """Load one image path and apply the (identity) transform pipeline.

        :param img: path of one image.
        """
        preT = T.Compose([])
        return preT(img)[0]

    def _postprocess(self, results):
        """Optionally reduce raw predictions with an argmax over channel axis 1."""
        if self.args.with_argmax:
            results = np.argmax(results, axis=1)
        return results

    def _save_npy(self, results, imgs_path):
        """Save each prediction of the batch as <image_basename>.npy.

        Bug fix: previously the ENTIRE batch array was written into every
        per-image file; now only the i-th prediction is saved per image.
        """
        for i in range(results.shape[0]):
            basename = os.path.basename(imgs_path[i])
            basename, _ = os.path.splitext(basename)
            basename = f'{basename}.npy'
            np.save(os.path.join(self.args.save_dir, basename), results[i])
def main(args):
    """Run inference for every image found at ``args.image_path``."""
    # Collect the input images from the given directory/path/file list.
    imgs_list = get_image_list(args.image_path)

    # Build the predictor and run paddle inference over all images.
    predictor = Predictor(args)
    predictor.run(imgs_list)

    # Report collected timings when benchmarking was requested.
    if args.benchmark:
        predictor.autolog.report()
# Script entry point: parse the CLI arguments and launch inference.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| [
"paddle.to_tensor",
"os.path.abspath",
"paddle.inference.Config",
"medicalseg.utils.logger.info",
"argparse.ArgumentParser",
"os.getpid",
"os.makedirs",
"numpy.argmax",
"os.path.basename",
"medicalseg.transforms.Compose",
"os.path.exists",
"medicalseg.core.infer_window.sliding_window_inference... | [((707, 732), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (722, 732), False, 'import os\n'), ((750, 786), 'os.path.join', 'os.path.join', (['LOCAL_PATH', '""".."""', '""".."""'], {}), "(LOCAL_PATH, '..', '..')\n", (762, 786), False, 'import os\n'), ((1472, 1515), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test"""'}), "(description='Test')\n", (1495, 1515), False, 'import argparse\n'), ((9957, 9988), 'medicalseg.utils.get_image_list', 'get_image_list', (['args.image_path'], {}), '(args.image_path)\n', (9971, 9988), False, 'from medicalseg.utils import get_sys_env, logger, get_image_list\n'), ((5487, 5503), 'medicalseg.cvlibs.Config', 'Config', (['args.cfg'], {}), '(args.cfg)\n', (5493, 5503), False, 'from medicalseg.cvlibs import Config\n'), ((6684, 6730), 'paddle.inference.Config', 'PredictConfig', (['self.cfg.model', 'self.cfg.params'], {}), '(self.cfg.model, self.cfg.params)\n', (6697, 6730), True, 'from paddle.inference import Config as PredictConfig\n'), ((7006, 7028), 'medicalseg.utils.logger.info', 'logger.info', (['"""Use CPU"""'], {}), "('Use CPU')\n", (7017, 7028), False, 'from medicalseg.utils import get_sys_env, logger, config_check, utils\n'), ((7141, 7163), 'medicalseg.utils.logger.info', 'logger.info', (['"""Use GPU"""'], {}), "('Use GPU')\n", (7152, 7163), False, 'from medicalseg.utils import get_sys_env, logger, config_check, utils\n'), ((9184, 9205), 'medicalseg.utils.logger.info', 'logger.info', (['"""Finish"""'], {}), "('Finish')\n", (9195, 9205), False, 'from medicalseg.utils import get_sys_env, logger, config_check, utils\n'), ((9357, 9370), 'medicalseg.transforms.Compose', 'T.Compose', (['[]'], {}), '([])\n', (9366, 9370), True, 'import medicalseg.transforms as T\n'), ((5570, 5617), 'medicalseg.utils.utils.load_entire_model', 'utils.load_entire_model', (['model', 'args.model_path'], {}), '(model, args.model_path)\n', 
(5593, 5617), False, 'from medicalseg.utils import get_sys_env, logger, config_check, utils\n'), ((5630, 5688), 'medicalseg.utils.logger.info', 'logger.info', (['"""Loaded trained params of model successfully"""'], {}), "('Loaded trained params of model successfully')\n", (5641, 5688), False, 'from medicalseg.utils import get_sys_env, logger, config_check, utils\n'), ((6035, 6046), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6044, 6046), False, 'import os\n'), ((6074, 6394), 'auto_log.AutoLogger', 'auto_log.AutoLogger', ([], {'model_name': 'args.model_name', 'model_precision': 'args.precision', 'batch_size': 'args.batch_size', 'data_shape': '"""dynamic"""', 'save_path': 'None', 'inference_config': 'None', 'pids': 'pid', 'process_name': 'None', 'gpu_ids': '(0)', 'time_keys': "['preprocess_time', 'inference_time', 'postprocess_time']", 'warmup': '(0)', 'logger': 'logger'}), "(model_name=args.model_name, model_precision=args.\n precision, batch_size=args.batch_size, data_shape='dynamic', save_path=\n None, inference_config=None, pids=pid, process_name=None, gpu_ids=0,\n time_keys=['preprocess_time', 'inference_time', 'postprocess_time'],\n warmup=0, logger=logger)\n", (6093, 6394), False, 'import auto_log\n'), ((7621, 7650), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (7635, 7650), False, 'import os\n'), ((7664, 7690), 'os.makedirs', 'os.makedirs', (['args.save_dir'], {}), '(args.save_dir)\n', (7675, 7690), False, 'import os\n'), ((8658, 8680), 'paddle.to_tensor', 'paddle.to_tensor', (['data'], {}), '(data)\n', (8674, 8680), False, 'import paddle\n'), ((8781, 8843), 'medicalseg.core.infer_window.sliding_window_inference', 'sliding_window_inference', (['data', '(128, 128, 128)', '(1)', 'self.model'], {}), '(data, (128, 128, 128), 1, self.model)\n', (8805, 8843), False, 'from medicalseg.core.infer_window import sliding_window_inference\n'), ((8859, 8884), 'paddle.to_tensor', 'paddle.to_tensor', (['results'], {}), '(results)\n', 
(8875, 8884), False, 'import paddle\n'), ((9564, 9590), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (9573, 9590), True, 'import numpy as np\n'), ((9725, 9755), 'os.path.basename', 'os.path.basename', (['imgs_path[i]'], {}), '(imgs_path[i])\n', (9741, 9755), False, 'import os\n'), ((9782, 9808), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (9798, 9808), False, 'import os\n'), ((9870, 9912), 'os.path.join', 'os.path.join', (['self.args.save_dir', 'basename'], {}), '(self.args.save_dir, basename)\n', (9882, 9912), False, 'import os\n')] |
import gzip
import pickle
import sys
from time import time

import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
import seaborn as sns
try:
# if calling from XGMix
from Utils.utils import read_vcf, vcf_to_npy, fb2proba, npy_to_vcf
from XGFix.phasing import *
from XGFix.simple_switch import simple_switch
except ModuleNotFoundError:
# if calling from XGFix
from utils import read_vcf, vcf_to_npy, fb2proba, npy_to_vcf
from phasing import *
from simple_switch import simple_switch
def mask_base_prob(base_prob, d=0):
    """
    Zero out every window further than d positions from the center window.

    base_prob has shape [H, W, A] where H is the number of haplotypes,
    W the number of windows and A the number of ancestries. Only windows
    in [center-d, center+d] keep their values; the input is not modified.
    """
    probs = np.array(base_prob)
    n_hapl, n_wind, n_anc = probs.shape
    center = int((n_wind - 1) / 2)
    masked = np.copy(probs)
    # zero everything left of the kept band, then everything right of it
    masked[:, np.arange(center - d), :] = 0
    masked[:, np.arange(center + d + 1, n_wind), :] = 0
    return masked
def check(Y_m, Y_p, w, base, check_criterion):
    """
    Decide whether window w needs to be inspected, given the criterion.

    "all"         -- always inspect.
    "disc_smooth" -- smoothed labels change between windows w-1 and w.
    "disc_base"   -- argmax of the base probabilities changes there.
    "disc_either" -- either of the two discontinuities above.
    Any other value falls back to inspecting everything (with a warning).
    """
    if check_criterion == "all":
        return True
    if check_criterion == "disc_smooth":
        return Y_m[w] != Y_m[w - 1] or Y_p[w] != Y_p[w - 1]
    if check_criterion == "disc_base":
        base_m, base_p = np.argmax(base[:, w - 1:w + 1, :], axis=2)
        return base_m[0] != base_m[1] or base_p[0] != base_p[1]
    if check_criterion == "disc_either":
        base_m, base_p = np.argmax(base[:, w - 1:w + 1, :], axis=2)
        disc_base = base_m[0] != base_m[1] or base_p[0] != base_p[1]
        disc_smooth = Y_m[w] != Y_m[w - 1] or Y_p[w] != Y_p[w - 1]
        return disc_base or disc_smooth
    print("Warning: check criteration not recognized. Checking all windows")
    return True
def base_to_smooth_data(base_prob, sws, pad_size=None, labels=None):
    """
    Turn base probabilities into sliding-window features for the smoother.

    base_prob has shape [N, W, A]; for every window the A probabilities of
    the sws surrounding windows (reflect-padded at the edges) are flattened
    into one feature row. Returns (features of shape [N*W, A*sws], labels
    reshaped to 1D or None).
    """
    probs = np.copy(base_prob)
    n_hapl, n_wind, n_anc = probs.shape
    if pad_size is None:
        pad_size = (1 + sws) // 2

    # Reflect-pad along the window axis so edge windows get full context.
    left = np.flip(probs[:, 0:pad_size, :], axis=1)
    right = np.flip(probs[:, -pad_size:, :], axis=1)
    padded = np.concatenate([left, probs, right], axis=1)

    # Gather the length-sws slice of probabilities around every window.
    features = np.zeros((n_hapl, n_wind, n_anc * sws), dtype="float32")
    for hapl in range(n_hapl):
        for wind in range(n_wind):
            features[hapl, wind, :] = padded[hapl, wind:wind + sws].ravel()

    features = features.reshape(-1, features.shape[2])
    flat_labels = None if labels is None else labels.reshape(-1)
    return features, flat_labels
def load_smoother(path_to_smoother, verbose=True):
    """
    Load a pickled smoother model from disk.

    :param path_to_smoother: path to the pickled smoother; a ".gz" suffix
        means the pickle is gzip-compressed
    :param verbose: print a progress message when True
    :return: the unpickled smoother object

    NOTE: pickle.load executes arbitrary code -- only load trusted files.
    """
    if verbose:
        print("Loading smoother...")
    # Bug fix: the plain-file branch used open() without closing the handle;
    # a context manager now guarantees the file is closed either way.
    opener = gzip.open if path_to_smoother[-3:] == ".gz" else open
    with opener(path_to_smoother, "rb") as f:
        smoother = pickle.load(f)
    return smoother
def XGFix(M, P, base_prob, smoother, max_it=50, non_lin_s=0, check_criterion="disc_smooth", max_center_offset=0, prob_comp="max", d=None, prior_switch_prob = 0.5,
            naive_switch=None, end_naive_switch=None, padding=True, verbose=False):
    """
    Iteratively correct switch (phasing) errors between two haplotypes.

    Parameters
    ----------
    M, P : 1D arrays
        Maternal and paternal SNP haplotypes of equal length.
    base_prob : array of shape (2, W, A)
        Per-window ancestry probabilities of the two haplotypes; W windows,
        A ancestries.
    smoother : classifier
        Fitted smoother with predict/predict_proba over flattened windows;
        its window size sws is inferred from its feature-importance count / A.
    max_it : int
        Maximum number of correction sweeps over the windows.
    non_lin_s : int
        Number of double-switch permutations tried on each side of a window.
    check_criterion : str
        Which windows to inspect ("all", "disc_smooth", "disc_base",
        "disc_either"); see check().
    max_center_offset : int
        How far from the window center single switches are tried.
    prob_comp : str
        Reduction of the two per-haplotype probabilities to one scalar for
        comparison: "max" or "prod".
    d : int or None
        If given, mask base probabilities further than d windows from the
        scope center (see mask_base_prob).
    prior_switch_prob : float
        Prior probability of a switch; a permutation replaces the original
        when best_prob*prior > original_prob*(1-prior).
    naive_switch, end_naive_switch : int or None
        Slack for the simple_switch heuristic run before every sweep and
        once after the final sweep, respectively.
    padding : bool
        Iterate over all windows (True) or only over full-scope centers.
    verbose : bool
        Print configuration and per-iteration progress.

    Returns
    -------
    (X_m, X_p, Y_m, Y_p, history, XGFix_tracker) : corrected SNP haplotypes,
    final window labels, the stacked label history, and the tracker of all
    switches that were applied.
    """
    if verbose:
        # print configs
        print("max center offset:", max_center_offset)
        print("non_lin_s:", non_lin_s)
        print("Mask:", d)
        print("including naive switch:", naive_switch)
        print("including end naive switch:", end_naive_switch)
        print("prior switch prob:", prior_switch_prob)
        print("check criterion:", check_criterion)
        print("probability comparison:", prob_comp)
        print("padding:", padding)

    N, W, A = base_prob.shape
    sws = len(smoother.feature_importances_)//A # smoother size
    window_size = len(M)//W # window size

    # initial position
    X_m, X_p = np.copy([M,P]).astype(int)

    # inferred labels of initial position
    smooth_data, _ = base_to_smooth_data(base_prob, sws=sws)
    Y_m, Y_p = smoother.predict(smooth_data).reshape(2,W)

    # define windows to iterate through
    centers = (np.arange(W-sws+1)+(sws-1)/2).astype(int)
    iter_windows = np.arange(1,W) if padding else centers

    # Track convergence and progression
    X_m_its = [] # monitor convergence
    XGFix_tracker = (np.zeros_like(Y_m), np.ones_like(Y_p))
    history = np.array([Y_m, Y_p])

    # Fix
    st = time()
    for it in range(max_it):
        if verbose:
            sys.stdout.write("\riteration %i/%i" % (it+1, max_it))
        if naive_switch:
            # Naive switch: heuristic to catch obvious errors and save computations
            _, _, M_track, _, _ = simple_switch(Y_m,Y_p,slack=naive_switch,cont=False,verbose=False,animate=False)
            X_m, X_p = correct_phase_error(X_m, X_p, M_track, window_size)
            base_prob = np.array(correct_phase_error(base_prob[0], base_prob[1], M_track, window_size))
            smooth_data, _ = base_to_smooth_data(base_prob, sws=sws)
            Y_m, Y_p = smoother.predict(smooth_data).reshape(2,W)
            history = np.dstack([history, [Y_m, Y_p]])

        # Stop if converged (X_m already seen in a previous iteration)
        if np.any([np.all(X_m == X_m_it) for X_m_it in X_m_its]):
            if verbose:
                print(); print("converged, stopping..", end="")
            break
        else:
            X_m_its.append(X_m)

        # Iterate through windows
        for w in iter_windows:
            # Heuristic to save computation, only check if there's a nuance
            if check(Y_m, Y_p, w, base_prob, check_criterion):
                # Different permutations depending on window position
                if w in centers:
                    center = w
                    max_center_offset_w, non_lin_s_w = max_center_offset, non_lin_s
                else:
                    # edge window: clamp to the nearest full-scope center
                    center = centers[0] if w < centers[0] else centers[-1]
                    max_center_offset_w, non_lin_s_w = 0, 0

                # defining scope: the sws windows the smoother looks at
                scope_idxs = center + np.arange(sws) - int((sws-1)/2)

                # indices of pair-wise permutations
                switch_idxs = []
                switch_idxs += [np.array([j]) for j in range(w-max_center_offset_w, w+max_center_offset_w+1)] # single switches: xxxxxxoooooo
                switch_idxs += [np.array([w-j,w]) for j in range(1,non_lin_s_w)] # double switches left of center: xxxoocxxx
                switch_idxs += [np.array([w,w+j+1]) for j in range(non_lin_s_w)] # double switches right of center: xxxcooxxx

                # init collection of permutations and add the original
                mps = []
                m_orig, p_orig = np.copy(base_prob[:,scope_idxs,:])
                mps.append(m_orig); mps.append(p_orig)

                # adding more permutations
                for switch_idx in switch_idxs:
                    switch_idx = np.concatenate([[scope_idxs[0]], switch_idx.reshape(-1), [scope_idxs[-1]+1]])
                    m, p = [], []
                    for s in range(len(switch_idx)-1):
                        m_s, p_s = base_prob[:,np.arange(switch_idx[s],switch_idx[s+1]),:]
                        if s%2:
                            # odd segments come from the opposite haplotype
                            m_s, p_s = p_s, m_s
                        m.append(m_s); p.append(p_s)
                    m, p = np.copy(np.concatenate(m,axis=0)), np.copy(np.concatenate(p,axis=0))
                    mps.append(m); mps.append(p);

                # get 2D probabilities for permutations
                mps = np.array(mps) if d is None else mask_base_prob(mps, d=d)
                outs = smoother.predict_proba( mps.reshape(len(mps),-1) ).reshape(-1,2,A)

                # map permutation probabilities to a scalar (R^2 -> R) for comparison
                # NOTE(review): probs is left unbound if prob_comp is neither
                # "prod" nor "max" -- the next line would raise NameError.
                if prob_comp=="prod":
                    probs = np.prod(np.max(outs,axis=2),axis=1)
                if prob_comp=="max":
                    probs = np.max(np.max(outs,axis=2),axis=1)

                # select the most probable one
                original_prob, switch_probs = probs[0], probs[1:]
                best_switch_prob = np.max(switch_probs)
                best_switch = switch_idxs[np.argmax(switch_probs)].reshape(-1)

                # if more likely than the original, replace the output of the base
                if best_switch_prob*prior_switch_prob > original_prob*(1-prior_switch_prob):
                    switched = True  # NOTE(review): assigned but never read
                    m, p = [], []
                    switch_idx = np.concatenate([[0], best_switch, [W]])
                    for s in range(len(switch_idx)-1):
                        m_s, p_s = base_prob[:,np.arange(switch_idx[s],switch_idx[s+1]),:]
                        if s%2:
                            m_s, p_s = p_s, m_s
                        m.append(m_s); p.append(p_s)
                    m, p = np.copy(np.concatenate(m,axis=0)), np.copy(np.concatenate(p,axis=0))
                    base_prob = np.copy(np.array([m,p]))

                    # track the change
                    for switch in best_switch:
                        M_track, P_track = track_switch(np.zeros_like(Y_m), np.ones_like(Y_p), switch)
                        XGFix_tracker = track_switch(XGFix_tracker[0], XGFix_tracker[1], switch)

                    # correct inferred error on SNP level and re-label
                    X_m, X_p = correct_phase_error(X_m, X_p, M_track, window_size)
                    smooth_data, _ = base_to_smooth_data(base_prob, sws=sws)
                    Y_m, Y_p = smoother.predict(smooth_data).reshape(2,W)
                    history = np.dstack([history, [Y_m, Y_p]])

    if naive_switch:
        end_naive_switch = naive_switch
    if end_naive_switch:
        # one final pass of the naive heuristic on the converged labels
        _, _, M_track, _, _ = simple_switch(Y_m,Y_p,slack=end_naive_switch,cont=False,verbose=False,animate=False)
        X_m, X_p = correct_phase_error(X_m, X_p, M_track, window_size)
        base_prob = np.array(correct_phase_error(base_prob[0], base_prob[1], M_track, window_size))
        smooth_data, _ = base_to_smooth_data(base_prob, sws=sws)
        Y_m, Y_p = smoother.predict(smooth_data).reshape(2,W)
        history = np.dstack([history, [Y_m, Y_p]])

    history = np.dstack([history, [Y_m, Y_p]])
    if verbose:
        print(); print("runtime:", np.round(time()-st))

    return X_m, X_p, Y_m, Y_p, history, XGFix_tracker
def main(query_file, fb_file, smoother_file, output_basename, chm, n_windows=None, verbose=False):
    """Run XGFix phasing on a query VCF and write the corrected SNPs to disk."""
    # Load the query haplotypes.
    # TODO: does the npy need some modification?
    query_vcf_data = read_vcf(query_file, chm=chm, fields="*")
    X = vcf_to_npy(query_vcf_data)
    H, C = X.shape
    N = H // 2

    # Load the trained smoother.
    S = load_smoother(smoother_file)

    # Load window-level base probabilities and pair haplotypes per individual.
    base_prob = fb2proba(fb_file, n_wind=n_windows)
    print(base_prob.shape)
    H_, W, A = base_prob.shape
    base_prob = base_prob.reshape(H // 2, 2, W, A)
    assert H == H_, "Number of haplotypes from base probabilities must match number of query haplotypes"

    # Phase each individual in turn.
    snps_phased = np.zeros((N, 2, C), dtype=int)
    labels_phased = np.zeros((N, 2, W), dtype=int)
    for i, X_i in enumerate(X.reshape(N, 2, C)):
        sys.stdout.write("\rPhasing individual %i/%i" % (i + 1, N))
        X_m, X_p = np.copy(X_i)
        X_m, X_p, Y_m, Y_p, history, XGFix_tracker = XGFix(
            X_m, X_p, base_prob=base_prob[i], smoother=S,
            check_criterion="disc_base", verbose=True)
        snps_phased[i] = np.copy(np.array((X_m, X_p)))
        labels_phased[i] = np.copy(np.array((Y_m, Y_p)))
    snps_phased = snps_phased.reshape(H, C)
    print()

    # Write the phased result back out as a VCF.
    if verbose:
        print("Writing phased SNPs to disc...")
    npy_to_vcf(query_vcf_data, snps_phased, output_basename)
    return
# Script entry point: only the 5-argument invocation is supported; any other
# argument count prints a usage message and exits.
if __name__ == "__main__":

    # TODO: Ask for citation

    # Infer mode from number of arguments (could be extended to connect with XGMix)
    mode = None
    if len(sys.argv) == 6:
        mode = ""

    # Usage message
    if mode is None:
        if len(sys.argv) > 1:
            print("Error: Incorrect number of arguments.")
        print("Usage:")
        print("   $ python3 XGFIX.py <query_file> <fb_file> <smoother_file> <output_basename> <chm>")
        sys.exit(0)

    # Unpack the CLI arguments positionally and run the pipeline.
    _, query_file, fb_file, smoother_file, output_basename, chm = sys.argv
    main(query_file, fb_file, smoother_file, output_basename, chm, verbose=True)

    # main(*sys.argv)
| [
"sys.stdout.write",
"numpy.argmax",
"pickle.load",
"numpy.arange",
"utils.read_vcf",
"simple_switch.simple_switch",
"numpy.zeros_like",
"numpy.copy",
"utils.npy_to_vcf",
"numpy.max",
"utils.vcf_to_npy",
"numpy.dstack",
"numpy.ones_like",
"utils.fb2proba",
"sys.exit",
"numpy.concatenate... | [((845, 864), 'numpy.array', 'np.array', (['base_prob'], {}), '(base_prob)\n', (853, 864), True, 'import numpy as np\n'), ((929, 947), 'numpy.copy', 'np.copy', (['base_prob'], {}), '(base_prob)\n', (936, 947), True, 'import numpy as np\n'), ((1964, 1982), 'numpy.copy', 'np.copy', (['base_prob'], {}), '(base_prob)\n', (1971, 1982), True, 'import numpy as np\n'), ((2099, 2143), 'numpy.flip', 'np.flip', (['base_prob[:, 0:pad_size, :]'], {'axis': '(1)'}), '(base_prob[:, 0:pad_size, :], axis=1)\n', (2106, 2143), True, 'import numpy as np\n'), ((2157, 2201), 'numpy.flip', 'np.flip', (['base_prob[:, -pad_size:, :]'], {'axis': '(1)'}), '(base_prob[:, -pad_size:, :], axis=1)\n', (2164, 2201), True, 'import numpy as np\n'), ((2222, 2278), 'numpy.concatenate', 'np.concatenate', (['[pad_left, base_prob, pad_right]'], {'axis': '(1)'}), '([pad_left, base_prob, pad_right], axis=1)\n', (2236, 2278), True, 'import numpy as np\n'), ((2314, 2356), 'numpy.zeros', 'np.zeros', (['(N, W, A * sws)'], {'dtype': '"""float32"""'}), "((N, W, A * sws), dtype='float32')\n", (2322, 2356), True, 'import numpy as np\n'), ((4446, 4466), 'numpy.array', 'np.array', (['[Y_m, Y_p]'], {}), '([Y_m, Y_p])\n', (4454, 4466), True, 'import numpy as np\n'), ((4487, 4493), 'time.time', 'time', ([], {}), '()\n', (4491, 4493), False, 'from time import time\n'), ((10280, 10312), 'numpy.dstack', 'np.dstack', (['[history, [Y_m, Y_p]]'], {}), '([history, [Y_m, Y_p]])\n', (10289, 10312), True, 'import numpy as np\n'), ((10650, 10691), 'utils.read_vcf', 'read_vcf', (['query_file'], {'chm': 'chm', 'fields': '"""*"""'}), "(query_file, chm=chm, fields='*')\n", (10658, 10691), False, 'from utils import read_vcf, vcf_to_npy, fb2proba, npy_to_vcf\n'), ((10700, 10726), 'utils.vcf_to_npy', 'vcf_to_npy', (['query_vcf_data'], {}), '(query_vcf_data)\n', (10710, 10726), False, 'from utils import read_vcf, vcf_to_npy, fb2proba, npy_to_vcf\n'), ((10865, 10900), 'utils.fb2proba', 'fb2proba', (['fb_file'], 
{'n_wind': 'n_windows'}), '(fb_file, n_wind=n_windows)\n', (10873, 10900), False, 'from utils import read_vcf, vcf_to_npy, fb2proba, npy_to_vcf\n'), ((11138, 11168), 'numpy.zeros', 'np.zeros', (['(N, 2, C)'], {'dtype': 'int'}), '((N, 2, C), dtype=int)\n', (11146, 11168), True, 'import numpy as np\n'), ((11182, 11212), 'numpy.zeros', 'np.zeros', (['(N, 2, W)'], {'dtype': 'int'}), '((N, 2, W), dtype=int)\n', (11190, 11212), True, 'import numpy as np\n'), ((11803, 11856), 'utils.npy_to_vcf', 'npy_to_vcf', (['query_vcf_data', 'X_phased', 'output_basename'], {}), '(query_vcf_data, X_phased, output_basename)\n', (11813, 11856), False, 'from utils import read_vcf, vcf_to_npy, fb2proba, npy_to_vcf\n'), ((4252, 4267), 'numpy.arange', 'np.arange', (['(1)', 'W'], {}), '(1, W)\n', (4261, 4267), True, 'import numpy as np\n'), ((4392, 4410), 'numpy.zeros_like', 'np.zeros_like', (['Y_m'], {}), '(Y_m)\n', (4405, 4410), True, 'import numpy as np\n'), ((4412, 4429), 'numpy.ones_like', 'np.ones_like', (['Y_p'], {}), '(Y_p)\n', (4424, 4429), True, 'import numpy as np\n'), ((9830, 9923), 'simple_switch.simple_switch', 'simple_switch', (['Y_m', 'Y_p'], {'slack': 'end_naive_switch', 'cont': '(False)', 'verbose': '(False)', 'animate': '(False)'}), '(Y_m, Y_p, slack=end_naive_switch, cont=False, verbose=False,\n animate=False)\n', (9843, 9923), False, 'from simple_switch import simple_switch\n'), ((10232, 10264), 'numpy.dstack', 'np.dstack', (['[history, [Y_m, Y_p]]'], {}), '([history, [Y_m, Y_p]])\n', (10241, 10264), True, 'import numpy as np\n'), ((11266, 11325), 'sys.stdout.write', 'sys.stdout.write', (["('\\rPhasing individual %i/%i' % (i + 1, N))"], {}), "('\\rPhasing individual %i/%i' % (i + 1, N))\n", (11282, 11325), False, 'import sys\n'), ((11343, 11355), 'numpy.copy', 'np.copy', (['X_i'], {}), '(X_i)\n', (11350, 11355), True, 'import numpy as np\n'), ((12339, 12350), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (12347, 12350), False, 'import sys\n'), ((961, 977), 
'numpy.arange', 'np.arange', (['(c - d)'], {}), '(c - d)\n', (970, 977), True, 'import numpy as np\n'), ((996, 1019), 'numpy.arange', 'np.arange', (['(c + d + 1)', 'W'], {}), '(c + d + 1, W)\n', (1005, 1019), True, 'import numpy as np\n'), ((2935, 2956), 'pickle.load', 'pickle.load', (['unzipped'], {}), '(unzipped)\n', (2946, 2956), False, 'import pickle\n'), ((3945, 3960), 'numpy.copy', 'np.copy', (['[M, P]'], {}), '([M, P])\n', (3952, 3960), True, 'import numpy as np\n'), ((4556, 4612), 'sys.stdout.write', 'sys.stdout.write', (["('\\riteration %i/%i' % (it + 1, max_it))"], {}), "('\\riteration %i/%i' % (it + 1, max_it))\n", (4572, 4612), False, 'import sys\n'), ((4755, 4844), 'simple_switch.simple_switch', 'simple_switch', (['Y_m', 'Y_p'], {'slack': 'naive_switch', 'cont': '(False)', 'verbose': '(False)', 'animate': '(False)'}), '(Y_m, Y_p, slack=naive_switch, cont=False, verbose=False,\n animate=False)\n', (4768, 4844), False, 'from simple_switch import simple_switch\n'), ((5173, 5205), 'numpy.dstack', 'np.dstack', (['[history, [Y_m, Y_p]]'], {}), '([history, [Y_m, Y_p]])\n', (5182, 5205), True, 'import numpy as np\n'), ((11593, 11613), 'numpy.array', 'np.array', (['(X_m, X_p)'], {}), '((X_m, X_p))\n', (11601, 11613), True, 'import numpy as np\n'), ((11644, 11664), 'numpy.array', 'np.array', (['(Y_m, Y_p)'], {}), '((Y_m, Y_p))\n', (11652, 11664), True, 'import numpy as np\n'), ((1334, 1376), 'numpy.argmax', 'np.argmax', (['base[:, w - 1:w + 1, :]'], {'axis': '(2)'}), '(base[:, w - 1:w + 1, :], axis=2)\n', (1343, 1376), True, 'import numpy as np\n'), ((4191, 4213), 'numpy.arange', 'np.arange', (['(W - sws + 1)'], {}), '(W - sws + 1)\n', (4200, 4213), True, 'import numpy as np\n'), ((5254, 5275), 'numpy.all', 'np.all', (['(X_m == X_m_it)'], {}), '(X_m == X_m_it)\n', (5260, 5275), True, 'import numpy as np\n'), ((6766, 6802), 'numpy.copy', 'np.copy', (['base_prob[:, scope_idxs, :]'], {}), '(base_prob[:, scope_idxs, :])\n', (6773, 6802), True, 'import numpy as 
np\n'), ((8198, 8218), 'numpy.max', 'np.max', (['switch_probs'], {}), '(switch_probs)\n', (8204, 8218), True, 'import numpy as np\n'), ((1522, 1564), 'numpy.argmax', 'np.argmax', (['base[:, w - 1:w + 1, :]'], {'axis': '(2)'}), '(base[:, w - 1:w + 1, :], axis=2)\n', (1531, 1564), True, 'import numpy as np\n'), ((6273, 6286), 'numpy.array', 'np.array', (['[j]'], {}), '([j])\n', (6281, 6286), True, 'import numpy as np\n'), ((6416, 6436), 'numpy.array', 'np.array', (['[w - j, w]'], {}), '([w - j, w])\n', (6424, 6436), True, 'import numpy as np\n'), ((6541, 6565), 'numpy.array', 'np.array', (['[w, w + j + 1]'], {}), '([w, w + j + 1])\n', (6549, 6565), True, 'import numpy as np\n'), ((7613, 7626), 'numpy.array', 'np.array', (['mps'], {}), '(mps)\n', (7621, 7626), True, 'import numpy as np\n'), ((8578, 8617), 'numpy.concatenate', 'np.concatenate', (['[[0], best_switch, [W]]'], {}), '([[0], best_switch, [W]])\n', (8592, 8617), True, 'import numpy as np\n'), ((9676, 9708), 'numpy.dstack', 'np.dstack', (['[history, [Y_m, Y_p]]'], {}), '([history, [Y_m, Y_p]])\n', (9685, 9708), True, 'import numpy as np\n'), ((10378, 10384), 'time.time', 'time', ([], {}), '()\n', (10382, 10384), False, 'from time import time\n'), ((6123, 6137), 'numpy.arange', 'np.arange', (['sws'], {}), '(sws)\n', (6132, 6137), True, 'import numpy as np\n'), ((7921, 7941), 'numpy.max', 'np.max', (['outs'], {'axis': '(2)'}), '(outs, axis=2)\n', (7927, 7941), True, 'import numpy as np\n'), ((8021, 8041), 'numpy.max', 'np.max', (['outs'], {'axis': '(2)'}), '(outs, axis=2)\n', (8027, 8041), True, 'import numpy as np\n'), ((9033, 9049), 'numpy.array', 'np.array', (['[m, p]'], {}), '([m, p])\n', (9041, 9049), True, 'import numpy as np\n'), ((7423, 7448), 'numpy.concatenate', 'np.concatenate', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (7437, 7448), True, 'import numpy as np\n'), ((7458, 7483), 'numpy.concatenate', 'np.concatenate', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (7472, 7483), True, 'import numpy as 
np\n'), ((8261, 8284), 'numpy.argmax', 'np.argmax', (['switch_probs'], {}), '(switch_probs)\n', (8270, 8284), True, 'import numpy as np\n'), ((8932, 8957), 'numpy.concatenate', 'np.concatenate', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (8946, 8957), True, 'import numpy as np\n'), ((8967, 8992), 'numpy.concatenate', 'np.concatenate', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (8981, 8992), True, 'import numpy as np\n'), ((9194, 9212), 'numpy.zeros_like', 'np.zeros_like', (['Y_m'], {}), '(Y_m)\n', (9207, 9212), True, 'import numpy as np\n'), ((9214, 9231), 'numpy.ones_like', 'np.ones_like', (['Y_p'], {}), '(Y_p)\n', (9226, 9231), True, 'import numpy as np\n'), ((7211, 7254), 'numpy.arange', 'np.arange', (['switch_idx[s]', 'switch_idx[s + 1]'], {}), '(switch_idx[s], switch_idx[s + 1])\n', (7220, 7254), True, 'import numpy as np\n'), ((8720, 8763), 'numpy.arange', 'np.arange', (['switch_idx[s]', 'switch_idx[s + 1]'], {}), '(switch_idx[s], switch_idx[s + 1])\n', (8729, 8763), True, 'import numpy as np\n')] |
from itertools import chain, combinations
def powerset(iterable):
    """Yield every subset of *iterable*, from the empty tuple to the full set.

    powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    """
    items = list(iterable)
    by_size = (combinations(items, size) for size in range(len(items) + 1))
    return chain.from_iterable(by_size)
from utils.utils import choose
from collections import defaultdict
import pandas as pd
import numpy as np
import torch
import torch.nn.functional as F
cos = F.cosine_similarity
# Leftover scratch constants from earlier experimentation.  N, M and d are
# re-assigned further down (trials/N/d section and the vs-d loop), so nothing
# below depends on these exact values.
N = 3
M = 5
a = torch.arange(1, 1+M)  # [1..M]
b = 2 * a
c = torch.div(1, torch.arange(1, 1+M))  # presumably element-wise reciprocals 1/1..1/M — unused later
d = torch.square(a)  # NOTE(review): shadowed below by d = 1000 (dimension)
from torch.linalg import norm
def v(coalition_v, grand_v, if_norm=False):
    """Characteristic function: cosine similarity (along dim 0) between a
    coalition's summed vector and the grand-coalition vector.

    When ``if_norm`` is set, each vector with non-zero norm is first scaled
    to unit length (cosine similarity is scale-invariant, so this mainly
    guards the zero-vector case).
    """
    def _maybe_unit(vec):
        # Normalise only when requested and the norm is non-zero.
        if if_norm:
            length = norm(vec)
            if length != 0:
                return torch.div(vec, length)
        return vec

    return cos(_maybe_unit(coalition_v), _maybe_unit(grand_v), 0)
from math import factorial as fac
def calculate_svs(vectors, N, d):
    """Exact (normalised) Shapley values of the N vectors under the cosine
    characteristic function ``v``.

    Enumerates every non-empty coalition; each member's marginal
    contribution is weighted by 1 / C(N-1, |S|-1).  The constant 1/N factor
    of the Shapley formula cancels in the final normalisation: the result
    is divided by its own sum, so shares (not raw SVs) are returned.
    """
    grand = torch.stack(vectors).sum(dim=0)
    shapley = torch.zeros(N)
    for coalition in powerset(range(N)):
        if not coalition:
            continue  # the empty coalition contributes nothing
        members = list(coalition)
        coalition_v = torch.zeros(d)
        for member in members:
            coalition_v = coalition_v + vectors[member]
        weight = 1.0 / choose(N - 1, len(members) - 1)
        for member in members:
            with_member = v(coalition_v, grand)
            without_member = v(coalition_v - vectors[member], grand)
            shapley[member] += weight * (with_member - without_member)
    return torch.div(shapley, sum(shapley))
from itertools import permutations
from random import shuffle
def calculate_sv_hats(vectors, N, d, K=30):
    """Monte-Carlo estimate of the (normalised) Shapley values.

    Fix: the original materialised *all* N! permutations (already 3.6M
    tuples at N=10) and shuffled the whole list just to take the first K.
    We instead draw each sample by shuffling one index list in place —
    O(K*N) memory instead of O(N!*N), with the same uniform-permutation
    sampling distribution.

    :param vectors: list of N 1-D tensors of length d
    :param N: number of players
    :param d: vector dimension
    :param K: number of sampled permutations
    :return: estimates normalised to sum to 1 (matching calculate_svs)
    """
    grand = torch.stack(vectors).sum(dim=0)
    svs = torch.zeros(N)
    order = list(range(N))
    for _ in range(K):
        shuffle(order)  # one uniformly random player ordering per sample
        permutation_v = torch.zeros(d)
        for i in order:
            without_i = v(permutation_v, grand)
            permutation_v += vectors[i]
            with_i = v(permutation_v, grand)
            # marginal contribution of player i along this ordering
            svs[i] += with_i - without_i
    return torch.div(svs, sum(svs))
from scipy.stats import pearsonr
# Show full DataFrame contents when printing results.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', None)
# Default experiment sizes; trials/N/d are all re-assigned below before use.
trials = 50
N = 10
d = 1000
from time import process_time
def clock(name, start, time_dict):
    """Accumulate CPU time elapsed since *start* into ``time_dict[name]``.

    Returns the current process time so callers can chain measurements:
    ``now = clock('phase', now, timings)``.
    """
    current = process_time()
    time_dict[name] += current - start
    return current
# Module-level accumulator written to by experiment().  NOTE(review): it is
# never cleared, so repeated experiment() calls append into the same lists.
results = defaultdict(list)
def generate_random_vectors(N, d, uniform=True):
    """Return a list of N random d-dimensional torch vectors.

    uniform=True: i.i.d. standard-normal vectors (despite the parameter
    name these are Gaussian, not uniform).
    uniform=False: each coordinate is built recursively from its
    predecessor via (randn * prev)**2 + index, perturbed with Gaussian
    noise, then scaled to unit length.
    """
    if uniform:
        return [torch.randn(d) for _ in range(N)]

    def _correlated_unit_vector():
        vec = torch.zeros(d)
        for j in range(1, d):  # vec[0] stays 0, i.e. its own index
            vec[j] = (torch.randn(1) * vec[j - 1]) ** 2 + j
        vec += torch.randn(d)
        return torch.div(vec, norm(vec))

    return [_correlated_unit_vector() for _ in range(N)]
def experiment(N, d, trials=50, epsilon=0.1, delta=0.1):
    """Compare exact Shapley values, Monte-Carlo estimates, and normalised
    cosine similarities over `trials` sets of random vectors.

    K (number of sampled permutations) is derived from a Hoeffding-style
    bound for accuracy epsilon at confidence 1-delta.

    NOTE(review): appends into the *module-level* `results` dict, which is
    never cleared — repeated calls accumulate rows across experiments.

    :return: (results, time_dict) — the shared results dict and this
             call's per-phase CPU-time totals
    """
    time_dict = defaultdict(float)
    now = process_time()
    # sample count from the concentration bound (range of v is 1, so 1**2)
    K = int(np.ceil( np.log(2.0/delta) * 1**2 / (2.0 * epsilon **2) ))
    # print("For epsilon {}, delta {}, sampling method needs {} samples.".format(epsilon, delta, K))
    for i in range(trials):
        vectors = generate_random_vectors(N, d, True)
        grand = torch.stack(vectors).sum(dim=0)
        now = clock('init', now, time_dict)
        svs = calculate_svs(vectors, N, d)
        now = clock('true svs', now, time_dict)
        sv_hats = calculate_sv_hats(vectors, N, d, K)
        now = clock('sv hats', now, time_dict)
        # NOTE(review): the loop variable `v` shadows the characteristic
        # function v() defined above — harmless here, but confusing.
        cosines = torch.tensor([cos(v, grand, 0) for v in vectors])
        cosines = torch.div(cosines, sum(cosines))
        now = clock('cosines', now, time_dict)
        results['svs'].append(svs)
        results['sv_hats'].append(sv_hats)
        # cosine baseline vs exact Shapley values
        diff_cos = cosines - svs
        results['diff'].append(diff_cos)
        results['l1diff'].append(sum(np.abs(diff_cos)).item() )
        results['l2diff'].append(norm(diff_cos).item())
        results['cossim'].append( cos(cosines, svs, 0).item())
        r, p = pearsonr(cosines, svs)
        results['pearsonr'].append(r)
        results['pearsonp'].append(p)
        # Monte-Carlo estimate vs exact Shapley values
        diff_hat = svs - sv_hats
        results['diff_hat'].append(diff_hat)
        results['l1diff_hat'].append(sum(np.abs(diff_hat)).item() )
        results['l2diff_hat'].append(norm(diff_hat).item())
        results['cossim_hat'].append( cos(sv_hats, svs, 0).item())
        r, p = pearsonr(sv_hats, svs)
        results['pearsonr_hat'].append(r)
        results['pearsonp_hat'].append(p)
        now = clock('results', now, time_dict)
    return results, time_dict
import matplotlib.pyplot as plt
trials = 10
# Experiment vs N: sweep the number of players at fixed dimension.
Nmin, Nmax = 5, 10
d = 1000
stats_dict = defaultdict(list)
# NOTE(review): the range hard-codes 5 instead of using Nmin.
for n in range(5, Nmax+1):
    results, time_dict = experiment(n, d, trials=trials)
    df = pd.DataFrame(results, columns=['l1diff', 'l1diff_hat', 'l2diff', 'l2diff_hat', 'pearsonr', 'pearsonr_hat'])
    stats_dict['n'].append(n)
    # mean/std of every error metric across trials
    for column in df.columns:
        stats_dict[column+'_mean'].append(df[column].mean())
        stats_dict[column+'_std'].append(df[column].std())
    # average per-trial CPU time of the two estimators
    stats_dict['cosines'].append(time_dict['cosines'] / trials)
    stats_dict['sv hats'].append(time_dict['sv hats'] / trials)
stats_df = pd.DataFrame(stats_dict)
stats_df.to_csv('error_vs_N={}-{}.csv'.format(Nmin, Nmax), index=False)
# Experiment vs d: sweep the dimension (powers of two) at fixed N.
dmin, dmax = 10, 15
n = 10
stats_dict = defaultdict(list)
# NOTE(review): the range hard-codes 10 instead of using dmin.
for d in range(10, dmax+1):
    d = 2**d
    results, time_dict = experiment(n, d, trials=trials)
    df = pd.DataFrame(results, columns=['l1diff', 'l1diff_hat', 'l2diff', 'l2diff_hat', 'pearsonr', 'pearsonr_hat'])
    stats_dict['d'].append(d)
    for column in df.columns:
        stats_dict[column+'_mean'].append(df[column].mean())
        stats_dict[column+'_std'].append(df[column].std())
    stats_dict['cosines'].append(time_dict['cosines'] / trials)
    stats_dict['sv hats'].append(time_dict['sv hats'] / trials)
stats_df = pd.DataFrame(stats_dict)
stats_df.to_csv('error_vs_d={}-{}.csv'.format(2**dmin, 2**dmax), index=False)
exit()
# NOTE(review): everything below is dead code (after exit()) and references
# names that are not defined at module scope (`vectors`, `grand`, `svs`);
# it was presumably an efficiency check for one specific run.
data = defaultdict(list)
for coalition in powerset(range(N)):
    if not coalition: continue
    coalition_v = torch.zeros(M)
    for i in coalition:
        coalition_v += vectors[i]
    data['coalition'].append(coalition)
    data['utility'].append(cos(grand, coalition_v, 0).item() )
    data['sum of svs'].append(sum([svs[i] for i in coalition]).item() )
df = pd.DataFrame(data)
df['utility_left_over'] = df['utility'] - df['sum of svs']
df['efficient'] = df['utility_left_over'] == 0
print(df)
| [
"pandas.DataFrame",
"numpy.abs",
"torch.stack",
"numpy.log",
"random.shuffle",
"time.process_time",
"torch.randn",
"scipy.stats.pearsonr",
"collections.defaultdict",
"itertools.combinations",
"torch.arange",
"torch.linalg.norm",
"torch.zeros",
"pandas.set_option",
"torch.square"
] | [((437, 459), 'torch.arange', 'torch.arange', (['(1)', '(1 + M)'], {}), '(1, 1 + M)\n', (449, 459), False, 'import torch\n'), ((511, 526), 'torch.square', 'torch.square', (['a'], {}), '(a)\n', (523, 526), False, 'import torch\n'), ((1902, 1944), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (1915, 1944), True, 'import pandas as pd\n'), ((1945, 1988), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', 'None'], {}), "('display.max_colwidth', None)\n", (1958, 1988), True, 'import pandas as pd\n'), ((2164, 2181), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2175, 2181), False, 'from collections import defaultdict\n'), ((4249, 4266), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4260, 4266), False, 'from collections import defaultdict\n'), ((4761, 4785), 'pandas.DataFrame', 'pd.DataFrame', (['stats_dict'], {}), '(stats_dict)\n', (4773, 4785), True, 'import pandas as pd\n'), ((4919, 4936), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4930, 4936), False, 'from collections import defaultdict\n'), ((5441, 5465), 'pandas.DataFrame', 'pd.DataFrame', (['stats_dict'], {}), '(stats_dict)\n', (5453, 5465), True, 'import pandas as pd\n'), ((5562, 5579), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5573, 5579), False, 'from collections import defaultdict\n'), ((5898, 5916), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (5910, 5916), True, 'import pandas as pd\n'), ((485, 507), 'torch.arange', 'torch.arange', (['(1)', '(1 + M)'], {}), '(1, 1 + M)\n', (497, 507), False, 'import torch\n'), ((971, 985), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (982, 985), False, 'import torch\n'), ((1502, 1516), 'torch.zeros', 'torch.zeros', (['N'], {}), '(N)\n', (1513, 1516), False, 'import torch\n'), ((1567, 1592), 'random.shuffle', 'shuffle', (['all_permutations'], {}), 
'(all_permutations)\n', (1574, 1592), False, 'from random import shuffle\n'), ((2093, 2107), 'time.process_time', 'process_time', ([], {}), '()\n', (2105, 2107), False, 'from time import process_time\n'), ((2652, 2670), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (2663, 2670), False, 'from collections import defaultdict\n'), ((2678, 2692), 'time.process_time', 'process_time', ([], {}), '()\n', (2690, 2692), False, 'from time import process_time\n'), ((4355, 4466), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'columns': "['l1diff', 'l1diff_hat', 'l2diff', 'l2diff_hat', 'pearsonr', 'pearsonr_hat']"}), "(results, columns=['l1diff', 'l1diff_hat', 'l2diff',\n 'l2diff_hat', 'pearsonr', 'pearsonr_hat'])\n", (4367, 4466), True, 'import pandas as pd\n'), ((5035, 5146), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'columns': "['l1diff', 'l1diff_hat', 'l2diff', 'l2diff_hat', 'pearsonr', 'pearsonr_hat']"}), "(results, columns=['l1diff', 'l1diff_hat', 'l2diff',\n 'l2diff_hat', 'pearsonr', 'pearsonr_hat'])\n", (5047, 5146), True, 'import pandas as pd\n'), ((5661, 5675), 'torch.zeros', 'torch.zeros', (['M'], {}), '(M)\n', (5672, 5675), False, 'import torch\n'), ((1069, 1083), 'torch.zeros', 'torch.zeros', (['d'], {}), '(d)\n', (1080, 1083), False, 'import torch\n'), ((1654, 1668), 'torch.zeros', 'torch.zeros', (['d'], {}), '(d)\n', (1665, 1668), False, 'import torch\n'), ((3636, 3658), 'scipy.stats.pearsonr', 'pearsonr', (['cosines', 'svs'], {}), '(cosines, svs)\n', (3644, 3658), False, 'from scipy.stats import pearsonr\n'), ((3976, 3998), 'scipy.stats.pearsonr', 'pearsonr', (['sv_hats', 'svs'], {}), '(sv_hats, svs)\n', (3984, 3998), False, 'from scipy.stats import pearsonr\n'), ((193, 211), 'itertools.combinations', 'combinations', (['s', 'r'], {}), '(s, r)\n', (205, 211), False, 'from itertools import chain, combinations\n'), ((642, 659), 'torch.linalg.norm', 'norm', (['coalition_v'], {}), '(coalition_v)\n', (646, 659), False, 'from 
torch.linalg import norm\n'), ((749, 762), 'torch.linalg.norm', 'norm', (['grand_v'], {}), '(grand_v)\n', (753, 762), False, 'from torch.linalg import norm\n'), ((932, 952), 'torch.stack', 'torch.stack', (['vectors'], {}), '(vectors)\n', (943, 952), False, 'import torch\n'), ((1463, 1483), 'torch.stack', 'torch.stack', (['vectors'], {}), '(vectors)\n', (1474, 1483), False, 'import torch\n'), ((2257, 2271), 'torch.randn', 'torch.randn', (['d'], {}), '(d)\n', (2268, 2271), False, 'import torch\n'), ((2346, 2360), 'torch.zeros', 'torch.zeros', (['d'], {}), '(d)\n', (2357, 2360), False, 'import torch\n'), ((2496, 2510), 'torch.randn', 'torch.randn', (['d'], {}), '(d)\n', (2507, 2510), False, 'import torch\n'), ((677, 694), 'torch.linalg.norm', 'norm', (['coalition_v'], {}), '(coalition_v)\n', (681, 694), False, 'from torch.linalg import norm\n'), ((780, 793), 'torch.linalg.norm', 'norm', (['grand_v'], {}), '(grand_v)\n', (784, 793), False, 'from torch.linalg import norm\n'), ((2944, 2964), 'torch.stack', 'torch.stack', (['vectors'], {}), '(vectors)\n', (2955, 2964), False, 'import torch\n'), ((2547, 2559), 'torch.linalg.norm', 'norm', (['rand_v'], {}), '(rand_v)\n', (2551, 2559), False, 'from torch.linalg import norm\n'), ((2711, 2730), 'numpy.log', 'np.log', (['(2.0 / delta)'], {}), '(2.0 / delta)\n', (2717, 2730), True, 'import numpy as np\n'), ((3547, 3561), 'torch.linalg.norm', 'norm', (['diff_cos'], {}), '(diff_cos)\n', (3551, 3561), False, 'from torch.linalg import norm\n'), ((3883, 3897), 'torch.linalg.norm', 'norm', (['diff_hat'], {}), '(diff_hat)\n', (3887, 3897), False, 'from torch.linalg import norm\n'), ((3493, 3509), 'numpy.abs', 'np.abs', (['diff_cos'], {}), '(diff_cos)\n', (3499, 3509), True, 'import numpy as np\n'), ((3825, 3841), 'numpy.abs', 'np.abs', (['diff_hat'], {}), '(diff_hat)\n', (3831, 3841), True, 'import numpy as np\n'), ((2446, 2460), 'torch.randn', 'torch.randn', (['(1)'], {}), '(1)\n', (2457, 2460), False, 'import torch\n')] |
import gensim
import numpy as np
import pandas as pd
import re
import os
import time
import jieba
import cv2
import json
import urllib
import random
import hashlib
from snownlp import sentiment
from snownlp import SnowNLP
import jieba.posseg as pseg
from gensim.models import word2vec
import logging
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models, transforms
from PIL import Image
from tensorflow.keras.applications import vgg19
from tensorflow.keras.applications import resnet50
from tensorflow.keras.preprocessing import image
from collections import Counter
from scipy.linalg import norm
# --- Dataset CSV paths (Windows, absolute) ---
train_csv_path = r'G:\毕设\数据集\微博\train.csv'
text_csv_path = r'G:\毕设\数据集\微博\text.csv'
user_csv_path = r'G:\毕设\数据集\微博\user.csv'
image_csv_path = r'G:\毕设\数据集\微博\image.csv'
# --- ImageNet class-label files and image-classifier score dumps ---
en_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_index.json'
cn_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_cn.json'
image_class_vgg19_score_path = r'G:\毕设\数据集\微博\image_class_vgg19.txt'
image_class_resnet50_score_path = r'G:\毕设\数据集\微博\image_class_resnet50.txt'
# --- Resources resolved relative to the parent of the current working
# directory (a sibling 'util' folder); fragile if cwd changes ---
train_negative_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/negative.txt'
train_positive_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/positive.txt'
sentiment_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/sentiment.marshal'
stopwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/stopwords.txt"
word2vec_txt_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/word2vec_corpus.txt"
word2vec_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/text8.model"
possentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/possentiwords.txt"
negsentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/negsentiwords.txt"
# --- Baidu translate API credentials (secret redacted) ---
appid = '20190716000318328'
secretKey = '<KEY>'
url_baidu = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
def train_data_read(train_csv_path):
    """
    Read the training CSV and split it into three column views.

    :param train_csv_path: path of the training CSV file
    :return: (df_text, df_user, df_image) — post text/label columns, user
             profile columns, and picture-list columns, all keyed by 'id'
    """
    logging.info("正在载入数据中...")
    # Weibo post text + label columns
    df_text = pd.read_csv(train_csv_path,usecols=['id','text','category','label'])
    # user profile columns
    df_user = pd.read_csv(train_csv_path,usecols=['id','userGender','userFollowCount','userFansCount','userWeiboCount','userLocation','userDescription'])
    # attached-picture list column
    df_image = pd.read_csv(train_csv_path,usecols=['id','piclist'])
    logging.info("数据载入完成")
    return df_text,df_user,df_image
def text_data_read():
    """Load the pre-extracted text-feature CSV into a DataFrame.

    :return: DataFrame read from the module-level ``text_csv_path``
    """
    return pd.read_csv(text_csv_path)
def text_insert_cols(df_text, new_features_list):
    """Append new zero-filled feature columns to the text DataFrame.

    Names already present (or repeated in the list) are skipped; the
    first-occurrence order of ``new_features_list`` is preserved for the
    appended columns.

    :param df_text: text-feature DataFrame
    :param new_features_list: feature column names to ensure exist
    :return: DataFrame extended with the missing columns, filled with 0
    """
    logging.info("正在扩展文本新特征列...")
    existing = list(df_text.columns)
    missing = []
    # keep only genuinely new names, de-duplicated, in first-seen order
    for name in new_features_list:
        if name not in existing and name not in missing:
            missing.append(name)
    df_text = df_text.reindex(columns=existing + missing, fill_value=0)
    logging.info("文本新特征列扩展完成")
    return df_text
def text_feature_extraction(df_text):
    """Populate the word2vec feature columns of the text DataFrame in place.

    Only the 64 word2vec mean-vector columns are computed here; every other
    feature (punctuation flags, sentiment, POS counts, ...) is currently
    disabled and kept below as commented-out code.

    :param df_text: DataFrame with a 'text' column and word2vec_1..64 columns
    :return: the same DataFrame with the word2vec columns filled
    """
    logging.info("开始文本特征提取...")
    # # text length
    # df_text['text_length'] = df_text['text'].str.len()
    # # cast the sentiment-score column to float
    # df_text['sentiment_score'] = df_text['sentiment_score'].astype(float)
    # ensure the 64 word2vec columns are float before row-wise assignment
    for j in range(1,65):
        df_text['word2vec_'+str(j)] = df_text['word2vec_'+str(j)].astype(float)
    # # remaining per-row statistics
    i = 0
    for index, row in df_text.iterrows():
        logging.info("处理进度"+str(i+1)+"/"+str(df_text.shape[0]))
        # text content to process for this row
        text_content = row['text']
        # # question-mark flag and count
        # if row['num_questmarks'] > 0:
        #     df_text.at[i, 'contains_questmark'] = 1
        # df_text.at[i,'contains_questmark'], df_text.at[i,'num_questmarks'] = text_questmark(text_content)
        # # exclamation-mark flag and count
        # if row['num_exclammarks'] > 0:
        #     df_text.at[i, 'contains_exclammark'] = 1
        # df_text.at[i, 'contains_exclammark'], df_text.at[i, 'num_exclammarks'] = text_exclammark(text_content)
        # # hashtag flag and count
        # if row['num_hashtags'] > 0:
        #     df_text.at[i, 'contains_hashtag'] = 1
        # df_text.at[i, 'contains_hashtag'], df_text.at[i, 'num_hashtags'] = text_hashtag(text_content)
        # # URL flag and count
        # if row['num_URLs'] > 0:
        #     df_text.at[i, 'contains_URL'] = 1
        # df_text.at[i, 'contains_URL'], df_text.at[i, 'num_URLs'] = text_url(text_content)
        # # mention (@) flag and count
        # if row['num_mentions'] > 0:
        #     df_text.at[i, 'contains_mention'] = 1
        # df_text.at[i, 'contains_mention'], df_text.at[i, 'num_mentions'] = text_mention(text_content)
        # # sentiment score
        # df_text.at[i, 'sentiment_score'] = text_sentiment_score(text_content)
        # # POS tagging: counts of nouns, verbs, pronouns
        # df_text.at[i, 'num_noun'],df_text.at[i, 'num_verb'],df_text.at[i, 'num_pronoun'] = text_part_of_speech(text_content)
        # mean word2vec vector of the post text.
        # NOTE(review): .at with a slice label (-64:) looks wrong — .at takes
        # scalar labels; .iloc[i, -64:] is presumably intended. Confirm this
        # actually runs on the pandas version in use.
        df_text.at[i,-64:] = text_compute_word2vec(text_content).tolist()
        # # positive / negative sentiment word counts
        # df_text.at[i, 'num_possentiwords'], df_text.at[i, 'num_negsentiwords'] = text_pos_neg_sentiwords(text_content)
        # # first/second/third person pronoun presence
        # df_text.at[i, 'contains_firstorderpron'], df_text.at[i, 'contains_secondorderpron'], df_text.at[i, 'contains_thirdorderpron'] = text_get_fir_sec_thi_orderpron(text_content)
        i += 1
    logging.info("文本特征提取结束...")
    return df_text
def text_get_fir_sec_thi_orderpron(text_content):
    """
    Detect whether first/second/third person pronouns occur in the text.

    Bug fix: the original used an if/elif chain, so at most one flag could
    ever be set even when several pronoun classes co-occur; the three
    checks are now independent.

    :param text_content: Weibo post text
    :return: (has_first, has_second, has_third) — 0 absent, 1 present
    """
    has_first = 1 if '我' in text_content else 0   # first person
    has_second = 1 if '你' in text_content else 0  # second person
    # third person: any of he/she/it
    has_third = 1 if ('他' in text_content or '她' in text_content or '它' in text_content) else 0
    return has_first, has_second, has_third
def text_pos_neg_sentiwords(text_content):
    """Count positive and negative sentiment words in the cleaned text.

    Relies on the module-level ``possentiwords`` / ``negsentiwords`` lists
    and on ``jieba_clear_text`` for stop-word-free tokenisation.  A token
    present in both lists counts as positive only (matching the original
    if/elif priority).

    :param text_content: raw post text
    :return: (num_positive, num_negative)
    """
    tokens = jieba_clear_text(text_content).split(' ')
    num_pos = sum(1 for token in tokens if token in possentiwords)
    # negative only when not already counted as positive
    num_neg = sum(1 for token in tokens
                  if token not in possentiwords and token in negsentiwords)
    return num_pos, num_neg
def text_part_of_speech(text_content):
    """
    POS-tag the Chinese characters of the text and count word classes.

    :param text_content: text to analyse (NaN yields all zeros)
    :return: (nouns, verbs, pronouns) counted from jieba POS tag prefixes
    """
    if pd.isna(text_content):
        return 0, 0, 0
    # keep CJK characters only before tagging
    chinese_only = "".join(re.findall(u"[\u4e00-\u9fa5]", text_content))
    counts = {'n': 0, 'v': 0, 'r': 0}
    for token in pseg.cut(chinese_only):
        # tag families: n* nouns, v* verbs, r* pronouns
        if token.flag.startswith('n'):
            counts['n'] += 1
        elif token.flag.startswith('v'):
            counts['v'] += 1
        elif token.flag.startswith('r'):
            counts['r'] += 1
    return counts['n'], counts['v'], counts['r']
def text_questmark(text_content):
"""
处理文本中的问号
:param text_content:处理对象文本
:return: 是否含有问号(1:有,0:无),问号数量
"""
en_questmark_nums = text_content.count("?")
cn_questmark_nums = text_content.count("?")
if(en_questmark_nums + cn_questmark_nums > 0):
return 1,en_questmark_nums + cn_questmark_nums
else:
return 0,0
def text_train_sentiment():
    """Train the SnowNLP sentiment model on the Weibo corpora and save it.

    Side effects only: writes the trained model to ``sentiment_model_path``.
    After saving, snownlp's sentiment/__init__.py data_path must be pointed
    at the new model file for it to be picked up.
    """
    # train on the negative/positive Weibo corpora
    sentiment.train(train_negative_corpus_path,train_positive_corpus_path)
    # save the model; also update snownlp->sentiment->__init__.py->data_path
    sentiment.save(sentiment_model_path)
def text_sentiment_score(text_content):
    """
    Sentiment score of the text.
    0 <------------------> 1
    negative          positive

    NaN input, or any failure during analysis, yields 0.

    Fix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.

    :param text_content: text to score
    :return: SnowNLP sentiment probability in [0, 1], or 0 on failure
    """
    if pd.isna(text_content):
        return 0
    # strip stop words before scoring
    new_text_content = jieba_clear_text(text_content)
    try:
        return SnowNLP(new_text_content).sentiments
    except Exception:
        # e.g. empty string after cleaning — treat as neutral/unknown
        return 0
def jieba_clear_text(text):
    """Tokenise the CJK characters of *text* with jieba and drop stop words.

    :param text: raw text; non-CJK characters are discarded first
    :return: space-joined tokens not present in the module-level
             ``stopwords`` list
    """
    chinese_only = "".join(re.findall(u"[\u4e00-\u9fa5]", text))
    # jieba.cut yields tokens lazily; filter against the stop-word list
    kept = [token for token in jieba.cut(chinese_only) if token not in stopwords]
    return " ".join(kept)
def get_stopwords_list():
    """
    Load the stop-word list from ``stopwords_path``.

    Fix: uses a context manager so the file handle is closed even if
    reading raises (the original leaked the handle on error).

    :return: list of stop words, one per line, stripped
    """
    with open(stopwords_path, "r", encoding='UTF-8') as fstop:
        return [line.strip() for line in fstop]
def get_possentiwords_list():
    """
    Load the positive-sentiment word list from ``possentiwords_path``.

    Fix: uses a context manager so the file handle is closed even if
    reading raises (the original leaked the handle on error).

    :return: list of positive words, one per line, stripped
    """
    with open(possentiwords_path, "r", encoding='UTF-8') as fp:
        return [line.strip() for line in fp]
def get_negsentiwords_list():
    """
    Load the negative-sentiment word list from ``negsentiwords_path``.

    Fix: uses a context manager so the file handle is closed even if
    reading raises (the original leaked the handle on error).

    :return: list of negative words, one per line, stripped
    """
    with open(negsentiwords_path, "r", encoding='UTF-8') as fn:
        return [line.strip() for line in fn]
def text_exclammark(text_content):
"""
处理文本中的感叹号
:param text_content:处理对象文本
:return: 是否含有感叹(1:有,0:无),感叹数量
"""
en_exclammark_nums = text_content.count("!")
cn_exclammark_nums = text_content.count("!")
if(en_exclammark_nums + cn_exclammark_nums > 0):
return 1,en_exclammark_nums + cn_exclammark_nums
else:
return 0,0
def text_hashtag(text_content):
    """
    Detect Weibo hashtags, which are wrapped in a *pair* of '#'
    (e.g. #毕业设计#).

    :param text_content: text to scan
    :return: (flag, pair_count) — pair_count is the '#' count divided by 2
             (a float, matching the original behaviour)
    """
    marks = text_content.count("#")
    if marks:
        return 1, marks/2
    return 0, 0
def text_url(text_content):
    """
    Detect http/https URLs in the text.

    :param text_content: text to scan
    :return: (flag, count) — flag is 1 when at least one URL matches
    """
    matches = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', text_content)
    return (1, len(matches)) if matches else (0, 0)
def text_mention(text_content):
    """
    Detect user mentions ('@') in the post text.

    :param text_content: text to scan
    :return: (flag, count) — flag is 1 when at least one '@' is present
    """
    mentions = text_content.count("@")
    return (1, mentions) if mentions > 0 else (0, 0)
def text_train_word2vec_model(word2vec_txt_path,word2vec_model_path):
    """
    Train a word2vec embedding model and save it to disk.

    NOTE(review): size=100 here, but text_compute_word2vec below assumes
    64-dimensional vectors (np.zeros(64)) — presumably a different model
    file is actually loaded at runtime; confirm the dimensions match.

    :param word2vec_txt_path: path of the training corpus (Text8 format)
    :param word2vec_model_path: where to save the trained model
    :return: the trained Word2Vec model
    """
    sentences = word2vec.Text8Corpus(word2vec_txt_path)
    model = word2vec.Word2Vec(sentences,size=100,workers=4)
    # Word2Vec parameter notes (translated from the original Chinese):
    # 1. sentences: an iterable of token lists; for big corpora use
    #    BrownCorpus, Text8Corpus or LineSentence.
    # 2. sg: training algorithm — 0 CBOW (default), 1 skip-gram.
    # 3. size: output vector dimensionality, default 100; larger sizes need
    #    more data but can work better (tens to hundreds recommended).
    # 4. window: context window; each word considers up to `window` words
    #    on each side (actual window is randomly sub-sampled), default 5.
    # 5. alpha: learning rate.
    # 6. seed: RNG seed for vector initialisation.
    # 7. min_count: drop words rarer than this, default 5.
    # 8. max_vocab_size: RAM cap during vocab building (~1 GB per 10M
    #    words); None means unlimited.
    # 9. sample: down-sampling threshold for frequent words, default 1e-3.
    # 10. workers: number of training threads.
    # 11. hs: 1 hierarchical softmax, 0 negative sampling (default).
    # 12. negative: number of noise words when negative sampling is used.
    # 13. cbow_mean: 0 sum of context vectors, 1 mean (default, CBOW only).
    # 14. hashfxn: hash function for weight init (default: Python's hash).
    # 15. iter: number of epochs, default 5.
    # 16. trim_rule: vocabulary trimming policy (None -> use min_count).
    # 17. sorted_vocab: 1 (default) sorts vocab by descending frequency
    #     before assigning word indices.
    # 18. batch_words: words per batch passed to worker threads (10000).
    model.save(word2vec_model_path)
    return model
def text_load_word2vec_model(word2vec_model_path):
    """
    Load a previously trained gensim word2vec model from disk.

    :param word2vec_model_path: path of the saved model file
    :return: the loaded Word2Vec model
    """
    return word2vec.Word2Vec.load(word2vec_model_path)
def text_get_clear_word2vec_corpus(word2vec_txt_path):
    """
    Build the word2vec training corpus from the raw Weibo texts.

    NOTE(review): despite taking only the output path, this iterates the
    *module-level* ``df_text`` DataFrame; it must be loaded before calling.
    The file is opened in append mode, so repeated runs grow the corpus.

    :param word2vec_txt_path: file the cleaned corpus is appended to
    :return: None (the original docstring says 0, but there is no return)
    """
    with open(word2vec_txt_path, 'a') as f:
        for index, row in df_text.iterrows():
            text_content = row['text']
            # keep CJK characters only, then tokenise and drop stop words
            raw_txt = jieba_clear_text("".join(re.findall(u"[\u4e00-\u9fa5]", text_content)))
            f.write(raw_txt + "\n")
    logging.info("清理word2vec语料文本结束")
def text_compute_word2vec(text_content):
    """
    Mean word2vec vector of a post's (cleaned, stop-word-free) tokens.

    Relies on the module-level ``model_word2vec``; out-of-vocabulary
    tokens contribute zero vectors.  Assumes 64-dimensional embeddings —
    NOTE(review): text_train_word2vec_model above trains with size=100, so
    a different (64-d) model is presumably loaded at runtime; confirm.

    :param text_content: raw post text (NaN yields a zero vector)
    :return: numpy array of shape (64,)
    """
    if pd.isna(text_content):
        return np.zeros(64)
    raw_txt_list = jieba_clear_text("".join(re.findall(u"[\u4e00-\u9fa5]", text_content))).split(' ')
    text_word2vec_score_list = []
    for word in raw_txt_list:
        try:
            # for a self-trained model use the .wv accessor instead:
            # text_word2vec_score_list.append(model_word2vec.wv[word])
            text_word2vec_score_list.append(model_word2vec[word])
        except KeyError:
            # out-of-vocabulary word: contribute a zero vector
            text_word2vec_score_list.append(np.zeros(64))
    result_mean_array = np.mean(np.array(text_word2vec_score_list),axis=0)
    return result_mean_array
def user_data_read():
    """
    Load the user-feature CSV into a DataFrame.

    :return: DataFrame read from the module-level ``user_csv_path``
    """
    return pd.read_csv(user_csv_path)
def user_insert_cols(df_user,new_features_list):
    """
    Append new zero-filled feature columns to the user DataFrame.

    Fix: unlike its text_insert_cols / image_insert_cols siblings, the
    original appended the new names without filtering out columns already
    present, which yields duplicate column labels in the reindex target.
    Duplicates are now removed first (first-occurrence order preserved),
    matching the sibling functions.

    :param df_user: user-feature DataFrame
    :param new_features_list: feature column names to ensure exist
    :return: DataFrame extended with the missing columns, filled with 0
    """
    logging.info("正在扩展用户新特征列...")
    col_name = list(df_user.columns)
    # de-duplicate against existing columns, keeping the given order
    col_name = col_name + sorted(set(new_features_list) - set(col_name), key=new_features_list.index)
    df_user = df_user.reindex(columns=col_name, fill_value=0)
    logging.info("用户新特征列扩展完成")
    return df_user
def user_feature_extraction(df_user):
    """
    Fill in derived user features (currently only the follow/fans ratio).

    Writes ``folfans_ratio`` row by row via a positional counter, so the
    DataFrame is expected to have the default 0..n-1 RangeIndex.

    :param df_user: DataFrame with user_follow_count / user_fans_count
                    and a folfans_ratio column to populate
    :return: the same DataFrame with folfans_ratio filled in
    """
    logging.info("开始用户特征提取...")
    # make sure the target column holds floats
    df_user['folfans_ratio'] = df_user['folfans_ratio'].astype(float)
    total = df_user.shape[0]
    position = 0
    for _, row in df_user.iterrows():
        logging.info("处理进度"+str(position+1)+"/"+str(total))
        follows = row['user_follow_count']
        fans = row['user_fans_count']
        df_user.at[position,'folfans_ratio'] = user_compute_folfans_ratio(follows, fans)
        position += 1
    logging.info("用户特征提取结束...")
    return df_user
def user_compute_folfans_ratio(user_follow_count,user_fans_count):
    """
    Follow/fans ratio of a user; 0 when there are no fans (avoids
    division by zero).

    :param user_follow_count: number of accounts the user follows
    :param user_fans_count: number of followers the user has
    :return: follow count divided by fans count, or 0
    """
    return user_follow_count / user_fans_count if user_fans_count != 0 else 0
def image_data_read():
    """Load the image-feature CSV (path from module-level image_csv_path).

    :return: image-feature dataframe
    """
    return pd.read_csv(image_csv_path)
def image_insert_cols(df_image, new_features_list):
    """Append new zero-filled feature columns to the image dataframe.

    Names already present in df_image are skipped so the reindex target
    never contains duplicate labels; the remaining new names keep the
    order in which they appear in new_features_list.

    :param df_image: image-info dataframe
    :param new_features_list: candidate new feature column names
    :return: dataframe extended with the new columns, filled with 0
    """
    logging.info("正在扩展图片新特征列...")
    existing = list(df_image.columns)
    # De-duplicate against the current columns, preserving list order.
    additions = sorted(set(new_features_list) - set(existing), key=new_features_list.index)
    df_image = df_image.reindex(columns=existing + additions, fill_value=0)
    logging.info("图片新特征列扩展完成")
    return df_image
def image_feature_extraction(df_image):
    """Populate deep-learning image features (2048-d ResNet50 vector) per row.

    Rows whose picture list is missing (NaN) are skipped.  For the rest,
    the first picture name is resolved against the rumor / non-rumor image
    folders and fed through the module-level model_resnet50 network;
    failures are logged and the row is left as-is.

    NOTE(review): rows are addressed with the positional counter ``i`` in
    ``df_image.at[i, ...]`` and ``df_image.iloc[i, 1]`` — this assumes the
    default RangeIndex; confirm if the dataframe's index can ever differ.

    :param df_image: image dataframe with 'piclist' and 2048 trailing
                     feature columns
    :return: the same dataframe with features filled in
    """
    logging.info("开始图片特征提取...")
    # Cast the trailing 2048 feature columns to float.
    df_image.iloc[:,-2048:] = df_image.iloc[:,-2048:].astype(float)
    # df_image.iloc[:, -2:] = df_image.iloc[:, -2:].astype(object)
    # return df_image
    # df_image['sim_image_word'] = df_image['sim_image_word'].astype(float)
    # Per-row processing.
    i = 0
    image_name = []
    for index, row in df_image.iterrows():
        logging.info("处理进度"+str(i+1)+"/"+str(df_image.shape[0]))
        # Skip rows that have no pictures (NaN picture list).
        if (pd.isna(df_image.iloc[i,1])):
            i += 1
            continue
        else:
            image_list = row['piclist'].split('\t')
            # Resolve the first picture against both candidate folders
            # (train-set folders kept below for reference).
            # filename1 = 'G:/train/rumor_pic/' + image_list[0]
            # filename2 = 'G:/train/truth_pic/' + image_list[0]
            filename1 = 'G:/test/rumor_images/' + image_list[0]
            filename2 = 'G:/test/nonrumor_images/' + image_list[0]
            filename= ''
            if (os.path.isfile(filename1)):
                filename = filename1
            else:
                filename = filename2
            # Color moments (currently disabled).
            # df_image.at[i, -9:] = image_color_moments(filename)
            # Deep-learning features --- PyTorch ResNet50 CNN.
            try:
                df_image.at[i, -2048:] = image_resnet_cnn(filename,model_resnet50)
            except Exception as e:
                # Bad image (e.g. wrong channel count): log and move on.
                logging.info("图片有问题"+str(e))
            # df_image['tf_vgg19_class'] = image_get_class(filename)
            # # Width, height and file size in KB (disabled).
            # df_image.at[i, 'image_width'], df_image.at[i, 'image_height'], df_image.at[i, 'image_kb'] = image_get_width_height_kb(filename)
            # # Image-text similarity; with several pictures the first one represents the post (disabled).
            # df_image.at[i, 'sim_image_word'] = image_get_img_word_sim(i, row['tf_vgg19_class'], row['tf_resnet50_class'])
            i += 1
    logging.info("图片特征提取结束...")
    return df_image
def image_get_img_word_sim(index, vgg19_class_name, resnet50_class_name):
    """Image-text similarity between post #index and its picture's class names.

    similarity_score = arg max{ log( f_i * c_j * swv(term_i, term_j) ) }
        1 <= i <= n, 1 <= j <= m
    where swv(term_i, term_j) is the cosine similarity of the two word
    vectors, f_i the frequency of the i-th word of the post text, and c_j
    the classifier's confidence for the j-th class name.  The returned
    score averages the best VGG19 match and the best ResNet50 match.

    Relies on module-level df_text, model_word2vec, dict_image_class,
    list_vgg19_score and list_resnet50_score.
    """
    # Weibo post text for this row; missing text scores 0.
    text_content = df_text['text'][index]
    if pd.isna(text_content):
        return 0
    # Strip stop words / non-CJK characters and segment into a word list.
    list_clear_weibo_text = jieba_clear_text("".join(re.findall(u"[\u4e00-\u9fa5]", text_content))).split(' ')
    # Word frequencies of the post text.
    dict_weibo_text = Counter(list_clear_weibo_text)
    # Word vector of the (Chinese) VGG19 class name.
    try:
        term_vgg19_class_name = model_word2vec[dict_image_class[vgg19_class_name]]
    except Exception:
        # Name missing from word2vec vocabulary: 64-dim zero vector.
        term_vgg19_class_name = np.zeros(64)
    try:
        # Word vector of the (Chinese) ResNet50 class name.
        term_resnet50_class_name = model_word2vec[dict_image_class[resnet50_class_name]]
    except Exception:
        # Name missing from word2vec vocabulary: 64-dim zero vector.
        term_resnet50_class_name = np.zeros(64)
    list_vgg19_sim = []
    list_resnet50_sim = []
    # Walk the post's word-frequency table.
    for(word, frequency) in dict_weibo_text.items():
        try:
            # Word vector of the post word.
            term_i = model_word2vec[word]
        except Exception:
            # Word missing from word2vec vocabulary: 64-dim zero vector.
            term_i = np.zeros(64)
        # Zero vectors would make the cosine denominator 0, so score them 0.
        if np.all(term_i == 0):
            list_vgg19_sim.append(0)
            list_resnet50_sim.append(0)
            continue
        if np.all(term_vgg19_class_name == 0):
            list_vgg19_sim.append(0)
        if np.all(term_resnet50_class_name == 0):
            list_resnet50_sim.append(0)
        if np.all(term_vgg19_class_name != 0):
            # Cosine similarity of the word vectors.
            swv_vgg19 = np.dot(term_i, term_vgg19_class_name) / (norm(term_i) * norm(term_vgg19_class_name))
            # Image-text similarity term for VGG19.
            list_vgg19_sim.append(np.log(1 + frequency * float(list_vgg19_score[index]) * swv_vgg19))
        if np.all(term_resnet50_class_name != 0):
            # Cosine similarity of the word vectors.
            swv_resnet50 = np.dot(term_i, term_resnet50_class_name) / (norm(term_i) * norm(term_resnet50_class_name))
            # Image-text similarity term for ResNet50.
            list_resnet50_sim.append(np.log(1 + frequency*float(list_resnet50_score[index])*swv_resnet50))
    similarity_score = (max(list_vgg19_sim,default=0) + max(list_resnet50_sim,default=0)) / 2
    print(similarity_score)
    return similarity_score
def image_get_score_list(image_class_vgg19_score_path, image_class_resnet50_score_path):
    """Read top-1 classification confidences for VGG19 and ResNet50.

    Each file holds space-separated scores; both lists are returned as
    strings in file order.

    :param image_class_vgg19_score_path: path to the VGG19 score file
    :param image_class_resnet50_score_path: path to the ResNet50 score file
    :return: (vgg19_scores, resnet50_scores) lists of score strings
    """
    with open(image_class_vgg19_score_path, "r", encoding='UTF-8') as f1:
        list_vgg19_score = f1.read().split(" ")
    with open(image_class_resnet50_score_path, "r", encoding='UTF-8') as f2:
        list_resnet50_score = f2.read().split(" ")
    return list_vgg19_score, list_resnet50_score
def image_get_width_height_kb(img_path):
    """Return (width, height, size-in-KB) of an image file.

    Unreadable or missing images yield (0, 0, 0) instead of raising.

    :param img_path: path of the image file
    :return: (width_px, height_px, size_kb_rounded_to_2dp)
    """
    try:
        im = Image.open(img_path)  # returns a PIL Image object
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any open/decode failure maps to (0, 0, 0).
        return 0, 0, 0
    fsize = os.path.getsize(img_path)
    fsize = fsize / float(1024)
    return im.size[0], im.size[1], round(fsize, 2)
def image_color_moments(filename):
    """Extract the nine HSV color moments of an image.

    For each of the H, S, V channels: the mean (1st moment), the standard
    deviation (2nd moment) and the cube root of the skewness (3rd moment).

    :param filename: image path
    :return: list of 9 values [h_mean, s_mean, v_mean, h_std, s_std, v_std,
             h_3rd, s_3rd, v_3rd], or None when the image cannot be read
    """
    img = cv2.imread(filename)
    if img is None:
        return
    # OpenCV loads BGR; HSV (hue/saturation/value) matches human color
    # perception better than other color spaces, so work there.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    channels = cv2.split(hsv)  # h, s, v
    color_feature = []
    # First central moment: per-channel mean.
    color_feature.extend(np.mean(c) for c in channels)
    # Second central moment: per-channel standard deviation.
    color_feature.extend(np.std(c) for c in channels)
    # Third central moment: cube root of the skewness per channel.
    color_feature.extend(np.mean(abs(c - c.mean()) ** 3) ** (1. / 3) for c in channels)
    return color_feature
class net(nn.Module):
    """ResNet50 truncated after the average-pool layer.

    The classification head (fc) is never applied, so the forward pass
    yields the pooled 2048-channel feature map, used as a fixed feature
    extractor elsewhere in this module.
    """

    def __init__(self):
        super(net, self).__init__()
        # Pretrained torchvision ResNet50 backbone.
        self.net = models.resnet50(pretrained=True)

    def forward(self, input):
        # Replay the ResNet50 stages by hand, stopping before the fc head.
        x = self.net.conv1(input)
        x = self.net.bn1(x)
        x = self.net.relu(x)
        x = self.net.maxpool(x)
        for stage in (self.net.layer1, self.net.layer2, self.net.layer3, self.net.layer4):
            x = stage(x)
        return self.net.avgpool(x)
def image_resnet_cnn(img_path, net):
    """Run one image through the truncated ResNet50 and return its features.

    :param img_path: path of the image file
    :param net: feature-extraction network (an instance of class net)
    :return: list of 2048 floats; a zero list when anything fails
             (unreadable image, grayscale input, CUDA errors, ...)
    """
    transform = transforms.Compose([
        # Resize to 256x256.
        transforms.Resize((256,256)),
        # Center-crop to 224x224.
        transforms.CenterCrop((224,224)),
        # Convert to a Tensor.
        transforms.ToTensor()]
    )
    # Load the image and apply the shared transform.
    try:
        img = Image.open(img_path)
        img = transform(img)
        logging.info(img.shape)
        # Add a batch dimension: (C, H, W) -> (1, C, H, W).
        x = Variable(torch.unsqueeze(img, dim=0).float(), requires_grad=False)
        logging.info(x.shape)
        # Use GPU acceleration when available.
        if torch.cuda.is_available():
            x = x.cuda()
            net = net.cuda()
        # Move the result back to CPU, otherwise later steps may fail.
        y = net(x).cpu()
        y = torch.squeeze(y)
        cnn_features = y.data.numpy().tolist()
        logging.info(y.shape)
        return cnn_features
    except:
        # Deliberate catch-all: any failure maps to a 2048-d zero vector so
        # the caller's batch loop keeps going.
        return np.zeros(2048).tolist()
def image_get_class(img_path):
    """Classify each image with the module-level VGG19 model.

    :param img_path: iterable of image paths; the literal 'nothing' marks
                     a missing image
    :return: list of top-1 ImageNet class names ('no' for missing images)
    """
    img_array = []
    for path in img_path:
        if path == 'nothing':
            img_array.append('no')
            continue
        # Load at 224x224 (VGG19 input size) and convert to an array;
        # prediction runs in batch mode, so expand to a (1, 224, 224, 3)
        # batch holding this single image.
        img = image.load_img(path, target_size=(224, 224))
        batch = np.expand_dims(image.img_to_array(img), axis=0)
        predictions = model_tf_vgg19.predict(batch)
        # decode_predictions -> [[(wnid, name, score), ...]] per image;
        # only the first entry is valid here — keep its class name.
        img_array.append(vgg19.decode_predictions(predictions, top=1)[0][0][1])
        print(path)
    return img_array
def translateBaidu(text, f='en', t='zh'):
    """Translate text through the Baidu translate HTTP API.

    Uses the module-level appid / secretKey / url_baidu credentials; the
    request signature is md5(appid + text + salt + secretKey).

    :param text: text to translate
    :param f: source language code (default 'en')
    :param t: target language code (default 'zh')
    :return: the translated string
    """
    salt = random.randint(32768, 65536)
    sign = hashlib.md5((appid + text + str(salt) + secretKey).encode()).hexdigest()
    query = (url_baidu + '?appid=' + appid + '&q=' + urllib.parse.quote(text)
             + '&from=' + f + '&to=' + t + '&salt=' + str(salt) + '&sign=' + sign)
    payload = json.loads(urllib.request.urlopen(query).read().decode('utf-8'))
    return str(payload['trans_result'][0]['dst'])
def get_cn_json_class(en_imagenet_class_path, cn_imagenet_class_path):
    """Build a Chinese translation of the ImageNet class-name JSON.

    Reads the English class file ({index: [wnid, name]}), translates the
    1000 class names through translateBaidu (throttled to at most one call
    per second per the API limit), and writes {english: chinese} JSON to
    cn_imagenet_class_path.  Individual translation failures are printed
    and skipped.
    """
    # `with` guarantees the handles close even when json.loads or write
    # raises (the original closed them manually and leaked on error).
    with open(en_imagenet_class_path, "r", encoding='UTF-8') as fn:
        dic = json.loads(fn.read())  # English original
    txt_dic = {}  # Chinese
    for i in range(0, 1000):
        try:
            start = time.time()
            txt_dic[dic[str(i)][1]] = translateBaidu(dic[str(i)][1])
            end = time.time()
            if end - start < 1:
                time.sleep(1)  # API limit: at most one call per second
        except Exception as e:
            print(e)
    with open(cn_imagenet_class_path, 'w') as file_object:
        file_object.write(json.dumps(txt_dic))
def image_get_class_cn_dict(cn_imagenet_class_path):
    """Load the English-to-Chinese ImageNet class-name mapping.

    :param cn_imagenet_class_path: path to the JSON written by
                                   get_cn_json_class
    :return: dict mapping English class names to Chinese translations
    """
    # Context manager closes the handle even if json.loads raises
    # (the original used open/close and leaked on error).
    with open(cn_imagenet_class_path, "r", encoding='UTF-8') as fn:
        return json.loads(fn.read())
# #*******************文本特征提取开始***************************
# #原始数据的读入
# #df_text,df_user,df_image = train_data_read(train_csv_path)
# start = time.time()
# # 读入停用词表、积极词汇表、消极词汇表
# stopwords = get_stopwords_list()
# possentiwords = get_possentiwords_list()
# negsentiwords = get_negsentiwords_list()
# #文本的读入
# df_text = text_data_read()
# #微博文本扩展特征数据列
# new_text_features_list = ['text_length', 'contains_questmark', 'num_questmarks', 'contains_exclammark',
# 'num_exclammarks', 'contains_hashtag', 'num_hashtags', 'contains_URL',
# 'num_URLs', 'contains_mention', 'num_mentions', 'sentiment_score',
# 'num_noun','num_verb','num_pronoun','num_possentiwords','num_negsentiwords',
# 'contains_firstorderpron','contains_secondorderpron','contains_thirdorderpron']
# # 浪费时间
# for i in range(1,101):
# new_text_features_list.append('word2vec_'+str(i))
# df_text = text_insert_cols(df_text,new_text_features_list)
# #加载sentiment model
# if not os.path.isfile(sentiment_model_path + '.3'):
# # 情感分析语料模型训练
# text_train_sentiment()
# else:
# logging.info("sentiment model is ready!")
# #加载word2vec model
# if not os.path.isfile(word2vec_model_path):
# # 获得词向量训练语料
# text_get_clear_word2vec_corpus(word2vec_txt_path)
# # 训练word2vec模型
# model_word2vec = text_train_word2vec_model(word2vec_txt_path, word2vec_model_path)
# else:
# # 加载word2vec模型
# #model_word2vec = text_load_word2vec_model(word2vec_model_path)
# model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
# remember_delete = 1
# #文本特征提取
# df_text = text_feature_extraction(df_text)
# #文本特征保存
# df_text.to_csv(text_csv_path,index=0)#不保留行索引
# end = time.time()
# logging.info("运行时间:"+str(end-start))
# #*******************文本特征提取结束***************************
# #*******************用户特征提取开始***************************
# start = time.time()
# #原始数据读入
# df_user = user_data_read()
# #用户新特征列扩展
# new_user_features_list = ['folfans_ratio']
# df_user = user_insert_cols(df_user,new_user_features_list)
# #用户特征提取
# df_user = user_feature_extraction(df_user)
# #用户特征保存
# df_user.to_csv(user_csv_path,index=0)#不保留行索引
# end = time.time()
# logging.info("运行时间:"+str(end-start))
# #*******************用户特征提取结束***************************
# #*******************图片特征提取开始***************************
# start = time.time()
# #原始数据读入
# df_image = image_data_read()
# #图片新特征列扩展
# new_image_features_list = ['h_first_moment','s_first_moment','v_first_moment',
# 'h_second_moment','s_second_moment','v_second_moment',
# 'h_third_moment','s_third_moment','v_third_moment',
# 'tf_vgg19_class','tf_resnet50_class','image_width','image_height','image_kb','sim_image_word']
# for i in range(1,2049):
# new_image_features_list.append('resnet_'+str(i))
# df_image = image_insert_cols(df_image,new_image_features_list)
# #pytorch ResNet 50网络
# model_resnet50 = net()
# model_resnet50.eval()
# model_resnet50 = model_resnet50.cuda()
# #tensorflow vgg19和resnet50模型
# model_tf_vgg19 = vgg19.VGG19(weights='imagenet')
# model_tf_resnet50 = resnet50.ResNet50(weights='imagenet')
# model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
# #获得vgg19和resnet50分类的图片top1可信度list
# list_vgg19_score, list_resnet50_score = image_get_score_list(image_class_vgg19_score_path, image_class_resnet50_score_path)
# #获得中文对照词典
# dict_image_class = image_get_class_cn_dict(cn_imagenet_class_path)
# #获得文本特征中的微博原文
# df_text = pd.read_csv(text_csv_path, usecols=['text']) #只加载text列,提升速度,减小不必要的内存损耗
# #图片特征提取
# df_image = image_feature_extraction(df_image)
# #图片特征保存
# df_image.to_csv(image_csv_path,index=0)#不保留行索引
# end = time.time()
# logging.info("运行时间:"+str(end-start))
# #*******************图片特征提取结束***************************
# 2020-02-09 19:30:23,551 : INFO : 图片有问题Given groups=1, weight of size 64 3 7 7, expected input[1, 1, 224, 224] to have 3 channels, but got 1 channels instead
# Loaded runtime CuDNN library: 7.5.1 but source was compiled with: 7.6.5. CuDNN library major and minor version needs to match or have higher minor version in case of CuDNN 7.0 or later version. If using a binary install, upgrade your CuDNN library. If building from sources, make sure the library loaded at runtime is compatible with the version specified during compile configuration.
# Training / testing on the new dataset: raw 3-line-per-record tweet dumps,
# the social-feature dump, and the working CSV all steps below read/write.
train_rumor_txt_path = r'G:\test\tweets\train_rumor.txt'
train_non_rumor_txt_path = r'G:\test\tweets\train_nonrumor.txt'
test_rumor_txt_path = r'G:\test\tweets\test_rumor.txt'
test_non_rumor_txt_path = r'G:\test\tweets\test_nonrumor.txt'
social_feature_txt_path = r'G:\test\social_feature.txt'
test_csv_path = r"G:/result_origin.csv"
# Extract the raw dataset into a CSV holding only the needed columns.
def get_train_csv(rumor_path, label, save_path):
    """Convert a 3-line-per-record tweet dump into a labelled CSV.

    Each record in rumor_path spans three lines: '|'-separated base info,
    a picture list ('|null' suffix marks none), and the tweet text.  ASCII
    commas in the text are replaced with full-width ones so they cannot
    break the CSV, and the given label is appended to every row.

    :param rumor_path: path of the raw dump
    :param label: class label written into the last column
    :param save_path: path of the CSV to write
    """
    features_list = ['id', 'user_name', 'tweet_url', 'user_url', 'publish_time',
                     'original', 'retweet_count', 'comment_count', 'praise_count', 'user_id',
                     'user_authentication_type', 'user_fans_count', 'user_follow_count', 'user_weibo_count', 'publish_platform',
                     'piclist', 'text', 'label']
    rows = []
    fields = []
    with open(rumor_path, 'r', encoding='UTF-8') as f:
        for lineno, line in enumerate(f.readlines()):
            phase = lineno % 3
            if phase == 0:
                # Base-info line: split on '|', strip the trailing newline.
                parts = line.split('|')
                parts[-1] = parts[-1].replace('\n', '')
                fields.extend(parts)
            elif phase == 1:
                # Picture line; '|null' means no pictures attached.
                fields.append(line.replace('|null\n', ''))
            else:
                # Tweet text: full-width commas keep the CSV intact.
                fields.append(line.replace('\n', '').replace(',', '，'))
                fields.append(str(label) + '\n')
                rows.append(','.join(fields))
                fields = []
    with open(save_path, 'w+', encoding='UTF-8') as fwrite:
        fwrite.write(','.join(features_list) + '\n')
        fwrite.writelines(rows)
# get_train_csv(test_rumor_txt_path, 1, r"G:/test_data.csv")
# Drop unneeded columns from the new-dataset CSV and normalise the naming
# to match the original dataset.
def polish_test_csv():
    """Normalise the 'piclist' column of G:/result_origin.csv in place.

    Each non-NaN cell holds '|'-separated picture URLs; keep only the
    basename of each URL and join the names with tabs so the column format
    matches the original train.csv.
    """
    # (A previous one-off step, kept for reference, dropped the redundant
    # feature columns from G:/result_test.csv into G:/result_origin.csv.)
    # drop_list = ['id', 'user_name', 'tweet_url', 'user_url', 'publish_time', 'original','retweet_count','comment_count','praise_count','user_id','user_authentication_type','publish_platform','people','location','organization','words']
    # df = pd.read_csv(r"G:/result_test.csv")
    # df.drop(drop_list, axis=1, inplace=True)
    # df.to_csv(r"G:/result_origin.csv",index=0)
    df = pd.read_csv(r"G:/result_origin.csv")
    for index, row in df.iterrows():
        if not pd.isna(row['piclist']):
            # Keep only the file name (text after the last '/') of each URL.
            names = [item.split('/')[-1] for item in row['piclist'].split('|')]
            # Write back via the iterrows label rather than a manual counter:
            # identical on the default RangeIndex, robust otherwise.
            df.at[index, 'piclist'] = '\t'.join(names)
    df.to_csv(r"G:/result_origin.csv", index=0)  # drop the row index
# Merge the social features into the test-set CSV.
def new_csv_fusion():
    """Join the test-set CSV with the social-feature CSV on 'id' and save."""
    df_test = pd.read_csv(r"G:/test_data.csv")
    df_social = pd.read_csv(r"G:/social_feature.csv")
    result = pd.merge(df_test, df_social, on="id")
    result.to_csv(r"G:/result_test.csv", index=0)  # drop the row index
    print(df_test.shape)
    print(result)
    print(result.shape)
# Supplement text features.
def get_text_feature():
    """Extend the test CSV with the text-feature columns and save it back.

    NOTE(review): the actual extraction call (text_feature_extraction) is
    commented out below, so currently this only ADDS the empty columns —
    confirm whether that is intentional.
    """
    # Load the raw rows.
    df_text = pd.read_csv(test_csv_path)
    start = time.time()
    # Load the stop-word, positive- and negative-sentiment word lists.
    stopwords = get_stopwords_list()
    possentiwords = get_possentiwords_list()
    negsentiwords = get_negsentiwords_list()
    # Extra feature columns for the weibo text.
    new_text_features_list = ['contains_questmark', 'contains_exclammark', 'contains_hashtag', 'contains_URL',
                              'contains_mention', 'num_noun', 'num_verb', 'num_pronoun', 'category']
    # Slow part: 64 word2vec component columns.
    for i in range(1, 65):
        new_text_features_list.append('word2vec_' + str(i))
    df_text = text_insert_cols(df_text, new_text_features_list)
    # # 加载word2vec model
    # if not os.path.isfile(word2vec_model_path):
    #     # 获得词向量训练语料
    #     text_get_clear_word2vec_corpus(word2vec_txt_path)
    #     # 训练word2vec模型
    #     model_word2vec = text_train_word2vec_model(word2vec_txt_path, word2vec_model_path)
    # else:
    #     # 加载word2vec模型
    #     # model_word2vec = text_load_word2vec_model(word2vec_model_path)
    #     model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(
    #         r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
    #     remember_delete = 1
    # Text feature extraction (currently disabled).
    # df_text = text_feature_extraction(df_text)
    # Persist without the row index.
    df_text.to_csv(test_csv_path, index=0)
    end = time.time()
    logging.info("运行时间:" + str(end - start))
# Supplement user features.
def get_user_feature():
    """Extend the test CSV with user-level features and write it back."""
    start = time.time()
    # Load the raw rows.
    df_user = pd.read_csv(test_csv_path)
    # Add the new user feature columns, then compute them.
    new_user_features_list = ['folfans_ratio', 'user_gender', 'user_location', 'user_description']
    df_user = user_feature_extraction(user_insert_cols(df_user, new_user_features_list))
    # Persist without the row index.
    df_user.to_csv(test_csv_path, index=0)
    end = time.time()
    logging.info("运行时间:" + str(end - start))
# Supplement image features.
def get_image_feature(test_csv_path):
    """Extend the test CSV with image-derived features and write it back.

    NOTE(review): model_word2vec, list_vgg19_score, list_resnet50_score,
    dict_image_class and df_text are assigned as LOCALS here, while
    image_feature_extraction / image_get_img_word_sim read module-level
    names — confirm these locals actually reach the callees.

    :param test_csv_path: path of the CSV to read, extend and overwrite
    """
    start = time.time()
    # Load the raw rows.
    df_image = pd.read_csv(test_csv_path)
    stopwords = get_stopwords_list()
    # New image feature columns (full set kept below for reference).
    # new_image_features_list = ['h_first_moment','s_first_moment','v_first_moment',
    #                            'h_second_moment','s_second_moment','v_second_moment',
    #                            'h_third_moment','s_third_moment','v_third_moment',
    #                            'tf_vgg19_class','tf_resnet50_class','image_width','image_height','image_kb','sim_image_word']
    new_image_features_list = ['sim_image_word']
    # for i in range(1,2049):
    #     new_image_features_list.append('resnet_'+str(i))
    df_image = image_insert_cols(df_image, new_image_features_list)
    # PyTorch ResNet50 network (disabled).
    # model_resnet50 = net()
    # model_resnet50.eval()
    # model_resnet50 = model_resnet50.cuda()
    # TensorFlow VGG19 and ResNet50 models (disabled).
    # model_tf_vgg19 = vgg19.VGG19(weights='imagenet')
    # model_tf_resnet50 = resnet50.ResNet50(weights='imagenet')
    model_word2vec = gensim.models.KeyedVectors.load_word2vec_format(
        r'G:\毕设\数据集\微博\news_12g_baidubaike_20g_novel_90g_embedding_64.bin', binary=True)
    # Top-1 confidence lists from the VGG19 / ResNet50 classification runs.
    list_vgg19_score, list_resnet50_score = image_get_score_list(r'G:\test_image_class_vgg19.txt',
                                                                r'G:\test_image_class_resnet50.txt')
    # English-to-Chinese class-name dictionary.
    dict_image_class = image_get_class_cn_dict(cn_imagenet_class_path)
    # Original weibo text from the text features.
    df_text = pd.read_csv(test_csv_path, usecols=['text'])  # load only 'text' to save memory
    # Image feature extraction.
    df_image = image_feature_extraction(df_image)
    # Persist without the row index.
    df_image.to_csv(test_csv_path, index=0)
    end = time.time()
    logging.info("运行时间:" + str(end - start))
# Module-level script: append the 64 word2vec_* columns from the 0404
# feature CSV onto the original 0404 CSV, column-wise, and save the result.
# (The last line of the original carried table-extraction junk " | [" that
# made the file a syntax error; removed.)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
fusion_csv_path = r'G:\毕设\数据集\微博\fusion_news_features.csv'
fusion_csv_path_0404_origin = r'G:\毕设\数据集\微博\fusion_news_features_0404_origin.csv'
fusion_csv_path_0404 = r'G:\毕设\数据集\微博\fusion_news_features_0404.csv'
start = time.time()
# Load the base data.
df_image = pd.read_csv(fusion_csv_path_0404_origin)
# The 64 word2vec component column names to pull across.
new_text_features_list = []
for i in range(1, 65):
    new_text_features_list.append('word2vec_' + str(i))
df_1 = pd.read_csv(fusion_csv_path_0404, usecols=new_text_features_list)
df = pd.concat([df_image, df_1], axis=1)
df.to_csv(fusion_csv_path_0404_origin, index=0)  # drop the row index
end = time.time()
logging.info("运行时间:" + str(end - start))
"pandas.read_csv",
"json.dumps",
"os.path.isfile",
"numpy.mean",
"gensim.models.KeyedVectors.load_word2vec_format",
"gensim.models.word2vec.Text8Corpus",
"gensim.models.word2vec.Word2Vec.load",
"gensim.models.word2vec.Word2Vec",
"tensorflow.keras.applications.vgg19.decode_predictions",
"random.ran... | [((37266, 37361), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (37285, 37361), False, 'import logging\n'), ((37577, 37588), 'time.time', 'time.time', ([], {}), '()\n', (37586, 37588), False, 'import time\n'), ((37609, 37649), 'pandas.read_csv', 'pd.read_csv', (['fusion_csv_path_0404_origin'], {}), '(fusion_csv_path_0404_origin)\n', (37620, 37649), True, 'import pandas as pd\n'), ((37771, 37836), 'pandas.read_csv', 'pd.read_csv', (['fusion_csv_path_0404'], {'usecols': 'new_text_features_list'}), '(fusion_csv_path_0404, usecols=new_text_features_list)\n', (37782, 37836), True, 'import pandas as pd\n'), ((37841, 37876), 'pandas.concat', 'pd.concat', (['[df_image, df_1]'], {'axis': '(1)'}), '([df_image, df_1], axis=1)\n', (37850, 37876), True, 'import pandas as pd\n'), ((37941, 37952), 'time.time', 'time.time', ([], {}), '()\n', (37950, 37952), False, 'import time\n'), ((2199, 2225), 'logging.info', 'logging.info', (['"""正在载入数据中..."""'], {}), "('正在载入数据中...')\n", (2211, 2225), False, 'import logging\n'), ((2250, 2322), 'pandas.read_csv', 'pd.read_csv', (['train_csv_path'], {'usecols': "['id', 'text', 'category', 'label']"}), "(train_csv_path, usecols=['id', 'text', 'category', 'label'])\n", (2261, 2322), True, 'import pandas as pd\n'), ((2346, 2496), 'pandas.read_csv', 'pd.read_csv', (['train_csv_path'], {'usecols': "['id', 'userGender', 'userFollowCount', 'userFansCount', 'userWeiboCount',\n 'userLocation', 'userDescription']"}), "(train_csv_path, usecols=['id', 'userGender', 'userFollowCount',\n 'userFansCount', 'userWeiboCount', 'userLocation', 'userDescription'])\n", (2357, 2496), True, 'import pandas as pd\n'), ((2513, 2567), 'pandas.read_csv', 'pd.read_csv', (['train_csv_path'], {'usecols': "['id', 'piclist']"}), "(train_csv_path, usecols=['id', 'piclist'])\n", (2524, 2567), 
True, 'import pandas as pd\n'), ((2570, 2592), 'logging.info', 'logging.info', (['"""数据载入完成"""'], {}), "('数据载入完成')\n", (2582, 2592), False, 'import logging\n'), ((2716, 2742), 'pandas.read_csv', 'pd.read_csv', (['text_csv_path'], {}), '(text_csv_path)\n', (2727, 2742), True, 'import pandas as pd\n'), ((2919, 2948), 'logging.info', 'logging.info', (['"""正在扩展文本新特征列..."""'], {}), "('正在扩展文本新特征列...')\n", (2931, 2948), False, 'import logging\n'), ((3171, 3197), 'logging.info', 'logging.info', (['"""文本新特征列扩展完成"""'], {}), "('文本新特征列扩展完成')\n", (3183, 3197), False, 'import logging\n'), ((3261, 3288), 'logging.info', 'logging.info', (['"""开始文本特征提取..."""'], {}), "('开始文本特征提取...')\n", (3273, 3288), False, 'import logging\n'), ((5614, 5641), 'logging.info', 'logging.info', (['"""文本特征提取结束..."""'], {}), "('文本特征提取结束...')\n", (5626, 5641), False, 'import logging\n'), ((6783, 6804), 'pandas.isna', 'pd.isna', (['text_content'], {}), '(text_content)\n', (6790, 6804), True, 'import pandas as pd\n'), ((7564, 7635), 'snownlp.sentiment.train', 'sentiment.train', (['train_negative_corpus_path', 'train_positive_corpus_path'], {}), '(train_negative_corpus_path, train_positive_corpus_path)\n', (7579, 7635), False, 'from snownlp import sentiment\n'), ((7696, 7732), 'snownlp.sentiment.save', 'sentiment.save', (['sentiment_model_path'], {}), '(sentiment_model_path)\n', (7710, 7732), False, 'from snownlp import sentiment\n'), ((7940, 7961), 'pandas.isna', 'pd.isna', (['text_content'], {}), '(text_content)\n', (7947, 7961), True, 'import pandas as pd\n'), ((10217, 10286), 're.findall', 're.findall', (['"""https?://(?:[-\\\\w.]|(?:%[\\\\da-fA-F]{2}))+"""', 'text_content'], {}), "('https?://(?:[-\\\\w.]|(?:%[\\\\da-fA-F]{2}))+', text_content)\n", (10227, 10286), False, 'import re\n'), ((10825, 10864), 'gensim.models.word2vec.Text8Corpus', 'word2vec.Text8Corpus', (['word2vec_txt_path'], {}), '(word2vec_txt_path)\n', (10845, 10864), False, 'from gensim.models import word2vec\n'), ((10877, 10926), 
'gensim.models.word2vec.Word2Vec', 'word2vec.Word2Vec', (['sentences'], {'size': '(100)', 'workers': '(4)'}), '(sentences, size=100, workers=4)\n', (10894, 10926), False, 'from gensim.models import word2vec\n'), ((12280, 12323), 'gensim.models.word2vec.Word2Vec.load', 'word2vec.Word2Vec.load', (['word2vec_model_path'], {}), '(word2vec_model_path)\n', (12302, 12323), False, 'from gensim.models import word2vec\n'), ((12754, 12786), 'logging.info', 'logging.info', (['"""清理word2vec语料文本结束"""'], {}), "('清理word2vec语料文本结束')\n", (12766, 12786), False, 'import logging\n'), ((12836, 12857), 'pandas.isna', 'pd.isna', (['text_content'], {}), '(text_content)\n', (12843, 12857), True, 'import pandas as pd\n'), ((13503, 13529), 'pandas.read_csv', 'pd.read_csv', (['user_csv_path'], {}), '(user_csv_path)\n', (13514, 13529), True, 'import pandas as pd\n'), ((13706, 13735), 'logging.info', 'logging.info', (['"""正在扩展用户新特征列..."""'], {}), "('正在扩展用户新特征列...')\n", (13718, 13735), False, 'import logging\n'), ((13883, 13909), 'logging.info', 'logging.info', (['"""用户新特征列扩展完成"""'], {}), "('用户新特征列扩展完成')\n", (13895, 13909), False, 'import logging\n'), ((13972, 13999), 'logging.info', 'logging.info', (['"""开始用户特征提取..."""'], {}), "('开始用户特征提取...')\n", (13984, 13999), False, 'import logging\n'), ((14484, 14511), 'logging.info', 'logging.info', (['"""用户特征提取结束..."""'], {}), "('用户特征提取结束...')\n", (14496, 14511), False, 'import logging\n'), ((14903, 14930), 'pandas.read_csv', 'pd.read_csv', (['image_csv_path'], {}), '(image_csv_path)\n', (14914, 14930), True, 'import pandas as pd\n'), ((15112, 15141), 'logging.info', 'logging.info', (['"""正在扩展图片新特征列..."""'], {}), "('正在扩展图片新特征列...')\n", (15124, 15141), False, 'import logging\n'), ((15368, 15394), 'logging.info', 'logging.info', (['"""图片新特征列扩展完成"""'], {}), "('图片新特征列扩展完成')\n", (15380, 15394), False, 'import logging\n'), ((15460, 15487), 'logging.info', 'logging.info', (['"""开始图片特征提取..."""'], {}), "('开始图片特征提取...')\n", (15472, 15487), False, 'import 
logging\n'), ((17257, 17284), 'logging.info', 'logging.info', (['"""图片特征提取结束..."""'], {}), "('图片特征提取结束...')\n", (17269, 17284), False, 'import logging\n'), ((17645, 17666), 'pandas.isna', 'pd.isna', (['text_content'], {}), '(text_content)\n', (17652, 17666), True, 'import pandas as pd\n'), ((17857, 17887), 'collections.Counter', 'Counter', (['list_clear_weibo_text'], {}), '(list_clear_weibo_text)\n', (17864, 17887), False, 'from collections import Counter\n'), ((20423, 20448), 'os.path.getsize', 'os.path.getsize', (['img_path'], {}), '(img_path)\n', (20438, 20448), False, 'import os\n'), ((20666, 20686), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (20676, 20686), False, 'import cv2\n'), ((20873, 20909), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (20885, 20909), False, 'import cv2\n'), ((20957, 20971), 'cv2.split', 'cv2.split', (['hsv'], {}), '(hsv)\n', (20966, 20971), False, 'import cv2\n'), ((21126, 21136), 'numpy.mean', 'np.mean', (['h'], {}), '(h)\n', (21133, 21136), True, 'import numpy as np\n'), ((21172, 21182), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (21179, 21182), True, 'import numpy as np\n'), ((21218, 21228), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (21225, 21228), True, 'import numpy as np\n'), ((21375, 21384), 'numpy.std', 'np.std', (['h'], {}), '(h)\n', (21381, 21384), True, 'import numpy as np\n'), ((21439, 21448), 'numpy.std', 'np.std', (['s'], {}), '(s)\n', (21445, 21448), True, 'import numpy as np\n'), ((21503, 21512), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (21509, 21512), True, 'import numpy as np\n'), ((24364, 24392), 'random.randint', 'random.randint', (['(32768)', '(65536)'], {}), '(32768, 65536)\n', (24378, 24392), False, 'import random\n'), ((24650, 24677), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (24672, 24677), False, 'import urllib\n'), ((24735, 24754), 'json.loads', 'json.loads', (['content'], {}), 
'(content)\n', (24745, 24754), False, 'import json\n'), ((24983, 24996), 'json.loads', 'json.loads', (['j'], {}), '(j)\n', (24993, 24996), False, 'import json\n'), ((25360, 25379), 'json.dumps', 'json.dumps', (['txt_dic'], {}), '(txt_dic)\n', (25370, 25379), False, 'import json\n'), ((25718, 25738), 'json.loads', 'json.loads', (['str_json'], {}), '(str_json)\n', (25728, 25738), False, 'import json\n'), ((32664, 32699), 'pandas.read_csv', 'pd.read_csv', (['"""G:/result_origin.csv"""'], {}), "('G:/result_origin.csv')\n", (32675, 32699), True, 'import pandas as pd\n'), ((33245, 33276), 'pandas.read_csv', 'pd.read_csv', (['"""G:/test_data.csv"""'], {}), "('G:/test_data.csv')\n", (33256, 33276), True, 'import pandas as pd\n'), ((33288, 33324), 'pandas.read_csv', 'pd.read_csv', (['"""G:/social_feature.csv"""'], {}), "('G:/social_feature.csv')\n", (33299, 33324), True, 'import pandas as pd\n'), ((33339, 33366), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'on': '"""id"""'}), "(df1, df2, on='id')\n", (33347, 33366), True, 'import pandas as pd\n'), ((33551, 33577), 'pandas.read_csv', 'pd.read_csv', (['test_csv_path'], {}), '(test_csv_path)\n', (33562, 33577), True, 'import pandas as pd\n'), ((33591, 33602), 'time.time', 'time.time', ([], {}), '()\n', (33600, 33602), False, 'import time\n'), ((34878, 34889), 'time.time', 'time.time', ([], {}), '()\n', (34887, 34889), False, 'import time\n'), ((34980, 34991), 'time.time', 'time.time', ([], {}), '()\n', (34989, 34991), False, 'import time\n'), ((35019, 35045), 'pandas.read_csv', 'pd.read_csv', (['test_csv_path'], {}), '(test_csv_path)\n', (35030, 35045), True, 'import pandas as pd\n'), ((35361, 35372), 'time.time', 'time.time', ([], {}), '()\n', (35370, 35372), False, 'import time\n'), ((35477, 35488), 'time.time', 'time.time', ([], {}), '()\n', (35486, 35488), False, 'import time\n'), ((35517, 35543), 'pandas.read_csv', 'pd.read_csv', (['test_csv_path'], {}), '(test_csv_path)\n', (35528, 35543), True, 'import pandas as 
pd\n'), ((36491, 36631), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['"""G:\\\\毕设\\\\数据集\\\\微博\\\\news_12g_baidubaike_20g_novel_90g_embedding_64.bin"""'], {'binary': '(True)'}), "(\n 'G:\\\\毕设\\\\数据集\\\\微博\\\\news_12g_baidubaike_20g_novel_90g_embedding_64.bin',\n binary=True)\n", (36538, 36631), False, 'import gensim\n'), ((36990, 37034), 'pandas.read_csv', 'pd.read_csv', (['test_csv_path'], {'usecols': "['text']"}), "(test_csv_path, usecols=['text'])\n", (37001, 37034), True, 'import pandas as pd\n'), ((37204, 37215), 'time.time', 'time.time', ([], {}), '()\n', (37213, 37215), False, 'import time\n'), ((8274, 8300), 're.findall', 're.findall', (['u"""[一-龥]"""', 'text'], {}), "(u'[一-龥]', text)\n", (8284, 8300), False, 'import re\n'), ((8339, 8356), 'jieba.cut', 'jieba.cut', (['text_n'], {}), '(text_n)\n', (8348, 8356), False, 'import jieba\n'), ((12874, 12886), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (12882, 12886), True, 'import numpy as np\n'), ((13343, 13377), 'numpy.array', 'np.array', (['text_word2vec_score_list'], {}), '(text_word2vec_score_list)\n', (13351, 13377), True, 'import numpy as np\n'), ((15925, 15953), 'pandas.isna', 'pd.isna', (['df_image.iloc[i, 1]'], {}), '(df_image.iloc[i, 1])\n', (15932, 15953), True, 'import pandas as pd\n'), ((18639, 18658), 'numpy.all', 'np.all', (['(term_i == 0)'], {}), '(term_i == 0)\n', (18645, 18658), True, 'import numpy as np\n'), ((18769, 18803), 'numpy.all', 'np.all', (['(term_vgg19_class_name == 0)'], {}), '(term_vgg19_class_name == 0)\n', (18775, 18803), True, 'import numpy as np\n'), ((18853, 18890), 'numpy.all', 'np.all', (['(term_resnet50_class_name == 0)'], {}), '(term_resnet50_class_name == 0)\n', (18859, 18890), True, 'import numpy as np\n'), ((18943, 18977), 'numpy.all', 'np.all', (['(term_vgg19_class_name != 0)'], {}), '(term_vgg19_class_name != 0)\n', (18949, 18977), True, 'import numpy as np\n'), ((19245, 19282), 'numpy.all', 
'np.all', (['(term_resnet50_class_name != 0)'], {}), '(term_resnet50_class_name != 0)\n', (19251, 19282), True, 'import numpy as np\n'), ((20340, 20360), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (20350, 20360), False, 'from PIL import Image\n'), ((22151, 22183), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (22166, 22183), False, 'from torchvision import models, transforms\n'), ((22907, 22927), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (22917, 22927), False, 'from PIL import Image\n'), ((22965, 22988), 'logging.info', 'logging.info', (['img.shape'], {}), '(img.shape)\n', (22977, 22988), False, 'import logging\n'), ((23077, 23098), 'logging.info', 'logging.info', (['x.shape'], {}), '(x.shape)\n', (23089, 23098), False, 'import logging\n'), ((23129, 23154), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23152, 23154), False, 'import torch\n'), ((23271, 23287), 'torch.squeeze', 'torch.squeeze', (['y'], {}), '(y)\n', (23284, 23287), False, 'import torch\n'), ((23343, 23364), 'logging.info', 'logging.info', (['y.shape'], {}), '(y.shape)\n', (23355, 23364), False, 'import logging\n'), ((6856, 6890), 're.findall', 're.findall', (['u"""[一-龥]"""', 'text_content'], {}), "(u'[一-龥]', text_content)\n", (6866, 6890), False, 'import re\n'), ((8080, 8105), 'snownlp.SnowNLP', 'SnowNLP', (['new_text_content'], {}), '(new_text_content)\n', (8087, 8105), False, 'from snownlp import SnowNLP\n'), ((16382, 16407), 'os.path.isfile', 'os.path.isfile', (['filename1'], {}), '(filename1)\n', (16396, 16407), False, 'import os\n'), ((18100, 18112), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (18108, 18112), True, 'import numpy as np\n'), ((18320, 18332), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (18328, 18332), True, 'import numpy as np\n'), ((22706, 22735), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), 
'((256, 256))\n', (22723, 22735), False, 'from torchvision import models, transforms\n'), ((22772, 22805), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224, 224)'], {}), '((224, 224))\n', (22793, 22805), False, 'from torchvision import models, transforms\n'), ((22837, 22858), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (22856, 22858), False, 'from torchvision import models, transforms\n'), ((23615, 23656), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['i'], {'target_size': '(224, 224)'}), '(i, target_size=(224, 224))\n', (23629, 23656), False, 'from tensorflow.keras.preprocessing import image\n'), ((23724, 23747), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (23742, 23747), False, 'from tensorflow.keras.preprocessing import image\n'), ((23891, 23918), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (23905, 23918), True, 'import numpy as np\n'), ((24033, 24083), 'tensorflow.keras.applications.vgg19.decode_predictions', 'vgg19.decode_predictions', (['predict_class_vgg'], {'top': '(1)'}), '(predict_class_vgg, top=1)\n', (24057, 24083), False, 'from tensorflow.keras.applications import vgg19\n'), ((25102, 25113), 'time.time', 'time.time', ([], {}), '()\n', (25111, 25113), False, 'import time\n'), ((25201, 25212), 'time.time', 'time.time', ([], {}), '()\n', (25210, 25212), False, 'import time\n'), ((32823, 32846), 'pandas.isna', 'pd.isna', (["row['piclist']"], {}), "(row['piclist'])\n", (32830, 32846), True, 'import pandas as pd\n'), ((18615, 18627), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (18623, 18627), True, 'import numpy as np\n'), ((19025, 19062), 'numpy.dot', 'np.dot', (['term_i', 'term_vgg19_class_name'], {}), '(term_i, term_vgg19_class_name)\n', (19031, 19062), True, 'import numpy as np\n'), ((19332, 19372), 'numpy.dot', 'np.dot', (['term_i', 'term_resnet50_class_name'], {}), 
'(term_i, term_resnet50_class_name)\n', (19338, 19372), True, 'import numpy as np\n'), ((25261, 25274), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (25271, 25274), False, 'import time\n'), ((1152, 1163), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1161, 1163), False, 'import os\n'), ((1264, 1275), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1273, 1275), False, 'import os\n'), ((1370, 1381), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1379, 1381), False, 'import os\n'), ((1475, 1486), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1484, 1486), False, 'import os\n'), ((1579, 1590), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1588, 1590), False, 'import os\n'), ((1691, 1702), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1700, 1702), False, 'import os\n'), ((1794, 1805), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1803, 1805), False, 'import os\n'), ((1903, 1914), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1912, 1914), False, 'import os\n'), ((12667, 12701), 're.findall', 're.findall', (['u"""[一-龥]"""', 'text_content'], {}), "(u'[一-龥]', text_content)\n", (12677, 12701), False, 'import re\n'), ((12931, 12965), 're.findall', 're.findall', (['u"""[一-龥]"""', 'text_content'], {}), "(u'[一-龥]', text_content)\n", (12941, 12965), False, 'import re\n'), ((13297, 13309), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (13305, 13309), True, 'import numpy as np\n'), ((17762, 17796), 're.findall', 're.findall', (['u"""[一-龥]"""', 'text_content'], {}), "(u'[一-龥]', text_content)\n", (17772, 17796), False, 'import re\n'), ((19066, 19078), 'scipy.linalg.norm', 'norm', (['term_i'], {}), '(term_i)\n', (19070, 19078), False, 'from scipy.linalg import norm\n'), ((19081, 19108), 'scipy.linalg.norm', 'norm', (['term_vgg19_class_name'], {}), '(term_vgg19_class_name)\n', (19085, 19108), False, 'from scipy.linalg import norm\n'), ((19376, 19388), 'scipy.linalg.norm', 'norm', (['term_i'], {}), '(term_i)\n', (19380, 19388), False, 'from scipy.linalg import norm\n'), ((19391, 
19421), 'scipy.linalg.norm', 'norm', (['term_resnet50_class_name'], {}), '(term_resnet50_class_name)\n', (19395, 19421), False, 'from scipy.linalg import norm\n'), ((23011, 23038), 'torch.unsqueeze', 'torch.unsqueeze', (['img'], {'dim': '(0)'}), '(img, dim=0)\n', (23026, 23038), False, 'import torch\n'), ((23420, 23434), 'numpy.zeros', 'np.zeros', (['(2048)'], {}), '(2048)\n', (23428, 23434), True, 'import numpy as np\n'), ((24541, 24565), 'urllib.parse.quote', 'urllib.parse.quote', (['text'], {}), '(text)\n', (24559, 24565), False, 'import urllib\n')] |
# Python 2/3 compatibility shims for the test suite below.
from __future__ import print_function, division, absolute_import
import time
import warnings
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
    import unittest2 as unittest
else:
    import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
    import unittest.mock as mock
except ImportError:
    import mock
import matplotlib
matplotlib.use('Agg')  # fix execution of tests involving matplotlib on travis
import numpy as np
import imgaug as ia
from imgaug.testutils import reseed
def main():
    """Entry point: run the (currently disabled) test calls and report timing."""
    started = time.time()
    # test_Batch()  # individual tests run via the unittest classes instead
    finished = time.time()
    print("<%s> Finished without errors in %.4fs." % (__file__, finished - started,))
class TestBatch(unittest.TestCase):
    """Tests for ia.Batch: construction, deprecated accessors and deepcopy."""
    def setUp(self):
        # reseed global RNGs so each test is deterministic
        reseed()
    def test_init(self):
        """Batch() must store inputs in *_unaug slots and leave *_aug/data None."""
        attr_names = ["images", "heatmaps", "segmentation_maps", "keypoints",
                      "bounding_boxes", "polygons"]
        batch = ia.Batch()
        # an empty batch carries None in every unaugmented and augmented slot
        for attr_name in attr_names:
            assert getattr(batch, "%s_unaug" % (attr_name,)) is None
            assert getattr(batch, "%s_aug" % (attr_name,)) is None
        assert batch.data is None
        # we exploit here that Batch() init does not verify its inputs
        batch = ia.Batch(
            images=0,
            heatmaps=1,
            segmentation_maps=2,
            keypoints=3,
            bounding_boxes=4,
            polygons=5,
            data=6
        )
        # each constructor argument must land unchanged in its *_unaug slot
        for i, attr_name in enumerate(attr_names):
            assert getattr(batch, "%s_unaug" % (attr_name,)) == i
            assert getattr(batch, "%s_aug" % (attr_name,)) is None
        assert batch.data == 6
    def test_property_warnings(self):
        """Accessing the old property names must emit deprecation warnings."""
        batch = ia.Batch()
        # self.assertWarns does not exist in py2.7
        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always")
            # every access below must append exactly one deprecation warning
            _ = batch.images
            assert len(caught_warnings) == 1
            assert "is deprecated" in str(caught_warnings[-1].message)
            _ = batch.heatmaps
            assert len(caught_warnings) == 2
            assert "is deprecated" in str(caught_warnings[-1].message)
            _ = batch.segmentation_maps
            assert len(caught_warnings) == 3
            assert "is deprecated" in str(caught_warnings[-1].message)
            _ = batch.keypoints
            assert len(caught_warnings) == 4
            assert "is deprecated" in str(caught_warnings[-1].message)
            _ = batch.bounding_boxes
            assert len(caught_warnings) == 5
            assert "is deprecated" in str(caught_warnings[-1].message)
    def test_deepcopy(self):
        """deepcopy() must copy set attributes and leave unset ones as None."""
        batch = ia.Batch()
        observed = batch.deepcopy()
        keys = list(observed.__dict__.keys())
        assert len(keys) >= 12
        for attr_name in keys:
            assert getattr(observed, attr_name) is None
        # a batch with only images set keeps everything else at None
        batch = ia.Batch(images=np.zeros((1, 1, 3), dtype=np.uint8))
        observed = batch.deepcopy()
        for attr_name in observed.__dict__.keys():
            if attr_name != "images_unaug":
                assert getattr(observed, attr_name) is None
        assert ia.is_np_array(observed.images_unaug)
        # fully populated batch: every augmentable input type plus user data
        batch = ia.Batch(
            images=np.zeros((1, 1, 3), dtype=np.uint8),
            heatmaps=[
                ia.HeatmapsOnImage(np.zeros((1, 1, 1), dtype=np.float32),
                                   shape=(4, 4, 3))
            ],
            segmentation_maps=[
                ia.SegmentationMapOnImage(np.zeros((1, 1), dtype=np.int32),
                                          shape=(5, 5, 3),
                                          nb_classes=20)
            ],
            keypoints=[
                ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)], shape=(6, 6, 3))
            ],
            bounding_boxes=[
                ia.BoundingBoxesOnImage([
                    ia.BoundingBox(x1=1, y1=2, x2=3, y2=4)],
                    shape=(7, 7, 3))
            ],
            polygons=[
                ia.PolygonsOnImage([
                    ia.Polygon([(0, 0), (10, 0), (10, 10)])
                ], shape=(100, 100, 3))
            ],
            data={"test": 123, "foo": "bar", "test2": [1, 2, 3]}
        )
        observed = batch.deepcopy()
        # only the *_unaug attributes and data may be non-None after the copy
        for attr_name in observed.__dict__.keys():
            if "_unaug" not in attr_name and attr_name != "data":
                assert getattr(observed, attr_name) is None
        # verify that types, shapes and values survived the deep copy unchanged
        assert ia.is_np_array(observed.images_unaug)
        assert observed.images_unaug.shape == (1, 1, 3)
        assert isinstance(observed.heatmaps_unaug[0], ia.HeatmapsOnImage)
        assert isinstance(observed.segmentation_maps_unaug[0],
                          ia.SegmentationMapOnImage)
        assert isinstance(observed.keypoints_unaug[0], ia.KeypointsOnImage)
        assert isinstance(observed.bounding_boxes_unaug[0],
                          ia.BoundingBoxesOnImage)
        assert isinstance(observed.polygons_unaug[0], ia.PolygonsOnImage)
        assert isinstance(observed.data, dict)
        assert observed.heatmaps_unaug[0].shape == (4, 4, 3)
        assert observed.segmentation_maps_unaug[0].shape == (5, 5, 3)
        assert observed.keypoints_unaug[0].shape == (6, 6, 3)
        assert observed.bounding_boxes_unaug[0].shape == (7, 7, 3)
        assert observed.polygons_unaug[0].shape == (100, 100, 3)
        assert observed.heatmaps_unaug[0].arr_0to1.shape == (1, 1, 1)
        assert observed.segmentation_maps_unaug[0].arr.shape == (1, 1, 20)
        assert observed.keypoints_unaug[0].keypoints[0].x == 1
        assert observed.keypoints_unaug[0].keypoints[0].y == 2
        assert observed.bounding_boxes_unaug[0].bounding_boxes[0].x1 == 1
        assert observed.bounding_boxes_unaug[0].bounding_boxes[0].y1 == 2
        assert observed.bounding_boxes_unaug[0].bounding_boxes[0].x2 == 3
        assert observed.bounding_boxes_unaug[0].bounding_boxes[0].y2 == 4
        assert observed.polygons_unaug[0].polygons[0].exterior[0, 0] == 0
        assert observed.polygons_unaug[0].polygons[0].exterior[0, 1] == 0
        assert observed.polygons_unaug[0].polygons[0].exterior[1, 0] == 10
        assert observed.polygons_unaug[0].polygons[0].exterior[1, 1] == 0
        assert observed.polygons_unaug[0].polygons[0].exterior[2, 0] == 10
        assert observed.polygons_unaug[0].polygons[0].exterior[2, 1] == 10
        assert observed.data["test"] == 123
        assert observed.data["foo"] == "bar"
        assert observed.data["test2"] == [1, 2, 3]
| [
"warnings.simplefilter",
"imgaug.Keypoint",
"imgaug.is_np_array",
"numpy.zeros",
"imgaug.testutils.reseed",
"time.time",
"imgaug.BoundingBox",
"imgaug.Batch",
"matplotlib.use",
"warnings.catch_warnings",
"imgaug.Polygon"
] | [((433, 454), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (447, 454), False, 'import matplotlib\n'), ((619, 630), 'time.time', 'time.time', ([], {}), '()\n', (628, 630), False, 'import time\n'), ((667, 678), 'time.time', 'time.time', ([], {}), '()\n', (676, 678), False, 'import time\n'), ((835, 843), 'imgaug.testutils.reseed', 'reseed', ([], {}), '()\n', (841, 843), False, 'from imgaug.testutils import reseed\n'), ((1016, 1026), 'imgaug.Batch', 'ia.Batch', ([], {}), '()\n', (1024, 1026), True, 'import imgaug as ia\n'), ((1322, 1428), 'imgaug.Batch', 'ia.Batch', ([], {'images': '(0)', 'heatmaps': '(1)', 'segmentation_maps': '(2)', 'keypoints': '(3)', 'bounding_boxes': '(4)', 'polygons': '(5)', 'data': '(6)'}), '(images=0, heatmaps=1, segmentation_maps=2, keypoints=3,\n bounding_boxes=4, polygons=5, data=6)\n', (1330, 1428), True, 'import imgaug as ia\n'), ((1789, 1799), 'imgaug.Batch', 'ia.Batch', ([], {}), '()\n', (1797, 1799), True, 'import imgaug as ia\n'), ((2765, 2775), 'imgaug.Batch', 'ia.Batch', ([], {}), '()\n', (2773, 2775), True, 'import imgaug as ia\n'), ((3252, 3289), 'imgaug.is_np_array', 'ia.is_np_array', (['observed.images_unaug'], {}), '(observed.images_unaug)\n', (3266, 3289), True, 'import imgaug as ia\n'), ((4556, 4593), 'imgaug.is_np_array', 'ia.is_np_array', (['observed.images_unaug'], {}), '(observed.images_unaug)\n', (4570, 4593), True, 'import imgaug as ia\n'), ((1864, 1900), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1887, 1900), False, 'import warnings\n'), ((1933, 1964), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (1954, 1964), False, 'import warnings\n'), ((3009, 3044), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), dtype=np.uint8)\n', (3017, 3044), True, 'import numpy as np\n'), ((3336, 3371), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.uint8'}), '((1, 1, 3), 
dtype=np.uint8)\n', (3344, 3371), True, 'import numpy as np\n'), ((3431, 3468), 'numpy.zeros', 'np.zeros', (['(1, 1, 1)'], {'dtype': 'np.float32'}), '((1, 1, 1), dtype=np.float32)\n', (3439, 3468), True, 'import numpy as np\n'), ((3611, 3643), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {'dtype': 'np.int32'}), '((1, 1), dtype=np.int32)\n', (3619, 3643), True, 'import numpy as np\n'), ((3837, 3858), 'imgaug.Keypoint', 'ia.Keypoint', ([], {'x': '(1)', 'y': '(2)'}), '(x=1, y=2)\n', (3848, 3858), True, 'import imgaug as ia\n'), ((3984, 4022), 'imgaug.BoundingBox', 'ia.BoundingBox', ([], {'x1': '(1)', 'y1': '(2)', 'x2': '(3)', 'y2': '(4)'}), '(x1=1, y1=2, x2=3, y2=4)\n', (3998, 4022), True, 'import imgaug as ia\n'), ((4157, 4196), 'imgaug.Polygon', 'ia.Polygon', (['[(0, 0), (10, 0), (10, 10)]'], {}), '([(0, 0), (10, 0), (10, 10)])\n', (4167, 4196), True, 'import imgaug as ia\n')] |
"""
LTI SSM Policy training N-step
physics informed policy gradient
"""
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
from scipy.io import loadmat
import torch
import torch.nn.functional as F
from torch import nn
from sklearn.model_selection import train_test_split
import time
"""
Ground truth state space model (SSM)
"""
A = np.matrix([[0.9950,0.0017,0.0000,0.0031],[0.0007,0.9957,0.0003,0.0031],
[0.0000,0.0003,0.9834,0.0000],[0.2015,0.4877,0.0100,0.2571]])
B = np.matrix([[1.7586e-06],[1.7584e-06],
[1.8390e-10],[5.0563e-04]])
E = np.matrix([[0.0002,0.0000,0.0000],[0.0002,0.0000,0.0000],
[0.0163,0.0000,0.0000],[0.0536,0.0005,0.0001]])
C = np.matrix([[0.0,0.0,0.0,1.0]])
# model dimensions
nx = 4
ny = 1
nu = 1
nd = 3
"""
Generate data for training, validation and testing
"""
# Train set
samples_day = 288 # 288 samples per day with 5 min sampling
umax = 4e3
start_day = 7
start_sim = samples_day*start_day
Sim_days = 7 # number of simulated days
U_day = umax*np.sin(np.arange(0, 2*np.pi,2*np.pi/samples_day)) # daily control profile
U = np.matlib.repmat(U_day, 1, Sim_days) # Sim_days control profile
N_sim = U.shape[1] # number of simulation steps train phase
file = loadmat('../TimeSeries/disturb.mat') # load disturbance file
Disturb = file['D']
D = Disturb[:,start_sim:start_sim+N_sim]
Y = np.zeros((ny,N_sim)) # output trajectory placeholders
X = np.zeros((nx,N_sim+1)) # state trajectory placeholders
X[:,0] = 20*np.ones(nx) # initial conditions
# Training set open loop response LTI SSM
for k in range(0,N_sim):
x0 = np.asmatrix(X[:,k]).T
d = np.asmatrix(D[:,k]).T
u = np.asmatrix(U[:,k]).T
# LTI SSM
xn = A*x0 + B*u + E*d
y = C*x0
X[:,k+1] = xn.ravel()
Y[:,k] = y.ravel()
# Validation + Test set
N_sim_start = start_sim
Sim_days_test = 21
U_t = np.matlib.repmat(U_day, 1, Sim_days_test) # Sim_days control profile
N_sim_t = U_t.shape[1] # number of simulation steps test phase
D_t = Disturb[:,N_sim_start:N_sim_start+N_sim_t]
Y_t = np.zeros((ny,N_sim_t)) # output trajectory placeholders
X_t = np.zeros((nx,N_sim_t+1)) # state trajectory placeholders
X_t[:,0] = X[:,-1]
# Validation + Test set open loop response LTI SSM
for k in range(0,N_sim_t):
x0 = np.asmatrix(X_t[:,k]).T
d = np.asmatrix(D_t[:,k]).T
u = np.asmatrix(U_t[:,k]).T
# LTI SSM
xn = A*x0 + B*u + E*d
y = C*x0
X_t[:,k+1] = xn.ravel()
Y_t[:,k] = y.ravel()
"""
Algorithm 1 -Constrained model predictive control policy with N>1
"""
# Model definition
class NeuroSSM_policy_N_step(nn.Module):
def __init__(self, nx, nu, ny, nd, nr, N, n_hidden):
super().__init__()
# model layers weights
self.A = nn.Linear(nx,nx, bias=False)
self.B = nn.Linear(nu,nx, bias=False)
self.E = nn.Linear(nd,nx, bias=False)
self.C = nn.Linear(nx,ny, bias=False)
# positive policy layers weights
self.policy1 = nn.Linear(nx+nd*N+nr*N, n_hidden, bias=False)
self.policy2 = nn.Linear(n_hidden, nu*N, bias=False)
self.N = N
self.nr = nr
self.nd = nd
# initialize model weights
with torch.no_grad():
self.A.weight.copy_(torch.from_numpy(A))
self.B.weight.copy_(torch.from_numpy(B))
self.E.weight.copy_(torch.from_numpy(E))
self.C.weight.copy_(torch.from_numpy(C))
# fix first 4 layers of model weights
child_counter = 0
for child in self.children():
child_counter += 1
if child_counter <= 4:
for param in child.parameters():
param.requires_grad = False
def forward(self,x,D,R,YMIN,YMAX,UMIN,UMAX):
x = x.view(x.size(0), -1) # initial state comming from KF or direct measurements
X = torch.tensor([])
Y = torch.tensor([])
U = torch.tensor([])
Sy_min = torch.tensor([])
Sy_max = torch.tensor([])
Su_min = torch.tensor([])
Su_max = torch.tensor([])
D_xi = D.reshape(-1,self.N*self.nd)
R_xi = R.reshape(-1,self.N*self.nr)
xi = torch.cat((x,D_xi,R_xi),1)
u_hidden = F.relu(self.policy1(xi))
U = self.policy2(u_hidden)
U = U.unsqueeze(2)
for k in range(0,N):
ymin = YMIN[:,k,:]
ymax = YMAX[:,k,:]
umin = UMIN[:,k,:]
umax = UMAX[:,k,:]
d = D[:,k,:]
u = U[:,k,:]
# predict step
x = self.A(x) + self.B(u) + self.E(d)
y = self.C(x)
# control input constraints Algo 1 lines 4-6
s_umin = F.relu(-u+umin) # control lower bound u>= umin
s_umax = F.relu(u-umax) # control upper bound u <= umax
# controlled state constraints Algo 1 lines 8-10
s_ymin = F.relu(-y+ymin) # state lower bound u>= umin
s_ymax = F.relu(y-ymax) # state upper bound u <= umax
Sy_min = torch.cat((Sy_min, s_ymin), 0)
Sy_max = torch.cat((Sy_max, s_ymax), 0)
Su_min = torch.cat((Su_min, s_umin), 0)
Su_max = torch.cat((Su_max, s_umax), 0)
X = torch.cat((X, x), 0)
Y = torch.cat((Y, y), 0)
return X, Y, U, Sy_min, Sy_max, Su_min, Su_max
"""
# Model instantiation
"""
N = 8
n_hidden = 10*N # number of hidden neurons
nr = 1 # number of outputs/references
ny = 1
model = NeuroSSM_policy_N_step(nx,nu,ny,nd,nr, N, n_hidden)
model = model.float()
model.parameters
for param in model.named_parameters():
print(param)
"""
# Dataset creation
"""
#R = np.ones(Y.shape) +20 # reference signal
R_day = 20+2*np.sin(np.arange(0, 2*np.pi,2*np.pi/samples_day)) # daily control profile
R = np.matlib.repmat(R_day, 1, Sim_days) # Sim_days control profile
ymin_val = 19
ymax_val = 25
ymin = ymin_val*torch.ones(samples_day*Sim_days,ny)
ymax = ymax_val*torch.ones(samples_day*Sim_days,ny)
umin_val = 0
umax_val = 5000
umin = umin_val*torch.ones(samples_day*Sim_days,nu)
umax = umax_val*torch.ones(samples_day*Sim_days,nu)
# slack variables targets
sx_targets = torch.zeros(samples_day*Sim_days,nx)
sy_targets = torch.zeros(samples_day*Sim_days,ny)
su_targets = torch.zeros(samples_day*Sim_days,nu)
u_targets = torch.zeros(samples_day*Sim_days,nu)
# dataset
R_train = R.T.reshape((-1,N,nu))
D_train = D.T.reshape((-1,N,nd))
YMIN = ymin.T.reshape((-1,N,ny))
YMAX = ymax.T.reshape((-1,N,ny))
UMIN = umin.T.reshape((-1,N,nu))
UMAX = umax.T.reshape((-1,N,nu))
X_train = X[:,range(0,N_sim,N)].T
a = 10
b = 10
X0_train = a +b*np.random.rand(X_train.shape[0],X_train.shape[1])
X0_in = torch.from_numpy(X0_train).float()
D_in = torch.from_numpy(D_train).float()
R_in = torch.from_numpy(R_train).float()
R_target_use = torch.tensor([])
for k in range(R_in.shape[1]):
R_target_use = torch.cat((R_target_use, R_in[:,k]), 0)
"""
# MSE citerion and optimizer
"""
# objective and optimizer
criterion = nn.MSELoss() # we'll convert this to RMSE later
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
"""
# Weights of the multi-objective loss - equation (16)
"""
# constraints weight
Q_con_u = 5e-7
Q_con_x = 50
Q_con_y = 50
Q_u = 1e-7
Q_u = 1e-6
Q_ref = 20
"""
Policy Optimization Training Loop using Algorithm 1
"""
epochs = 30000
losses = []
for i in range(epochs):
i+=1
X_pred, Y_pred, U_pred, Sy_min, Sy_max, Su_min, Su_max = model.forward(X0_in,D_in,R_in,YMIN,YMAX,UMIN,UMAX)
loss = Q_ref*criterion(Y_pred, R_target_use) + Q_u*criterion(U_pred.flatten(), u_targets) \
+ Q_con_y*criterion(Sy_min, sy_targets) + Q_con_y*criterion(Sy_max, sy_targets) \
+ Q_con_u*criterion(Su_min, su_targets) + Q_con_u*criterion(Su_max, su_targets)
losses.append(loss)
if i%1000 == 1:
print(f'epoch: {i:2} loss: {loss.item():10.8f}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
# training-loss curve over all epochs
plt.plot(range(epochs), losses)
plt.ylabel('Loss')
plt.xlabel('epoch')
# set to True to reload the pre-trained policy weights from Section 3.7
replicate_paper = False
if replicate_paper:
    model = torch.load('Trained_DeepMPCPolicy_N_' + str(N) + '_sec_3_7.pt')
# else:
#     torch.save(model, 'Trained_DeepMPCPolicy_N_'+str(N)+'_sec_3_7.pt')
"""
Running closed-loop control simulations from Section 3.7.
"""
Eval_runs = 20
CPU_mean_time = np.zeros(Eval_runs)
CPU_max_time = np.zeros(Eval_runs)
ymin_val = 19
ymax_val = 25
ymin = ymin_val*torch.ones(samples_day*Sim_days_test,ny)
ymax = ymax_val*torch.ones(samples_day*Sim_days_test,ny)
umin_val = 0
umax_val = 5000
umin = umin_val*torch.ones(samples_day*Sim_days_test,nu)
umax = umax_val*torch.ones(samples_day*Sim_days_test,nu)
R_t = np.matlib.repmat(R_day, 1, Sim_days_test) # Sim_days control profile
# Control Validation closed loop
for run in range(0,Eval_runs):
StepTime = np.zeros(N_sim_t)
U_torch = np.zeros((nu,N_sim_t-N)) # input trajectory placeholders
X_cl = np.zeros((nx,N_sim_t+1-N)) # state trajectory placeholders
X_cl[:,0] = 20*np.ones(nx) # initial conditions
Y_cl = np.zeros((ny,N_sim_t-N)) # output trajectory placeholders
for k in range(0,N_sim_t-N):
x0 = np.asmatrix(X_cl[:,k])
d = np.asmatrix(D_t[:,k:k+N]).T
r = np.asmatrix(R_t[:,k:k+N]).T
umin_k = np.asmatrix(umin[k:k+N,:])
umax_k = np.asmatrix(umax[k:k+N,:])
ymin_k = np.asmatrix(ymin[k:k+N,:])
ymax_k = np.asmatrix(ymax[k:k+N,:])
x0_in = torch.from_numpy(x0).float()
d_in = torch.from_numpy(d).float().unsqueeze(0)
r_in = torch.from_numpy(r).float().unsqueeze(0)
umin_in = torch.from_numpy(umin_k).float().unsqueeze(0)
umax_in = torch.from_numpy(umax_k).float().unsqueeze(0)
ymin_in = torch.from_numpy(ymin_k).float().unsqueeze(0)
ymax_in = torch.from_numpy(ymax_k).float().unsqueeze(0)
start_step_time = time.time()
X_pred, Y_pred, U_pred, Sy_min, Sy_max, Su_min, Su_max = model.forward(x0_in,d_in,r_in,ymin_in,ymax_in,umin_in,umax_in)
StepTime[k] = time.time() - start_step_time
if N>1:
U_torch[:,k] = U_pred[:,1].T.detach().numpy().ravel()
d0 = d[1,:].T
else:
U_torch[:,k] = U_pred.T.detach().numpy().ravel()
d0 = d.T
x0 = x0.T
# LTI SSM
xn = A*x0 + B*U_torch[:,k] + E*d0
y = C*x0
X_cl[:,k+1] = xn.ravel()
Y_cl[:,k] = y.ravel()
CPU_mean_time[run] = np.mean(StepTime)
CPU_max_time[run] = np.amax(StepTime)
CPU_mean_time_paper = np.mean(CPU_mean_time)*1e3
CPU_max_time_paper = np.amax(CPU_max_time)*1e3
# Trajectory plots of the final closed-loop run
plt.figure()
# states
plt.subplot(411)
plt.plot(X_cl.T, '--', label='X True')
plt.title('Closed-loop control validation: train set')
plt.ylabel('X')
plt.xlabel('time steps')
plt.show()
# outputs vs. reference
plt.subplot(412)
plt.plot(R_t.T, '--', label='R')
plt.plot(Y_cl.T, label='Y')
plt.ylabel('Y')
plt.xlabel('time steps')
plt.legend()
plt.show()
# control inputs produced by the trained policy
plt.subplot(413)
plt.plot(U_torch.T, label='U Trained')
# plt.plot(U.T, '--', label='U True')
plt.ylabel('U')
plt.xlabel('time steps')
plt.legend()
plt.show()
# disturbance signals
plt.subplot(414)
plt.plot(D_t.T)
# plt.plot(U.T, '--', label='U True')
plt.ylabel('D')
plt.xlabel('time steps')
plt.legend()
plt.show()
# show policy weights
fig, (plt1, plt2, plt3) = plt.subplots(1, 3)
fig.suptitle('Pytorch linear SSM weights')
img1 = plt1.imshow(model.policy1.weight.data)
fig.colorbar(img1, ax=plt1)
plt1.title.set_text('policy1')
img2 = plt2.imshow(model.policy2.weight.data)
fig.colorbar(img2, ax=plt2)
plt2.title.set_text('policy2')
# effective linear map of the two-layer policy (with ReLU-clipped first layer)
img3 = plt3.imshow(torch.mm(model.policy2.weight.data,F.relu(model.policy1.weight.data)))
fig.colorbar(img3, ax=plt3)
img3 = plt3.title.set_text('Effective Policy')
fig.tight_layout()
| [
"matplotlib.pyplot.title",
"scipy.io.loadmat",
"numpy.ones",
"torch.cat",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"torch.ones",
"torch.nn.MSELoss",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.relu",
"numpy.matlib.repmat",
"matplotlib.pyplot.s... | [((354, 493), 'numpy.matrix', 'np.matrix', (['[[0.995, 0.0017, 0.0, 0.0031], [0.0007, 0.9957, 0.0003, 0.0031], [0.0, \n 0.0003, 0.9834, 0.0], [0.2015, 0.4877, 0.01, 0.2571]]'], {}), '([[0.995, 0.0017, 0.0, 0.0031], [0.0007, 0.9957, 0.0003, 0.0031],\n [0.0, 0.0003, 0.9834, 0.0], [0.2015, 0.4877, 0.01, 0.2571]])\n', (363, 493), True, 'import numpy as np\n'), ((506, 572), 'numpy.matrix', 'np.matrix', (['[[1.7586e-06], [1.7584e-06], [1.839e-10], [0.00050563]]'], {}), '([[1.7586e-06], [1.7584e-06], [1.839e-10], [0.00050563]])\n', (515, 572), True, 'import numpy as np\n'), ((590, 692), 'numpy.matrix', 'np.matrix', (['[[0.0002, 0.0, 0.0], [0.0002, 0.0, 0.0], [0.0163, 0.0, 0.0], [0.0536, \n 0.0005, 0.0001]]'], {}), '([[0.0002, 0.0, 0.0], [0.0002, 0.0, 0.0], [0.0163, 0.0, 0.0], [\n 0.0536, 0.0005, 0.0001]])\n', (599, 692), True, 'import numpy as np\n'), ((714, 747), 'numpy.matrix', 'np.matrix', (['[[0.0, 0.0, 0.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0, 1.0]])\n', (723, 747), True, 'import numpy as np\n'), ((1118, 1154), 'numpy.matlib.repmat', 'np.matlib.repmat', (['U_day', '(1)', 'Sim_days'], {}), '(U_day, 1, Sim_days)\n', (1134, 1154), True, 'import numpy as np\n'), ((1251, 1287), 'scipy.io.loadmat', 'loadmat', (['"""../TimeSeries/disturb.mat"""'], {}), "('../TimeSeries/disturb.mat')\n", (1258, 1287), False, 'from scipy.io import loadmat\n'), ((1377, 1398), 'numpy.zeros', 'np.zeros', (['(ny, N_sim)'], {}), '((ny, N_sim))\n', (1385, 1398), True, 'import numpy as np\n'), ((1435, 1460), 'numpy.zeros', 'np.zeros', (['(nx, N_sim + 1)'], {}), '((nx, N_sim + 1))\n', (1443, 1460), True, 'import numpy as np\n'), ((1881, 1922), 'numpy.matlib.repmat', 'np.matlib.repmat', (['U_day', '(1)', 'Sim_days_test'], {}), '(U_day, 1, Sim_days_test)\n', (1897, 1922), True, 'import numpy as np\n'), ((2069, 2092), 'numpy.zeros', 'np.zeros', (['(ny, N_sim_t)'], {}), '((ny, N_sim_t))\n', (2077, 2092), True, 'import numpy as np\n'), ((2131, 2158), 'numpy.zeros', 'np.zeros', 
(['(nx, N_sim_t + 1)'], {}), '((nx, N_sim_t + 1))\n', (2139, 2158), True, 'import numpy as np\n'), ((6092, 6128), 'numpy.matlib.repmat', 'np.matlib.repmat', (['R_day', '(1)', 'Sim_days'], {}), '(R_day, 1, Sim_days)\n', (6108, 6128), True, 'import numpy as np\n'), ((6463, 6502), 'torch.zeros', 'torch.zeros', (['(samples_day * Sim_days)', 'nx'], {}), '(samples_day * Sim_days, nx)\n', (6474, 6502), False, 'import torch\n'), ((6513, 6552), 'torch.zeros', 'torch.zeros', (['(samples_day * Sim_days)', 'ny'], {}), '(samples_day * Sim_days, ny)\n', (6524, 6552), False, 'import torch\n'), ((6563, 6602), 'torch.zeros', 'torch.zeros', (['(samples_day * Sim_days)', 'nu'], {}), '(samples_day * Sim_days, nu)\n', (6574, 6602), False, 'import torch\n'), ((6612, 6651), 'torch.zeros', 'torch.zeros', (['(samples_day * Sim_days)', 'nu'], {}), '(samples_day * Sim_days, nu)\n', (6623, 6651), False, 'import torch\n'), ((7122, 7138), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (7134, 7138), False, 'import torch\n'), ((7308, 7320), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (7318, 7320), False, 'from torch import nn\n'), ((8340, 8358), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (8350, 8358), True, 'import matplotlib.pyplot as plt\n'), ((8359, 8378), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (8369, 8378), True, 'import matplotlib.pyplot as plt\n'), ((8750, 8769), 'numpy.zeros', 'np.zeros', (['Eval_runs'], {}), '(Eval_runs)\n', (8758, 8769), True, 'import numpy as np\n'), ((8785, 8804), 'numpy.zeros', 'np.zeros', (['Eval_runs'], {}), '(Eval_runs)\n', (8793, 8804), True, 'import numpy as np\n'), ((9098, 9139), 'numpy.matlib.repmat', 'np.matlib.repmat', (['R_day', '(1)', 'Sim_days_test'], {}), '(R_day, 1, Sim_days_test)\n', (9114, 9139), True, 'import numpy as np\n'), ((11138, 11150), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11148, 11150), True, 'import matplotlib.pyplot as plt\n'), 
((11151, 11167), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(411)'], {}), '(411)\n', (11162, 11167), True, 'import matplotlib.pyplot as plt\n'), ((11168, 11206), 'matplotlib.pyplot.plot', 'plt.plot', (['X_cl.T', '"""--"""'], {'label': '"""X True"""'}), "(X_cl.T, '--', label='X True')\n", (11176, 11206), True, 'import matplotlib.pyplot as plt\n'), ((11207, 11261), 'matplotlib.pyplot.title', 'plt.title', (['"""Closed-loop control validation: train set"""'], {}), "('Closed-loop control validation: train set')\n", (11216, 11261), True, 'import matplotlib.pyplot as plt\n'), ((11262, 11277), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""X"""'], {}), "('X')\n", (11272, 11277), True, 'import matplotlib.pyplot as plt\n'), ((11278, 11302), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time steps"""'], {}), "('time steps')\n", (11288, 11302), True, 'import matplotlib.pyplot as plt\n'), ((11303, 11313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11311, 11313), True, 'import matplotlib.pyplot as plt\n'), ((11314, 11330), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(412)'], {}), '(412)\n', (11325, 11330), True, 'import matplotlib.pyplot as plt\n'), ((11331, 11363), 'matplotlib.pyplot.plot', 'plt.plot', (['R_t.T', '"""--"""'], {'label': '"""R"""'}), "(R_t.T, '--', label='R')\n", (11339, 11363), True, 'import matplotlib.pyplot as plt\n'), ((11364, 11391), 'matplotlib.pyplot.plot', 'plt.plot', (['Y_cl.T'], {'label': '"""Y"""'}), "(Y_cl.T, label='Y')\n", (11372, 11391), True, 'import matplotlib.pyplot as plt\n'), ((11392, 11407), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (11402, 11407), True, 'import matplotlib.pyplot as plt\n'), ((11408, 11432), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time steps"""'], {}), "('time steps')\n", (11418, 11432), True, 'import matplotlib.pyplot as plt\n'), ((11433, 11445), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11443, 11445), True, 'import matplotlib.pyplot as plt\n'), 
((11446, 11456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11454, 11456), True, 'import matplotlib.pyplot as plt\n'), ((11457, 11473), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(413)'], {}), '(413)\n', (11468, 11473), True, 'import matplotlib.pyplot as plt\n'), ((11474, 11512), 'matplotlib.pyplot.plot', 'plt.plot', (['U_torch.T'], {'label': '"""U Trained"""'}), "(U_torch.T, label='U Trained')\n", (11482, 11512), True, 'import matplotlib.pyplot as plt\n'), ((11551, 11566), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""U"""'], {}), "('U')\n", (11561, 11566), True, 'import matplotlib.pyplot as plt\n'), ((11567, 11591), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time steps"""'], {}), "('time steps')\n", (11577, 11591), True, 'import matplotlib.pyplot as plt\n'), ((11592, 11604), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11602, 11604), True, 'import matplotlib.pyplot as plt\n'), ((11605, 11615), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11613, 11615), True, 'import matplotlib.pyplot as plt\n'), ((11616, 11632), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(414)'], {}), '(414)\n', (11627, 11632), True, 'import matplotlib.pyplot as plt\n'), ((11633, 11648), 'matplotlib.pyplot.plot', 'plt.plot', (['D_t.T'], {}), '(D_t.T)\n', (11641, 11648), True, 'import matplotlib.pyplot as plt\n'), ((11687, 11702), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""D"""'], {}), "('D')\n", (11697, 11702), True, 'import matplotlib.pyplot as plt\n'), ((11703, 11727), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time steps"""'], {}), "('time steps')\n", (11713, 11727), True, 'import matplotlib.pyplot as plt\n'), ((11728, 11740), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11738, 11740), True, 'import matplotlib.pyplot as plt\n'), ((11741, 11751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11749, 11751), True, 'import matplotlib.pyplot as plt\n'), ((11803, 11821), 'matplotlib.pyplot.subplots', 
'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (11815, 11821), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1514), 'numpy.ones', 'np.ones', (['nx'], {}), '(nx)\n', (1510, 1514), True, 'import numpy as np\n'), ((6202, 6240), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days)', 'ny'], {}), '(samples_day * Sim_days, ny)\n', (6212, 6240), False, 'import torch\n'), ((6254, 6292), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days)', 'ny'], {}), '(samples_day * Sim_days, ny)\n', (6264, 6292), False, 'import torch\n'), ((6335, 6373), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days)', 'nu'], {}), '(samples_day * Sim_days, nu)\n', (6345, 6373), False, 'import torch\n'), ((6387, 6425), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days)', 'nu'], {}), '(samples_day * Sim_days, nu)\n', (6397, 6425), False, 'import torch\n'), ((7189, 7229), 'torch.cat', 'torch.cat', (['(R_target_use, R_in[:, k])', '(0)'], {}), '((R_target_use, R_in[:, k]), 0)\n', (7198, 7229), False, 'import torch\n'), ((8851, 8894), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days_test)', 'ny'], {}), '(samples_day * Sim_days_test, ny)\n', (8861, 8894), False, 'import torch\n'), ((8908, 8951), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days_test)', 'ny'], {}), '(samples_day * Sim_days_test, ny)\n', (8918, 8951), False, 'import torch\n'), ((8994, 9037), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days_test)', 'nu'], {}), '(samples_day * Sim_days_test, nu)\n', (9004, 9037), False, 'import torch\n'), ((9051, 9094), 'torch.ones', 'torch.ones', (['(samples_day * Sim_days_test)', 'nu'], {}), '(samples_day * Sim_days_test, nu)\n', (9061, 9094), False, 'import torch\n'), ((9253, 9270), 'numpy.zeros', 'np.zeros', (['N_sim_t'], {}), '(N_sim_t)\n', (9261, 9270), True, 'import numpy as np\n'), ((9290, 9317), 'numpy.zeros', 'np.zeros', (['(nu, N_sim_t - N)'], {}), '((nu, N_sim_t - N))\n', (9298, 9317), True, 'import numpy as np\n'), ((9358, 9389), 'numpy.zeros', 'np.zeros', 
(['(nx, N_sim_t + 1 - N)'], {}), '((nx, N_sim_t + 1 - N))\n', (9366, 9389), True, 'import numpy as np\n'), ((9481, 9508), 'numpy.zeros', 'np.zeros', (['(ny, N_sim_t - N)'], {}), '((ny, N_sim_t - N))\n', (9489, 9508), True, 'import numpy as np\n'), ((10970, 10987), 'numpy.mean', 'np.mean', (['StepTime'], {}), '(StepTime)\n', (10977, 10987), True, 'import numpy as np\n'), ((11012, 11029), 'numpy.amax', 'np.amax', (['StepTime'], {}), '(StepTime)\n', (11019, 11029), True, 'import numpy as np\n'), ((11056, 11078), 'numpy.mean', 'np.mean', (['CPU_mean_time'], {}), '(CPU_mean_time)\n', (11063, 11078), True, 'import numpy as np\n'), ((11105, 11126), 'numpy.amax', 'np.amax', (['CPU_max_time'], {}), '(CPU_max_time)\n', (11112, 11126), True, 'import numpy as np\n'), ((1046, 1094), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / samples_day)'], {}), '(0, 2 * np.pi, 2 * np.pi / samples_day)\n', (1055, 1094), True, 'import numpy as np\n'), ((1612, 1632), 'numpy.asmatrix', 'np.asmatrix', (['X[:, k]'], {}), '(X[:, k])\n', (1623, 1632), True, 'import numpy as np\n'), ((1642, 1662), 'numpy.asmatrix', 'np.asmatrix', (['D[:, k]'], {}), '(D[:, k])\n', (1653, 1662), True, 'import numpy as np\n'), ((1672, 1692), 'numpy.asmatrix', 'np.asmatrix', (['U[:, k]'], {}), '(U[:, k])\n', (1683, 1692), True, 'import numpy as np\n'), ((2297, 2319), 'numpy.asmatrix', 'np.asmatrix', (['X_t[:, k]'], {}), '(X_t[:, k])\n', (2308, 2319), True, 'import numpy as np\n'), ((2329, 2351), 'numpy.asmatrix', 'np.asmatrix', (['D_t[:, k]'], {}), '(D_t[:, k])\n', (2340, 2351), True, 'import numpy as np\n'), ((2361, 2383), 'numpy.asmatrix', 'np.asmatrix', (['U_t[:, k]'], {}), '(U_t[:, k])\n', (2372, 2383), True, 'import numpy as np\n'), ((2767, 2796), 'torch.nn.Linear', 'nn.Linear', (['nx', 'nx'], {'bias': '(False)'}), '(nx, nx, bias=False)\n', (2776, 2796), False, 'from torch import nn\n'), ((2813, 2842), 'torch.nn.Linear', 'nn.Linear', (['nu', 'nx'], {'bias': '(False)'}), '(nu, nx, 
bias=False)\n', (2822, 2842), False, 'from torch import nn\n'), ((2859, 2888), 'torch.nn.Linear', 'nn.Linear', (['nd', 'nx'], {'bias': '(False)'}), '(nd, nx, bias=False)\n', (2868, 2888), False, 'from torch import nn\n'), ((2905, 2934), 'torch.nn.Linear', 'nn.Linear', (['nx', 'ny'], {'bias': '(False)'}), '(nx, ny, bias=False)\n', (2914, 2934), False, 'from torch import nn\n'), ((2998, 3051), 'torch.nn.Linear', 'nn.Linear', (['(nx + nd * N + nr * N)', 'n_hidden'], {'bias': '(False)'}), '(nx + nd * N + nr * N, n_hidden, bias=False)\n', (3007, 3051), False, 'from torch import nn\n'), ((3067, 3106), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', '(nu * N)'], {'bias': '(False)'}), '(n_hidden, nu * N, bias=False)\n', (3076, 3106), False, 'from torch import nn\n'), ((3922, 3938), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (3934, 3938), False, 'import torch\n'), ((3951, 3967), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (3963, 3967), False, 'import torch\n'), ((3981, 3997), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (3993, 3997), False, 'import torch\n'), ((4023, 4039), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (4035, 4039), False, 'import torch\n'), ((4058, 4074), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (4070, 4074), False, 'import torch\n'), ((4093, 4109), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (4105, 4109), False, 'import torch\n'), ((4128, 4144), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (4140, 4144), False, 'import torch\n'), ((4292, 4321), 'torch.cat', 'torch.cat', (['(x, D_xi, R_xi)', '(1)'], {}), '((x, D_xi, R_xi), 1)\n', (4301, 4321), False, 'import torch\n'), ((6926, 6976), 'numpy.random.rand', 'np.random.rand', (['X_train.shape[0]', 'X_train.shape[1]'], {}), '(X_train.shape[0], X_train.shape[1])\n', (6940, 6976), True, 'import numpy as np\n'), ((6988, 7014), 'torch.from_numpy', 'torch.from_numpy', (['X0_train'], {}), '(X0_train)\n', (7004, 7014), False, 'import 
torch\n'), ((7031, 7056), 'torch.from_numpy', 'torch.from_numpy', (['D_train'], {}), '(D_train)\n', (7047, 7056), False, 'import torch\n'), ((7072, 7097), 'torch.from_numpy', 'torch.from_numpy', (['R_train'], {}), '(R_train)\n', (7088, 7097), False, 'import torch\n'), ((9437, 9448), 'numpy.ones', 'np.ones', (['nx'], {}), '(nx)\n', (9444, 9448), True, 'import numpy as np\n'), ((9595, 9618), 'numpy.asmatrix', 'np.asmatrix', (['X_cl[:, k]'], {}), '(X_cl[:, k])\n', (9606, 9618), True, 'import numpy as np\n'), ((9724, 9753), 'numpy.asmatrix', 'np.asmatrix', (['umin[k:k + N, :]'], {}), '(umin[k:k + N, :])\n', (9735, 9753), True, 'import numpy as np\n'), ((9768, 9797), 'numpy.asmatrix', 'np.asmatrix', (['umax[k:k + N, :]'], {}), '(umax[k:k + N, :])\n', (9779, 9797), True, 'import numpy as np\n'), ((9812, 9841), 'numpy.asmatrix', 'np.asmatrix', (['ymin[k:k + N, :]'], {}), '(ymin[k:k + N, :])\n', (9823, 9841), True, 'import numpy as np\n'), ((9856, 9885), 'numpy.asmatrix', 'np.asmatrix', (['ymax[k:k + N, :]'], {}), '(ymax[k:k + N, :])\n', (9867, 9885), True, 'import numpy as np\n'), ((10349, 10360), 'time.time', 'time.time', ([], {}), '()\n', (10358, 10360), False, 'import time\n'), ((12129, 12162), 'torch.nn.functional.relu', 'F.relu', (['model.policy1.weight.data'], {}), '(model.policy1.weight.data)\n', (12135, 12162), True, 'import torch.nn.functional as F\n'), ((3232, 3247), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3245, 3247), False, 'import torch\n'), ((4895, 4912), 'torch.nn.functional.relu', 'F.relu', (['(-u + umin)'], {}), '(-u + umin)\n', (4901, 4912), True, 'import torch.nn.functional as F\n'), ((4970, 4986), 'torch.nn.functional.relu', 'F.relu', (['(u - umax)'], {}), '(u - umax)\n', (4976, 4986), True, 'import torch.nn.functional as F\n'), ((5115, 5132), 'torch.nn.functional.relu', 'F.relu', (['(-y + ymin)'], {}), '(-y + ymin)\n', (5121, 5132), True, 'import torch.nn.functional as F\n'), ((5188, 5204), 'torch.nn.functional.relu', 'F.relu', (['(y - 
ymax)'], {}), '(y - ymax)\n', (5194, 5204), True, 'import torch.nn.functional as F\n'), ((5284, 5314), 'torch.cat', 'torch.cat', (['(Sy_min, s_ymin)', '(0)'], {}), '((Sy_min, s_ymin), 0)\n', (5293, 5314), False, 'import torch\n'), ((5336, 5366), 'torch.cat', 'torch.cat', (['(Sy_max, s_ymax)', '(0)'], {}), '((Sy_max, s_ymax), 0)\n', (5345, 5366), False, 'import torch\n'), ((5388, 5418), 'torch.cat', 'torch.cat', (['(Su_min, s_umin)', '(0)'], {}), '((Su_min, s_umin), 0)\n', (5397, 5418), False, 'import torch\n'), ((5440, 5470), 'torch.cat', 'torch.cat', (['(Su_max, s_umax)', '(0)'], {}), '((Su_max, s_umax), 0)\n', (5449, 5470), False, 'import torch\n'), ((5500, 5520), 'torch.cat', 'torch.cat', (['(X, x)', '(0)'], {}), '((X, x), 0)\n', (5509, 5520), False, 'import torch\n'), ((5537, 5557), 'torch.cat', 'torch.cat', (['(Y, y)', '(0)'], {}), '((Y, y), 0)\n', (5546, 5557), False, 'import torch\n'), ((6020, 6068), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2 * np.pi / samples_day)'], {}), '(0, 2 * np.pi, 2 * np.pi / samples_day)\n', (6029, 6068), True, 'import numpy as np\n'), ((9630, 9658), 'numpy.asmatrix', 'np.asmatrix', (['D_t[:, k:k + N]'], {}), '(D_t[:, k:k + N])\n', (9641, 9658), True, 'import numpy as np\n'), ((9670, 9698), 'numpy.asmatrix', 'np.asmatrix', (['R_t[:, k:k + N]'], {}), '(R_t[:, k:k + N])\n', (9681, 9698), True, 'import numpy as np\n'), ((10512, 10523), 'time.time', 'time.time', ([], {}), '()\n', (10521, 10523), False, 'import time\n'), ((3281, 3300), 'torch.from_numpy', 'torch.from_numpy', (['A'], {}), '(A)\n', (3297, 3300), False, 'import torch\n'), ((3334, 3353), 'torch.from_numpy', 'torch.from_numpy', (['B'], {}), '(B)\n', (3350, 3353), False, 'import torch\n'), ((3387, 3406), 'torch.from_numpy', 'torch.from_numpy', (['E'], {}), '(E)\n', (3403, 3406), False, 'import torch\n'), ((3440, 3459), 'torch.from_numpy', 'torch.from_numpy', (['C'], {}), '(C)\n', (3456, 3459), False, 'import torch\n'), ((9908, 9928), 'torch.from_numpy', 
'torch.from_numpy', (['x0'], {}), '(x0)\n', (9924, 9928), False, 'import torch\n'), ((9952, 9971), 'torch.from_numpy', 'torch.from_numpy', (['d'], {}), '(d)\n', (9968, 9971), False, 'import torch\n'), ((10008, 10027), 'torch.from_numpy', 'torch.from_numpy', (['r'], {}), '(r)\n', (10024, 10027), False, 'import torch\n'), ((10076, 10100), 'torch.from_numpy', 'torch.from_numpy', (['umin_k'], {}), '(umin_k)\n', (10092, 10100), False, 'import torch\n'), ((10140, 10164), 'torch.from_numpy', 'torch.from_numpy', (['umax_k'], {}), '(umax_k)\n', (10156, 10164), False, 'import torch\n'), ((10204, 10228), 'torch.from_numpy', 'torch.from_numpy', (['ymin_k'], {}), '(ymin_k)\n', (10220, 10228), False, 'import torch\n'), ((10268, 10292), 'torch.from_numpy', 'torch.from_numpy', (['ymax_k'], {}), '(ymax_k)\n', (10284, 10292), False, 'import torch\n')] |
import numpy as np
from math import *
def cosine_similarity(a, b):
    """Cosine similarity of two equal-length numeric vectors: a.b / (|a| |b|)."""
    norm_a = dot(a, a) ** .5
    norm_b = dot(b, b) ** .5
    return dot(a, b) / (norm_a * norm_b)
def dot(A, B):
    """Dot product of two equal-length sequences."""
    total = 0
    for a, b in zip(A, B):
        total += a * b
    return total
def cosine_sim(x, y):
    """Set-based cosine similarity: |X intersect Y| / sqrt(|X| * |Y|).

    Equivalent to building binary incidence vectors over the union of the
    two element sets and taking their cosine.
    """
    x_set = set(x)
    y_set = set(y)
    shared = len(x_set & y_set)
    return shared / float((len(x_set) * len(y_set)) ** 0.5)
def jaccard_similarity(x, y):
    """Jaccard index of two collections: |X intersect Y| / |X union Y|."""
    x_set = set(x)
    y_set = set(y)
    return len(x_set & y_set) / float(len(x_set | y_set))
def dice_similarity(x, y):
    """Sorensen-Dice coefficient: 2 |X intersect Y| / (|X| + |Y|)."""
    x_set = set(x)
    y_set = set(y)
    overlap = len(x_set & y_set)
    return (2.0 * overlap) / (len(x_set) + len(y_set))
def euclidean_distance(x, y):
    """Euclidean (L2) distance between two equal-length vectors."""
    total = 0
    for a, b in zip(x, y):
        total += (a - b) ** 2
    return sqrt(total)
def weighted_euclidean_distance(x, y, w):
    """L2 distance where coordinates with a > 0 contribute with weight *w*.

    Coordinates of *x* that are <= 0 keep unit weight.
    """
    total = 0
    for a, b in zip(x, y):
        factor = w if a > 0 else 1
        total += factor * pow(a - b, 2)
    return sqrt(total)
def manhattan_distance(x, y):
    """Manhattan (L1) distance between two equal-length vectors."""
    total = 0
    for a, b in zip(x, y):
        total += abs(a - b)
    return total
def square_rooted(x):
    """2-norm (Euclidean length) of vector *x*, rounded to 3 decimal places."""
    squares = [a * a for a in x]
    return round(sqrt(sum(squares)), 3)
def nth_root(value, n_root):
    """Return value ** (1 / n_root), rounded to 3 decimal places."""
    exponent = 1 / float(n_root)
    return round(pow(value, exponent), 3)
def minkowski_distance(x, y, p_value):
    """Minkowski distance of order *p_value* between two vectors (rounded to 3 dp via nth_root)."""
    total = 0
    for a, b in zip(x, y):
        total += pow(abs(a - b), p_value)
    return nth_root(total, p_value)
def mahalanobis_distance(x, y):
    """Mahalanobis-style distance between x and y.

    Both inputs are first deduplicated and truncated to equal length by
    ``trim_to_min``. Returns 0.0 when the covariance matrix is singular.
    Note: because the trimming uses ``set``, the surviving elements are
    order-dependent on hashing, so results for unequal-length inputs can vary.
    """
    x, y = trim_to_min(x, y)
    vec_x = np.array(x)
    vec_y = np.array(y)
    diff = vec_x - vec_y
    samples = np.array([x, y]).T
    cov = np.cov(samples)
    try:
        inv_cov = np.linalg.inv(cov)
    except np.linalg.LinAlgError:
        # Singular covariance -> distance is undefined; fall back to 0.
        return 0.0
    m = np.dot(np.dot(diff, inv_cov), diff)
    # Guard against tiny negative values from floating-point error.
    return np.sqrt(m) if m >= 0 else np.sqrt(abs(m))
def weighted_euclidean_distances(X, w):
    """All pairwise weighted distances over X (2-combinations, in order),
    followed by each element's self-distance."""
    from itertools import combinations
    distances = [weighted_euclidean_distance(a, b, w) for a, b in combinations(X, 2)]
    distances.extend(weighted_euclidean_distance(a, a, w) for a in X)
    return distances
def trim_to_min(text1 = [], text2 = []):
    """Deduplicate both inputs and truncate the longer one to the shorter's length.

    Returns a (list, list) pair of equal length.

    NOTE(review): mutable default arguments (``[]``) are a Python pitfall;
    harmless here only because the parameters are rebound, never mutated.
    NOTE(review): ``set`` drops duplicates and does not preserve order, so
    which elements survive the truncation is hash-order dependent.
    """
    # Deduplicate (order becomes arbitrary).
    text1 = list(set(text1))
    text2 = list(set(text2))
    # Truncate whichever list is longer.
    if len(text1) > len(text2):
        text1 = text1[:len(text2)]
    elif len(text1) < len(text2):
        text2 = text2[:len(text1)]
    return text1, text2 | [
"itertools.combinations",
"numpy.array",
"numpy.linalg.inv",
"numpy.dot",
"numpy.cov",
"numpy.sqrt"
] | [((1965, 1976), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1973, 1976), True, 'import numpy as np\n'), ((1986, 1997), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1994, 1997), True, 'import numpy as np\n'), ((2059, 2070), 'numpy.cov', 'np.cov', (['mat'], {}), '(mat)\n', (2065, 2070), True, 'import numpy as np\n'), ((2608, 2626), 'itertools.combinations', 'combinations', (['X', '(2)'], {}), '(X, 2)\n', (2620, 2626), False, 'from itertools import combinations\n'), ((2029, 2045), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2037, 2045), True, 'import numpy as np\n'), ((2095, 2114), 'numpy.linalg.inv', 'np.linalg.inv', (['mcov'], {}), '(mcov)\n', (2108, 2114), True, 'import numpy as np\n'), ((2214, 2233), 'numpy.dot', 'np.dot', (['delta', 'icov'], {}), '(delta, icov)\n', (2220, 2233), True, 'import numpy as np\n'), ((2257, 2267), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (2264, 2267), True, 'import numpy as np\n')] |
# MEAN FILTERING AND EDGE DETECTION FOR VIDEO
# AUTHOR: <NAME>
import numpy as np
import cv2
import skvideo.io
def Videogray(video):
    """Convert *video* to grayscale and write the result to 'video_gray.avi'.

    :param video: path to the input video file

    Fixes a resource leak in the original: neither the VideoCapture nor the
    VideoWriter was ever released.
    """
    cap = cv2.VideoCapture(video)
    try:
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        # Final argument 0 (isColor=False): frames are written single-channel.
        output_video = cv2.VideoWriter('video_gray.avi', codec, fps, (width, height), 0)
        try:
            for _ in range(total_frames):
                ret, frame = cap.read()
                if not ret:  # stream ended earlier than the reported frame count
                    break
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                output_video.write(frame_gray)
        finally:
            output_video.release()  # flush and close the writer (previously leaked)
    finally:
        cap.release()  # close the capture handle (previously leaked)
def Pad(A, size):
    """Zero-pad a 3D uint8 volume by size // 2 voxels on every side.

    :param A: 3D array (frames, height, width)
    :param size: kernel size; size - 1 zeros are added per dimension
                 (size // 2 at each border for odd ``size``)
    :return: padded uint8 array

    The debug ``print`` statements of the original were removed.
    """
    pad = size // 2
    padded_A = np.zeros((A.shape[0] + (size - 1), A.shape[1] + (size - 1), A.shape[2] + (size - 1)), np.uint8)
    # Copy the original volume into the centre of the zero canvas.
    padded_A[pad:-pad, pad:-pad, pad:-pad] = A
    return padded_A
def Mean(video):
    """Apply a 3x3x3 mean (box) filter to a 3D video volume.

    :param video: 3D array (frames, height, width), expected to be
        zero-padded by one voxel on each side (see ``Pad``)
    :return: float array of the same shape as *video*; border voxels stay 0

    Fixes two defects of the original: the dimensions were hard-coded to
    166x320x560 instead of taken from ``video.shape``, and the slice
    ``video[i-1:i+1, ...]`` covered only a 2x2x2 window while the sum was
    divided by 27.
    """
    total_frames, height, width = video.shape
    new = np.zeros((total_frames, height, width))
    for i in range(1, total_frames - 1):
        for j in range(1, height - 1):
            for k in range(1, width - 1):
                # Full 3x3x3 neighbourhood centred on (i, j, k).
                new[i, j, k] = video[i - 1:i + 2, j - 1:j + 2, k - 1:k + 2].sum() / (3 * 3 * 3)
    return new
def MySobel(video):
Gradient_X = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]])
Gradient_Y = np.array([[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]])
[total_frames, height, width] = np.shape(video)
sobel_video = np.zeros(shape=(total_frames, height, width))
for i in range(total_frames - 2):
for j in range(height - 2):
for k in range(width - 2):
x = np.sum(np.multiply(Gradient_X, video[i:i + 3, j:j + 3, k:k + 3]))
y = np.sum(np.multiply(Gradient_Y, video[i:i + 3, j:j + 3, k:k + 3]))
sobel_video[i + 1, j + 1, k + 1] = np.sqrt(x ** 2 + y ** 2)
return sobel_video
def main():
    """Driver: grayscale the input video, then run mean filtering (task 1)
    and Sobel edge detection (task 2), saving each result as an .avi file."""
    Videogray('video.avi')
    video = skvideo.io.vread('video_gray.avi')
    # Keep a single channel of the (frames, h, w, 3) grayscale read-back.
    video = video[:, :, :, 0]
    # Already 3D after the channel selection; this reshape is a no-op.
    video = video.reshape((video.shape[0], video.shape[1], video.shape[2]))
    print(video.shape)
    #---TASK 1---#
    # Pad the Video
    padded_video = Pad(video, 3)
    print(padded_video.shape)
    # Apply Mean Filter
    output_1 = Mean(padded_video)
    print("The output_1 shape is:", output_1.shape)
    # Save the Video
    skvideo.io.vwrite('output_1.avi', output_1)
    #---TASK 2---#
    # Apply MySobel Function
    output_2 = MySobel(video)
    print("The output_2 shape is:", output_2.shape)
    # Save the Video
    skvideo.io.vwrite('output_2.avi', output_2)
if __name__ == "__main__":
    main() | [
"numpy.multiply",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"numpy.zeros",
"cv2.VideoCapture",
"numpy.shape",
"numpy.array",
"cv2.VideoWriter",
"numpy.sqrt"
] | [((152, 175), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (168, 175), False, 'import cv2\n'), ((390, 432), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (412, 432), False, 'import cv2\n'), ((451, 516), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""video_gray.avi"""', 'codec', 'fps', '(width, height)', '(0)'], {}), "('video_gray.avi', codec, fps, (width, height), 0)\n", (466, 516), False, 'import cv2\n'), ((784, 884), 'numpy.zeros', 'np.zeros', (['(A.shape[0] + (size - 1), A.shape[1] + (size - 1), A.shape[2] + (size - 1))', 'np.uint8'], {}), '((A.shape[0] + (size - 1), A.shape[1] + (size - 1), A.shape[2] + (\n size - 1)), np.uint8)\n', (792, 884), True, 'import numpy as np\n'), ((1137, 1176), 'numpy.zeros', 'np.zeros', (['(total_frames, height, width)'], {}), '((total_frames, height, width))\n', (1145, 1176), True, 'import numpy as np\n'), ((1427, 1491), 'numpy.array', 'np.array', (['[[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]]'], {}), '([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]])\n', (1435, 1491), True, 'import numpy as np\n'), ((1509, 1573), 'numpy.array', 'np.array', (['[[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]]'], {}), '([[1.0, 2.0, 1.0], [0.0, 0.0, 0.0], [-1.0, -2.0, -1.0]])\n', (1517, 1573), True, 'import numpy as np\n'), ((1610, 1625), 'numpy.shape', 'np.shape', (['video'], {}), '(video)\n', (1618, 1625), True, 'import numpy as np\n'), ((1644, 1689), 'numpy.zeros', 'np.zeros', ([], {'shape': '(total_frames, height, width)'}), '(shape=(total_frames, height, width))\n', (1652, 1689), True, 'import numpy as np\n'), ((649, 688), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (661, 688), False, 'import cv2\n'), ((2030, 2054), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2037, 2054), True, 'import numpy as np\n'), ((1830, 1887), 
'numpy.multiply', 'np.multiply', (['Gradient_X', 'video[i:i + 3, j:j + 3, k:k + 3]'], {}), '(Gradient_X, video[i:i + 3, j:j + 3, k:k + 3])\n', (1841, 1887), True, 'import numpy as np\n'), ((1918, 1975), 'numpy.multiply', 'np.multiply', (['Gradient_Y', 'video[i:i + 3, j:j + 3, k:k + 3]'], {}), '(Gradient_Y, video[i:i + 3, j:j + 3, k:k + 3])\n', (1929, 1975), True, 'import numpy as np\n')] |
import glob
import cv2
import json
import os
import numpy as np
import xml.etree.cElementTree as ET
from weed_annotator.annotation_converter.BoundingBox import BoundingBox
from weed_annotator.annotation_converter.Polygon import Polygon
from weed_annotator.annotation_converter.Annotation import Annotation
class AnnotationConverter:
    """Converts annotations between CVAT XML, Supervisely JSON and binary masks."""

    @staticmethod
    def write_cvat(annotations, annotation_file):
        """Serialize a list of Annotation objects to a CVAT XML file.

        Note: image names are written with ".png" replaced by ".jpg".
        """
        labels = []
        root = AnnotationConverter._init_cvat([])
        for annotation in annotations:
            image = ET.SubElement(root, "image", name="%s" % (annotation.get_image_name().replace(".png", ".jpg")),
                                  id="0",
                                  width="%s" % annotation.get_img_width(), height="%s" % annotation.get_img_height())
            polygon_list = annotation.get_polygons()
            for poly in polygon_list:
                label = poly.get_label()
                if label not in labels:
                    labels.append(label)
                points = poly.get_polygon_points()
                # CVAT expects "x1, y1; x2, y2; ..." point strings.
                poly_string = ""
                for x, y in zip(points["x"], points["y"]):
                    poly_string += "%f, %f;" % (x, y)
                poly_string = poly_string[:-1]
                ET.SubElement(image, "polygon", label=label, points=poly_string, occluded="0", z_order="1")
        AnnotationConverter._add_label_to_cvat(root, labels)
        tree = ET.ElementTree(root)
        tree.write(f"{annotation_file}")

    @staticmethod
    def _init_cvat(label_list):
        """Create the skeleton XML tree (version/meta/task/owner) for a CVAT file."""
        root = ET.Element("annotations")
        ET.SubElement(root, "version").text = "1.1"
        meta = ET.SubElement(root, "meta")
        task = ET.SubElement(meta, "task")
        ET.SubElement(task, "flipped").text = "False"
        ET.SubElement(task, "labels")
        root = AnnotationConverter._add_label_to_cvat(root, label_list)
        owner = ET.SubElement(task, "owner")
        ET.SubElement(owner, "username").text = "user"
        ET.SubElement(owner, "email").text = "user@mail.dk"
        return root

    @staticmethod
    def _add_label_to_cvat(root, label_list):
        """Append any label from *label_list* not yet present under meta/task/labels."""
        # Walk down annotations -> meta -> task -> labels by tag name.
        for se in root:
            if se.tag == "meta":
                for sse in se:
                    if sse.tag == "task":
                        for ssse in sse:
                            if ssse.tag == "labels":
                                # Collect the names of labels already registered.
                                existing_labels = []
                                for sssse in ssse:
                                    for ssssse in sssse:
                                        existing_labels.append(ssssse.text)
                                for label in label_list:
                                    if label in existing_labels:
                                        continue
                                    new_label = ET.SubElement(ssse, "label")
                                    ET.SubElement(new_label, "name").text = label
                                    ET.SubElement(new_label, "attributes")
        return root

    @staticmethod
    def extend_cvat(ann, path_to_annotation_file):
        """Append one Annotation to an existing CVAT file, creating it if missing."""
        if not os.path.isfile(path_to_annotation_file):
            # ToDo: Automatically extract all labels in annotations
            os.makedirs(os.path.dirname(path_to_annotation_file), exist_ok=True)
            root = AnnotationConverter._init_cvat([])
        else:
            root = ET.parse(path_to_annotation_file).getroot()
        image = ET.SubElement(root, "image", name="%s" % (ann.get_image_name()), id="0",
                              width="%s" % ann.get_img_width(),
                              height="%s" % ann.get_img_height())
        polygon_anns = ann.get_polygons()
        if polygon_anns:
            labels = []
            for polygon_ann in polygon_anns:
                label = polygon_ann.get_label()
                if label not in labels:
                    labels.append(label)
                polygon_pts = polygon_ann.get_polygon_points_as_array()
                poly_string = ""
                for point in polygon_pts:
                    # Keeping cvat format
                    x = point[0]
                    y = point[1]
                    poly_string += "%f, %f;" % (x, y)
                poly_string = poly_string[:-1]
                ET.SubElement(image, "polygon", label=label, points=poly_string, occluded="0", z_order="1")
            root = AnnotationConverter._add_label_to_cvat(root, labels)
        tree = ET.ElementTree(root)
        tree.write(path_to_annotation_file)

    @staticmethod
    def get_mask(annotation, label_list, width, height, color=(255, 255, 255)):
        """Rasterize the polygons whose label is in *label_list* into an RGB mask.

        NOTE(review): the canvas is allocated as (width, height, 3); OpenCV
        images are normally (rows=height, cols=width) -- confirm callers pass
        the arguments accordingly.
        """
        mask = np.zeros((width, height, 3), dtype=np.uint8)
        if annotation is not None:
            polygons = annotation.get_polygons()
            for pol in polygons:
                if pol.get_label() not in label_list:
                    continue
                cv2.fillPoly(mask, pts=[pol.get_polygon_points_as_array()], color=color)
        return mask

    @staticmethod
    def read_cvat_all(path_to_annotation_file):
        """Parse every <image> element of a CVAT file into Annotation objects."""
        root = ET.parse(path_to_annotation_file).getroot()
        annotations = []
        for img in root.findall('image'):
            annotation = AnnotationConverter._cvat_to_annotation(img)
            annotations.append(annotation)
        return annotations

    @staticmethod
    def read_cvat_by_id(path_to_annotation_file, img_id):
        """Return the Annotation for image *img_id*, or None when the file is
        missing (also returns None implicitly when no image matches)."""
        try:
            root = ET.parse(path_to_annotation_file).getroot()
            for img in root.findall('image'):
                if img_id == img.attrib["name"]:
                    annotation = AnnotationConverter._cvat_to_annotation(img)
                    return annotation
        except FileNotFoundError:
            return None

    @staticmethod
    def _cvat_to_annotation(img_xml_info):
        """Convert one CVAT <image> XML element into an Annotation with polygons."""
        img_width = float(img_xml_info.attrib["width"])
        img_height = float(img_xml_info.attrib["height"])
        polygon_list = []
        for pol in img_xml_info.findall("polygon"):
            polygon_ann = Polygon(pol.attrib["label"])
            # Point strings look like "x1, y1;x2, y2;...".
            points_strs = pol.attrib["points"].split(";")
            for points_str in points_strs:
                points_str = points_str.split(",")
                polygon_ann.add_point(int(float(points_str[0])), int(float(points_str[1])))
            polygon_list.append(polygon_ann)
        annotation = Annotation(img_xml_info.attrib["name"], img_width, img_height, None, polygon_list)
        return annotation

    @staticmethod
    def read_from_mask(label, mask_folder):
        """Build Annotations by extracting external contours from mask images.

        :param label: class label assigned to every extracted polygon
        :param mask_folder: glob pattern matching the mask image files
        """
        mask_paths = glob.glob(mask_folder)
        mask_paths.sort()
        annotations = []
        for mask_path in mask_paths:
            rgb_mask = cv2.imread(mask_path)
            gray_image = cv2.cvtColor(rgb_mask, cv2.COLOR_BGR2GRAY)
            ret, thresh = cv2.threshold(gray_image, 1, 255, cv2.THRESH_BINARY)
            contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            pol_list = []
            for contour in contours:
                # Simplify the contour; tolerance is 0.1% of its perimeter.
                points = cv2.approxPolyDP(contour, 0.001 * cv2.arcLength(contour, True), True)
                if points.shape[0] < 15:
                    # Skip tiny/noisy contours.
                    continue
                polygon = Polygon(label)
                for p in points[:, 0, :]:
                    polygon.add_point(p[0], p[1])
                pol_list.append(polygon)
            img_name = os.path.basename(mask_path)
            annotations.append(Annotation(img_name, rgb_mask.shape[0], rgb_mask.shape[1], None, pol_list))
        return annotations

    @staticmethod
    def mask_to_cvat(mask_folder, annotation_file, label):
        """Convert a folder of mask images directly into a CVAT annotation file."""
        annotations = AnnotationConverter.read_from_mask(label, mask_folder)
        AnnotationConverter.write_cvat(annotations, annotation_file)

    @staticmethod
    def supervisely_to_cvat(path_to_supervisely_annotations, cvat_save_folder):
        """Convert a folder of Supervisely JSON annotations into a CVAT file."""
        annotations = AnnotationConverter.read_from_supervisely(path_to_supervisely_annotations)
        AnnotationConverter.write_cvat(annotations, cvat_save_folder)

    @staticmethod
    def read_from_supervisely(path_to_annotation_folder):
        """Parse all Supervisely *.json files in a folder into Annotation objects."""
        annotation_files = glob.glob(f"{path_to_annotation_folder}/*json")
        annotations = []
        for annotation_file in annotation_files:
            bb_list = []
            pol_list = []
            with open(annotation_file) as json_file:
                supervisely_ann = json.load(json_file)
                img_width = supervisely_ann["size"]["width"]
                img_height = supervisely_ann["size"]["height"]
                for object in supervisely_ann["objects"]:
                    if object["geometryType"] == "polygon":
                        pol = Polygon(object["classTitle"])
                        for p in object["points"]["exterior"]:
                            pol.add_point(p[0], p[1])
                        pol_list.append(pol)
                    if object["geometryType"] == "rectangle":
                        # Rectangle corners arrive as [top-left, bottom-right] pairs;
                        # BoundingBox is built as (label, left, top, width, height).
                        left = object["points"]["exterior"][0][0]
                        top = object["points"]["exterior"][0][1]
                        right = object["points"]["exterior"][1][0]
                        bottom = object["points"]["exterior"][1][1]
                        bb = BoundingBox(object["classTitle"], left, top, (right - left), (bottom - top))
                        bb_list.append(bb)
                        pass
            img_name = os.path.basename(annotation_file).replace(".json", "")
            annotations.append(Annotation(img_name, img_width, img_height, bb_list, pol_list))
        return annotations | [
| [
"cv2.findContours",
"xml.etree.cElementTree.parse",
"json.load",
"weed_annotator.annotation_converter.BoundingBox.BoundingBox",
"xml.etree.cElementTree.ElementTree",
"cv2.cvtColor",
"os.path.basename",
"cv2.threshold",
"weed_annotator.annotation_converter.Polygon.Polygon",
"numpy.zeros",
"os.pat... | [((1429, 1449), 'xml.etree.cElementTree.ElementTree', 'ET.ElementTree', (['root'], {}), '(root)\n', (1443, 1449), True, 'import xml.etree.cElementTree as ET\n'), ((1557, 1582), 'xml.etree.cElementTree.Element', 'ET.Element', (['"""annotations"""'], {}), "('annotations')\n", (1567, 1582), True, 'import xml.etree.cElementTree as ET\n'), ((1650, 1677), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['root', '"""meta"""'], {}), "(root, 'meta')\n", (1663, 1677), True, 'import xml.etree.cElementTree as ET\n'), ((1693, 1720), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['meta', '"""task"""'], {}), "(meta, 'task')\n", (1706, 1720), True, 'import xml.etree.cElementTree as ET\n'), ((1783, 1812), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['task', '"""labels"""'], {}), "(task, 'labels')\n", (1796, 1812), True, 'import xml.etree.cElementTree as ET\n'), ((1901, 1929), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['task', '"""owner"""'], {}), "(task, 'owner')\n", (1914, 1929), True, 'import xml.etree.cElementTree as ET\n'), ((4481, 4501), 'xml.etree.cElementTree.ElementTree', 'ET.ElementTree', (['root'], {}), '(root)\n', (4495, 4501), True, 'import xml.etree.cElementTree as ET\n'), ((4660, 4704), 'numpy.zeros', 'np.zeros', (['(width, height, 3)'], {'dtype': 'np.uint8'}), '((width, height, 3), dtype=np.uint8)\n', (4668, 4704), True, 'import numpy as np\n'), ((6388, 6474), 'weed_annotator.annotation_converter.Annotation.Annotation', 'Annotation', (["img_xml_info.attrib['name']", 'img_width', 'img_height', 'None', 'polygon_list'], {}), "(img_xml_info.attrib['name'], img_width, img_height, None,\n polygon_list)\n", (6398, 6474), False, 'from weed_annotator.annotation_converter.Annotation import Annotation\n'), ((6581, 6603), 'glob.glob', 'glob.glob', (['mask_folder'], {}), '(mask_folder)\n', (6590, 6603), False, 'import glob\n'), ((8166, 8213), 'glob.glob', 'glob.glob', (['f"""{path_to_annotation_folder}/*json"""'], {}), 
"(f'{path_to_annotation_folder}/*json')\n", (8175, 8213), False, 'import glob\n'), ((1591, 1621), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['root', '"""version"""'], {}), "(root, 'version')\n", (1604, 1621), True, 'import xml.etree.cElementTree as ET\n'), ((1729, 1759), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['task', '"""flipped"""'], {}), "(task, 'flipped')\n", (1742, 1759), True, 'import xml.etree.cElementTree as ET\n'), ((1938, 1970), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['owner', '"""username"""'], {}), "(owner, 'username')\n", (1951, 1970), True, 'import xml.etree.cElementTree as ET\n'), ((1993, 2022), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['owner', '"""email"""'], {}), "(owner, 'email')\n", (2006, 2022), True, 'import xml.etree.cElementTree as ET\n'), ((3125, 3164), 'os.path.isfile', 'os.path.isfile', (['path_to_annotation_file'], {}), '(path_to_annotation_file)\n', (3139, 3164), False, 'import os\n'), ((6049, 6077), 'weed_annotator.annotation_converter.Polygon.Polygon', 'Polygon', (["pol.attrib['label']"], {}), "(pol.attrib['label'])\n", (6056, 6077), False, 'from weed_annotator.annotation_converter.Polygon import Polygon\n'), ((6715, 6736), 'cv2.imread', 'cv2.imread', (['mask_path'], {}), '(mask_path)\n', (6725, 6736), False, 'import cv2\n'), ((6762, 6804), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb_mask', 'cv2.COLOR_BGR2GRAY'], {}), '(rgb_mask, cv2.COLOR_BGR2GRAY)\n', (6774, 6804), False, 'import cv2\n'), ((6831, 6883), 'cv2.threshold', 'cv2.threshold', (['gray_image', '(1)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray_image, 1, 255, cv2.THRESH_BINARY)\n', (6844, 6883), False, 'import cv2\n'), ((6918, 6984), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (6934, 6984), False, 'import cv2\n'), ((7410, 7437), 'os.path.basename', 'os.path.basename', (['mask_path'], {}), '(mask_path)\n', 
(7426, 7437), False, 'import os\n'), ((1261, 1357), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['image', '"""polygon"""'], {'label': 'label', 'points': 'poly_string', 'occluded': '"""0"""', 'z_order': '"""1"""'}), "(image, 'polygon', label=label, points=poly_string, occluded=\n '0', z_order='1')\n", (1274, 1357), True, 'import xml.etree.cElementTree as ET\n'), ((3258, 3298), 'os.path.dirname', 'os.path.dirname', (['path_to_annotation_file'], {}), '(path_to_annotation_file)\n', (3273, 3298), False, 'import os\n'), ((4302, 4398), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['image', '"""polygon"""'], {'label': 'label', 'points': 'poly_string', 'occluded': '"""0"""', 'z_order': '"""1"""'}), "(image, 'polygon', label=label, points=poly_string, occluded=\n '0', z_order='1')\n", (4315, 4398), True, 'import xml.etree.cElementTree as ET\n'), ((5096, 5129), 'xml.etree.cElementTree.parse', 'ET.parse', (['path_to_annotation_file'], {}), '(path_to_annotation_file)\n', (5104, 5129), True, 'import xml.etree.cElementTree as ET\n'), ((7239, 7253), 'weed_annotator.annotation_converter.Polygon.Polygon', 'Polygon', (['label'], {}), '(label)\n', (7246, 7253), False, 'from weed_annotator.annotation_converter.Polygon import Polygon\n'), ((7469, 7543), 'weed_annotator.annotation_converter.Annotation.Annotation', 'Annotation', (['img_name', 'rgb_mask.shape[0]', 'rgb_mask.shape[1]', 'None', 'pol_list'], {}), '(img_name, rgb_mask.shape[0], rgb_mask.shape[1], None, pol_list)\n', (7479, 7543), False, 'from weed_annotator.annotation_converter.Annotation import Annotation\n'), ((8426, 8446), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (8435, 8446), False, 'import json\n'), ((9464, 9526), 'weed_annotator.annotation_converter.Annotation.Annotation', 'Annotation', (['img_name', 'img_width', 'img_height', 'bb_list', 'pol_list'], {}), '(img_name, img_width, img_height, bb_list, pol_list)\n', (9474, 9526), False, 'from 
weed_annotator.annotation_converter.Annotation import Annotation\n'), ((3402, 3435), 'xml.etree.cElementTree.parse', 'ET.parse', (['path_to_annotation_file'], {}), '(path_to_annotation_file)\n', (3410, 3435), True, 'import xml.etree.cElementTree as ET\n'), ((5456, 5489), 'xml.etree.cElementTree.parse', 'ET.parse', (['path_to_annotation_file'], {}), '(path_to_annotation_file)\n', (5464, 5489), True, 'import xml.etree.cElementTree as ET\n'), ((8701, 8730), 'weed_annotator.annotation_converter.Polygon.Polygon', 'Polygon', (["object['classTitle']"], {}), "(object['classTitle'])\n", (8708, 8730), False, 'from weed_annotator.annotation_converter.Polygon import Polygon\n'), ((9214, 9286), 'weed_annotator.annotation_converter.BoundingBox.BoundingBox', 'BoundingBox', (["object['classTitle']", 'left', 'top', '(right - left)', '(bottom - top)'], {}), "(object['classTitle'], left, top, right - left, bottom - top)\n", (9225, 9286), False, 'from weed_annotator.annotation_converter.BoundingBox import BoundingBox\n'), ((9378, 9411), 'os.path.basename', 'os.path.basename', (['annotation_file'], {}), '(annotation_file)\n', (9394, 9411), False, 'import os\n'), ((7107, 7135), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (7120, 7135), False, 'import cv2\n'), ((2811, 2839), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['ssse', '"""label"""'], {}), "(ssse, 'label')\n", (2824, 2839), True, 'import xml.etree.cElementTree as ET\n'), ((2958, 2996), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['new_label', '"""attributes"""'], {}), "(new_label, 'attributes')\n", (2971, 2996), True, 'import xml.etree.cElementTree as ET\n'), ((2876, 2908), 'xml.etree.cElementTree.SubElement', 'ET.SubElement', (['new_label', '"""name"""'], {}), "(new_label, 'name')\n", (2889, 2908), True, 'import xml.etree.cElementTree as ET\n')] |
from __future__ import absolute_import, division, print_function
import time
import cPickle
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from data.reader import data_iterator, ptb_raw_data
import models.stacked as stacked
from utils import cast_floatX, get_noise_x, load_params, save_params
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', default="data", help='path')
parser.add_argument('--dataset', default="ptb", help='dataset', choices=['ptb', 'enwik8'])
parser.add_argument('--model', default="rhn", help='model to stack', choices=['rhn', 'rnn','lstm'])
parser.add_argument('--hidden_size', type=int, default=830, help='Hidden layer dimension')
parser.add_argument('--epochs', type=int, default=300, help='total number of epochs to train for')
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--depth', type=int, default=10, help='the recurrence transition depth for rhn')
parser.add_argument('--learning_rate', type=float, default=0.2, help='learning rate')
parser.add_argument('--load_model', default="", help='load model')
parser.add_argument('--tied_embeddings', type=bool, default=True, help="use same embedding matrix for input and output word embeddings")
parser.add_argument('--tied_noise', type=bool, default=True, help="use same dropout masks for the T and H non-linearites")
config = parser.parse_args()
config.seed = 1234
config.init_scale = 0.04
config.init_T_bias = -2.0
config.lr_decay = 1.02
config.weight_decay = 1e-7
config.max_grad_norm = 10
config.num_steps = 35
config.max_epoch = 20 # number of epochs after which learning decay starts
config.drop_x = 0.25 # variational dropout rate over input word embeddings
config.drop_i = 0.75 # variational dropout rate over inputs of RHN layers(s), applied seperately in each RHN layer
config.drop_s = 0.25 # variational dropout rate over recurrent state
config.drop_o = 0.75 # variational dropout rate over outputs of RHN layer(s), applied before classification layer
config.vocab_size = 10000
print("Data loading")
train_data, valid_data, test_data, _ = ptb_raw_data(config.data_path)
print('Compiling model')
_is_training = T.iscalar('is_training')
_lr = theano.shared(cast_floatX(config.learning_rate), 'lr')
_input_data = T.imatrix('input_data') # (batch_size, num_steps)
_noise_x = T.matrix('noise_x') # (batch_size, num_steps)
# model
_theano_rng = RandomStreams(config.seed // 2 + 321) # generates random numbers directly on GPU
flat_probs, params, rhn_updates, hidden_states = stacked.model(_input_data,
_noise_x,
_lr,
_is_training,
config,
_theano_rng)
# loss
_targets = T.imatrix('targets') # (batch_size, num_steps)
flat_targets = _targets.T.flatten()
xentropies = T.nnet.categorical_crossentropy(flat_probs, flat_targets) # (batch_size * num_steps,)
pred_loss = xentropies.sum() / config.batch_size
l2_loss = 0.5 * T.sum(T.stack([T.sum(p**2) for p in params])) # regularization
loss = pred_loss + config.weight_decay * l2_loss
# compute gradients
grads = theano.grad(loss, params)
global_grad_norm = T.sqrt(T.sum(T.stack([T.sum(g**2) for g in grads]))) # gradient clipping
clip_factor = theano.ifelse.ifelse(global_grad_norm < config.max_grad_norm,
cast_floatX(1),
T.cast(config.max_grad_norm / global_grad_norm, theano.config.floatX))
param_updates = [(p, p - _lr * clip_factor * g) for p, g in zip(params, grads)]
num_params = np.sum([param.get_value().size for param in params])
train = theano.function(
[_input_data, _targets, _noise_x],
loss,
givens = {_is_training: np.int32(1)},
updates = rhn_updates + param_updates)
evaluate = theano.function(
[_input_data, _targets],
loss,
# Note that noise_x is unused in computation graph of this function since _is_training is false.
givens = {_is_training: np.int32(0), _noise_x: T.zeros((config.batch_size, config.num_steps))},
updates = rhn_updates)
print('Done. Number of parameters: %d' % num_params)
if config.load_model:
print('Loading model...')
load_params(config.load_model, params)
def run_epoch(data, config, is_train, verbose=False):
"""Run the model on the given data."""
epoch_size = ((len(data) // config.batch_size) - 1) // config.num_steps
start_time = time.time()
costs = 0.0
iters = 0
for hidden_state in hidden_states:
hidden_state.set_value(np.zeros_like(hidden_state.get_value()))
for step, (x, y) in enumerate(data_iterator(data, config.batch_size, config.num_steps)):
if is_train:
noise_x = get_noise_x(x, config.drop_x)
cost = train(x, y, noise_x)
else:
cost = evaluate(x, y)
costs += cost
iters += config.num_steps
if verbose and step % (epoch_size // 10) == 10:
print("%.3f perplexity: %.3f speed: %.0f wps" % (step * 1.0 / epoch_size, np.exp(costs / iters),
iters * config.batch_size / (time.time() - start_time)))
return np.exp(costs / iters)
trains, vals, tests, best_val, save_path = [np.inf], [np.inf], [np.inf], np.inf, None
for i in range(config.epochs):
lr_decay = config.lr_decay ** max(i - config.max_epoch + 1, 0.0)
_lr.set_value(cast_floatX(config.learning_rate / lr_decay))
print("Epoch: %d Learning rate: %.3f" % (i + 1, _lr.get_value()))
train_perplexity = run_epoch(train_data, config, is_train=True, verbose=True)
print("Epoch: %d Train Perplexity: %.3f, Bits: %.3f" % (i + 1, train_perplexity, np.log2(train_perplexity)))
valid_perplexity = run_epoch(valid_data, config, is_train=False)
print("Epoch: %d Valid Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, valid_perplexity, np.log2(valid_perplexity)))
test_perplexity = run_epoch(test_data, config, is_train=False)
print("Epoch: %d Test Perplexity (batched): %.3f, Bits: %.3f" % (i + 1, test_perplexity, np.log2(test_perplexity)))
trains.append(train_perplexity)
vals.append(valid_perplexity)
tests.append(test_perplexity)
if valid_perplexity < best_val:
best_val = valid_perplexity
print("Best Batched Valid Perplexity improved to %.03f" % best_val)
save_params('./theano_rhn_' + config.dataset + '_' + str(config.seed) + '_best_model.pkl', params)
print("Training is over.")
best_val_epoch = np.argmin(vals)
print("Best Batched Validation Perplexity %.03f (Bits: %.3f) was at Epoch %d" %
(vals[best_val_epoch], np.log2(vals[best_val_epoch]), best_val_epoch))
print("Training Perplexity at this Epoch was %.03f, Bits: %.3f" %
(trains[best_val_epoch], np.log2(trains[best_val_epoch])))
print("Batched Test Perplexity at this Epoch was %.03f, Bits: %.3f" %
(tests[best_val_epoch], np.log2(tests[best_val_epoch])))
| [
"theano.tensor.nnet.categorical_crossentropy",
"argparse.ArgumentParser",
"data.reader.data_iterator",
"numpy.argmin",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"utils.get_noise_x",
"models.stacked.model",
"numpy.exp",
"theano.tensor.iscalar",
"data.reader.ptb_raw_data",
"utils.cast_floatX",
... | [((404, 429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (427, 429), False, 'import argparse\n'), ((2312, 2342), 'data.reader.ptb_raw_data', 'ptb_raw_data', (['config.data_path'], {}), '(config.data_path)\n', (2324, 2342), False, 'from data.reader import data_iterator, ptb_raw_data\n'), ((2384, 2408), 'theano.tensor.iscalar', 'T.iscalar', (['"""is_training"""'], {}), "('is_training')\n", (2393, 2408), True, 'import theano.tensor as T\n'), ((2484, 2507), 'theano.tensor.imatrix', 'T.imatrix', (['"""input_data"""'], {}), "('input_data')\n", (2493, 2507), True, 'import theano.tensor as T\n'), ((2549, 2568), 'theano.tensor.matrix', 'T.matrix', (['"""noise_x"""'], {}), "('noise_x')\n", (2557, 2568), True, 'import theano.tensor as T\n'), ((2629, 2666), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'RandomStreams', (['(config.seed // 2 + 321)'], {}), '(config.seed // 2 + 321)\n', (2642, 2666), True, 'from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n'), ((2764, 2840), 'models.stacked.model', 'stacked.model', (['_input_data', '_noise_x', '_lr', '_is_training', 'config', '_theano_rng'], {}), '(_input_data, _noise_x, _lr, _is_training, config, _theano_rng)\n', (2777, 2840), True, 'import models.stacked as stacked\n'), ((3180, 3200), 'theano.tensor.imatrix', 'T.imatrix', (['"""targets"""'], {}), "('targets')\n", (3189, 3200), True, 'import theano.tensor as T\n'), ((3286, 3343), 'theano.tensor.nnet.categorical_crossentropy', 'T.nnet.categorical_crossentropy', (['flat_probs', 'flat_targets'], {}), '(flat_probs, flat_targets)\n', (3317, 3343), True, 'import theano.tensor as T\n'), ((3579, 3604), 'theano.grad', 'theano.grad', (['loss', 'params'], {}), '(loss, params)\n', (3590, 3604), False, 'import theano\n'), ((2429, 2462), 'utils.cast_floatX', 'cast_floatX', (['config.learning_rate'], {}), '(config.learning_rate)\n', (2440, 2462), False, 'from utils import cast_floatX, get_noise_x, load_params, save_params\n'), ((3777, 
3791), 'utils.cast_floatX', 'cast_floatX', (['(1)'], {}), '(1)\n', (3788, 3791), False, 'from utils import cast_floatX, get_noise_x, load_params, save_params\n'), ((3797, 3866), 'theano.tensor.cast', 'T.cast', (['(config.max_grad_norm / global_grad_norm)', 'theano.config.floatX'], {}), '(config.max_grad_norm / global_grad_norm, theano.config.floatX)\n', (3803, 3866), True, 'import theano.tensor as T\n'), ((4578, 4616), 'utils.load_params', 'load_params', (['config.load_model', 'params'], {}), '(config.load_model, params)\n', (4589, 4616), False, 'from utils import cast_floatX, get_noise_x, load_params, save_params\n'), ((4809, 4820), 'time.time', 'time.time', ([], {}), '()\n', (4818, 4820), False, 'import time\n'), ((5534, 5555), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (5540, 5555), True, 'import numpy as np\n'), ((6865, 6880), 'numpy.argmin', 'np.argmin', (['vals'], {}), '(vals)\n', (6874, 6880), True, 'import numpy as np\n'), ((4994, 5050), 'data.reader.data_iterator', 'data_iterator', (['data', 'config.batch_size', 'config.num_steps'], {}), '(data, config.batch_size, config.num_steps)\n', (5007, 5050), False, 'from data.reader import data_iterator, ptb_raw_data\n'), ((5763, 5807), 'utils.cast_floatX', 'cast_floatX', (['(config.learning_rate / lr_decay)'], {}), '(config.learning_rate / lr_decay)\n', (5774, 5807), False, 'from utils import cast_floatX, get_noise_x, load_params, save_params\n'), ((4118, 4129), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (4126, 4129), True, 'import numpy as np\n'), ((4372, 4383), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (4380, 4383), True, 'import numpy as np\n'), ((4395, 4441), 'theano.tensor.zeros', 'T.zeros', (['(config.batch_size, config.num_steps)'], {}), '((config.batch_size, config.num_steps))\n', (4402, 4441), True, 'import theano.tensor as T\n'), ((5090, 5119), 'utils.get_noise_x', 'get_noise_x', (['x', 'config.drop_x'], {}), '(x, config.drop_x)\n', (5101, 5119), False, 'from 
utils import cast_floatX, get_noise_x, load_params, save_params\n'), ((3453, 3466), 'theano.tensor.sum', 'T.sum', (['(p ** 2)'], {}), '(p ** 2)\n', (3458, 3466), True, 'import theano.tensor as T\n'), ((3646, 3659), 'theano.tensor.sum', 'T.sum', (['(g ** 2)'], {}), '(g ** 2)\n', (3651, 3659), True, 'import theano.tensor as T\n'), ((6048, 6073), 'numpy.log2', 'np.log2', (['train_perplexity'], {}), '(train_perplexity)\n', (6055, 6073), True, 'import numpy as np\n'), ((6241, 6266), 'numpy.log2', 'np.log2', (['valid_perplexity'], {}), '(valid_perplexity)\n', (6248, 6266), True, 'import numpy as np\n'), ((6430, 6454), 'numpy.log2', 'np.log2', (['test_perplexity'], {}), '(test_perplexity)\n', (6437, 6454), True, 'import numpy as np\n'), ((6996, 7025), 'numpy.log2', 'np.log2', (['vals[best_val_epoch]'], {}), '(vals[best_val_epoch])\n', (7003, 7025), True, 'import numpy as np\n'), ((7147, 7178), 'numpy.log2', 'np.log2', (['trains[best_val_epoch]'], {}), '(trains[best_val_epoch])\n', (7154, 7178), True, 'import numpy as np\n'), ((7287, 7317), 'numpy.log2', 'np.log2', (['tests[best_val_epoch]'], {}), '(tests[best_val_epoch])\n', (7294, 7317), True, 'import numpy as np\n'), ((5386, 5407), 'numpy.exp', 'np.exp', (['(costs / iters)'], {}), '(costs / iters)\n', (5392, 5407), True, 'import numpy as np\n'), ((5495, 5506), 'time.time', 'time.time', ([], {}), '()\n', (5504, 5506), False, 'import time\n')] |
'''
Utility functions, etc. for working with SVG documents.
'''
from __future__ import absolute_import
import logging
import re
from six.moves import map
import numpy as np
import six
from functools import reduce
__all__ = ['shape_points', 'get_CTM', 'get_current_transformation_matrix']
CRE_TRANSFORM = re.compile(r'(?P<operation>(skew[XY]|scale|translate|rotate))'
r'\((?P<args>[^\)]+)\)')
FLOAT_PATTERN = r'[+-]?\d+(\.\d+)?([eE][+-]?\d+)?' # 2, 1.23, 23e39, 1.23e-6, etc.
CRE_PATH_COMMAND = re.compile(r'((?P<xy_command>[ML])\s*(?P<x>{0}),\s*(?P<y>{0})\s*|'
r'(?P<x_command>[H])\s*(?P<hx>{0})\s*|'
r'(?P<y_command>[V])\s*(?P<vy>{0})\s*|'
r'(?P<command>[Z]\s*))'
.format(FLOAT_PATTERN))
def shape_path_points(svg_path_d):
'''
Parameters
----------
svg_path_d : str
``"d"`` attribute of SVG ``path`` element.
Returns
-------
list
List of coordinates of points found in SVG path.
Each point is represented by a dictionary with keys ``x`` and ``y``.
'''
# TODO Add support for relative commands, e.g., `l, h, v`.
def _update_path_state(path_state, match):
if match.group('xy_command'):
for dim_j in 'xy':
path_state[dim_j] = float(match.group(dim_j))
if path_state.get('x0') is None:
for dim_j in 'xy':
path_state['%s0' % dim_j] = path_state[dim_j]
elif match.group('x_command'):
path_state['x'] = float(match.group('hx'))
elif match.group('y_command'):
path_state['y'] = float(match.group('vy'))
elif match.group('command') == 'Z':
for dim_j in 'xy':
path_state[dim_j] = path_state['%s0' % dim_j]
return path_state
# Some commands in a SVG path element `"d"` attribute require previous state.
#
# For example, the `"H"` command is a horizontal move, so the previous
# ``y`` position is required to resolve the new `(x, y)` position.
#
# Iterate through the commands in the `"d"` attribute in order and maintain
# the current path position in the `path_state` dictionary.
#
# See [here][1] for more information.
#
# [1]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d
path_state = {'x': None, 'y': None}
return [{k: v for k, v in six.iteritems(_update_path_state(path_state,
match_i))
if k in 'xy'}
for match_i in CRE_PATH_COMMAND.finditer(svg_path_d)]
def get_transform(op):
'''
See SVG `transform`_ documentation for matrix definitions.
_transform:: https://www.w3.org/TR/SVG11/coords.html#TransformMatrixDefined
'''
# Start with identity matrix (no transform).
T = np.eye(3)
args = list(map(float, re.split(r'\s*[,\s]\s*', op['args'])))
if op['operation'] == 'matrix':
T[0] = args[:3]
T[1] = args[3:]
elif op['operation'] == 'translate':
if len(args) == 1:
args.append(args[0])
T[0, 2] = args[0]
T[1, 2] = args[1]
elif op['operation'] == 'scale':
if len(args) == 1:
args.append(args[0])
T[0, 0] = args[0]
T[1, 1] = args[1]
elif op['operation'] == 'rotate':
angle = (args[0] / 180.) * np.pi
T = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
if len(args) == 3:
# Rotation point was specified; `(cx, cy)`.
rotate_angle, cx, cy = args
# Translate, perform rotation, and translate back.
C = np.eye(3)
C[:2, 2] = cx, cy
C_reverse = C.copy()
C_reverse[:2, 2] = -cx, -cy
T = reduce(np.matmul, (C, T, C_reverse))
elif op['operation'] == 'skewX':
T[0, 1] = np.tan(args[0] / 180 * np.pi)
elif op['operation'] == 'skewY':
T[1, 0] = np.tan(args[0] / 180 * np.pi)
return T
def get_current_transformation_matrix(element):
transforms = []
parents = [element.getparent()]
while True:
parent = parents[-1].getparent()
if parent is None:
break
parents.append(parent)
for i, parent in enumerate(parents[::-1]):
if 'transform' in parent.attrib:
transforms_i = [get_transform(match.groupdict())
for match in CRE_TRANSFORM
.finditer(parent.attrib['transform'])]
transforms.extend(transforms_i)
if transforms:
return reduce(np.matmul, transforms)
else:
return np.eye(3, dtype=float)
def get_CTM(*args, **kwargs):
'''
Alias for :func:`get_current_transformation_matrix()`.
'''
return get_current_transformation_matrix(*args, **kwargs)
def shape_points(svg_element):
'''
Parameters
----------
svg_element : lxml.etree.Element
Either a ``<svg:path>`` or ``<svg:polygon>`` element.
Returns
-------
list
List of coordinates of points found in SVG shape element.
Each point is represented by a dictionary with keys ``x`` and ``y``.
.. versionchanged:: 0.4.0
Add support for Python 3.
'''
if svg_element.tag.endswith('/svg}path'):
# Decode `svg:path` vertices from [`"d"`][1] attribute.
#
# [1]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/d
points = shape_path_points(svg_element.attrib['d'])
# Convert dictionary points to lists.
points = [[p['x'], p['y']] for p in points]
elif svg_element.tag.endswith('/svg}polygon'):
# Decode `svg:polygon` vertices from [`"points"`][2] attribute.
#
# [2]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/points
points = [list(map(float, v.split(',')))
for i, v in enumerate(svg_element.attrib['points'].strip()
.split(' '))]
elif svg_element.tag.endswith('/svg}line'):
# Decode `svg:polygon` vertices from [`"points"`][2] attribute.
#
# [2]: https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/points
points = [list(map(float, p))
for p in [[svg_element.attrib['x1'],
svg_element.attrib['y1']],
[svg_element.attrib['x2'],
svg_element.attrib['y2']]]]
else:
raise NotImplementedError('Unsupported SVG tag: `%s`', svg_element.tag)
points = np.asarray(points)
T = get_CTM(svg_element)
if not np.equal(T, np.eye(3)).all():
if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
logging.debug('Applying transformation matrix: `%s`',
T[:2].ravel())
padding = np.expand_dims(np.array([1] * len(points)), axis=1)
points_padded = np.append(points, padding, axis=1)
points = np.matmul(T, points_padded.T)[:2].T
return points.tolist()
| [
"re.split",
"numpy.asarray",
"six.moves.map",
"numpy.append",
"numpy.tan",
"numpy.sin",
"numpy.matmul",
"numpy.cos",
"functools.reduce",
"numpy.eye",
"logging.getLogger",
"re.compile"
] | [((321, 411), 're.compile', 're.compile', (['"""(?P<operation>(skew[XY]|scale|translate|rotate))\\\\((?P<args>[^\\\\)]+)\\\\)"""'], {}), "(\n '(?P<operation>(skew[XY]|scale|translate|rotate))\\\\((?P<args>[^\\\\)]+)\\\\)')\n", (331, 411), False, 'import re\n'), ((3018, 3027), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3024, 3027), True, 'import numpy as np\n'), ((6953, 6971), 'numpy.asarray', 'np.asarray', (['points'], {}), '(points)\n', (6963, 6971), True, 'import numpy as np\n'), ((4906, 4935), 'functools.reduce', 'reduce', (['np.matmul', 'transforms'], {}), '(np.matmul, transforms)\n', (4912, 4935), False, 'from functools import reduce\n'), ((4963, 4985), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'float'}), '(3, dtype=float)\n', (4969, 4985), True, 'import numpy as np\n'), ((7321, 7355), 'numpy.append', 'np.append', (['points', 'padding'], {'axis': '(1)'}), '(points, padding, axis=1)\n', (7330, 7355), True, 'import numpy as np\n'), ((3056, 3094), 're.split', 're.split', (['"""\\\\s*[,\\\\s]\\\\s*"""', "op['args']"], {}), "('\\\\s*[,\\\\s]\\\\s*', op['args'])\n", (3064, 3094), False, 'import re\n'), ((7374, 7403), 'numpy.matmul', 'np.matmul', (['T', 'points_padded.T'], {}), '(T, points_padded.T)\n', (7383, 7403), True, 'import numpy as np\n'), ((7028, 7037), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (7034, 7037), True, 'import numpy as np\n'), ((7058, 7077), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7075, 7077), False, 'import logging\n'), ((3936, 3945), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3942, 3945), True, 'import numpy as np\n'), ((4069, 4105), 'functools.reduce', 'reduce', (['np.matmul', '(C, T, C_reverse)'], {}), '(np.matmul, (C, T, C_reverse))\n', (4075, 4105), False, 'from functools import reduce\n'), ((4163, 4192), 'numpy.tan', 'np.tan', (['(args[0] / 180 * np.pi)'], {}), '(args[0] / 180 * np.pi)\n', (4169, 4192), True, 'import numpy as np\n'), ((6605, 6618), 'six.moves.map', 'map', (['float', 'p'], {}), 
'(float, p)\n', (6608, 6618), False, 'from six.moves import map\n'), ((4250, 4279), 'numpy.tan', 'np.tan', (['(args[0] / 180 * np.pi)'], {}), '(args[0] / 180 * np.pi)\n', (4256, 4279), True, 'import numpy as np\n'), ((3601, 3614), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3607, 3614), True, 'import numpy as np\n'), ((3660, 3673), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3666, 3673), True, 'import numpy as np\n'), ((3675, 3688), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3681, 3688), True, 'import numpy as np\n'), ((3617, 3630), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3623, 3630), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from settings import float_type, jitter_level,std_qmu_init, np_float_type, np_int_type
from functions import eye, variational_expectations, block_diagonal
from mean_functions import Zero
from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag
from conditionals import conditional, conditional_batch, conditional_stack
from quadrature import hermgauss
class ChainedGPs_DS(object):
"""
Chained Gaussian Processes
The key reference for this algorithm is:
::
@article{saul2016chained,
title={Chained Gaussian Processes},
author={<NAME> and <NAME> <NAME> <NAME>},
journal={arXiv preprint arXiv:1604.05263},
year={2016}
}
Independent latent GPs : f_c ~ GP(m_c,k_c), for all c
Mean field posterior : q(F,U)=\prod_c q_c(f_c,u_c),
Sparse GP posterior : q_c(f_c,u_c) = p_c(f_c|u_c)q(u_c), for all c
Arbitrary likelihood p(y_n|x_n, F) = p(y_n|x_n, F_n), for all i
"""
def __init__(self, X, Y, kerns,likelihood,Zs,mean_functions=None, whiten=True,q_diag=False, f_indices=None,
n_samp=10):
'''
- X is a data matrix, size N x D
- Y is a data matrix, size N x R
- kerns, likelihood, mean_functions are appropriate (single or list of) GPflow objects
- Zs is a list of matrices of pseudo inputs, size M[k] x C
- num_latent is the number of latent process to use, default to
Y.shape[1]
- q_diag is a boolean. If True, the covariance is approximated by a
diagonal matrix.
- whiten is a boolean. If True, we use the whitened representation of
the inducing points.
'''
self.likelihood = likelihood
self.kerns = kerns
self.C = len(kerns)
self.D = X.get_shape()[1]
self.mean_functions = [Zero() for _ in range(self.C)] if mean_functions is None else mean_functions
self.f_indices = f_indices # function of one variable
self.X = X
self.Y = Y
self.Zs = Zs
self.num_inducing = [z.get_shape()[0] for z in Zs]
self.num_latent = Y.get_shape()[-1]
self.num_data = Y.get_shape()[0]
self.whiten=whiten
self.q_diag = q_diag
self.n_samp =n_samp
self.initialize_inference()
def initialize_inference(self):
with tf.variable_scope("inference") as scope:
self.q_mu,self.q_sqrt = [],[]
for c in range(self.C):
q_mu = np.ones((self.num_inducing[c], self.num_latent))
#np.random.randn(self.num_inducing[c], self.num_latent)*std_qmu_init
self.q_mu.append( tf.get_variable("q_mu%d"%c,[self.num_inducing[c], self.num_latent],\
initializer=tf.constant_initializer(q_mu,\
dtype=float_type)))
if self.q_diag:
q_sqrt = np.ones((self.num_inducing[c], self.num_latent))
self.q_sqrt.append( tf.get_variable("q_sqrt%d"%c,[self.num_inducing[c],self.num_latent], \
initializer=tf.constant_initializer(q_sqrt,dtype=float_type)) )
else:
q_sqrt = np.array([np.eye(self.num_inducing[c]) for _ in range(self.num_latent)]).swapaxes(0, 2)
self.q_sqrt.append( tf.get_variable("q_sqrt%d"%c,[self.num_inducing[c],self.num_inducing[c],self.num_latent], \
initializer=tf.constant_initializer(q_sqrt,dtype=float_type)) )
def build_prior_KL(self):
KL = tf.Variable(0,name='KL',trainable=False,dtype=float_type)
for i in range(self.C):
if self.whiten:
if self.q_diag:
KL += gauss_kl_white_diag(self.q_mu[i], self.q_sqrt[i])
else:
KL += gauss_kl_white(self.q_mu[i], self.q_sqrt[i])
else:
K = self.kerns[i].K(self.Zs[self.f_indices[i]]) + eye(self.num_inducing[i]) * jitter_level
if self.q_diag:
KL += gauss_kl_diag(self.q_mu[i], self.q_sqrt[i], K)
else:
KL += gauss_kl(self.q_mu[i], self.q_sqrt[i], K)
return KL
def get_covariate(self,Xnew,c):
return tf.transpose(tf.gather(tf.transpose(Xnew),self.f_indices[c]))
def build_predict_fs(self, Xnew):
mus, vars = [],[]
for c in range(self.C):
x = self.get_covariate(Xnew,c)
mu, var = conditional(x, self.Zs[c], self.kerns[c], self.q_mu[c],
q_sqrt=self.q_sqrt[c], full_cov=False, whiten=self.whiten)
mus.append(mu+self.mean_functions[c](x))
vars.append(var)
return tf.stack(mus),tf.stack(vars)
def sample_fs(self, Xnew):
fs_mean, fs_var = self.build_predict_fs( Xnew) # C x N x R
samples_shape = fs_mean.get_shape().as_list() + [self.n_samp]
return tf.random_normal(shape=samples_shape, dtype=float_type) * \
tf.sqrt(tf.expand_dims(fs_var, -1)) + tf.expand_dims(fs_mean, -1)
def sample_predictor(self,Xnew):
raise NotImplementedError
def build_likelihood(self):
"""
This gives a variational bound on the model likelihood.
"""
cost = -self.build_prior_KL()
samples_pred = self.sample_predictor(self.X)
sto_exp = self.likelihood.logp(samples_pred,tf.expand_dims(self.Y,-1))
cost += tf.reduce_sum(sto_exp)/self.n_samp
return cost
class SVAGP_DS(ChainedGPs_DS):
"""
Sparse Variational Additive Gaussian Process
"""
def sample_predictor(self,Xnew):
samples_fs =self.sample_fs(Xnew)
return tf.reduce_sum(samples_fs,axis=0)
class SVMGP_DS(ChainedGPs_DS):
"""
Sparse Variational Multiplicative Gaussian Process
"""
def sample_predictor(self,Xnew):
samples_fs =self.sample_fs(Xnew)
return tf.reduce_prod(samples_fs,axis=0)
class SVGP(object):
def __init__(self, X, Y, kern,likelihood,Z,mean_function=Zero(),num_latent=None, whiten=True,q_diag=True):
self.likelihood = likelihood
self.kern = kern
self.mean_function = mean_function
self.X = X
self.Y = Y
self.Z = Z
self.D = X.get_shape()[1]
self.num_latent = Y.get_shape()[1]
self.num_inducing = Z.get_shape()[0]
self.num_data = Y.get_shape()[0]
self.whiten=whiten
self.q_diag = q_diag
self.q_mu = tf.get_variable("q_mu",[self.num_inducing, self.num_latent],
initializer=tf.constant_initializer(np.zeros((self.num_inducing, self.num_latent))))
if self.q_diag:
q_sqrt = np.ones((self.num_inducing, self.num_latent))
self.q_sqrt= tf.get_variable("q_sqrt",[self.num_inducing,self.num_latent], \
initializer=tf.constant_initializer(q_sqrt,dtype=float_type))
else:
q_sqrt = np.array([np.eye(self.num_inducing) for _ in range(self.num_latent)]).swapaxes(0, 2)
self.q_sqrt= tf.get_variable("q_sqrt",[self.num_inducing,self.num_inducing,self.num_latent], \
initializer=tf.constant_initializer(q_sqrt,dtype=float_type))
def build_prior_KL(self):
if self.whiten:
if self.q_diag:
KL = gauss_kl_white_diag(self.q_mu, self.q_sqrt)
else:
KL = gauss_kl_white(self.q_mu, self.q_sqrt)
else:
K = self.kern.K(self.Z) + eye(self.num_inducing) * jitter_level
if self.q_diag:
KL = gauss_kl_diag(self.q_mu, self.q_sqrt, K)
else:
KL = gauss_kl(self.q_mu, self.q_sqrt, K)
return KL
def build_predictor(self,Xnew, full_cov=False):
return self.build_predict(Xnew, full_cov=full_cov)
def build_likelihood(self):
"""
This gives a variational bound on the model likelihood.
"""
# Get prior KL.
KL = self.build_prior_KL()
# Get conditionals
fmean, fvar = self.build_predictor(self.X, full_cov=False)
# Get variational expectations.
var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y)
# re-scale for minibatch size
scale = tf.cast(self.num_data, float_type) / \
tf.cast(tf.shape(self.X)[0], float_type)
return tf.reduce_sum(var_exp) * scale - KL
def build_predict(self, Xnew, full_cov=False):
mu, var = conditional(Xnew, self.Z, self.kern, self.q_mu,
q_sqrt=self.q_sqrt, full_cov=full_cov, whiten=self.whiten)
return mu + self.mean_function(Xnew), var
def build_predict_batch(self,Xnew):
mu, var = conditional_batch(Xnew, self.Z, self.kern, self.q_mu,
q_sqrt=self.q_sqrt, whiten=self.whiten)
return mu , var # (removed mean for now)
class SVGP_shared(SVGP):
def build_predictor(self,Xnew, full_cov=False):
fmean, fvar = self.build_predict_batch(Xnew)
return tf.reduce_sum(fmean,1),tf.reduce_sum(fvar,[1,2])
class CSVGPs(object):
"""
Coupled SVGPs
"""
def __init__(self, X, Y, kerns,likelihood,Zs, n_param=0,
f_indices=None,mean_functions=None, whiten=True, W=None,
n_samp=50,sampling=False):
'''
- X is a data matrix, size N x D
- Y is a data matrix, size N x R
- kerns/mean_functions: list of C kernels/mean_functions
- likelihood: Likelihood object
- Zs: list of C matrices of pseudo inputs, size M[c] x D[c]
- q_diag is a boolean. If True, the covariance is approximated by a
diagonal matrix.
- whiten is a boolean. If True, we use the whitened representation of
the inducing points.
'''
self.likelihood = likelihood
self.kerns = kerns
self.C = len(kerns)
self.D = X.get_shape()[1]
self.mean_functions = [Zero() for _ in range(self.C)] if mean_functions is None else mean_functions
self.f_indices = f_indices # function of one variable
self.X = X
self.Y = Y
self.f_indices = [[c] for c in range(self.C)] if f_indices is None else f_indices
self.num_latent = Y.get_shape()[-1]
self.num_data = Y.get_shape()[0]
self.whiten=whiten
self.n_samp =n_samp
self.sampling = sampling
self.W = W if W is not None else np.ones((self.C,1),dtype=np.float32)
self.num_inducing = [z.shape[0] for z in Zs]
self.Mtot = np.sum(self.num_inducing)
with tf.variable_scope("inference") as scope:
q_mu = np.random.randn(self.Mtot, self.num_latent)*std_qmu_init
self.q_mu = tf.get_variable("q_mu", [self.Mtot, self.num_latent],
initializer=tf.constant_initializer(q_mu))
q_sqrt = np.array([np.eye(self.Mtot) for _ in range(self.num_latent)]).swapaxes(0, 2)
self.q_sqrt = tf.get_variable("q_sqrt", [self.Mtot, self.Mtot, self.num_latent], \
initializer=tf.constant_initializer(q_sqrt, dtype=float_type))
self.Zs = []
with tf.variable_scope("ind_points") as scope:
for c,z_np in enumerate(Zs):
with tf.variable_scope("ind_points%d" % c) as scope:
self.Zs.append(tf.Variable(z_np, tf.float32, name='Z'))
def get_covariate(self,Xnew,c):
return tf.transpose(tf.gather(tf.transpose(Xnew),self.f_indices[c]))
def build_prior_KL(self):
if self.whiten:
KL = gauss_kl_white(self.q_mu, self.q_sqrt)
else:
diag = [self.kerns[d].K(self.Zs[d]) for d in range(self.C)]
K = block_diagonal(diag)+ \
eye(self.Mtot) * jitter_level
KL = gauss_kl(self.q_mu, self.q_sqrt, K)
return KL
def build_predict_joint(self,Xnew):
"""
Batch Computing of q(f^n_1,...,f^n_C)
- Xnew: N x D
- Output:
- mean: N x C x R
- var: N x C x C x R
"""
mu, var = conditional_stack(Xnew, self.Zs, self.kerns, self.q_mu,f_indices=self.f_indices,\
q_sqrt=self.q_sqrt, whiten=self.whiten)
return mu , var # (removed mean function for now)
def build_predictor(self,Xnew):
raise NotImplementedError
def sample_joint(self,Xnew):
mu, var = self.build_predict_joint(Xnew) # N * C * R, N * C * C * R
L = tf.cholesky( tf.transpose(var,(3,0,1,2)) ) # R x N x C x C
L_tiled = tf.tile( tf.expand_dims(L, 0), [self.n_samp, 1, 1, 1, 1]) # Nsamp x R x N x C x C
shape = tf.shape(L_tiled)[:-1] # Nsamp x R x N x C
x = tf.expand_dims(tf.random_normal(shape),-1) # Nsamp x R x N x C
s = tf.transpose( tf.matmul(L_tiled, x), (0,2,3,1,4) ) # Nsamp x N x C x R x 1
return tf.reshape(s,tf.shape(s)[:-1]) + tf.expand_dims(mu,0) # Nsamp x N x C x R
def build_likelihood(self):
# Get prior KL.
KL = self.build_prior_KL()
if self.sampling:
samples_pred = self.sample_predictor(self.X)
sto_exp = self.likelihood.logp(samples_pred, tf.expand_dims(self.Y, 0))
var_exp = tf.reduce_mean(sto_exp,0)
else:
# Get conditionals
fmean, fvar = self.build_predictor(self.X)
# Get variational expectations.
var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y)
return tf.reduce_sum(var_exp) - KL
class SVAGP(CSVGPs):
    """Additive sparse variational GP: the predictor is a W-weighted sum over components."""

    def build_predictor(self, Xnew):
        """Collapse the joint posterior over the C components into predictor moments."""
        fmean, fvar = self.build_predict_joint(Xnew)
        mean = tf.reduce_sum(fmean * self.W, 1)
        var = tf.reduce_sum(fvar * tf.square(self.W), [1, 2])
        return mean, var

    def sample_predictor(self, Xnew):
        """Weighted sum of joint samples over the component axis."""
        joint = self.sample_joint(Xnew)  # Nsamp x N x C x R
        return tf.reduce_sum(joint * self.W, 2)  # Nsamp x N x R
class SVMGP(CSVGPs):
    """Multiplicative sparse variational GP: the predictor multiplies the C components."""

    def sample_predictor(self, Xnew):
        """Product of joint samples over the component axis."""
        joint = self.sample_joint(Xnew)  # Nsamp x N x C x R
        return tf.reduce_prod(joint, 2)  # Nsamp x N x R
"tensorflow.reduce_sum",
"numpy.sum",
"tensorflow.constant_initializer",
"numpy.ones",
"tensorflow.matmul",
"tensorflow.reduce_prod",
"tensorflow.Variable",
"conditionals.conditional_batch",
"numpy.random.randn",
"functions.block_diagonal",
"tensorflow.variable_scope",
"tensorflow.stack",
"t... | [((3690, 3750), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""KL"""', 'trainable': '(False)', 'dtype': 'float_type'}), "(0, name='KL', trainable=False, dtype=float_type)\n", (3701, 3750), True, 'import tensorflow as tf\n'), ((5856, 5889), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['samples_fs'], {'axis': '(0)'}), '(samples_fs, axis=0)\n', (5869, 5889), True, 'import tensorflow as tf\n'), ((6087, 6121), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['samples_fs'], {'axis': '(0)'}), '(samples_fs, axis=0)\n', (6101, 6121), True, 'import tensorflow as tf\n'), ((6205, 6211), 'mean_functions.Zero', 'Zero', ([], {}), '()\n', (6209, 6211), False, 'from mean_functions import Zero\n'), ((8728, 8838), 'conditionals.conditional', 'conditional', (['Xnew', 'self.Z', 'self.kern', 'self.q_mu'], {'q_sqrt': 'self.q_sqrt', 'full_cov': 'full_cov', 'whiten': 'self.whiten'}), '(Xnew, self.Z, self.kern, self.q_mu, q_sqrt=self.q_sqrt,\n full_cov=full_cov, whiten=self.whiten)\n', (8739, 8838), False, 'from conditionals import conditional, conditional_batch, conditional_stack\n'), ((8987, 9084), 'conditionals.conditional_batch', 'conditional_batch', (['Xnew', 'self.Z', 'self.kern', 'self.q_mu'], {'q_sqrt': 'self.q_sqrt', 'whiten': 'self.whiten'}), '(Xnew, self.Z, self.kern, self.q_mu, q_sqrt=self.q_sqrt,\n whiten=self.whiten)\n', (9004, 9084), False, 'from conditionals import conditional, conditional_batch, conditional_stack\n'), ((10848, 10873), 'numpy.sum', 'np.sum', (['self.num_inducing'], {}), '(self.num_inducing)\n', (10854, 10873), True, 'import numpy as np\n'), ((12427, 12553), 'conditionals.conditional_stack', 'conditional_stack', (['Xnew', 'self.Zs', 'self.kerns', 'self.q_mu'], {'f_indices': 'self.f_indices', 'q_sqrt': 'self.q_sqrt', 'whiten': 'self.whiten'}), '(Xnew, self.Zs, self.kerns, self.q_mu, f_indices=self.\n f_indices, q_sqrt=self.q_sqrt, whiten=self.whiten)\n', (12444, 12553), False, 'from conditionals import conditional, conditional_batch, 
conditional_stack\n'), ((14205, 14233), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(s * self.W)', '(2)'], {}), '(s * self.W, 2)\n', (14218, 14233), True, 'import tensorflow as tf\n'), ((14379, 14399), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['s', '(2)'], {}), '(s, 2)\n', (14393, 14399), True, 'import tensorflow as tf\n'), ((2409, 2439), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""inference"""'], {}), "('inference')\n", (2426, 2439), True, 'import tensorflow as tf\n'), ((4623, 4742), 'conditionals.conditional', 'conditional', (['x', 'self.Zs[c]', 'self.kerns[c]', 'self.q_mu[c]'], {'q_sqrt': 'self.q_sqrt[c]', 'full_cov': '(False)', 'whiten': 'self.whiten'}), '(x, self.Zs[c], self.kerns[c], self.q_mu[c], q_sqrt=self.q_sqrt[\n c], full_cov=False, whiten=self.whiten)\n', (4634, 4742), False, 'from conditionals import conditional, conditional_batch, conditional_stack\n'), ((4872, 4885), 'tensorflow.stack', 'tf.stack', (['mus'], {}), '(mus)\n', (4880, 4885), True, 'import tensorflow as tf\n'), ((4886, 4900), 'tensorflow.stack', 'tf.stack', (['vars'], {}), '(vars)\n', (4894, 4900), True, 'import tensorflow as tf\n'), ((5202, 5229), 'tensorflow.expand_dims', 'tf.expand_dims', (['fs_mean', '(-1)'], {}), '(fs_mean, -1)\n', (5216, 5229), True, 'import tensorflow as tf\n'), ((5566, 5592), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.Y', '(-1)'], {}), '(self.Y, -1)\n', (5580, 5592), True, 'import tensorflow as tf\n'), ((5609, 5631), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sto_exp'], {}), '(sto_exp)\n', (5622, 5631), True, 'import tensorflow as tf\n'), ((6888, 6933), 'numpy.ones', 'np.ones', (['(self.num_inducing, self.num_latent)'], {}), '((self.num_inducing, self.num_latent))\n', (6895, 6933), True, 'import numpy as np\n'), ((8509, 8543), 'tensorflow.cast', 'tf.cast', (['self.num_data', 'float_type'], {}), '(self.num_data, float_type)\n', (8516, 8543), True, 'import tensorflow as tf\n'), ((9321, 9344), 'tensorflow.reduce_sum', 'tf.reduce_sum', 
(['fmean', '(1)'], {}), '(fmean, 1)\n', (9334, 9344), True, 'import tensorflow as tf\n'), ((9344, 9371), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['fvar', '[1, 2]'], {}), '(fvar, [1, 2])\n', (9357, 9371), True, 'import tensorflow as tf\n'), ((10736, 10774), 'numpy.ones', 'np.ones', (['(self.C, 1)'], {'dtype': 'np.float32'}), '((self.C, 1), dtype=np.float32)\n', (10743, 10774), True, 'import numpy as np\n'), ((10888, 10918), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""inference"""'], {}), "('inference')\n", (10905, 10918), True, 'import tensorflow as tf\n'), ((11499, 11530), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ind_points"""'], {}), "('ind_points')\n", (11516, 11530), True, 'import tensorflow as tf\n'), ((11913, 11951), 'kullback_leiblers.gauss_kl_white', 'gauss_kl_white', (['self.q_mu', 'self.q_sqrt'], {}), '(self.q_mu, self.q_sqrt)\n', (11927, 11951), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((12145, 12180), 'kullback_leiblers.gauss_kl', 'gauss_kl', (['self.q_mu', 'self.q_sqrt', 'K'], {}), '(self.q_mu, self.q_sqrt, K)\n', (12153, 12180), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((12856, 12887), 'tensorflow.transpose', 'tf.transpose', (['var', '(3, 0, 1, 2)'], {}), '(var, (3, 0, 1, 2))\n', (12868, 12887), True, 'import tensorflow as tf\n'), ((12929, 12949), 'tensorflow.expand_dims', 'tf.expand_dims', (['L', '(0)'], {}), '(L, 0)\n', (12943, 12949), True, 'import tensorflow as tf\n'), ((13019, 13036), 'tensorflow.shape', 'tf.shape', (['L_tiled'], {}), '(L_tiled)\n', (13027, 13036), True, 'import tensorflow as tf\n'), ((13089, 13112), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {}), '(shape)\n', (13105, 13112), True, 'import tensorflow as tf\n'), ((13163, 13184), 'tensorflow.matmul', 'tf.matmul', (['L_tiled', 'x'], {}), '(L_tiled, x)\n', (13172, 13184), True, 'import tensorflow as tf\n'), 
((13272, 13293), 'tensorflow.expand_dims', 'tf.expand_dims', (['mu', '(0)'], {}), '(mu, 0)\n', (13286, 13293), True, 'import tensorflow as tf\n'), ((13595, 13621), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['sto_exp', '(0)'], {}), '(sto_exp, 0)\n', (13609, 13621), True, 'import tensorflow as tf\n'), ((13864, 13886), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['var_exp'], {}), '(var_exp)\n', (13877, 13886), True, 'import tensorflow as tf\n'), ((14022, 14054), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(fmean * self.W)', '(1)'], {}), '(fmean * self.W, 1)\n', (14035, 14054), True, 'import tensorflow as tf\n'), ((1896, 1902), 'mean_functions.Zero', 'Zero', ([], {}), '()\n', (1900, 1902), False, 'from mean_functions import Zero\n'), ((2551, 2599), 'numpy.ones', 'np.ones', (['(self.num_inducing[c], self.num_latent)'], {}), '((self.num_inducing[c], self.num_latent))\n', (2558, 2599), True, 'import numpy as np\n'), ((4422, 4440), 'tensorflow.transpose', 'tf.transpose', (['Xnew'], {}), '(Xnew)\n', (4434, 4440), True, 'import tensorflow as tf\n'), ((5086, 5141), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'samples_shape', 'dtype': 'float_type'}), '(shape=samples_shape, dtype=float_type)\n', (5102, 5141), True, 'import tensorflow as tf\n'), ((7548, 7591), 'kullback_leiblers.gauss_kl_white_diag', 'gauss_kl_white_diag', (['self.q_mu', 'self.q_sqrt'], {}), '(self.q_mu, self.q_sqrt)\n', (7567, 7591), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((7631, 7669), 'kullback_leiblers.gauss_kl_white', 'gauss_kl_white', (['self.q_mu', 'self.q_sqrt'], {}), '(self.q_mu, self.q_sqrt)\n', (7645, 7669), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((7809, 7849), 'kullback_leiblers.gauss_kl_diag', 'gauss_kl_diag', (['self.q_mu', 'self.q_sqrt', 'K'], {}), '(self.q_mu, self.q_sqrt, K)\n', (7822, 7849), False, 'from kullback_leiblers import gauss_kl_white, 
gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((7889, 7924), 'kullback_leiblers.gauss_kl', 'gauss_kl', (['self.q_mu', 'self.q_sqrt', 'K'], {}), '(self.q_mu, self.q_sqrt, K)\n', (7897, 7924), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((8621, 8643), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['var_exp'], {}), '(var_exp)\n', (8634, 8643), True, 'import tensorflow as tf\n'), ((10255, 10261), 'mean_functions.Zero', 'Zero', ([], {}), '()\n', (10259, 10261), False, 'from mean_functions import Zero\n'), ((10948, 10991), 'numpy.random.randn', 'np.random.randn', (['self.Mtot', 'self.num_latent'], {}), '(self.Mtot, self.num_latent)\n', (10963, 10991), True, 'import numpy as np\n'), ((11802, 11820), 'tensorflow.transpose', 'tf.transpose', (['Xnew'], {}), '(Xnew)\n', (11814, 11820), True, 'import tensorflow as tf\n'), ((12054, 12074), 'functions.block_diagonal', 'block_diagonal', (['diag'], {}), '(diag)\n', (12068, 12074), False, 'from functions import eye, variational_expectations, block_diagonal\n'), ((13546, 13571), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.Y', '(0)'], {}), '(self.Y, 0)\n', (13560, 13571), True, 'import tensorflow as tf\n'), ((3001, 3049), 'numpy.ones', 'np.ones', (['(self.num_inducing[c], self.num_latent)'], {}), '((self.num_inducing[c], self.num_latent))\n', (3008, 3049), True, 'import numpy as np\n'), ((3866, 3915), 'kullback_leiblers.gauss_kl_white_diag', 'gauss_kl_white_diag', (['self.q_mu[i]', 'self.q_sqrt[i]'], {}), '(self.q_mu[i], self.q_sqrt[i])\n', (3885, 3915), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((3964, 4008), 'kullback_leiblers.gauss_kl_white', 'gauss_kl_white', (['self.q_mu[i]', 'self.q_sqrt[i]'], {}), '(self.q_mu[i], self.q_sqrt[i])\n', (3978, 4008), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((4192, 4238), 'kullback_leiblers.gauss_kl_diag', 
'gauss_kl_diag', (['self.q_mu[i]', 'self.q_sqrt[i]', 'K'], {}), '(self.q_mu[i], self.q_sqrt[i], K)\n', (4205, 4238), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((4287, 4328), 'kullback_leiblers.gauss_kl', 'gauss_kl', (['self.q_mu[i]', 'self.q_sqrt[i]', 'K'], {}), '(self.q_mu[i], self.q_sqrt[i], K)\n', (4295, 4328), False, 'from kullback_leiblers import gauss_kl_white, gauss_kl_white_diag, gauss_kl, gauss_kl_diag\n'), ((5172, 5198), 'tensorflow.expand_dims', 'tf.expand_dims', (['fs_var', '(-1)'], {}), '(fs_var, -1)\n', (5186, 5198), True, 'import tensorflow as tf\n'), ((6793, 6839), 'numpy.zeros', 'np.zeros', (['(self.num_inducing, self.num_latent)'], {}), '((self.num_inducing, self.num_latent))\n', (6801, 6839), True, 'import numpy as np\n'), ((7069, 7118), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_sqrt'], {'dtype': 'float_type'}), '(q_sqrt, dtype=float_type)\n', (7092, 7118), True, 'import tensorflow as tf\n'), ((7392, 7441), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_sqrt'], {'dtype': 'float_type'}), '(q_sqrt, dtype=float_type)\n', (7415, 7441), True, 'import tensorflow as tf\n'), ((7722, 7744), 'functions.eye', 'eye', (['self.num_inducing'], {}), '(self.num_inducing)\n', (7725, 7744), False, 'from functions import eye, variational_expectations, block_diagonal\n'), ((8572, 8588), 'tensorflow.shape', 'tf.shape', (['self.X'], {}), '(self.X)\n', (8580, 8588), True, 'import tensorflow as tf\n'), ((11135, 11164), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_mu'], {}), '(q_mu)\n', (11158, 11164), True, 'import tensorflow as tf\n'), ((11413, 11462), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_sqrt'], {'dtype': 'float_type'}), '(q_sqrt, dtype=float_type)\n', (11436, 11462), True, 'import tensorflow as tf\n'), ((11603, 11640), 'tensorflow.variable_scope', 'tf.variable_scope', (["('ind_points%d' % c)"], {}), 
"('ind_points%d' % c)\n", (11620, 11640), True, 'import tensorflow as tf\n'), ((12098, 12112), 'functions.eye', 'eye', (['self.Mtot'], {}), '(self.Mtot)\n', (12101, 12112), False, 'from functions import eye, variational_expectations, block_diagonal\n'), ((13252, 13263), 'tensorflow.shape', 'tf.shape', (['s'], {}), '(s)\n', (13260, 13263), True, 'import tensorflow as tf\n'), ((14071, 14088), 'tensorflow.square', 'tf.square', (['self.W'], {}), '(self.W)\n', (14080, 14088), True, 'import tensorflow as tf\n'), ((4093, 4118), 'functions.eye', 'eye', (['self.num_inducing[i]'], {}), '(self.num_inducing[i])\n', (4096, 4118), False, 'from functions import eye, variational_expectations, block_diagonal\n'), ((11686, 11725), 'tensorflow.Variable', 'tf.Variable', (['z_np', 'tf.float32'], {'name': '"""Z"""'}), "(z_np, tf.float32, name='Z')\n", (11697, 11725), True, 'import tensorflow as tf\n'), ((2826, 2873), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_mu'], {'dtype': 'float_type'}), '(q_mu, dtype=float_type)\n', (2849, 2873), True, 'import tensorflow as tf\n'), ((7164, 7189), 'numpy.eye', 'np.eye', (['self.num_inducing'], {}), '(self.num_inducing)\n', (7170, 7189), True, 'import numpy as np\n'), ((11197, 11214), 'numpy.eye', 'np.eye', (['self.Mtot'], {}), '(self.Mtot)\n', (11203, 11214), True, 'import numpy as np\n'), ((3215, 3264), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_sqrt'], {'dtype': 'float_type'}), '(q_sqrt, dtype=float_type)\n', (3238, 3264), True, 'import tensorflow as tf\n'), ((3593, 3642), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['q_sqrt'], {'dtype': 'float_type'}), '(q_sqrt, dtype=float_type)\n', (3616, 3642), True, 'import tensorflow as tf\n'), ((3329, 3357), 'numpy.eye', 'np.eye', (['self.num_inducing[c]'], {}), '(self.num_inducing[c])\n', (3335, 3357), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
2D MAF data of 2Na2O.3SiO2 glass
================================
"""
# %%
# The following example illustrates an application of the statistical learning method
# applied in determining the distribution of the nuclear shielding tensor parameters
# from a 2D magic-angle flipping (MAF) spectrum. In this example, we use the 2D MAF
# spectrum [#f1]_ of :math:`2\text{Na}_2\text{O}\cdot3\text{SiO}_2` glass.
#
# Before getting started
# ----------------------
#
# Import all relevant packages.
import csdmpy as cp
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from mrinversion.kernel.nmr import ShieldingPALineshape
from mrinversion.linear_model import SmoothLasso, TSVDCompression
from mrinversion.utils import plot_3d, to_Haeberlen_grid
# sphinx_gallery_thumbnail_number = 5
# %%
# Setup for the matplotlib figures.
def plot2D(csdm_object, **kwargs):
    """Render a 2D CSDM object with both axes inverted (NMR plotting convention)."""
    plt.figure(figsize=(4.5, 3.5))
    axes = plt.subplot(projection="csdm")
    axes.imshow(csdm_object, cmap="gist_ncar_r", aspect="auto", **kwargs)
    axes.invert_xaxis()
    axes.invert_yaxis()
    plt.tight_layout()
    plt.show()
# %%
# Dataset setup
# -------------
#
# Import the dataset
# ''''''''''''''''''
#
# Load the dataset. Here, we import the dataset as the CSDM data-object.
# The 2D MAF dataset in csdm format, fetched from the repository URL below.
filename = "https://osu.box.com/shared/static/k405dsptwe1p43x8mfi1wc1geywrypzc.csdf"
data_object = cp.load(filename)
# For inversion, only the real part of the complex dataset is of interest.
data_object = data_object.real
# Convert the coordinates of both dimensions from Hz to ppm.
_ = [item.to("ppm", "nmr_frequency_ratio") for item in data_object.dimensions]
# %%
# Here, the variable ``data_object`` is a
# `CSDM <https://csdmpy.readthedocs.io/en/latest/api/CSDM.html>`_
# object that holds the real part of the 2D MAF dataset. The plot of the 2D MAF dataset
# is
plot2D(data_object)
# %%
# There are two dimensions in this dataset. The dimension at index 0 is the pure
# anisotropic dimension, while the dimension at index 1 is the isotropic chemical shift
# dimension.
#
# Prepping the data for inversion
# '''''''''''''''''''''''''''''''
# **Step-1: Data Alignment**
#
# When using the csdm objects with the ``mrinversion`` package, the dimension at index
# 0 must be the dimension undergoing the linear inversion. In this example, we plan to
# invert the pure anisotropic shielding line-shape. In the ``data_object``, the
# anisotropic dimension is already at index 0 and, therefore, no further action is
# required.
#
# **Step-2: Optimization**
#
# Also notice, the signal from the 2D MAF dataset occupies a small fraction of the
# two-dimensional frequency grid. For optimum performance, truncate the dataset to the
# relevant region before proceeding. Use the appropriate array indexing/slicing to
# select the signal region.
# Keep only the isotropic-shift slice containing signal; this shrinks the problem.
data_object_truncated = data_object[:, 220:280]
plot2D(data_object_truncated)
# %%
# Linear Inversion setup
# ----------------------
#
# Dimension setup
# '''''''''''''''
#
# **Anisotropic-dimension:**
# The dimension of the dataset that holds the pure anisotropic frequency
# contributions. In ``mrinversion``, this must always be the dimension at index 0 of
# the data object.
# Dimension at index 0 holds the pure anisotropic frequency contributions.
anisotropic_dimension = data_object_truncated.dimensions[0]
# %%
# **x-y dimensions:**
# The two inverse dimensions corresponding to the `x` and `y`-axis of the `x`-`y` grid.
inverse_dimensions = [
    cp.LinearDimension(count=25, increment="500 Hz", label="x"),  # the `x`-dimension.
    cp.LinearDimension(count=25, increment="500 Hz", label="y"),  # the `y`-dimension.
]
# %%
# Generating the kernel
# '''''''''''''''''''''
#
# For MAF datasets, the line-shape kernel corresponds to the pure nuclear shielding
# anisotropy line-shapes. Use the
# :class:`~mrinversion.kernel.nmr.ShieldingPALineshape` class to generate a
# shielding line-shape kernel.
lineshape = ShieldingPALineshape(
    anisotropic_dimension=anisotropic_dimension,
    inverse_dimension=inverse_dimensions,
    channel="29Si",  # nucleus observed in the MAF experiment
    magnetic_flux_density="9.4 T",  # acquisition conditions of the 2D MAF spectrum
    rotor_angle="90°",
    rotor_frequency="12 kHz",
    number_of_sidebands=4,  # sidebands computed per line-shape in the kernel
)
# %%
# Here, ``lineshape`` is an instance of the
# :class:`~mrinversion.kernel.nmr.ShieldingPALineshape` class. The required
# arguments of this class are the `anisotropic_dimension`, `inverse_dimension`, and
# `channel`. We have already defined the first two arguments in the previous
# sub-section. The value of the `channel` argument is the nucleus observed in the MAF
# experiment. In this example, this value is '29Si'.
# The remaining arguments, such as the `magnetic_flux_density`, `rotor_angle`,
# and `rotor_frequency`, are set to match the conditions under which the 2D MAF
# spectrum was acquired. The value of the `number_of_sidebands` argument is the number
# of sidebands calculated for each line-shape within the kernel. Unless, you have a lot
# of spinning sidebands in your MAF dataset, four sidebands should be enough.
#
# Once the ShieldingPALineshape instance is created, use the
# :meth:`~mrinversion.kernel.nmr.ShieldingPALineshape.kernel` method of the
# instance to generate the MAF line-shape kernel.
# Generate the (anisotropic-dimension x x-y-features) line-shape kernel matrix.
K = lineshape.kernel(supersampling=1)
print(K.shape)
# %%
# The kernel ``K`` is a NumPy array of shape (128, 625), where the axes with 128 and
# 625 points are the anisotropic dimension and the features (x-y coordinates)
# corresponding to the :math:`25\times 25` `x`-`y` grid, respectively.
# %%
# Data Compression
# ''''''''''''''''
#
# Data compression is optional but recommended. It may reduce the size of the
# inverse problem and, thus, further computation time.
# Truncated-SVD compression of the kernel and the signal reduces computation time.
new_system = TSVDCompression(K, data_object_truncated)
compressed_K = new_system.compressed_K
compressed_s = new_system.compressed_s
print(f"truncation_index = {new_system.truncation_index}")
# %%
# Solving the inverse problem
# ---------------------------
#
# Smooth LASSO cross-validation
# '''''''''''''''''''''''''''''
#
# Solve the smooth-lasso problem. Ordinarily, one should use the statistical learning
# method to solve the inverse problem over a range of α and λ values and then determine
# the best nuclear shielding tensor parameter distribution for the given 2D MAF
# dataset. Considering the limited build time for the documentation, we skip this step
# and evaluate the distribution at pre-optimized α and λ values. The optimum values are
# :math:`\alpha = 2.2\times 10^{-8}` and :math:`\lambda = 1.27\times 10^{-6}`.
# The following commented code was used in determining the optimum α and λ values.
# %%
# from mrinversion.linear_model import SmoothLassoCV
# import numpy as np
# # setup the pre-defined range of alpha and lambda values
# lambdas = 10 ** (-4 - 3 * (np.arange(20) / 19))
# alphas = 10 ** (-4.5 - 5 * (np.arange(20) / 19))
# # setup the smooth lasso cross-validation class
# s_lasso = SmoothLassoCV(
# alphas=alphas, # A numpy array of alpha values.
# lambdas=lambdas, # A numpy array of lambda values.
# sigma=0.003, # The standard deviation of noise from the MAF data.
# folds=10, # The number of folds in n-folds cross-validation.
# inverse_dimension=inverse_dimensions, # previously defined inverse dimensions.
# verbose=1, # If non-zero, prints the progress as the computation proceeds.
# )
# # run fit using the compressed kernel and compressed data.
# s_lasso.fit(compressed_K, compressed_s)
# # the optimum hyper-parameters, alpha and lambda, from the cross-validation.
# print(s_lasso.hyperparameters)
# # {'alpha': 2.198392648862289e-08, 'lambda': 1.2742749857031348e-06}
# # the solution
# f_sol = s_lasso.f
# # the cross-validation error curve
# CV_metric = s_lasso.cross_validation_curve
# %%
# If you use the above ``SmoothLassoCV`` method, skip the following code-block.
# Setup the smooth lasso class
# Pre-optimized hyper-parameters from the (skipped) cross-validation step above.
s_lasso = SmoothLasso(
    alpha=2.198e-8, lambda1=1.27e-6, inverse_dimension=inverse_dimensions
)
# Run the fit on the compressed kernel and compressed data.
s_lasso.fit(K=compressed_K, s=compressed_s)
# %%
# The optimum solution
# ''''''''''''''''''''
#
# The :attr:`~mrinversion.linear_model.SmoothLasso.f` attribute of the instance holds
# the solution,
f_sol = s_lasso.f  # f_sol is a CSDM object.
# %%
# where ``f_sol`` is the optimum solution.
#
# The fit residuals
# '''''''''''''''''
#
# To calculate the residuals between the data and predicted data(fit), use the
# :meth:`~mrinversion.linear_model.SmoothLasso.residuals` method, as follows,
# Residuals are computed against the uncompressed kernel and data.
residuals = s_lasso.residuals(K=K, s=data_object_truncated)
# residuals is a CSDM object.
# Plot the residuals on the same intensity scale as the data.
plot2D(residuals, vmax=data_object_truncated.max(), vmin=data_object_truncated.min())
# %%
# The standard deviation of the residuals is
residuals.std()
# %%
# Saving the solution
# '''''''''''''''''''
#
# To serialize the solution to a file, use the `save()` method of the CSDM object,
# for example,
f_sol.save("2Na2O.3SiO2_inverse.csdf")  # serialize the solution to a csdf file
residuals.save("2Na2O.3SiO2_residue.csdf")  # serialize the residuals to a csdf file
# %%
# Data Visualization
# ------------------
#
# At this point, we have solved the inverse problem and obtained an optimum
# distribution of the nuclear shielding tensor parameters from the 2D MAF dataset. You
# may use any data visualization and interpretation tool of choice for further
# analysis. In the following sections, we provide minimal visualization to complete the
# case study.
#
# Visualizing the 3D solution
# '''''''''''''''''''''''''''
# Normalize the solution so the maximum intensity equals one.
f_sol /= f_sol.max()
# Convert the coordinates of the solution, `f_sol`, from Hz to ppm.
[item.to("ppm", "nmr_frequency_ratio") for item in f_sol.dimensions]
# The 3D plot of the full solution.
plt.figure(figsize=(5, 4.4))
ax = plt.subplot(projection="3d")
plot_3d(ax, f_sol, elev=25, azim=-50, x_lim=[0, 150], y_lim=[0, 150], z_lim=[-60, -120])
plt.tight_layout()
plt.show()
# %%
# From the 3D plot, we observe three distinct regions corresponding to the
# :math:`\text{Q}^4`, :math:`\text{Q}^3`, and :math:`\text{Q}^2` sites, respectively.
# The :math:`\text{Q}^4` sites are resolved in the 3D distribution; however, we observe
# partial overlapping :math:`\text{Q}^3` and :math:`\text{Q}^2` sites. The following is
# a naive selection of the three regions. One may also apply sophisticated
# classification algorithms to better quantify the Q-species.
# Naive index-range selection of the three Q-species regions of the distribution.
Q4_region = f_sol[0:6, 0:6, 14:35] * 3  # scaled x3 so the weaker Q4 region is visible
Q4_region.description = "Q4 region x 3"
Q3_region = f_sol[0:8, 7:, 20:39]
Q3_region.description = "Q3 region"
Q2_region = f_sol[:10, 6:18, 36:52]
Q2_region.description = "Q2 region"
# %%
# An approximate plot of the respective regions is shown below.
# Calculate the normalization factor for the 2D contours and 1D projections from the
# original solution, `f_sol`. Use this normalization factor to scale the intensities
# from the sub-regions.
# Normalization factors for the 2D contour projections, per axis of `f_sol`.
max_2d = [
    f_sol.sum(axis=0).max().value,
    f_sol.sum(axis=1).max().value,
    f_sol.sum(axis=2).max().value,
]
# Normalization factors for the 1D projections, per axis of `f_sol`.
max_1d = [
    f_sol.sum(axis=(1, 2)).max().value,
    f_sol.sum(axis=(0, 2)).max().value,
    f_sol.sum(axis=(0, 1)).max().value,
]
plt.figure(figsize=(5, 4.4))
ax = plt.subplot(projection="3d")
# plot for the Q4 region
plot_3d(
    ax,
    Q4_region,
    x_lim=[0, 150],  # the x-limit
    y_lim=[0, 150],  # the y-limit
    z_lim=[-60, -120],  # the z-limit
    max_2d=max_2d,  # normalization factors for the 2D contours projections
    max_1d=max_1d,  # normalization factors for the 1D projections
    cmap=cm.Reds_r,  # colormap
)
# plot for the Q3 region
plot_3d(
    ax,
    Q3_region,
    x_lim=[0, 150],  # the x-limit
    y_lim=[0, 150],  # the y-limit
    z_lim=[-60, -120],  # the z-limit
    max_2d=max_2d,  # normalization factors for the 2D contours projections
    max_1d=max_1d,  # normalization factors for the 1D projections
    cmap=cm.Blues_r,  # colormap
)
# plot for the Q2 region
plot_3d(
    ax,
    Q2_region,
    elev=25,  # the elevation angle in the z plane
    azim=-50,  # the azimuth angle in the x-y plane
    x_lim=[0, 150],  # the x-limit
    y_lim=[0, 150],  # the y-limit
    z_lim=[-60, -120],  # the z-limit
    max_2d=max_2d,  # normalization factors for the 2D contours projections
    max_1d=max_1d,  # normalization factors for the 1D projections
    cmap=cm.Oranges_r,  # colormap
    box=False,  # do NOT draw a box around this region
)
ax.legend()
plt.tight_layout()
plt.show()
# %%
# Convert the 3D tensor distribution in Haeberlen parameters
# ----------------------------------------------------------
# You may re-bin the 3D tensor parameter distribution from a
# :math:`\rho(\delta_\text{iso}, x, y)` distribution to
# :math:`\rho(\delta_\text{iso}, \zeta_\sigma, \eta_\sigma)` distribution as follows.
# Create the zeta and eta dimensions, as shown below.
# zeta spans -40 to 116 ppm in 4-ppm steps; eta spans 0 to 1 in 1/15 steps.
zeta = cp.as_dimension(np.arange(40) * 4 - 40, unit="ppm", label="zeta")
eta = cp.as_dimension(np.arange(16) / 15, label="eta")
# Use the `to_Haeberlen_grid` function to convert the tensor parameter distribution.
fsol_Hae = to_Haeberlen_grid(f_sol, zeta, eta)
# %%
# The 3D plot
# '''''''''''
plt.figure(figsize=(5, 4.4))
ax = plt.subplot(projection="3d")
plot_3d(ax, fsol_Hae, x_lim=[0, 1], y_lim=[-40, 120], z_lim=[-60, -120], alpha=0.1)
plt.tight_layout()
plt.show()
# %%
# References
# ----------
#
# .. [#f1] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Stebbins
# , <NAME>. Silicon site distributions in an alkali silicate glass derived by
# two-dimensional 29Si nuclear magnetic resonance, J. Non. Cryst. Solids, **204**,
# (1996), 294–300. `doi:10.1016/S0022-3093(96)00601-1
# <https://doi.org/doi:10.1016/S0022-3093(96)00601-1>`_.
| [
"matplotlib.pyplot.subplot",
"csdmpy.load",
"matplotlib.pyplot.show",
"mrinversion.linear_model.SmoothLasso",
"mrinversion.utils.to_Haeberlen_grid",
"matplotlib.pyplot.figure",
"mrinversion.linear_model.TSVDCompression",
"numpy.arange",
"mrinversion.kernel.nmr.ShieldingPALineshape",
"csdmpy.Linear... | [((1459, 1476), 'csdmpy.load', 'cp.load', (['filename'], {}), '(filename)\n', (1466, 1476), True, 'import csdmpy as cp\n'), ((3975, 4206), 'mrinversion.kernel.nmr.ShieldingPALineshape', 'ShieldingPALineshape', ([], {'anisotropic_dimension': 'anisotropic_dimension', 'inverse_dimension': 'inverse_dimensions', 'channel': '"""29Si"""', 'magnetic_flux_density': '"""9.4 T"""', 'rotor_angle': '"""90°"""', 'rotor_frequency': '"""12 kHz"""', 'number_of_sidebands': '(4)'}), "(anisotropic_dimension=anisotropic_dimension,\n inverse_dimension=inverse_dimensions, channel='29Si',\n magnetic_flux_density='9.4 T', rotor_angle='90°', rotor_frequency=\n '12 kHz', number_of_sidebands=4)\n", (3995, 4206), False, 'from mrinversion.kernel.nmr import ShieldingPALineshape\n'), ((5737, 5778), 'mrinversion.linear_model.TSVDCompression', 'TSVDCompression', (['K', 'data_object_truncated'], {}), '(K, data_object_truncated)\n', (5752, 5778), False, 'from mrinversion.linear_model import SmoothLasso, TSVDCompression\n'), ((7925, 8014), 'mrinversion.linear_model.SmoothLasso', 'SmoothLasso', ([], {'alpha': '(2.198e-08)', 'lambda1': '(1.27e-06)', 'inverse_dimension': 'inverse_dimensions'}), '(alpha=2.198e-08, lambda1=1.27e-06, inverse_dimension=\n inverse_dimensions)\n', (7936, 8014), False, 'from mrinversion.linear_model import SmoothLasso, TSVDCompression\n'), ((9796, 9824), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4.4)'}), '(figsize=(5, 4.4))\n', (9806, 9824), True, 'import matplotlib.pyplot as plt\n'), ((9830, 9858), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (9841, 9858), True, 'import matplotlib.pyplot as plt\n'), ((9859, 9952), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'f_sol'], {'elev': '(25)', 'azim': '(-50)', 'x_lim': '[0, 150]', 'y_lim': '[0, 150]', 'z_lim': '[-60, -120]'}), '(ax, f_sol, elev=25, azim=-50, x_lim=[0, 150], y_lim=[0, 150], z_lim\n =[-60, -120])\n', (9866, 9952), False, 
'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((9948, 9966), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9964, 9966), True, 'import matplotlib.pyplot as plt\n'), ((9967, 9977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9975, 9977), True, 'import matplotlib.pyplot as plt\n'), ((11199, 11227), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4.4)'}), '(figsize=(5, 4.4))\n', (11209, 11227), True, 'import matplotlib.pyplot as plt\n'), ((11233, 11261), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (11244, 11261), True, 'import matplotlib.pyplot as plt\n'), ((11288, 11411), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'Q4_region'], {'x_lim': '[0, 150]', 'y_lim': '[0, 150]', 'z_lim': '[-60, -120]', 'max_2d': 'max_2d', 'max_1d': 'max_1d', 'cmap': 'cm.Reds_r'}), '(ax, Q4_region, x_lim=[0, 150], y_lim=[0, 150], z_lim=[-60, -120],\n max_2d=max_2d, max_1d=max_1d, cmap=cm.Reds_r)\n', (11295, 11411), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((11630, 11754), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'Q3_region'], {'x_lim': '[0, 150]', 'y_lim': '[0, 150]', 'z_lim': '[-60, -120]', 'max_2d': 'max_2d', 'max_1d': 'max_1d', 'cmap': 'cm.Blues_r'}), '(ax, Q3_region, x_lim=[0, 150], y_lim=[0, 150], z_lim=[-60, -120],\n max_2d=max_2d, max_1d=max_1d, cmap=cm.Blues_r)\n', (11637, 11754), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((11973, 12134), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'Q2_region'], {'elev': '(25)', 'azim': '(-50)', 'x_lim': '[0, 150]', 'y_lim': '[0, 150]', 'z_lim': '[-60, -120]', 'max_2d': 'max_2d', 'max_1d': 'max_1d', 'cmap': 'cm.Oranges_r', 'box': '(False)'}), '(ax, Q2_region, elev=25, azim=-50, x_lim=[0, 150], y_lim=[0, 150],\n z_lim=[-60, -120], max_2d=max_2d, max_1d=max_1d, cmap=cm.Oranges_r, box\n =False)\n', (11980, 12134), False, 'from mrinversion.utils import 
plot_3d, to_Haeberlen_grid\n'), ((12455, 12473), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12471, 12473), True, 'import matplotlib.pyplot as plt\n'), ((12474, 12484), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12482, 12484), True, 'import matplotlib.pyplot as plt\n'), ((13097, 13132), 'mrinversion.utils.to_Haeberlen_grid', 'to_Haeberlen_grid', (['f_sol', 'zeta', 'eta'], {}), '(f_sol, zeta, eta)\n', (13114, 13132), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((13167, 13195), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4.4)'}), '(figsize=(5, 4.4))\n', (13177, 13195), True, 'import matplotlib.pyplot as plt\n'), ((13201, 13229), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (13212, 13229), True, 'import matplotlib.pyplot as plt\n'), ((13230, 13317), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'fsol_Hae'], {'x_lim': '[0, 1]', 'y_lim': '[-40, 120]', 'z_lim': '[-60, -120]', 'alpha': '(0.1)'}), '(ax, fsol_Hae, x_lim=[0, 1], y_lim=[-40, 120], z_lim=[-60, -120],\n alpha=0.1)\n', (13237, 13317), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((13314, 13332), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13330, 13332), True, 'import matplotlib.pyplot as plt\n'), ((13333, 13343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13341, 13343), True, 'import matplotlib.pyplot as plt\n'), ((940, 970), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 3.5)'}), '(figsize=(4.5, 3.5))\n', (950, 970), True, 'import matplotlib.pyplot as plt\n'), ((980, 1010), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""csdm"""'}), "(projection='csdm')\n", (991, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1131, 1149), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1147, 1149), True, 'import matplotlib.pyplot as plt\n'), 
((1154, 1164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1162, 1164), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3569), 'csdmpy.LinearDimension', 'cp.LinearDimension', ([], {'count': '(25)', 'increment': '"""500 Hz"""', 'label': '"""x"""'}), "(count=25, increment='500 Hz', label='x')\n", (3528, 3569), True, 'import csdmpy as cp\n'), ((3597, 3656), 'csdmpy.LinearDimension', 'cp.LinearDimension', ([], {'count': '(25)', 'increment': '"""500 Hz"""', 'label': '"""y"""'}), "(count=25, increment='500 Hz', label='y')\n", (3615, 3656), True, 'import csdmpy as cp\n'), ((12967, 12980), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (12976, 12980), True, 'import numpy as np\n'), ((12895, 12908), 'numpy.arange', 'np.arange', (['(40)'], {}), '(40)\n', (12904, 12908), True, 'import numpy as np\n')] |
import os
import cv2
import sys
import time
import collections
import torch
import argparse
import numpy as np
from torch.autograd import Variable
from torch.utils import data
import PSENet.models as models
from PSENet.pse import pse
from argparse import Namespace
from PIL import Image
import torchvision.transforms as transforms
from PSENet.aic_ocr.text_engine import TextEngine
class AIC_Demo(object):
    """End-to-end OCR demo: PSENet text detection followed by text recognition.

    Usage: construct, then call ``run_ocr(img)`` with a BGR image array
    (as returned by ``cv2.imread``). Requires a CUDA device — ``test``
    unconditionally calls ``model.cuda()`` / ``img.cuda()``.
    """
    def __init__(self):
        # The input image is supplied later through run_ocr().
        self.img = None
        if torch.cuda.is_available():
            # Restrict CUDA visibility to the currently selected device.
            os.environ['CUDA_VISIBLE_DEVICES'] = str(torch.cuda.current_device())
            self.gpu = True
        else:
            self.gpu = False
    def get_img(self, img):
        # Swap the first and third channels (BGR <-> RGB reorder).
        img = img[:, :, [2, 1, 0]]
        return img
    def scale(self, img, long_size):
        # Resize so the longer side equals long_size, preserving aspect ratio.
        h, w = img.shape[0:2]
        scale = long_size * 1.0 / max(h, w)
        img = cv2.resize(img, dsize=None, fx=scale, fy=scale)
        return img
    def debug(self, imgs):
        # Tile a 2-D grid of images: each inner list is joined horizontally
        # (axis=1), then the resulting rows are stacked vertically (axis=0).
        col = []
        for i in range(len(imgs)):
            row = []
            for j in range(len(imgs[i])):
                row.append(imgs[i][j])
            res = np.concatenate(row, axis=1)
            col.append(res)
        res = np.concatenate(col, axis=0)
        return res
    def test(self, args):
        """Run PSENet text detection on ``self.img``.

        Builds the backbone named by ``args.arch``, optionally restores a
        checkpoint from ``args.resume``, runs one forward pass, extracts
        text regions with the progressive-scale-expansion (pse) step, and
        returns a visualization image with detected boxes drawn in green.
        """
        # Setup Model
        if args.arch == "resnet50":
            model = models.resnet50(pretrained=True, num_classes=7, scale=args.scale)
        elif args.arch == "resnet101":
            model = models.resnet101(pretrained=True, num_classes=7, scale=args.scale)
        elif args.arch == "resnet152":
            model = models.resnet152(pretrained=True, num_classes=7, scale=args.scale)
        # Inference only: freeze all parameters.
        for param in model.parameters():
            param.requires_grad = False
        model = model.cuda()
        if args.resume is not None:
            if os.path.isfile(args.resume):
                print("Loading model and optimizer from checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume)
                # model.load_state_dict(checkpoint['state_dict'])
                # Strip the leading "module." prefix that DataParallel
                # training adds to every state-dict key (key[7:]).
                d = collections.OrderedDict()
                for key, value in checkpoint['state_dict'].items():
                    tmp = key[7:]
                    d[tmp] = value
                model.load_state_dict(d)
                print("Loaded checkpoint '{}' (epoch {})"
                      .format(args.resume, checkpoint['epoch']))
                sys.stdout.flush()
            else:
                print("No checkpoint found at '{}'".format(args.resume))
                sys.stdout.flush()
        model.eval()
        # Keep an unresized copy for drawing; normalize a scaled copy for
        # the network input (ImageNet mean/std).
        img = self.get_img(self.img)
        org_img = img[:, :, [2, 1, 0]]
        org_img = np.expand_dims(org_img, axis=0)
        org_img = torch.from_numpy(org_img)
        scaled_img = self.scale(img, args.long_size)
        scaled_img = Image.fromarray(scaled_img)
        scaled_img = scaled_img.convert('RGB')
        scaled_img = transforms.ToTensor()(scaled_img)
        scaled_img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(scaled_img)
        img = scaled_img.unsqueeze(0)
        img = Variable(img.cuda())
        org_img = org_img.numpy().astype('uint8')[0]
        text_box = org_img.copy()
        torch.cuda.synchronize()
        with torch.no_grad():
            outputs = model(img)
        # Channel 0 is the full text map; channels 0..kernel_num are the
        # shrunk kernels. Binarize at args.binary_th via sign().
        score = torch.sigmoid(outputs[:, 0, :, :])
        outputs = (torch.sign(outputs - args.binary_th) + 1) / 2
        text = outputs[:, 0, :, :]
        kernels = outputs[:, 0:args.kernel_num, :, :] * text
        score = score.data.cpu().numpy()[0].astype(np.float32)
        text = text.data.cpu().numpy()[0].astype(np.uint8)
        kernels = kernels.data.cpu().numpy()[0].astype(np.uint8)
        # c++ version pse
        pred = pse(kernels, args.min_kernel_area / (args.scale * args.scale))
        # python version pse
        # pred = pypse(kernels, args.min_kernel_area / (args.scale * args.scale))
        # Map predicted-label coordinates back to original-image pixels.
        scale = (org_img.shape[1] * 1.0 / pred.shape[1], org_img.shape[0] * 1.0 / pred.shape[0])
        label = pred
        label_num = np.max(label) + 1
        bboxes = []
        for i in range(1, label_num):
            points = np.array(np.where(label == i)).transpose((1, 0))[:, ::-1]
            # Discard components that are too small or too low-confidence.
            if points.shape[0] < args.min_area / (args.scale * args.scale):
                continue
            score_i = np.mean(score[label == i])
            if score_i < args.min_score:
                continue
            rect = cv2.minAreaRect(points)
            bbox = cv2.boxPoints(rect) * scale
            bbox = bbox.astype('int32')
            bboxes.append(bbox.reshape(-1))
        torch.cuda.synchronize()
        for bbox in bboxes:
            cv2.drawContours(text_box, [bbox.reshape(4, 2)], -1, (0, 255, 0), 2)
        text_box = cv2.resize(text_box, (text.shape[1], text.shape[0]))
        results = self.debug([[text_box]])
        return results
    def run_ocr(self, img):
        """Detect text in ``img`` and (attempt to) recognize it.

        Returns the detection visualization produced by ``test``.
        """
        self.img = img
        # text detection
        args = Namespace(arch='resnet50', binary_th=1.0, kernel_num=7, long_size=1200, min_area=800.0, min_kernel_area=5.0, min_score=0.93, resume='PSENet/pretrained/ic15_res50_pretrain_ic17.pth.tar', scale=1)
        output = self.test(args)
        # text recognizer
        text_engine = TextEngine(cuda=True)
        # NOTE(review): `cropped_images` is never defined in this class or
        # module — this line raises NameError when reached; presumably the
        # detected boxes were meant to be cropped from the image first.
        recognized_text = text_engine.recognize_text_aic(cropped_images)
        print('recognized: {}'.format(list(recognized_text)))
        # NOTE(review): exit() terminates the process, making the return
        # below unreachable — looks like leftover debugging code.
        exit()
        return output
"torch.cuda.synchronize",
"argparse.Namespace",
"os.path.isfile",
"numpy.mean",
"sys.stdout.flush",
"cv2.boxPoints",
"cv2.minAreaRect",
"torch.cuda.current_device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"PSENet.pse.pse",
"torch.load",
"torch.sign",
"numpy.max",
"PSENet.mod... | [((497, 522), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (520, 522), False, 'import torch\n'), ((901, 948), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': 'None', 'fx': 'scale', 'fy': 'scale'}), '(img, dsize=None, fx=scale, fy=scale)\n', (911, 948), False, 'import cv2\n'), ((1249, 1276), 'numpy.concatenate', 'np.concatenate', (['col'], {'axis': '(0)'}), '(col, axis=0)\n', (1263, 1276), True, 'import numpy as np\n'), ((2865, 2896), 'numpy.expand_dims', 'np.expand_dims', (['org_img'], {'axis': '(0)'}), '(org_img, axis=0)\n', (2879, 2896), True, 'import numpy as np\n'), ((2916, 2941), 'torch.from_numpy', 'torch.from_numpy', (['org_img'], {}), '(org_img)\n', (2932, 2941), False, 'import torch\n'), ((3028, 3055), 'PIL.Image.fromarray', 'Image.fromarray', (['scaled_img'], {}), '(scaled_img)\n', (3043, 3055), False, 'from PIL import Image\n'), ((3447, 3471), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (3469, 3471), False, 'import torch\n'), ((3558, 3592), 'torch.sigmoid', 'torch.sigmoid', (['outputs[:, 0, :, :]'], {}), '(outputs[:, 0, :, :])\n', (3571, 3592), False, 'import torch\n'), ((4004, 4066), 'PSENet.pse.pse', 'pse', (['kernels', '(args.min_kernel_area / (args.scale * args.scale))'], {}), '(kernels, args.min_kernel_area / (args.scale * args.scale))\n', (4007, 4066), False, 'from PSENet.pse import pse\n'), ((4905, 4929), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4927, 4929), False, 'import torch\n'), ((5073, 5125), 'cv2.resize', 'cv2.resize', (['text_box', '(text.shape[1], text.shape[0])'], {}), '(text_box, (text.shape[1], text.shape[0]))\n', (5083, 5125), False, 'import cv2\n'), ((5293, 5496), 'argparse.Namespace', 'Namespace', ([], {'arch': '"""resnet50"""', 'binary_th': '(1.0)', 'kernel_num': '(7)', 'long_size': '(1200)', 'min_area': '(800.0)', 'min_kernel_area': '(5.0)', 'min_score': '(0.93)', 'resume': '"""PSENet/pretrained/ic15_res50_pretrain_ic17.pth.tar"""', 
'scale': '(1)'}), "(arch='resnet50', binary_th=1.0, kernel_num=7, long_size=1200,\n min_area=800.0, min_kernel_area=5.0, min_score=0.93, resume=\n 'PSENet/pretrained/ic15_res50_pretrain_ic17.pth.tar', scale=1)\n", (5302, 5496), False, 'from argparse import Namespace\n'), ((5574, 5595), 'PSENet.aic_ocr.text_engine.TextEngine', 'TextEngine', ([], {'cuda': '(True)'}), '(cuda=True)\n', (5584, 5595), False, 'from PSENet.aic_ocr.text_engine import TextEngine\n'), ((1177, 1204), 'numpy.concatenate', 'np.concatenate', (['row'], {'axis': '(1)'}), '(row, axis=1)\n', (1191, 1204), True, 'import numpy as np\n'), ((1407, 1472), 'PSENet.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)', 'num_classes': '(7)', 'scale': 'args.scale'}), '(pretrained=True, num_classes=7, scale=args.scale)\n', (1422, 1472), True, 'import PSENet.models as models\n'), ((1958, 1985), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (1972, 1985), False, 'import os\n'), ((3126, 3147), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3145, 3147), True, 'import torchvision.transforms as transforms\n'), ((3182, 3257), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3202, 3257), True, 'import torchvision.transforms as transforms\n'), ((3488, 3503), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3501, 3503), False, 'import torch\n'), ((4331, 4344), 'numpy.max', 'np.max', (['label'], {}), '(label)\n', (4337, 4344), True, 'import numpy as np\n'), ((4619, 4645), 'numpy.mean', 'np.mean', (['score[label == i]'], {}), '(score[label == i])\n', (4626, 4645), True, 'import numpy as np\n'), ((4736, 4759), 'cv2.minAreaRect', 'cv2.minAreaRect', (['points'], {}), '(points)\n', (4751, 4759), False, 'import cv2\n'), ((578, 605), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', 
(603, 605), False, 'import torch\n'), ((1534, 1600), 'PSENet.models.resnet101', 'models.resnet101', ([], {'pretrained': '(True)', 'num_classes': '(7)', 'scale': 'args.scale'}), '(pretrained=True, num_classes=7, scale=args.scale)\n', (1550, 1600), True, 'import PSENet.models as models\n'), ((2112, 2135), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (2122, 2135), False, 'import torch\n'), ((2242, 2267), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2265, 2267), False, 'import collections\n'), ((2594, 2612), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2610, 2612), False, 'import sys\n'), ((2723, 2741), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2739, 2741), False, 'import sys\n'), ((3613, 3649), 'torch.sign', 'torch.sign', (['(outputs - args.binary_th)'], {}), '(outputs - args.binary_th)\n', (3623, 3649), False, 'import torch\n'), ((4780, 4799), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (4793, 4799), False, 'import cv2\n'), ((1662, 1728), 'PSENet.models.resnet152', 'models.resnet152', ([], {'pretrained': '(True)', 'num_classes': '(7)', 'scale': 'args.scale'}), '(pretrained=True, num_classes=7, scale=args.scale)\n', (1678, 1728), True, 'import PSENet.models as models\n'), ((4440, 4460), 'numpy.where', 'np.where', (['(label == i)'], {}), '(label == i)\n', (4448, 4460), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import pickle, os, csv, math, time
from joblib import Parallel, delayed
import datetime as dt
from datetime import date, datetime, timedelta
from collections import Counter
import copy as cp
import tqdm
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from lightgbm import LGBMRegressor, LGBMClassifier
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.metrics import (
log_loss,
f1_score,
precision_score,
recall_score,
accuracy_score,
)
# import matplotlib.pyplot as plt
# import matplotlib.ticker as ticker
import collections
# import shap
import seaborn as sns
import random
from sklearn.linear_model import LinearRegression, LogisticRegression
np.seterr(all="ignore")
import matplotlib.pyplot as plt
import tqdm
import math
import statsmodels.api as sm
import pandas as pd
import statsmodels.formula.api as smf
import numpy as np
import sys
sys.path.append("../models/")
from lmm import LMM
import util as util_fqi
from contrastive_deepnet import ContrastiveNet, ContrastiveDataset
class LMMFQIagent:
    """Fitted Q-iteration agent with a contrastive (foreground/background)
    value estimator: LightGBM, a linear mixed model, or a contrastive net."""

    def __init__(
        self,
        train_tuples,
        test_tuples,
        iters=150,
        gamma=0.1,
        batch_size=100,
        prioritize=False,
        estimator="gbm",
        weights=None,
        maxT=36,
        state_dim=10,
    ):
        """Configure the agent and learn the behaviour policy.

        Args:
            train_tuples, test_tuples: transition tuples used to build the
                training/test dictionaries via ``util_fqi.construct_dicts``.
            iters: number of fitted-Q iterations per run.
            gamma: discount factor.
            batch_size: number of transitions per sampled batch.
            prioritize: stored flag; sampling is currently deterministic.
            estimator: one of "gbm", "lmm" or "linnet".
            weights: reward component weights; defaults to uniform (1/5 each).
            maxT: maximum trajectory length.
            state_dim: dimensionality of the state vector.
        """
        # Fix for a mutable default argument: the previous default
        # (``weights=np.array([1, 1, 1, 1, 1]) / 5.0``) was a single array
        # object shared by every instantiation, so mutating one agent's
        # reward_weights would silently affect all others. Behaviour for
        # callers is unchanged: omitting `weights` still yields uniform 1/5.
        if weights is None:
            weights = np.array([1, 1, 1, 1, 1]) / 5.0
        self.iters = iters
        self.gamma = gamma
        self.batch_size = batch_size
        self.prioritize_a = prioritize
        self.training_set, self.test_set = util_fqi.construct_dicts(
            train_tuples, test_tuples
        )
        self.raw_test = test_tuples
        self.visits = {"train": len(train_tuples), "test": len(test_tuples)}
        self.NV = {"train": len(train_tuples), "test": len(test_tuples)}
        self.n_samples = len(self.training_set["s"])
        _, self.unique_actions, self.action_counts, _ = self.sub_actions()
        self.state_feats = [str(x) for x in range(10)]
        self.n_features = len(self.state_feats)
        self.reward_weights = weights
        self.maxT = maxT
        # Behaviour policy estimated from the observed transitions.
        self.piB = util_fqi.learnBehaviour(
            self.training_set, self.test_set, state_dim=state_dim
        )
        self.n_actions = len(self.unique_actions)
        print("N actions: ", self.n_actions)
        # One-hot encoding of the action indices, used by the LMM estimator.
        s = pd.Series(np.arange(self.n_actions))
        self.actions_onehot = pd.get_dummies(s).values
        self.estimator = estimator
        self.state_dim = state_dim
        # NOTE: an unrecognized estimator leaves self.q_est unset; fitQ
        # raises an explicit Exception for that case later.
        if self.estimator == "gbm":
            self.q_est = LGBMRegressor(n_estimators=50, silent=True)
        elif self.estimator == "lmm":
            self.q_est = LMM(model="regression", num_classes=self.n_actions)
        elif self.estimator == "linnet":
            self.q_est = ContrastiveNet(model_name="linnet")
        # self.piE = LogisticRegression() #LMM(model='classification', num_classes=self.n_actions)
        # Per-group evaluation policies fitted in getPi().
        self.piE_foreground = LogisticRegression()
        self.piE_background = LogisticRegression()
def sub_actions(self):
a = self.training_set["a"]
a = list(a)
unique_actions = 0
action_counts = 0
n_actions = 0
unique_actions, action_counts = np.unique(a, axis=0, return_counts=True)
n_actions = len(unique_actions)
return a, unique_actions, action_counts, n_actions
def sampleTuples(self):
# ids = list(np.random.choice(np.arange(self.n_samples), self.batch_size, replace=False))
ids = np.arange(self.batch_size)
batch = {}
for k in self.training_set.keys():
batch[k] = np.asarray(self.training_set[k], dtype=object)[ids]
batch["r"] = batch["r"] # np.dot(batch['r'], self.reward_weights)
batch["s_ids"] = np.asarray(ids, dtype=int)
batch["ns_ids"] = np.asarray(ids, dtype=int) + 1
return batch
    def fitQ(self, batch, Q):
        """Fit the Q-function estimator on one batch.

        Regression targets are the standard FQI targets
        ``r + gamma * max_a' Q(s', a')`` read from the current Q table.

        Returns a 3-tuple whose contents depend on ``self.estimator``:
        "lmm"  -> (batch, foreground sub-batch, background sub-batch)
        "gbm"  -> (batch, None, None)
        "linnet" -> (None, background sub-batch, foreground sub-batch)

        NOTE(review): the "lmm" and "linnet" branches return the group
        sub-batches in opposite order; a caller unpacking positionally
        (e.g. ``batch, batch_fg, batch_bg = self.fitQ(...)``) gets the
        groups swapped between the two estimators — verify which order
        is intended.
        """
        if self.estimator == "lmm":
            # Split the batch into foreground/background sub-batches and
            # build the per-sample group labels (1 = foreground).
            batch_foreground = {}
            batch_background = {}
            groups = []
            elts = ["s", "s_ids", "ns"]
            for el in elts:
                batch_foreground[el] = []
                batch_background[el] = []
            for i in range(len(batch["s_ids"])):
                if batch["ds"][i] == "foreground":
                    batch_foreground["s_ids"].append(batch["s_ids"][i])
                    batch_foreground["s"].append(batch["s"][i])
                    batch_foreground["ns"].append(batch["ns"][i])
                else:
                    batch_background["s_ids"].append(batch["s_ids"][i])
                    batch_background["s"].append(batch["s"][i])
                    batch_background["ns"].append(batch["ns"][i])
            for i in range(len(batch["s_ids"])):
                if batch["ds"][i] == "foreground":
                    groups.append(1)
                else:
                    groups.append(0)
            # input = [state action]
            s = pd.Series(batch["a"])
            as_onehot = pd.get_dummies(s).values
            x_shared = np.hstack((np.asarray(batch["s"]), as_onehot))
            # x_shared = np.hstack((np.asarray(batch['s']), np.expand_dims(np.asarray(batch['a']), 1)))
            y_shared = np.squeeze(batch["r"]) + (
                self.gamma * np.max(Q[batch["ns_ids"], :], axis=1)
            )
            self.q_est.fit(x_shared, y_shared, groups)
            return batch, batch_foreground, batch_background
        elif self.estimator == "gbm":
            # input = [state action]
            x = np.hstack(
                (np.asarray(batch["s"]), np.expand_dims(np.asarray(batch["a"]), 1))
            )
            ### Response variable is just the value of the Q table at these indices
            ### Important note: at this point we've already acounted for the discount factor gamma
            ### and we've already taken a max across actions.
            # action_idxs = (batch['a'] + 2).astype(int)
            # Q_states = Q[batch['s_ids'], :]
            # y = Q_states[np.arange(len(Q_states)), action_idxs]
            # old version
            y = np.squeeze(batch["r"]) + (
                self.gamma * np.max(Q[batch["ns_ids"], :], axis=1)
            )
            self.q_est.fit(x, y)
            return batch, None, None
        elif self.estimator == "linnet":
            # Same group split as the "lmm" branch (duplicated code).
            batch_foreground = {}
            batch_background = {}
            groups = []
            elts = ["s", "s_ids", "ns"]
            for el in elts:
                batch_foreground[el] = []
                batch_background[el] = []
            for i in range(len(batch["s_ids"])):
                if batch["ds"][i] == "foreground":
                    batch_foreground["s_ids"].append(batch["s_ids"][i])
                    batch_foreground["s"].append(batch["s"][i])
                    batch_foreground["ns"].append(batch["ns"][i])
                else:
                    batch_background["s_ids"].append(batch["s_ids"][i])
                    batch_background["s"].append(batch["s"][i])
                    batch_background["ns"].append(batch["ns"][i])
            for i in range(len(batch["s_ids"])):
                if batch["ds"][i] == "foreground":
                    groups.append(1)
                else:
                    groups.append(0)
            # NOTE(review): `groups` is built here but never passed to
            # ContrastiveDataset or fit() in this branch — confirm intent.
            ds = ContrastiveDataset(batch, from_batch=True)
            X = ds.X
            y = ds.y
            y = np.squeeze(y) + (self.gamma * np.max(Q[batch["ns_ids"], :], axis=1))
            self.q_est.fit(X, y)
            return None, batch_background, batch_foreground
        else:
            raise Exception(
                "Q_est must be either an LGBM regressor, LMM, or Linear Net"
            )
    def updateQtable(self, Qtable, batch, batch_fg, batch_bg, iter_num):
        """Refresh Q-table entries from the freshly fitted estimator.

        For each candidate action, predicts Q(next_state, action) and writes
        the result into ``Qtable`` at the rows of the sampled states.
        ``Qtable`` is mutated in place and also returned.

        NOTE(review): ``iter_num`` is accepted but never used here.
        """
        # Update for foregound using just foreground
        # Update for background using shared
        if self.estimator == "lmm":
            bg_size = len(batch_bg["s"])
            fg_size = len(batch_fg["s"])
            # for i, a in enumerate(self.unique_actions):
            for i in range(len(self.unique_actions)):
                # One-hot action vector appended to each next state; the
                # groups argument tags background rows 0 and foreground rows 1.
                a = self.actions_onehot[i, :]
                Qtable[batch_bg["s_ids"], i] = self.q_est.predict(
                    np.hstack((batch_bg["ns"], np.tile(a, (bg_size, 1)))),
                    groups=np.tile([0], (bg_size)),
                )
                Qtable[batch_fg["s_ids"], i] = self.q_est.predict(
                    np.hstack((batch_fg["ns"], np.tile(a, (fg_size, 1)))),
                    groups=np.tile([1], (fg_size)),
                )
        elif self.estimator == "gbm":
            for i, a in enumerate(self.unique_actions):
                # import ipdb; ipdb.set_trace()
                # Single shared model: predict on [next_state, raw action].
                Qtable[batch["s_ids"], i] = self.q_est.predict(
                    np.hstack((batch["ns"], np.tile(a, (self.batch_size, 1))))
                )
        elif self.estimator == "linnet":
            # NOTE(review): the constants below (25 actions, 46 state dims)
            # appear hard-coded for one specific environment — confirm.
            # NOTE(review): these loops write Qtable[j, i] with the local
            # batch position j rather than batch_*["s_ids"][j] as the other
            # branches do — verify this row indexing is intended.
            for i in range(len(self.unique_actions)):
                for j in range(len(batch_bg["s_ids"])):
                    # Background rows: the second (foreground-specific) half
                    # of the contrastive input is zeroed out.
                    one_hot_a = [0] * 25
                    s = batch_bg["ns"][j]
                    a = self.unique_actions[i]
                    one_hot_a[a] = 1
                    blank_s = [0] * 46
                    blank_a = [0] * 25
                    s_a = np.hstack((s, one_hot_a, blank_s, blank_a))
                    Qtable[j, i] = self.q_est.predict(s_a.astype("float32"))
            for i in range(len(self.unique_actions)):
                for j in range(len(batch_fg["s_ids"])):
                    # Foreground rows: state/action are repeated in both halves.
                    one_hot_a = [0] * 25
                    s = batch_fg["ns"][j]
                    a = self.unique_actions[i]
                    one_hot_a[a] = 1
                    s_a = np.hstack((s, one_hot_a, s, one_hot_a))
                    Qtable[j, i] = self.q_est.predict(s_a.astype("float32"))
        return Qtable
def runFQI(self, repeats=10):
print("Learning policy")
meanQtable = np.zeros((self.n_samples + 1, self.n_actions))
for r in range(repeats):
print("Run", r, ":")
print("Initialize: get batch, set initial Q")
Qtable = np.zeros((self.n_samples + 1, self.n_actions))
Qdist = []
# print('Run FQI')
for k, iteration in enumerate(tqdm.tqdm(range(self.iters))):
# copy q-table
Qold = cp.deepcopy(Qtable)
# sample batch
batch = self.sampleTuples()
# learn q_est with samples, targets from batch
batch, batch_fg, batch_bg = self.fitQ(batch, Qtable)
# update Q table for all s given new estimator
self.updateQtable(Qtable, batch, batch_fg, batch_bg, iteration)
# check divergence from last estimate
Qdist.append(mean_absolute_error(Qold, Qtable))
# plt.plot(Qdist)
meanQtable += Qtable
meanQtable = meanQtable / repeats
print("Learn policy")
self.Qtable = meanQtable
# Since the Q table is constructed contrastively, the policy is contrastive?
self.getPi(meanQtable)
return Qdist
def getPi(self, Qtable):
optA = np.argmax(Qtable, axis=1)
if self.state_dim == 3:
rescaled_optA = []
for a in optA:
rescaled_optA.append(a - 2)
optA = rescaled_optA
fg_training = []
fg_optA = []
bg_training = []
bg_optA = []
for i, g in enumerate(self.training_set["ds"]):
if g == "foreground":
fg_training.append(self.training_set["s"][i])
fg_optA.append(optA[i])
else:
bg_training.append(self.training_set["s"][i])
bg_optA.append(optA[i])
# import ipdb; ipdb.set_trace()
# This doesn't on Pendulum env right now because fg_optA is all the same class and bg_optA is all the same class.
print(str(fg_optA))
print(str(bg_optA))
self.piE_foreground.fit(np.asarray(fg_training), fg_optA)
self.piE_background.fit(np.asarray(bg_training), bg_optA)
# print("Done Fitting")
| [
"numpy.argmax",
"sklearn.metrics.mean_absolute_error",
"numpy.arange",
"numpy.tile",
"numpy.unique",
"sys.path.append",
"util.construct_dicts",
"numpy.max",
"copy.deepcopy",
"pandas.get_dummies",
"numpy.asarray",
"contrastive_deepnet.ContrastiveNet",
"contrastive_deepnet.ContrastiveDataset",... | [((764, 787), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (773, 787), True, 'import numpy as np\n'), ((962, 991), 'sys.path.append', 'sys.path.append', (['"""../models/"""'], {}), "('../models/')\n", (977, 991), False, 'import sys\n'), ((1581, 1632), 'util.construct_dicts', 'util_fqi.construct_dicts', (['train_tuples', 'test_tuples'], {}), '(train_tuples, test_tuples)\n', (1605, 1632), True, 'import util as util_fqi\n'), ((2156, 2234), 'util.learnBehaviour', 'util_fqi.learnBehaviour', (['self.training_set', 'self.test_set'], {'state_dim': 'state_dim'}), '(self.training_set, self.test_set, state_dim=state_dim)\n', (2179, 2234), True, 'import util as util_fqi\n'), ((2979, 2999), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2997, 2999), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((3030, 3050), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (3048, 3050), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((3252, 3292), 'numpy.unique', 'np.unique', (['a'], {'axis': '(0)', 'return_counts': '(True)'}), '(a, axis=0, return_counts=True)\n', (3261, 3292), True, 'import numpy as np\n'), ((3534, 3560), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (3543, 3560), True, 'import numpy as np\n'), ((3799, 3825), 'numpy.asarray', 'np.asarray', (['ids'], {'dtype': 'int'}), '(ids, dtype=int)\n', (3809, 3825), True, 'import numpy as np\n'), ((10007, 10053), 'numpy.zeros', 'np.zeros', (['(self.n_samples + 1, self.n_actions)'], {}), '((self.n_samples + 1, self.n_actions))\n', (10015, 10053), True, 'import numpy as np\n'), ((11273, 11298), 'numpy.argmax', 'np.argmax', (['Qtable'], {'axis': '(1)'}), '(Qtable, axis=1)\n', (11282, 11298), True, 'import numpy as np\n'), ((1328, 1353), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1]'], 
{}), '([1, 1, 1, 1, 1])\n', (1336, 1353), True, 'import numpy as np\n'), ((2374, 2399), 'numpy.arange', 'np.arange', (['self.n_actions'], {}), '(self.n_actions)\n', (2383, 2399), True, 'import numpy as np\n'), ((2431, 2448), 'pandas.get_dummies', 'pd.get_dummies', (['s'], {}), '(s)\n', (2445, 2448), True, 'import pandas as pd\n'), ((2588, 2631), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {'n_estimators': '(50)', 'silent': '(True)'}), '(n_estimators=50, silent=True)\n', (2601, 2631), False, 'from lightgbm import LGBMRegressor, LGBMClassifier\n'), ((3852, 3878), 'numpy.asarray', 'np.asarray', (['ids'], {'dtype': 'int'}), '(ids, dtype=int)\n', (3862, 3878), True, 'import numpy as np\n'), ((4993, 5014), 'pandas.Series', 'pd.Series', (["batch['a']"], {}), "(batch['a'])\n", (5002, 5014), True, 'import pandas as pd\n'), ((10200, 10246), 'numpy.zeros', 'np.zeros', (['(self.n_samples + 1, self.n_actions)'], {}), '((self.n_samples + 1, self.n_actions))\n', (10208, 10246), True, 'import numpy as np\n'), ((12123, 12146), 'numpy.asarray', 'np.asarray', (['fg_training'], {}), '(fg_training)\n', (12133, 12146), True, 'import numpy as np\n'), ((12189, 12212), 'numpy.asarray', 'np.asarray', (['bg_training'], {}), '(bg_training)\n', (12199, 12212), True, 'import numpy as np\n'), ((2695, 2746), 'lmm.LMM', 'LMM', ([], {'model': '"""regression"""', 'num_classes': 'self.n_actions'}), "(model='regression', num_classes=self.n_actions)\n", (2698, 2746), False, 'from lmm import LMM\n'), ((3646, 3692), 'numpy.asarray', 'np.asarray', (['self.training_set[k]'], {'dtype': 'object'}), '(self.training_set[k], dtype=object)\n', (3656, 3692), True, 'import numpy as np\n'), ((5039, 5056), 'pandas.get_dummies', 'pd.get_dummies', (['s'], {}), '(s)\n', (5053, 5056), True, 'import pandas as pd\n'), ((5263, 5285), 'numpy.squeeze', 'np.squeeze', (["batch['r']"], {}), "(batch['r'])\n", (5273, 5285), True, 'import numpy as np\n'), ((10429, 10448), 'copy.deepcopy', 'cp.deepcopy', (['Qtable'], {}), 
'(Qtable)\n', (10440, 10448), True, 'import copy as cp\n'), ((2813, 2848), 'contrastive_deepnet.ContrastiveNet', 'ContrastiveNet', ([], {'model_name': '"""linnet"""'}), "(model_name='linnet')\n", (2827, 2848), False, 'from contrastive_deepnet import ContrastiveNet, ContrastiveDataset\n'), ((5098, 5120), 'numpy.asarray', 'np.asarray', (["batch['s']"], {}), "(batch['s'])\n", (5108, 5120), True, 'import numpy as np\n'), ((5319, 5356), 'numpy.max', 'np.max', (["Q[batch['ns_ids'], :]"], {'axis': '(1)'}), "(Q[batch['ns_ids'], :], axis=1)\n", (5325, 5356), True, 'import numpy as np\n'), ((6148, 6170), 'numpy.squeeze', 'np.squeeze', (["batch['r']"], {}), "(batch['r'])\n", (6158, 6170), True, 'import numpy as np\n'), ((7354, 7396), 'contrastive_deepnet.ContrastiveDataset', 'ContrastiveDataset', (['batch'], {'from_batch': '(True)'}), '(batch, from_batch=True)\n', (7372, 7396), False, 'from contrastive_deepnet import ContrastiveNet, ContrastiveDataset\n'), ((10886, 10919), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['Qold', 'Qtable'], {}), '(Qold, Qtable)\n', (10905, 10919), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((5607, 5629), 'numpy.asarray', 'np.asarray', (["batch['s']"], {}), "(batch['s'])\n", (5617, 5629), True, 'import numpy as np\n'), ((6204, 6241), 'numpy.max', 'np.max', (["Q[batch['ns_ids'], :]"], {'axis': '(1)'}), "(Q[batch['ns_ids'], :], axis=1)\n", (6210, 6241), True, 'import numpy as np\n'), ((7455, 7468), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (7465, 7468), True, 'import numpy as np\n'), ((8371, 8392), 'numpy.tile', 'np.tile', (['[0]', 'bg_size'], {}), '([0], bg_size)\n', (8378, 8392), True, 'import numpy as np\n'), ((8583, 8604), 'numpy.tile', 'np.tile', (['[1]', 'fg_size'], {}), '([1], fg_size)\n', (8590, 8604), True, 'import numpy as np\n'), ((5646, 5668), 'numpy.asarray', 'np.asarray', (["batch['a']"], {}), "(batch['a'])\n", (5656, 5668), True, 'import numpy as np\n'), ((7485, 7522), 
'numpy.max', 'np.max', (["Q[batch['ns_ids'], :]"], {'axis': '(1)'}), "(Q[batch['ns_ids'], :], axis=1)\n", (7491, 7522), True, 'import numpy as np\n'), ((8316, 8340), 'numpy.tile', 'np.tile', (['a', '(bg_size, 1)'], {}), '(a, (bg_size, 1))\n', (8323, 8340), True, 'import numpy as np\n'), ((8528, 8552), 'numpy.tile', 'np.tile', (['a', '(fg_size, 1)'], {}), '(a, (fg_size, 1))\n', (8535, 8552), True, 'import numpy as np\n'), ((9353, 9396), 'numpy.hstack', 'np.hstack', (['(s, one_hot_a, blank_s, blank_a)'], {}), '((s, one_hot_a, blank_s, blank_a))\n', (9362, 9396), True, 'import numpy as np\n'), ((9777, 9816), 'numpy.hstack', 'np.hstack', (['(s, one_hot_a, s, one_hot_a)'], {}), '((s, one_hot_a, s, one_hot_a))\n', (9786, 9816), True, 'import numpy as np\n'), ((8877, 8909), 'numpy.tile', 'np.tile', (['a', '(self.batch_size, 1)'], {}), '(a, (self.batch_size, 1))\n', (8884, 8909), True, 'import numpy as np\n')] |
import numpy as np
from hmf.density_field.transfer import Transfer
from hmf.density_field.transfer_models import EH_BAO
import pytest
import camb
# def rms(a):
# print(a)
# print("RMS: ", np.sqrt(np.mean(np.square(a))))
# return np.sqrt(np.mean(np.square(a)))
#
# def check_close(t, t2, fit):
# t.update(transfer_model=fit)
# assert np.mean(np.abs((t.power - t2.power) / t.power)) < 1
@pytest.fixture
def transfers():
    """Provide a pair of freshly built, identical EH-model Transfer objects."""
    reference = Transfer(transfer_model="EH")
    modified = Transfer(transfer_model="EH")
    return reference, modified
@pytest.mark.parametrize(
    ["name", "val",],
    [("z", 0.1), ("sigma_8", 0.82), ("n", 0.95), ("cosmo_params", {"H0": 68.0})],
)
def test_updates(transfers, name, val):
    """Updating one parameter perturbs the power spectrum measurably,
    but not by orders of magnitude."""
    t, t2 = transfers
    t.update(**{name: val})
    rel_diff = np.mean(np.abs((t.power - t2.power) / t.power))
    assert 1e-6 < rel_diff < 1
def test_halofit():
    """Nonlinear (halofit) power matches linear power on the largest scale
    and strongly exceeds it on the smallest."""
    t = Transfer(lnk_min=-20, lnk_max=20, dlnk=0.05, transfer_model="EH")
    print(EH_BAO._defaults)
    print("in test_transfer, params are: ", t.transfer_params)
    linear, nonlinear = t.power, t.nonlinear_power
    assert np.isclose(linear[0], nonlinear[0])
    assert 5 * linear[-1] < nonlinear[-1]
def test_ehnobao():
    """EH transfers with and without BAO agree at the largest scale."""
    with_bao = Transfer(transfer_model="EH")
    without_bao = Transfer(transfer_model="EH_NoBAO")
    assert np.isclose(
        with_bao._unnormalised_lnT[0], without_bao._unnormalised_lnT[0], rtol=1e-5
    )
def test_bondefs():
    """The Bond & Efstathiou transfer function tends to unity at large scales."""
    t = Transfer(transfer_model="BondEfs")
    transfer = np.exp(t._unnormalised_lnT)
    print(transfer)
    assert np.isclose(transfer[0], 1, rtol=1e-5)
@pytest.mark.skip("Too slow and needs to be constantly updated.")
def test_data(datadir):
    """Compare the CAMB-based power spectrum against stored reference data."""
    import camb
    from astropy.cosmology import LambdaCDM

    params = camb.CAMBparams()
    params.set_matter_power(kmax=100.0)
    t = Transfer(
        cosmo_model=LambdaCDM(Om0=0.3, Ode0=0.7, H0=70.0, Ob0=0.05),
        sigma_8=0.8,
        n=1,
        transfer_params={"camb_params": params},
        lnk_min=np.log(1e-11),
        lnk_max=np.log(1e11),
    )
    reference = np.genfromtxt(datadir / "power_for_hmf_tests.dat")
    rms = np.sqrt(np.mean(np.square(t.power - reference[:, 1])))
    assert rms < 0.001
def test_camb_extrapolation():
    """Beyond CAMB's k-range, the transfer function follows the EH shape."""
    t = Transfer(transfer_params={"extrapolate_with_eh": True}, transfer_model="CAMB")
    # Local previously named `camb`, shadowing the module import — renamed.
    lnk = np.log(np.logspace(1.5, 2, 20))
    eh_lnt = t.transfer._eh.lnt(lnk)
    camb_lnt = t.transfer.lnt(lnk)
    # Remove the constant normalization offset between the two curves.
    eh_lnt += eh_lnt[0] - camb_lnt[0]
    assert np.isclose(eh_lnt[-1], camb_lnt[-1], rtol=1e-1)
def test_setting_kmax():
    """A kmax passed via transfer_params propagates into the CAMB parameters."""
    t = Transfer(
        transfer_params={"extrapolate_with_eh": True, "kmax": 1.0},
        transfer_model="CAMB",
    )
    assert t.transfer.params["camb_params"].Transfer.kmax == 1.0
    camb_transfers = camb.get_transfer_functions(t.transfer.params["camb_params"])
    matter = camb_transfers.get_matter_transfer_data().transfer_data
    assert np.max(matter[0]) < 2.0
| [
"astropy.cosmology.LambdaCDM",
"hmf.density_field.transfer.Transfer",
"camb.CAMBparams",
"numpy.abs",
"numpy.log",
"numpy.logspace",
"camb.get_transfer_functions",
"numpy.square",
"numpy.genfromtxt",
"numpy.isclose",
"numpy.max",
"numpy.exp",
"pytest.mark.parametrize",
"pytest.mark.skip"
] | [((517, 640), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['name', 'val']", "[('z', 0.1), ('sigma_8', 0.82), ('n', 0.95), ('cosmo_params', {'H0': 68.0})]"], {}), "(['name', 'val'], [('z', 0.1), ('sigma_8', 0.82), (\n 'n', 0.95), ('cosmo_params', {'H0': 68.0})])\n", (540, 640), False, 'import pytest\n'), ((1548, 1612), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Too slow and needs to be constantly updated."""'], {}), "('Too slow and needs to be constantly updated.')\n", (1564, 1612), False, 'import pytest\n'), ((914, 979), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'lnk_min': '(-20)', 'lnk_max': '(20)', 'dlnk': '(0.05)', 'transfer_model': '"""EH"""'}), "(lnk_min=-20, lnk_max=20, dlnk=0.05, transfer_model='EH')\n", (922, 979), False, 'from hmf.density_field.transfer import Transfer\n'), ((1082, 1126), 'numpy.isclose', 'np.isclose', (['t.power[0]', 't.nonlinear_power[0]'], {}), '(t.power[0], t.nonlinear_power[0])\n', (1092, 1126), True, 'import numpy as np\n'), ((1208, 1237), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""EH"""'}), "(transfer_model='EH')\n", (1216, 1237), False, 'from hmf.density_field.transfer import Transfer\n'), ((1251, 1286), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""EH_NoBAO"""'}), "(transfer_model='EH_NoBAO')\n", (1259, 1286), False, 'from hmf.density_field.transfer import Transfer\n'), ((1298, 1373), 'numpy.isclose', 'np.isclose', (['t._unnormalised_lnT[0]', 'tnobao._unnormalised_lnT[0]'], {'rtol': '(1e-05)'}), '(t._unnormalised_lnT[0], tnobao._unnormalised_lnT[0], rtol=1e-05)\n', (1308, 1373), True, 'import numpy as np\n'), ((1403, 1437), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""BondEfs"""'}), "(transfer_model='BondEfs')\n", (1411, 1437), False, 'from hmf.density_field.transfer import Transfer\n'), ((1707, 1724), 'camb.CAMBparams', 'camb.CAMBparams', ([], {}), '()\n', (1722, 1724), False, 'import camb\n'), 
((2006, 2056), 'numpy.genfromtxt', 'np.genfromtxt', (["(datadir / 'power_for_hmf_tests.dat')"], {}), "(datadir / 'power_for_hmf_tests.dat')\n", (2019, 2056), True, 'import numpy as np\n'), ((2169, 2247), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_params': "{'extrapolate_with_eh': True}", 'transfer_model': '"""CAMB"""'}), "(transfer_params={'extrapolate_with_eh': True}, transfer_model='CAMB')\n", (2177, 2247), False, 'from hmf.density_field.transfer import Transfer\n'), ((2257, 2280), 'numpy.logspace', 'np.logspace', (['(1.5)', '(2)', '(20)'], {}), '(1.5, 2, 20)\n', (2268, 2280), True, 'import numpy as np\n'), ((2396, 2434), 'numpy.isclose', 'np.isclose', (['eh[-1]', 'camb[-1]'], {'rtol': '(0.1)'}), '(eh[-1], camb[-1], rtol=0.1)\n', (2406, 2434), True, 'import numpy as np\n'), ((2471, 2566), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_params': "{'extrapolate_with_eh': True, 'kmax': 1.0}", 'transfer_model': '"""CAMB"""'}), "(transfer_params={'extrapolate_with_eh': True, 'kmax': 1.0},\n transfer_model='CAMB')\n", (2479, 2566), False, 'from hmf.density_field.transfer import Transfer\n'), ((2672, 2733), 'camb.get_transfer_functions', 'camb.get_transfer_functions', (["t.transfer.params['camb_params']"], {}), "(t.transfer.params['camb_params'])\n", (2699, 2733), False, 'import camb\n'), ((453, 482), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""EH"""'}), "(transfer_model='EH')\n", (461, 482), False, 'from hmf.density_field.transfer import Transfer\n'), ((484, 513), 'hmf.density_field.transfer.Transfer', 'Transfer', ([], {'transfer_model': '"""EH"""'}), "(transfer_model='EH')\n", (492, 513), False, 'from hmf.density_field.transfer import Transfer\n'), ((1448, 1475), 'numpy.exp', 'np.exp', (['t._unnormalised_lnT'], {}), '(t._unnormalised_lnT)\n', (1454, 1475), True, 'import numpy as np\n'), ((1499, 1529), 'numpy.exp', 'np.exp', (['t._unnormalised_lnT[0]'], {}), '(t._unnormalised_lnT[0])\n', 
(1505, 1529), True, 'import numpy as np\n'), ((2309, 2318), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (2315, 2318), True, 'import numpy as np\n'), ((2346, 2355), 'numpy.log', 'np.log', (['k'], {}), '(k)\n', (2352, 2355), True, 'import numpy as np\n'), ((2809, 2821), 'numpy.max', 'np.max', (['T[0]'], {}), '(T[0])\n', (2815, 2821), True, 'import numpy as np\n'), ((1799, 1846), 'astropy.cosmology.LambdaCDM', 'LambdaCDM', ([], {'Om0': '(0.3)', 'Ode0': '(0.7)', 'H0': '(70.0)', 'Ob0': '(0.05)'}), '(Om0=0.3, Ode0=0.7, H0=70.0, Ob0=0.05)\n', (1808, 1846), False, 'from astropy.cosmology import LambdaCDM\n'), ((1943, 1956), 'numpy.log', 'np.log', (['(1e-11)'], {}), '(1e-11)\n', (1949, 1956), True, 'import numpy as np\n'), ((1974, 1996), 'numpy.log', 'np.log', (['(100000000000.0)'], {}), '(100000000000.0)\n', (1980, 1996), True, 'import numpy as np\n'), ((767, 805), 'numpy.abs', 'np.abs', (['((t.power - t2.power) / t.power)'], {}), '((t.power - t2.power) / t.power)\n', (773, 805), True, 'import numpy as np\n'), ((831, 869), 'numpy.abs', 'np.abs', (['((t.power - t2.power) / t.power)'], {}), '((t.power - t2.power) / t.power)\n', (837, 869), True, 'import numpy as np\n'), ((2085, 2117), 'numpy.square', 'np.square', (['(t.power - pdata[:, 1])'], {}), '(t.power - pdata[:, 1])\n', (2094, 2117), True, 'import numpy as np\n')] |
'''
Created on 22 de mai de 2021
@author: jose
'''
from Harmonica import Harmonica
from typing import List
import numpy
class FormaOnda():
'''
classdocs
'''
def __init__(self, nAmostras, listaH: List[Harmonica]):
'''
Constructor
'''
self.__list = listaH
self.__intervAmostra = 1/60/nAmostras
self.__fo = numpy.zeros(nAmostras,dtype="float")
self.__quantHarm = len(listaH)
self.fazOnda()
def fazOndaOld(self):
for i in range(self.__quantHarm):
for j in range(self.__quantHarm-1, i-1, -1):
self.__fo += self.__list[j].getValores()
def fazOndaV2(self):
for i in range(self.__quantHarm-1,-1,-1):
self.__fo += self.__list[i].getValores()
for j in range(i):
self.__fo += self.__list[j].getValoresSemVDC()
def fazOnda(self):
for i in range(self.__quantHarm):
self.__fo += self.__list[i].getValores()
def getFO(self):
return self.__fo
def getFO2Arduino(self):
return self.__fo.tolist()
"""
def __init__(self, nAmostras, listaH: List[Harmonica]):
self._list = listaH
self._intervaloAmostra = 1/60/nAmostras
self._fo = np.zeros(nAmostras, dtype=float)
self._quantHarm = len(listaH)
def fazOnda(self):
for i in range(self._quantHarm):
for j in range(self._quantHarm-1,i-1,-1):
self._fo += self._list[j]._valores
def getFO(self):
return self._fo
"""
| [
"numpy.zeros"
] | [((372, 409), 'numpy.zeros', 'numpy.zeros', (['nAmostras'], {'dtype': '"""float"""'}), "(nAmostras, dtype='float')\n", (383, 409), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
#
# Author <NAME>
# E-mail <EMAIL>
# License MIT
# Created 06/01/2017
# Updated 06/01/2017
# Version 1.0.0
#
"""
Description of stats.py
======================
1 Shapiro (test normality)
2 Bartlett (test homostedasticity)
3 if normal and same variance then ANOVA and Tukey post hoc test
else kruskall wallis
TABLE 1
/media/sf_github/array1/repro.py : ghosal()
/media/sf_github/kea/kea.py figure2()
/media/sf_github/vqmm2/vqmm2.py figure1()
"""
from os import listdir
import sys
import utils
import numpy as np
from scipy import stats
from statsmodels.stats.multicomp import MultiComparison
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statistics import stdev
def assert_normality(data):
"""Description of assert_normality
The Shapiro-Wilk test tests the null hypothesis that the data was drawn
from a normal distribution
if test_stat and p_value close to 1 then data is normal
"""
print("Values " + str(data))
statistic, pvalue = stats.shapiro(data)
print("Shapiro Statistic " + str(statistic) + " and p-value " + str(pvalue))
if pvalue > 0.05:
print("Normal")
return True
else:
print("Not normal")
return False
def bartlett(data):
"""Description of bartlett
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bartlett.html
"""
if len(data) == 3:
statistic, pvalue = stats.bartlett(data[0], data[1], data[2])
elif len(data) == 4:
statistic, pvalue = stats.bartlett(data[0], data[1], data[2], data[3])
else:
utils.print_error("TODO barlett manage more values")
print("Bartlett Statistic " + str(statistic) + " and p-value " + str(pvalue))
if pvalue > 0.05:
return True
else:
return False
def assert_homoscedasticity(data):
"""Description of assert_homoscedasticity
Bartlett’s test tests the null hypothesis that all input samples are from
populations with equal variances. For samples from significantly non-normal
populations, Levene’s test levene is used.
"""
normality = True
for item in data:
normality = assert_normality(item)
if not normality:
break
if normality:
return bartlett(data)
else:
print("TODO levene")
def anova(data):
"""
return True is at least one mean is different from the other
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.f_oneway.html
"""
if len(data) == 3:
statistic, pvalue = stats.f_oneway(data[0], data[1], data[2])
elif len(data) == 4:
statistic, pvalue = stats.f_oneway(data[0], data[1], data[2], data[3])
else:
utils.print_error("TODO ANOVA manage more values")
print("ANOVA Statistic " + str(statistic) + " and p-value " + str(pvalue))
if pvalue < 0.05:
return True
else:
return False
def tukey(data, names):
names = np.array(names)
tmp = []
for item in data:
for val in item:
tmp.append(val)
data = np.array(tmp)
mc = MultiComparison(data, names)
result = mc.tukeyhsd()
print(result)
print(mc.groupsunique)
def kruskal(data):
"""
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kruskal.html
"""
if len(data) == 3:
statistic, pvalue = stats.kruskal(data[0], data[1], data[2])
elif len(data) == 4:
statistic, pvalue = stats.kruskal(data[0], data[1], data[2], data[3])
else:
utils.print_error("TODO kruskal manage more values")
print("Kruskal Statistic " + str(statistic) + " and p-value " + str(pvalue))
if pvalue > 0.05:
# same
return False
else:
# different
return True
def main():
utils.print_success("Statistical analysis")
stats_dir = "stats/"
stats_file = listdir(stats_dir)
for filen in stats_file:
utils.print_info(filen)
data = []
names = []
with open(stats_dir + filen, "r") as filep:
for line in filep:
# Read file with lines like this:
# GA,0.578947368421,0.631578947368,0.710526315789,0.722222222222
# SVMBFF,0.631578947368,0.684210526316,0.815789473684,0.66666666
# VQMM,0.736842105263,0.842105263158,0.842105263158,0.75,0.61111
row = line[:-1].split(",")
tmp = []
for index in range(1, len(row)):
names.append(row[0])
tmp.append(float(row[index]))
data.append(tmp)
print(filen.split(".")[0].split("_")[1].title() + " for " + row[0] + " \t= " + str("{0:.3f}".format(sum(tmp)/len(tmp))) + " ± " + str("{0:.3f}".format(stdev(tmp))))
if assert_homoscedasticity(data):
if anova(data):
tukey(data, names)
else:
print("All means are the same")
else:
if kruskal(data):
print("cf R")
# Dunn
# Conover-Iman
# Dwass-Steel-Citchlow-Fligner
else:
print("All means are the same")
if __name__ == "__main__":
main()
| [
"scipy.stats.kruskal",
"utils.print_info",
"scipy.stats.shapiro",
"statistics.stdev",
"utils.print_success",
"scipy.stats.bartlett",
"scipy.stats.f_oneway",
"statsmodels.stats.multicomp.MultiComparison",
"numpy.array",
"utils.print_error",
"os.listdir"
] | [((1022, 1041), 'scipy.stats.shapiro', 'stats.shapiro', (['data'], {}), '(data)\n', (1035, 1041), False, 'from scipy import stats\n'), ((2955, 2970), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (2963, 2970), True, 'import numpy as np\n'), ((3070, 3083), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (3078, 3083), True, 'import numpy as np\n'), ((3093, 3121), 'statsmodels.stats.multicomp.MultiComparison', 'MultiComparison', (['data', 'names'], {}), '(data, names)\n', (3108, 3121), False, 'from statsmodels.stats.multicomp import MultiComparison\n'), ((3780, 3823), 'utils.print_success', 'utils.print_success', (['"""Statistical analysis"""'], {}), "('Statistical analysis')\n", (3799, 3823), False, 'import utils\n'), ((3866, 3884), 'os.listdir', 'listdir', (['stats_dir'], {}), '(stats_dir)\n', (3873, 3884), False, 'from os import listdir\n'), ((1438, 1479), 'scipy.stats.bartlett', 'stats.bartlett', (['data[0]', 'data[1]', 'data[2]'], {}), '(data[0], data[1], data[2])\n', (1452, 1479), False, 'from scipy import stats\n'), ((2551, 2592), 'scipy.stats.f_oneway', 'stats.f_oneway', (['data[0]', 'data[1]', 'data[2]'], {}), '(data[0], data[1], data[2])\n', (2565, 2592), False, 'from scipy import stats\n'), ((3359, 3399), 'scipy.stats.kruskal', 'stats.kruskal', (['data[0]', 'data[1]', 'data[2]'], {}), '(data[0], data[1], data[2])\n', (3372, 3399), False, 'from scipy import stats\n'), ((3922, 3945), 'utils.print_info', 'utils.print_info', (['filen'], {}), '(filen)\n', (3938, 3945), False, 'import utils\n'), ((1533, 1583), 'scipy.stats.bartlett', 'stats.bartlett', (['data[0]', 'data[1]', 'data[2]', 'data[3]'], {}), '(data[0], data[1], data[2], data[3])\n', (1547, 1583), False, 'from scipy import stats\n'), ((1602, 1654), 'utils.print_error', 'utils.print_error', (['"""TODO barlett manage more values"""'], {}), "('TODO barlett manage more values')\n", (1619, 1654), False, 'import utils\n'), ((2646, 2696), 'scipy.stats.f_oneway', 'stats.f_oneway', 
(['data[0]', 'data[1]', 'data[2]', 'data[3]'], {}), '(data[0], data[1], data[2], data[3])\n', (2660, 2696), False, 'from scipy import stats\n'), ((2715, 2765), 'utils.print_error', 'utils.print_error', (['"""TODO ANOVA manage more values"""'], {}), "('TODO ANOVA manage more values')\n", (2732, 2765), False, 'import utils\n'), ((3453, 3502), 'scipy.stats.kruskal', 'stats.kruskal', (['data[0]', 'data[1]', 'data[2]', 'data[3]'], {}), '(data[0], data[1], data[2], data[3])\n', (3466, 3502), False, 'from scipy import stats\n'), ((3521, 3573), 'utils.print_error', 'utils.print_error', (['"""TODO kruskal manage more values"""'], {}), "('TODO kruskal manage more values')\n", (3538, 3573), False, 'import utils\n'), ((4768, 4778), 'statistics.stdev', 'stdev', (['tmp'], {}), '(tmp)\n', (4773, 4778), False, 'from statistics import stdev\n')] |
import random
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from networks.maddpg_critic_version_3 import MADDPGCriticVersion3
from networks.maddpg_actor_version_2 import MADDPGActorVersion2
from agents.base_agent import BaseAgent
from agents.game import Game
from utils.ounoise import OUNoise
from utils.experience_pack import ExperienceUnpack
import pdb
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class ActionQuery():
"""
Query result
"""
def __init__(self):
self.next_actions = None
"""
MADDPGAgent (Version 4)
1. Add gradient clipping of gradient of Q-function
2. Reset OUNoise after every calling learn()
3. In forward_all, if agent_other is not self, detach tensor of agent_other.forward()
4. change input shape of 1st layer of critic network from (state_size) to (state_size+action_size)
"""
class MADDPGAgentVersion4(BaseAgent):
def __init__(self, game, num_agents, state_size, action_size, name, random_seed=0,
lr_critic=1e-3, lr_actor=1e-3,
fc1_units=400, fc2_units=300,
buffer_size=int(1e6), batch_size=128,
gamma=0.99, tau=1e-3,
max_norm=1.0,
epsilon_start=1.0, epsilon_end=0.1, epsilon_decay=0.99,
exploration_mu=0.0, exploration_theta=0.15, exploration_sigma=0.2):
"""Initialize an Agent object.
Args:
game (class Game): meidator in chain-of-responsibility design pattern. (Broker chain)
random_seed (int): random seed.
max_norm (float): value of clip_grad_norm for critic optimizer
"""
super().__init__()
self.index_agent = None
self.game = game
self.num_agents = num_agents
self.state_size = state_size
self.action_size = action_size
self.name = name
self.seed = random.seed(random_seed)
self.max_norm = max_norm
self.epsilon = epsilon_start
self.epsilon_end = epsilon_end
self.epsilon_decay = epsilon_decay
# Actor Network (w/ Target Network)
self.actor_local = MADDPGActorVersion2(state_size, action_size, random_seed,
fc1_units=fc1_units, fc2_units=fc2_units).to(device)
self.actor_target = MADDPGActorVersion2(state_size, action_size, random_seed,
fc1_units=fc1_units, fc2_units=fc2_units).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_actor)
# Critic Network (w/ Target Network)
self.critic_local = MADDPGCriticVersion3(num_agents, state_size, action_size,
fcs1_units=fc1_units, fc2_units=fc2_units,
seed=random_seed).to(device)
self.critic_target = MADDPGCriticVersion3(num_agents, state_size, action_size,
fcs1_units=fc1_units, fc2_units=fc2_units,
seed=random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_critic)
# Noise process for action
# Noise process
self.noise = OUNoise(self.action_size, exploration_mu, exploration_theta, exploration_sigma)
# parameter of discounted reward
self.gamma = gamma
# soft update parameter
self.tau = tau
self.batch_size = batch_size
def step(self, states, actions, rewards, next_states, dones):
"""
Args:
states (numpy.array): states.shape[1] = (state_size*num_agents)
actions (numpy.array): actions.shape[1] = (actions_size*num_agents)
next_states (numpy.array): next_states.shape[1] = (state_size*num_agents)
"""
self.learn(states, actions, rewards, next_states, dones)
def act(self, state, add_noise=True):
"""
Returns actions for given state.
The input size of actor networks is state_size.
"""
state = torch.from_numpy(state).float().to(device)
with torch.no_grad():
self.actor_local.eval()
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.epsilon * self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def forward_all(self, next_states):
"""
Get next_actions. This is a chain-of-responsibility design pattern. (Broker chain)
Return:
1d differentiable tensor of next_actions.
"""
q = ActionQuery()
for i, agent in enumerate(self.game):
# get next_state_i of agent_i
n_state = next_states[:, i*self.state_size: (i+1)*self.state_size]
# pdb.set_trace()
if agent == self:
detach = False
else:
detach = True
# predict next_action and append it to actionQuery.actions
agent.query(n_state, q, detach)
return q.next_actions
def query(self, next_state, q, detach):
"""
Args:
q (class ActionQuery): parcel that stores actions
"""
next_action = self.actor_local(next_state)
if detach is True:
next_action = next_action.detach()
if q.next_actions is None:
q.next_actions = next_action
else:
q.next_actions = torch.cat((q.next_actions, next_action), dim=1)
# pdb.set_trace()
def learn(self, states, actions, rewards, next_states, dones):
"""Update policy and value parameters using given batch of experience tuples.
For agent i:
Q_target_i = r_i + gamma * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> actions for all agent
critic_target(state, action) -> Q-value
Args:
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
# divide fields update agent number i
experience_unpacks = ExperienceUnpack(states, actions, rewards, next_states, dones,
self.state_size, self.action_size, self.num_agents)
# upack field in agent_i
if self.index_agent is None:
self.index_agent = self.game.index_of_agent(self)
# pdb.set_trace()
states_i, actions_i, rewards_i, next_states_i, dones_i = experience_unpacks[self.index_agent]
# assert (states_i.shape[1] == (self.state_size)), 'Wrong shape of states_i'
# assert (actions_i.shape[1] == (self.action_size)), 'Wrong shape of actions_i'
# assert (rewards_i.shape[1] == (1)), 'Wrong shape of rewards_i'
# assert (dones_i.shape[1] == (1)), 'Wrong shape of dones_i'
# train critic
# loss fuction = Q_target(TD 1-step boostrapping) - Q_local(current)
next_actions = self.forward_all(next_states)
assert (next_actions.shape[1] == (self.action_size * self.num_agents)), 'Wrong shape of next_actions'
Q_targets_next = self.critic_target(next_states, next_actions)
Q_target_i = rewards_i + (self.gamma * Q_targets_next * (1-dones_i))
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_target_i)
self.critic_optimizer.zero_grad()
critic_loss.backward()
torch.nn.utils.clip_grad_norm_(self.critic_local.parameters(), self.max_norm)
self.critic_optimizer.step()
# train actor
actions_pred = self.forward_all(states)
actor_loss = - self.critic_local(states, actions).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# update critic
self.soft_update(self.critic_local, self.critic_target, self.tau)
# update actors
self.soft_update(self.actor_local, self.actor_target, self.tau)
#------ update noise ---#
self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_end)
self.noise.reset()
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Args:
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def model_dicts(self):
m_dicts = {'critic_{}'.format(self.name): self.critic_target,
'actor_{}'.format(self.name): self.actor_target}
return m_dicts
| [
"utils.experience_pack.ExperienceUnpack",
"networks.maddpg_actor_version_2.MADDPGActorVersion2",
"torch.nn.functional.mse_loss",
"torch.cat",
"numpy.clip",
"utils.ounoise.OUNoise",
"random.seed",
"torch.cuda.is_available",
"torch.no_grad",
"networks.maddpg_critic_version_3.MADDPGCriticVersion3",
... | [((439, 464), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (462, 464), False, 'import torch\n'), ((2022, 2046), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (2033, 2046), False, 'import random\n'), ((3487, 3566), 'utils.ounoise.OUNoise', 'OUNoise', (['self.action_size', 'exploration_mu', 'exploration_theta', 'exploration_sigma'], {}), '(self.action_size, exploration_mu, exploration_theta, exploration_sigma)\n', (3494, 3566), False, 'from utils.ounoise import OUNoise\n'), ((4707, 4729), 'numpy.clip', 'np.clip', (['action', '(-1)', '(1)'], {}), '(action, -1, 1)\n', (4714, 4729), True, 'import numpy as np\n'), ((6748, 6867), 'utils.experience_pack.ExperienceUnpack', 'ExperienceUnpack', (['states', 'actions', 'rewards', 'next_states', 'dones', 'self.state_size', 'self.action_size', 'self.num_agents'], {}), '(states, actions, rewards, next_states, dones, self.\n state_size, self.action_size, self.num_agents)\n', (6764, 6867), False, 'from utils.experience_pack import ExperienceUnpack\n'), ((8033, 8067), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['Q_expected', 'Q_target_i'], {}), '(Q_expected, Q_target_i)\n', (8043, 8067), True, 'import torch.nn.functional as F\n'), ((4446, 4461), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4459, 4461), False, 'import torch\n'), ((6003, 6050), 'torch.cat', 'torch.cat', (['(q.next_actions, next_action)'], {'dim': '(1)'}), '((q.next_actions, next_action), dim=1)\n', (6012, 6050), False, 'import torch\n'), ((2288, 2392), 'networks.maddpg_actor_version_2.MADDPGActorVersion2', 'MADDPGActorVersion2', (['state_size', 'action_size', 'random_seed'], {'fc1_units': 'fc1_units', 'fc2_units': 'fc2_units'}), '(state_size, action_size, random_seed, fc1_units=\n fc1_units, fc2_units=fc2_units)\n', (2307, 2392), False, 'from networks.maddpg_actor_version_2 import MADDPGActorVersion2\n'), ((2475, 2579), 'networks.maddpg_actor_version_2.MADDPGActorVersion2', 'MADDPGActorVersion2', 
(['state_size', 'action_size', 'random_seed'], {'fc1_units': 'fc1_units', 'fc2_units': 'fc2_units'}), '(state_size, action_size, random_seed, fc1_units=\n fc1_units, fc2_units=fc2_units)\n', (2494, 2579), False, 'from networks.maddpg_actor_version_2 import MADDPGActorVersion2\n'), ((2812, 2935), 'networks.maddpg_critic_version_3.MADDPGCriticVersion3', 'MADDPGCriticVersion3', (['num_agents', 'state_size', 'action_size'], {'fcs1_units': 'fc1_units', 'fc2_units': 'fc2_units', 'seed': 'random_seed'}), '(num_agents, state_size, action_size, fcs1_units=\n fc1_units, fc2_units=fc2_units, seed=random_seed)\n', (2832, 2935), False, 'from networks.maddpg_critic_version_3 import MADDPGCriticVersion3\n'), ((3070, 3193), 'networks.maddpg_critic_version_3.MADDPGCriticVersion3', 'MADDPGCriticVersion3', (['num_agents', 'state_size', 'action_size'], {'fcs1_units': 'fc1_units', 'fc2_units': 'fc2_units', 'seed': 'random_seed'}), '(num_agents, state_size, action_size, fcs1_units=\n fc1_units, fc2_units=fc2_units, seed=random_seed)\n', (3090, 3193), False, 'from networks.maddpg_critic_version_3 import MADDPGCriticVersion3\n'), ((4381, 4404), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (4397, 4404), False, 'import torch\n')] |
import numpy as np
from scipy.stats import norm
#Define the expected improvement as objective function to optimize over.
def expected_improvement(X,surrogate,X_sample,Y_sample,correct_label,classify,USE_SKLEARN=True,VERBOSE=True,boundary_penalty=lambda a,b: 0):
my_X = np.copy(X)
my_X = X.reshape(1, -1)
my_X_label = classify.predict(my_X)
#If not in this component, set it to zero immediately.
if my_X_label != correct_label.astype(int): return -0
my_xi = 0.0 #tuning parameter, set it to zero for now.
if USE_SKLEARN:
my_gp = surrogate.predict(my_X, return_std=True, return_cov=False)
my_mu = my_gp[0]
my_sigma = my_gp[1]
else:
my_gp = surrogate.predict(my_X)
my_mu = my_gp[0]
my_sigma = my_gp[1]
my_sigma = np.sqrt(np.absolute(my_sigma)).reshape(-1, 1)
my_mu = np.asarray(my_mu)
my_sigma = np.asarray(my_sigma)
with np.errstate(divide='warn'):
my_imp = my_mu - np.max(Y_sample) - my_xi
my_Z = np.divide(my_imp,my_sigma)
#norm = mvn(mean=np.zeros(X_sample[0,:].shape), cov=np.eye(X_sample.shape[1]))
my_ei = my_imp * norm.cdf(my_Z) + my_sigma * norm.pdf(my_Z)
my_ei[np.where(my_sigma <= 0.0)] = 0.0
#Here we penalize the acquisition function value according to boundary_penalty function, by default this would be disabled. See document for details.
my_ei = my_ei + boundary_penalty(my_X,X_sample)
my_ei = float(my_ei.ravel())
if VERBOSE: print('EI=',my_ei,'\n')
return - my_ei/X_sample.shape[0] #We want to minimize this quantity. maximizing expected improvement
def mean_square_prediction_error(X,surrogate,X_sample,Y_sample,correct_label,classify,USE_SKLEARN=True,VERBOSE=True,boundary_penalty=lambda a,b: 0):
my_X = np.copy(X)
my_X = X.reshape(1, -1)
my_X_label = classify.predict(my_X)
#If not in this component, set it to zero immediately.
if my_X_label != correct_label.astype(int): return -0
my_xi = 0.0 #tuning parameter, set it to zero for now.
if USE_SKLEARN:
#my_gp = surrogate.predict(my_X, return_std=False, return_cov=True)
#my_mu = my_gp[0]
#my_sigma = my_gp[1]
#my_gp_obs = surrogate.predict(X_sample, return_std=False, return_cov=True)
#my_mu_obs = my_gp_obs[0]
#my_sigma_obs = my_gp_obs[1]
X_joint = np.vstack((my_X,X_sample))
mu_cross, sigma_cross = surrogate.predict(X_joint, return_std=False, return_cov=True)
#print('\n',sigma_cross.shape,'>>>',my_X.shape[0])
sigma = sigma_cross[0:my_X.shape[0],0:my_X.shape[0]]
sigma_obs = sigma_cross[my_X.shape[0]:sigma_cross.shape[1],my_X.shape[0]:sigma_cross.shape[1]]
sigma_cross = sigma_cross[0:my_X.shape[0],my_X.shape[0]:sigma_cross.shape[1]]
sigma_cross = sigma_cross.reshape(-1,my_X.shape[0]).T
else:
#my_gp = surrogate.predict(my_X,full_cov=True)
#my_mu = my_gp[0]
#my_sigma = my_gp[1]
#my_gp_obs = surrogate.predict(X_sample,full_cov=True)
#my_mu_obs = my_gp_obs[0]
#my_sigma_obs = my_gp_obs[1]
X_joint = np.vstack((my_X,X_sample))
mu_cross, sigma_cross = surrogate.predict(X_joint, full_cov=True)
sigma = sigma_cross[0:my_X.shape[0],0:my_X.shape[0]]
sigma_obs = sigma_cross[my_X.shape[0]:sigma_cross.shape[1],my_X.shape[0]:sigma_cross.shape[1]]
sigma_cross = sigma_cross[0:my_X.shape[0],my_X.shape[0]:sigma_cross.shape[1]]
sigma_cross = sigma_cross.reshape(-1,my_X.shape[0]).T
mspe = sigma - sigma_cross @ sigma_obs @ sigma_cross.T + my_xi
if mspe.shape[0]>1:
mspe = np.diag(mspe)
else:
mspe = float(mspe.ravel())
mspe = mspe + boundary_penalty(my_X,X_sample)
if VERBOSE: print('MSPE=',mspe,'\n')
return mspe/X_sample.shape[0] #We want to minimize this quantity. minimizing mspe
| [
"numpy.divide",
"numpy.absolute",
"numpy.copy",
"numpy.asarray",
"scipy.stats.norm.pdf",
"numpy.errstate",
"scipy.stats.norm.cdf",
"numpy.max",
"numpy.where",
"numpy.diag",
"numpy.vstack"
] | [((273, 283), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (280, 283), True, 'import numpy as np\n'), ((856, 873), 'numpy.asarray', 'np.asarray', (['my_mu'], {}), '(my_mu)\n', (866, 873), True, 'import numpy as np\n'), ((889, 909), 'numpy.asarray', 'np.asarray', (['my_sigma'], {}), '(my_sigma)\n', (899, 909), True, 'import numpy as np\n'), ((1790, 1800), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (1797, 1800), True, 'import numpy as np\n'), ((919, 945), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""warn"""'}), "(divide='warn')\n", (930, 945), True, 'import numpy as np\n'), ((1012, 1039), 'numpy.divide', 'np.divide', (['my_imp', 'my_sigma'], {}), '(my_imp, my_sigma)\n', (1021, 1039), True, 'import numpy as np\n'), ((2369, 2396), 'numpy.vstack', 'np.vstack', (['(my_X, X_sample)'], {}), '((my_X, X_sample))\n', (2378, 2396), True, 'import numpy as np\n'), ((3133, 3160), 'numpy.vstack', 'np.vstack', (['(my_X, X_sample)'], {}), '((my_X, X_sample))\n', (3142, 3160), True, 'import numpy as np\n'), ((3652, 3665), 'numpy.diag', 'np.diag', (['mspe'], {}), '(mspe)\n', (3659, 3665), True, 'import numpy as np\n'), ((1208, 1233), 'numpy.where', 'np.where', (['(my_sigma <= 0.0)'], {}), '(my_sigma <= 0.0)\n', (1216, 1233), True, 'import numpy as np\n'), ((972, 988), 'numpy.max', 'np.max', (['Y_sample'], {}), '(Y_sample)\n', (978, 988), True, 'import numpy as np\n'), ((1151, 1165), 'scipy.stats.norm.cdf', 'norm.cdf', (['my_Z'], {}), '(my_Z)\n', (1159, 1165), False, 'from scipy.stats import norm\n'), ((1179, 1193), 'scipy.stats.norm.pdf', 'norm.pdf', (['my_Z'], {}), '(my_Z)\n', (1187, 1193), False, 'from scipy.stats import norm\n'), ((806, 827), 'numpy.absolute', 'np.absolute', (['my_sigma'], {}), '(my_sigma)\n', (817, 827), True, 'import numpy as np\n')] |
import numpy as np
from scipy.optimize import dual_annealing
from ..base import BaseEstimator
class LinearNoTrendFourier(BaseEstimator):
"""Linear regression with Fourier terms.
The Fourier terms allow capturing the periodic variability in the
time-series.
No trend is assumed in the time series.
Notes
-----
The model is defined as follows:
.. math::
y_t = \alpha + F_t + \epsilon_t
where :math:`\alpha` is the intercept, :math:`\epsilon_t` is the error
term and :math:`F_t` is the nth-order approximated Fourier series, i.e.,
.. math::
F_t = \sum{j=1}{M} a_j \cos(2 j \pi t) + b_j \sin(2 j \pi t)
This model is fitted to :math:`\{t, y_t\}` data using ordinary
least squares (OLS).
"""
def __init__(self, f_order=3, period=1):
"""
Parameters
----------
f_order : int, optional
Finite order of the truncated Fourier series (default=3).
"""
self.f_order = f_order
self.period = period
self._parameters = {
'fourier_terms': [],
'intercept': None,
}
super().__init__()
def _fourier_terms(self, t, degree):
return [np.cos(2 * degree * np.pi * t / self.period),
np.sin(2 * degree * np.pi * t / self.period)]
def _regressor_terms(self, t):
reg_terms = []
# fourier terms
for degree in range(1, self.f_order + 1):
reg_terms += self._fourier_terms(t, degree)
# intercept
reg_terms.append(np.ones(t.size))
reg_idx = {
'fourier_terms': slice(0, self.f_order * 2),
'intercept': self.f_order * 2
}
return reg_idx, reg_terms
def _solve_lstsq(self, t, y, reg_idx, reg_terms):
mat = np.stack(reg_terms).transpose()
p, ssr, _, _ = np.linalg.lstsq(mat, y, rcond=None)
for k, idx in reg_idx.items():
self._parameters[k] = p[idx]
return ssr
def _fit(self, t, y):
reg_idx, reg_terms = self._regressor_terms(t)
return self._solve_lstsq(t, y, reg_idx, reg_terms)[0]
def _compute_y(self, t, reg_idx, reg_terms):
p = np.empty((len(reg_terms)))
for k, idx in reg_idx.items():
p[idx] = self._parameters[k]
mat = np.stack(reg_terms).transpose()
return (mat @ p[:, None]).ravel()
def _predict(self, t):
reg_idx, reg_terms = self._regressor_terms(t)
return self._compute_y(t, reg_idx, reg_terms)
class LinearTrendFourier(LinearNoTrendFourier):
"""Linear regression with a single trend and Fourier terms.
The Fourier terms allow capturing the periodic variability in the
time-series.
Notes
-----
The model is defined as follows:
.. math::
y_t = \alpha + \beta t + F_t + \epsilon_t
where :math:`\alpha` is the intercept, :math:`\beta` is the slope,
:math:`\epsilon_t` is the error term and :math:`F_t` is the nth-order
approximated Fourier series, i.e.,
.. math::
F_t = \sum{j=1}{M} a_j \cos(2 j \pi t) + b_j \sin(2 j \pi t)
This model is fitted to :math:`\{t, y_t\}` data using ordinary
least squares (OLS).
"""
def __init__(self, f_order=3,period=1):
"""
Parameters
----------
f_order : int, optional
Finite order of the truncated Fourier series (default=3).
"""
super().__init__(f_order,period)
self._parameters.update({'trend': None})
def _regressor_terms(self, t):
reg_idx, reg_terms = super()._regressor_terms(t)
# add trend
reg_terms.append(t)
reg_idx.update({'trend': self.f_order * 2 + 1})
return reg_idx, reg_terms
class LinearBrokenTrendFourier(LinearTrendFourier):
"""Linear regression with a broken trend and Fourier terms.
The Fourier terms allow capturing the periodic variability in the
time-series. This model also allows capturing a sudden change in
trend at a given (a-priori known or unknown) point of
discontinuity.
See Also
--------
:class:`~trendfit.models.LinearTrendFourier`
Notes
-----
The model is defined as follows (see [1]_):
.. math::
y_t = \alpha + \beta t + \delta D_{t, T_1} + F_t + \epsilon_t
where :math:`\alpha` is the intercept, :math:`\beta` is the slope,
:math:`\epsilon_t` is the error term, :math:`F_t` is the nth-order
approximated Fourier series, i.e.,
.. math::
F_t = \sum{j=1}{M} a_j \cos(2 j \pi t) + b_j \sin(2 j \pi t)
and :math:`\delta D_{t, T_1}` is a term introduced for
representing a break in the slope, with :math:`\delta` being the
change in slope, :math:`T_1` the location of the slope discontinuity
and :math:`D_{t, T_1}` a dummy variable given by:
.. math::
:nowrap:
D_{t, T_1} = \left\{
\begin{array}{ll}
0 & \mathrm{if} t \leq T_1\\
t - T_1 & \mathrm{if} t \gt T_1
\end{array}
\right\}
When :math:`T_1` is defined a-priori, the model is fitted to
:math:`\{t, y_t\}` data using ordinary least squares
(OLS). Otherwise, the optimization algorithm implemented in
:func:`scipy.optimize.dual_annealing` is used to fit :math:`T_1`,
using the sum of squares of residuals returned by OLS as the
objective function.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, and <NAME>, 2019. "Nonparametric
estimation and bootstrap inference on trends in atmospheric time series:
an application to ethane" arXiv:1903.05403v1
"""
def __init__(self, f_order=3, period=1, t_break=None, opt_bounds=None,
**opt_kwargs):
"""
Parameters
----------
f_order : int, optional
Finite order of the truncated Fourier series (default=3).
t_break : float, optional
Location of the trend discontinuity. If None (default), the
location will be estimated when fitting the model to data.
opt_bounds : tuple, optional
limits of the search range for estimating ``t_break`` with
:func:`scipy.optimize.dual_annealing`.
If None (default), the whole range of the input time series
is used.
**opt_kwargs : key=value pairs, optional
Keyword arguments that will be passed to
:func:`scipy.optimize.dual_annealing`.
"""
super().__init__(f_order,period)
self._fit_t_break = t_break is None
self._parameters['t_break'] = t_break
self._opt_bounds = opt_bounds
self._opt_kwargs = opt_kwargs
def _regressor_terms(self, t, t_break):
reg_idx, reg_terms = super()._regressor_terms(t)
# add trend breaking point
reg_terms.append(np.where(t > t_break, t - t_break, 0.))
reg_idx.update({'trend_change': self.f_order * 2 + 2})
return reg_idx, reg_terms
def _fit(self, t, y):
def solve_for_location(t_break):
# solve system with a-priori t_break value
reg_idx, reg_terms = self._regressor_terms(t, t_break)
ssr = self._solve_lstsq(t, y, reg_idx, reg_terms)
# system solving issues with t_break near bounds
if not len(ssr):
return np.inf
else:
return ssr[0]
if self._fit_t_break:
if self._opt_bounds is None:
bounds = [(t[1], t[-1])]
else:
bounds = self._opt_bounds
kwargs = {'maxiter': 500}
kwargs.update(self._opt_kwargs)
res = dual_annealing(solve_for_location, bounds, **kwargs)
self._parameters['t_break'] = res.x[0]
# rerun lstsq to properly set other parameter values
reg_idx, reg_terms = self._regressor_terms(
t, self._parameters['t_break']
)
res_lstsq = self._solve_lstsq(t, y, reg_idx, reg_terms)
if self._fit_t_break:
return res
else:
return res_lstsq
def _predict(self, t):
reg_idx, reg_terms = self._regressor_terms(
t, self._parameters['t_break']
)
return self._compute_y(t, reg_idx, reg_terms)
def epanechnikov_kernel(u):
mask = np.abs(u) <= 1.
weight = 3 / 4 * (1 - u**2)
return weight * mask
class KernelTrend(BaseEstimator):
    """Non-parametric trend estimation via Nadaraya-Watson (local-constant)
    kernel regression."""
    # Recognized kernel names mapped to their weight functions.
    valid_kernels = {
        'epanechnikov': epanechnikov_kernel
    }
    def __init__(self, kernel='epanechnikov', bandwidth=None):
        """Store the kernel function and bandwidth.

        ``kernel`` is either the name of a built-in kernel or any callable
        mapping scaled distances to weights.
        """
        super().__init__()
        if isinstance(kernel, str) and kernel in self.valid_kernels:
            kernel_func = self.valid_kernels[kernel]
        elif callable(kernel):
            kernel_func = kernel
        else:
            raise ValueError("Invalid kernel {}".format(kernel))
        self.kernel_func = kernel_func
        self._parameters = {'bandwidth': bandwidth}
    def _local_constant(self, t, y, tau=None):
        """Local-constant estimate of ``y`` at points ``tau`` (defaults to
        the sample points ``t`` themselves)."""
        if tau is None:
            tau = t
        # Kernel weights over all pairwise distances, scaled by bandwidth.
        dists = np.subtract.outer(tau, t)
        weights = self.kernel_func(dists / self._parameters['bandwidth'])
        return weights @ y / np.sum(weights, axis=1)
    def _fit(self, t, y):
        """Fit the trend on the time axis rescaled to [0, 1]."""
        span = t[-1] - t[0]
        self._t_scaled = (t - t[0]) / span
        self._parameters['trend'] = self._local_constant(self._t_scaled, y)
    def _predict(self, t):
        """Evaluate the fitted trend at times ``t``.

        NOTE(review): relies on ``self._t`` and ``self._y``, which are not
        set in this class -- presumably stored by ``BaseEstimator``; verify
        against the base class.
        """
        scaled = (t - self._t[0]) / (self._t[-1] - self._t[0])
        return self._local_constant(self._t_scaled, self._y, scaled)
| [
"numpy.stack",
"numpy.abs",
"numpy.linalg.lstsq",
"numpy.sum",
"scipy.optimize.dual_annealing",
"numpy.ones",
"numpy.subtract.outer",
"numpy.sin",
"numpy.where",
"numpy.cos"
] | [((1880, 1915), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['mat', 'y'], {'rcond': 'None'}), '(mat, y, rcond=None)\n', (1895, 1915), True, 'import numpy as np\n'), ((8473, 8482), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (8479, 8482), True, 'import numpy as np\n'), ((9267, 9292), 'numpy.subtract.outer', 'np.subtract.outer', (['tau', 't'], {}), '(tau, t)\n', (9284, 9292), True, 'import numpy as np\n'), ((1229, 1273), 'numpy.cos', 'np.cos', (['(2 * degree * np.pi * t / self.period)'], {}), '(2 * degree * np.pi * t / self.period)\n', (1235, 1273), True, 'import numpy as np\n'), ((1291, 1335), 'numpy.sin', 'np.sin', (['(2 * degree * np.pi * t / self.period)'], {}), '(2 * degree * np.pi * t / self.period)\n', (1297, 1335), True, 'import numpy as np\n'), ((1573, 1588), 'numpy.ones', 'np.ones', (['t.size'], {}), '(t.size)\n', (1580, 1588), True, 'import numpy as np\n'), ((6974, 7013), 'numpy.where', 'np.where', (['(t > t_break)', '(t - t_break)', '(0.0)'], {}), '(t > t_break, t - t_break, 0.0)\n', (6982, 7013), True, 'import numpy as np\n'), ((7811, 7863), 'scipy.optimize.dual_annealing', 'dual_annealing', (['solve_for_location', 'bounds'], {}), '(solve_for_location, bounds, **kwargs)\n', (7825, 7863), False, 'from scipy.optimize import dual_annealing\n'), ((9395, 9412), 'numpy.sum', 'np.sum', (['k'], {'axis': '(1)'}), '(k, axis=1)\n', (9401, 9412), True, 'import numpy as np\n'), ((1824, 1843), 'numpy.stack', 'np.stack', (['reg_terms'], {}), '(reg_terms)\n', (1832, 1843), True, 'import numpy as np\n'), ((2346, 2365), 'numpy.stack', 'np.stack', (['reg_terms'], {}), '(reg_terms)\n', (2354, 2365), True, 'import numpy as np\n')] |
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
import tensorflow as tf
import torch
from fastestimator.backend import random_normal_like
class TestRandomNormalLike(unittest.TestCase):
    """Checks that random_normal_like preserves backend type and shape."""
    @classmethod
    def setUpClass(cls):
        # One fixture tensor per supported backend.
        cls.test_np = np.array([[0, 1], [1, 1]])
        cls.test_tf = tf.constant([[0, 1], [2, 2]])
        cls.test_torch = torch.Tensor([[1, 1], [2, 3]])

    def test_random_normal_np_type(self):
        result = random_normal_like(self.test_np)
        self.assertIsInstance(result, np.ndarray, 'Output must be NumPy Array')

    def test_random_normal_np_value(self):
        result = random_normal_like(self.test_np)
        self.assertTrue(result.shape == (2, 2),
                        'Output array shape should be same as input')

    def test_random_normal_tf_type(self):
        result = random_normal_like(self.test_tf)
        self.assertIsInstance(result, tf.Tensor, 'Output type must be tf.Tensor')

    def test_random_normal_tf_value(self):
        output_shape = tf.shape([2, 2])
        same_shape = tf.reduce_all(tf.equal(tf.shape(random_normal_like(self.test_tf)), output_shape))
        self.assertTrue(same_shape, 'Output tensor shape should be same as input')

    def test_random_normal_torch_type(self):
        result = random_normal_like(self.test_torch)
        self.assertIsInstance(result, torch.Tensor, 'Output must be torch.Tensor')

    def test_random_normal_torch_value(self):
        output_shape = (2, 2)
        result_shape = random_normal_like(self.test_torch).size()
        self.assertTrue(result_shape == output_shape,
                        'Output tensor shape should be same as input')
| [
"fastestimator.backend.random_normal_like",
"tensorflow.constant",
"tensorflow.shape",
"torch.Tensor",
"numpy.array"
] | [((932, 958), 'numpy.array', 'np.array', (['[[0, 1], [1, 1]]'], {}), '([[0, 1], [1, 1]])\n', (940, 958), True, 'import numpy as np\n'), ((981, 1010), 'tensorflow.constant', 'tf.constant', (['[[0, 1], [2, 2]]'], {}), '([[0, 1], [2, 2]])\n', (992, 1010), True, 'import tensorflow as tf\n'), ((1036, 1066), 'torch.Tensor', 'torch.Tensor', (['[[1, 1], [2, 3]]'], {}), '([[1, 1], [2, 3]])\n', (1048, 1066), False, 'import torch\n'), ((1624, 1640), 'tensorflow.shape', 'tf.shape', (['[2, 2]'], {}), '([2, 2])\n', (1632, 1640), True, 'import tensorflow as tf\n'), ((1140, 1172), 'fastestimator.backend.random_normal_like', 'random_normal_like', (['self.test_np'], {}), '(self.test_np)\n', (1158, 1172), False, 'from fastestimator.backend import random_normal_like\n'), ((1479, 1511), 'fastestimator.backend.random_normal_like', 'random_normal_like', (['self.test_tf'], {}), '(self.test_tf)\n', (1497, 1511), False, 'from fastestimator.backend import random_normal_like\n'), ((1895, 1930), 'fastestimator.backend.random_normal_like', 'random_normal_like', (['self.test_torch'], {}), '(self.test_torch)\n', (1913, 1930), False, 'from fastestimator.backend import random_normal_like\n'), ((1285, 1317), 'fastestimator.backend.random_normal_like', 'random_normal_like', (['self.test_np'], {}), '(self.test_np)\n', (1303, 1317), False, 'from fastestimator.backend import random_normal_like\n'), ((1697, 1729), 'fastestimator.backend.random_normal_like', 'random_normal_like', (['self.test_tf'], {}), '(self.test_tf)\n', (1715, 1729), False, 'from fastestimator.backend import random_normal_like\n'), ((2079, 2114), 'fastestimator.backend.random_normal_like', 'random_normal_like', (['self.test_torch'], {}), '(self.test_torch)\n', (2097, 2114), False, 'from fastestimator.backend import random_normal_like\n')] |
import torch
import numpy as np
import pandas as pd
import multiprocessing as mp
from torch_geometric.data import Data
from functools import partial
from easydict import EasyDict
from tqdm.auto import tqdm
from rdkit import Chem
from rdkit.Chem.rdForceFieldHelpers import MMFFOptimizeMolecule
from ..chem import set_rdmol_positions, get_best_rmsd
def get_rmsd_confusion_matrix(data: Data, useFF=False):
    """Compute the pairwise RMSD matrix between reference and generated
    conformations of one molecule.

    :param data: Data-like mapping with 'rdmol' plus flattened 'pos_ref'
        and 'pos_gen' coordinate arrays (reshaped in place here).
    :param useFF: if True, relax each generated conformer with the MMFF
        force field before comparison.
    :return: np array of shape (num_ref, num_gen); entry [j, i] is the
        best RMSD between reference conformer j and generated conformer i.
    """
    num_atoms = data['rdmol'].GetNumAtoms()
    data['pos_ref'] = data['pos_ref'].reshape(-1, num_atoms, 3)
    data['pos_gen'] = data['pos_gen'].reshape(-1, num_atoms, 3)
    num_gen = data['pos_gen'].shape[0]
    num_ref = data['pos_ref'].shape[0]

    # BUGFIX: np.float is a removed alias (NumPy 1.24+); use builtin float.
    # Initialized to -1 so unfilled entries are detectable.
    rmsd_confusion_mat = -1 * np.ones([num_ref, num_gen], dtype=float)

    for i in range(num_gen):
        gen_mol = set_rdmol_positions(data['rdmol'], data['pos_gen'][i])
        if useFF:
            # Optionally relax the generated geometry with MMFF.
            MMFFOptimizeMolecule(gen_mol)
        for j in range(num_ref):
            ref_mol = set_rdmol_positions(data['rdmol'], data['pos_ref'][j])
            rmsd_confusion_mat[j, i] = get_best_rmsd(gen_mol, ref_mol)
    return rmsd_confusion_mat
def evaluate_conf(data: Data, useFF=False, threshold=0.5):
    """Return (coverage, matching) scores for one molecule.

    Coverage is the fraction of reference conformers whose best RMSD to
    any generated conformer is within ``threshold``; matching is the mean
    of those best RMSDs.
    """
    confusion = get_rmsd_confusion_matrix(data, useFF=useFF)
    best_per_ref = confusion.min(-1)
    coverage = (best_per_ref <= threshold).mean()
    return coverage, best_per_ref.mean()
def print_covmat_results(results, print_fn=print):
    """Summarize coverage/matching statistics and return them as a DataFrame.

    :param results: object exposing CoverageR/CoverageP (2-D, molecules x
        thresholds), MatchingR/MatchingP (1-D) and the threshold grid.
    :param print_fn: callable used to emit the formatted summary lines.
    :return: pandas DataFrame of coverage statistics indexed by threshold.
    """
    covr, covp = results.CoverageR, results.CoverageP
    df = pd.DataFrame({
        'COV-R_mean': np.mean(covr, 0),
        'COV-R_median': np.median(covr, 0),
        'COV-R_std': np.std(covr, 0),
        'COV-P_mean': np.mean(covp, 0),
        'COV-P_median': np.median(covp, 0),
        'COV-P_std': np.std(covp, 0),
    }, index=results.thresholds)
    print_fn('\n' + str(df))
    matr = results.MatchingR
    print_fn('MAT-R_mean: %.4f | MAT-R_median: %.4f | MAT-R_std %.4f' % (
        np.mean(matr), np.median(matr), np.std(matr)
    ))
    matp = results.MatchingP
    print_fn('MAT-P_mean: %.4f | MAT-P_median: %.4f | MAT-P_std %.4f' % (
        np.mean(matp), np.median(matp), np.std(matp)
    ))
    return df
class CovMatEvaluator(object):
    """Computes per-molecule COV (coverage) and MAT (matching) RMSD
    metrics, comparing generated conformers against references in a
    multiprocessing pool.
    """
    def __init__(self,
        num_workers=8,
        use_force_field=False,
        thresholds=np.arange(0.05, 3.05, 0.05),
        ratio=2,
        filter_disconnected=True,
        print_fn=print,
    ):
        """
        :param num_workers: size of the multiprocessing pool.
        :param use_force_field: relax generated conformers with MMFF
            before computing RMSDs.
        :param thresholds: RMSD thresholds at which coverage is reported.
            The default array is created once at class-definition time,
            but it is copied via np.array() below and never mutated.
        :param ratio: generated conformers kept per reference conformer.
        :param filter_disconnected: skip molecules whose SMILES contains
            '.' (disconnected fragments).
        :param print_fn: callable used for status messages.
        """
        super().__init__()
        self.num_workers = num_workers
        self.use_force_field = use_force_field
        self.thresholds = np.array(thresholds).flatten()
        self.ratio = ratio
        self.filter_disconnected = filter_disconnected
        # NOTE(review): this pool is never closed/joined; consider explicit
        # cleanup if many evaluator instances are created.
        self.pool = mp.Pool(num_workers)
        self.print_fn = print_fn
    def __call__(self, packed_data_list, start_idx=0):
        """Evaluate all molecules in ``packed_data_list``.

        :param packed_data_list: dict-like entries with 'rdmol', 'smiles',
            'pos_ref' and 'pos_gen' fields (mutated in place: positions
            are reshaped and generated conformers truncated).
        :param start_idx: skip this many filtered molecules from the front.
        :return: EasyDict with CoverageR/P ((num_mols, num_thres) arrays)
            and MatchingR/P ((num_mols,) arrays) plus the threshold grid.
        """
        func = partial(get_rmsd_confusion_matrix, useFF=self.use_force_field)
        filtered_data_list = []
        for data in packed_data_list:
            # Skip entries missing reference or generated positions.
            if 'pos_gen' not in data or 'pos_ref' not in data: continue
            # Optionally skip molecules with disconnected fragments.
            if self.filter_disconnected and ('.' in data['smiles']): continue
            data['pos_ref'] = data['pos_ref'].reshape(-1, data['rdmol'].GetNumAtoms(), 3)
            data['pos_gen'] = data['pos_gen'].reshape(-1, data['rdmol'].GetNumAtoms(), 3)
            # Require at least ratio * num_ref generated conformers and
            # keep exactly that many.
            num_gen = data['pos_ref'].shape[0] * self.ratio
            if data['pos_gen'].shape[0] < num_gen: continue
            data['pos_gen'] = data['pos_gen'][:num_gen]
            filtered_data_list.append(data)
        filtered_data_list = filtered_data_list[start_idx:]
        self.print_fn('Filtered: %d / %d' % (len(filtered_data_list), len(packed_data_list)))
        covr_scores = []
        matr_scores = []
        covp_scores = []
        matp_scores = []
        # Confusion matrices are computed in parallel worker processes.
        for confusion_mat in tqdm(self.pool.imap(func, filtered_data_list), total=len(filtered_data_list)):
            # confusion_mat: (num_ref, num_gen)
            rmsd_ref_min = confusion_mat.min(-1)    # np (num_ref, ) best per reference -> recall side
            rmsd_gen_min = confusion_mat.min(0)     # np (num_gen, ) best per generated -> precision side
            rmsd_cov_thres = rmsd_ref_min.reshape(-1, 1) <= self.thresholds.reshape(1, -1)    # np (num_ref, num_thres)
            rmsd_jnk_thres = rmsd_gen_min.reshape(-1, 1) <= self.thresholds.reshape(1, -1)    # np (num_gen, num_thres)
            matr_scores.append(rmsd_ref_min.mean())
            covr_scores.append(rmsd_cov_thres.mean(0, keepdims=True))    # np (1, num_thres)
            matp_scores.append(rmsd_gen_min.mean())
            covp_scores.append(rmsd_jnk_thres.mean(0, keepdims=True))    # np (1, num_thres)
        covr_scores = np.vstack(covr_scores)  # np (num_mols, num_thres)
        matr_scores = np.array(matr_scores)   # np (num_mols, )
        covp_scores = np.vstack(covp_scores)  # np (num_mols, num_thres)
        matp_scores = np.array(matp_scores)
        results = EasyDict({
            'CoverageR': covr_scores,
            'MatchingR': matr_scores,
            'thresholds': self.thresholds,
            'CoverageP': covp_scores,
            'MatchingP': matp_scores
        })
        # print_conformation_eval_results(results)
        return results
| [
"functools.partial",
"rdkit.Chem.rdForceFieldHelpers.MMFFOptimizeMolecule",
"numpy.median",
"numpy.std",
"numpy.ones",
"numpy.mean",
"numpy.arange",
"numpy.array",
"easydict.EasyDict",
"multiprocessing.Pool",
"numpy.vstack"
] | [((777, 820), 'numpy.ones', 'np.ones', (['[num_ref, num_gen]'], {'dtype': 'np.float'}), '([num_ref, num_gen], dtype=np.float)\n', (784, 820), True, 'import numpy as np\n'), ((2524, 2551), 'numpy.arange', 'np.arange', (['(0.05)', '(3.05)', '(0.05)'], {}), '(0.05, 3.05, 0.05)\n', (2533, 2551), True, 'import numpy as np\n'), ((2925, 2945), 'multiprocessing.Pool', 'mp.Pool', (['num_workers'], {}), '(num_workers)\n', (2932, 2945), True, 'import multiprocessing as mp\n'), ((3050, 3112), 'functools.partial', 'partial', (['get_rmsd_confusion_matrix'], {'useFF': 'self.use_force_field'}), '(get_rmsd_confusion_matrix, useFF=self.use_force_field)\n', (3057, 3112), False, 'from functools import partial\n'), ((4856, 4878), 'numpy.vstack', 'np.vstack', (['covr_scores'], {}), '(covr_scores)\n', (4865, 4878), True, 'import numpy as np\n'), ((4929, 4950), 'numpy.array', 'np.array', (['matr_scores'], {}), '(matr_scores)\n', (4937, 4950), True, 'import numpy as np\n'), ((4993, 5015), 'numpy.vstack', 'np.vstack', (['covp_scores'], {}), '(covp_scores)\n', (5002, 5015), True, 'import numpy as np\n'), ((5066, 5087), 'numpy.array', 'np.array', (['matp_scores'], {}), '(matp_scores)\n', (5074, 5087), True, 'import numpy as np\n'), ((5107, 5256), 'easydict.EasyDict', 'EasyDict', (["{'CoverageR': covr_scores, 'MatchingR': matr_scores, 'thresholds': self.\n thresholds, 'CoverageP': covp_scores, 'MatchingP': matp_scores}"], {}), "({'CoverageR': covr_scores, 'MatchingR': matr_scores, 'thresholds':\n self.thresholds, 'CoverageP': covp_scores, 'MatchingP': matp_scores})\n", (5115, 5256), False, 'from easydict import EasyDict\n'), ((1017, 1046), 'rdkit.Chem.rdForceFieldHelpers.MMFFOptimizeMolecule', 'MMFFOptimizeMolecule', (['gen_mol'], {}), '(gen_mol)\n', (1037, 1046), False, 'from rdkit.Chem.rdForceFieldHelpers import MMFFOptimizeMolecule\n'), ((1668, 1697), 'numpy.mean', 'np.mean', (['results.CoverageR', '(0)'], {}), '(results.CoverageR, 0)\n', (1675, 1697), True, 'import numpy as np\n'), 
((1723, 1754), 'numpy.median', 'np.median', (['results.CoverageR', '(0)'], {}), '(results.CoverageR, 0)\n', (1732, 1754), True, 'import numpy as np\n'), ((1777, 1805), 'numpy.std', 'np.std', (['results.CoverageR', '(0)'], {}), '(results.CoverageR, 0)\n', (1783, 1805), True, 'import numpy as np\n'), ((1829, 1858), 'numpy.mean', 'np.mean', (['results.CoverageP', '(0)'], {}), '(results.CoverageP, 0)\n', (1836, 1858), True, 'import numpy as np\n'), ((1884, 1915), 'numpy.median', 'np.median', (['results.CoverageP', '(0)'], {}), '(results.CoverageP, 0)\n', (1893, 1915), True, 'import numpy as np\n'), ((1938, 1966), 'numpy.std', 'np.std', (['results.CoverageP', '(0)'], {}), '(results.CoverageP, 0)\n', (1944, 1966), True, 'import numpy as np\n'), ((2112, 2138), 'numpy.mean', 'np.mean', (['results.MatchingR'], {}), '(results.MatchingR)\n', (2119, 2138), True, 'import numpy as np\n'), ((2140, 2168), 'numpy.median', 'np.median', (['results.MatchingR'], {}), '(results.MatchingR)\n', (2149, 2168), True, 'import numpy as np\n'), ((2170, 2195), 'numpy.std', 'np.std', (['results.MatchingR'], {}), '(results.MatchingR)\n', (2176, 2195), True, 'import numpy as np\n'), ((2285, 2311), 'numpy.mean', 'np.mean', (['results.MatchingP'], {}), '(results.MatchingP)\n', (2292, 2311), True, 'import numpy as np\n'), ((2313, 2341), 'numpy.median', 'np.median', (['results.MatchingP'], {}), '(results.MatchingP)\n', (2322, 2341), True, 'import numpy as np\n'), ((2343, 2368), 'numpy.std', 'np.std', (['results.MatchingP'], {}), '(results.MatchingP)\n', (2349, 2368), True, 'import numpy as np\n'), ((2774, 2794), 'numpy.array', 'np.array', (['thresholds'], {}), '(thresholds)\n', (2782, 2794), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import cv2
import os
# Directory of input tiles; a green-coverage weight map is written per tile.
path = './google_tiles_13/'
# HSV bounds classifying a pixel as "green" (hue 45-65; saturation/value
# unconstrained). NOTE(review): the 256 upper bound exceeds the 8-bit max
# of 255 but still acts as an inclusive bound.
lower_green = np.array([45, 0, 0])
upper_green = np.array([65, 256, 256])
# NOTE(review): these two constants are never used below.
x_min = 5975
y_max = 2598
for file in os.listdir(path):
    file_path = os.path.join(path, file)
    if not os.path.isfile(file_path):
        continue
    image = cv2.imread(file_path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    size_x = image.shape[0]
    size_y = image.shape[1]
    # Tiles are processed in chunk x chunk pixel blocks.
    chunk = 16
    # 4-channel output buffer; every pixel is overwritten by the loops
    # below, so the arange values are only placeholders (integer dtype).
    green_image = np.arange(image.shape[0] * image.shape[1] * 4).reshape(image.shape[0], image.shape[1], 4)
    weights_array = {'x' : [], 'y': [], 'w': []}
    for x in range(0, size_x, chunk):
        for y in range(0, size_y, chunk):
            area = 0.0
            # Edge blocks may be smaller than a full chunk.
            x_length = min(chunk, size_x - x)
            y_length = min(chunk, size_y - y)
            area = x_length * y_length
            green_count = 0
            # Count pixels whose HSV value falls inside the green range.
            for i in range(0, x_length):
                for j in range(0, y_length):
                    if (hsv[x + i][y + j] >= lower_green).all() and (hsv[x + i][y + j] <= upper_green).all():
                        green_count += 1
            # Fraction of green pixels in this block.
            alpha = float(green_count) / area
            # Record the per-pixel weight and paint the whole block green
            # with opacity proportional to its green coverage.
            for i in range(0, x_length):
                for j in range(0, y_length):
                    weights_array['x'].append(x + i)
                    weights_array['y'].append(y + j)
                    weights_array['w'].append(alpha)
                    green_image[x + i][y + j] = np.array([0, 128, 0, int(255 * alpha)] )
    cv2.imwrite(os.path.join(path, 'final_tiles/' + file), green_image)
    df = pd.DataFrame(weights_array)
    df.to_csv(os.path.join(os.path.join(path, 'final_csv/'), file.split('.')[0] + '.csv'), sep=';', index=False)
| [
"pandas.DataFrame",
"cv2.cvtColor",
"cv2.imread",
"os.path.isfile",
"numpy.array",
"numpy.arange",
"os.path.join",
"os.listdir"
] | [((105, 125), 'numpy.array', 'np.array', (['[45, 0, 0]'], {}), '([45, 0, 0])\n', (113, 125), True, 'import numpy as np\n'), ((140, 164), 'numpy.array', 'np.array', (['[65, 256, 256]'], {}), '([65, 256, 256])\n', (148, 164), True, 'import numpy as np\n'), ((205, 221), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (215, 221), False, 'import os\n'), ((239, 263), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (251, 263), False, 'import os\n'), ((331, 352), 'cv2.imread', 'cv2.imread', (['file_path'], {}), '(file_path)\n', (341, 352), False, 'import cv2\n'), ((364, 402), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (376, 402), False, 'import cv2\n'), ((1593, 1620), 'pandas.DataFrame', 'pd.DataFrame', (['weights_array'], {}), '(weights_array)\n', (1605, 1620), True, 'import pandas as pd\n'), ((275, 300), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (289, 300), False, 'import os\n'), ((1527, 1568), 'os.path.join', 'os.path.join', (['path', "('final_tiles/' + file)"], {}), "(path, 'final_tiles/' + file)\n", (1539, 1568), False, 'import os\n'), ((492, 538), 'numpy.arange', 'np.arange', (['(image.shape[0] * image.shape[1] * 4)'], {}), '(image.shape[0] * image.shape[1] * 4)\n', (501, 538), True, 'import numpy as np\n'), ((1648, 1680), 'os.path.join', 'os.path.join', (['path', '"""final_csv/"""'], {}), "(path, 'final_csv/')\n", (1660, 1680), False, 'import os\n')] |
import sys
sys.path.insert(0, '/home/bryanb/setigen')
import setigen as stg
import numpy as np
# Complex (IQ) sample rate of the RTL-SDR capture and the corresponding
# real sample rate used by the setigen antenna.
iq_sample_rate = 2.048e6
sample_rate = iq_sample_rate * 2
# Center frequency of the recorded band.
carrier_freq = 90.3e6
# Single-polarization antenna whose band starts half the IQ bandwidth
# below the carrier.
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
                            fch1=carrier_freq - iq_sample_rate / 2,
                            ascending=True,
                            num_pols=1)
# Filterbank / quantization configuration.
num_taps = 8
num_branches = 64
fftlength = 1024
int_factor = 1
digitizer = stg.voltage.ComplexQuantizer(target_fwhm=32,
                                         num_bits=8)
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
                                             num_branches=num_branches)
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=32,
                                          num_bits=8)
# Record only the lower half of the coarse channels.
start_chan = 0
num_chans = num_branches // 2
rvb = stg.voltage.RawVoltageBackend(antenna,
                                  digitizer=digitizer,
                                  filterbank=filterbank,
                                  requantizer=requantizer,
                                  start_chan=start_chan,
                                  num_chans=num_chans,
                                  block_size=33554432,
                                  blocks_per_file=128,
                                  num_subblocks=32)
# Open the raw interleaved unsigned 8-bit IQ capture for streaming.
f = open('rtlsdr.dat', 'rb')
def iq_signal(f):
    """Return a signal function that streams interleaved I/Q samples from
    the open binary file handle ``f``.

    The returned callable maps an array of sample times ``ts`` to complex
    voltages: unsigned 8-bit I/Q pairs are read, centered around zero,
    combined into complex samples, and mixed up by half the IQ bandwidth.
    """
    def sub_func(ts):
        num_samples = len(ts)
        iq = f.read(num_samples)
        # BUGFIX: 'xp' was undefined (NameError at runtime); this script
        # only imports numpy as np, so use np throughout.
        iq = np.array(np.frombuffer(iq, dtype=np.uint8), dtype='float')
        iq -= 128  # center unsigned 8-bit samples around zero
        iq = iq[0::2] + iq[1::2] * 1j
        # IQ bandwidth is iq_sample_rate, so shift by half
        shift_freq = iq_sample_rate / 2
        iq_shifted = iq * np.exp(1j * 2 * np.pi * shift_freq * ts[0::2])
        # Only every other output sample carries data; the rest stay zero.
        v = np.zeros(num_samples, dtype='complex')
        v[0::2] = iq_shifted
        return v
    return sub_func
# Stream the recorded IQ data through the antenna's x polarization.
antenna.x.add_signal(iq_signal(f))
# Add synthetic Gaussian noise and, below, an injected drifting tone.
antenna.x.add_noise(0, 1)
# Signal level corresponding to SNR 3e4 over a 60 s observation.
level = stg.voltage.get_level(snr=3e4,
                              raw_voltage_backend=rvb,
                              fftlength=fftlength,
                              obs_length=60,
                              length_mode='obs_length')
# Reference drift rate used to scale the injected signal's level for
# its actual drift.
unit_drift_rate = stg.voltage.get_unit_drift_rate(rvb,
                                                fftlength=fftlength,
                                                int_factor=int_factor)
# Update noise stats, and reset the file pointer so recording starts
# from the beginning of the capture.
antenna.x.update_noise()
f.seek(0)
# Inject a constant-amplitude tone at 90.9 MHz drifting 0.2 MHz/minute;
# the level is scaled by the measured noise std and the drift correction.
antenna.x.add_constant_signal(f_start=90.9e6,
                              drift_rate=0.2e6/60,
                              level=antenna.x.get_total_noise_std()*level*np.sqrt(0.2e6/60/unit_drift_rate))
# Write 60 seconds of raw voltage data to files with stem 'test'.
rvb.record(raw_file_stem='test',
           obs_length=60,
           length_mode='obs_length',
           header_dict={},
           verbose=False)
# Remember to close the file!
f.close()
| [
"setigen.voltage.ComplexQuantizer",
"setigen.voltage.get_unit_drift_rate",
"numpy.frombuffer",
"sys.path.insert",
"setigen.voltage.RawVoltageBackend",
"setigen.voltage.Antenna",
"setigen.voltage.get_level",
"setigen.voltage.PolyphaseFilterbank",
"numpy.sqrt"
] | [((11, 53), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/bryanb/setigen"""'], {}), "(0, '/home/bryanb/setigen')\n", (26, 53), False, 'import sys\n'), ((189, 306), 'setigen.voltage.Antenna', 'stg.voltage.Antenna', ([], {'sample_rate': 'sample_rate', 'fch1': '(carrier_freq - iq_sample_rate / 2)', 'ascending': '(True)', 'num_pols': '(1)'}), '(sample_rate=sample_rate, fch1=carrier_freq - \n iq_sample_rate / 2, ascending=True, num_pols=1)\n', (208, 306), True, 'import setigen as stg\n'), ((471, 527), 'setigen.voltage.ComplexQuantizer', 'stg.voltage.ComplexQuantizer', ([], {'target_fwhm': '(32)', 'num_bits': '(8)'}), '(target_fwhm=32, num_bits=8)\n', (499, 527), True, 'import setigen as stg\n'), ((583, 660), 'setigen.voltage.PolyphaseFilterbank', 'stg.voltage.PolyphaseFilterbank', ([], {'num_taps': 'num_taps', 'num_branches': 'num_branches'}), '(num_taps=num_taps, num_branches=num_branches)\n', (614, 660), True, 'import setigen as stg\n'), ((722, 778), 'setigen.voltage.ComplexQuantizer', 'stg.voltage.ComplexQuantizer', ([], {'target_fwhm': '(32)', 'num_bits': '(8)'}), '(target_fwhm=32, num_bits=8)\n', (750, 778), True, 'import setigen as stg\n'), ((874, 1095), 'setigen.voltage.RawVoltageBackend', 'stg.voltage.RawVoltageBackend', (['antenna'], {'digitizer': 'digitizer', 'filterbank': 'filterbank', 'requantizer': 'requantizer', 'start_chan': 'start_chan', 'num_chans': 'num_chans', 'block_size': '(33554432)', 'blocks_per_file': '(128)', 'num_subblocks': '(32)'}), '(antenna, digitizer=digitizer, filterbank=\n filterbank, requantizer=requantizer, start_chan=start_chan, num_chans=\n num_chans, block_size=33554432, blocks_per_file=128, num_subblocks=32)\n', (903, 1095), True, 'import setigen as stg\n'), ((2094, 2220), 'setigen.voltage.get_level', 'stg.voltage.get_level', ([], {'snr': '(30000.0)', 'raw_voltage_backend': 'rvb', 'fftlength': 'fftlength', 'obs_length': '(60)', 'length_mode': '"""obs_length"""'}), "(snr=30000.0, raw_voltage_backend=rvb, fftlength=\n 
fftlength, obs_length=60, length_mode='obs_length')\n", (2115, 2220), True, 'import setigen as stg\n'), ((2353, 2438), 'setigen.voltage.get_unit_drift_rate', 'stg.voltage.get_unit_drift_rate', (['rvb'], {'fftlength': 'fftlength', 'int_factor': 'int_factor'}), '(rvb, fftlength=fftlength, int_factor=int_factor\n )\n', (2384, 2438), True, 'import setigen as stg\n'), ((1601, 1634), 'numpy.frombuffer', 'np.frombuffer', (['iq'], {'dtype': 'np.uint8'}), '(iq, dtype=np.uint8)\n', (1614, 1634), True, 'import numpy as np\n'), ((2788, 2828), 'numpy.sqrt', 'np.sqrt', (['(200000.0 / 60 / unit_drift_rate)'], {}), '(200000.0 / 60 / unit_drift_rate)\n', (2795, 2828), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import sys
from tensorflow.examples.tutorials.mnist import input_data
sys.path.append("/data")
def test_1():
    """Smoke test: run a trivial constant op through a TF session."""
    session = tf.Session()
    greeting = tf.constant('Hello!!!')
    session.run(greeting)
def simple_mnist():
    """Train a single-layer softmax classifier on MNIST with SGD and
    print its test-set accuracy (TF1 graph-mode API)."""
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    # Model: predictions = softmax(x W + b)
    inputs = tf.placeholder("float", [None, 784])
    weights = tf.Variable(tf.zeros([784, 10]))
    bias = tf.Variable(tf.zeros([10]))
    predictions = tf.nn.softmax(tf.matmul(inputs, weights) + bias)
    labels = tf.placeholder("float", [None, 10])
    # Cross-entropy loss minimized with plain gradient descent.
    loss = -tf.reduce_sum(labels * tf.log(predictions))
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for _ in range(1000):
            xs, ys = mnist.train.next_batch(100)
            session.run(train_op, feed_dict={inputs: xs, labels: ys})
        # Evaluate accuracy over the full test set.
        hits = tf.equal(tf.argmax(predictions, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(hits, 'float'))
        print(session.run(accuracy,
                          feed_dict={inputs: mnist.test.images, labels: mnist.test.labels}))
def weight_variable(shape):
    """Create a weight Variable initialized from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution of x with filter W, stride 1, SAME padding."""
    unit_strides = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_strides, padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding."""
    window = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def deep_mnist():
    """Build a two-conv-layer MNIST classifier and train it under
    GradientSearcher instrumentation (searching inputs that minimize
    |softmax output|, i.e. probing the log(0) failure mode).

    NOTE(review): the training loop is ``while True``; termination is
    presumably handled inside gradient_search.check_time() -- confirm.
    """
    sess = tf.InteractiveSession()
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    x = tf.placeholder("float", [None, 784])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    # Conv block 1: 5x5 conv -> 32 maps, ReLU, 2x2 max-pool.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    # Conv block 2: 5x5 conv -> 64 maps, ReLU, 2x2 max-pool.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    # Fully connected layer on the flattened 7x7x64 feature map.
    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    keep_prob = tf.placeholder('float')
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Output softmax layer.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    y_ = tf.placeholder('float', [None, 10])
    # The softmax output is the quantity whose minimum is searched below;
    # tf.log(y_conv) is where a numerical failure (log 0) can occur.
    obj_var = y_conv
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    sess.run(tf.global_variables_initializer())
    """insert code"""
    from scripts.utils.tf_utils import GradientSearcher
    gradient_search = GradientSearcher(name="pytest_grist")
    obj_function = tf.reduce_min(tf.abs(obj_var))
    obj_grads = tf.gradients(obj_function, x)[0]
    batch = mnist.train.next_batch(50)
    batch_xs, batch_ys = batch[0], batch[1]
    max_val, min_val = np.max(batch_xs), np.min(batch_xs)
    gradient_search.build(batch_size=50, min_val=min_val, max_val=max_val)
    """insert code"""
    while True:
        """inserted code"""
        # The searcher perturbs the batch toward minimizing obj_function.
        monitor_vars = {'loss': cross_entropy, 'obj_function': obj_function, 'obj_grad': obj_grads}
        feed_dict = {x: batch_xs, y_: batch_ys, keep_prob: 0.5}
        batch_xs, scores_rank = gradient_search.update_batch_data(session=sess, monitor_var=monitor_vars,
                                                                  feed_dict=feed_dict, input_data=batch_xs, )
        """inserted code"""
        # One regular training step on the (possibly perturbed) batch.
        _, loss = sess.run([train_step, cross_entropy], feed_dict=feed_dict)
        """inserted code"""
        # Mix in fresh samples, keeping the highest-scoring old ones.
        new_batch = mnist.train.next_batch(50)
        new_batch_xs, new_batch_ys = new_batch[0], new_batch[1]
        new_data_dict = {'x': new_batch_xs, 'y': new_batch_ys}
        old_data_dict = {'x': batch_xs, 'y': batch_ys}
        batch_xs, batch_ys = gradient_search.switch_new_data(new_data_dict=new_data_dict,
                                                            old_data_dict=old_data_dict,
                                                            scores_rank=scores_rank)
        gradient_search.check_time()
        """inserted code"""
# Script entry point: runs the convolutional model; the single-layer
# softmax variant can be enabled instead by uncommenting simple_mnist().
if __name__ == "__main__":
    # simple_mnist()
    deep_mnist()
| [
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"tensorflow.truncated_normal",
"sys.path.append",
"tensorflow.abs",
"tensorflow.placeholder",
"tensorflow.cast",
"numpy.max",
"tensorflow.gradients",
"tensorflow.global_... | [((114, 138), 'sys.path.append', 'sys.path.append', (['"""/data"""'], {}), "('/data')\n", (129, 138), False, 'import sys\n'), ((166, 178), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (176, 178), True, 'import tensorflow as tf\n'), ((190, 213), 'tensorflow.constant', 'tf.constant', (['"""Hello!!!"""'], {}), "('Hello!!!')\n", (201, 213), True, 'import tensorflow as tf\n'), ((267, 321), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': '(True)'}), "('MNIST_data/', one_hot=True)\n", (292, 321), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((330, 366), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 784]'], {}), "('float', [None, 784])\n", (344, 366), True, 'import tensorflow as tf\n'), ((496, 531), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 10]'], {}), "('float', [None, 10])\n", (510, 531), True, 'import tensorflow as tf\n'), ((675, 708), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (706, 708), True, 'import tensorflow as tf\n'), ((1216, 1254), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1235, 1254), True, 'import tensorflow as tf\n'), ((1266, 1286), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1277, 1286), True, 'import tensorflow as tf\n'), ((1329, 1358), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (1340, 1358), True, 'import tensorflow as tf\n'), ((1370, 1390), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1381, 1390), True, 'import tensorflow as tf\n'), ((1422, 1478), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", (1434, 1478), True, 'import 
tensorflow as tf\n'), ((1513, 1588), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (1527, 1588), True, 'import tensorflow as tf\n'), ((1620, 1643), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1641, 1643), True, 'import tensorflow as tf\n'), ((1657, 1711), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data/"""'], {'one_hot': '(True)'}), "('MNIST_data/', one_hot=True)\n", (1682, 1711), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((1720, 1756), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 784]'], {}), "('float', [None, 784])\n", (1734, 1756), True, 'import tensorflow as tf\n'), ((1771, 1801), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 28, 28, 1]'], {}), '(x, [-1, 28, 28, 1])\n', (1781, 1801), True, 'import tensorflow as tf\n'), ((2260, 2297), 'tensorflow.reshape', 'tf.reshape', (['h_pool2', '[-1, 7 * 7 * 64]'], {}), '(h_pool2, [-1, 7 * 7 * 64])\n', (2270, 2297), True, 'import tensorflow as tf\n'), ((2378, 2401), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (2392, 2401), True, 'import tensorflow as tf\n'), ((2419, 2450), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['h_fc1', 'keep_prob'], {}), '(h_fc1, keep_prob)\n', (2432, 2450), True, 'import tensorflow as tf\n'), ((2599, 2634), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 10]'], {}), "('float', [None, 10])\n", (2613, 2634), True, 'import tensorflow as tf\n'), ((3074, 3111), 'scripts.utils.tf_utils.GradientSearcher', 'GradientSearcher', ([], {'name': '"""pytest_grist"""'}), "(name='pytest_grist')\n", (3090, 3111), False, 'from scripts.utils.tf_utils import GradientSearcher\n'), ((387, 406), 'tensorflow.zeros', 'tf.zeros', (['[784, 10]'], {}), '([784, 10])\n', 
(395, 406), True, 'import tensorflow as tf\n'), ((428, 442), 'tensorflow.zeros', 'tf.zeros', (['[10]'], {}), '([10])\n', (436, 442), True, 'import tensorflow as tf\n'), ((718, 730), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (728, 730), True, 'import tensorflow as tf\n'), ((2816, 2836), 'tensorflow.argmax', 'tf.argmax', (['y_conv', '(1)'], {}), '(y_conv, 1)\n', (2825, 2836), True, 'import tensorflow as tf\n'), ((2838, 2854), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (2847, 2854), True, 'import tensorflow as tf\n'), ((2886, 2922), 'tensorflow.cast', 'tf.cast', (['correct_prediction', '"""float"""'], {}), "(correct_prediction, 'float')\n", (2893, 2922), True, 'import tensorflow as tf\n'), ((2938, 2971), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2969, 2971), True, 'import tensorflow as tf\n'), ((3145, 3160), 'tensorflow.abs', 'tf.abs', (['obj_var'], {}), '(obj_var)\n', (3151, 3160), True, 'import tensorflow as tf\n'), ((3178, 3207), 'tensorflow.gradients', 'tf.gradients', (['obj_function', 'x'], {}), '(obj_function, x)\n', (3190, 3207), True, 'import tensorflow as tf\n'), ((3317, 3333), 'numpy.max', 'np.max', (['batch_xs'], {}), '(batch_xs)\n', (3323, 3333), True, 'import numpy as np\n'), ((3335, 3351), 'numpy.min', 'np.min', (['batch_xs'], {}), '(batch_xs)\n', (3341, 3351), True, 'import numpy as np\n'), ((466, 481), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (475, 481), True, 'import tensorflow as tf\n'), ((600, 639), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (633, 639), True, 'import tensorflow as tf\n'), ((974, 989), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (983, 989), True, 'import tensorflow as tf\n'), ((991, 1007), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (1000, 1007), True, 'import tensorflow as tf\n'), ((1043, 1079), 
'tensorflow.cast', 'tf.cast', (['correct_prediction', '"""float"""'], {}), "(correct_prediction, 'float')\n", (1050, 1079), True, 'import tensorflow as tf\n'), ((2321, 2351), 'tensorflow.matmul', 'tf.matmul', (['h_pool2_flat', 'W_fc1'], {}), '(h_pool2_flat, W_fc1)\n', (2330, 2351), True, 'import tensorflow as tf\n'), ((2551, 2579), 'tensorflow.matmul', 'tf.matmul', (['h_fc1_drop', 'W_fc2'], {}), '(h_fc1_drop, W_fc2)\n', (2560, 2579), True, 'import tensorflow as tf\n'), ((2729, 2759), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(0.0001)'], {}), '(0.0001)\n', (2751, 2759), True, 'import tensorflow as tf\n'), ((572, 581), 'tensorflow.log', 'tf.log', (['y'], {}), '(y)\n', (578, 581), True, 'import tensorflow as tf\n'), ((2696, 2710), 'tensorflow.log', 'tf.log', (['y_conv'], {}), '(y_conv)\n', (2702, 2710), True, 'import tensorflow as tf\n')] |
from pycocotools.coco import COCO
from scipy import ndimage
from tqdm import tqdm
import numpy as np
import os
import pylab
import sys
pylab.rcParams['figure.figsize'] = (8.0, 10.0)  # default figure size in inches for all plots below
from PIL import Image, ImagePalette # For indexed images
import matplotlib # For Matlab's color maps
import argparse
def cocoSegmentationToSegmentationMap(coco, imgId, catId=None, checkUniquePixelLabel=True, includeCrowd=False):
    '''
    Convert COCO GT or results for a single image to per-annotation segmentation maps.

    :param coco: an instance of the COCO API (ground-truth or result)
    :param imgId: the id of the COCO image
    :param catId: (optional) if given, keep only annotations of this category id
    :param checkUniquePixelLabel: (optional) whether every pixel can have at most one label
    :param includeCrowd: whether to include 'crowd' thing annotations as 'other' (or void)
    :return: (segmentations, boxes) - one [h x w] label map and one [4 x 1]
             [x0, y0, x1, y1] box per kept annotation
    '''
    # Init
    curImg = coco.imgs[imgId]
    imageSize = (curImg['height'], curImg['width'])

    # Get annotations of the current image (may be empty).
    # (A dead pre-computation of imgAnnots via coco.anns.values() was removed:
    # it was immediately overwritten by loadAnns below.)
    if includeCrowd:
        annIds = coco.getAnnIds(imgIds=imgId)
    else:
        annIds = coco.getAnnIds(imgIds=imgId, iscrowd=False)
    imgAnnots = coco.loadAnns(annIds)

    boxes = []
    segmentations = []
    for a in range(len(imgAnnots)):
        newLabel = imgAnnots[a]['category_id']
        if catId is not None and catId != newLabel:
            continue
        labelMask = coco.annToMask(imgAnnots[a]) == 1
        # Skip annotations covering less than 1% of the image area
        if np.sum(labelMask * 1.) / np.size(labelMask) < 0.01:
            continue
        labelMap = np.zeros(imageSize)
        # Convert COCO [x, y, w, h] to [x0, y0, x1, y1]
        box = [int(c) for c in imgAnnots[a]['bbox']]
        box[2] += box[0]
        box[3] += box[1]
        boxes.append(np.expand_dims(box, axis=1))
        # NOTE(review): labelMap is freshly zeroed above, so this check can never
        # fire here; kept for parity with the upstream COCO-Stuff code it came from.
        if checkUniquePixelLabel and (np.logical_and(labelMap[labelMask] != newLabel,
                                                     labelMap[labelMask] != 0)).any():
            raise Exception('Error: Some pixels have more than one label (image %d)!' % (imgId))
        labelMap[labelMask] = newLabel
        segmentations.append(labelMap)

    return segmentations, boxes
def cocoSegmentationToPng(coco, imgId, pngPath, includeCrowd=False):
    '''
    Convert COCO GT or results for a single image to a segmentation map and write it to disk.

    :param coco: an instance of the COCO API (ground-truth or result)
    :param imgId: the COCO id of the image (last part of the file name)
    :param pngPath: the path of the .png file
    :param includeCrowd: whether to include 'crowd' thing annotations as 'other' (or void)
    :return: None
    '''
    # Create per-annotation label maps and merge them into a single map.
    # BUGFIX: the original passed the *list* of maps straight to Image.fromarray
    # (a TypeError) and cast labels to int8, which wraps category ids >= 128.
    labelMaps, boxes = cocoSegmentationToSegmentationMap(coco, imgId, includeCrowd=includeCrowd)
    if labelMaps:
        labelMap = np.maximum.reduce(labelMaps).astype(np.uint8)
    else:
        # No annotations: emit an all-background map of the right size.
        curImg = coco.imgs[imgId]
        labelMap = np.zeros((curImg['height'], curImg['width']), np.uint8)

    # Get color map and convert to PIL's format
    cmap = getCMap()
    cmap = (cmap * 255).astype(int)
    # uint8, not int8: palette entries range up to 255
    padding = np.zeros((256 - cmap.shape[0], 3), np.uint8)
    cmap = np.vstack((cmap, padding))
    cmap = cmap.reshape((-1))
    assert len(cmap) == 768, 'Error: Color map must have exactly 256*3 elements!'

    # Write indexed ('P' mode) png with the palette attached
    png = Image.fromarray(labelMap).convert('P')
    png.putpalette(cmap)
    png.save(pngPath, format='PNG')
def cocoSegmentationToPngBinary(coco, imgId, npyPath, catId, includeCrowd=False):
    '''
    Extract filtered binary object masks for one image/category and write them as PNGs.

    A mask is kept only if it is a single connected component, covers at least
    1% of the image, and has little foreground in the 5 outermost rows/columns
    (i.e. does not touch the image border). Each kept mask is written as
    <npyPath>_<k>.png with pixel values {0, 255}.

    :param coco: an instance of the COCO API (ground-truth or result)
    :param imgId: the COCO id of the image (last part of the file name)
    :param npyPath: output path prefix for the .png files
    :param catId: category id whose annotations are extracted
    :param includeCrowd: whether to include 'crowd' thing annotations as 'other' (or void)
    :return: None
    '''
    labelMaps, boxes = cocoSegmentationToSegmentationMap(coco, imgId, catId=catId, includeCrowd=includeCrowd)
    # BUGFIX: compare against 0 *before* any narrowing cast. The original cast
    # the maps to int8 first, which wraps category ids >= 128 to negative values
    # and would silently drop those masks.
    binary = [(labelmap > 0).astype(np.int8) for labelmap in labelMaps]

    counter = 0
    for bj in binary:
        # Keep only single connected components
        labeled, nr_objects = ndimage.label(bj)
        if nr_objects > 1:
            continue
        # Discard tiny masks (< 1% of the image area)
        if np.sum(bj) < 0.01 * np.size(bj):
            continue
        # Border test: max foreground count within the 5 outermost columns/rows
        edg1 = np.sum(bj, axis=0)[:5].max()
        edg2 = np.sum(bj, axis=0)[-5:].max()
        edg3 = np.sum(bj, axis=1)[:5].max()
        edg4 = np.sum(bj, axis=1)[-5:].max()
        if edg1 > 0.01 * np.size(bj, axis=0):
            continue
        if edg2 > 0.01 * np.size(bj, axis=0):
            continue
        if edg3 > 0.01 * np.size(bj, axis=1):
            continue
        if edg4 > 0.01 * np.size(bj, axis=1):
            continue
        counter += 1
        Image.fromarray((bj * 255).astype(np.uint8)).save(npyPath + '_%d.png' % counter)
def getCMap(stuffStartId=92, stuffEndId=182, cmapName='jet', addThings=True, addUnlabeled=True, addOther=True):
    '''
    Build the color map for the COCO Stuff Segmentation Challenge classes.

    :param stuffStartId: (optional) index where stuff classes start
    :param stuffEndId: (optional) index where stuff classes end
    :param cmapName: (optional) Matlab's name of the color map
    :param addThings: (optional) whether to add a color for the 91 thing classes
    :param addUnlabeled: (optional) whether to add a color for the 'unlabeled' class
    :param addOther: (optional) whether to add a color for the 'other' class
    :return: cmap - [c, 3] a color map for c colors where the columns indicate the RGB values
    '''
    labelCount = stuffEndId - stuffStartId + 1

    # Sample the requested Matlab color map, keeping only RGB (drop alpha)
    generator = matplotlib.cm.get_cmap(cmapName, labelCount)
    colors = generator(np.arange(labelCount))[:, 0:3]

    # Darken the stuff colors by scaling V in HSV space
    colors = colors.reshape((-1, 1, 3))
    hsv = matplotlib.colors.rgb_to_hsv(colors)
    hsv[:, 0, 2] = hsv[:, 0, 2] * 0.7
    colors = matplotlib.colors.hsv_to_rgb(hsv).reshape((-1, 3))

    # Shuffle deterministically (seed 42) so similarly-named classes get
    # dissimilar colors, restoring the global RNG state afterwards
    saved_state = np.random.get_state()
    np.random.seed(42)
    perm = np.random.permutation(labelCount)
    np.random.set_state(saved_state)
    colors = colors[perm, :]

    # Prepend black rows for the thing classes
    if addThings:
        colors = np.vstack((np.zeros((stuffStartId - 1, 3)), colors))
    # Prepend black for 'unlabeled'
    if addUnlabeled:
        colors = np.vstack(((0.0, 0.0, 0.0), colors))
    # Append yellow/orange for 'other'
    if addOther:
        colors = np.vstack((colors, (1.0, 0.843, 0.0)))

    return colors
# Command-line driver: extract clean binary masks for one COCO category.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--dataset', required=True,
    help='name of the class used')
parser.add_argument(
    '--path', required=True,
    help='directory containing COCO dataset')
parser.add_argument(
    '--mode', required=True,
    help='train or val')
args = parser.parse_args()

# Load the instance annotations for the requested split
dataDir = args.path
dataType = '%s2017' % args.mode
annFile = '{}/annotations/instances_{}.json'.format(dataDir, dataType)
coco = COCO(annFile)

# Report the available categories / supercategories
cats = coco.loadCats(coco.getCatIds())
nms = [cat['name'] for cat in cats]
print('COCO categories: \n{}\n'.format(' '.join(nms)))
nms = set(cat['supercategory'] for cat in cats)
print('COCO supercategories: \n{}'.format(' '.join(nms)))

cat_names = [args.dataset]
catIds = coco.getCatIds(catNms=cat_names)

# Output layout: dataset/<mode>/<category>/<imageid>_<k>.png
out_dir = os.path.join('dataset', args.mode)
if not os.path.exists(out_dir):
    print('Creating %s' % out_dir)
    os.makedirs(out_dir)

for catId, name in zip(catIds, cat_names):
    imgIds = coco.getImgIds(catIds=catId)
    print('Extracting category %s' % name)
    cat_filepath = os.path.join(out_dir, name)
    if not os.path.exists(cat_filepath):
        os.mkdir(cat_filepath)
    print('Saving to directory %s' % cat_filepath)
    for imgId in tqdm(imgIds):
        outfile = os.path.join(cat_filepath, '%012d' % imgId)
        cocoSegmentationToPngBinary(coco, imgId, outfile, catId)
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"matplotlib.cm.get_cmap",
"numpy.sum",
"numpy.random.set_state",
"numpy.arange",
"os.path.join",
"os.path.exists",
"tqdm.tqdm",
"numpy.size",
"matplotlib.colors.hsv_to_rgb",
"numpy.random.permutation",
"numpy.vstack",
"matplotli... | [((7060, 7085), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7083, 7085), False, 'import argparse\n'), ((7499, 7512), 'pycocotools.coco.COCO', 'COCO', (['annFile'], {}), '(annFile)\n', (7503, 7512), False, 'from pycocotools.coco import COCO\n'), ((7851, 7883), 'os.path.join', 'os.path.join', (['out_dir', 'args.mode'], {}), '(out_dir, args.mode)\n', (7863, 7883), False, 'import os\n'), ((995, 1014), 'numpy.zeros', 'np.zeros', (['imageSize'], {}), '(imageSize)\n', (1003, 1014), True, 'import numpy as np\n'), ((3175, 3218), 'numpy.zeros', 'np.zeros', (['(256 - cmap.shape[0], 3)', 'np.int8'], {}), '((256 - cmap.shape[0], 3), np.int8)\n', (3183, 3218), True, 'import numpy as np\n'), ((3228, 3254), 'numpy.vstack', 'np.vstack', (['(cmap, padding)'], {}), '((cmap, padding))\n', (3237, 3254), True, 'import numpy as np\n'), ((6005, 6049), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['cmapName', 'labelCount'], {}), '(cmapName, labelCount)\n', (6027, 6049), False, 'import matplotlib\n'), ((6232, 6266), 'matplotlib.colors.rgb_to_hsv', 'matplotlib.colors.rgb_to_hsv', (['cmap'], {}), '(cmap)\n', (6260, 6266), False, 'import matplotlib\n'), ((6316, 6349), 'matplotlib.colors.hsv_to_rgb', 'matplotlib.colors.hsv_to_rgb', (['hsv'], {}), '(hsv)\n', (6344, 6349), False, 'import matplotlib\n'), ((6473, 6494), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (6492, 6494), True, 'import numpy as np\n'), ((6499, 6517), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6513, 6517), True, 'import numpy as np\n'), ((6529, 6562), 'numpy.random.permutation', 'np.random.permutation', (['labelCount'], {}), '(labelCount)\n', (6550, 6562), True, 'import numpy as np\n'), ((6567, 6591), 'numpy.random.set_state', 'np.random.set_state', (['st0'], {}), '(st0)\n', (6586, 6591), True, 'import numpy as np\n'), ((7891, 7914), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (7905, 7914), False, 'import 
os\n'), ((7955, 7975), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (7966, 7975), False, 'import os\n'), ((8124, 8151), 'os.path.join', 'os.path.join', (['out_dir', 'name'], {}), '(out_dir, name)\n', (8136, 8151), False, 'import os\n'), ((8292, 8304), 'tqdm.tqdm', 'tqdm', (['imgIds'], {}), '(imgIds)\n', (8296, 8304), False, 'from tqdm import tqdm\n'), ((1826, 1845), 'numpy.zeros', 'np.zeros', (['imageSize'], {}), '(imageSize)\n', (1834, 1845), True, 'import numpy as np\n'), ((4421, 4438), 'scipy.ndimage.label', 'ndimage.label', (['bj'], {}), '(bj)\n', (4434, 4438), False, 'from scipy import ndimage\n'), ((6069, 6090), 'numpy.arange', 'np.arange', (['labelCount'], {}), '(labelCount)\n', (6078, 6090), True, 'import numpy as np\n'), ((6718, 6749), 'numpy.zeros', 'np.zeros', (['(stuffStartId - 1, 3)'], {}), '((stuffStartId - 1, 3))\n', (6726, 6749), True, 'import numpy as np\n'), ((6765, 6797), 'numpy.vstack', 'np.vstack', (['(thingsPadding, cmap)'], {}), '((thingsPadding, cmap))\n', (6774, 6797), True, 'import numpy as np\n'), ((6879, 6913), 'numpy.vstack', 'np.vstack', (['((0.0, 0.0, 0.0), cmap)'], {}), '(((0.0, 0.0, 0.0), cmap))\n', (6888, 6913), True, 'import numpy as np\n'), ((6995, 7031), 'numpy.vstack', 'np.vstack', (['(cmap, (1.0, 0.843, 0.0))'], {}), '((cmap, (1.0, 0.843, 0.0)))\n', (7004, 7031), True, 'import numpy as np\n'), ((8163, 8191), 'os.path.exists', 'os.path.exists', (['cat_filepath'], {}), '(cat_filepath)\n', (8177, 8191), False, 'import os\n'), ((8201, 8223), 'os.mkdir', 'os.mkdir', (['cat_filepath'], {}), '(cat_filepath)\n', (8209, 8223), False, 'import os\n'), ((8324, 8367), 'os.path.join', 'os.path.join', (['cat_filepath', "('%012d' % imgId)"], {}), "(cat_filepath, '%012d' % imgId)\n", (8336, 8367), False, 'import os\n'), ((2017, 2044), 'numpy.expand_dims', 'np.expand_dims', (['box'], {'axis': '(1)'}), '(box, axis=1)\n', (2031, 2044), True, 'import numpy as np\n'), ((3402, 3427), 'PIL.Image.fromarray', 'Image.fromarray', 
(['labelMap'], {}), '(labelMap)\n', (3417, 3427), False, 'from PIL import Image, ImagePalette\n'), ((4496, 4506), 'numpy.sum', 'np.sum', (['bj'], {}), '(bj)\n', (4502, 4506), True, 'import numpy as np\n'), ((1736, 1759), 'numpy.sum', 'np.sum', (['(labelMask * 1.0)'], {}), '(labelMask * 1.0)\n', (1742, 1759), True, 'import numpy as np\n'), ((1759, 1777), 'numpy.size', 'np.size', (['labelMask'], {}), '(labelMask)\n', (1766, 1777), True, 'import numpy as np\n'), ((4514, 4525), 'numpy.size', 'np.size', (['bj'], {}), '(bj)\n', (4521, 4525), True, 'import numpy as np\n'), ((4815, 4834), 'numpy.size', 'np.size', (['bj'], {'axis': '(0)'}), '(bj, axis=0)\n', (4822, 4834), True, 'import numpy as np\n'), ((4878, 4897), 'numpy.size', 'np.size', (['bj'], {'axis': '(0)'}), '(bj, axis=0)\n', (4885, 4897), True, 'import numpy as np\n'), ((4941, 4960), 'numpy.size', 'np.size', (['bj'], {'axis': '(1)'}), '(bj, axis=1)\n', (4948, 4960), True, 'import numpy as np\n'), ((5004, 5023), 'numpy.size', 'np.size', (['bj'], {'axis': '(1)'}), '(bj, axis=1)\n', (5011, 5023), True, 'import numpy as np\n'), ((5113, 5132), 'PIL.Image.fromarray', 'Image.fromarray', (['bj'], {}), '(bj)\n', (5128, 5132), False, 'from PIL import Image, ImagePalette\n'), ((2086, 2159), 'numpy.logical_and', 'np.logical_and', (['(labelMap[labelMask] != newLabel)', '(labelMap[labelMask] != 0)'], {}), '(labelMap[labelMask] != newLabel, labelMap[labelMask] != 0)\n', (2100, 2159), True, 'import numpy as np\n'), ((4630, 4648), 'numpy.sum', 'np.sum', (['bj'], {'axis': '(0)'}), '(bj, axis=0)\n', (4636, 4648), True, 'import numpy as np\n'), ((4674, 4692), 'numpy.sum', 'np.sum', (['bj'], {'axis': '(0)'}), '(bj, axis=0)\n', (4680, 4692), True, 'import numpy as np\n'), ((4719, 4737), 'numpy.sum', 'np.sum', (['bj'], {'axis': '(1)'}), '(bj, axis=1)\n', (4725, 4737), True, 'import numpy as np\n'), ((4763, 4781), 'numpy.sum', 'np.sum', (['bj'], {'axis': '(1)'}), '(bj, axis=1)\n', (4769, 4781), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import unittest
import numpy as np
from panda import Panda
from panda.tests.safety import libpandasafety_py
from panda.tests.safety.common import make_msg
from panda.tests.safety.test_toyota import toyota_checksum
# Driver torque magnitude above which IPAS angle control is considered overridden
IPAS_OVERRIDE_THRESHOLD = 200

# Breakpoints and limits for the speed-dependent angle-rate interpolation
# (presumably speed in m/s — confirm against the safety firmware)
ANGLE_DELTA_BP = [0., 5., 15.]
ANGLE_DELTA_V = [5., .8, .15]  # windup limit
ANGLE_DELTA_VU = [5., 3.5, 0.4]  # unwind limit
def twos_comp(val, bits):
    """Return the unsigned two's-complement encoding of *val* in *bits* bits."""
    return val if val >= 0 else (1 << bits) + val
def sign(a):
    """Return 1 for positive *a*, otherwise -1 (note: sign(0) == -1)."""
    return 1 if a > 0 else -1
class TestToyotaSafety(unittest.TestCase):
    """Tests for the Toyota IPAS (angle-control) safety model.

    Helpers build raw CAN messages with make_msg(); tests feed them through
    the pandasafety rx/tx hooks and assert on get_controls_allowed().
    """

    @classmethod
    def setUp(cls):
        # Re-initialize the safety model before each test
        cls.safety = libpandasafety_py.libpandasafety
        cls.safety.set_safety_hooks(Panda.SAFETY_TOYOTA_IPAS, 66)
        cls.safety.init_tests_toyota()

    def _torque_driver_msg(self, torque):
        # Driver torque message (0x260): 16-bit two's-complement value with a
        # Toyota checksum in the top byte of RDHR
        to_send = make_msg(0, 0x260)
        t = twos_comp(torque, 16)
        to_send[0].RDLR = t | ((t & 0xFF) << 16)
        to_send[0].RDHR = to_send[0].RDHR | (toyota_checksum(to_send[0], 0x260, 8) << 24)
        return to_send

    def _torque_driver_msg_array(self, torque):
        # The model filters over consecutive samples, so send several in a row
        for i in range(6):
            self.safety.safety_rx_hook(self._torque_driver_msg(torque))

    def _angle_meas_msg(self, angle):
        # Measured steering angle message (0x25), 12-bit two's complement
        to_send = make_msg(0, 0x25)
        t = twos_comp(angle, 12)
        to_send[0].RDLR = ((t & 0xF00) >> 8) | ((t & 0xFF) << 8)
        return to_send

    def _angle_meas_msg_array(self, angle):
        for i in range(6):
            self.safety.safety_rx_hook(self._angle_meas_msg(angle))

    def _ipas_state_msg(self, state):
        # IPAS state message (0x262); low nibble carries the state (5 = override)
        to_send = make_msg(0, 0x262)
        to_send[0].RDLR = state & 0xF
        return to_send

    def _ipas_control_msg(self, angle, state):
        # note: we command 2/3 of the angle due to CAN conversion
        to_send = make_msg(0, 0x266)
        t = twos_comp(angle, 12)
        to_send[0].RDLR = ((t & 0xF00) >> 8) | ((t & 0xFF) << 8)
        to_send[0].RDLR |= ((state & 0xf) << 4)
        return to_send

    def _speed_msg(self, speed):
        # Vehicle speed message (0xB4); m/s input scaled by 100 * 3.6
        to_send = make_msg(0, 0xB4)
        speed = int(speed * 100 * 3.6)
        to_send[0].RDHR = ((speed & 0xFF) << 16) | (speed & 0xFF00)
        return to_send

    def test_ipas_override(self):
        ## angle control is not active
        self.safety.set_controls_allowed(1)

        # 3 consecutive msgs where driver exceeds threshold but angle_control isn't active
        self.safety.set_controls_allowed(1)
        self._torque_driver_msg_array(IPAS_OVERRIDE_THRESHOLD + 1)
        self.assertTrue(self.safety.get_controls_allowed())

        self._torque_driver_msg_array(-IPAS_OVERRIDE_THRESHOLD - 1)
        self.assertTrue(self.safety.get_controls_allowed())

        # ipas state is override
        self.safety.safety_rx_hook(self._ipas_state_msg(5))
        self.assertTrue(self.safety.get_controls_allowed())

        ## now angle control is active
        self.safety.safety_tx_hook(self._ipas_control_msg(0, 0))
        self.safety.safety_rx_hook(self._ipas_state_msg(0))

        # 3 consecutive msgs where driver does exceed threshold
        self.safety.set_controls_allowed(1)
        self._torque_driver_msg_array(IPAS_OVERRIDE_THRESHOLD + 1)
        self.assertFalse(self.safety.get_controls_allowed())

        self.safety.set_controls_allowed(1)
        self._torque_driver_msg_array(-IPAS_OVERRIDE_THRESHOLD - 1)
        self.assertFalse(self.safety.get_controls_allowed())

        # ipas state is override and torque isn't overriding any more
        self.safety.set_controls_allowed(1)
        self._torque_driver_msg_array(0)
        self.safety.safety_rx_hook(self._ipas_state_msg(5))
        self.assertFalse(self.safety.get_controls_allowed())

        # 3 consecutive msgs where driver does not exceed threshold and
        # ipas state is not override
        self.safety.set_controls_allowed(1)
        self.safety.safety_rx_hook(self._ipas_state_msg(0))
        self.assertTrue(self.safety.get_controls_allowed())

        self._torque_driver_msg_array(IPAS_OVERRIDE_THRESHOLD)
        self.assertTrue(self.safety.get_controls_allowed())

        self._torque_driver_msg_array(-IPAS_OVERRIDE_THRESHOLD)
        self.assertTrue(self.safety.get_controls_allowed())

        # reset no angle control at the end of the test
        self.safety.reset_angle_control()

    def test_angle_cmd_when_disabled(self):
        self.safety.set_controls_allowed(0)

        # test angle cmd too far from actual
        angle_refs = [-10, 10]
        deltas = list(range(-2, 3))
        expected_results = [False, True, True, True, False]

        for a in angle_refs:
            self._angle_meas_msg_array(a)
            for i, d in enumerate(deltas):
                self.assertEqual(expected_results[i], self.safety.safety_tx_hook(self._ipas_control_msg(a + d, 1)))

        # test ipas state cmd enabled
        self._angle_meas_msg_array(0)
        self.assertEqual(0, self.safety.safety_tx_hook(self._ipas_control_msg(0, 3)))

        # reset no angle control at the end of the test
        self.safety.reset_angle_control()

    def test_angle_cmd_when_enabled(self):
        # ipas angle cmd should pass through when controls are enabled
        self.safety.set_controls_allowed(1)
        self._angle_meas_msg_array(0)
        self.safety.safety_rx_hook(self._speed_msg(0.1))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(0, 1)))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(4, 1)))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(0, 3)))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(-4, 3)))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(-8, 3)))

        # reset no angle control at the end of the test
        self.safety.reset_angle_control()

    def test_angle_cmd_rate_when_disabled(self):
        # as long as the command is close to the measured, no rate limit is enforced when
        # controls are disabled
        self.safety.set_controls_allowed(0)
        self.safety.safety_rx_hook(self._angle_meas_msg(0))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(0, 1)))
        self.safety.safety_rx_hook(self._angle_meas_msg(100))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(100, 1)))
        self.safety.safety_rx_hook(self._angle_meas_msg(-100))
        self.assertEqual(1, self.safety.safety_tx_hook(self._ipas_control_msg(-100, 1)))

        # reset no angle control at the end of the test
        self.safety.reset_angle_control()

    def test_angle_cmd_rate_when_enabled(self):
        # when controls are allowed, angle cmd rate limit is enforced
        # test 1: no limitations if we stay within limits
        speeds = [0., 1., 5., 10., 15., 100.]
        angles = [-300, -100, -10, 0, 10, 100, 300]
        for a in angles:
            for s in speeds:
                # first test against false positives
                self._angle_meas_msg_array(a)
                self.safety.safety_tx_hook(self._ipas_control_msg(a, 1))
                self.safety.set_controls_allowed(1)
                self.safety.safety_rx_hook(self._speed_msg(s))
                # 2/3 factor mirrors the CAN angle conversion noted above
                max_delta_up = int(np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_V) * 2 / 3. + 1.)
                max_delta_down = int(np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_VU) * 2 / 3. + 1.)
                self.assertEqual(True, self.safety.safety_tx_hook(self._ipas_control_msg(a + sign(a) * max_delta_up, 1)))
                self.assertTrue(self.safety.get_controls_allowed())
                self.assertEqual(True, self.safety.safety_tx_hook(self._ipas_control_msg(a, 1)))
                self.assertTrue(self.safety.get_controls_allowed())
                self.assertEqual(True, self.safety.safety_tx_hook(self._ipas_control_msg(a - sign(a) * max_delta_down, 1)))
                self.assertTrue(self.safety.get_controls_allowed())

                # now inject too high rates
                self.assertEqual(False, self.safety.safety_tx_hook(self._ipas_control_msg(a + sign(a) *
                                                                                          (max_delta_up + 1), 1)))
                self.assertFalse(self.safety.get_controls_allowed())
                self.safety.set_controls_allowed(1)
                self.assertEqual(True, self.safety.safety_tx_hook(self._ipas_control_msg(a + sign(a) * max_delta_up, 1)))
                self.assertTrue(self.safety.get_controls_allowed())
                self.assertEqual(True, self.safety.safety_tx_hook(self._ipas_control_msg(a, 1)))
                self.assertTrue(self.safety.get_controls_allowed())
                self.assertEqual(False, self.safety.safety_tx_hook(self._ipas_control_msg(a - sign(a) *
                                                                                          (max_delta_down + 1), 1)))
                self.assertFalse(self.safety.get_controls_allowed())

        # reset no angle control at the end of the test
        self.safety.reset_angle_control()

    def test_angle_measured_rate(self):
        speeds = [0., 1., 5., 10., 15., 100.]
        angles = [-300, -100, -10, 0, 10, 100, 300]
        angles = [10]
        for a in angles:
            for s in speeds:
                self._angle_meas_msg_array(a)
                self.safety.safety_tx_hook(self._ipas_control_msg(a, 1))
                self.safety.set_controls_allowed(1)
                self.safety.safety_rx_hook(self._speed_msg(s))
                #max_delta_up = int(np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_V) * 2 / 3. + 1.)
                #max_delta_down = int(np.interp(s, ANGLE_DELTA_BP, ANGLE_DELTA_VU) * 2 / 3. + 1.)
                # a jump of 150 in the measured angle must disengage controls
                self.safety.safety_rx_hook(self._angle_meas_msg(a))
                self.assertTrue(self.safety.get_controls_allowed())
                self.safety.safety_rx_hook(self._angle_meas_msg(a + 150))
                self.assertFalse(self.safety.get_controls_allowed())

        # reset no angle control at the end of the test
        self.safety.reset_angle_control()
# Run the full safety test suite when executed directly
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.interp",
"panda.tests.safety.test_toyota.toyota_checksum",
"panda.tests.safety.common.make_msg"
] | [((9402, 9417), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9415, 9417), False, 'import unittest\n'), ((831, 847), 'panda.tests.safety.common.make_msg', 'make_msg', (['(0)', '(608)'], {}), '(0, 608)\n', (839, 847), False, 'from panda.tests.safety.common import make_msg\n'), ((1217, 1232), 'panda.tests.safety.common.make_msg', 'make_msg', (['(0)', '(37)'], {}), '(0, 37)\n', (1225, 1232), False, 'from panda.tests.safety.common import make_msg\n'), ((1523, 1539), 'panda.tests.safety.common.make_msg', 'make_msg', (['(0)', '(610)'], {}), '(0, 610)\n', (1531, 1539), False, 'from panda.tests.safety.common import make_msg\n'), ((1717, 1733), 'panda.tests.safety.common.make_msg', 'make_msg', (['(0)', '(614)'], {}), '(0, 614)\n', (1725, 1733), False, 'from panda.tests.safety.common import make_msg\n'), ((1936, 1952), 'panda.tests.safety.common.make_msg', 'make_msg', (['(0)', '(180)'], {}), '(0, 180)\n', (1944, 1952), False, 'from panda.tests.safety.common import make_msg\n'), ((966, 1001), 'panda.tests.safety.test_toyota.toyota_checksum', 'toyota_checksum', (['to_send[0]', '(608)', '(8)'], {}), '(to_send[0], 608, 8)\n', (981, 1001), False, 'from panda.tests.safety.test_toyota import toyota_checksum\n'), ((6790, 6833), 'numpy.interp', 'np.interp', (['s', 'ANGLE_DELTA_BP', 'ANGLE_DELTA_V'], {}), '(s, ANGLE_DELTA_BP, ANGLE_DELTA_V)\n', (6799, 6833), True, 'import numpy as np\n'), ((6878, 6922), 'numpy.interp', 'np.interp', (['s', 'ANGLE_DELTA_BP', 'ANGLE_DELTA_VU'], {}), '(s, ANGLE_DELTA_BP, ANGLE_DELTA_VU)\n', (6887, 6922), True, 'import numpy as np\n')] |
import os
import numpy as np
from polharmonic import util, ill, det, gaunt, shcoeffs
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'  # draw negative contour levels solid, not dashed
class Microscope:
    """
    A Microscope represents an experiment that collects a single frame of
    intensity data.

    A Microscope is specified by its illumination path (an Illuminator object),
    and its detection path (a Detector object).
    """
    def __init__(self, ill=ill.Illuminator(), det=det.Detector(),
                 color=(0,1,0.3)):
        self.ill = ill
        self.det = det

        # Response at the origin fixes the (n, j) coefficient dimensions
        self.h0 = self.ill.h()*self.det.h(0, 0, 0)
        self.n_len = self.h0.coeffs.shape[0]
        self.j_len = self.h0.coeffs.shape[1]
        # Normalization so that h/H equal 1 at the origin
        self.hnorm = (self.ill.h()*self.det.h(0, 0, 0)).coeffs[0, 0]
        self.Hnorm = (self.ill.H()*self.det.H(0, 0, 0)).coeffs[0, 0]
        self.color = color

    def h(self, x, y, z):
        # Normalized spatio-angular point response
        return self.ill.h()*self.det.h(x, y, z)/self.hnorm

    def H(self, x, y, z):
        # Normalized spatio-angular transfer function
        return self.ill.H()*self.det.H(x, y, z)/self.Hnorm

    def plot(self, func=None, filename='micro.pdf', n_px=2**6, plot_m=[-2, 0, 2],
             contours=True):
        """Plot the coefficient images of *func* (default: self.h) over a
        transverse plane.

        :param func: callable (x, y, z) -> coefficient object; defaults to self.h
        :param filename: output file for the figure
        :param n_px: number of pixels per image axis
        :param plot_m: m-orders to plot (NOTE(review): the plot_m=None branch
            below leaves n undefined and len(plot_m) would fail first — that
            code path looks broken and is kept only for reference)
        :param contours: whether to overlay contour lines
        """
        # BUGFIX: the old default (func=h) captured the *unbound* class
        # function, so the default call func(x, y, 0) raised a TypeError.
        if func is None:
            func = self.h

        print('Plotting: ' + filename)
        mlen = len(plot_m)

        # Calculate data for transverse plotting
        w = 2.05
        [X, Y] = np.meshgrid(np.linspace(-w, w, n_px),
                            np.linspace(-w, w, n_px))
        data = np.zeros((n_px, n_px, self.n_len, self.j_len))
        for index, x in np.ndenumerate(X):
            if self.det.optical_axis == [0,0,1]: # z-detection
                data[index[0], index[1], :, :] = (func(x, Y[index], 0)).coeffs
            elif self.det.optical_axis == [1,0,0]: # x-detection
                data[index[0], index[1], :, :] = (func(0, x, Y[index])).coeffs

        # Layout windows
        if plot_m is None:
            mcols = self.h0.rmax
        else:
            mcols = len(plot_m)
        cols = mcols*self.n_len
        inches = 1
        f, axs = plt.subplots(self.h0.rmax, cols,
                             figsize=(inches*cols, inches*self.h0.rmax),
                             gridspec_kw={'hspace':0.0, 'wspace':0.05})
        for index, ax in np.ndenumerate(axs):
            l = 2*index[0]
            if plot_m is None:
                m = index[1] - int(self.h0.mmax/2)
            else:
                plot_n = [-2, 0, 2]
                n_ind = index[1]//mlen
                n = plot_n[n_ind]
                m = plot_m[index[1]%mlen]
            j = util.lm2j(l, m)
            ax.axis('off')
            # Row/column labels only on the outer edges of the grid
            if index[0] == 0:
                ax.annotate('$m='+str(m)+'$', xy=(1, 1), xytext=(0.5, 1.15),
                            textcoords='axes fraction', ha='center', va='center')
                if index[1] % mlen == np.floor(mlen/2):
                    ax.annotate('$n='+str(n)+'$', xy=(1, 1), xytext=(0.5, 1.4),
                                textcoords='axes fraction', ha='center', va='center')
                if index[1] % mlen == mlen - 1 and index[1] != axs.shape[1] - 1:
                    ax.annotate("",
                                xy=(1, -0.05), xycoords='axes fraction',
                                xytext=(1, 1.3), textcoords='axes fraction',
                                arrowprops=dict(arrowstyle="-",
                                                linewidth=0.5))
            if index[1] == 0:
                ax.annotate('$l='+str(l)+'$', xy=(1, 1), xytext=(-0.15, 0.5),
                            textcoords='axes fraction', ha='center', va='center',
                            rotation=90)
            if j is not None:
                ax.imshow(data[:,:,n,j], cmap="bwr", vmin=-1, vmax=1, interpolation='none')
                levels = [-0.1, -1e-5, 1e-5, 0.1]
                if contours:
                    ct = ax.contour(data[:,:,n,j], levels, colors='k',linewidths=0.5)
            else:
                ax.imshow(np.zeros(data[:,:,0,0].shape), cmap="bwr", vmin=-1, vmax=1, interpolation='none')

        f.savefig(filename, bbox_inches='tight')

    def calc_SVD(self, n_px=2**6):
        """Compute the singular-value field over a transverse plane and store
        it in self.sigma / self.sigma_max."""
        w = 2.0
        # BUGFIX: the original had a stray trailing comma here, which stored a
        # 1-tuple instead of the coordinate array.
        self.xcoords = np.linspace(-w, w, n_px)
        [X, Y] = np.meshgrid(self.xcoords, self.xcoords)

        # For each position calculate K and solve eigenequation
        sigma = np.zeros((n_px, n_px, self.n_len))
        for index, x in np.ndenumerate(X):
            if self.det.optical_axis == [0,0,1]: # z-detection
                u, s, v = self.calc_point_SVD(x, Y[index], 0)
            elif self.det.optical_axis == [1,0,0]: # x-detection
                u, s, v = self.calc_point_SVD(0, x, Y[index])
            sigma[index[0], index[1], :] = s

        self.sigma = sigma
        self.sigma_max = np.max(sigma)

    def calc_point_SVD(self, x, y, z):
        """Singular value decomposition of the transfer function at one point.

        :return: (u, s, v) with singular values sorted in descending order
        """
        HH = self.H(x, y, z).coeffs
        K = np.dot(HH, HH.T)
        mu, v = np.linalg.eigh(K)
        u = np.dot(HH.T, v)
        # eigh returns ascending eigenvalues; reverse for descending order
        return u[:,::-1], np.sqrt(mu[::-1]), v[:,::-1] # Transpose

    def plot_SVS(self, filename='svs.pdf', n_px=2**6, marks=[[1e-5,0], [0.5,0], [1.0,0], [1.5,0]]):
        """Plot the singular value spectrum (requires calc_SVD first) together
        with the object-space singular functions at the marked (rho, phi)."""
        print('Plotting: ' + filename)

        # Layout windows
        inches = 1.5
        rows = self.sigma.shape[-1] + 1
        cols = len(marks) + 1
        f, axs = plt.subplots(rows, cols,
                             figsize=(inches*cols, inches*(rows - 0.75)),
                             gridspec_kw={'hspace':0.05, 'wspace':0.10, 'height_ratios':[1,1,1,0.05]})

        # Label top row with arrow
        axs[0,0].annotate('', xy=(-0.03, 1.1), xytext=(0.55, 1.1),
                          textcoords='axes fraction', xycoords='axes fraction', ha='center', va='center',
                          arrowprops=dict(arrowstyle='<->, head_width=0.05, head_length=0.1',
                                          connectionstyle="arc3", linewidth=0.5),)
        axs[0,0].annotate(r'$2\textrm{NA}/\lambda$', xy=(0,0), xytext=(0.25, 1.2),
                          textcoords='axes fraction', xycoords='axes fraction', ha='center', va='center', fontsize=7)

        # Hoisted out of the loop so the colorbar below can use it too
        levels = [-0.1, -1e-5, 1e-5, 0.1]

        # Left column: singular value images
        for j, ax in enumerate(axs[:-1,0]):
            ax.axis('off')
            ax.annotate('$j='+str(j)+'$', xy=(1, 1), xytext=(-0.15, 0.5),
                        textcoords='axes fraction', ha='center', va='center', rotation=90)
            extent= [-2,2,-2,2]
            origin = 'lower'
            ax.imshow(self.sigma[:,:,j]/self.sigma_max, cmap="bwr", vmin=-1, vmax=1, interpolation='none', extent=extent, origin=origin)
            ax.set_xlim([-2.05,2.05])
            ax.set_ylim([-2.05,2.05])
            ct = ax.contour(self.sigma[:,:,j]/self.sigma_max, levels, colors='k',linewidths=0.5, extent=extent, origin=origin)
            for mark in marks:
                ax.plot(mark[0]*np.cos(mark[1]), mark[0]*np.sin(mark[1]), 'xk', ms=2.5, mew=0.5)

        # Colorbars
        X, Y = np.meshgrid(np.linspace(0, 1, 100),
                           np.linspace(0, 1, 100))
        axs[-1,0].imshow(X, cmap="bwr", vmin=-1, vmax=1, interpolation='none', extent=[0,1,0,1], origin='lower', aspect='auto')
        axs[-1,0].contour(X, levels, colors='k',linewidths=0.5, extent=[0,1,0,1], origin='lower',)
        axs[-1,0].set_xlim([0,1])
        axs[-1,0].set_ylim([0,1])
        axs[-1,0].tick_params(direction='out', bottom=True, top=False)
        axs[-1,0].xaxis.set_ticks([0, 0.5, 1.0])
        axs[-1,0].yaxis.set_ticks([])
        for j, ax in enumerate(axs[-1, 1:]):
            ax.axis('off')

        # For saving singular function pngs
        folder = 'singular'
        if not os.path.exists(folder):
            os.makedirs(folder)

        # Object space singular functions
        for j, ax in np.ndenumerate(axs[:-1,1:]):
            if self.det.optical_axis == [0,0,1]: # z-detection
                u, s, v = self.calc_point_SVD(marks[j[1]][0], marks[j[1]][1], 0)
            elif self.det.optical_axis == [1,0,0]: # x-detection
                u, s, v = self.calc_point_SVD(0, marks[j[1]][0], marks[j[1]][1])

            # Labels
            if j[0] == 0:
                rho_label = str(marks[j[1]][0])
                if marks[j[1]][0] < 1e-3:
                    rho_label = '0'
                ax.annotate(r'$\rho = '+rho_label+'$', xy=(1, 1), xytext=(0.5, 1.15),
                            textcoords='axes fraction', ha='center', va='center')
                ax.annotate(r'$\phi_{\rho} = '+str(marks[j[1]][1])+'$', xy=(1, 1), xytext=(0.5, 0.95),
                            textcoords='axes fraction', ha='center', va='center')
            ax.axis('off')
            ax.annotate('$\sigma='+'{:.2f}'.format(s[j[0]]/self.sigma_max)+'$', xy=(1, 1), xytext=(0.5, 0),
                        textcoords='axes fraction', ha='center', va='center')

            # Create singular function plots and embed the rendered png
            sh = shcoeffs.SHCoeffs(u[:,j[0]])
            shfilename = folder+'/'+str(j[0])+str(j[1])+'.png'
            sh.plot_dist(filename=shfilename, r=1.1)
            from PIL import Image
            im1 = np.asarray(Image.open(shfilename))
            ax.imshow(im1, interpolation='none')

        f.savefig(filename, bbox_inches='tight')

    def scene_string(self):
        """Return the asymptote snippet describing illumination/detection
        cones and polarizers for this microscope."""
        asy_string = ''
        ill_string = "circle(optical_axis, alpha, false, color);\n"
        ill_string = ill_string.replace('optical_axis', str(tuple(self.det.optical_axis)))
        ill_string = ill_string.replace('alpha', str(np.arcsin(self.ill.na/self.ill.n)))
        ill_string = ill_string.replace('color', str(self.color))
        asy_string += ill_string

        if self.ill.polarizer is not None:
            pol_string = "arrow(optical_axis, polarizer, color, false);\n"
            pol_string = pol_string.replace('optical_axis', str(tuple(self.ill.optical_axis)))
            pol_string = pol_string.replace('polarizer', str(tuple(self.ill.polarizer)))
            pol_string = pol_string.replace('color', str(self.color))
            asy_string += pol_string

        if self.det.polarizer is not None:
            pol_string = "arrow(optical_axis, polarizer, color, true);\n"
            pol_string = pol_string.replace('optical_axis', str(tuple(self.det.optical_axis)))
            pol_string = pol_string.replace('polarizer', str(tuple(self.det.polarizer)))
            pol_string = pol_string.replace('color', str(self.color))
            asy_string += pol_string

        det_string = "circle(optical_axis, alpha, true, color);\n"
        det_string = det_string.replace('optical_axis', str(tuple(self.det.optical_axis)))
        det_string = det_string.replace('alpha', str(np.arcsin(self.det.na/self.det.n)))
        det_string = det_string.replace('color', str(self.color))
        asy_string += det_string

        return asy_string

    def plot_scene(self, filename):
        """Render the microscope geometry to *filename* via util.draw_scene."""
        print('Plotting: ' + filename)
        util.draw_scene(self.scene_string(), filename=filename, save_file=True)
| [
"polharmonic.util.lm2j",
"numpy.floor",
"polharmonic.shcoeffs.SHCoeffs",
"numpy.sin",
"numpy.meshgrid",
"os.path.exists",
"numpy.arcsin",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.ndenumerate",
"numpy.cos",
"numpy.arctan",
"numpy.dot",
"polharmonic.det.Detector"... | [((483, 500), 'polharmonic.ill.Illuminator', 'ill.Illuminator', ([], {}), '()\n', (498, 500), False, 'from polharmonic import util, ill, det, gaunt, shcoeffs\n'), ((506, 520), 'polharmonic.det.Detector', 'det.Detector', ([], {}), '()\n', (518, 520), False, 'from polharmonic import util, ill, det, gaunt, shcoeffs\n'), ((1483, 1529), 'numpy.zeros', 'np.zeros', (['(n_px, n_px, self.n_len, self.j_len)'], {}), '((n_px, n_px, self.n_len, self.j_len))\n', (1491, 1529), True, 'import numpy as np\n'), ((1554, 1571), 'numpy.ndenumerate', 'np.ndenumerate', (['X'], {}), '(X)\n', (1568, 1571), True, 'import numpy as np\n'), ((2061, 2191), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.h0.rmax', 'cols'], {'figsize': '(inches * cols, inches * self.h0.rmax)', 'gridspec_kw': "{'hspace': 0.0, 'wspace': 0.05}"}), "(self.h0.rmax, cols, figsize=(inches * cols, inches * self.h0.\n rmax), gridspec_kw={'hspace': 0.0, 'wspace': 0.05})\n", (2073, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2294), 'numpy.ndenumerate', 'np.ndenumerate', (['axs'], {}), '(axs)\n', (2289, 2294), True, 'import numpy as np\n'), ((4287, 4326), 'numpy.meshgrid', 'np.meshgrid', (['self.xcoords', 'self.xcoords'], {}), '(self.xcoords, self.xcoords)\n', (4298, 4326), True, 'import numpy as np\n'), ((4339, 4363), 'numpy.sqrt', 'np.sqrt', (['(X ** 2 + Y ** 2)'], {}), '(X ** 2 + Y ** 2)\n', (4346, 4363), True, 'import numpy as np\n'), ((4485, 4519), 'numpy.zeros', 'np.zeros', (['(n_px, n_px, self.n_len)'], {}), '((n_px, n_px, self.n_len))\n', (4493, 4519), True, 'import numpy as np\n'), ((4544, 4561), 'numpy.ndenumerate', 'np.ndenumerate', (['X'], {}), '(X)\n', (4558, 4561), True, 'import numpy as np\n'), ((4913, 4926), 'numpy.max', 'np.max', (['sigma'], {}), '(sigma)\n', (4919, 4926), True, 'import numpy as np\n'), ((5015, 5031), 'numpy.dot', 'np.dot', (['HH', 'HH.T'], {}), '(HH, HH.T)\n', (5021, 5031), True, 'import numpy as np\n'), ((5048, 5065), 'numpy.linalg.eigh', 
'np.linalg.eigh', (['K'], {}), '(K)\n', (5062, 5065), True, 'import numpy as np\n'), ((5078, 5093), 'numpy.dot', 'np.dot', (['HH.T', 'v'], {}), '(HH.T, v)\n', (5084, 5093), True, 'import numpy as np\n'), ((5436, 5597), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(inches * cols, inches * (rows - 0.75))', 'gridspec_kw': "{'hspace': 0.05, 'wspace': 0.1, 'height_ratios': [1, 1, 1, 0.05]}"}), "(rows, cols, figsize=(inches * cols, inches * (rows - 0.75)),\n gridspec_kw={'hspace': 0.05, 'wspace': 0.1, 'height_ratios': [1, 1, 1, \n 0.05]})\n", (5448, 5597), True, 'import matplotlib.pyplot as plt\n'), ((7951, 7979), 'numpy.ndenumerate', 'np.ndenumerate', (['axs[:-1, 1:]'], {}), '(axs[:-1, 1:])\n', (7965, 7979), True, 'import numpy as np\n'), ((1378, 1402), 'numpy.linspace', 'np.linspace', (['(-w)', 'w', 'n_px'], {}), '(-w, w, n_px)\n', (1389, 1402), True, 'import numpy as np\n'), ((1433, 1457), 'numpy.linspace', 'np.linspace', (['(-w)', 'w', 'n_px'], {}), '(-w, w, n_px)\n', (1444, 1457), True, 'import numpy as np\n'), ((2590, 2605), 'polharmonic.util.lm2j', 'util.lm2j', (['l', 'm'], {}), '(l, m)\n', (2599, 2605), False, 'from polharmonic import util, ill, det, gaunt, shcoeffs\n'), ((4244, 4268), 'numpy.linspace', 'np.linspace', (['(-w)', 'w', 'n_px'], {}), '(-w, w, n_px)\n', (4255, 4268), True, 'import numpy as np\n'), ((4388, 4404), 'numpy.arctan', 'np.arctan', (['(Y / X)'], {}), '(Y / X)\n', (4397, 4404), True, 'import numpy as np\n'), ((5121, 5138), 'numpy.sqrt', 'np.sqrt', (['mu[::-1]'], {}), '(mu[::-1])\n', (5128, 5138), True, 'import numpy as np\n'), ((7141, 7163), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (7152, 7163), True, 'import numpy as np\n'), ((7194, 7216), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (7205, 7216), True, 'import numpy as np\n'), ((7831, 7853), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (7845, 7853), False, 
'import os\n'), ((7867, 7886), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (7878, 7886), False, 'import os\n'), ((9073, 9102), 'polharmonic.shcoeffs.SHCoeffs', 'shcoeffs.SHCoeffs', (['u[:, j[0]]'], {}), '(u[:, j[0]])\n', (9090, 9102), False, 'from polharmonic import util, ill, det, gaunt, shcoeffs\n'), ((9309, 9331), 'PIL.Image.open', 'Image.open', (['shfilename'], {}), '(shfilename)\n', (9319, 9331), False, 'from PIL import Image\n'), ((9706, 9741), 'numpy.arcsin', 'np.arcsin', (['(self.ill.na / self.ill.n)'], {}), '(self.ill.na / self.ill.n)\n', (9715, 9741), True, 'import numpy as np\n'), ((10880, 10915), 'numpy.arcsin', 'np.arcsin', (['(self.det.na / self.det.n)'], {}), '(self.det.na / self.det.n)\n', (10889, 10915), True, 'import numpy as np\n'), ((2873, 2891), 'numpy.floor', 'np.floor', (['(mlen / 2)'], {}), '(mlen / 2)\n', (2881, 2891), True, 'import numpy as np\n'), ((4021, 4053), 'numpy.zeros', 'np.zeros', (['data[:, :, 0, 0].shape'], {}), '(data[:, :, 0, 0].shape)\n', (4029, 4053), True, 'import numpy as np\n'), ((7028, 7043), 'numpy.cos', 'np.cos', (['mark[1]'], {}), '(mark[1])\n', (7034, 7043), True, 'import numpy as np\n'), ((7053, 7068), 'numpy.sin', 'np.sin', (['mark[1]'], {}), '(mark[1])\n', (7059, 7068), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import types
from darklim.limit._limit import drde, gauss_smear
__all__ = [
"RatePlot",
]
class RatePlot(object):
    """
    Class for making a plot of different dark matter spectra with the ability of comparing
    different data and with theoretical dR/dE curves.

    Attributes
    ----------
    fig : matplotlib.figure.Figure
        Matplotlib Figure object
    ax : matplotlib.axes.AxesSubplot
        Matplotlib Axes object
    _energy_range : array_like
        The energy range of the plot in keV.
    _spectrum_cmap : str
        The colormap to use for plotting each spectra from data.
    _drde_cmap : str
        The colormap to use for plotting each theoretical drde curve using the WIMP model.

    """

    def __init__(self, energy_range, spectrum_cmap="inferno", drde_cmap="viridis", figsize=(10, 6)):
        """
        Initialization of the RatePlot class for plotting dark matter spectra.

        Parameters
        ----------
        energy_range : array_like
            The energy range of the plot in keV.
        spectrum_cmap : str, optional
            The colormap to use for plotting each spectra from data. Default is "inferno".
        drde_cmap : str, optional
            The colormap to use for plotting each theoretical drde curve using the WIMP
            model. Default is "viridis".
        figsize : tuple, optional
            Width and height of the figure in inches. Default is (10, 6).

        Returns
        -------
        None

        """
        self._energy_range = energy_range
        self._spectrum_cmap = spectrum_cmap
        self._drde_cmap = drde_cmap

        self.fig, self.ax = plt.subplots(figsize=figsize)
        self.ax.grid()
        self.ax.grid(which="minor", axis="both", linestyle="dotted")
        self.ax.tick_params(which="both", direction="in", right=True, top=True)
        # Rates span many orders of magnitude, so use a log y-axis.
        self.ax.set_yscale('log')
        self.ax.set_xlim(self._energy_range)
        self.ax.set_ylabel("$\partial R/\partial E_r$ [evts/keV/kg/day]")
        self.ax.set_xlabel("Energy [keV]")
        self.ax.set_title(
            f"Spectrum of Events from {self._energy_range[0]:.1f} to "
            f"{self._energy_range[1]:.1f} keV"
        )

    def _update_colors(self, linestyle):
        """
        Helper method for updating the line colors whenever a new line is added.

        Parameters
        ----------
        linestyle : str
            The linestyle to update all the plot colors for. Should be "-" or "--".

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If `linestyle` is not "-" or "--".

        Notes
        -----
        Given a linestyle, this method checks how many lines have the specified style.
        This method assumes that data curves have a solid linestyle and theoretical
        DM curves have a dashed linestyle.

        """
        if linestyle == "-":
            cmap = self._spectrum_cmap
        elif linestyle == "--":
            cmap = self._drde_cmap
        else:
            raise ValueError("The inputted linestyle is not supported.")

        # Re-space the colors over the colormap so every curve of this style
        # remains distinguishable as curves are added.
        # NOTE(review): ``plt.cm.get_cmap`` was removed in matplotlib 3.9;
        # migrate to ``matplotlib.colormaps[...]`` when bumping matplotlib.
        n_lines = sum(line.get_linestyle() == linestyle for line in self.ax.lines)
        line_colors = plt.cm.get_cmap(cmap)(np.linspace(0.1, 0.9, n_lines))

        ii = 0
        for line in self.ax.lines:
            if line.get_linestyle() == linestyle:
                line.set_color(line_colors[ii])
                ii += 1

        self.ax.legend(loc="upper right")

    def add_data(self, energies, exposure, efficiency=None, label=None, nbins=100, **kwargs):
        """
        Method for plotting a single spectrum in evts/keV/kg/day from inputted data.

        Parameters
        ----------
        energies : array_like
            The energies of the events that will be used when plotting, in units of keV.
        exposure : float
            The exposure of DM search with respect to the inputted spectrum, in units
            of kg-days.
        efficiency : float, FunctionType, NoneType, optional
            The efficiency of the cuts that were applied to the inputted energies. This
            can be passed as a float, as a function of energy in keV, or left as None if
            no efficiency correction will be done.
        label : str, optional
            The label for this data to be used in the plot. If left as None, no label is added.
        nbins : int, optional
            The number of bins to use in the plot.
        kwargs
            The keyword arguments to pass to `matplotlib.pyplot.step`.

        Returns
        -------
        None

        """
        hist, bin_edges = np.histogram(energies, bins=nbins, range=self._energy_range)
        bin_cen = (bin_edges[:-1]+bin_edges[1:])/2
        # Convert counts to a differential rate: counts / keV / kg-day.
        rate = hist / np.diff(bin_cen).mean() / exposure

        # ``np.isscalar(None)`` is False, so a None efficiency skips correction.
        if np.isscalar(efficiency):
            rate /= efficiency
        elif isinstance(efficiency, types.FunctionType):
            rate /= efficiency(bin_cen)

        self.ax.step(bin_cen, rate, where='mid', label=label, linestyle='-', **kwargs)
        self._update_colors("-")

    def add_drde(self, masses, sigmas, tm="Si", npoints=1000, res=None, gauss_width=10, **kwargs):
        """
        Method for plotting the expected dark matter spectrum for specified masses and
        cross sections in evts/keV/kg/day.

        Parameters
        ----------
        masses : float, array_like
            The dark matter mass at which to calculate/plot the expected differential
            scattering rate. Expected units are GeV.
        sigmas : float, array_like
            The dark matter cross section at which to calculate/plot the expected
            differential scattering rate. Expected units are cm^2.
        tm : str, int, optional
            The target material of the detector. Can be passed as either the atomic symbol, the
            atomic number, or the full name of the element. Default is 'Si'.
        npoints : int, optional
            The number of points to use in the dR/dE plot. Default is 1000.
        res : float, NoneType, optional
            The width of the gaussian (1 standard deviation) to be used to smear differential
            scatter rate in the plot. Should have units of keV. None by default, in which no
            smearing is done.
        gauss_width : float, optional
            If `res` is not None, this is the number of standard deviations of the Gaussian
            distribution that the smearing will go out to. Default is 10.
        kwargs
            The keyword arguments to pass to `matplotlib.pyplot.plot`.

        Returns
        -------
        None

        Raises
        ------
        ValueError
            If `masses` and `sigmas` are not the same length.

        """
        if np.isscalar(masses):
            masses = [masses]
        if np.isscalar(sigmas):
            sigmas = [sigmas]
        if len(masses) != len(sigmas):
            raise ValueError("masses and sigmas must be the same length.")

        xvals = np.linspace(self._energy_range[0], self._energy_range[1], npoints)

        for m, sig in zip(masses, sigmas):
            # BUGFIX: the result must NOT be assigned back to a local named
            # ``drde`` -- that shadowed the imported ``drde`` function and made
            # every iteration after the first raise a TypeError.
            rate = drde(xvals, m, sig, tm=tm)
            label = f"DM Mass = {m:.2f} GeV, σ = {sig:.2e} cm$^2$"
            if res is not None:
                rate = gauss_smear(xvals, rate, res, gauss_width=gauss_width)
                label += f"\nWith {gauss_width}$\sigma_E$ Smearing"
            self.ax.plot(
                xvals,
                rate,
                linestyle='--',
                label=label,
                **kwargs,
            )

        self._update_colors("--")
| [
"numpy.isscalar",
"darklim.limit._limit.gauss_smear",
"numpy.histogram",
"numpy.diff",
"numpy.linspace",
"darklim.limit._limit.drde",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.cm.get_cmap"
] | [((1708, 1737), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1720, 1737), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4757), 'numpy.histogram', 'np.histogram', (['energies'], {'bins': 'nbins', 'range': 'self._energy_range'}), '(energies, bins=nbins, range=self._energy_range)\n', (4709, 4757), True, 'import numpy as np\n'), ((4878, 4901), 'numpy.isscalar', 'np.isscalar', (['efficiency'], {}), '(efficiency)\n', (4889, 4901), True, 'import numpy as np\n'), ((6842, 6861), 'numpy.isscalar', 'np.isscalar', (['masses'], {}), '(masses)\n', (6853, 6861), True, 'import numpy as np\n'), ((6904, 6923), 'numpy.isscalar', 'np.isscalar', (['sigmas'], {}), '(sigmas)\n', (6915, 6923), True, 'import numpy as np\n'), ((7086, 7152), 'numpy.linspace', 'np.linspace', (['self._energy_range[0]', 'self._energy_range[1]', 'npoints'], {}), '(self._energy_range[0], self._energy_range[1], npoints)\n', (7097, 7152), True, 'import numpy as np\n'), ((3274, 3295), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (3289, 3295), True, 'import matplotlib.pyplot as plt\n'), ((3296, 3326), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', 'n_lines'], {}), '(0.1, 0.9, n_lines)\n', (3307, 3326), True, 'import numpy as np\n'), ((7216, 7242), 'darklim.limit._limit.drde', 'drde', (['xvals', 'm', 'sig'], {'tm': 'tm'}), '(xvals, m, sig, tm=tm)\n', (7220, 7242), False, 'from darklim.limit._limit import drde, gauss_smear\n'), ((7365, 7419), 'darklim.limit._limit.gauss_smear', 'gauss_smear', (['xvals', 'drde', 'res'], {'gauss_width': 'gauss_width'}), '(xvals, drde, res, gauss_width=gauss_width)\n', (7376, 7419), False, 'from darklim.limit._limit import drde, gauss_smear\n'), ((4831, 4847), 'numpy.diff', 'np.diff', (['bin_cen'], {}), '(bin_cen)\n', (4838, 4847), True, 'import numpy as np\n')] |
import argparse
import os
# Command-line interface: the data folder is mandatory; --lite additionally
# exports a TensorFlow Lite version of the trained model after training.
parser = argparse.ArgumentParser(description='Model Trainer')
parser.add_argument('--path', help='Path to data folder.', required=True)
parser.add_argument('--lite', help='Generate lite Model.', action='store_true')
args = parser.parse_args()
if args.path:
import cv2
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
def load_dataset(input_path):
features_list = []
features_label = []
for root, dirs, files in os.walk(input_path):
for dir in dirs:
for filename in os.listdir(input_path + "/" + dir):
training_digit_image = cv2.imread(input_path + "/" + dir + "/" + filename)
gray = cv2.cvtColor(training_digit_image, cv2.COLOR_BGR2GRAY)
gray = np.array(gray, dtype='f').ravel()
features_list.append(np.array(gray))
features_label.append(np.float(dir))
features_list = np.array(features_list)
features_label = np.array(features_label)
return features_list, features_label
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
print('\nLearning rate for epoch {} is {}'.format(epoch + 1, model.optimizer.lr.numpy()))
if logs.get('loss') < 0.01 and logs.get('accuracy') > .999:
print("\nReached 99.8% accuracy so cancelling training!")
self.model.stop_training = True
def scheduler(epoch):
return 0.001 if epoch < 10 else float(0.001 * tf.math.exp(0.1 * (10 - epoch)))
    # Load features/labels from the class-per-directory dataset on disk.
    train, labels = load_dataset(args.path)
    # Split into train/test
    X_train, X_test, y_train, y_test = train_test_split(train, labels, test_size=0.3, stratify=labels, random_state=0)
    # Scale pixel values from [0, 255] into [0, 1].
    X_train /= 255.0
    X_test /= 255.0
    # Wrap the arrays as shuffled, batched tf.data pipelines.
    train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(1700).batch(64)
    test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).shuffle(1700).batch(64)
    # Simple MLP classifier over the flattened images; 38 output classes.
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(38, activation='softmax')
    ])
    callbacks = myCallback()
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    # Early-stop callback plus the custom LR schedule defined above.
    model.fit(train_ds, validation_data=test_ds, epochs=100,
              callbacks=[callbacks, tf.keras.callbacks.LearningRateScheduler(scheduler)])
    model.save('model.h5')
    if args.lite:
        # Convert the model.
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        # NOTE(review): the file handle is never closed -- prefer a ``with``.
        open('Model.tflite', 'wb').write(tflite_model)
| [
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"cv2.cvtColor",
"sklearn.model_selection.train_test_split",
"os.walk",
"numpy.float",
"tensorflow.data.Dataset.from_tensor_slices",
"cv2.imread",
"tensorflow.keras.callbacks.LearningRateScheduler",
"nu... | [((36, 88), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Model Trainer"""'}), "(description='Model Trainer')\n", (59, 88), False, 'import argparse\n'), ((1673, 1752), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train', 'labels'], {'test_size': '(0.3)', 'stratify': 'labels', 'random_state': '(0)'}), '(train, labels, test_size=0.3, stratify=labels, random_state=0)\n', (1689, 1752), False, 'from sklearn.model_selection import train_test_split\n'), ((517, 536), 'os.walk', 'os.walk', (['input_path'], {}), '(input_path)\n', (524, 536), False, 'import os\n'), ((980, 1003), 'numpy.array', 'np.array', (['features_list'], {}), '(features_list)\n', (988, 1003), True, 'import numpy as np\n'), ((1025, 1049), 'numpy.array', 'np.array', (['features_label'], {}), '(features_label)\n', (1033, 1049), True, 'import numpy as np\n'), ((2643, 2690), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (2683, 2690), True, 'import tensorflow as tf\n'), ((2038, 2063), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (2061, 2063), True, 'import tensorflow as tf\n'), ((2101, 2146), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2122, 2146), True, 'import tensorflow as tf\n'), ((2184, 2212), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (2207, 2212), True, 'import tensorflow as tf\n'), ((2250, 2297), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(38)'], {'activation': '"""softmax"""'}), "(38, activation='softmax')\n", (2271, 2297), True, 'import tensorflow as tf\n'), ((591, 625), 'os.listdir', 'os.listdir', (["(input_path + '/' + dir)"], {}), "(input_path + '/' + dir)\n", (601, 625), False, 'import os\n'), ((2510, 2561), 'tensorflow.keras.callbacks.LearningRateScheduler', 
'tf.keras.callbacks.LearningRateScheduler', (['scheduler'], {}), '(scheduler)\n', (2550, 2561), True, 'import tensorflow as tf\n'), ((666, 717), 'cv2.imread', 'cv2.imread', (["(input_path + '/' + dir + '/' + filename)"], {}), "(input_path + '/' + dir + '/' + filename)\n", (676, 717), False, 'import cv2\n'), ((741, 795), 'cv2.cvtColor', 'cv2.cvtColor', (['training_digit_image', 'cv2.COLOR_BGR2GRAY'], {}), '(training_digit_image, cv2.COLOR_BGR2GRAY)\n', (753, 795), False, 'import cv2\n'), ((1539, 1570), 'tensorflow.math.exp', 'tf.math.exp', (['(0.1 * (10 - epoch))'], {}), '(0.1 * (10 - epoch))\n', (1550, 1570), True, 'import tensorflow as tf\n'), ((1798, 1852), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_train, y_train)'], {}), '((X_train, y_train))\n', (1832, 1852), True, 'import tensorflow as tf\n'), ((1887, 1939), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_test, y_test)'], {}), '((X_test, y_test))\n', (1921, 1939), True, 'import tensorflow as tf\n'), ((890, 904), 'numpy.array', 'np.array', (['gray'], {}), '(gray)\n', (898, 904), True, 'import numpy as np\n'), ((944, 957), 'numpy.float', 'np.float', (['dir'], {}), '(dir)\n', (952, 957), True, 'import numpy as np\n'), ((819, 844), 'numpy.array', 'np.array', (['gray'], {'dtype': '"""f"""'}), "(gray, dtype='f')\n", (827, 844), True, 'import numpy as np\n')] |
import math
import numpy as np
import random
import timeit
from threading import Thread
import functools
dist_ar = [] # pairwise distance table between cities (global)
cities_count = 0 # number of cities (global)
dots_list = [] # list of city coordinate strings (global)
# Hyper Parameter
limits = 60 * 12 # search time limit in seconds (12 minutes)
# Decorator that aborts a call once the time limit is exceeded
def timeout(seconds_before_timeout):
    """Decorator factory: run the wrapped function in a daemon thread and
    wait at most ``seconds_before_timeout`` seconds for it to finish.

    If the function completes in time, its return value is passed through
    and any exception it raised is re-raised in the caller.  If the deadline
    expires first, the pre-seeded timeout Exception is raised instead (the
    worker thread keeps running in the background as a daemon).
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Single-cell mailbox shared with the worker thread, pre-seeded
            # with the timeout error so an expired join reports it.
            outcome = [Exception('function [%s] timeout [%s seconds] exceeded!' % (func.__name__, seconds_before_timeout))]
            def worker():
                try:
                    outcome[0] = func(*args, **kwargs)
                except Exception as exc:
                    outcome[0] = exc
            thread = Thread(target=worker)
            thread.daemon = True
            try:
                thread.start()
                thread.join(seconds_before_timeout)
            except Exception as exc:
                print('error starting thread')
                raise exc
            result = outcome[0]
            if isinstance(result, BaseException):
                raise result
            return result
        return wrapper
    return deco
# Build the distance table (param: path to the problem file)
def make_distArray(str):
    """Read a problem file and build the global pairwise distance table.

    File format: first line is the number of cities, followed by one
    "x y" coordinate pair per line.  Populates the globals ``dist_ar``
    (Euclidean distance matrix), ``cities_count`` and ``dots_list``, and
    additionally returns the distance matrix (backward-compatible: the
    previous version returned None, which no caller used).

    Note: the parameter is named ``str`` (shadowing the builtin) -- kept
    for backward compatibility with keyword callers.
    """
    global dist_ar
    global cities_count
    global dots_list
    # Use a context manager so the file handle is always closed
    # (the original left the reader open); also drop the dead
    # ``global limit_time`` declaration.
    with open(str, mode='rt', encoding='utf-8') as reader:
        dots_list = reader.read().split("\n")  # ['x1 y1', 'x2 y2', ... 'xn yn']
    cities_count = int(dots_list.pop(0))
    x_list = []  # ['x1', 'x2', ... 'xn']
    y_list = []  # ['y1', 'y2', ... 'yn']
    for i in range(cities_count):
        temp = dots_list[i].split(" ")
        x_list.append(float(temp[0]))
        y_list.append(float(temp[1]))
    # Vectorised pairwise Euclidean distances (replaces the nested
    # Python loops; same values, same IEEE operations).
    xs = np.array(x_list)
    ys = np.array(y_list)
    dist_ar = np.sqrt((xs[:, None] - xs[None, :]) ** 2 + (ys[:, None] - ys[None, :]) ** 2)
    print(dist_ar)
    return dist_ar
# Fitness function: total tour length looked up from the distance table
def cal_fit(stri):
    """Return the fitness (total tour length) of route ``stri``: the sum of
    distances between consecutive cities, read from the global ``dist_ar``."""
    return sum(dist_ar[stri[step], stri[step + 1]] for step in range(len(stri) - 1))
def optFunc(stri):
    """First-improvement 2-opt local search.

    Repeatedly scans all (head, tail) segment reversals of the current
    route and applies the first one that shortens the tour, restarting the
    scan after every improvement; stops once a full scan finds none.
    """
    route = stri
    fitness = cal_fit(route)
    improved = True
    while improved:
        improved = False
        for head in range(len(route)):
            for tail in range(len(route)):
                candidate = optSwap(route, head, tail)
                candidate_fitness = cal_fit(candidate)
                if candidate_fitness < fitness:
                    route = candidate
                    fitness = candidate_fitness
                    improved = True
                    break
            if improved:
                break
    return route
def optSwap(route, head, tail):
    """Return a copy of ``route`` with the segment route[head..tail]
    (inclusive) reversed -- the classic 2-opt move."""
    return route[:head] + route[head:tail + 1][::-1] + route[tail + 1:]
def randomTwo(ranges):
    """Return two distinct indices drawn from range(ranges), ascending."""
    return sorted(random.sample(range(ranges), 2))
def randomFour(ranges):
    """Return four distinct indices drawn from range(ranges), ascending."""
    return sorted(random.sample(range(ranges), 4))
def twoOptMove(nest, pointList):
    """Apply one 2-opt move to a copy of ``nest``: reverse the segment
    between pointList[0] and pointList[1] (inclusive)."""
    return optSwap(list(nest), pointList[0], pointList[1])
def doublebridgeMove(nest, pointList):
    """Apply two successive segment reversals (a double-bridge-style
    perturbation) at the four sorted cut points in ``pointList``."""
    first = optSwap(list(nest), pointList[0], pointList[1])
    return optSwap(first, pointList[2], pointList[3])
def makeFractal(route, calls) :
    """Recursively perturb ``route``, appending every variant to the global
    ``population`` list.

    Each level spawns two children: a small 2-opt perturbation and a larger
    double-bridge-style perturbation.  Recursion depth is capped by a random
    power-law draw, so deep expansions are rare.
    """
    global population
    # Random depth cap: floor(u**(-1/3)) for u ~ U(0.0001, 0.9999).
    Fractal_size = math.floor(math.pow(random.uniform(0.0001,0.9999),-1.0/3.0))
    if not calls > Fractal_size :
        calls += 1
        # Small move: reverse one randomly chosen segment (2-opt).
        small = twoOptMove(route, sorted(randomTwo(cities_count)))
        population.append(small)
        makeFractal(small, calls)
        # Large move: two segment reversals at four random cut points.
        large = doublebridgeMove(route, sorted(randomFour(cities_count)))
        population.append(large)
        makeFractal(large, calls)
def makeArr(population):
    """Pair every route in ``population`` with its fitness.

    Returns an object ndarray of shape (len(population), 2) whose rows are
    [route, fitness], so the caller can sort on the fitness column.
    """
    fits = [cal_fit(route) for route in population]
    # BUGFIX: dtype=object is required here -- routes (lists) and scalar
    # fitnesses have different shapes, and NumPy >= 1.24 raises a ValueError
    # when asked to build such a ragged array implicitly.  Older NumPy built
    # the same object array automatically, so behavior is preserved.
    arr = np.array([population, fits], dtype=object)
    return arr.T
@timeout(limits)
def run() :
    """Main search loop: repeatedly grow a fractal population of
    perturbations around the incumbent tour and keep only the best one.

    Runs until the @timeout decorator aborts it after ``limits`` seconds.
    """
    global population
    generation = 0
    # Start from a random permutation of all cities.
    optSol = random.sample(range(0, cities_count), cities_count)
    population.append(optSol)
    calls = 0
    while 1 :
        makeFractal(optSol, calls)
        population = makeArr(population)
        population = population[np.argsort(population[:, 1])] # sort ascending by fitness
        optSol = population[0,0]
        if generation % 5000 == 0 :
            # "세대 최적해" = "best solution of this generation" (kept verbatim).
            print(generation, "세대 최적해", population[0,1])
        # Restart the next generation from only the incumbent best tour.
        population = []
        population.append(optSol)
        generation += 1
        calls = 0
population = []  # global population shared with run()/makeFractal()
# BUGFIX: stamp ``start`` before the try block -- previously, if
# make_distArray() raised, the except handler hit a NameError on ``start``.
start = timeit.default_timer()
try:
    make_distArray("dots/opt_cycle200.in")
    start = timeit.default_timer()  # re-stamp so only the search is timed
    run()
except Exception:
    # Narrowed from a bare ``except:`` so Ctrl-C / SystemExit still work;
    # run() is expected to terminate via the @timeout Exception.
    pass
finally:
    stop = timeit.default_timer()
    print(stop - start)
"threading.Thread",
"math.sqrt",
"random.uniform",
"timeit.default_timer",
"numpy.argsort",
"numpy.array",
"functools.wraps"
] | [((1912, 1929), 'numpy.array', 'np.array', (['dist_ar'], {}), '(dist_ar)\n', (1920, 1929), True, 'import numpy as np\n'), ((4090, 4118), 'numpy.array', 'np.array', (['[population, fits]'], {}), '([population, fits])\n', (4098, 4118), True, 'import numpy as np\n'), ((4803, 4825), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4823, 4825), False, 'import timeit\n'), ((4847, 4869), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4867, 4869), False, 'import timeit\n'), ((323, 344), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (338, 344), False, 'import functools\n'), ((4914, 4936), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4934, 4936), False, 'import timeit\n'), ((690, 712), 'threading.Thread', 'Thread', ([], {'target': 'newFunc'}), '(target=newFunc)\n', (696, 712), False, 'from threading import Thread\n'), ((3587, 3617), 'random.uniform', 'random.uniform', (['(0.0001)', '(0.9999)'], {}), '(0.0001, 0.9999)\n', (3601, 3617), False, 'import random\n'), ((4438, 4466), 'numpy.argsort', 'np.argsort', (['population[:, 1]'], {}), '(population[:, 1])\n', (4448, 4466), True, 'import numpy as np\n'), ((1791, 1861), 'math.sqrt', 'math.sqrt', (['((x_list[m] - x_list[n]) ** 2 + (y_list[m] - y_list[n]) ** 2)'], {}), '((x_list[m] - x_list[n]) ** 2 + (y_list[m] - y_list[n]) ** 2)\n', (1800, 1861), False, 'import math\n')] |
import numpy as np
class ActivationModel:
    """Abstract interface for a layer activation function.

    Subclasses implement the forward activation and its derivative with
    respect to the pre-activation input ``Z``.
    """
    def activation(
        self,
        Z
    ):
        # Z: (num_outputs, num_examples)
        # outputs: (num_outputs, num_examples)
        raise NotImplementedError('Method must be implemented by child class')

    def d_activation_d_Z(
        self,
        Z,
        outputs
    ):
        # Z: (num_outputs, num_examples)
        # outputs: (num_outputs, num_examples)
        # d_activation_d_Z: (num_outputs, num_examples)
        raise NotImplementedError('Method must be implemented by child class')
class SigmoidActivationModel(ActivationModel):
    """Logistic sigmoid activation: sigma(Z) = 1 / (1 + exp(-Z))."""

    def activation(self, Z):
        # Z, outputs: (num_outputs, num_examples)
        return np.reciprocal(np.exp(-Z) + 1.0)

    def d_activation_d_Z(self, Z, outputs):
        # Derivative expressed via the cached outputs:
        # sigma'(Z) = sigma(Z) * (1 - sigma(Z)).
        return outputs * (1.0 - outputs)
class TanhActivationModel(ActivationModel):
    """Hyperbolic tangent activation."""

    def activation(self, Z):
        # Z, outputs: (num_outputs, num_examples)
        return np.tanh(Z)

    def d_activation_d_Z(self, Z, outputs):
        # tanh'(Z) = 1 - tanh(Z)^2, expressed via the cached outputs.
        return 1.0 - outputs ** 2
class ReLUActivationModel(ActivationModel):
    """Rectified linear unit activation: max(0, Z)."""

    def activation(self, Z):
        # Multiplying by the boolean mask zeroes every non-positive entry.
        return Z * (Z > 0.0)

    def d_activation_d_Z(self, Z, outputs):
        # Subgradient: 1 where Z > 0, else 0 (0 at Z == 0 by convention).
        return 1.0 * (Z > 0.0)
| [
"numpy.multiply",
"numpy.subtract",
"numpy.tanh",
"numpy.square",
"numpy.exp"
] | [((1339, 1349), 'numpy.tanh', 'np.tanh', (['Z'], {}), '(Z)\n', (1346, 1349), True, 'import numpy as np\n'), ((1878, 1901), 'numpy.multiply', 'np.multiply', (['Z', '(Z > 0.0)'], {}), '(Z, Z > 0.0)\n', (1889, 1901), True, 'import numpy as np\n'), ((2166, 2191), 'numpy.multiply', 'np.multiply', (['(1.0)', '(Z > 0.0)'], {}), '(1.0, Z > 0.0)\n', (2177, 2191), True, 'import numpy as np\n'), ((1085, 1108), 'numpy.subtract', 'np.subtract', (['(1)', 'outputs'], {}), '(1, outputs)\n', (1096, 1108), True, 'import numpy as np\n'), ((1629, 1647), 'numpy.square', 'np.square', (['outputs'], {}), '(outputs)\n', (1638, 1647), True, 'import numpy as np\n'), ((787, 797), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (793, 797), True, 'import numpy as np\n')] |
import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from face_dataset_train import DataLoaderFace
from torch.autograd import Variable
from model import NetworkCIFAR as Network
# Command-line configuration for architecture-evaluation training
# (DARTS-style search result trained from scratch on a face dataset).
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--sample_limit', type=int, default=10000, help='subsampling limit for search training')
args = parser.parse_args()

# Timestamped experiment directory; a copy of all *.py scripts is saved
# alongside the logs for reproducibility.
args.save = 'eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))

# Log both to stdout and to <save>/log.txt with the same format.
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
    format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)

output_dimension = 128  # embedding size produced by the network
threshold = 0.5  # decision threshold on embedding distance (unused here)
class ContrastiveLoss(torch.nn.Module):
    """Siamese contrastive loss.

    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    For similar pairs (label == 1) the penalty is dist^2 / 2; for
    dissimilar pairs (label == 0) it is max(0, margin - dist)^2 / 2.
    """
    def __init__(self, margin=1.0):
        super().__init__()
        self.margin = margin

    def forward(self, dist, label):
        # Similar pairs are pulled together ...
        similar_term = 0.5 * label * dist.pow(2)
        # ... dissimilar pairs are pushed apart, up to the margin.
        hinge = torch.clamp(self.margin - dist, min=0.0)
        dissimilar_term = 0.5 * (1 - label) * hinge.pow(2)
        return torch.mean(similar_term + dissimilar_term)
def main():
  """Train the evaluated architecture on the face dataset with a
  contrastive (triplet-pair) objective, checkpointing every epoch."""
  if not torch.cuda.is_available():
    logging.info('no gpu device available')
    sys.exit(1)

  # Seed all RNG sources for reproducibility.
  np.random.seed(args.seed)
  torch.cuda.set_device(args.gpu)
  cudnn.benchmark = True
  torch.manual_seed(args.seed)
  cudnn.enabled=True
  torch.cuda.manual_seed(args.seed)
  logging.info('gpu device = %d' % args.gpu)
  logging.info("args = %s", args)

  # NOTE(review): eval() on a CLI argument -- safe only because --arch is
  # expected to name a genotype constant; consider getattr(genotypes, ...).
  genotype = eval("genotypes.%s" % args.arch)
  model = Network(args.init_channels, output_dimension, args.layers, args.auxiliary, genotype)
  model = model.cuda()
  # "saved_models" is the sentinel default meaning "train from scratch".
  if(args.model_path != "saved_models"):
    utils.load(model, args.model_path)

  logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

  # TorchScript-compile the loss module before moving it to the GPU.
  criterion = torch.jit.script(ContrastiveLoss())
  criterion = criterion.cuda()
  optimizer = torch.optim.SGD(
    model.parameters(),
    args.learning_rate,
    momentum=args.momentum,
    weight_decay=args.weight_decay
    )

  #train_transform, valid_transform = utils._data_transforms_cifar10(args)
  print("creating dataloader")
  dataLoaderFace = DataLoaderFace(args.batch_size, workers=4, limit=args.sample_limit)
  train_queue = dataLoaderFace.get_trainloader()
  print("finished creating dataloader 1")
  valid_queue = dataLoaderFace.get_valloader()
  print("finished creating dataloader 2")

  scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

  for epoch in range(args.epochs):
    # NOTE(review): scheduler.step() before the optimizer step is the
    # legacy (pre-PyTorch-1.1) ordering; modern PyTorch warns about it.
    scheduler.step()
    logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
    # Linearly ramp drop-path probability over the course of training.
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

    train_acc, train_obj = train(train_queue, model, criterion, optimizer)
    logging.info('train_acc %f', train_acc)

    valid_acc, valid_obj = infer(valid_queue, model, criterion)
    logging.info('valid_acc %f', valid_acc)

    # Checkpoint the model weights after every epoch.
    utils.save(model, os.path.join(args.save, str(epoch) + '_weights.pt'))
def train(train_queue, model, criterion, optimizer):
    """Run one training epoch over (anchor, positive, negative) triplets.

    :param train_queue: iterable yielding 5-tuples of tensors
        (anchor, positive, negative, anchor_label, negative_label)
    :param model: network returning (embedding, aux_embedding)
    :param criterion: contrastive loss on (distance, label)
    :param optimizer: torch optimizer stepped once per batch
    :return: (mean accuracy, mean loss) over the epoch
    """
    objs = utils.AvgrageMeter()
    accuracy = utils.AvgrageMeter()
    model.train()
    for step, data in enumerate(train_queue):
        anchor_img, positive_img, negative_img, anchor_label, negative_label = data[0], data[1], data[2], data[3], data[4]
        anchor_img = Variable(anchor_img, requires_grad=False).cuda()
        positive_img = Variable(positive_img, requires_grad=False).cuda()
        negative_img = Variable(negative_img, requires_grad=False).cuda()
        # Contrastive targets: 1 for the positive pair, 0 for the negative pair.
        labels_p = torch.from_numpy(np.ones((1, positive_img.shape[0]), dtype=None)).cuda(non_blocking=True)
        labels_n = torch.from_numpy(np.zeros((1, negative_img.shape[0]), dtype=None)).cuda(non_blocking=True)
        optimizer.zero_grad()
        anchor_out, anchor_out_aux = model(anchor_img)
        positive_out, positive_out_aux = model(positive_img)
        negative_out, negative_out_aux = model(negative_img)
        # Squared Euclidean distances between embeddings, per sample.
        dist_p = (positive_out - anchor_out).pow(2).sum(1)
        dist_n = (negative_out - anchor_out).pow(2).sum(1)
        loss_p = criterion(dist_p, labels_p)
        loss_n = criterion(dist_n, labels_n)
        loss = loss_n + loss_p
        if args.auxiliary:
            # Same contrastive loss applied to the auxiliary head, weighted in.
            dist_p_aux = (positive_out_aux - anchor_out_aux).pow(2).sum(1)
            dist_n_aux = (negative_out_aux - anchor_out_aux).pow(2).sum(1)
            loss_p_aux = criterion(dist_p_aux, labels_p)
            loss_n_aux = criterion(dist_n_aux, labels_n)
            loss_aux = loss_n_aux + loss_p_aux
            loss += args.auxiliary_weight*loss_aux
        loss.backward()
        # BUGFIX: torch.nn.utils.clip_grad_norm was deprecated in PyTorch 0.4
        # and removed later; the in-place clip_grad_norm_ has identical
        # clipping semantics.
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        n = positive_img.shape[0]
        prec = utils.accuracy_face(dist_p, dist_n, threshold)
        objs.update(loss.data.item(), n)
        accuracy.update(prec, n)
        #top5.update(prec5.data.item(), n)
        if step % args.report_freq == 0:
            print("--- Distances ---")
            print(dist_p)
            print(dist_n)
            logging.info('train %03d %f %f', step, objs.avg, accuracy.avg)
    return accuracy.avg, objs.avg
def infer(valid_queue, model, criterion):
    """Evaluate the model on validation triplets (no optimizer step).

    Mirrors train() without backprop.
    :return: (mean accuracy, mean loss) over the loader.
    """
    objs = utils.AvgrageMeter()
    accuracy = utils.AvgrageMeter()
    model.eval()
    # IMPROVEMENT: validation needs no autograd graph; torch.no_grad() keeps
    # every computed value identical while cutting GPU memory use.
    with torch.no_grad():
        for step, data in enumerate(valid_queue):
            anchor_img, positive_img, negative_img, anchor_label, negative_label = data[0], data[1], data[2], data[3], data[4]
            anchor_img = Variable(anchor_img, requires_grad=False).cuda()
            positive_img = Variable(positive_img, requires_grad=False).cuda()
            negative_img = Variable(negative_img, requires_grad=False).cuda()
            n = positive_img.shape[0]
            # Contrastive targets: 1 for the positive pair, 0 for the negative.
            labels_p = torch.from_numpy(np.ones((1, positive_img.shape[0]), dtype=None)).cuda(non_blocking=True)
            labels_n = torch.from_numpy(np.zeros((1, negative_img.shape[0]), dtype=None)).cuda(non_blocking=True)
            anchor_out, _ = model(anchor_img)
            positive_out, _ = model(positive_img)
            negative_out, _ = model(negative_img)
            # Squared Euclidean distances between embeddings, per sample.
            dist_p = (positive_out - anchor_out).pow(2).sum(1)
            dist_n = (negative_out - anchor_out).pow(2).sum(1)
            loss_p = criterion(dist_p, labels_p)
            loss_n = criterion(dist_n, labels_n)
            loss = loss_n + loss_p
            prec = utils.accuracy_face(dist_p, dist_n, threshold)
            objs.update(loss.data.item(), n)
            accuracy.update(prec, n)
            if step % args.report_freq == 0:
                logging.info('valid %03d %f %f', step, objs.avg, accuracy.avg)
    return accuracy.avg, objs.avg
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"time.strftime",
"numpy.ones",
"logging.Formatter",
"utils.accuracy_face",
"model.NetworkCIFAR",
"glob.glob",
"utils.AvgrageMeter",
"os.path.join",
"face_dataset_train.DataLoaderFace",
"torch.cuda.set_device",
"utils.count_parameters_in_MB",
... | [((389, 421), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""cifar"""'], {}), "('cifar')\n", (412, 421), False, 'import argparse\n'), ((2478, 2589), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO', 'format': 'log_format', 'datefmt': '"""%m/%d %I:%M:%S %p"""'}), "(stream=sys.stdout, level=logging.INFO, format=\n log_format, datefmt='%m/%d %I:%M:%S %p')\n", (2497, 2589), False, 'import logging\n'), ((2339, 2369), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (2352, 2369), False, 'import time\n'), ((2614, 2648), 'os.path.join', 'os.path.join', (['args.save', '"""log.txt"""'], {}), "(args.save, 'log.txt')\n", (2626, 2648), False, 'import os\n'), ((2666, 2695), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (2683, 2695), False, 'import logging\n'), ((3394, 3419), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3408, 3419), True, 'import numpy as np\n'), ((3422, 3453), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (3443, 3453), False, 'import torch\n'), ((3481, 3509), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3498, 3509), False, 'import torch\n'), ((3533, 3566), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3555, 3566), False, 'import torch\n'), ((3569, 3611), 'logging.info', 'logging.info', (["('gpu device = %d' % args.gpu)"], {}), "('gpu device = %d' % args.gpu)\n", (3581, 3611), False, 'import logging\n'), ((3614, 3645), 'logging.info', 'logging.info', (['"""args = %s"""', 'args'], {}), "('args = %s', args)\n", (3626, 3645), False, 'import logging\n'), ((3703, 3791), 'model.NetworkCIFAR', 'Network', (['args.init_channels', 'output_dimension', 'args.layers', 'args.auxiliary', 'genotype'], {}), '(args.init_channels, output_dimension, args.layers, args.auxiliary,\n 
genotype)\n', (3710, 3791), True, 'from model import NetworkCIFAR as Network\n'), ((4331, 4398), 'face_dataset_train.DataLoaderFace', 'DataLoaderFace', (['args.batch_size'], {'workers': '(4)', 'limit': 'args.sample_limit'}), '(args.batch_size, workers=4, limit=args.sample_limit)\n', (4345, 4398), False, 'from face_dataset_train import DataLoaderFace\n'), ((5227, 5247), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (5245, 5247), False, 'import utils\n'), ((5261, 5281), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (5279, 5281), False, 'import utils\n'), ((7222, 7242), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (7240, 7242), False, 'import utils\n'), ((7256, 7276), 'utils.AvgrageMeter', 'utils.AvgrageMeter', ([], {}), '()\n', (7274, 7276), False, 'import utils\n'), ((2419, 2436), 'glob.glob', 'glob.glob', (['"""*.py"""'], {}), "('*.py')\n", (2428, 2436), False, 'import glob\n'), ((2697, 2716), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2714, 2716), False, 'import logging\n'), ((3304, 3329), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3327, 3329), False, 'import torch\n'), ((3335, 3374), 'logging.info', 'logging.info', (['"""no gpu device available"""'], {}), "('no gpu device available')\n", (3347, 3374), False, 'import logging\n'), ((3379, 3390), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3387, 3390), False, 'import sys\n'), ((3856, 3890), 'utils.load', 'utils.load', (['model', 'args.model_path'], {}), '(model, args.model_path)\n', (3866, 3890), False, 'import utils\n'), ((3928, 3963), 'utils.count_parameters_in_MB', 'utils.count_parameters_in_MB', (['model'], {}), '(model)\n', (3956, 3963), False, 'import utils\n'), ((4938, 4977), 'logging.info', 'logging.info', (['"""train_acc %f"""', 'train_acc'], {}), "('train_acc %f', train_acc)\n", (4950, 4977), False, 'import logging\n'), ((5047, 5086), 'logging.info', 'logging.info', (['"""valid_acc %f"""', 'valid_acc'], 
{}), "('valid_acc %f', valid_acc)\n", (5059, 5086), False, 'import logging\n'), ((6799, 6845), 'utils.accuracy_face', 'utils.accuracy_face', (['dist_p', 'dist_n', 'threshold'], {}), '(dist_p, dist_n, threshold)\n', (6818, 6845), False, 'import utils\n'), ((8260, 8306), 'utils.accuracy_face', 'utils.accuracy_face', (['dist_p', 'dist_n', 'threshold'], {}), '(dist_p, dist_n, threshold)\n', (8279, 8306), False, 'import utils\n'), ((7073, 7135), 'logging.info', 'logging.info', (['"""train %03d %f %f"""', 'step', 'objs.avg', 'accuracy.avg'], {}), "('train %03d %f %f', step, objs.avg, accuracy.avg)\n", (7085, 7135), False, 'import logging\n'), ((8421, 8483), 'logging.info', 'logging.info', (['"""valid %03d %f %f"""', 'step', 'objs.avg', 'accuracy.avg'], {}), "('valid %03d %f %f', step, objs.avg, accuracy.avg)\n", (8433, 8483), False, 'import logging\n'), ((5480, 5521), 'torch.autograd.Variable', 'Variable', (['anchor_img'], {'requires_grad': '(False)'}), '(anchor_img, requires_grad=False)\n', (5488, 5521), False, 'from torch.autograd import Variable\n'), ((5548, 5591), 'torch.autograd.Variable', 'Variable', (['positive_img'], {'requires_grad': '(False)'}), '(positive_img, requires_grad=False)\n', (5556, 5591), False, 'from torch.autograd import Variable\n'), ((5618, 5661), 'torch.autograd.Variable', 'Variable', (['negative_img'], {'requires_grad': '(False)'}), '(negative_img, requires_grad=False)\n', (5626, 5661), False, 'from torch.autograd import Variable\n'), ((7473, 7514), 'torch.autograd.Variable', 'Variable', (['anchor_img'], {'requires_grad': '(False)'}), '(anchor_img, requires_grad=False)\n', (7481, 7514), False, 'from torch.autograd import Variable\n'), ((7541, 7584), 'torch.autograd.Variable', 'Variable', (['positive_img'], {'requires_grad': '(False)'}), '(positive_img, requires_grad=False)\n', (7549, 7584), False, 'from torch.autograd import Variable\n'), ((7611, 7654), 'torch.autograd.Variable', 'Variable', (['negative_img'], {'requires_grad': '(False)'}), 
'(negative_img, requires_grad=False)\n', (7619, 7654), False, 'from torch.autograd import Variable\n'), ((3131, 3149), 'torch.pow', 'torch.pow', (['dist', '(2)'], {}), '(dist, 2)\n', (3140, 3149), False, 'import torch\n'), ((5701, 5748), 'numpy.ones', 'np.ones', (['(1, positive_img.shape[0])'], {'dtype': 'None'}), '((1, positive_img.shape[0]), dtype=None)\n', (5708, 5748), True, 'import numpy as np\n'), ((5806, 5854), 'numpy.zeros', 'np.zeros', (['(1, negative_img.shape[0])'], {'dtype': 'None'}), '((1, negative_img.shape[0]), dtype=None)\n', (5814, 5854), True, 'import numpy as np\n'), ((7726, 7773), 'numpy.ones', 'np.ones', (['(1, positive_img.shape[0])'], {'dtype': 'None'}), '((1, positive_img.shape[0]), dtype=None)\n', (7733, 7773), True, 'import numpy as np\n'), ((7831, 7879), 'numpy.zeros', 'np.zeros', (['(1, negative_img.shape[0])'], {'dtype': 'None'}), '((1, negative_img.shape[0]), dtype=None)\n', (7839, 7879), True, 'import numpy as np\n'), ((3216, 3256), 'torch.clamp', 'torch.clamp', (['(self.margin - dist)'], {'min': '(0.0)'}), '(self.margin - dist, min=0.0)\n', (3227, 3256), False, 'import torch\n')] |
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras import metrics, losses
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
# Chart-endpoint names (apparently unused in this script apart from reference;
# only chart_names2 narrows to the market price).
chart_names = ["total-bitcoins", "market-price", "market-cap", "trade-volume", "blocks-size", "avg-block-size",
               "n-transactions-per-block", "median-confirmation-time", "hash-rate",
               "difficulty", "miners-revenue", "transaction-fees", "transaction-fees-usd",
               "cost-per-transaction-percent", "cost-per-transaction", "n-unique-addresses", "n-transactions",
               "n-transactions-total", "transactions-per-second", "mempool-size", "mempool-growth", "mempool-count",
               "utxo-count", "n-transactions-excluding-popular",
               "n-transactions-excluding-chains-longer-than-100", "output-volume", "estimated-transaction-volume-usd",
               "estimated-transaction-volume", "my-wallet-n-users"]
chart_names2 = ["market-price"]
# Metrics/losses passed to model.compile; model.evaluate reports the compile
# loss first, followed by these, in order.
my_metrics = [metrics.binary_accuracy,
              metrics.mean_absolute_error,
              metrics.sparse_categorical_accuracy,
              losses.mean_absolute_percentage_error,
              losses.squared_hinge,
              losses.hinge,
              losses.poisson]
def plot(x_list, y_list, y_list2, chart_name):
    """Overlay two series (predicted first, then real) on one titled figure
    and display it."""
    for series, tag in ((y_list, 'line1'), (y_list2, 'line2')):
        plt.plot(x_list, series, label=tag)
    plt.title(chart_name)
    plt.legend(['predicted', 'real'], loc='upper left')
    plt.show()
def create_model(input_shape=None):
    """Build and compile the single-layer LSTM regressor.

    :param input_shape: (timesteps, features) for the LSTM input layer.
        Defaults to the shape of the module-level ``x_train``, preserving the
        original global-reading behaviour for existing callers.
    :return: a compiled keras Sequential model (adagrad optimizer, MSE loss).
    """
    if input_shape is None:
        # Backward-compatible default: infer from the global training array.
        input_shape = (x_train.shape[1], x_train.shape[2])
    model = Sequential()
    model.add(LSTM(units=64, input_shape=input_shape))
    model.add(Dropout(rate=0.3))
    model.add(Dense(1))
    model.compile(optimizer='adagrad', loss='mse',
                  metrics=my_metrics)
    return model
def train_model(model):
    """Fit ``model`` on the module-level train/val split for 20 epochs and
    plot the training/validation loss curves."""
    history = model.fit(x_train, y_train, epochs=20, batch_size=128, callbacks=[], validation_data=(x_val, y_val))
    for curve in ('loss', 'val_loss'):
        plt.plot(history.history[curve])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Val'], loc='upper left')
    plt.show()
def predict(xs, ys, name):
    """Predict with the global ``model`` on (xs, ys), print per-metric
    evaluation scores, and plot predicted vs. real series.

    :param xs: model inputs, shape (samples, timesteps, features)
    :param ys: targets (scaled when ``normalization`` is True)
    :param name: label used in the printout and the plot title
    """
    predicted = model.predict(xs)
    if normalization:
        predicted = test_scaller2.inverse_transform(predicted)
    score = model.evaluate(x=xs, y=ys, verbose=2)
    # prepend scores with default loss function so names align with evaluate()
    my_metrics.insert(0, losses.mean_squared_error)
    print(f'--------- {name} -----------')
    # Idiom fix: a plain loop instead of a list comprehension for side effects.
    for index, item in enumerate(score):
        print(f'{my_metrics[index].__name__}: {item}')
    # BUGFIX: the original my_metrics.pop() removed the LAST entry
    # (losses.poisson) instead of the mean_squared_error just inserted at the
    # front, silently corrupting my_metrics on every call. pop(0) undoes the
    # insert exactly.
    my_metrics.pop(0)
    x_series = list(range(0, predicted.shape[0]))
    x_series = np.reshape(x_series, (x_series.__len__(), 1))
    if normalization:
        plot(x_series, predicted, test_scaller2.inverse_transform(ys.reshape(-1, 1)), name)
    else:
        plot(x_series, predicted, ys.reshape(-1, 1), name)
if __name__ == '__main__':
    normalization = True
    # Use columns 2:5 of the CSV as the feature matrix (exact column meaning
    # depends on bitcoin_market_data.csv — verify against the file header).
    data_2018 = pd.read_csv('bitcoin_market_data.csv', sep=',')
    total_data = pd.concat([data_2018]).iloc[:, 2:5].values
    # Chronological 90% / 5% / 5% train / val / test split.
    train_set_size = int(0.9 * total_data.shape[0])
    val_set_start = train_set_size + int(0.05 * total_data.shape[0])
    test_set_start = val_set_start + int(0.05 * total_data.shape[0]) + 1
    train_set = total_data[:train_set_size, :]
    # val_set = total_data[:train_set_size:int(1/0.05), :]
    val_set = total_data[train_set_size:val_set_start, :]
    test_set = total_data[val_set_start:test_set_start, :]
    test_set2 = test_set.copy()[:, 0]
    # Each split gets its own MinMaxScaler; test_scaller2 fits the target
    # column alone so predictions can be inverse-transformed in predict().
    train_scaller = MinMaxScaler(feature_range=(0, 1))
    if normalization:
        train_set = train_scaller.fit_transform(train_set)
    val_scaller = MinMaxScaler(feature_range=(0, 1))
    if normalization:
        val_set = val_scaller.fit_transform(val_set)
    test_scaller = MinMaxScaler(feature_range=(0, 1))
    if normalization:
        test_set = test_scaller.fit_transform(test_set)
    test_scaller2 = MinMaxScaler(feature_range=(0, 1))
    if normalization:
        test_set2 = test_set2.reshape(-1, 1)
        test_set2 = test_scaller2.fit_transform(test_set2)
    x_train = []
    y_train = []
    x_val = []
    y_val = []
    x_test = []
    y_test = []
    # Sliding window: each sample is the previous `vector_size` rows; the
    # target is column 0 of the current row.
    vector_size = 128
    for i in range(vector_size, train_set.shape[0]):
        x_train.append(train_set[i - vector_size:i, :])
        y_train.append(train_set[i, 0])
    x_train, y_train = np.array(x_train), np.array(y_train)
    for i in range(vector_size, val_set.shape[0]):
        x_val.append(val_set[i - vector_size:i, :])
        y_val.append(val_set[i, 0])
    x_val, y_val = np.array(x_val), np.array(y_val)
    for i in range(vector_size, test_set.shape[0]):
        x_test.append(test_set[i - vector_size:i, :])
        y_test.append(test_set[i, 0])
    x_test, y_test = np.array(x_test), np.array(y_test)
    # Reshape to (samples, timesteps, features) as the LSTM expects.
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], x_train.shape[2]))
    x_val = np.reshape(x_val, (x_val.shape[0], x_val.shape[1], x_val.shape[2]))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], x_test.shape[2]))
    model = create_model()
    # train or load, don't do both !
    # train_model(model)
    model.load_weights('MODEL_SPLIT2.h5')
    predict(x_train, y_train, "Train")
    predict(x_val, y_val, "Val")
    predict(x_test, y_test, "Test")
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"keras.layers.LSTM",
"matplotlib.pyplot.legend",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.ylabel",
"keras.layers.Dropout",
"keras.layers.Dense",
"numpy.array",
"numpy.reshape",
"... | [((1430, 1469), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list'], {'label': '"""line1"""'}), "(x_list, y_list, label='line1')\n", (1438, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1514), 'matplotlib.pyplot.plot', 'plt.plot', (['x_list', 'y_list2'], {'label': '"""line2"""'}), "(x_list, y_list2, label='line2')\n", (1482, 1514), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1540), 'matplotlib.pyplot.title', 'plt.title', (['chart_name'], {}), '(chart_name)\n', (1528, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1596), 'matplotlib.pyplot.legend', 'plt.legend', (["['predicted', 'real']"], {'loc': '"""upper left"""'}), "(['predicted', 'real'], loc='upper left')\n", (1555, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1609, 1611), True, 'import matplotlib.pyplot as plt\n'), ((1646, 1658), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1656, 1658), False, 'from keras.models import Sequential\n'), ((2045, 2076), 'matplotlib.pyplot.plot', 'plt.plot', (["story.history['loss']"], {}), "(story.history['loss'])\n", (2053, 2076), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2116), 'matplotlib.pyplot.plot', 'plt.plot', (["story.history['val_loss']"], {}), "(story.history['val_loss'])\n", (2089, 2116), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2144), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (2130, 2144), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2167), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (2159, 2167), True, 'import matplotlib.pyplot as plt\n'), ((2172, 2191), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (2182, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2242), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Val']"], {'loc': '"""upper left"""'}), "(['Train', 'Val'], 
loc='upper left')\n", (2206, 2242), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2257), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2255, 2257), True, 'import matplotlib.pyplot as plt\n'), ((3075, 3122), 'pandas.read_csv', 'pd.read_csv', (['"""bitcoin_market_data.csv"""'], {'sep': '""","""'}), "('bitcoin_market_data.csv', sep=',')\n", (3086, 3122), True, 'import pandas as pd\n'), ((3662, 3696), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3674, 3696), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3797, 3831), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3809, 3831), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((3927, 3961), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (3939, 3961), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4061, 4095), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (4073, 4095), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4961, 5036), 'numpy.reshape', 'np.reshape', (['x_train', '(x_train.shape[0], x_train.shape[1], x_train.shape[2])'], {}), '(x_train, (x_train.shape[0], x_train.shape[1], x_train.shape[2]))\n', (4971, 5036), True, 'import numpy as np\n'), ((5049, 5116), 'numpy.reshape', 'np.reshape', (['x_val', '(x_val.shape[0], x_val.shape[1], x_val.shape[2])'], {}), '(x_val, (x_val.shape[0], x_val.shape[1], x_val.shape[2]))\n', (5059, 5116), True, 'import numpy as np\n'), ((5130, 5201), 'numpy.reshape', 'np.reshape', (['x_test', '(x_test.shape[0], x_test.shape[1], x_test.shape[2])'], {}), '(x_test, (x_test.shape[0], x_test.shape[1], x_test.shape[2]))\n', (5140, 5201), True, 'import numpy as np\n'), ((1673, 1737), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(64)', 
'input_shape': '(x_train.shape[1], x_train.shape[2])'}), '(units=64, input_shape=(x_train.shape[1], x_train.shape[2]))\n', (1677, 1737), False, 'from keras.layers import LSTM\n'), ((1753, 1770), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.3)'}), '(rate=0.3)\n', (1760, 1770), False, 'from keras.layers import Dropout\n'), ((1786, 1794), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1791, 1794), False, 'from keras.layers import Dense\n'), ((4515, 4532), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (4523, 4532), True, 'import numpy as np\n'), ((4534, 4551), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4542, 4551), True, 'import numpy as np\n'), ((4711, 4726), 'numpy.array', 'np.array', (['x_val'], {}), '(x_val)\n', (4719, 4726), True, 'import numpy as np\n'), ((4728, 4743), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (4736, 4743), True, 'import numpy as np\n'), ((4911, 4927), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (4919, 4927), True, 'import numpy as np\n'), ((4929, 4945), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4937, 4945), True, 'import numpy as np\n'), ((3141, 3163), 'pandas.concat', 'pd.concat', (['[data_2018]'], {}), '([data_2018])\n', (3150, 3163), True, 'import pandas as pd\n')] |
import sys,os,math,time,json,fitsio
import os.path
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import pylab as py
from astropy.table import Table, Column
import datetime
from bokeh.plotting import *
from bokeh.embed import components
from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis
from bokeh.io import output_notebook
from bokeh.models.widgets import RadioButtonGroup
from bokeh.layouts import column
from bokeh.models import CustomJS
from bokeh.transform import linear_cmap
from bokeh.models.widgets import tables as bktables
from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider
from datetime import datetime as dt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz,solar_system_ephemeris,get_body,get_body_barycentric
from datetime import datetime
from astropy.time import Time, TimezoneInfo
import pylunar
from astroplan import Observer
import argparse
from bokeh.embed import file_html
from bokeh.resources import CDN
from astropy.coordinates import get_moon
#########################################################
## HTML header
def html_header(title):
    """Build the opening HTML (doctype, head with Bokeh 0.13.0 CDN assets,
    and the opening <body> tag) for a page titled *title*."""
    # f-string instead of literal concatenation; markup content is unchanged.
    return f"""
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <title>{title}</title>
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-0.13.0.min.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.13.0.min.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.pydata.org/bokeh/release/bokeh-tables-0.13.0.min.css" type="text/css" />
<script type="text/javascript" src="https://cdn.pydata.org/bokeh/release/bokeh-0.13.0.min.js"></script>
<script type="text/javascript" src="https://cdn.pydata.org/bokeh/release/bokeh-widgets-0.13.0.min.js"></script>
<script type="text/javascript" src="https://cdn.pydata.org/bokeh/release/bokeh-tables-0.13.0.min.js"></script>
        <script type="text/javascript">
            Bokeh.set_log_level("info");
        </script>
</head>
<body>
    """
#########################################################
def html_footer():
    """Return the closing </body></html> fragment matching html_header()."""
    return """
    </body>

</html>
    """
#########################################################
def arg_parser():
    """Parse command-line options for the DESI observation-planning tool.

    Only --input is required; the remaining options default to None and are
    handled (or ignored) downstream.
    """
    parser = argparse.ArgumentParser(usage="""
   - A visulaization tool for planning DESI observations
   - How to run:
       1$ prog -i [tileFile] -j [jsonFile] -o [HTML name] -t [HTML title] -p [plot title]
      or
   - jsonFile and HTMLname are optional.
     If HTMLname is not provided, this program tries to open up the output on the default browser.
     If jsonFile is not provided, all plotted tiles would have the same color.
   - Example(s):
      $ python prog -i tileFile.fits -j qa.json -o output.html
      $ python prog -h
        To see help and all available options.
   """)
    parser.add_argument("-i", "--input",  type=str, required=True, help="input tiles file (FITS)")
    parser.add_argument("-j", "--json",   type=str, required=False, help="qa json file (optional)")
    parser.add_argument("-o", "--output", type=str, required=False, help="output html file (optional)")
    parser.add_argument("-t", "--title",  type=str, required=False, help="HTML title (optional)")
    parser.add_argument("-p", "--ptitle", type=str, required=False, help="plot title (optional)")
    parser.add_argument("-x", "--xfile",  type=str, required=False, help="Text file to be printed on the right side of plot (optional)")
    args = parser.parse_args()
    return args
#########################################################
def get_kp_twilights(tt, dd):
    """Return (evening, morning) astronomical-twilight datetimes at Kitt Peak
    for the night following date dd=[Y, M, D] at time tt=[h, m, s]."""
    site = Observer.at_site("Kitt Peak", timezone="MST")
    # Build e.g. '2020-1-1 12:0:0' exactly as str() renders each component.
    when = Time('{0}-{1}-{2} {3}:{4}:{5}'.format(dd[0], dd[1], dd[2], tt[0], tt[1], tt[2]))
    dusk = site.twilight_evening_astronomical(when, which='next').datetime
    dawn = site.twilight_morning_astronomical(when, which='next').datetime
    return dusk, dawn
#########################################################
def moonLoc(tt, dd, loc, mooninfo_obj):
    """Return the Moon's (ra, dec) plus its fractional phase and phase name.

    :param tt: [hour, minute, second]
    :param dd: [year, month, day]
    :param loc: astropy EarthLocation of the observer
    :param mooninfo_obj: pylunar MoonInfo instance, mutated via update()
    :return: (ra, dec, fractional_phase, phase_name)
    """
    ds = str(dd[0]) + '-' + str(dd[1]) + '-' + str(dd[2])
    ts = str(tt[0]) + ':' + str(tt[1]) + ':' + str(tt[2])
    t = Time(ds + ' ' + ts)
    # loc = EarthLocation.of_site('Kitt Peak')
    moon_loc = get_moon(t, loc)
    # Idiom fix: update() mutates mooninfo_obj in place; the original bound
    # its return value to an unused local.
    mooninfo_obj.update(str(t))
    frac_phase = mooninfo_obj.fractional_phase()
    name_phase = mooninfo_obj.phase_name()
    return moon_loc.ra, moon_loc.dec, frac_phase, name_phase
#########################################################
def jupLoc(tt, dd, loc):
    """Return Jupiter's (ra, dec) as seen from ``loc`` at date dd=[Y, M, D],
    time tt=[h, m, s], using astropy's builtin ephemeris."""
    date_part = '{0}-{1}-{2}'.format(dd[0], dd[1], dd[2])
    time_part = '{0}:{1}:{2}'.format(tt[0], tt[1], tt[2])
    t = Time(date_part + ' ' + time_part)
    # loc = EarthLocation.of_site('Kitt Peak')
    with solar_system_ephemeris.set('builtin'):
        jup = get_body('jupiter', t, loc)
    return jup.ra, jup.dec
#### Functions
#########################################################
def add_plane(p, color='black', plane=None, projection=None):
    """Draw a great-circle plane (e.g. the ecliptic) onto bokeh figure ``p``.

    :param p: bokeh figure to draw on
    :param color: line color for the central plane
    :param plane: source sky system name for kapteyn.wcs (e.g. 'ecliptic')
    :param projection: target sky system name (e.g. 'equatorial')
    No-op when either ``plane`` or ``projection`` is omitted.
    """
    from kapteyn import wcs
    # Idiom fix: compare against None with `is`, not `==`.
    if plane is None or projection is None:
        return
    # Sample the plane every 2 degrees along its own longitude axis.
    alpha = np.arange(0., 360, 2)
    delta = alpha * 0.
    tran = wcs.Transformation(plane + " ", projection)
    alpha, delta = tran((alpha, delta))
    # Sort by transformed longitude so the line does not zig-zag across the plot.
    ind = np.argsort(alpha)
    alpha = alpha[ind]
    delta = delta[ind]
    p.line(alpha, delta, line_width=2, color=color)
    # Faint guide lines offset +/-5 degrees around the plane.
    p.line(alpha + 5, delta + 5, color='black', alpha=0.5)
    p.line(alpha - 5, delta - 5, color='black', alpha=0.5)
#########################################################
def skyCircle(tt, dd, airmass_lim):
    """Return a bokeh ColumnDataSource tracing the circle of sky observable
    from Kitt Peak below ``airmass_lim`` at MST time tt=[h, m, s] on date
    dd=[Y, M, D].

    Keys: "RA0" (RA relative to the zenith), "RA" (absolute RA in deg, after
    adding the local sidereal time), "DEC" (deg).
    """
    ds = str(dd[0]) + '-' + str(dd[1]) + '-' + str(dd[2])
    ts = str(tt[0]) + ':' + str(tt[1]) + ':' + str(tt[2])
    observatory = "kpno"
    name = "<NAME>"
    # Kitt Peak geographic coordinates.
    Lon = -(111 + 35 / 60. + 59.6 / 3600)
    Lat = 31.9599
    Height = 2120. # meter
    obsTime = dt(dd[0], dd[1], dd[2], tt[0], tt[1], tt[2])
    reference = dt(2000, 1, 1, 12, 0, 0) # UTCref
    reference = time.mktime(reference.timetuple())
    # +7 h: local (MST) to UTC offset — presumably; verify against caller.
    obsTime = time.mktime(obsTime.timetuple()) + 7 * 3600
    # Days elapsed since the J2000 reference epoch.
    deltaT = (obsTime - reference) / (24 * 3600);
    # Convert to LST (local sidereal time) in hours, then degrees.
    LST_hours = ((18.697374558 + 24.06570982441908 * deltaT) + Lon / 15.) % 24;
    LST_degrees = LST_hours * 15
    obsTime = Time(ds + ' ' + ts)
    loc = EarthLocation(lat=Lat * u.deg, lon=Lon * u.deg, height=Height * u.m)
    # Zenith angle at which the airmass (sec z) equals airmass_lim.
    zenithangle = math.acos(
        1 / airmass_lim) * 180 / math.pi # 48.19 # deg The zenith angle at which airmass equals 1.5
    # Ring of alt-az points at constant altitude, every 3 deg in azimuth.
    az = np.arange(0, 360, 3)
    alt = az * 0 + (90 - zenithangle)
    newAltAzcoordiantes = SkyCoord(alt=alt, az=az, obstime=obsTime, frame='altaz', location=loc, unit="deg")
    ra = newAltAzcoordiantes.icrs.ra.deg
    dec = newAltAzcoordiantes.icrs.dec.deg
    # RA of the local meridian (alt=90 is the zenith) used as the reference.
    newAltAzcoordiantes = SkyCoord(alt=[90], az=[90], obstime=obsTime, frame='altaz', location=loc, unit="deg")
    ra0 = newAltAzcoordiantes.icrs.ra.deg
    ra0 = ra0[0]
    ra0 = (ra - ra0) % 360
    return ColumnDataSource({"RA0": ra0, "RA": (ra0 + LST_degrees) % 360, "DEC": dec})
#########################################################
def bokehTile(tileFile, jsonFile, TT=[0, 0, 0], DD=[2019, 10, 1], dynamic=False, plotTitle=''):
citls, h = fitsio.read(tileFile, header=True)
w = (np.where(citls['IN_DESI'] == 1)[0])
inci = citls[w]
if jsonFile is not None:
with open(jsonFile, "r") as read_file:
data = json.load(read_file)
## Coloring scheme
palette = ['green', 'red', 'white']
dye = []
for tile in citls['TILEID']:
rang = 2 # 'orange'
if jsonFile is not None:
if str(tile) in data:
rang = 0 # 'green' #green default
if len(data[str(tile)]['unassigned']) > 0: # not assigned (red)
rang = 1 # 'red' #'red'
if (0 in data[str(tile)]['gfa_stars_percam']):
print(data[str(tile)]['gfa_stars_percam'])
rang = 1 # 'cyan'
else: rang = 0 # green if qa.json is not provided
dye.append(rang)
dye = np.asarray(dye)
w = (np.where(dye < 2)[0])
citls = citls[w]
dye = dye[w]
mapper = linear_cmap(field_name='DYE', palette=palette, low=0, high=2)
#########################################################
TOOLS = ['pan', 'tap', 'wheel_zoom', 'box_zoom', 'reset', 'save', 'box_select']
obsTime = dt(DD[0], DD[1], DD[2], TT[0], TT[1], TT[2])
# print(get_kp_twilights(TT,DD))
if plotTitle=='' or plotTitle is None:
PTITLE = ''
else:
PTITLE = 'Program: '+plotTitle
p = figure(tools=TOOLS, toolbar_location="right", plot_width=800, plot_height=450,
title=PTITLE, active_drag='box_select') # str(DD[1])+" - 2019")
p.title.text_font_size = '16pt'
p.title.text_color = 'black'
p.grid.grid_line_color = "gainsboro"
############################### adding ecliptic plane+ hour grid ####################3
add_plane(p, color='red', plane='ecliptic', projection='equatorial')
tiledata = dict(
RA = citls['RA'],
DEC = citls['DEC'],
TILEID = citls['TILEID'],
BRIGHTRA = citls['BRIGHTRA'][:,0],
BRIGHTDEC = citls['BRIGHTDEC'][:,0],
BRIGHTVTMAG = citls['BRIGHTVTMAG'][:,0],
EBV_MED = np.round(citls['EBV_MED'], 3),
STAR_DENSITY = citls['STAR_DENSITY'],
DYE = dye,
program = citls['PROGRAM'],
selected = np.ones(len(citls), dtype=bool),
)
for colname in ['STAR_DENSITY', 'EBV_MED']:
if colname in citls.dtype.names:
tiledata[colname] = citls[colname]
tiles = ColumnDataSource(data=tiledata)
colformat = bktables.NumberFormatter(format='0,0.00')
columns = [
bktables.TableColumn(field='TILEID', title='TILEID', width=80),
bktables.TableColumn(field='RA', title='RA', formatter=colformat),
bktables.TableColumn(field='DEC', title='DEC', formatter=colformat),
]
for colname in ['STAR_DENSITY', 'EBV_MED']:
if colname in tiledata:
columns.append(bktables.TableColumn(field=colname, title=colname, formatter=colformat))
columns.append(bktables.TableColumn(field='selected', title='Selected'))
tiletable = bktables.DataTable(columns=columns, source=tiles, width=800)
tiles.selected.js_on_change('indices', CustomJS(args=dict(s1=tiles), code="""
var inds = cb_obj.indices;
var d1 = s1.data;
for (var i=0; i<d1['selected'].length; i++) {
d1['selected'][i] = false;
}
for (var i = 0; i < inds.length; i++) {
d1['selected'][inds[i]] = true;
}
s1.change.emit();
""")
)
render = p.circle('RA', 'DEC', source=tiles, size=9, line_color='chocolate', color=mapper, alpha=0.4,
hover_color='orange', hover_alpha=1, hover_line_color='red',
# set visual properties for selected glyphs
selection_fill_color='orange',
selection_line_color='white',
# set visual properties for non-selected glyphs
nonselection_fill_alpha=0.4,
nonselection_fill_color=mapper)
p.xaxis.axis_label = 'RA [deg]'
p.yaxis.axis_label = 'Dec. [deg]'
p.xaxis.axis_label_text_font_size = "14pt"
p.yaxis.axis_label_text_font_size = "14pt"
p.grid.grid_line_color = "gainsboro"
p.yaxis.major_label_text_font_size = "12pt"
p.xaxis.major_label_text_font_size = "12pt"
p.x_range = Range1d(360, 0)
p.y_range = Range1d(-40, 95)
p.toolbar.logo = None
p.toolbar_location = None
# mytext = Label(x=180, y=-35, text="S", text_color='gray', text_font_size='12pt') ; p.add_layout(mytext)
# mytext = Label(x=180, y=88, text="N", text_color='gray', text_font_size='12pt') ; p.add_layout(mytext)
# mytext = Label(x=350, y=45, text="E", text_color='gray', text_font_size='12pt', angle=np.pi/2) ; p.add_layout(mytext)
# mytext = Label(x=4, y=45, text="W", text_color='gray', text_font_size='12pt', angle=np.pi/2) ; p.add_layout(mytext)
## Javascript code to open up custom html pages, once user click on a tile
code = """
var index_selected = source.selected['1d']['indices'][0];
var tileID = source.data['TILEID'][index_selected];
if (tileID!==undefined) {
var win = window.open("http://www.astro.utah.edu/~u6022465/cmx/ALL_SKY/dr8/allSKY_ci_tiles/sub_pages/tile-"+tileID+".html", " ");
try {win.focus();} catch (e){} }
"""
taptool = p.select(type=TapTool)
taptool.callback = CustomJS(args=dict(source=tiles), code=code)
## The html code for the hover window that contain tile infrormation
ttp = """
<div>
<div>
<span style="font-size: 14px; color: blue;">Tile ID:</span>
<span style="font-size: 14px; font-weight: bold;">@TILEID{int}</span>
</div>
<div>
<span style="font-size: 14px; color: blue;">RA:</span>
<span style="font-size: 14px; font-weight: bold;">@RA</span>
</div>
<div>
<span style="font-size: 14px; color: blue;">Dec:</span>
<span style="font-size: 14px; font-weight: bold;">@DEC</span>
</div>
<div>
<span style="font-size: 14px; color: blue;">EBV_MED:</span>
<span style="font-size: 14px; font-weight: bold;">@EBV_MED{0.000}</span>
</div>
<div>
<span style="font-size: 14px; color: blue;">STAR_DENSITY:</span>
<span style="font-size: 14px; font-weight: bold;">@STAR_DENSITY{0}</span>
</div>
<div>
<span style="font-size: 14px; color: blue;">BRIGHTEST_STAR_VTMAG:</span>
<span style="font-size: 14px; font-weight: bold;">@BRIGHTVTMAG</span>
</div>
<div>
<span style="font-size: 14px; color: blue;">BRIGHTEST_STAR_LOC:</span>
<span style="font-size: 14px; font-weight: bold;">(@BRIGHTRA, @BRIGHTDEC)</span>
</div>
</div>
"""
hover = HoverTool(tooltips=ttp, renderers=[render])
hover.point_policy = 'snap_to_data'
hover.line_policy = 'nearest'
# hover.mode='vline'
p.add_tools(hover)
cross = CrosshairTool()
# cross.dimensions='height'
cross.line_alpha = 0.3
cross.line_color = 'gray'
p.add_tools(cross)
# Setting the second y axis range name and range
p.extra_y_ranges = {"foo": p.y_range}
p.extra_x_ranges = {"joo": p.x_range}
# Adding the second axis to the plot.
p.add_layout(LinearAxis(y_range_name="foo"), 'right')
p.add_layout(LinearAxis(x_range_name="joo"), 'above')
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
if dynamic:
# twilight_source = get_kp_twilights(TT,DD) # evening and morning twilights at every TT and DD
circleSource_1 = skyCircle(TT, DD, 1.5)
p.circle('RA', 'DEC', source=circleSource_1, size=1.5, color='black')
circleSource_2 = skyCircle(TT, DD, 2.0)
p.circle('RA', 'DEC', source=circleSource_2, size=0.5, color='gray')
else:
circleSource = skyCircle(TT, DD, 1.5)
p.circle('RA', 'DEC', source=circleSource, size=1.5, color=None)
### Dealing with the Moon and Jupiter
inFile = 'moonLoc_jupLoc_fracPhase.csv' # 'moon_loc_jup_loc_fracPhase_namePhase.csv'
tbl_moon_jup = np.genfromtxt(inFile, delimiter=',', filling_values=-1, names=True, dtype=None) # , dtype=np.float)
loc = EarthLocation.of_site('Kitt Peak')
kp_lat = 31, 57, 48
kp_lon = -111, 36, 00
mooninfo_obj = pylunar.MoonInfo((kp_lat), (kp_lon))
m_ra, m_dec, frac_phase, name_phase = moonLoc(TT, DD, loc, mooninfo_obj)
j_ra, j_dec = jupLoc(TT, DD, loc)
#moonSource = ColumnDataSource({"moon_RAS": tbl_moon_jup['moon_ra'], "moon_DECS": tbl_moon_jup['moon_dec'],
# "Phase_frac": tbl_moon_jup['moon_phase_frac']})
moonSource = ColumnDataSource({"moon_RAS":tbl_moon_jup['moon_ra'], "moon_DECS":tbl_moon_jup['moon_dec'], "Phase_frac":np.round(100*tbl_moon_jup['moon_phase_frac'])})
####moon_RADEC = ColumnDataSource({"moon_ra": [m_ra.deg], "moon_dec": [m_dec.deg], "phase_frac": [frac_phase]})
moon_RADEC_ = ColumnDataSource({"moon_ra":[m_ra.deg-360], "moon_dec":[m_dec.deg],"phase_frac":[frac_phase]})
moon_RADEC = ColumnDataSource({"moon_ra":[m_ra.deg], "moon_dec":[m_dec.deg],"phase_frac":[frac_phase]})
render_moon = p.circle('moon_ra', 'moon_dec', source=moon_RADEC, size=170, color='cyan', alpha=0.2)
render_moon = p.circle('moon_ra', 'moon_dec', source=moon_RADEC, size=4, color='blue')
render_moon = p.circle('moon_ra', 'moon_dec', source=moon_RADEC_, size=170, color='cyan', alpha=0.2)
render_moon = p.circle('moon_ra', 'moon_dec', source=moon_RADEC_, size=4, color='blue')
jupSource = ColumnDataSource({"jup_RAS": tbl_moon_jup['jup_ra'], "jup_DECS": tbl_moon_jup['jup_dec']})
jup_RADEC = ColumnDataSource({"jup_ra": [j_ra.deg], "jup_dec": [j_dec.deg]})
twilight = get_kp_twilights(TT, DD) # evening and morning twilights at every TT and DD
twilight_source = ColumnDataSource({"eve_twilight": [twilight[0]], "mor_twilight": [twilight[1]]})
render_jup = p.circle('jup_ra', 'jup_dec', source=jup_RADEC, size=5, color='blue')
render_jup = p.circle('jup_ra', 'jup_dec', source=jup_RADEC, size=4, color='gold')
from bokeh.models.glyphs import Text
TXTsrc = ColumnDataSource(dict(x=[350], y=[85], text=['Moon Phase: ' + "%.0f" % (frac_phase * 100) + "%"]))
glyph = Text(x="x", y="y", text="text", angle=0, text_color="black")
p.add_glyph(TXTsrc, glyph)
TXTsrc_moon = ColumnDataSource(dict(x=[m_ra.deg+10], y=[m_dec.deg-10], text=['Moon']))
glyph = Text(x="x", y="y", text="text", angle=0, text_color="blue", text_alpha=0.3, text_font_size='10pt')
p.add_glyph(TXTsrc_moon, glyph)
TXTsrc_jup = ColumnDataSource(dict(x=[j_ra.deg+5], y=[j_dec.deg-8], text=['Jup.']))
glyph = Text(x="x", y="y", text="text", angle=0, text_color="black", text_alpha=0.3, text_font_size='10pt')
p.add_glyph(TXTsrc_jup, glyph)
callback = CustomJS(args=dict(source_sky1=circleSource_1,source_sky2=circleSource_2,
source_moon=moonSource, source_moon_RADEC=moon_RADEC,
source_moon_RADEC_=moon_RADEC_,
source_jup=jupSource, source_jup_RADEC=jup_RADEC,
sourceTXT = TXTsrc,
sourceTXTmoon = TXTsrc_moon,
sourceTXTjup = TXTsrc_jup
),
code="""
// First set times as if they were UTC
var t = new Date(time_slider.value);
var d = new Date(date_slider.value);
var data1 = source_sky1.data;
var ra_1 = data1['RA'];
var ra0_1 = data1['RA0'];
var data2 = source_sky2.data;
var ra_2 = data2['RA'];
var ra0_2 = data2['RA0'];
var data_moon = source_moon.data;
var ras_moon = data_moon['moon_RAS'];
var decs_moon = data_moon['moon_DECS'];
var phase_frac = data_moon['Phase_frac'];
var moonRADEC = source_moon_RADEC.data;
var moon_ra = moonRADEC['moon_ra'];
var moon_dec = moonRADEC['moon_dec'];
var moonRADEC_ = source_moon_RADEC_.data;
var moon_ra_ = moonRADEC_['moon_ra'];
var moon_dec_ = moonRADEC_['moon_dec'];
var data_jup = source_jup.data;
var ras_jup = data_jup['jup_RAS'];
var decs_jup = data_jup['jup_DECS'];
var jupRADEC = source_jup_RADEC.data;
var jup_ra = jupRADEC['jup_ra'];
var jup_dec = jupRADEC['jup_dec'];
var Hour = t.getUTCHours();
var Day = d.getDate();
var Month = d.getMonth();
var Year = new Array(31,28,31,30,31,30,31,31,30,31,30,31);
var all_FULdays = 0;
for (var i = 0; i < Month; i++)
all_FULdays=all_FULdays+Year[i];
all_FULdays = all_FULdays + (Day-1);
if (Hour<12) all_FULdays=all_FULdays+1;
var all_minutes = all_FULdays*24+Hour;
if (all_minutes<8800) {
moon_ra[0] = ras_moon[all_minutes];
moon_dec[0] = decs_moon[all_minutes];
moon_ra_[0] = ras_moon[all_minutes]-360.;
moon_dec_[0] = decs_moon[all_minutes];
}
var jupTXTdata = sourceTXTjup.data;
var x_jup = jupTXTdata['x'];
var y_jup = jupTXTdata['y'];
var text_jup = jupTXTdata['text'];
if (all_minutes<8800) {
jup_ra[0] = ras_jup[all_minutes];
jup_dec[0] = decs_jup[all_minutes];
x_jup[0] = jup_ra[0]+5;
y_jup[0] = jup_dec[0]-8;
}
if (t.getUTCHours() < 12) {
d.setTime(date_slider.value + 24*3600*1000);
} else {
d.setTime(date_slider.value);
}
d.setUTCHours(t.getUTCHours());
d.setUTCMinutes(t.getUTCMinutes());
d.setUTCSeconds(0);
// Correct to KPNO local time
// d object still thinks in UTC, which is 7 hours ahead of KPNO
d.setTime(d.getTime() + 7*3600*1000);
// noon UT on 2000-01-01
var reftime = new Date();
reftime.setUTCFullYear(2000);
reftime.setUTCMonth(0); // Months are 0-11 (!)
reftime.setUTCDate(1); // Days are 1-31 (!)
reftime.setUTCHours(12);
reftime.setUTCMinutes(0);
reftime.setUTCSeconds(0);
// time difference in days (starting from milliseconds)
var dt = (d.getTime() - reftime.getTime()) / (24*3600*1000);
// Convert to LST
var mayall_longitude_degrees = -(111 + 35/60. + 59.6/3600);
var LST_hours = ((18.697374558 + 24.06570982441908 * dt) + mayall_longitude_degrees/15) % 24;
var LST_degrees = LST_hours * 15;
for (var i = 0; i < ra_1.length; i++) {
ra_1[i] = (ra0_1[i] + LST_degrees) % 360;
}
for (var i = 0; i < ra_2.length; i++) {
ra_2[i] = (ra0_2[i] + LST_degrees) % 360;
}
//// Here we gtake care of the moon phasde text
var TXTdata = sourceTXT.data;
var x = TXTdata['x'];
var y = TXTdata['y'];
var text = TXTdata['text'];
var moonTXTdata = sourceTXTmoon.data;
var x_moon = moonTXTdata['x'];
var y_moon = moonTXTdata['y'];
var text_moon = moonTXTdata['text'];
// x[0] = 1;
// y[0] = 40;
if (all_minutes<8800) {
text[0] = 'Moon Phase: ' + phase_frac[all_minutes]+'%';
x_moon[0] = moon_ra[0]+10;
y_moon[0] = moon_dec[0]-10;
}
sourceTXT.change.emit();
/////////////////////////////// Moon phase code ends.
source_sky1.change.emit();
source_sky2.change.emit();
//source_moon_RADEC.change.emit();
//source_moon_RADEC_.change.emit();
//source_jup_RADEC.change.emit();
sourceTXTmoon.change.emit();
sourceTXTjup.change.emit();
//alert(d);
""")
if dynamic:
### TIME
Timeslider = DateSlider(start=dt(2019, 9, 1, 16, 0, 0), end=dt(2019, 9, 2, 8, 0, 0),
value=dt(2019, 9, 1, 16, 0, 0), step=1, title="KPNO local time(hh:mm)", format="%H:%M",
width=800)
## DATE
Dateslider = DateSlider(start=dt(2019, 9, 1, 16, 0, 0), end=dt(2020, 8, 31, 8, 0, 0),
value=dt(2019, 10, 1, 16, 0, 0), step=1, title="Date of sunset(4pm-8am)",
format="%B:%d", width=800)
callback.args['time_slider'] = Timeslider
callback.args['date_slider'] = Dateslider
Dateslider.js_on_change('value', callback)
Timeslider.js_on_change('value', callback)
layout = column(p, Dateslider, Timeslider, tiletable)
# show(p)
return layout
return p
#########################################################
if __name__ == "__main__":
    if (len(sys.argv) < 2):
        print("\nNot enough input arguments ...")
        # FIX: the original used Python 2 statement syntax
        # (`print >> sys.stderr, ...`), which is a SyntaxError under Python 3.
        print("Use \"python "+sys.argv[0]+" -h\" for help ... \n", file=sys.stderr)
        exit(1)
    args = arg_parser()
    inputFile = args.input
    # FIX: test for None *before* calling .split() on the value; the original
    # order crashed with AttributeError on a missing input instead of printing
    # the intended error message.
    if inputFile is None or inputFile.split('.')[-1] != 'fits':
        print('Error: '+'Check out the input fits file, it should end with the suffix "fits".\n')
        exit(1)
    if not os.path.exists(inputFile):
        print('Error: '+inputFile+' does NOT exist. Please use the correct file name.\n')
        exit(1)
    # Default output name: same basename as the input, with ".html" suffix.
    outputDefault = inputFile.split('fits')[0][0:-1]+'.html'
    if args.title is None:
        args.title = 'DESI Tile Picker'
    # Echo the effective configuration so the user can confirm their inputs.
    print("\n------------------------------------")
    print("Input Arguments (provided by User)")
    print("--------------------------------------")
    print("Input file:", args.input)
    print("qa json file:", args.json)
    print("Plot title:", args.ptitle)
    print("Text file:", args.xfile)
    print("optput html file:", args.output)
    print("html title:", args.title)
    print("------------------------------------")
    print("You can use \"python "+sys.argv[0]+" -h\"")
    print("to see how you can set these values.")
    print("------------------------------------")
    # Build the interactive tile-picker layout and split it into the
    # <script> and <div> fragments needed to embed it in a custom page.
    p = bokehTile(tileFile = args.input, jsonFile = args.json, TT=[0, 0, 0],
                  DD=[2019, 10, 1], dynamic=True, plotTitle=args.ptitle)
    script, div = components(p)
    script = '\n'.join(['' + line for line in script.split('\n')])
    if args.output is None:
        htmlName = outputDefault
    else:
        htmlName = args.output
    print("The output HTML file is: ", htmlName)
    head = html_header(args.title)
    tail = html_footer()
    # Assemble the final HTML: header, plot, optional side text, footer.
    with open(htmlName, "w") as text_file:
        text_file.write(head)
        text_file.write('<table><tr>')
        text_file.write('<td valign="top" width="850">')
        text_file.write(script)
        text_file.write(div)
        text_file.write('</td>')
        if args.xfile is not None:
            textFile = args.xfile
            if os.path.exists(textFile):
                # FIX: use a context manager so the file handle is always
                # closed; the original opened it and never closed it.
                with open(textFile, 'r') as f:
                    txt = f.read()
                text_file.write('<td valign="top" width="400"><p>'+txt+'</p></td>')
            else:
                print('Warning: '+textFile+' does NOT exist. Continuing without this file ...')
                print('         '+'You can try again using the correct text file.\n')
        text_file.write('</tr></table>')
        text_file.write(tail)
| [
"bokeh.models.ColumnDataSource",
"astropy.coordinates.get_moon",
"argparse.ArgumentParser",
"numpy.argsort",
"fitsio.read",
"matplotlib.pyplot.style.use",
"bokeh.models.CrosshairTool",
"numpy.arange",
"numpy.round",
"astropy.coordinates.EarthLocation.of_site",
"astropy.coordinates.get_body",
"... | [((808, 840), 'matplotlib.pyplot.style.use', 'plt.style.use', (['astropy_mpl_style'], {}), '(astropy_mpl_style)\n', (821, 840), True, 'import matplotlib.pyplot as plt\n'), ((2664, 3274), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""\n\n - A visulaization tool for planning DESI observations\n \n\n - How to run: \n \n 1$ prog -i [tileFile] -j [jsonFile] -o [HTML name] -t [HTML title] -p [plot title]\n \n or\n\n\n - jsonFile and HTMLname are optional. \n If HTMLname is not provided, this program tries to open up the output on the default browser.\n If jsonFile is not provided, all plotted tiles would have the same color.\n\n\n - Example(s): \n $ python prog -i tileFile.fits -j qa.json -o output.html\n \n \n $ python prog -h \n To see help and all available options.\n \n\n"""'}), '(usage=\n """\n\n - A visulaization tool for planning DESI observations\n \n\n - How to run: \n \n 1$ prog -i [tileFile] -j [jsonFile] -o [HTML name] -t [HTML title] -p [plot title]\n \n or\n\n\n - jsonFile and HTMLname are optional. 
\n If HTMLname is not provided, this program tries to open up the output on the default browser.\n If jsonFile is not provided, all plotted tiles would have the same color.\n\n\n - Example(s): \n $ python prog -i tileFile.fits -j qa.json -o output.html\n \n \n $ python prog -h \n To see help and all available options.\n \n\n"""\n )\n', (2687, 3274), False, 'import argparse\n'), ((4099, 4144), 'astroplan.Observer.at_site', 'Observer.at_site', (['"""Kitt Peak"""'], {'timezone': '"""MST"""'}), "('Kitt Peak', timezone='MST')\n", (4115, 4144), False, 'from astroplan import Observer\n'), ((4269, 4288), 'astropy.time.Time', 'Time', (["(ds + ' ' + ts)"], {}), "(ds + ' ' + ts)\n", (4273, 4288), False, 'from astropy.time import Time, TimezoneInfo\n'), ((4678, 4697), 'astropy.time.Time', 'Time', (["(ds + ' ' + ts)"], {}), "(ds + ' ' + ts)\n", (4682, 4697), False, 'from astropy.time import Time, TimezoneInfo\n'), ((4761, 4777), 'astropy.coordinates.get_moon', 'get_moon', (['t', 'loc'], {}), '(t, loc)\n', (4769, 4777), False, 'from astropy.coordinates import get_moon\n'), ((5187, 5206), 'astropy.time.Time', 'Time', (["(ds + ' ' + ts)"], {}), "(ds + ' ' + ts)\n", (5191, 5206), False, 'from astropy.time import Time, TimezoneInfo\n'), ((5621, 5643), 'numpy.arange', 'np.arange', (['(0.0)', '(360)', '(2)'], {}), '(0.0, 360, 2)\n', (5630, 5643), True, 'import numpy as np\n'), ((5678, 5721), 'kapteyn.wcs.Transformation', 'wcs.Transformation', (["(plane + ' ')", 'projection'], {}), "(plane + ' ', projection)\n", (5696, 5721), False, 'from kapteyn import wcs\n'), ((5832, 5849), 'numpy.argsort', 'np.argsort', (['alpha'], {}), '(alpha)\n', (5842, 5849), True, 'import numpy as np\n'), ((6428, 6472), 'datetime.datetime', 'dt', (['dd[0]', 'dd[1]', 'dd[2]', 'tt[0]', 'tt[1]', 'tt[2]'], {}), '(dd[0], dd[1], dd[2], tt[0], tt[1], tt[2])\n', (6430, 6472), True, 'from datetime import datetime as dt\n'), ((6489, 6513), 'datetime.datetime', 'dt', (['(2000)', '(1)', '(1)', '(12)', '(0)', '(0)'], {}), 
'(2000, 1, 1, 12, 0, 0)\n', (6491, 6513), True, 'from datetime import datetime as dt\n'), ((6833, 6852), 'astropy.time.Time', 'Time', (["(ds + ' ' + ts)"], {}), "(ds + ' ' + ts)\n", (6837, 6852), False, 'from astropy.time import Time, TimezoneInfo\n'), ((6863, 6931), 'astropy.coordinates.EarthLocation', 'EarthLocation', ([], {'lat': '(Lat * u.deg)', 'lon': '(Lon * u.deg)', 'height': '(Height * u.m)'}), '(lat=Lat * u.deg, lon=Lon * u.deg, height=Height * u.m)\n', (6876, 6931), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, solar_system_ephemeris, get_body, get_body_barycentric\n'), ((7074, 7094), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(3)'], {}), '(0, 360, 3)\n', (7083, 7094), True, 'import numpy as np\n'), ((7160, 7247), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'alt': 'alt', 'az': 'az', 'obstime': 'obsTime', 'frame': '"""altaz"""', 'location': 'loc', 'unit': '"""deg"""'}), "(alt=alt, az=az, obstime=obsTime, frame='altaz', location=loc, unit\n ='deg')\n", (7168, 7247), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, solar_system_ephemeris, get_body, get_body_barycentric\n'), ((7354, 7443), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'alt': '[90]', 'az': '[90]', 'obstime': 'obsTime', 'frame': '"""altaz"""', 'location': 'loc', 'unit': '"""deg"""'}), "(alt=[90], az=[90], obstime=obsTime, frame='altaz', location=loc,\n unit='deg')\n", (7362, 7443), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, solar_system_ephemeris, get_body, get_body_barycentric\n'), ((7538, 7613), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'RA0': ra0, 'RA': (ra0 + LST_degrees) % 360, 'DEC': dec}"], {}), "({'RA0': ra0, 'RA': (ra0 + LST_degrees) % 360, 'DEC': dec})\n", (7554, 7613), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((7785, 7819), 'fitsio.read', 'fitsio.read', (['tileFile'], {'header': '(True)'}), '(tileFile, header=True)\n', (7796, 
7819), False, 'import sys, os, math, time, json, fitsio\n'), ((8662, 8677), 'numpy.asarray', 'np.asarray', (['dye'], {}), '(dye)\n', (8672, 8677), True, 'import numpy as np\n'), ((8760, 8821), 'bokeh.transform.linear_cmap', 'linear_cmap', ([], {'field_name': '"""DYE"""', 'palette': 'palette', 'low': '(0)', 'high': '(2)'}), "(field_name='DYE', palette=palette, low=0, high=2)\n", (8771, 8821), False, 'from bokeh.transform import linear_cmap\n'), ((8984, 9028), 'datetime.datetime', 'dt', (['DD[0]', 'DD[1]', 'DD[2]', 'TT[0]', 'TT[1]', 'TT[2]'], {}), '(DD[0], DD[1], DD[2], TT[0], TT[1], TT[2])\n', (8986, 9028), True, 'from datetime import datetime as dt\n'), ((10268, 10299), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'tiledata'}), '(data=tiledata)\n', (10284, 10299), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((10331, 10372), 'bokeh.models.widgets.tables.NumberFormatter', 'bktables.NumberFormatter', ([], {'format': '"""0,0.00"""'}), "(format='0,0.00')\n", (10355, 10372), True, 'from bokeh.models.widgets import tables as bktables\n'), ((10915, 10975), 'bokeh.models.widgets.tables.DataTable', 'bktables.DataTable', ([], {'columns': 'columns', 'source': 'tiles', 'width': '(800)'}), '(columns=columns, source=tiles, width=800)\n', (10933, 10975), True, 'from bokeh.models.widgets import tables as bktables\n'), ((12243, 12258), 'bokeh.models.Range1d', 'Range1d', (['(360)', '(0)'], {}), '(360, 0)\n', (12250, 12258), False, 'from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis\n'), ((12275, 12291), 'bokeh.models.Range1d', 'Range1d', (['(-40)', '(95)'], {}), '(-40, 95)\n', (12282, 12291), False, 'from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis\n'), ((14925, 14968), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': 'ttp', 'renderers': 
'[render]'}), '(tooltips=ttp, renderers=[render])\n', (14934, 14968), False, 'from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis\n'), ((15105, 15120), 'bokeh.models.CrosshairTool', 'CrosshairTool', ([], {}), '()\n', (15118, 15120), False, 'from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis\n'), ((16283, 16362), 'numpy.genfromtxt', 'np.genfromtxt', (['inFile'], {'delimiter': '""","""', 'filling_values': '(-1)', 'names': '(True)', 'dtype': 'None'}), "(inFile, delimiter=',', filling_values=-1, names=True, dtype=None)\n", (16296, 16362), True, 'import numpy as np\n'), ((16395, 16429), 'astropy.coordinates.EarthLocation.of_site', 'EarthLocation.of_site', (['"""Kitt Peak"""'], {}), "('Kitt Peak')\n", (16416, 16429), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, solar_system_ephemeris, get_body, get_body_barycentric\n'), ((16499, 16531), 'pylunar.MoonInfo', 'pylunar.MoonInfo', (['kp_lat', 'kp_lon'], {}), '(kp_lat, kp_lon)\n', (16515, 16531), False, 'import pylunar\n'), ((17161, 17265), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'moon_ra': [m_ra.deg - 360], 'moon_dec': [m_dec.deg], 'phase_frac': [\n frac_phase]}"], {}), "({'moon_ra': [m_ra.deg - 360], 'moon_dec': [m_dec.deg],\n 'phase_frac': [frac_phase]})\n", (17177, 17265), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((17273, 17371), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'moon_ra': [m_ra.deg], 'moon_dec': [m_dec.deg], 'phase_frac': [frac_phase]}"], {}), "({'moon_ra': [m_ra.deg], 'moon_dec': [m_dec.deg],\n 'phase_frac': [frac_phase]})\n", (17289, 17371), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((17775, 17869), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'jup_RAS': 
tbl_moon_jup['jup_ra'], 'jup_DECS': tbl_moon_jup['jup_dec']}"], {}), "({'jup_RAS': tbl_moon_jup['jup_ra'], 'jup_DECS':\n tbl_moon_jup['jup_dec']})\n", (17791, 17869), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((17882, 17946), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'jup_ra': [j_ra.deg], 'jup_dec': [j_dec.deg]}"], {}), "({'jup_ra': [j_ra.deg], 'jup_dec': [j_dec.deg]})\n", (17898, 17946), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((18062, 18147), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (["{'eve_twilight': [twilight[0]], 'mor_twilight': [twilight[1]]}"], {}), "({'eve_twilight': [twilight[0]], 'mor_twilight': [twilight[1]]}\n )\n", (18078, 18147), False, 'from bokeh.models import CustomJS, ColumnDataSource, DateSlider, DateRangeSlider\n'), ((18484, 18544), 'bokeh.models.glyphs.Text', 'Text', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""text"""', 'angle': '(0)', 'text_color': '"""black"""'}), "(x='x', y='y', text='text', angle=0, text_color='black')\n", (18488, 18544), False, 'from bokeh.models.glyphs import Text\n'), ((18685, 18787), 'bokeh.models.glyphs.Text', 'Text', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""text"""', 'angle': '(0)', 'text_color': '"""blue"""', 'text_alpha': '(0.3)', 'text_font_size': '"""10pt"""'}), "(x='x', y='y', text='text', angle=0, text_color='blue', text_alpha=0.3,\n text_font_size='10pt')\n", (18689, 18787), False, 'from bokeh.models.glyphs import Text\n'), ((18921, 19024), 'bokeh.models.glyphs.Text', 'Text', ([], {'x': '"""x"""', 'y': '"""y"""', 'text': '"""text"""', 'angle': '(0)', 'text_color': '"""black"""', 'text_alpha': '(0.3)', 'text_font_size': '"""10pt"""'}), "(x='x', y='y', text='text', angle=0, text_color='black', text_alpha=0.3,\n text_font_size='10pt')\n", (18925, 19024), False, 'from bokeh.models.glyphs import Text\n'), ((27871, 27884), 'bokeh.embed.components', 'components', (['p'], 
{}), '(p)\n', (27881, 27884), False, 'from bokeh.embed import components\n'), ((5263, 5300), 'astropy.coordinates.solar_system_ephemeris.set', 'solar_system_ephemeris.set', (['"""builtin"""'], {}), "('builtin')\n", (5289, 5300), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, solar_system_ephemeris, get_body, get_body_barycentric\n'), ((5320, 5347), 'astropy.coordinates.get_body', 'get_body', (['"""jupiter"""', 't', 'loc'], {}), "('jupiter', t, loc)\n", (5328, 5347), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz, solar_system_ephemeris, get_body, get_body_barycentric\n'), ((7829, 7860), 'numpy.where', 'np.where', (["(citls['IN_DESI'] == 1)"], {}), "(citls['IN_DESI'] == 1)\n", (7837, 7860), True, 'import numpy as np\n'), ((8687, 8704), 'numpy.where', 'np.where', (['(dye < 2)'], {}), '(dye < 2)\n', (8695, 8704), True, 'import numpy as np\n'), ((10397, 10459), 'bokeh.models.widgets.tables.TableColumn', 'bktables.TableColumn', ([], {'field': '"""TILEID"""', 'title': '"""TILEID"""', 'width': '(80)'}), "(field='TILEID', title='TILEID', width=80)\n", (10417, 10459), True, 'from bokeh.models.widgets import tables as bktables\n'), ((10469, 10534), 'bokeh.models.widgets.tables.TableColumn', 'bktables.TableColumn', ([], {'field': '"""RA"""', 'title': '"""RA"""', 'formatter': 'colformat'}), "(field='RA', title='RA', formatter=colformat)\n", (10489, 10534), True, 'from bokeh.models.widgets import tables as bktables\n'), ((10544, 10611), 'bokeh.models.widgets.tables.TableColumn', 'bktables.TableColumn', ([], {'field': '"""DEC"""', 'title': '"""DEC"""', 'formatter': 'colformat'}), "(field='DEC', title='DEC', formatter=colformat)\n", (10564, 10611), True, 'from bokeh.models.widgets import tables as bktables\n'), ((10836, 10892), 'bokeh.models.widgets.tables.TableColumn', 'bktables.TableColumn', ([], {'field': '"""selected"""', 'title': '"""Selected"""'}), "(field='selected', title='Selected')\n", (10856, 10892), True, 'from 
bokeh.models.widgets import tables as bktables\n'), ((15431, 15461), 'bokeh.models.LinearAxis', 'LinearAxis', ([], {'y_range_name': '"""foo"""'}), "(y_range_name='foo')\n", (15441, 15461), False, 'from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis\n'), ((15489, 15519), 'bokeh.models.LinearAxis', 'LinearAxis', ([], {'x_range_name': '"""joo"""'}), "(x_range_name='joo')\n", (15499, 15519), False, 'from bokeh.models import ColumnDataSource, LabelSet, HoverTool, Range1d, Label, TapTool, OpenURL, CustomJS, CrosshairTool, LinearAxis\n'), ((26194, 26238), 'bokeh.layouts.column', 'column', (['p', 'Dateslider', 'Timeslider', 'tiletable'], {}), '(p, Dateslider, Timeslider, tiletable)\n', (26200, 26238), False, 'from bokeh.layouts import column\n'), ((26817, 26842), 'os.path.exists', 'os.path.exists', (['inputFile'], {}), '(inputFile)\n', (26831, 26842), False, 'import sys, os, math, time, json, fitsio\n'), ((6951, 6977), 'math.acos', 'math.acos', (['(1 / airmass_lim)'], {}), '(1 / airmass_lim)\n', (6960, 6977), False, 'import sys, os, math, time, json, fitsio\n'), ((7985, 8005), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (7994, 8005), False, 'import sys, os, math, time, json, fitsio\n'), ((9912, 9941), 'numpy.round', 'np.round', (["citls['EBV_MED']", '(3)'], {}), "(citls['EBV_MED'], 3)\n", (9920, 9941), True, 'import numpy as np\n'), ((16978, 17025), 'numpy.round', 'np.round', (["(100 * tbl_moon_jup['moon_phase_frac'])"], {}), "(100 * tbl_moon_jup['moon_phase_frac'])\n", (16986, 17025), True, 'import numpy as np\n'), ((28535, 28559), 'os.path.exists', 'os.path.exists', (['textFile'], {}), '(textFile)\n', (28549, 28559), False, 'import sys, os, math, time, json, fitsio\n'), ((10731, 10802), 'bokeh.models.widgets.tables.TableColumn', 'bktables.TableColumn', ([], {'field': 'colname', 'title': 'colname', 'formatter': 'colformat'}), '(field=colname, title=colname, 
formatter=colformat)\n', (10751, 10802), True, 'from bokeh.models.widgets import tables as bktables\n'), ((25478, 25502), 'datetime.datetime', 'dt', (['(2019)', '(9)', '(1)', '(16)', '(0)', '(0)'], {}), '(2019, 9, 1, 16, 0, 0)\n', (25480, 25502), True, 'from datetime import datetime as dt\n'), ((25508, 25531), 'datetime.datetime', 'dt', (['(2019)', '(9)', '(2)', '(8)', '(0)', '(0)'], {}), '(2019, 9, 2, 8, 0, 0)\n', (25510, 25531), True, 'from datetime import datetime as dt\n'), ((25571, 25595), 'datetime.datetime', 'dt', (['(2019)', '(9)', '(1)', '(16)', '(0)', '(0)'], {}), '(2019, 9, 1, 16, 0, 0)\n', (25573, 25595), True, 'from datetime import datetime as dt\n'), ((25751, 25775), 'datetime.datetime', 'dt', (['(2019)', '(9)', '(1)', '(16)', '(0)', '(0)'], {}), '(2019, 9, 1, 16, 0, 0)\n', (25753, 25775), True, 'from datetime import datetime as dt\n'), ((25781, 25805), 'datetime.datetime', 'dt', (['(2020)', '(8)', '(31)', '(8)', '(0)', '(0)'], {}), '(2020, 8, 31, 8, 0, 0)\n', (25783, 25805), True, 'from datetime import datetime as dt\n'), ((25845, 25870), 'datetime.datetime', 'dt', (['(2019)', '(10)', '(1)', '(16)', '(0)', '(0)'], {}), '(2019, 10, 1, 16, 0, 0)\n', (25847, 25870), True, 'from datetime import datetime as dt\n')] |
"""
Like image_sample.py, but uses a noisy image classifier to guide the sampling
process towards more realistic images.
"""
import argparse
import os
import numpy as np
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
model_defaults,
diffusion_defaults,
classifier_defaults,
create_model,
create_gaussian_diffusion,
create_classifier,
add_dict_to_argparser,
args_to_dict,
)
import matplotlib.pyplot as plt
from guided_diffusion.sample_util import save_samples
from tqdm import tqdm
def get_gathered_item(x):
    """Gather copies of tensor ``x`` from every rank in the process group.

    Returns a list of tensors, one per rank, each shaped like ``x``.
    Requires torch.distributed to be initialized.
    """
    world_size = dist.get_world_size()
    buffers = [th.zeros_like(x) for _ in range(world_size)]
    dist.all_gather(buffers, x)
    return buffers
def main():
    """Sample images with classifier guidance and record per-step statistics.

    Builds a diffusion model and a (noisy-image) classifier, then repeatedly
    draws batches of guided samples.  For every denoising step and sample it
    records the classifier gradient norm, the predicted class probability,
    the (normalized) prediction entropy, and the entropy-derived guidance
    scale.  Rank 0 saves all gathered statistics to an ``.npz`` file.
    Runs under torch.distributed; every rank must execute this function.
    """
    args = create_argparser().parse_args()
    # Restrict visible GPUs if the user passed a comma-separated id list.
    visible_gpus_list = []
    if args.gpus:
        visible_gpus_list = [str(gpu_id) for gpu_id in args.gpus.split(",")]
    dist_util.setup_dist(visible_gpu_list=visible_gpus_list, local_rank=args.local_rank)
    logger.configure(dir=os.path.join(args.log_root, args.save_name))
    logger.log(args)
    logger.log("creating model and diffusion...")
    model = create_model(
        **args_to_dict(args, model_defaults().keys())
    )
    diffusion = create_gaussian_diffusion(
        **args_to_dict(args, diffusion_defaults().keys())
    )
    if args.model_path:
        model.load_state_dict(
            dist_util.load_state_dict(args.model_path, map_location="cpu"),
            strict=True
        )
    model.to(dist_util.dev())
    if args.use_fp16:
        model.convert_to_fp16()
    model.eval()
    classifier = create_classifier(
        **args_to_dict(args, classifier_defaults().keys())
    )
    if args.classifier_path:
        logger.log("loading classifier from {}".format(args.classifier_path))
        classifier.load_state_dict(
            dist_util.load_state_dict(args.classifier_path, map_location="cpu"),
            strict=True
        )
    classifier.to(dist_util.dev())
    if args.classifier_use_fp16:
        classifier.convert_to_fp16()
    classifier.eval()
    # Number of denoising steps derived from the respacing string.
    step_size = 25 if args.timestep_respacing == 'ddim25' else int(args.timestep_respacing)
    # Per-(step, sample) statistics buffers; overwritten during each batch and
    # gathered across ranks after every sampling-loop iteration.
    batch_grad_norm = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
    batch_updated_grad_norm = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
    batch_probability = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
    batch_entropy = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
    batch_entropy_scale = th.zeros((step_size, args.batch_size,), device=dist_util.dev())
    batch_probability_distribution = th.zeros((step_size, args.batch_size, args.classifier_out_channels,), device=dist_util.dev())
    def cond_fn(x, t, y=None, prior_variance=1.0, t_range_start=0, t_range_end=1000):
        # Classifier-guidance gradient of log p(y | x_t) w.r.t. x, scaled by
        # args.classifier_scale and optionally rescaled by inverse entropy
        # when t falls inside [t_range_start, t_range_end).
        assert y is not None
        # NOTE(review): step_id is only assigned for step_size 25 or 250;
        # any other respacing raises NameError below — confirm intended.
        if step_size == 25:
            step_id = t[0].item() // 40
        elif step_size == 250:
            step_id = t[0].item() // 4
        with th.enable_grad():
            x_in = x.detach().requires_grad_(True)
            output = classifier(x_in, t)
            logits = output
            log_probs = F.log_softmax(logits, dim=-1) # (B, C)
            selected = log_probs[range(len(logits)), y.view(-1)] # (B, )
            cond_grad = th.autograd.grad(selected.sum(), x_in)[0]
            cond_grad = cond_grad * args.classifier_scale
            with th.no_grad():
                probs = F.softmax(logits, dim=-1) # (B, C)
                # Entropy normalized into [0, 1] by log(num classes).
                entropy = (-log_probs * probs).sum(dim=-1) / np.log(args.classifier_out_channels) # (B, )
                entropy_scale = 1.0 / entropy # (B, )
                original_grad_norm = th.norm(cond_grad, p=2, dim=(1, 2, 3), dtype=th.float32).detach()
                selected_probability = probs[range(len(logits)), y.view(-1)]
                # Record this step's statistics into the shared buffers.
                batch_probability[step_id] = selected_probability
                batch_probability_distribution[step_id] = probs
                batch_grad_norm[step_id] = original_grad_norm
                batch_entropy[step_id] = entropy
                batch_entropy_scale[step_id] = entropy_scale
                logger.log(
                    '\n',
                    't = ', t[0].detach(), '\n',
                    '\t\t mean std median', '\n',
                    '\t\t grad_norm =', original_grad_norm.mean(-1).detach(), original_grad_norm.std(-1).detach(), original_grad_norm.median(-1).values, '\n',
                    '\t\t probability = ', selected_probability.mean(-1).detach(), selected_probability.std(-1).detach(), selected_probability.median(-1).values, '\n',
                    '\t\t entropy = ', entropy.mean(-1).detach(), entropy.std(-1).detach(), entropy.median(-1).values, '\n',
                    '\t\t entropy_scale = ', entropy_scale.mean(-1).detach(), entropy_scale.std(-1).detach(), entropy_scale.median(-1).values, '\n',
                )
                if args.use_entropy_scale and (t[0] >= t_range_start and t[0] < t_range_end):
                    # Rescale the gradient per-sample by the inverse entropy.
                    cond_grad = cond_grad * entropy_scale.reshape(-1, 1, 1, 1).repeat(1, *cond_grad[0].shape)
                    updated_grad_norm = th.norm(cond_grad, p=2, dim=(1, 2, 3), dtype=th.float32).detach()
                    batch_updated_grad_norm[step_id]=updated_grad_norm
                    logger.log(
                        '\t\t updated_grad_norm = ',
                        updated_grad_norm.mean(-1).detach(), updated_grad_norm.std(-1).detach(), updated_grad_norm.median(-1).values, '\n',
                        '\n'
                    )
                    return cond_grad
            return cond_grad
    def model_fn(x, t, y=None, t_range_start=0, t_range_end=1000):
        # Wrapper accepted by the sampler; forwards class labels only when
        # the model is class-conditional.
        assert y is not None
        return model(x, t, y if args.class_cond else None)
    logger.log("sampling...")
    all_images, all_labels = [], []
    all_grad_norm = []
    all_updated_grad_norm = []
    all_probability = []
    all_entropy = []
    all_entropy_scale = []
    all_probability_distribution = []
    id = 0  # batch counter (NOTE: shadows the builtin ``id``)
    while len(all_images) * args.batch_size < args.num_samples:
        id += 1
        model_kwargs = {}
        # Either sample labels uniformly or force a single selected class.
        if args.selected_class == -1:
            classes = th.randint(
                low=0, high=args.classifier_out_channels, size=(args.batch_size,), device=dist_util.dev()
            )
        else:
            classes = th.randint(
                low=args.selected_class, high=args.selected_class + 1, size=(args.batch_size,), device=dist_util.dev()
            )
        model_kwargs["y"] = classes
        model_kwargs['t_range_start'] = args.t_range_start
        model_kwargs['t_range_end'] = args.t_range_end
        sample_fn = (
            diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop
        )
        sample = sample_fn(
            model_fn,
            (args.batch_size, 3, args.image_size, args.image_size),
            clip_denoised=args.clip_denoised,
            model_kwargs=model_kwargs,
            cond_fn=cond_fn,
            device=dist_util.dev(),
        )
        # Map samples from [-1, 1] floats to uint8 NHWC images.
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()
        gathered_samples = get_gathered_item(sample)
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        gathered_labels = get_gathered_item(classes)
        all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f"created {len(all_images) * args.batch_size} / {args.num_samples} samples")
        # Gather this batch's per-step statistics from every rank.
        gathered_batch_grad_norm = get_gathered_item(batch_grad_norm)
        gathered_batch_updated_grad_norm = get_gathered_item(batch_updated_grad_norm)
        gathered_batch_probability = get_gathered_item(batch_probability)
        gathered_batch_entropy = get_gathered_item(batch_entropy)
        gathered_batch_entropy_scale = get_gathered_item(batch_entropy_scale)
        gathered_batch_probability_distribution = get_gathered_item(batch_probability_distribution)
        all_grad_norm.extend([x.cpu().numpy() for x in gathered_batch_grad_norm])
        all_updated_grad_norm.extend([x.cpu().numpy() for x in gathered_batch_updated_grad_norm])
        all_probability.extend([x.cpu().numpy() for x in gathered_batch_probability])
        all_entropy.extend([x.cpu().numpy() for x in gathered_batch_entropy])
        all_entropy_scale.extend([x.cpu().numpy() for x in gathered_batch_entropy_scale])
        all_probability_distribution.extend([x.cpu().numpy() for x in gathered_batch_probability_distribution])
    if dist.get_rank() == 0:
        arr = np.concatenate(all_images, axis=0)
        arr = arr[: args.num_samples]
        label_arr = np.concatenate(all_labels, axis=0)
        label_arr = label_arr[: args.num_samples]
        # NOTE(review): the statistics are concatenated along axis 1 (samples)
        # but then sliced along axis 0 (steps) by ``[:args.num_samples]`` —
        # this looks like it truncates the wrong axis; confirm intended.
        all_grad_norm = np.concatenate(all_grad_norm, axis=1)[:args.num_samples]
        all_updated_grad_norm = np.concatenate(all_updated_grad_norm, axis=1)[:args.num_samples]
        all_probability = np.concatenate(all_probability, axis=1)[:args.num_samples]
        all_entropy = np.concatenate(all_entropy, axis=1)[:args.num_samples]
        all_entropy_scale = np.concatenate(all_entropy_scale, axis=1)[:args.num_samples]
        all_probability_distribution = np.concatenate(all_probability_distribution, axis=1)[:args.num_samples]
        shape_str = "x".join([str(x) for x in arr.shape])
        if args.selected_class == -1:
            metainfo_out_path = os.path.join(logger.get_dir(), "metainfo_scale{}_steps{}_class0-999_samples_{}.npz".format(args.classifier_scale, args.timestep_respacing, shape_str))
        else:
            metainfo_out_path = os.path.join(logger.get_dir(), "metainfo_scale{}_steps{}_class{}_samples_{}.npz".format(args.classifier_scale, args.timestep_respacing, args.selected_class, shape_str))
        np.savez(metainfo_out_path,all_grad_norm=all_grad_norm, all_updated_grad_norm=all_updated_grad_norm,
                 all_probability=all_probability, all_entropy=all_entropy, all_entropy_scale=all_entropy_scale,
                 all_probability_distribution=all_probability_distribution)
        logger.log(f"saving to {metainfo_out_path}")
    dist.barrier()
    logger.log("sampling complete")
def create_argparser():
    """Build the CLI parser: script-level defaults merged with the
    diffusion, model, and classifier flag dictionaries."""
    defaults = {
        "clip_denoised": True,
        "num_samples": 10000,
        "batch_size": 16,
        "use_ddim": False,
        "model_path": "",
        "classifier_path": "",
        "classifier_scale": 1.0,
        "log_root": "",
        "save_name": "",
        "gpus": "",
        "t_range_start": 0,
        "t_range_end": 1000,
        "use_entropy_scale": False,
        "expected_classifier_gradient_value": -1.0,
        "selected_class": -1,
        "use_cond_range_scale": False,
    }
    # Later groups override earlier keys, matching the original update order.
    for extra_defaults in (diffusion_defaults(), model_defaults(), classifier_defaults()):
        defaults.update(extra_defaults)
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    add_dict_to_argparser(parser, defaults)
    return parser
# Script entry point: run distributed guided sampling when executed directly.
if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"guided_diffusion.dist_util.setup_dist",
"torch.distributed.all_gather",
"torch.distributed.get_world_size",
"torch.no_grad",
"guided_diffusion.script_util.add_dict_to_argparser",
"os.path.join",
"guided_diffusion.script_util.diffusion_defaults",
"guided_diffusion.logger.l... | [((748, 778), 'torch.distributed.all_gather', 'dist.all_gather', (['gathered_x', 'x'], {}), '(gathered_x, x)\n', (763, 778), True, 'import torch.distributed as dist\n'), ((985, 1074), 'guided_diffusion.dist_util.setup_dist', 'dist_util.setup_dist', ([], {'visible_gpu_list': 'visible_gpus_list', 'local_rank': 'args.local_rank'}), '(visible_gpu_list=visible_gpus_list, local_rank=args.\n local_rank)\n', (1005, 1074), False, 'from guided_diffusion import dist_util, logger\n'), ((1145, 1161), 'guided_diffusion.logger.log', 'logger.log', (['args'], {}), '(args)\n', (1155, 1161), False, 'from guided_diffusion import dist_util, logger\n'), ((1167, 1212), 'guided_diffusion.logger.log', 'logger.log', (['"""creating model and diffusion..."""'], {}), "('creating model and diffusion...')\n", (1177, 1212), False, 'from guided_diffusion import dist_util, logger\n'), ((5779, 5804), 'guided_diffusion.logger.log', 'logger.log', (['"""sampling..."""'], {}), "('sampling...')\n", (5789, 5804), False, 'from guided_diffusion import dist_util, logger\n'), ((10202, 10216), 'torch.distributed.barrier', 'dist.barrier', ([], {}), '()\n', (10214, 10216), True, 'import torch.distributed as dist\n'), ((10221, 10252), 'guided_diffusion.logger.log', 'logger.log', (['"""sampling complete"""'], {}), "('sampling complete')\n", (10231, 10252), False, 'from guided_diffusion import dist_util, logger\n'), ((10884, 10909), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10907, 10909), False, 'import argparse\n'), ((10975, 11014), 'guided_diffusion.script_util.add_dict_to_argparser', 'add_dict_to_argparser', (['parser', 'defaults'], {}), '(parser, defaults)\n', (10996, 11014), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, args_to_dict\n'), ((688, 704), 'torch.zeros_like', 'th.zeros_like', (['x'], {}), '(x)\n', 
(701, 704), True, 'import torch as th\n'), ((1584, 1599), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (1597, 1599), False, 'from guided_diffusion import dist_util, logger\n'), ((2051, 2066), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2064, 2066), False, 'from guided_diffusion import dist_util, logger\n'), ((8593, 8608), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8606, 8608), True, 'import torch.distributed as dist\n'), ((8629, 8663), 'numpy.concatenate', 'np.concatenate', (['all_images'], {'axis': '(0)'}), '(all_images, axis=0)\n', (8643, 8663), True, 'import numpy as np\n'), ((8722, 8756), 'numpy.concatenate', 'np.concatenate', (['all_labels'], {'axis': '(0)'}), '(all_labels, axis=0)\n', (8736, 8756), True, 'import numpy as np\n'), ((9853, 10127), 'numpy.savez', 'np.savez', (['metainfo_out_path'], {'all_grad_norm': 'all_grad_norm', 'all_updated_grad_norm': 'all_updated_grad_norm', 'all_probability': 'all_probability', 'all_entropy': 'all_entropy', 'all_entropy_scale': 'all_entropy_scale', 'all_probability_distribution': 'all_probability_distribution'}), '(metainfo_out_path, all_grad_norm=all_grad_norm,\n all_updated_grad_norm=all_updated_grad_norm, all_probability=\n all_probability, all_entropy=all_entropy, all_entropy_scale=\n all_entropy_scale, all_probability_distribution=\n all_probability_distribution)\n', (9861, 10127), True, 'import numpy as np\n'), ((10151, 10195), 'guided_diffusion.logger.log', 'logger.log', (['f"""saving to {metainfo_out_path}"""'], {}), "(f'saving to {metainfo_out_path}')\n", (10161, 10195), False, 'from guided_diffusion import dist_util, logger\n'), ((10768, 10788), 'guided_diffusion.script_util.diffusion_defaults', 'diffusion_defaults', ([], {}), '()\n', (10786, 10788), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, 
args_to_dict\n'), ((10810, 10826), 'guided_diffusion.script_util.model_defaults', 'model_defaults', ([], {}), '()\n', (10824, 10826), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, args_to_dict\n'), ((10848, 10869), 'guided_diffusion.script_util.classifier_defaults', 'classifier_defaults', ([], {}), '()\n', (10867, 10869), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, args_to_dict\n'), ((1096, 1139), 'os.path.join', 'os.path.join', (['args.log_root', 'args.save_name'], {}), '(args.log_root, args.save_name)\n', (1108, 1139), False, 'import os\n'), ((1473, 1535), 'guided_diffusion.dist_util.load_state_dict', 'dist_util.load_state_dict', (['args.model_path'], {'map_location': '"""cpu"""'}), "(args.model_path, map_location='cpu')\n", (1498, 1535), False, 'from guided_diffusion import dist_util, logger\n'), ((1930, 1997), 'guided_diffusion.dist_util.load_state_dict', 'dist_util.load_state_dict', (['args.classifier_path'], {'map_location': '"""cpu"""'}), "(args.classifier_path, map_location='cpu')\n", (1955, 1997), False, 'from guided_diffusion import dist_util, logger\n'), ((2322, 2337), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2335, 2337), False, 'from guided_diffusion import dist_util, logger\n'), ((2416, 2431), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2429, 2431), False, 'from guided_diffusion import dist_util, logger\n'), ((2504, 2519), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2517, 2519), False, 'from guided_diffusion import dist_util, logger\n'), ((2588, 2603), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2601, 2603), False, 'from guided_diffusion import dist_util, 
logger\n'), ((2678, 2693), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2691, 2693), False, 'from guided_diffusion import dist_util, logger\n'), ((2809, 2824), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (2822, 2824), False, 'from guided_diffusion import dist_util, logger\n'), ((3096, 3112), 'torch.enable_grad', 'th.enable_grad', ([], {}), '()\n', (3110, 3112), True, 'import torch as th\n'), ((3260, 3289), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (3273, 3289), True, 'import torch.nn.functional as F\n'), ((3513, 3525), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (3523, 3525), True, 'import torch as th\n'), ((3548, 3573), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (3557, 3573), True, 'import torch.nn.functional as F\n'), ((8832, 8869), 'numpy.concatenate', 'np.concatenate', (['all_grad_norm'], {'axis': '(1)'}), '(all_grad_norm, axis=1)\n', (8846, 8869), True, 'import numpy as np\n'), ((8921, 8966), 'numpy.concatenate', 'np.concatenate', (['all_updated_grad_norm'], {'axis': '(1)'}), '(all_updated_grad_norm, axis=1)\n', (8935, 8966), True, 'import numpy as np\n'), ((9012, 9051), 'numpy.concatenate', 'np.concatenate', (['all_probability'], {'axis': '(1)'}), '(all_probability, axis=1)\n', (9026, 9051), True, 'import numpy as np\n'), ((9093, 9128), 'numpy.concatenate', 'np.concatenate', (['all_entropy'], {'axis': '(1)'}), '(all_entropy, axis=1)\n', (9107, 9128), True, 'import numpy as np\n'), ((9176, 9217), 'numpy.concatenate', 'np.concatenate', (['all_entropy_scale'], {'axis': '(1)'}), '(all_entropy_scale, axis=1)\n', (9190, 9217), True, 'import numpy as np\n'), ((9276, 9328), 'numpy.concatenate', 'np.concatenate', (['all_probability_distribution'], {'axis': '(1)'}), '(all_probability_distribution, axis=1)\n', (9290, 9328), True, 'import numpy as np\n'), ((720, 741), 
'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (739, 741), True, 'import torch.distributed as dist\n'), ((3641, 3677), 'numpy.log', 'np.log', (['args.classifier_out_channels'], {}), '(args.classifier_out_channels)\n', (3647, 3677), True, 'import numpy as np\n'), ((7021, 7036), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (7034, 7036), False, 'from guided_diffusion import dist_util, logger\n'), ((9491, 9507), 'guided_diffusion.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (9505, 9507), False, 'from guided_diffusion import dist_util, logger\n'), ((9688, 9704), 'guided_diffusion.logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (9702, 9704), False, 'from guided_diffusion import dist_util, logger\n'), ((3771, 3827), 'torch.norm', 'th.norm', (['cond_grad'], {'p': '(2)', 'dim': '(1, 2, 3)', 'dtype': 'th.float32'}), '(cond_grad, p=2, dim=(1, 2, 3), dtype=th.float32)\n', (3778, 3827), True, 'import torch as th\n'), ((6286, 6301), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (6299, 6301), False, 'from guided_diffusion import dist_util, logger\n'), ((6467, 6482), 'guided_diffusion.dist_util.dev', 'dist_util.dev', ([], {}), '()\n', (6480, 6482), False, 'from guided_diffusion import dist_util, logger\n'), ((1268, 1284), 'guided_diffusion.script_util.model_defaults', 'model_defaults', ([], {}), '()\n', (1282, 1284), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, args_to_dict\n'), ((1371, 1391), 'guided_diffusion.script_util.diffusion_defaults', 'diffusion_defaults', ([], {}), '()\n', (1389, 1391), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, args_to_dict\n'), ((1738, 1759), 
'guided_diffusion.script_util.classifier_defaults', 'classifier_defaults', ([], {}), '()\n', (1757, 1759), False, 'from guided_diffusion.script_util import model_defaults, diffusion_defaults, classifier_defaults, create_model, create_gaussian_diffusion, create_classifier, add_dict_to_argparser, args_to_dict\n'), ((5163, 5219), 'torch.norm', 'th.norm', (['cond_grad'], {'p': '(2)', 'dim': '(1, 2, 3)', 'dtype': 'th.float32'}), '(cond_grad, p=2, dim=(1, 2, 3), dtype=th.float32)\n', (5170, 5219), True, 'import torch as th\n')] |
import os
import sys
import math
# Make the sibling analysis packages importable when this file is run as a
# stand-alone script (the repository is not installed as a package).
sys.path.insert(0,os.path.abspath('../../datatypes_and_database/'))
sys.path.insert(0,os.path.abspath('../../background_subtraction/sg_background_fit'))
sys.path.insert(0,os.path.abspath('../../config_file_handling/'))
sys.path.insert(0,os.path.abspath('../fitting_functions/'))
sys.path.insert(0,os.path.abspath('../pade_background_fit'))
import admx_db_interface
import fitting_functions as fit
from admx_db_datatypes import PowerSpectrum,PowerMeasurement,ADMXDataSeries
from admx_datatype_hdf5 import save_dataseries_to_hdf5,load_dataseries_from_hdf5,save_measurement_to_hdf5,load_measurement_from_hdf5
from sg_background_fit import filter_bg_with_sg,filter_bg_with_sg_keep_extrema
from pade_background_fit import filter_bg_with_pade, mypade
from config_file_handling import test_timestamp_cuts, get_intermediate_data_file_name
import numpy as np
import h5py
import datetime
import time
from dateutil import parser
import yaml
import argparse
from scipy.constants import codata
from scipy.optimize import curve_fit,fsolve
#----------Stuff for checking the config files
# ---------- Command-line options and run-definition configuration ----------
target_nibble = "test_nibble"
argparser = argparse.ArgumentParser()
argparser.add_argument("-r", "--run_definition", help="run definition yaml file",
                       default="../../config/run1b_definitions.yaml")
argparser.add_argument("-n", "--nibble_name", help="name of nibble to run",
                       default=target_nibble)
args = argparser.parse_args()
# Parse the run-definition config.  safe_load avoids arbitrary object
# construction; yaml.load() without an explicit Loader is deprecated in
# PyYAML 5 and a TypeError in PyYAML >= 6.  The context manager guarantees
# the file is closed even if parsing raises.
with open(args.run_definition, "r") as run_definition_file:
    run_definition = yaml.safe_load(run_definition_file)
target_nibble = args.nibble_name
print(target_nibble)
# INTERFACE WITH ADMX DB
db = admx_db_interface.ADMXDB()
db.hostname = "admxdb01.fnal.gov"
# Nibble-specific inputs: receiver-shape file, time window, timestamp cuts.
receiver_shape_file = get_intermediate_data_file_name(run_definition["nibbles"][target_nibble], "receiver_shape.h5")
start_time = run_definition["nibbles"][target_nibble]["start_time"]
stop_time = run_definition["nibbles"][target_nibble]["stop_time"]
timestamp_cut_yaml = run_definition["timestamp_cuts"]
# TODO (carried over from original note): correct this to pick the correct
# receiver shape file.
with h5py.File(receiver_shape_file, "r") as f:
    receiver_shape = load_dataseries_from_hdf5(f, "receiver_shape")
# Boltzmann's constant [J/K].
kboltz = codata.value('Boltzmann constant')
def calculate_chi2(y, yfit, uncertainties):
    """Return the total chi-squared of a fit.

    Parameters
    ----------
    y : array_like
        Measured values.
    yfit : array_like
        Fitted/model values at the same points.
    uncertainties : array_like
        One-sigma uncertainty per point.
    """
    normalized_residuals = (np.asarray(y) - np.asarray(yfit)) / np.asarray(uncertainties)
    return np.sum(normalized_residuals * normalized_residuals)
#Actual DB Query
max_lines=100000
#max_lines=50
# NOTE(review): the query is assembled by string concatenation; start/stop
# times come from the local run-definition YAML here, but a parameterized
# query would be safer if these values ever come from an untrusted source.
query="SELECT A.timestamp, B.start_frequency_channel_one,B.stop_frequency_channel_one,B.frequency_resolution_channel_one,B.power_spectrum_channel_one,B.sampling_rate,B.integration_time,A.q_channel_one,A.mode_frequency_channel_one,A.digitizer_log_reference,A.notes from axion_scan_log as A INNER JOIN digitizer_log as B ON A.digitizer_log_reference=B.digitizer_log_id WHERE A.timestamp < '"+str(stop_time)+"' AND A.timestamp>'"+str(start_time)+"' ORDER BY A.timestamp asc LIMIT "+str(max_lines)
print("Querying database")
records=db.send_admxdb_query(query)
print("#got "+str(len(records))+" entries")
# Per-scan outputs accumulated by the analysis loop below.
tstamp_array=[]
na_freq_array=[]
fit_freq_array=[]
chi2_free_freq_array=[]
chi2_fixed_freq_array=[]
# NOTE(review): tstamp_array is re-initialized here; the assignment a few
# lines above is redundant.
tstamp_array=[]
#Analysis Code Here
for line in records:
    # Skip scans that fall inside a configured timestamp cut.
    should_cut,cut_reason=test_timestamp_cuts(timestamp_cut_yaml,line[0])
    if should_cut:
        count=1  # no-op placeholder; cut scans are simply skipped
    else:
        #Read the time in seconds
        time_sec=time.mktime(line[0].timetuple())
        tstamp_array.append(time_sec)
        #Process the spectrum
        spectrum_raw=PowerSpectrum(line[4],line[1],line[2])
        # Divide out the receiver transfer function, then drop the first bin.
        spectrum_before_receiver=spectrum_raw/receiver_shape.yvalues
        spectrum_before_receiver.yvalues=np.delete(spectrum_before_receiver.yvalues, 0)
        q_ch1=line[7]   # cavity quality factor (channel one)
        c_freq=line[8]  # mode frequency from the scan log
        # Lorentzian fits with the center frequency fixed vs. left free.
        lsq_fixed_center=fit.least_squares_fit_lorentz_skew_lorentz_gain_slope_fixed_center(spectrum_before_receiver,spectrum_before_receiver.yvalues[1],0,0,0,c_freq,q_ch1)
        lsq_free_center=fit.least_squares_fit_lorentz_skew_lorentz_gain_slope_free_center(spectrum_before_receiver,spectrum_before_receiver.yvalues[1],0,0,0,c_freq,q_ch1)
        lsq_fixed_center_fit=fit.lorentz_skew_lorentz_plus_constant_w_gain_slope(spectrum_before_receiver.get_xvalues(), [lsq_fixed_center.x[0], lsq_fixed_center.x[1], lsq_fixed_center.x[2],lsq_fixed_center.x[3],c_freq,q_ch1])
        lsq_free_center_fit=fit.lorentz_skew_lorentz_plus_constant_w_gain_slope(spectrum_before_receiver.get_xvalues(), [lsq_free_center.x[0],lsq_free_center.x[1],lsq_free_center.x[2], lsq_free_center.x[3], lsq_free_center.x[4], q_ch1])
        fit_freq=lsq_free_center.x[4]
        na_freq=c_freq
        na_freq_array.append(na_freq)
        fit_freq_array.append(fit_freq)
        # NOTE(review): this ones() array is immediately overwritten below;
        # the fractional uncertainty 1/sqrt(integration_time * bin_width) is
        # what is actually used.
        uncertainties=np.ones(np.size(spectrum_before_receiver.yvalues))
        int_time=line[6]
        res=line[3]*10**6  # resolution converted from MHz to Hz — TODO confirm units
        fractional_uncertainty=1/(np.sqrt(int_time*res))
        uncertainties=np.multiply(spectrum_before_receiver.yvalues,fractional_uncertainty)
        # Reduced chi-squared: 5 free parameters (free center) vs 4 (fixed).
        chi2_free_freq=calculate_chi2(spectrum_before_receiver.yvalues, lsq_free_center_fit, uncertainties)
        chi2_fixed_freq=calculate_chi2(spectrum_before_receiver.yvalues, lsq_fixed_center_fit, uncertainties)
        num_points=len(spectrum_before_receiver.yvalues)
        chi2_free_freq_array.append(chi2_free_freq/(num_points-5))
        chi2_fixed_freq_array.append(chi2_fixed_freq/(num_points-4))
# Save one row per scan: timestamp, NA frequency, fitted frequency, and the
# two reduced chi-squared values (free / fixed center).
array_to_save=(tstamp_array,na_freq_array, fit_freq_array, chi2_free_freq_array, chi2_fixed_freq_array)
array_to_save=np.transpose(array_to_save)
list_to_save=array_to_save.tolist()
np.savetxt("frequency_offset_"+target_nibble+".txt", array_to_save, delimiter=" ")
| [
"yaml.load",
"numpy.sum",
"argparse.ArgumentParser",
"scipy.constants.codata.value",
"fitting_functions.least_squares_fit_lorentz_skew_lorentz_gain_slope_free_center",
"fitting_functions.least_squares_fit_lorentz_skew_lorentz_gain_slope_fixed_center",
"os.path.abspath",
"numpy.multiply",
"admx_datat... | [((1146, 1171), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1169, 1171), False, 'import argparse\n'), ((1488, 1518), 'yaml.load', 'yaml.load', (['run_definition_file'], {}), '(run_definition_file)\n', (1497, 1518), False, 'import yaml\n'), ((1627, 1653), 'admx_db_interface.ADMXDB', 'admx_db_interface.ADMXDB', ([], {}), '()\n', (1651, 1653), False, 'import admx_db_interface\n'), ((1707, 1805), 'config_file_handling.get_intermediate_data_file_name', 'get_intermediate_data_file_name', (["run_definition['nibbles'][target_nibble]", '"""receiver_shape.h5"""'], {}), "(run_definition['nibbles'][target_nibble],\n 'receiver_shape.h5')\n", (1738, 1805), False, 'from config_file_handling import test_timestamp_cuts, get_intermediate_data_file_name\n'), ((2039, 2074), 'h5py.File', 'h5py.File', (['receiver_shape_file', '"""r"""'], {}), "(receiver_shape_file, 'r')\n", (2048, 2074), False, 'import h5py\n'), ((2089, 2135), 'admx_datatype_hdf5.load_dataseries_from_hdf5', 'load_dataseries_from_hdf5', (['f', '"""receiver_shape"""'], {}), "(f, 'receiver_shape')\n", (2114, 2135), False, 'from admx_datatype_hdf5 import save_dataseries_to_hdf5, load_dataseries_from_hdf5, save_measurement_to_hdf5, load_measurement_from_hdf5\n'), ((2175, 2209), 'scipy.constants.codata.value', 'codata.value', (['"""Boltzmann constant"""'], {}), "('Boltzmann constant')\n", (2187, 2209), False, 'from scipy.constants import codata\n'), ((5539, 5566), 'numpy.transpose', 'np.transpose', (['array_to_save'], {}), '(array_to_save)\n', (5551, 5566), True, 'import numpy as np\n'), ((5603, 5693), 'numpy.savetxt', 'np.savetxt', (["('frequency_offset_' + target_nibble + '.txt')", 'array_to_save'], {'delimiter': '""" """'}), "('frequency_offset_' + target_nibble + '.txt', array_to_save,\n delimiter=' ')\n", (5613, 5693), True, 'import numpy as np\n'), ((51, 99), 'os.path.abspath', 'os.path.abspath', (['"""../../datatypes_and_database/"""'], {}), 
"('../../datatypes_and_database/')\n", (66, 99), False, 'import os\n'), ((119, 184), 'os.path.abspath', 'os.path.abspath', (['"""../../background_subtraction/sg_background_fit"""'], {}), "('../../background_subtraction/sg_background_fit')\n", (134, 184), False, 'import os\n'), ((204, 250), 'os.path.abspath', 'os.path.abspath', (['"""../../config_file_handling/"""'], {}), "('../../config_file_handling/')\n", (219, 250), False, 'import os\n'), ((270, 310), 'os.path.abspath', 'os.path.abspath', (['"""../fitting_functions/"""'], {}), "('../fitting_functions/')\n", (285, 310), False, 'import os\n'), ((330, 371), 'os.path.abspath', 'os.path.abspath', (['"""../pade_background_fit"""'], {}), "('../pade_background_fit')\n", (345, 371), False, 'import os\n'), ((2267, 2287), 'numpy.subtract', 'np.subtract', (['y', 'yfit'], {}), '(y, yfit)\n', (2278, 2287), True, 'import numpy as np\n'), ((2306, 2341), 'numpy.divide', 'np.divide', (['residuals', 'uncertainties'], {}), '(residuals, uncertainties)\n', (2315, 2341), True, 'import numpy as np\n'), ((2351, 2394), 'numpy.multiply', 'np.multiply', (['norm_residuals', 'norm_residuals'], {}), '(norm_residuals, norm_residuals)\n', (2362, 2394), True, 'import numpy as np\n'), ((2409, 2421), 'numpy.sum', 'np.sum', (['chi2'], {}), '(chi2)\n', (2415, 2421), True, 'import numpy as np\n'), ((3281, 3329), 'config_file_handling.test_timestamp_cuts', 'test_timestamp_cuts', (['timestamp_cut_yaml', 'line[0]'], {}), '(timestamp_cut_yaml, line[0])\n', (3300, 3329), False, 'from config_file_handling import test_timestamp_cuts, get_intermediate_data_file_name\n'), ((3544, 3584), 'admx_db_datatypes.PowerSpectrum', 'PowerSpectrum', (['line[4]', 'line[1]', 'line[2]'], {}), '(line[4], line[1], line[2])\n', (3557, 3584), False, 'from admx_db_datatypes import PowerSpectrum, PowerMeasurement, ADMXDataSeries\n'), ((3691, 3737), 'numpy.delete', 'np.delete', (['spectrum_before_receiver.yvalues', '(0)'], {}), '(spectrum_before_receiver.yvalues, 0)\n', (3700, 
3737), True, 'import numpy as np\n'), ((3813, 3975), 'fitting_functions.least_squares_fit_lorentz_skew_lorentz_gain_slope_fixed_center', 'fit.least_squares_fit_lorentz_skew_lorentz_gain_slope_fixed_center', (['spectrum_before_receiver', 'spectrum_before_receiver.yvalues[1]', '(0)', '(0)', '(0)', 'c_freq', 'q_ch1'], {}), '(\n spectrum_before_receiver, spectrum_before_receiver.yvalues[1], 0, 0, 0,\n c_freq, q_ch1)\n', (3879, 3975), True, 'import fitting_functions as fit\n'), ((3984, 4145), 'fitting_functions.least_squares_fit_lorentz_skew_lorentz_gain_slope_free_center', 'fit.least_squares_fit_lorentz_skew_lorentz_gain_slope_free_center', (['spectrum_before_receiver', 'spectrum_before_receiver.yvalues[1]', '(0)', '(0)', '(0)', 'c_freq', 'q_ch1'], {}), '(\n spectrum_before_receiver, spectrum_before_receiver.yvalues[1], 0, 0, 0,\n c_freq, q_ch1)\n', (4049, 4145), True, 'import fitting_functions as fit\n'), ((4936, 5005), 'numpy.multiply', 'np.multiply', (['spectrum_before_receiver.yvalues', 'fractional_uncertainty'], {}), '(spectrum_before_receiver.yvalues, fractional_uncertainty)\n', (4947, 5005), True, 'import numpy as np\n'), ((4767, 4808), 'numpy.size', 'np.size', (['spectrum_before_receiver.yvalues'], {}), '(spectrum_before_receiver.yvalues)\n', (4774, 4808), True, 'import numpy as np\n'), ((4892, 4915), 'numpy.sqrt', 'np.sqrt', (['(int_time * res)'], {}), '(int_time * res)\n', (4899, 4915), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Generate figure showing the distribution of the nearest neighbor distance ratio
for regular and highly memorized observations.
Author: <NAME>
License: See LICENSE file.
Copyright: 2021, The Alan Turing Institute
"""
import argparse
import numpy as np
from fitter import Fitter
from analysis_utils import dict2tex
def parse_args():
    """Parse the command-line interface.

    Returns an ``argparse.Namespace`` with ``nns``, ``results`` and
    ``output``; all three options are required.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument("--nns", help="Nearest neighbor file (.npz)", required=True)
    cli.add_argument("--results", help="Memorization results (.npz)", required=True)
    cli.add_argument("-o", "--output", help="Output file to write to (.tex)", required=True)
    return cli.parse_args()
def make_tex(rho, M, q=0.95, ymax=4.2):
    """Return the lines of a standalone LaTeX document plotting the
    distribution of nearest-neighbor distance ratios ``rho``.

    Observations are split into "Regular" (``M <= q``) and "High mem."
    (``M > q``) groups; each group is drawn as a translucent histogram plus
    a Johnson-SU density fitted with the ``fitter`` package.  The pgfplots
    preamble declares the Johnson-SU pdf so TeX evaluates the fitted curve
    itself.

    :param rho: per-observation nearest-neighbor distance ratios
    :param M: per-observation memorization scores (same shape as ``rho``)
    :param q: memorization threshold separating the two groups
    :param ymax: upper y-axis limit of the plot
    :return: list of strings, one per output line
    """
    tex = []
    # pgfplots preamble declaring gauss/johnson/johnsonsu as TeX functions.
    pgfplotsset = """
    \\pgfplotsset{compat=newest,%
    /pgf/declare function={
    gauss(\\x) = 1/sqrt(2*pi) * exp(-0.5 * \\x * \\x);%
    johnson(\\x,\\a,\\b) = \\b/sqrt(\\x * \\x + 1) * gauss(\\a+\\b*ln(\\x+sqrt(\\x*\\x+1)));%
    johnsonsu(\\x,\\a,\\b,\\loc,\\scale) = johnson((\\x - \\loc)/\\scale,\\a,\\b)/\\scale;%
    },
    }
    """
    tex.append("\\documentclass[10pt,preview=true]{standalone}")
    # LuaLaTeX
    tex.append(
        "\\pdfvariable suppressoptionalinfo \\numexpr1+2+8+16+32+64+128+512\\relax"
    )
    tex.append("\\usepackage[utf8]{inputenc}")
    tex.append("\\usepackage[T1]{fontenc}")
    tex.append("\\usepackage{pgfplots}")
    tex.append(pgfplotsset)
    tex.append("\\definecolor{MyBlue}{HTML}{004488}")
    tex.append("\\definecolor{MyYellow}{HTML}{DDAA33}")
    tex.append("\\definecolor{MyRed}{HTML}{BB5566}")
    tex.append("\\begin{document}")
    tex.append("\\begin{tikzpicture}")
    fontsize = "\\normalsize"
    bins = 100
    xmin = 0.6
    xmax = 1.6
    ymin = 0
    # Integer y ticks from 0 up to round(ymax).
    ytick = "{" + ", ".join(map(str, range(1 + round(ymax)))) + "}"
    axis_opts = {
        "xmin": xmin,
        "xmax": xmax,
        "ymin": ymin,
        "ymax": ymax,
        "scale only axis": None,
        "xlabel": "$\\rho_i$",
        "ylabel": "Density",
        "width": "8cm",
        "height": "5cm",
        "xtick": "{0.75, 1.0, 1.25, 1.5}",
        "xticklabels": "{0.75, 1.0, 1.25, 1.5}",
        "ytick": ytick,
        "yticklabels": ytick,
        "xlabel style": {"font": fontsize},
        "ylabel style": {"font": fontsize},
        "xticklabel style": {"font": fontsize},
        "yticklabel style": {"font": fontsize},
        "legend pos": "north east",
        "legend style": {"font": fontsize},
        "legend cell align": "left",
    }
    tex.append(f"\\begin{{axis}}[{dict2tex(axis_opts)}]")
    thickness = "very thick"
    hist_plot_opts = {
        "forget plot": None,
        "draw": "none",
        "fill opacity": 0.3,
        "hist": {
            "bins": bins,
            "density": "true",
            "data min": xmin,
            "data max": xmax,
        },
    }
    line_plot_opts = {
        "domain": f"{xmin}:{xmax}",
        "samples": 201,
        "mark": "none",
        "solid": None,
        thickness: None,
    }
    # One histogram + fitted-density pair per group.
    rhos = [rho[M <= q], rho[M > q]]
    labels = ["Regular", "High mem."]
    colors = ["MyBlue", "MyYellow"]
    for r, label, color in zip(rhos, labels, colors):
        hist_plot_opts["fill"] = color
        line_plot_opts["draw"] = color
        # Inline histogram data table.
        tex.append(
            f"\\addplot [{dict2tex(hist_plot_opts)}] table[y index=0] {{%"
        )
        tex.append("data")
        for v in r:
            tex.append(str(v.item()))
        tex.append("};")
        # Fit a Johnson-SU density to this group's ratios.
        f = Fitter(
            r, distributions=["johnsonsu"], xmin=xmin, xmax=xmax, bins=bins
        )
        f.fit()
        params = f.get_best()
        a, b, loc, scale = params["johnsonsu"]
        tex.append(f"\\addplot [{dict2tex(line_plot_opts)}] {{%")
        tex.append(f"johnsonsu(x, {a}, {b}, {loc}, {scale})")
        tex.append("};")
        tex.append(f"\\addlegendentry{{{label}}}")
    tex.append("\\end{axis}")
    tex.append("\\end{tikzpicture}")
    tex.append("\\end{document}")
    return tex
def main():
args = parse_args()
results = np.load(args.results, allow_pickle=True)
nns = np.load(args.nns)
metadata = results["metadata"][()]
dataset = metadata["dataset"]
ratio = nns["ratio"]
M = results["M"][:, -1]
assert ratio.shape == M.shape
if dataset == "CelebA":
ymax = 4.3
elif dataset == "BinarizedMNIST" and metadata["learning_rate"] == 1e-3:
ymax = 7.6
elif dataset == "BinarizedMNIST" and metadata["learning_rate"] == 1e-4:
ymax = 6.4
else:
raise NotImplementedError
tex = make_tex(ratio, M, q=0.95, ymax=ymax)
with open(args.output, "w") as fp:
fp.write("\n".join(tex))
if __name__ == "__main__":
main()
| [
"analysis_utils.dict2tex",
"numpy.load",
"argparse.ArgumentParser",
"fitter.Fitter"
] | [((381, 406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (404, 406), False, 'import argparse\n'), ((4154, 4194), 'numpy.load', 'np.load', (['args.results'], {'allow_pickle': '(True)'}), '(args.results, allow_pickle=True)\n', (4161, 4194), True, 'import numpy as np\n'), ((4205, 4222), 'numpy.load', 'np.load', (['args.nns'], {}), '(args.nns)\n', (4212, 4222), True, 'import numpy as np\n'), ((3592, 3663), 'fitter.Fitter', 'Fitter', (['r'], {'distributions': "['johnsonsu']", 'xmin': 'xmin', 'xmax': 'xmax', 'bins': 'bins'}), "(r, distributions=['johnsonsu'], xmin=xmin, xmax=xmax, bins=bins)\n", (3598, 3663), False, 'from fitter import Fitter\n'), ((2645, 2664), 'analysis_utils.dict2tex', 'dict2tex', (['axis_opts'], {}), '(axis_opts)\n', (2653, 2664), False, 'from analysis_utils import dict2tex\n'), ((3410, 3434), 'analysis_utils.dict2tex', 'dict2tex', (['hist_plot_opts'], {}), '(hist_plot_opts)\n', (3418, 3434), False, 'from analysis_utils import dict2tex\n'), ((3813, 3837), 'analysis_utils.dict2tex', 'dict2tex', (['line_plot_opts'], {}), '(line_plot_opts)\n', (3821, 3837), False, 'from analysis_utils import dict2tex\n')] |
import pytest
import numpy as np
from eddington import FitFunctionRuntimeError
from tests.fit_function.dummy_functions import dummy_func1, dummy_func2
delta = 10e-5
@pytest.fixture
def dummy_func1_fixture():
yield dummy_func1
dummy_func1.clear_fixed()
@pytest.fixture
def dummy_func2_fixture():
yield dummy_func2
dummy_func2.clear_fixed()
def test_name(dummy_func1_fixture):
assert dummy_func1_fixture.name == "dummy_func1", "Name is different than expected"
def test_signature(dummy_func1_fixture):
assert (
dummy_func1_fixture.signature == "dummy_func1"
), "Signature is different than expected"
def test_title_name(dummy_func1_fixture):
assert (
dummy_func1_fixture.title_name == "Dummy Func1"
), "Title name is different than expected"
def test_call_success(dummy_func1_fixture):
a = np.array([1, 2])
x = 3
result = dummy_func1_fixture(a, x)
assert (
pytest.approx(result, rel=delta) == 19
), "Execution result is different than expected"
def test_call_failure_because_of_not_enough_parameters(dummy_func1_fixture):
a = np.array([1])
x = 3
with pytest.raises(
FitFunctionRuntimeError, match="^Input length should be 2, got 1$"
):
dummy_func1_fixture(a, x)
def test_call_failure_because_of_too_many_parameters(dummy_func1_fixture):
a = np.array([1, 2, 3])
x = 4
with pytest.raises(
FitFunctionRuntimeError, match="^Input length should be 2, got 3$"
):
dummy_func1_fixture(a, x)
def test_assign(dummy_func1_fixture):
a = np.array([1, 2])
x = 3
dummy_func1_fixture.assign(a)
result = dummy_func1_fixture(x)
assert (
pytest.approx(result, rel=delta) == 19
), "Execution result is different than expected"
def test_call_func_with_fix_value(dummy_func2_fixture):
a = np.array([7, 2, 1])
x = 2
dummy_func2_fixture.fix(1, 3)
result = dummy_func2_fixture(a, x)
assert (
pytest.approx(result, rel=delta) == 29
), "Execution result is different than expected"
def test_call_x_derivative_with_fix_value(dummy_func2_fixture):
a = np.array([7, 2, 1])
x = 2
dummy_func2_fixture.fix(1, 3)
result = dummy_func2_fixture.x_derivative(a, x)
assert (
pytest.approx(result, rel=delta) == 23
), "x derivative execution result is different than expected"
def test_call_a_derivative_with_fix_value(dummy_func2_fixture):
a = np.array([7, 2, 1])
x = 2
dummy_func2_fixture.fix(1, 3)
result = dummy_func2_fixture.a_derivative(a, x)
assert result == pytest.approx(
np.array([1, 4, 8]), rel=delta
), "a derivative execution result is different than expected"
def test_override_fix_value(dummy_func2_fixture):
a = np.array([7, 2, 1])
x = 2
dummy_func2_fixture.fix(1, 9)
dummy_func2_fixture.fix(1, 3)
result = dummy_func2(a, x)
assert (
pytest.approx(result, rel=delta) == 29
), "Execution result is different than expected"
def test_unfix_value(dummy_func2_fixture):
dummy_func2_fixture.fix(1, 5)
dummy_func2_fixture.unfix(1)
a = np.array([7, 3, 2, 1])
x = 2
result = dummy_func2(a, x)
assert (
pytest.approx(result, rel=delta) == 29
), "Execution result is different than expected"
def test_clear_fixed(dummy_func2_fixture):
dummy_func2_fixture.fix(1, 5)
dummy_func2_fixture.fix(3, 2)
dummy_func2_fixture.clear_fixed()
a = np.array([7, 3, 2, 1])
x = 2
result = dummy_func2(a, x)
assert (
pytest.approx(result, rel=delta) == 29
), "Execution result is different than expected"
def test_assign_failure_because_of_not_enough_parameters(dummy_func1_fixture):
a = np.array([1])
with pytest.raises(
FitFunctionRuntimeError, match="^Input length should be 2, got 1$"
):
dummy_func1_fixture.assign(a)
def test_assign_failure_because_of_too_many_parameters(dummy_func1_fixture):
a = np.array([1, 2, 3])
with pytest.raises(
FitFunctionRuntimeError, match="^Input length should be 2, got 3$"
):
dummy_func1_fixture.assign(a)
def test_fix_failure_when_trying_to_fix_negative_index(dummy_func1_fixture):
with pytest.raises(
FitFunctionRuntimeError,
match="^Cannot fix index -1. Indices should be between 0 and 1$",
):
dummy_func1_fixture.fix(
-1, 10,
)
def test_fix_failure_when_trying_to_fix_too_big_index(dummy_func1_fixture):
with pytest.raises(
FitFunctionRuntimeError,
match="^Cannot fix index 2. Indices should be between 0 and 1$",
):
dummy_func1_fixture.fix(2, 10)
def test_call_failure_because_not_enough_parameters_after_fix(dummy_func2_fixture):
a = np.array([7, 2])
x = 2
dummy_func2_fixture.fix(1, 3)
with pytest.raises(
FitFunctionRuntimeError, match="^Input length should be 3, got 2"
):
dummy_func2_fixture(a, x)
def test_call_failure_because_too_much_parameters_after_fix(dummy_func2_fixture):
a = np.array([7, 2, 8, 2])
x = 2
dummy_func2_fixture.fix(1, 3)
with pytest.raises(
FitFunctionRuntimeError, match="^Input length should be 3, got 4$"
):
dummy_func2_fixture(a, x)
def test_call_failure_when_trying_to_run_without_arguments(dummy_func2_fixture):
with pytest.raises(
FitFunctionRuntimeError, match='^No parameters has been given to "dummy_func2"$'
):
dummy_func2_fixture()
def test_fit_function_representation(dummy_func1_fixture):
assert str(dummy_func1_fixture) == (
"FitFunction(name='dummy_func1', syntax='a[0] + a[1] * x ** 2')"
), "Representation is different than expected"
| [
"tests.fit_function.dummy_functions.dummy_func2.clear_fixed",
"pytest.raises",
"numpy.array",
"tests.fit_function.dummy_functions.dummy_func2",
"pytest.approx",
"tests.fit_function.dummy_functions.dummy_func1.clear_fixed"
] | [((239, 264), 'tests.fit_function.dummy_functions.dummy_func1.clear_fixed', 'dummy_func1.clear_fixed', ([], {}), '()\n', (262, 264), False, 'from tests.fit_function.dummy_functions import dummy_func1, dummy_func2\n'), ((336, 361), 'tests.fit_function.dummy_functions.dummy_func2.clear_fixed', 'dummy_func2.clear_fixed', ([], {}), '()\n', (359, 361), False, 'from tests.fit_function.dummy_functions import dummy_func1, dummy_func2\n'), ((859, 875), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (867, 875), True, 'import numpy as np\n'), ((1125, 1138), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1133, 1138), True, 'import numpy as np\n'), ((1374, 1393), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1382, 1393), True, 'import numpy as np\n'), ((1592, 1608), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1600, 1608), True, 'import numpy as np\n'), ((1868, 1887), 'numpy.array', 'np.array', (['[7, 2, 1]'], {}), '([7, 2, 1])\n', (1876, 1887), True, 'import numpy as np\n'), ((2158, 2177), 'numpy.array', 'np.array', (['[7, 2, 1]'], {}), '([7, 2, 1])\n', (2166, 2177), True, 'import numpy as np\n'), ((2474, 2493), 'numpy.array', 'np.array', (['[7, 2, 1]'], {}), '([7, 2, 1])\n', (2482, 2493), True, 'import numpy as np\n'), ((2791, 2810), 'numpy.array', 'np.array', (['[7, 2, 1]'], {}), '([7, 2, 1])\n', (2799, 2810), True, 'import numpy as np\n'), ((2902, 2919), 'tests.fit_function.dummy_functions.dummy_func2', 'dummy_func2', (['a', 'x'], {}), '(a, x)\n', (2913, 2919), False, 'from tests.fit_function.dummy_functions import dummy_func1, dummy_func2\n'), ((3153, 3175), 'numpy.array', 'np.array', (['[7, 3, 2, 1]'], {}), '([7, 3, 2, 1])\n', (3161, 3175), True, 'import numpy as np\n'), ((3199, 3216), 'tests.fit_function.dummy_functions.dummy_func2', 'dummy_func2', (['a', 'x'], {}), '(a, x)\n', (3210, 3216), False, 'from tests.fit_function.dummy_functions import dummy_func1, dummy_func2\n'), ((3489, 3511), 'numpy.array', 
'np.array', (['[7, 3, 2, 1]'], {}), '([7, 3, 2, 1])\n', (3497, 3511), True, 'import numpy as np\n'), ((3535, 3552), 'tests.fit_function.dummy_functions.dummy_func2', 'dummy_func2', (['a', 'x'], {}), '(a, x)\n', (3546, 3552), False, 'from tests.fit_function.dummy_functions import dummy_func1, dummy_func2\n'), ((3755, 3768), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3763, 3768), True, 'import numpy as np\n'), ((4000, 4019), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (4008, 4019), True, 'import numpy as np\n'), ((4792, 4808), 'numpy.array', 'np.array', (['[7, 2]'], {}), '([7, 2])\n', (4800, 4808), True, 'import numpy as np\n'), ((5085, 5107), 'numpy.array', 'np.array', (['[7, 2, 8, 2]'], {}), '([7, 2, 8, 2])\n', (5093, 5107), True, 'import numpy as np\n'), ((946, 978), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (959, 978), False, 'import pytest\n'), ((1158, 1244), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Input length should be 2, got 1$"""'}), "(FitFunctionRuntimeError, match=\n '^Input length should be 2, got 1$')\n", (1171, 1244), False, 'import pytest\n'), ((1413, 1499), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Input length should be 2, got 3$"""'}), "(FitFunctionRuntimeError, match=\n '^Input length should be 2, got 3$')\n", (1426, 1499), False, 'import pytest\n'), ((1710, 1742), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (1723, 1742), False, 'import pytest\n'), ((1992, 2024), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (2005, 2024), False, 'import pytest\n'), ((2295, 2327), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (2308, 2327), False, 'import pytest\n'), ((2941, 2973), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (2954, 
2973), False, 'import pytest\n'), ((3238, 3270), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (3251, 3270), False, 'import pytest\n'), ((3574, 3606), 'pytest.approx', 'pytest.approx', (['result'], {'rel': 'delta'}), '(result, rel=delta)\n', (3587, 3606), False, 'import pytest\n'), ((3778, 3864), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Input length should be 2, got 1$"""'}), "(FitFunctionRuntimeError, match=\n '^Input length should be 2, got 1$')\n", (3791, 3864), False, 'import pytest\n'), ((4029, 4115), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Input length should be 2, got 3$"""'}), "(FitFunctionRuntimeError, match=\n '^Input length should be 2, got 3$')\n", (4042, 4115), False, 'import pytest\n'), ((4252, 4361), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Cannot fix index -1. Indices should be between 0 and 1$"""'}), "(FitFunctionRuntimeError, match=\n '^Cannot fix index -1. Indices should be between 0 and 1$')\n", (4265, 4361), False, 'import pytest\n'), ((4531, 4639), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Cannot fix index 2. Indices should be between 0 and 1$"""'}), "(FitFunctionRuntimeError, match=\n '^Cannot fix index 2. 
Indices should be between 0 and 1$')\n", (4544, 4639), False, 'import pytest\n'), ((4863, 4948), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Input length should be 3, got 2"""'}), "(FitFunctionRuntimeError, match='^Input length should be 3, got 2'\n )\n", (4876, 4948), False, 'import pytest\n'), ((5162, 5248), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^Input length should be 3, got 4$"""'}), "(FitFunctionRuntimeError, match=\n '^Input length should be 3, got 4$')\n", (5175, 5248), False, 'import pytest\n'), ((5385, 5485), 'pytest.raises', 'pytest.raises', (['FitFunctionRuntimeError'], {'match': '"""^No parameters has been given to "dummy_func2"$"""'}), '(FitFunctionRuntimeError, match=\n \'^No parameters has been given to "dummy_func2"$\')\n', (5398, 5485), False, 'import pytest\n'), ((2634, 2653), 'numpy.array', 'np.array', (['[1, 4, 8]'], {}), '([1, 4, 8])\n', (2642, 2653), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
from statlogging import readlog
from readconfig import readconfig
from chkrate import chkrate
import os
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp()
import matplotlib
matplotlib.use('Agg',warn=False)
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
import datetime
import sys
def hsplot(hashrate,cfg,time0):
[t,v1,v2,vp,vps] = hashrate
if len(t) < 2:
print("More log files are needed for plotting.")
return 1
print("Plotting into " + cfg['HSplot']['img_dir'] + "hs-"+time0.strftime("%Y_%m_%d_%H_%M")+".png ... ",end="")
sys.stdout.flush()
for k in range(0,len(t)):
t[k] = t[k]/3600.0
x = np.array(t)
y0 = np.array(v1)
y1 = np.array(v2)
y2 = np.array(vp)
y3 = np.array(vps)
ymax = np.amax(np.hstack((y0,y1,y2,y3)))
f0 = interp1d(x, y0)
f1 = interp1d(x, y1)
f2 = interp1d(x, y2)
f3 = interp1d(x, y3)
xnew = np.linspace(t[0], t[-1], 1800)
fig = plt.figure(figsize=(float(cfg['HSplot']['width'])/float(cfg['HSplot']['dpi']),float(cfg['HSplot']['height'])/float(cfg['HSplot']['dpi'])), dpi=int(cfg['HSplot']['dpi']), facecolor="white")
titlefont = {'family' : cfg['HSplot']['font_family1'],
'weight' : 'normal',
'size' : int(cfg['HSplot']['font_size1']),
}
ticks_font = matplotlib.font_manager.FontProperties(family=cfg['HSplot']['font_family2'], style='normal', size=int(cfg['HSplot']['font_size2']), weight='normal', stretch='normal')
p0, = plt.plot(xnew,f0(xnew),'b-')
p1, = plt.plot(xnew,f1(xnew),'c-')
p2, = plt.plot(xnew,f2(xnew),'g-')
p3, = plt.plot(xnew,f3(xnew),'r-')
plt.legend((p0,p1,p2,p3),('Local Method 1','Local Method 2','Pool Worker','Pool Sum'), loc = 2, prop = ticks_font)
# x axis tick label
xticklabel = []
xmax = time0 - datetime.timedelta(seconds = (time0.hour - (time0.hour/2)*2)*3600 + time0.minute*60)
xmin = xmax
xticklabel.append(xmin.strftime("%H:%M"))
for i in range(0,12):
xmin = xmin - datetime.timedelta(seconds=7200)
xticklabel.append(xmin.strftime("%H:%M"))
xticklabel = xticklabel[::-1]
# y axis tick label
ymax_s = str(int(ymax))
flag = int(ymax_s[0])
yticklabel = ['0']
if flag == 1:
#0.1;0.2;0.3....
ystep = 1*(10**(len(ymax_s)-2))
ylim = int(ymax + ystep -1) / ystep * ystep
for i in range(1,int(ylim/ystep) ):
yticklabel.append("{:,}".format(i*(10 ** (len(ymax_s)-2))))
elif flag >= 2 and flag <= 3:
#0.2;0.4;0.6...
ystep = 2*(10**(len(ymax_s)-2))
ylim = int(ymax + ystep -1) / ystep * ystep
for i in range(1,int(ylim/ystep) ):
yticklabel.append("{:,}".format(i*2*(10 ** (len(ymax_s)-2))))
elif flag >= 4 and flag <= 6:
#0.25;0.50;0.75...
ystep = 25*(10**(len(ymax_s)-3))
ylim = int(ymax + ystep -1) / ystep * ystep
for i in range(1,int(ylim/ystep) ):
yticklabel.append("{:,}".format(i*25*(10 ** (len(ymax_s)-3))))
else:
#0.5;1.0;1.5...
ystep = 5*(10**(len(ymax_s)-2))
ylim = int(ymax + ystep -1) / ystep * ystep
for i in range(1,int(ylim/ystep) ):
yticklabel.append("{:,}".format(i*5*(10 ** (len(ymax_s)-2))))
ax=plt.gca()
ax.set_xticks(np.linspace((xmin-time0).total_seconds()/3600.0,(xmax-time0).total_seconds()/3600.0,13))
ax.set_xticklabels( tuple(xticklabel) )
ax.set_yticks(np.linspace(0,ylim-ystep,len(yticklabel)))
ax.set_yticklabels( tuple(yticklabel) )
ax.tick_params(tick1On = False, tick2On = False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(cfg['HSplot']['title'],fontdict=titlefont)
for label in ax.get_xticklabels() :
label.set_fontproperties(ticks_font)
for label in ax.get_yticklabels() :
label.set_fontproperties(ticks_font)
plt.axis([-24, 0, 0, ylim])
plt.grid(color = '0.75', linestyle='-')
plt.tight_layout()
plt.savefig(cfg['HSplot']['img_dir'] + "hs-"+time0.strftime("%Y_%m_%d_%H_%M")+".png")
print("Done.")
plt.clf()
return "hs-"+time0.strftime("%Y_%m_%d_%H_%M")+".png"
| [
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axis",
"numpy.hstack",
"tempfile.mkdtemp",
"numpy.array",
"sys.stdout.flush",
"matplotlib.use",
"numpy.linspace",
"datetime.timedelta",
"scipy.interpolate.interp... | [((210, 228), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (226, 228), False, 'import tempfile\n'), ((247, 280), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {'warn': '(False)'}), "('Agg', warn=False)\n", (261, 280), False, 'import matplotlib\n'), ((654, 672), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (670, 672), False, 'import sys\n'), ((728, 739), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (736, 739), True, 'import numpy as np\n'), ((746, 758), 'numpy.array', 'np.array', (['v1'], {}), '(v1)\n', (754, 758), True, 'import numpy as np\n'), ((765, 777), 'numpy.array', 'np.array', (['v2'], {}), '(v2)\n', (773, 777), True, 'import numpy as np\n'), ((784, 796), 'numpy.array', 'np.array', (['vp'], {}), '(vp)\n', (792, 796), True, 'import numpy as np\n'), ((803, 816), 'numpy.array', 'np.array', (['vps'], {}), '(vps)\n', (811, 816), True, 'import numpy as np\n'), ((866, 881), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y0'], {}), '(x, y0)\n', (874, 881), False, 'from scipy.interpolate import interp1d\n'), ((888, 903), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y1'], {}), '(x, y1)\n', (896, 903), False, 'from scipy.interpolate import interp1d\n'), ((910, 925), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y2'], {}), '(x, y2)\n', (918, 925), False, 'from scipy.interpolate import interp1d\n'), ((932, 947), 'scipy.interpolate.interp1d', 'interp1d', (['x', 'y3'], {}), '(x, y3)\n', (940, 947), False, 'from scipy.interpolate import interp1d\n'), ((957, 987), 'numpy.linspace', 'np.linspace', (['t[0]', 't[-1]', '(1800)'], {}), '(t[0], t[-1], 1800)\n', (968, 987), True, 'import numpy as np\n'), ((1645, 1766), 'matplotlib.pyplot.legend', 'plt.legend', (['(p0, p1, p2, p3)', "('Local Method 1', 'Local Method 2', 'Pool Worker', 'Pool Sum')"], {'loc': '(2)', 'prop': 'ticks_font'}), "((p0, p1, p2, p3), ('Local Method 1', 'Local Method 2',\n 'Pool Worker', 'Pool Sum'), loc=2, prop=ticks_font)\n", 
(1655, 1766), True, 'import matplotlib.pyplot as plt\n'), ((3089, 3098), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3096, 3098), True, 'import matplotlib.pyplot as plt\n'), ((3683, 3710), 'matplotlib.pyplot.axis', 'plt.axis', (['[-24, 0, 0, ylim]'], {}), '([-24, 0, 0, ylim])\n', (3691, 3710), True, 'import matplotlib.pyplot as plt\n'), ((3713, 3750), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""0.75"""', 'linestyle': '"""-"""'}), "(color='0.75', linestyle='-')\n", (3721, 3750), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3772), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3770, 3772), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3887), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3885, 3887), True, 'import matplotlib.pyplot as plt\n'), ((833, 860), 'numpy.hstack', 'np.hstack', (['(y0, y1, y2, y3)'], {}), '((y0, y1, y2, y3))\n', (842, 860), True, 'import numpy as np\n'), ((1814, 1907), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '((time0.hour - time0.hour / 2 * 2) * 3600 + time0.minute * 60)'}), '(seconds=(time0.hour - time0.hour / 2 * 2) * 3600 + time0\n .minute * 60)\n', (1832, 1907), False, 'import datetime\n'), ((1994, 2026), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(7200)'}), '(seconds=7200)\n', (2012, 2026), False, 'import datetime\n')] |
'''
Created on 15 Jul 2013
@author: <NAME>
'''
import numpy as np
from core import regression_methods
def exponentialModelAnalysis(isopachs,n):
"""
Analyses the isopach data under the assumption it follows a n-segment exponential model
Model: T(x) = c*exp(-m*x)
IMPORTANT: Works under the assumption x = sqrt(A/pi) rather than x = sqrt(A).
Arguments
isopachs:list of Isopachs -- list of isopachs to analyse
n:int -- the number of exponential segments
Returns
A dictionary with the following key-value mapping:
dict["estimatedTotalVolume"]:float -- the estimated total volume of the deposit.
dict["thicknessFunction"]:func x->t -- the thickness function, calculates T(x) (in metres).
dict["segmentLimits"]:list of floats -- list of bounds for the segments. Segment i
is valid between segmentLimits[i] and
segmentLimits[i+1].
dict["segmentVolumes"]:list of floats -- estimated tephra volumes for each segment.
dict["segmentCoefficients"]:list of floats -- estimated coefficients for each segment.
dict["segmentExponents"]:list of floats -- estimated exponents for each segment.
dict["segmentBts"]:list of floats -- estimated half-thicknesses for each segment
(i.e. distance across which tephra thickness
halves).
dict["regressionLines"]:list of Lines -- Line objects representing each segment's
least squares regression line used to estimate
it's parameters.
dict["isopachs"]:list of Isopachs -- list of Isopachs analysed.
dict["numberOfSegments"]:int -- number of exponential segments.
dict["mrse"]:float -- the mean relative squared error of the model
"""
thicknessesM = [isopach.thicknessM for isopach in isopachs]
logThickness = [np.log(isopach.thicknessM) for isopach in isopachs]
sqrtAreasKM = [isopach.sqrtAreaKM for isopach in isopachs]
regressionLines, segmentLimits = regression_methods.calculateMultiLineRegression(sqrtAreasKM,logThickness,n)
segmentT0s = [np.exp(line.c) for line in regressionLines]
segmentKs = [-line.m for line in regressionLines]
segmentBts = [np.log(2)/(k*np.sqrt(np.pi)) for k in segmentKs]
segmentVolumes = []
segmentLimits[0], segmentLimits[-1] = 0, float("inf")
for i in range(n):
segmentVolumes.append(calculateExponentialSegmentVolume(segmentT0s[i],segmentKs[i],segmentLimits[i],segmentLimits[i+1]))
estimatedTotalVolume = sum(segmentVolumes)
def thicknessFunction(x):
for i in range(n):
if segmentLimits[i] <= x < segmentLimits[i+1]:
return segmentT0s[i]*np.exp(-segmentKs[i]*x)
raise ValueError("x (" + str(x) + ") is not in the domain of the function (0 to infinity)")
mrse = regression_methods.meanRelativeSquaredError(sqrtAreasKM, thicknessesM, thicknessFunction)
return {"estimatedTotalVolume" : estimatedTotalVolume,
"thicknessFunction" : thicknessFunction,
"segmentLimits" : segmentLimits,
"segmentVolumes" : segmentVolumes,
"segmentCoefficients" : segmentT0s,
"segmentExponents" : segmentKs,
"segmentBts" : segmentBts,
"regressionLines" : regressionLines,
"isopachs" : isopachs,
"numberOfSegments" : n,
"mrse" : mrse}
def calculateExponentialSegmentVolume(coefficient,exponent,startLimitKM,endLimitKM):
"""
Returns the volume for the segment of the deposit in km3.
"""
t1 = (2*coefficient)/(1000*exponent*exponent)
t2 = (startLimitKM*exponent+1)*np.exp(-exponent*startLimitKM)
t3 = (endLimitKM*exponent+1)*np.exp(-exponent*endLimitKM) if endLimitKM != float("inf") else 0
return t1*(t2-t3)
| [
"numpy.log",
"core.regression_methods.calculateMultiLineRegression",
"numpy.exp",
"core.regression_methods.meanRelativeSquaredError",
"numpy.sqrt"
] | [((2002, 2079), 'core.regression_methods.calculateMultiLineRegression', 'regression_methods.calculateMultiLineRegression', (['sqrtAreasKM', 'logThickness', 'n'], {}), '(sqrtAreasKM, logThickness, n)\n', (2049, 2079), False, 'from core import regression_methods\n'), ((2789, 2882), 'core.regression_methods.meanRelativeSquaredError', 'regression_methods.meanRelativeSquaredError', (['sqrtAreasKM', 'thicknessesM', 'thicknessFunction'], {}), '(sqrtAreasKM, thicknessesM,\n thicknessFunction)\n', (2832, 2882), False, 'from core import regression_methods\n'), ((1852, 1878), 'numpy.log', 'np.log', (['isopach.thicknessM'], {}), '(isopach.thicknessM)\n', (1858, 1878), True, 'import numpy as np\n'), ((2096, 2110), 'numpy.exp', 'np.exp', (['line.c'], {}), '(line.c)\n', (2102, 2110), True, 'import numpy as np\n'), ((3524, 3556), 'numpy.exp', 'np.exp', (['(-exponent * startLimitKM)'], {}), '(-exponent * startLimitKM)\n', (3530, 3556), True, 'import numpy as np\n'), ((2208, 2217), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2214, 2217), True, 'import numpy as np\n'), ((3586, 3616), 'numpy.exp', 'np.exp', (['(-exponent * endLimitKM)'], {}), '(-exponent * endLimitKM)\n', (3592, 3616), True, 'import numpy as np\n'), ((2221, 2235), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (2228, 2235), True, 'import numpy as np\n'), ((2659, 2684), 'numpy.exp', 'np.exp', (['(-segmentKs[i] * x)'], {}), '(-segmentKs[i] * x)\n', (2665, 2684), True, 'import numpy as np\n')] |
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from magenta.models.nsynth.wavenet import fastgen
import sys
# Change path back to /src to load other modules
sys.path.insert(0, '/home/ubuntu/DeepBass/src')
from ingestion.IO_utils import Load, Save
import streamlit as st
import time
"""Demo of NSynth embedding of audio and its reconstruction
User Parameters:
tlen (float): Amount of time for reconstruction
silence_len (float) : Skip this many seconds of the ending that is silent
output_dir (str) : Directory to save the reconstruction
model_dir (str) : Directory of the pretrained model (tf checkpoint)
Returns:
Streamlit notebook
Reconstructed audio in the form of a wav file
Notes:
sr must be 16 kHz per the model architecture
"""
# Directory where mp3 are stored.
AUDIO_DIR = '/home/ubuntu/DeepBass/data/raw/EDM_Test'
filenames = [f for f in listdir(AUDIO_DIR) if isfile(join(AUDIO_DIR, f))]
sr = 16000
# magenta also uses librosa for loading
x1, _ = Load(AUDIO_DIR, filenames[0], sr=sr)
# Take the last four seconds
t_len = 1
silence_len = 7
x1 = x1[:silence_len*sr]
x1 = x1[-sr*t_len:]
sample_length = x1.shape[0]
output_dir = '/home/ubuntu/DeepBass/src/notebooks/'
output_name = 'original_' + filenames[0] + '.wav'
Save(output_dir, output_name, x1, sr)
model_dir = '/home/ubuntu/DeepBass/src/notebooks/wavenet-ckpt/model.ckpt-200000'
# Create encoding
start = time.time()
encoding = fastgen.encode(x1, model_dir, sample_length)
end = time.time()
st.write('Encoding took ' + str((end-start)) + ' seconds')
st.write('Encoding shape ' + str(encoding.shape))
# Save encoding
np.save(filenames[0] + '.npy', encoding)
# Plot PCM and encoding
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
axs[0].plot(x1)
axs[0].set_title('Audio Signal')
axs[1].plot(encoding[0])
axs[1].set_title('NSynth Encoding')
st.pyplot()
# Decoding
start = time.time()
fastgen.synthesize(encoding, checkpoint_path = model_dir,
save_paths=['gen_' + filenames[0]],
samples_per_save=sample_length)
end = time.time()
st.write('Decoding took ' + str((end-start)) + ' seconds')
# Evaluate reconstruction
x1_gen, _ = Load(output_dir, 'gen_' + filenames[0], sr=sr)
fig, ax = plt.subplots(figsize=(10, 5))
ax.plot(x1_gen)
ax.set_title('Reconstructed Audio Signal')
st.pyplot()
| [
"numpy.save",
"os.path.join",
"ingestion.IO_utils.Load",
"magenta.models.nsynth.wavenet.fastgen.synthesize",
"sys.path.insert",
"time.time",
"magenta.models.nsynth.wavenet.fastgen.encode",
"streamlit.pyplot",
"matplotlib.pyplot.subplots",
"os.listdir",
"ingestion.IO_utils.Save"
] | [((217, 264), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/ubuntu/DeepBass/src"""'], {}), "(0, '/home/ubuntu/DeepBass/src')\n", (232, 264), False, 'import sys\n'), ((1059, 1095), 'ingestion.IO_utils.Load', 'Load', (['AUDIO_DIR', 'filenames[0]'], {'sr': 'sr'}), '(AUDIO_DIR, filenames[0], sr=sr)\n', (1063, 1095), False, 'from ingestion.IO_utils import Load, Save\n'), ((1328, 1365), 'ingestion.IO_utils.Save', 'Save', (['output_dir', 'output_name', 'x1', 'sr'], {}), '(output_dir, output_name, x1, sr)\n', (1332, 1365), False, 'from ingestion.IO_utils import Load, Save\n'), ((1475, 1486), 'time.time', 'time.time', ([], {}), '()\n', (1484, 1486), False, 'import time\n'), ((1498, 1542), 'magenta.models.nsynth.wavenet.fastgen.encode', 'fastgen.encode', (['x1', 'model_dir', 'sample_length'], {}), '(x1, model_dir, sample_length)\n', (1512, 1542), False, 'from magenta.models.nsynth.wavenet import fastgen\n'), ((1549, 1560), 'time.time', 'time.time', ([], {}), '()\n', (1558, 1560), False, 'import time\n'), ((1688, 1728), 'numpy.save', 'np.save', (["(filenames[0] + '.npy')", 'encoding'], {}), "(filenames[0] + '.npy', encoding)\n", (1695, 1728), True, 'import numpy as np\n'), ((1765, 1800), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(10, 5)'}), '(2, 1, figsize=(10, 5))\n', (1777, 1800), True, 'import matplotlib.pyplot as plt\n'), ((1911, 1922), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (1920, 1922), True, 'import streamlit as st\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((1955, 2082), 'magenta.models.nsynth.wavenet.fastgen.synthesize', 'fastgen.synthesize', (['encoding'], {'checkpoint_path': 'model_dir', 'save_paths': "['gen_' + filenames[0]]", 'samples_per_save': 'sample_length'}), "(encoding, checkpoint_path=model_dir, save_paths=['gen_' +\n filenames[0]], samples_per_save=sample_length)\n", (1973, 2082), False, 'from magenta.models.nsynth.wavenet import 
fastgen\n'), ((2127, 2138), 'time.time', 'time.time', ([], {}), '()\n', (2136, 2138), False, 'import time\n'), ((2237, 2283), 'ingestion.IO_utils.Load', 'Load', (['output_dir', "('gen_' + filenames[0])"], {'sr': 'sr'}), "(output_dir, 'gen_' + filenames[0], sr=sr)\n", (2241, 2283), False, 'from ingestion.IO_utils import Load, Save\n'), ((2294, 2323), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (2306, 2323), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2394), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (2392, 2394), True, 'import streamlit as st\n'), ((949, 967), 'os.listdir', 'listdir', (['AUDIO_DIR'], {}), '(AUDIO_DIR)\n', (956, 967), False, 'from os import listdir\n'), ((978, 996), 'os.path.join', 'join', (['AUDIO_DIR', 'f'], {}), '(AUDIO_DIR, f)\n', (982, 996), False, 'from os.path import isfile, join\n')] |
# -*- coding: utf-8 -*-
"""
HikeBooklet
author: georand
source: https://github.com/georand/hikebooklet
date: 2021
"""
import io, pathlib, logging
import requests
import numpy as np
from PIL import Image, ImageDraw, ImageColor, ImageFont
from config import *
logger=logging.getLogger(__name__)
class TileMap ():
  '''
  Build an opentopo map (OSM) image from downloaded/cached OSM tiles.
  see:
  https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
  http://tools.geofabrik.de/calc/
  '''

  def __init__(self, llBox, myCache, resolution = RESOLUTION):
    '''
    llBox : [{'lat':NW_lat,'lon':NW_lon},{'lat':SE_lat,'lon':SE_lon}] (in degree)
    resolution(nbPixels) : resolution of the larger side of the bounding box
                           in the resulting map image. The actual size of the
                           map image will therefore be larger.
    myCache : the cache directory for the OSM tiles
    '''
    self.mapImg = None
    self.cache = myCache

    # bug fix: the bounding box was read below (self.llBox) but never stored,
    # which made direct TileMap instantiation fail with an AttributeError
    self.llBox = llBox

    # zoom level of the OSM tiles
    self.zoom = self.getZoom(llBox, resolution)

    # compute map scales
    self.scales = self.getScales(llBox, self.zoom)

    # [NW, SE] OSM tile X,Y bounding box
    self.mapBBoxXY = self.getMapBBoxXY(self.llBox)

    # [NW, SE] tiled map lat,lon boundingBox
    self.mapBBoxLL = self.getMapBBoxLL(self.mapBBoxXY)

    # download the tiles and make a map
    self.mapImg = self.getMap()

  def getZoom(self, box, resolution):
    '''
    given lat/lon bounding box and an image resolution
    return the OSM tile zoom level
    see https://wiki.openstreetmap.org/wiki/Zoom_levels
    '''
    N = resolution/256 # pixel tiles are 256x256
    # zoom derived from the Mercator y-extent of the box
    a = np.abs( np.arcsinh(np.tan(np.radians(box[0]['lat']))) \
              - np.arcsinh(np.tan(np.radians(box[1]['lat']))) )
    zLat = np.log(N*np.pi/a)/np.log(2)+1
    # zoom derived from the longitude extent of the box
    b = np.abs(np.radians(box[1]['lon']) - np.radians(box[0]['lon']))
    zLon = np.log(N*np.pi/b)/np.log(2)+1
    # keep the smaller zoom so both extents fit within the resolution
    return int(min(zLat,zLon))

  def getScales(self, box, zoom):
    '''
    given lat/lon bounding box
    compute scales (meter/pixel, lat/pixel, lon/pixel) for the map encompassing the given box
    see https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Resolution_and_Scale
    '''
    middle = {'lat':(box[0]['lat'] + box[1]['lat'])/2.,
              'lon':(box[0]['lon'] + box[1]['lon'])/2.}
    # bug fix: np.cos expects radians; the OSM ground-resolution formula is
    # 156543.03 * cos(latitude) / 2^zoom with latitude converted to radians
    meterPerPixel = 156543.03 * np.cos(np.radians(middle['lat'])) / (2**zoom)
    # derive the degree-per-pixel scales from one tile around the box center
    tile = self.LLToXY(middle)
    TL = self.XYToLL(tile)
    BR = self.XYToLL({'x':tile['x']+1, 'y':tile['y']+1})
    latPerPixel = (TL['lat']-BR['lat'])/256
    lonPerPixel = (BR['lon']-TL['lon'])/256
    return {'meterPerPixel':meterPerPixel, 'latPerPixel':latPerPixel, 'lonPerPixel':lonPerPixel}

  def getMapBBoxXY(self, box):
    '''
    given lat/lon bounding box
    return the OSM tile_X,tile_Y bounding box of the tiled map
    '''
    TL = self.LLToXY(box[0])
    BR = self.LLToXY(box[1])
    # extend one tile so the SE corner is fully covered
    BR = {'x':BR['x']+1, 'y':BR['y']+1}
    return [TL, BR]

  def getMapBBoxLL(self, box):
    '''
    given OSM tile_X,tile_Y bounding box of the tiled map
    return the lat,lon bounding box of the tiled map
    '''
    TL = self.XYToLL(box[0])
    BR = self.XYToLL(box[1])
    return [TL, BR]

  def LLToXY(self, latlon, zoom = None):
    '''
    return tile_X and tile_Y containing the given coordinates at zoom level
    (defaults to the map zoom)
    '''
    if not zoom:
      zoom = self.zoom
    lat_rad = np.radians(latlon['lat'])
    n = 2.0 ** zoom
    x = int((latlon['lon'] + 180.0) / 360.0 * n)
    y = int((1.0 - np.arcsinh(np.tan(lat_rad)) / np.pi) / 2.0 * n)
    return {'x':x, 'y':y}

  def XYToLL(self, xyTile, zoom = None):
    '''
    return the coordinates (lat,lon) of the given tile upper left corner
    (defaults to the map zoom)
    '''
    if not zoom:
      zoom = self.zoom
    n = 2.0 ** zoom
    lon_deg = xyTile['x'] / n * 360.0 - 180.0
    lat_rad = np.arctan(np.sinh(np.pi * (1 - 2 * xyTile['y'] / n)))
    lat_deg = lat_rad * 180.0 / np.pi
    return {'lat':lat_deg, 'lon':lon_deg}

  def getMap(self):
    '''
    build the map using using tiles downloaded from openTopoMap
    and a cache directory
    '''
    dx = self.mapBBoxXY[1]['x'] - self.mapBBoxXY[0]['x']
    dy = self.mapBBoxXY[1]['y'] - self.mapBBoxXY[0]['y']
    mapImg = Image.new('RGB', (dx*256, dy*256))
    logger.info(f'retrieving {dx*dy} OpenTopoMap tiles at scale {self.zoom}')
    # get the tiles and paste them in the image
    for i in range(0, dx):
      for j in range(0, dy):
        x = self.mapBBoxXY[0]['x'] + i
        y = self.mapBBoxXY[0]['y'] + j
        # try to load tile from cache
        tileCacheFilename = f'OTM-{self.zoom}-{x}-{y}.png'
        tile = self.cache.loadData(tileCacheFilename)
        # if not present in the cache, download the tile and update the cache
        if not tile:
          logger.info(f'downloading OpenTopoMap tile {tileCacheFilename}')
          try:
            with requests.Session() as s:
              r = s.get(URL_MAP.format(self.zoom,x,y))
              r.raise_for_status()
              tile = r.content
              self.cache.saveData(tileCacheFilename, tile)
          except requests.exceptions.HTTPError as e:
            logger.error(f'unable to download OpenTopoMap tile {self.zoom}-{x}-{y} from server (HTTP error: {e.response.status_code})')
            print(e)
            # best effort: leave a blank tile in the map and carry on
            continue
        # paste the tile image in the map
        imgTile = Image.open(io.BytesIO(tile))
        mapImg.paste(imgTile, (i*imgTile.size[0], j*imgTile.size[1]))
        del imgTile
    return mapImg

  def cropMap(self, box):
    '''
    extract the portion of the map corresponding to the given box (lat,lon)
    '''
    # compute the coordinate of the (lat,lon) box in the map
    nBox = ((box[0]['lon'] - self.mapBBoxLL[0]['lon']) / self.scales['lonPerPixel'],
            (self.mapBBoxLL[0]['lat'] - box[0]['lat']) / self.scales['latPerPixel'],
            (box[1]['lon'] - self.mapBBoxLL[0]['lon']) / self.scales['lonPerPixel'],
            (self.mapBBoxLL[0]['lat'] - box[1]['lat']) / self.scales['latPerPixel'])
    return self.mapImg.crop(nBox)

  def drawScale(self):
    '''
    draw a distance scale bar in the bottom-right corner of the map
    '''
    draw = ImageDraw.Draw(self.mapImg)
    # compute the best scale representation based on the image m/px scale:
    # the largest power of ten (in meters) fitting in ~targetedPixelLength px
    meterPerPixel = self.scales['meterPerPixel']
    targetedPixelLength = 150
    scaleInMeters = int(np.power(10, int(np.log(targetedPixelLength * meterPerPixel) \
                                         / np.log(10))))
    scaleInPixels = int(meterPerPixel * targetedPixelLength / scaleInMeters) \
                    * scaleInMeters / meterPerPixel
    # draw scale segment
    p1 = (self.mapImg.size[0]-20, self.mapImg.size[1]-20)
    p0 = (p1[0]-scaleInPixels, p1[1])
    draw.line([p0,p1], fill=(0,0,0), width=2)
    # write scale label (meters below 1km, kilometers above)
    p = pathlib.Path(__file__).parent
    font = ImageFont.truetype(str(p.joinpath('fonts/FreeMonoBold.ttf')), 20)
    if scaleInMeters < 1000:
      text = f'{scaleInMeters}m'
    else:
      text = f'{scaleInMeters/1000:.1f}km'
    draw.text((p1[0],p1[1] - 4), text, anchor='rb', font=font, fill=(0,0,0))
    del draw
class GPXMap (TileMap):
  '''
  Opentopo map (OSM) image with one or several GPX tracks drawn on top.
  '''

  def __init__(self, gpx, trackNum = None, resolution = RESOLUTION, cache = None):
    '''
    gpx : the parsed GPX object providing tracks and their bounding box
    trackNum : plot only the given track, or every track when None
    resolution(nbPixels) : resolution of the larger side of the resulting
                           map image (depends on the lat/lon bounding box)
    cache : the cache directory for the OSM tiles
    '''
    self.mapImg = None
    self.gpx = gpx
    self.resolution = resolution
    self.cache = cache
    # square lat/lon bounding box of the requested track(s)
    self.llBox = gpx.getBoundingBox(trackNum)
    # OSM tile zoom level matching the requested resolution
    self.zoom = self.getZoom(self.llBox, resolution)
    # map scales (meter/pixel, lat/pixel, lon/pixel)
    self.scales = self.getScales(self.llBox, self.zoom)
    # enlarge the box so the final crop is a resolution x resolution
    # square with the track centered
    self.llBox = self.reshapeBBox(self.llBox)
    # [NW, SE] OSM tile X,Y bounding box
    self.mapBBoxXY = self.getMapBBoxXY(self.llBox)
    # [NW, SE] tiled map lat,lon bounding box
    self.mapBBoxLL = self.getMapBBoxLL(self.mapBBoxXY)
    # download the tiles and assemble the map image
    self.mapImg = self.getMap()
    # overlay the GPX track(s)
    self.drawGPX(trackNum)
    # crop the map to the reshaped bounding box
    self.mapImg = self.cropMap(self.llBox)
    # add the distance scale bar
    self.drawScale()

  def reshapeBBox(self, box):
    '''
    Expand the given lat/lon box symmetrically so that it maps onto a
    square resolution x resolution image with the original box centered.
    '''
    # remaining pixels on each side of the box, split evenly
    padX = (self.resolution - (box[1]['lon'] - box[0]['lon']) / self.scales['lonPerPixel']) / 2
    padY = (self.resolution - (box[0]['lat'] - box[1]['lat']) / self.scales['latPerPixel']) / 2
    # convert the pixel padding back to degrees
    lonPad = padX * self.scales['lonPerPixel']
    latPad = padY * self.scales['latPerPixel']
    return [{'lat': box[0]['lat'] + latPad, 'lon': box[0]['lon'] - lonPad},
            {'lat': box[1]['lat'] - latPad, 'lon': box[1]['lon'] + lonPad}]

  def drawGPX(self, trackNum):
    '''
    Plot the selected GPX track(s) on the map image,
    cycling through COLORS from one track to the next.
    '''
    if trackNum is not None:
      tracks = [self.gpx.tracks[trackNum]]
    else:
      tracks = self.gpx.tracks
    draw = ImageDraw.Draw(self.mapImg)
    # convert each track's points from lat/lon to map pixel coordinates
    pixelTracks = []
    for track in tracks:
      pixelTracks.append(
        [((tp['lon'] - self.mapBBoxLL[0]['lon']) / self.scales['lonPerPixel'],
          (self.mapBBoxLL[0]['lat'] - tp['lat']) / self.scales['latPerPixel'])
         for tp in track['trackPoints']])
    # draw the segments joining consecutive points
    color = 0
    for pts in pixelTracks:
      prev = pts[0]
      for cur in pts[1:]:
        draw.line([prev, cur], fill=COLORS[color], width=5)
        prev = cur
      color = (color + 1) % len(COLORS)
    # overlay a small disc on every point (next color in the cycle)
    color = 1
    radius = 1
    for pts in pixelTracks:
      for px, py in pts:
        draw.ellipse([(px - radius, py - radius), (px + radius, py + radius)],
                     fill=COLORS[color])
      color = (color + 1) % len(COLORS)
    del draw
| [
"numpy.radians",
"PIL.Image.new",
"io.BytesIO",
"numpy.log",
"requests.Session",
"pathlib.Path",
"numpy.tan",
"numpy.cos",
"PIL.ImageDraw.Draw",
"numpy.sinh",
"logging.getLogger"
] | [((273, 300), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (290, 300), False, 'import io, pathlib, logging\n'), ((3374, 3399), 'numpy.radians', 'np.radians', (["latlon['lat']"], {}), "(latlon['lat'])\n", (3384, 3399), True, 'import numpy as np\n'), ((4204, 4242), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(dx * 256, dy * 256)'], {}), "('RGB', (dx * 256, dy * 256))\n", (4213, 4242), False, 'from PIL import Image, ImageDraw, ImageColor, ImageFont\n'), ((6085, 6112), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.mapImg'], {}), '(self.mapImg)\n', (6099, 6112), False, 'from PIL import Image, ImageDraw, ImageColor, ImageFont\n'), ((9225, 9252), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.mapImg'], {}), '(self.mapImg)\n', (9239, 9252), False, 'from PIL import Image, ImageDraw, ImageColor, ImageFont\n'), ((3824, 3866), 'numpy.sinh', 'np.sinh', (["(np.pi * (1 - 2 * xyTile['y'] / n))"], {}), "(np.pi * (1 - 2 * xyTile['y'] / n))\n", (3831, 3866), True, 'import numpy as np\n'), ((6737, 6759), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (6749, 6759), False, 'import io, pathlib, logging\n'), ((1771, 1792), 'numpy.log', 'np.log', (['(N * np.pi / a)'], {}), '(N * np.pi / a)\n', (1777, 1792), True, 'import numpy as np\n'), ((1789, 1798), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1795, 1798), True, 'import numpy as np\n'), ((1817, 1842), 'numpy.radians', 'np.radians', (["box[1]['lon']"], {}), "(box[1]['lon'])\n", (1827, 1842), True, 'import numpy as np\n'), ((1845, 1870), 'numpy.radians', 'np.radians', (["box[0]['lon']"], {}), "(box[0]['lon'])\n", (1855, 1870), True, 'import numpy as np\n'), ((1883, 1904), 'numpy.log', 'np.log', (['(N * np.pi / b)'], {}), '(N * np.pi / b)\n', (1889, 1904), True, 'import numpy as np\n'), ((1901, 1910), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1907, 1910), True, 'import numpy as np\n'), ((2351, 2372), 'numpy.cos', 'np.cos', (["middle['lat']"], {}), "(middle['lat'])\n", 
(2357, 2372), True, 'import numpy as np\n'), ((5368, 5384), 'io.BytesIO', 'io.BytesIO', (['tile'], {}), '(tile)\n', (5378, 5384), False, 'import io, pathlib, logging\n'), ((1665, 1690), 'numpy.radians', 'np.radians', (["box[0]['lat']"], {}), "(box[0]['lat'])\n", (1675, 1690), True, 'import numpy as np\n'), ((1730, 1755), 'numpy.radians', 'np.radians', (["box[1]['lat']"], {}), "(box[1]['lat'])\n", (1740, 1755), True, 'import numpy as np\n'), ((6308, 6351), 'numpy.log', 'np.log', (['(targetedPixelLength * meterPerPixel)'], {}), '(targetedPixelLength * meterPerPixel)\n', (6314, 6351), True, 'import numpy as np\n'), ((6397, 6407), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (6403, 6407), True, 'import numpy as np\n'), ((4860, 4878), 'requests.Session', 'requests.Session', ([], {}), '()\n', (4876, 4878), False, 'import requests\n'), ((3499, 3514), 'numpy.tan', 'np.tan', (['lat_rad'], {}), '(lat_rad)\n', (3505, 3514), True, 'import numpy as np\n')] |
import pytest
from _plotly_utils.basevalidators import InfoArrayValidator, type_str
import numpy as np
# Fixtures
# --------
@pytest.fixture()
def validator_any2():
    """Validator expecting a pair of values of any type."""
    items = [{'valType': 'any'}, {'valType': 'any'}]
    return InfoArrayValidator('prop', 'parent', items=items)
@pytest.fixture()
def validator_number3():
    """Validator expecting exactly three numbers, each in [0, 1]."""
    number_item = {'valType': 'number', 'min': 0, 'max': 1}
    return InfoArrayValidator(
        'prop', 'parent',
        items=[dict(number_item) for _ in range(3)])
@pytest.fixture()
def validator_number3_free():
    """Validator expecting up to three numbers in [0, 1] (free length)."""
    number_item = {'valType': 'number', 'min': 0, 'max': 1}
    return InfoArrayValidator(
        'prop', 'parent',
        items=[dict(number_item) for _ in range(3)],
        free_length=True)
# Any2 Tests
# ----------
# ### Acceptance ###
@pytest.mark.parametrize('val', [
    [1, 'A'], ('hello', 'world!'), [1, ()], [-1, 1]
])
def test_validator_acceptance_any2(val, validator_any2: InfoArrayValidator):
    # any 2-element list/tuple is coerced to a list and presented as a tuple
    result = validator_any2.validate_coerce(val)
    assert list(val) == result
    assert tuple(val) == validator_any2.present(result)
# ### Rejection by type ###
@pytest.mark.parametrize('val', [
    'Not a list', 123, set(), {}
])
def test_validator_rejection_any2_type(val, validator_any2: InfoArrayValidator):
    # non-sequence inputs must be rejected with a type-specific message
    with pytest.raises(ValueError) as err:
        validator_any2.validate_coerce(val)
    assert 'must be a list or tuple.' in str(err.value)
# ### Rejection by length ###
@pytest.mark.parametrize('val', [
    [0, 1, 'A'], ('hello', 'world', '!'), [None, {}, []], [-1, 1, 9]
])
def test_validator_rejection_any2_length(val, validator_any2: InfoArrayValidator):
    # sequences with more than 2 elements must be rejected
    with pytest.raises(ValueError) as err:
        validator_any2.validate_coerce(val)
    assert 'Invalid value' in str(err.value)
# Number3 Tests
# -------------
# ### Acceptance ###
@pytest.mark.parametrize('val', [
    [1, 0, 0.5], (0.1, 0.4, 0.99), [1, 1, 0]
])
def test_validator_acceptance_number3(val, validator_number3: InfoArrayValidator):
    # valid 3-number sequences are coerced to a list, presented as a tuple
    result = validator_number3.validate_coerce(val)
    assert list(val) == result
    assert tuple(val) == validator_number3.present(result)
# ### Rejection by length ###
@pytest.mark.parametrize('val', [
    [1, 0], (0.1, 0.4, 0.99, 0.4), [1]
])
def test_validator_rejection_number3_length(val, validator_number3: InfoArrayValidator):
    # sequences whose length is not exactly 3 must be rejected
    with pytest.raises(ValueError) as err:
        validator_number3.validate_coerce(val)
    assert 'must be a list or tuple of length 3.' in str(err.value)
# ### Rejection by element type ###
@pytest.mark.parametrize('val,first_invalid_ind', [
    ([1, 0, '0.5'], 2),
    ((0.1, set(), 0.99), 1),
    ([[], '2', {}], 0)
])
def test_validator_rejection_number3_element_type(val, first_invalid_ind, validator_number3: InfoArrayValidator):
    # Renamed: this function previously reused the name
    # test_validator_rejection_number3_length, shadowing the length test
    # above so pytest never collected it. It checks rejection by element type.
    with pytest.raises(ValueError) as validation_failure:
        validator_number3.validate_coerce(val)

    assert 'The prop[%d] property of parent must be a number.' % first_invalid_ind in str(validation_failure.value)
# ### Rejection by element value ###
# Elements must be in [0, 1]
@pytest.mark.parametrize('val,first_invalid_ind', [
    ([1, 0, 1.5], 2),
    ((0.1, -0.4, 0.99), 1),
    ([-1, 1, 0], 0)
])
def test_validator_rejection_number3_element_value(val, first_invalid_ind, validator_number3: InfoArrayValidator):
    # Renamed: this function previously reused the name
    # test_validator_rejection_number3_length, shadowing earlier tests so
    # pytest never collected them. It checks rejection of out-of-range
    # elements (values must lie in [0, 1]).
    with pytest.raises(ValueError) as validation_failure:
        validator_number3.validate_coerce(val)

    assert ('The prop[%d] property of parent must be in the range [0, 1]' % first_invalid_ind
            in str(validation_failure.value))
# Number3 Tests (free_length=True)
# --------------------------------
# ### Acceptance ###
@pytest.mark.parametrize('val', [
    [1, 0, 0.5],
    (0.1, 0.99),
    np.array([0.1, 0.99]),
    [0], []
])
def test_validator_acceptance_number3_free(val, validator_number3_free: InfoArrayValidator):
    # with free_length=True any prefix of the 3 items (including empty) is valid
    result = validator_number3_free.validate_coerce(val)
    assert list(val) == result
    assert tuple(val) == validator_number3_free.present(result)
# ### Rejection by type ###
@pytest.mark.parametrize('val', [
    'Not a list', 123, set(), {}
])
def test_validator_rejection_number3_free_type(val, validator_number3_free: InfoArrayValidator):
    # Renamed: this function previously reused the name
    # test_validator_rejection_any2_type, shadowing the any2 test above so
    # pytest never collected it; it actually exercises the number3_free fixture.
    with pytest.raises(ValueError) as validation_failure:
        validator_number3_free.validate_coerce(val)

    assert 'Invalid value' in str(validation_failure.value)
# ### Rejection by length ###
@pytest.mark.parametrize('val', [
    (0.1, 0.4, 0.99, 0.4), [1, 0, 0, 0, 0, 0, 0]
])
def test_validator_rejection_number3_free_length(val, validator_number3_free: InfoArrayValidator):
    # even with free_length=True, sequences longer than 3 must be rejected
    with pytest.raises(ValueError) as err:
        validator_number3_free.validate_coerce(val)
    assert 'Invalid value' in str(err.value)
# ### Rejection by element type ###
@pytest.mark.parametrize('val,first_invalid_ind', [
    ([1, 0, '0.5'], 2),
    ((0.1, set()), 1),
    ([[]], 0)
])
def test_validator_rejection_number3_free_element_type(val, first_invalid_ind, validator_number3_free: InfoArrayValidator):
    # Renamed: this function previously reused the name
    # test_validator_rejection_number3_length, shadowing earlier tests so
    # pytest never collected them. It checks element-type rejection for the
    # free-length validator.
    with pytest.raises(ValueError) as validation_failure:
        validator_number3_free.validate_coerce(val)

    assert ("Invalid value of type {typ} received for the 'prop[{first_invalid_ind}]' property of parent"
            .format(typ= type_str(val[first_invalid_ind]),
                    first_invalid_ind=first_invalid_ind)) in str(validation_failure.value)
| [
"_plotly_utils.basevalidators.InfoArrayValidator",
"_plotly_utils.basevalidators.type_str",
"pytest.fixture",
"pytest.raises",
"numpy.array",
"pytest.mark.parametrize"
] | [((128, 144), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (142, 144), False, 'import pytest\n'), ((266, 282), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (280, 282), False, 'import pytest\n'), ((521, 537), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (535, 537), False, 'import pytest\n'), ((846, 931), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "[[1, 'A'], ('hello', 'world!'), [1, ()], [-1, 1]]"], {}), "('val', [[1, 'A'], ('hello', 'world!'), [1, ()], [-1,\n 1]])\n", (869, 931), False, 'import pytest\n'), ((1547, 1649), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', "[[0, 1, 'A'], ('hello', 'world', '!'), [None, {}, []], [-1, 1, 9]]"], {}), "('val', [[0, 1, 'A'], ('hello', 'world', '!'), [None,\n {}, []], [-1, 1, 9]])\n", (1570, 1649), False, 'import pytest\n'), ((1954, 2028), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[[1, 0, 0.5], (0.1, 0.4, 0.99), [1, 1, 0]]'], {}), "('val', [[1, 0, 0.5], (0.1, 0.4, 0.99), [1, 1, 0]])\n", (1977, 2028), False, 'import pytest\n'), ((2305, 2373), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[[1, 0], (0.1, 0.4, 0.99, 0.4), [1]]'], {}), "('val', [[1, 0], (0.1, 0.4, 0.99, 0.4), [1]])\n", (2328, 2373), False, 'import pytest\n'), ((3230, 3344), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val,first_invalid_ind"""', '[([1, 0, 1.5], 2), ((0.1, -0.4, 0.99), 1), ([-1, 1, 0], 0)]'], {}), "('val,first_invalid_ind', [([1, 0, 1.5], 2), ((0.1, \n -0.4, 0.99), 1), ([-1, 1, 0], 0)])\n", (3253, 3344), False, 'import pytest\n'), ((4561, 4639), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""val"""', '[(0.1, 0.4, 0.99, 0.4), [1, 0, 0, 0, 0, 0, 0]]'], {}), "('val', [(0.1, 0.4, 0.99, 0.4), [1, 0, 0, 0, 0, 0, 0]])\n", (4584, 4639), False, 'import pytest\n'), ((178, 266), '_plotly_utils.basevalidators.InfoArrayValidator', 'InfoArrayValidator', (['"""prop"""', '"""parent"""'], {'items': 
"[{'valType': 'any'}, {'valType': 'any'}]"}), "('prop', 'parent', items=[{'valType': 'any'}, {'valType':\n 'any'}])\n", (196, 266), False, 'from _plotly_utils.basevalidators import InfoArrayValidator, type_str\n'), ((319, 500), '_plotly_utils.basevalidators.InfoArrayValidator', 'InfoArrayValidator', (['"""prop"""', '"""parent"""'], {'items': "[{'valType': 'number', 'min': 0, 'max': 1}, {'valType': 'number', 'min': 0,\n 'max': 1}, {'valType': 'number', 'min': 0, 'max': 1}]"}), "('prop', 'parent', items=[{'valType': 'number', 'min': 0,\n 'max': 1}, {'valType': 'number', 'min': 0, 'max': 1}, {'valType':\n 'number', 'min': 0, 'max': 1}])\n", (337, 500), False, 'from _plotly_utils.basevalidators import InfoArrayValidator, type_str\n'), ((579, 778), '_plotly_utils.basevalidators.InfoArrayValidator', 'InfoArrayValidator', (['"""prop"""', '"""parent"""'], {'items': "[{'valType': 'number', 'min': 0, 'max': 1}, {'valType': 'number', 'min': 0,\n 'max': 1}, {'valType': 'number', 'min': 0, 'max': 1}]", 'free_length': '(True)'}), "('prop', 'parent', items=[{'valType': 'number', 'min': 0,\n 'max': 1}, {'valType': 'number', 'min': 0, 'max': 1}, {'valType':\n 'number', 'min': 0, 'max': 1}], free_length=True)\n", (597, 778), False, 'from _plotly_utils.basevalidators import InfoArrayValidator, type_str\n'), ((1349, 1374), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1362, 1374), False, 'import pytest\n'), ((1744, 1769), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1757, 1769), False, 'import pytest\n'), ((2478, 2503), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2491, 2503), False, 'import pytest\n'), ((2944, 2969), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2957, 2969), False, 'import pytest\n'), ((3471, 3496), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3484, 3496), False, 'import pytest\n'), ((3873, 3894), 'numpy.array', 
'np.array', (['[0.1, 0.99]'], {}), '([0.1, 0.99])\n', (3881, 3894), True, 'import numpy as np\n'), ((4366, 4391), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4379, 4391), False, 'import pytest\n'), ((4754, 4779), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4767, 4779), False, 'import pytest\n'), ((5192, 5217), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5205, 5217), False, 'import pytest\n'), ((5425, 5457), '_plotly_utils.basevalidators.type_str', 'type_str', (['val[first_invalid_ind]'], {}), '(val[first_invalid_ind])\n', (5433, 5457), False, 'from _plotly_utils.basevalidators import InfoArrayValidator, type_str\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import operator
import itertools
import numpy as np
from zarr.util import is_total_slice, normalize_array_selection, \
get_chunk_range, human_readable_size, normalize_resize_args, \
normalize_storage_path, normalize_shape, normalize_chunks
from zarr.storage import array_meta_key, attrs_key, listdir, getsize
from zarr.meta import decode_array_metadata, encode_array_metadata
from zarr.attrs import Attributes
from zarr.errors import PermissionError, err_read_only, err_array_not_found
from zarr.compat import reduce
from zarr.codecs import AsType, get_codec
class Array(object):
"""Instantiate an array from an initialized store.
Parameters
----------
store : MutableMapping
Array store, already initialized.
path : string, optional
Storage path.
read_only : bool, optional
True if array should be protected against modification.
chunk_store : MutableMapping, optional
Separate storage for chunks. If not provided, `store` will be used
for storage of both chunks and metadata.
synchronizer : object, optional
Array synchronizer.
cache_metadata : bool, optional
If True, array configuration metadata will be cached for the
lifetime of the object. If False, array metadata will be reloaded
prior to all data access and modification operations (may incur
overhead depending on storage and data access pattern).
Attributes
----------
store
path
name
read_only
chunk_store
shape
chunks
dtype
compression
compression_opts
fill_value
order
synchronizer
filters
attrs
size
itemsize
nbytes
nbytes_stored
cdata_shape
nchunks
nchunks_initialized
is_view
Methods
-------
__getitem__
__setitem__
resize
append
view
astype
""" # flake8: noqa
def __init__(self, store, path=None, read_only=False, chunk_store=None,
synchronizer=None, cache_metadata=True):
# N.B., expect at this point store is fully initialized with all
# configuration metadata fully specified and normalized
self._store = store
self._path = normalize_storage_path(path)
if self._path:
self._key_prefix = self._path + '/'
else:
self._key_prefix = ''
self._read_only = read_only
if chunk_store is None:
self._chunk_store = store
else:
self._chunk_store = chunk_store
self._synchronizer = synchronizer
self._cache_metadata = cache_metadata
self._is_view = False
# initialize metadata
self._load_metadata()
# initialize attributes
akey = self._key_prefix + attrs_key
self._attrs = Attributes(store, key=akey, read_only=read_only,
synchronizer=synchronizer)
def _load_metadata(self):
"""(Re)load metadata from store."""
if self._synchronizer is None:
self._load_metadata_nosync()
else:
mkey = self._key_prefix + array_meta_key
with self._synchronizer[mkey]:
self._load_metadata_nosync()
def _load_metadata_nosync(self):
try:
mkey = self._key_prefix + array_meta_key
meta_bytes = self._store[mkey]
except KeyError:
err_array_not_found(self._path)
else:
# decode and store metadata
meta = decode_array_metadata(meta_bytes)
self._meta = meta
self._shape = meta['shape']
self._chunks = meta['chunks']
self._dtype = meta['dtype']
self._fill_value = meta['fill_value']
self._order = meta['order']
# setup compressor
config = meta['compressor']
if config is None:
self._compressor = None
else:
self._compressor = get_codec(config)
# setup filters
filters = meta['filters']
if filters:
filters = [get_codec(config) for config in filters]
self._filters = filters
def _refresh_metadata(self):
if not self._cache_metadata:
self._load_metadata()
def _refresh_metadata_nosync(self):
if not self._cache_metadata and not self._is_view:
self._load_metadata_nosync()
def _flush_metadata_nosync(self):
if self._is_view:
raise PermissionError('not permitted for views')
if self._compressor:
compressor_config = self._compressor.get_config()
else:
compressor_config = None
if self._filters:
filters_config = [f.get_config() for f in self._filters]
else:
filters_config = None
meta = dict(shape=self._shape, chunks=self._chunks, dtype=self._dtype,
compressor=compressor_config, fill_value=self._fill_value,
order=self._order, filters=filters_config)
mkey = self._key_prefix + array_meta_key
self._store[mkey] = encode_array_metadata(meta)
@property
def store(self):
"""A MutableMapping providing the underlying storage for the array."""
return self._store
@property
def path(self):
"""Storage path."""
return self._path
@property
def name(self):
"""Array name following h5py convention."""
if self.path:
# follow h5py convention: add leading slash
name = self.path
if name[0] != '/':
name = '/' + name
return name
return None
@property
def read_only(self):
"""A boolean, True if modification operations are not permitted."""
return self._read_only
@property
def chunk_store(self):
"""A MutableMapping providing the underlying storage for array
chunks."""
return self._chunk_store
@property
def shape(self):
"""A tuple of integers describing the length of each dimension of
the array."""
# N.B., shape may change if array is resized, hence need to refresh
# metadata
self._refresh_metadata()
return self._shape
@shape.setter
def shape(self, value):
self.resize(value)
@property
def chunks(self):
"""A tuple of integers describing the length of each dimension of a
chunk of the array."""
return self._chunks
@property
def dtype(self):
"""The NumPy data type."""
return self._dtype
@property
def compressor(self):
"""Primary compression codec."""
return self._compressor
@property
def fill_value(self):
"""A value used for uninitialized portions of the array."""
return self._fill_value
@property
def order(self):
"""A string indicating the order in which bytes are arranged within
chunks of the array."""
return self._order
@property
def filters(self):
"""One or more codecs used to transform data prior to compression."""
return self._filters
@property
def synchronizer(self):
"""Object used to synchronize write access to the array."""
return self._synchronizer
@property
def attrs(self):
"""A MutableMapping containing user-defined attributes. Note that
attribute values must be JSON serializable."""
return self._attrs
@property
def ndim(self):
"""Number of dimensions."""
return len(self.shape)
@property
def _size(self):
return reduce(operator.mul, self._shape)
@property
def size(self):
"""The total number of elements in the array."""
# N.B., this property depends on shape, and shape may change if array
# is resized, hence need to refresh metadata
self._refresh_metadata()
return self._size
@property
def itemsize(self):
"""The size in bytes of each item in the array."""
return self.dtype.itemsize
@property
def _nbytes(self):
return self._size * self.itemsize
@property
def nbytes(self):
"""The total number of bytes that would be required to store the
array without compression."""
# N.B., this property depends on shape, and shape may change if array
# is resized, hence need to refresh metadata
self._refresh_metadata()
return self._nbytes
@property
def nbytes_stored(self):
"""The total number of stored bytes of data for the array. This
includes storage required for configuration metadata and user
attributes."""
m = getsize(self._store, self._path)
if self._store == self._chunk_store:
return m
else:
n = getsize(self._chunk_store, self._path)
if m < 0 or n < 0:
return -1
else:
return m + n
@property
def _cdata_shape(self):
return tuple(int(np.ceil(s / c))
for s, c in zip(self._shape, self._chunks))
@property
def cdata_shape(self):
"""A tuple of integers describing the number of chunks along each
dimension of the array."""
self._refresh_metadata()
return self._cdata_shape
@property
def _nchunks(self):
return reduce(operator.mul, self._cdata_shape)
@property
def nchunks(self):
"""Total number of chunks."""
self._refresh_metadata()
return self._nchunks
@property
def nchunks_initialized(self):
"""The number of chunks that have been initialized with some data."""
return sum(1 for k in listdir(self._chunk_store, self._path)
if k not in [array_meta_key, attrs_key])
# backwards compability
initialized = nchunks_initialized
@property
def is_view(self):
"""A boolean, True if this array is a view on another array."""
return self._is_view
def __eq__(self, other):
return (
isinstance(other, Array) and
self.store == other.store and
self.read_only == other.read_only and
self.path == other.path and
not self._is_view
# N.B., no need to compare other properties, should be covered by
# store comparison
)
def __array__(self, *args):
a = self[:]
if args:
a = a.astype(args[0])
return a
def __len__(self):
return self.shape[0]
def __getitem__(self, item):
"""Retrieve data for some portion of the array. Most NumPy-style
slicing operations are supported.
Returns
-------
out : ndarray
A NumPy array containing the data for the requested region.
Examples
--------
Setup a 1-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100000000), chunks=1000000, dtype='i4')
>>> z
Array((100000000,), int32, chunks=(1000000,), order=C)
nbytes: 381.5M; nbytes_stored: 6.4M; ratio: 59.9; initialized: 100/100
compressor: Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
store: dict
Take some slices::
>>> z[5]
5
>>> z[:5]
array([0, 1, 2, 3, 4], dtype=int32)
>>> z[-5:]
array([99999995, 99999996, 99999997, 99999998, 99999999], dtype=int32)
>>> z[5:10]
array([5, 6, 7, 8, 9], dtype=int32)
>>> z[:]
array([ 0, 1, 2, ..., 99999997, 99999998, 99999999], dtype=int32)
Setup a 2-dimensional array::
>>> import zarr
>>> import numpy as np
>>> z = zarr.array(np.arange(100000000).reshape(10000, 10000),
... chunks=(1000, 1000), dtype='i4')
>>> z
Array((10000, 10000), int32, chunks=(1000, 1000), order=C)
nbytes: 381.5M; nbytes_stored: 9.2M; ratio: 41.5; initialized: 100/100
compressor: Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
store: dict
Take some slices::
>>> z[2, 2]
20002
>>> z[:2, :2]
array([[ 0, 1],
[10000, 10001]], dtype=int32)
>>> z[:2]
array([[ 0, 1, 2, ..., 9997, 9998, 9999],
[10000, 10001, 10002, ..., 19997, 19998, 19999]], dtype=int32)
>>> z[:, :2]
array([[ 0, 1],
[ 10000, 10001],
[ 20000, 20001],
...,
[99970000, 99970001],
[99980000, 99980001],
[99990000, 99990001]], dtype=int32)
>>> z[:]
array([[ 0, 1, 2, ..., 9997, 9998, 9999],
[ 10000, 10001, 10002, ..., 19997, 19998, 19999],
[ 20000, 20001, 20002, ..., 29997, 29998, 29999],
...,
[99970000, 99970001, 99970002, ..., 99979997, 99979998, 99979999],
[99980000, 99980001, 99980002, ..., 99989997, 99989998, 99989999],
[99990000, 99990001, 99990002, ..., 99999997, 99999998, 99999999]], dtype=int32)
""" # flake8: noqa
# refresh metadata
if not self._cache_metadata:
self._load_metadata()
# normalize selection
selection = normalize_array_selection(item, self._shape)
# determine output array shape
out_shape = tuple(s.stop - s.start for s in selection
if isinstance(s, slice))
# setup output array
out = np.empty(out_shape, dtype=self._dtype, order=self._order)
# determine indices of chunks overlapping the selection
chunk_range = get_chunk_range(selection, self._chunks)
# iterate over chunks in range
for cidx in itertools.product(*chunk_range):
# determine chunk offset
offset = [i * c for i, c in zip(cidx, self._chunks)]
# determine region within output array
out_selection = tuple(
slice(max(0, o - s.start),
min(o + c - s.start, s.stop - s.start))
for s, o, c, in zip(selection, offset, self._chunks)
if isinstance(s, slice)
)
# determine region within chunk
chunk_selection = tuple(
slice(max(0, s.start - o), min(c, s.stop - o))
if isinstance(s, slice)
else s - o
for s, o, c in zip(selection, offset, self._chunks)
)
# obtain the destination array as a view of the output array
if out_selection:
dest = out[out_selection]
else:
dest = out
# load chunk selection into output array
self._chunk_getitem(cidx, chunk_selection, dest)
if out.shape:
return out
else:
return out[()]
def __setitem__(self, item, value):
"""Modify data for some portion of the array.
Examples
--------
Setup a 1-dimensional array::
>>> import zarr
>>> z = zarr.zeros(100000000, chunks=1000000, dtype='i4')
>>> z
Array((100000000,), int32, chunks=(1000000,), order=C)
nbytes: 381.5M; nbytes_stored: ...; ratio: ...; initialized: 0/100
compressor: Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
store: dict
Set all array elements to the same scalar value::
>>> z[:] = 42
>>> z[:]
array([42, 42, 42, ..., 42, 42, 42], dtype=int32)
Set a portion of the array::
>>> z[:100] = np.arange(100)
>>> z[-100:] = np.arange(100)[::-1]
>>> z[:]
array([0, 1, 2, ..., 2, 1, 0], dtype=int32)
Setup a 2-dimensional array::
>>> z = zarr.zeros((10000, 10000), chunks=(1000, 1000), dtype='i4')
>>> z
Array((10000, 10000), int32, chunks=(1000, 1000), order=C)
nbytes: 381.5M; nbytes_stored: ...; ratio: ...; initialized: 0/100
compressor: Blosc(cname='lz4', clevel=5, shuffle=SHUFFLE, blocksize=0)
store: dict
Set all array elements to the same scalar value::
>>> z[:] = 42
>>> z[:]
array([[42, 42, 42, ..., 42, 42, 42],
[42, 42, 42, ..., 42, 42, 42],
[42, 42, 42, ..., 42, 42, 42],
...,
[42, 42, 42, ..., 42, 42, 42],
[42, 42, 42, ..., 42, 42, 42],
[42, 42, 42, ..., 42, 42, 42]], dtype=int32)
Set a portion of the array::
>>> z[0, :] = np.arange(z.shape[1])
>>> z[:, 0] = np.arange(z.shape[0])
>>> z[:]
array([[ 0, 1, 2, ..., 9997, 9998, 9999],
[ 1, 42, 42, ..., 42, 42, 42],
[ 2, 42, 42, ..., 42, 42, 42],
...,
[9997, 42, 42, ..., 42, 42, 42],
[9998, 42, 42, ..., 42, 42, 42],
[9999, 42, 42, ..., 42, 42, 42]], dtype=int32)
"""
# guard conditions
if self._read_only:
err_read_only()
# refresh metadata
if not self._cache_metadata:
self._load_metadata_nosync()
# normalize selection
selection = normalize_array_selection(item, self._shape)
# check value shape
expected_shape = tuple(
s.stop - s.start for s in selection
if isinstance(s, slice)
)
if np.isscalar(value):
pass
elif expected_shape != value.shape:
raise ValueError('value has wrong shape, expecting %s, found %s'
% (str(expected_shape),
str(value.shape)))
# determine indices of chunks overlapping the selection
chunk_range = get_chunk_range(selection, self._chunks)
# iterate over chunks in range
for cidx in itertools.product(*chunk_range):
# determine chunk offset
offset = [i * c for i, c in zip(cidx, self._chunks)]
# determine required index range within chunk
chunk_selection = tuple(
slice(max(0, s.start - o), min(c, s.stop - o))
if isinstance(s, slice)
else s - o
for s, o, c in zip(selection, offset, self._chunks)
)
if np.isscalar(value):
# put data
self._chunk_setitem(cidx, chunk_selection, value)
else:
# assume value is array-like
# determine index within value
value_selection = tuple(
slice(max(0, o - s.start),
min(o + c - s.start, s.stop - s.start))
for s, o, c in zip(selection, offset, self._chunks)
if isinstance(s, slice)
)
# put data
self._chunk_setitem(cidx, chunk_selection, value[value_selection])
def _chunk_getitem(self, cidx, item, dest):
"""Obtain part or whole of a chunk.
Parameters
----------
cidx : tuple of ints
Indices of the chunk.
item : tuple of slices
Location of region within the chunk.
dest : ndarray
Numpy array to store result in.
"""
try:
# obtain compressed data for chunk
ckey = self._chunk_key(cidx)
cdata = self._chunk_store[ckey]
except KeyError:
# chunk not initialized
if self._fill_value is not None:
dest.fill(self._fill_value)
else:
if is_total_slice(item, self._chunks) and \
not self._filters and \
((self._order == 'C' and dest.flags.c_contiguous) or
(self._order == 'F' and dest.flags.f_contiguous)):
# optimization: we want the whole chunk, and the destination is
# contiguous, so we can decompress directly from the chunk
# into the destination array
if self._compressor:
self._compressor.decode(cdata, dest)
else:
arr = np.frombuffer(cdata, dtype=self._dtype)
arr = arr.reshape(self._chunks, order=self._order)
np.copyto(dest, arr)
else:
# decode chunk
chunk = self._decode_chunk(cdata)
# set data in output array
# (split into two lines for profiling)
tmp = chunk[item]
if dest.shape:
dest[:] = tmp
else:
dest[()] = tmp
def _chunk_setitem(self, cidx, item, value):
"""Replace part or whole of a chunk.
Parameters
----------
cidx : tuple of ints
Indices of the chunk.
item : tuple of slices
Location of region within the chunk.
value : scalar or ndarray
Value to set.
"""
# synchronization
if self._synchronizer is None:
self._chunk_setitem_nosync(cidx, item, value)
else:
# synchronize on the chunk
ckey = self._chunk_key(cidx)
with self._synchronizer[ckey]:
self._chunk_setitem_nosync(cidx, item, value)
def _chunk_setitem_nosync(self, cidx, item, value):
# obtain key for chunk storage
ckey = self._chunk_key(cidx)
if is_total_slice(item, self._chunks):
# totally replace chunk
# optimization: we are completely replacing the chunk, so no need
# to access the existing chunk data
if np.isscalar(value):
# setup array filled with value
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
chunk.fill(value)
else:
if not self._compressor and not self._filters:
# https://github.com/alimanfoo/zarr/issues/79
# Ensure a copy is taken so we don't end up storing
# a view into someone else's array.
# N.B., this assumes that filters or compressor always
# take a copy and never attempt to apply encoding in-place.
chunk = np.array(value, dtype=self._dtype, order=self._order)
else:
# ensure array is contiguous
if self._order == 'F':
chunk = np.asfortranarray(value, dtype=self._dtype)
else:
chunk = np.ascontiguousarray(value, dtype=self._dtype)
else:
# partially replace the contents of this chunk
try:
# obtain compressed data for chunk
cdata = self._chunk_store[ckey]
except KeyError:
# chunk not initialized
chunk = np.empty(self._chunks, dtype=self._dtype, order=self._order)
if self._fill_value is not None:
chunk.fill(self._fill_value)
else:
# decode chunk
chunk = self._decode_chunk(cdata)
if not chunk.flags.writeable:
chunk = chunk.copy(order='K')
# modify
chunk[item] = value
# encode chunk
cdata = self._encode_chunk(chunk)
# store
self._chunk_store[ckey] = cdata
def _chunk_key(self, cidx):
return self._key_prefix + '.'.join(map(str, cidx))
def _decode_chunk(self, cdata):
# decompress
if self._compressor:
chunk = self._compressor.decode(cdata)
else:
chunk = cdata
# apply filters
if self._filters:
for f in self._filters[::-1]:
chunk = f.decode(chunk)
# view as correct dtype
if isinstance(chunk, np.ndarray):
chunk = chunk.view(self._dtype)
else:
chunk = np.frombuffer(chunk, self._dtype)
# reshape
chunk = chunk.reshape(self._chunks, order=self._order)
return chunk
def _encode_chunk(self, chunk):
# apply filters
if self._filters:
for f in self._filters:
chunk = f.encode(chunk)
# compress
if self._compressor:
cdata = self._compressor.encode(chunk)
else:
cdata = chunk
return cdata
def __repr__(self):
# N.B., __repr__ needs to be synchronized to ensure consistent view
# of metadata AND when retrieving nbytes_stored from filesystem storage
return self._synchronized_op(self._repr_nosync)
def _repr_nosync(self):
# main line
r = '%s(' % type(self).__name__
if self.name:
r += '%s, ' % self.name
r += '%s, ' % str(self._shape)
r += '%s, ' % str(self._dtype)
r += 'chunks=%s, ' % str(self._chunks)
r += 'order=%s' % self._order
r += ')'
# storage size info
r += '\n nbytes: %s' % human_readable_size(self._nbytes)
if self.nbytes_stored > 0:
r += '; nbytes_stored: %s' % human_readable_size(
self.nbytes_stored)
r += '; ratio: %.1f' % (self._nbytes / self.nbytes_stored)
r += '; initialized: %s/%s' % (self.nchunks_initialized,
self._nchunks)
# filters
if self._filters:
# first line
r += '\n filters: %r' % self._filters[0]
# subsequent lines
for f in self._filters[1:]:
r += '\n %r' % f
# compressor
if self._compressor:
r += '\n compressor: %r' % self._compressor
# storage and synchronizer classes
r += '\n store: %s' % type(self._store).__name__
if self._store != self._chunk_store:
r += '; chunk_store: %s' % type(self._chunk_store).__name__
if self._synchronizer is not None:
r += '; synchronizer: %s' % type(self._synchronizer).__name__
return r
def __getstate__(self):
return self._store, self._path, self._read_only, self._chunk_store, \
self._synchronizer, self._cache_metadata
def __setstate__(self, state):
self.__init__(*state)
def _synchronized_op(self, f, *args, **kwargs):
# no synchronization
if self._synchronizer is None:
self._refresh_metadata_nosync()
return f(*args, **kwargs)
else:
# synchronize on the array
mkey = self._key_prefix + array_meta_key
with self._synchronizer[mkey]:
self._refresh_metadata_nosync()
result = f(*args, **kwargs)
return result
def _write_op(self, f, *args, **kwargs):
# guard condition
if self._read_only:
err_read_only()
return self._synchronized_op(f, *args, **kwargs)
def resize(self, *args):
"""Change the shape of the array by growing or shrinking one or more
dimensions.
Examples
--------
>>> import zarr
>>> z = zarr.zeros(shape=(10000, 10000), chunks=(1000, 1000))
>>> z.shape
(10000, 10000)
>>> z.resize(20000, 10000)
>>> z.shape
(20000, 10000)
>>> z.resize(30000, 1000)
>>> z.shape
(30000, 1000)
Notes
-----
When resizing an array, the data are not rearranged in any way.
If one or more dimensions are shrunk, any chunks falling outside the
new array shape will be deleted from the underlying store.
""" # flake8: noqa
return self._write_op(self._resize_nosync, *args)
def _resize_nosync(self, *args):
# normalize new shape argument
old_shape = self._shape
new_shape = normalize_resize_args(old_shape, *args)
old_cdata_shape = self._cdata_shape
# update metadata
self._shape = new_shape
self._flush_metadata_nosync()
# determine the new number and arrangement of chunks
chunks = self._chunks
new_cdata_shape = tuple(int(np.ceil(s / c))
for s, c in zip(new_shape, chunks))
# remove any chunks not within range
for cidx in itertools.product(*[range(n) for n in old_cdata_shape]):
if all(i < c for i, c in zip(cidx, new_cdata_shape)):
pass # keep the chunk
else:
key = self._chunk_key(cidx)
try:
del self._chunk_store[key]
except KeyError:
# chunk not initialized
pass
def append(self, data, axis=0):
"""Append `data` to `axis`.
Parameters
----------
data : array_like
Data to be appended.
axis : int
Axis along which to append.
Returns
-------
new_shape : tuple
Notes
-----
The size of all dimensions other than `axis` must match between this
array and `data`.
Examples
--------
>>> import numpy as np
>>> import zarr
>>> a = np.arange(10000000, dtype='i4').reshape(10000, 1000)
>>> z = zarr.array(a, chunks=(1000, 100))
>>> z.shape
(10000, 1000)
>>> z.append(a)
(20000, 1000)
>>> z.append(np.vstack([a, a]), axis=1)
(20000, 2000)
>>> z
Array((20000, 2000), int32, chunks=(1000, 100), order=C)
...
"""
return self._write_op(self._append_nosync, data, axis=axis)
def _append_nosync(self, data, axis=0):
# ensure data is array-like
if not hasattr(data, 'shape') or not hasattr(data, 'dtype'):
data = np.asanyarray(data)
# ensure shapes are compatible for non-append dimensions
self_shape_preserved = tuple(s for i, s in enumerate(self._shape)
if i != axis)
data_shape_preserved = tuple(s for i, s in enumerate(data.shape)
if i != axis)
if self_shape_preserved != data_shape_preserved:
raise ValueError('shapes not compatible')
# remember old shape
old_shape = self._shape
# determine new shape
new_shape = tuple(
self._shape[i] if i != axis else self._shape[i] + data.shape[i]
for i in range(len(self._shape))
)
# resize
self._resize_nosync(new_shape)
# store data
# noinspection PyTypeChecker
append_selection = tuple(
slice(None) if i != axis else slice(old_shape[i], new_shape[i])
for i in range(len(self._shape))
)
self[append_selection] = data
return new_shape
def view(self, shape=None, chunks=None, dtype=None,
fill_value=None, filters=None, read_only=None,
synchronizer=None):
"""Return an array sharing the same data.
Parameters
----------
shape : int or tuple of ints
Array shape.
chunks : int or tuple of ints, optional
Chunk shape.
dtype : string or dtype, optional
NumPy dtype.
fill_value : object
Default value to use for uninitialized portions of the array.
filters : sequence, optional
Sequence of filters to use to encode chunk data prior to
compression.
read_only : bool, optional
True if array should be protected against modification.
synchronizer : object, optional
Array synchronizer.
Notes
-----
WARNING: This is an experimental feature and should be used with care.
There are plenty of ways to generate errors and/or cause data
corruption.
Examples
--------
Bypass filters:
>>> import zarr
>>> import numpy as np
>>> np.random.seed(42)
>>> labels = [b'female', b'male']
>>> data = np.random.choice(labels, size=10000)
>>> filters = [zarr.Categorize(labels=labels,
... dtype=data.dtype,
... astype='u1')]
>>> a = zarr.array(data, chunks=1000, filters=filters)
>>> a[:]
array([b'female', b'male', b'female', ..., b'male', b'male', b'female'],
dtype='|S6')
>>> v = a.view(dtype='u1', filters=[])
>>> v.is_view
True
>>> v[:]
array([1, 2, 1, ..., 2, 2, 1], dtype=uint8)
Views can be used to modify data:
>>> x = v[:]
>>> x.sort()
>>> v[:] = x
>>> v[:]
array([1, 1, 1, ..., 2, 2, 2], dtype=uint8)
>>> a[:]
array([b'female', b'female', b'female', ..., b'male', b'male', b'male'],
dtype='|S6')
View as a different dtype with the same itemsize:
>>> data = np.random.randint(0, 2, size=10000, dtype='u1')
>>> a = zarr.array(data, chunks=1000)
>>> a[:]
array([0, 0, 1, ..., 1, 0, 0], dtype=uint8)
>>> v = a.view(dtype=bool)
>>> v[:]
array([False, False, True, ..., True, False, False], dtype=bool)
>>> np.all(a[:].view(dtype=bool) == v[:])
True
An array can be viewed with a dtype with a different itemsize, however
some care is needed to adjust the shape and chunk shape so that chunk
data is interpreted correctly:
>>> data = np.arange(10000, dtype='u2')
>>> a = zarr.array(data, chunks=1000)
>>> a[:10]
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16)
>>> v = a.view(dtype='u1', shape=20000, chunks=2000)
>>> v[:10]
array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8)
>>> np.all(a[:].view('u1') == v[:])
True
Change fill value for uninitialized chunks:
>>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1')
>>> a[:]
array([-1, -1, -1, ..., -1, -1, -1], dtype=int8)
>>> v = a.view(fill_value=42)
>>> v[:]
array([42, 42, 42, ..., 42, 42, 42], dtype=int8)
Note that resizing or appending to views is not permitted:
>>> a = zarr.empty(10000)
>>> v = a.view()
>>> try:
... v.resize(20000)
... except PermissionError as e:
... print(e)
not permitted for views
""" # flake8: noqa
store = self._store
chunk_store = self._chunk_store
path = self._path
if read_only is None:
read_only = self._read_only
if synchronizer is None:
synchronizer = self._synchronizer
a = Array(store=store, path=path, chunk_store=chunk_store,
read_only=read_only, synchronizer=synchronizer,
cache_metadata=True)
a._is_view = True
# allow override of some properties
if dtype is None:
dtype = self._dtype
else:
dtype = np.dtype(dtype)
a._dtype = dtype
if shape is None:
shape = self._shape
else:
shape = normalize_shape(shape)
a._shape = shape
if chunks is not None:
chunks = normalize_chunks(chunks, shape, dtype.itemsize)
a._chunks = chunks
if fill_value is not None:
a._fill_value = fill_value
if filters is not None:
a._filters = filters
return a
def astype(self, dtype):
"""Does on the fly type conversion of the underlying data.
Parameters
----------
dtype : string or dtype
NumPy dtype.
Notes
-----
This method returns a new Array object which is a view on the same
underlying chunk data. Modifying any data via the view is currently
not permitted and will result in an error. This is an experimental
feature and its behavior is subject to change in the future.
See Also
--------
Array.view
Examples
--------
>>> import zarr
>>> import numpy as np
>>> data = np.arange(100, dtype=np.uint8)
>>> a = zarr.array(data, chunks=10)
>>> a[:]
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99], dtype=uint8)
>>> v = a.astype(np.float32)
>>> v.is_view
True
>>> v[:]
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.],
dtype=float32)
""" # flake8: noqa
dtype = np.dtype(dtype)
filters = []
if self._filters:
filters.extend(self._filters)
filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=dtype))
return self.view(filters=filters, dtype=dtype, read_only=True)
| [
"zarr.util.normalize_storage_path",
"numpy.empty",
"zarr.codecs.get_codec",
"zarr.util.normalize_resize_args",
"zarr.storage.listdir",
"zarr.codecs.AsType",
"zarr.util.normalize_shape",
"zarr.errors.err_read_only",
"zarr.util.normalize_chunks",
"itertools.product",
"zarr.attrs.Attributes",
"za... | [((2311, 2339), 'zarr.util.normalize_storage_path', 'normalize_storage_path', (['path'], {}), '(path)\n', (2333, 2339), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((2901, 2976), 'zarr.attrs.Attributes', 'Attributes', (['store'], {'key': 'akey', 'read_only': 'read_only', 'synchronizer': 'synchronizer'}), '(store, key=akey, read_only=read_only, synchronizer=synchronizer)\n', (2911, 2976), False, 'from zarr.attrs import Attributes\n'), ((5251, 5278), 'zarr.meta.encode_array_metadata', 'encode_array_metadata', (['meta'], {}), '(meta)\n', (5272, 5278), False, 'from zarr.meta import decode_array_metadata, encode_array_metadata\n'), ((7817, 7850), 'zarr.compat.reduce', 'reduce', (['operator.mul', 'self._shape'], {}), '(operator.mul, self._shape)\n', (7823, 7850), False, 'from zarr.compat import reduce\n'), ((8907, 8939), 'zarr.storage.getsize', 'getsize', (['self._store', 'self._path'], {}), '(self._store, self._path)\n', (8914, 8939), False, 'from zarr.storage import array_meta_key, attrs_key, listdir, getsize\n'), ((9599, 9638), 'zarr.compat.reduce', 'reduce', (['operator.mul', 'self._cdata_shape'], {}), '(operator.mul, self._cdata_shape)\n', (9605, 9638), False, 'from zarr.compat import reduce\n'), ((13933, 13977), 'zarr.util.normalize_array_selection', 'normalize_array_selection', (['item', 'self._shape'], {}), '(item, self._shape)\n', (13958, 13977), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((14175, 14232), 'numpy.empty', 'np.empty', (['out_shape'], {'dtype': 'self._dtype', 'order': 'self._order'}), '(out_shape, dtype=self._dtype, order=self._order)\n', (14183, 14232), True, 'import numpy as np\n'), ((14320, 14360), 'zarr.util.get_chunk_range', 
'get_chunk_range', (['selection', 'self._chunks'], {}), '(selection, self._chunks)\n', (14335, 14360), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((14421, 14452), 'itertools.product', 'itertools.product', (['*chunk_range'], {}), '(*chunk_range)\n', (14438, 14452), False, 'import itertools\n'), ((18144, 18188), 'zarr.util.normalize_array_selection', 'normalize_array_selection', (['item', 'self._shape'], {}), '(item, self._shape)\n', (18169, 18188), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((18355, 18373), 'numpy.isscalar', 'np.isscalar', (['value'], {}), '(value)\n', (18366, 18373), True, 'import numpy as np\n'), ((18704, 18744), 'zarr.util.get_chunk_range', 'get_chunk_range', (['selection', 'self._chunks'], {}), '(selection, self._chunks)\n', (18719, 18744), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((18805, 18836), 'itertools.product', 'itertools.product', (['*chunk_range'], {}), '(*chunk_range)\n', (18822, 18836), False, 'import itertools\n'), ((22475, 22509), 'zarr.util.is_total_slice', 'is_total_slice', (['item', 'self._chunks'], {}), '(item, self._chunks)\n', (22489, 22509), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((29018, 29057), 'zarr.util.normalize_resize_args', 'normalize_resize_args', (['old_shape', '*args'], {}), '(old_shape, *args)\n', (29039, 29057), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, 
human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((39255, 39270), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (39263, 39270), True, 'import numpy as np\n'), ((3610, 3643), 'zarr.meta.decode_array_metadata', 'decode_array_metadata', (['meta_bytes'], {}), '(meta_bytes)\n', (3631, 3643), False, 'from zarr.meta import decode_array_metadata, encode_array_metadata\n'), ((4624, 4666), 'zarr.errors.PermissionError', 'PermissionError', (['"""not permitted for views"""'], {}), "('not permitted for views')\n", (4639, 4666), False, 'from zarr.errors import PermissionError, err_read_only, err_array_not_found\n'), ((9036, 9074), 'zarr.storage.getsize', 'getsize', (['self._chunk_store', 'self._path'], {}), '(self._chunk_store, self._path)\n', (9043, 9074), False, 'from zarr.storage import array_meta_key, attrs_key, listdir, getsize\n'), ((17971, 17986), 'zarr.errors.err_read_only', 'err_read_only', ([], {}), '()\n', (17984, 17986), False, 'from zarr.errors import PermissionError, err_read_only, err_array_not_found\n'), ((19265, 19283), 'numpy.isscalar', 'np.isscalar', (['value'], {}), '(value)\n', (19276, 19283), True, 'import numpy as np\n'), ((22690, 22708), 'numpy.isscalar', 'np.isscalar', (['value'], {}), '(value)\n', (22701, 22708), True, 'import numpy as np\n'), ((25066, 25099), 'numpy.frombuffer', 'np.frombuffer', (['chunk', 'self._dtype'], {}), '(chunk, self._dtype)\n', (25079, 25099), True, 'import numpy as np\n'), ((26156, 26189), 'zarr.util.human_readable_size', 'human_readable_size', (['self._nbytes'], {}), '(self._nbytes)\n', (26175, 26189), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((28027, 28042), 'zarr.errors.err_read_only', 'err_read_only', ([], {}), '()\n', (28040, 28042), False, 'from zarr.errors import PermissionError, err_read_only, 
err_array_not_found\n'), ((31003, 31022), 'numpy.asanyarray', 'np.asanyarray', (['data'], {}), '(data)\n', (31016, 31022), True, 'import numpy as np\n'), ((36572, 36587), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (36580, 36587), True, 'import numpy as np\n'), ((36709, 36731), 'zarr.util.normalize_shape', 'normalize_shape', (['shape'], {}), '(shape)\n', (36724, 36731), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((36813, 36860), 'zarr.util.normalize_chunks', 'normalize_chunks', (['chunks', 'shape', 'dtype.itemsize'], {}), '(chunks, shape, dtype.itemsize)\n', (36829, 36860), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((39387, 39439), 'zarr.codecs.AsType', 'AsType', ([], {'encode_dtype': 'self._dtype', 'decode_dtype': 'dtype'}), '(encode_dtype=self._dtype, decode_dtype=dtype)\n', (39393, 39439), False, 'from zarr.codecs import AsType, get_codec\n'), ((3504, 3535), 'zarr.errors.err_array_not_found', 'err_array_not_found', (['self._path'], {}), '(self._path)\n', (3523, 3535), False, 'from zarr.errors import PermissionError, err_read_only, err_array_not_found\n'), ((4082, 4099), 'zarr.codecs.get_codec', 'get_codec', (['config'], {}), '(config)\n', (4091, 4099), False, 'from zarr.codecs import AsType, get_codec\n'), ((20575, 20609), 'zarr.util.is_total_slice', 'is_total_slice', (['item', 'self._chunks'], {}), '(item, self._chunks)\n', (20589, 20609), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((22783, 22843), 'numpy.empty', 'np.empty', (['self._chunks'], {'dtype': 'self._dtype', 'order': 'self._order'}), 
'(self._chunks, dtype=self._dtype, order=self._order)\n', (22791, 22843), True, 'import numpy as np\n'), ((26266, 26305), 'zarr.util.human_readable_size', 'human_readable_size', (['self.nbytes_stored'], {}), '(self.nbytes_stored)\n', (26285, 26305), False, 'from zarr.util import is_total_slice, normalize_array_selection, get_chunk_range, human_readable_size, normalize_resize_args, normalize_storage_path, normalize_shape, normalize_chunks\n'), ((4218, 4235), 'zarr.codecs.get_codec', 'get_codec', (['config'], {}), '(config)\n', (4227, 4235), False, 'from zarr.codecs import AsType, get_codec\n'), ((9247, 9261), 'numpy.ceil', 'np.ceil', (['(s / c)'], {}), '(s / c)\n', (9254, 9261), True, 'import numpy as np\n'), ((9935, 9973), 'zarr.storage.listdir', 'listdir', (['self._chunk_store', 'self._path'], {}), '(self._chunk_store, self._path)\n', (9942, 9973), False, 'from zarr.storage import array_meta_key, attrs_key, listdir, getsize\n'), ((21148, 21187), 'numpy.frombuffer', 'np.frombuffer', (['cdata'], {'dtype': 'self._dtype'}), '(cdata, dtype=self._dtype)\n', (21161, 21187), True, 'import numpy as np\n'), ((21279, 21299), 'numpy.copyto', 'np.copyto', (['dest', 'arr'], {}), '(dest, arr)\n', (21288, 21299), True, 'import numpy as np\n'), ((23339, 23392), 'numpy.array', 'np.array', (['value'], {'dtype': 'self._dtype', 'order': 'self._order'}), '(value, dtype=self._dtype, order=self._order)\n', (23347, 23392), True, 'import numpy as np\n'), ((23976, 24036), 'numpy.empty', 'np.empty', (['self._chunks'], {'dtype': 'self._dtype', 'order': 'self._order'}), '(self._chunks, dtype=self._dtype, order=self._order)\n', (23984, 24036), True, 'import numpy as np\n'), ((29327, 29341), 'numpy.ceil', 'np.ceil', (['(s / c)'], {}), '(s / c)\n', (29334, 29341), True, 'import numpy as np\n'), ((23540, 23583), 'numpy.asfortranarray', 'np.asfortranarray', (['value'], {'dtype': 'self._dtype'}), '(value, dtype=self._dtype)\n', (23557, 23583), True, 'import numpy as np\n'), ((23642, 23688), 
'numpy.ascontiguousarray', 'np.ascontiguousarray', (['value'], {'dtype': 'self._dtype'}), '(value, dtype=self._dtype)\n', (23662, 23688), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import os.path as osp
import os
from PIL import Image
import numpy as np
import json
from data_utils.transform import *
import cv2
class TransformationVal(object):
def __init__(self, size_wh=[1536, 768]):
self.size_wh = size_wh
def __call__(self, im_lb):
im = im_lb['im']
lb = im_lb['lb']
assert im.size == lb.size
w, h = self.size_wh
im = im.resize((w, h), Image.BILINEAR)
lb = lb.resize((w, h), Image.NEAREST)
return dict(im=im, lb=lb)
class CamVid(Dataset):
def __init__(self, rootpth='/raid/huangsh/datasets/CamVid/', cropsize_wh=(960, 720), mode='train'):
super(CamVid, self).__init__()
assert mode in ('train', 'val', 'test')
self.mode = mode
self.ignore_lb = 255
if self.mode == 'train':
self.file_txt = '{}/trainval.txt'.format(rootpth)
else:
self.file_txt = '{}/{}.txt'.format(rootpth, mode)
self.dir = rootpth
file_names, img_names, ann_names = [], [], []
with open(self.file_txt) as f:
files = f.readlines()
for item in files:
item = item.strip()
item = item.split(' ')
img_names.append(item[0])
ann_names.append(item[1])
file_names.append(item[0].split('/')[-1].split('.png')[0])
# parse img directory
self.imgs = {}
impths = ["{}/{}".format(self.dir, el) for el in img_names]
#print(impths)
self.file_names = []
self.file_names.extend(file_names)
self.len = len(self.file_names)
self.imgs.update(dict(zip(file_names, impths)))
# parse gt directory
self.labels = {}
lbpths = ["{}/{}".format(self.dir, el) for el in ann_names]
self.labels.update(dict(zip(file_names, lbpths)))
# pre-processing
self.to_tensor = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
self.trans_train = Compose([ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5), HorizontalFlip(),
RandomScale((0.75, 1.0, 1.25, 1.5, 1.75, 2.0)), RandomCrop(cropsize_wh)]
)
self.trans_val = TransformationVal(cropsize_wh)
def __getitem__(self, idx):
fn = self.file_names[idx]
impth = self.imgs[fn]
lbpth = self.labels[fn]
img = Image.open(impth)
label = Image.open(lbpth)
if self.trans_val is None or self.mode == 'train':
if self.mode == 'train':
im_lb = dict(im=img, lb=label)
im_lb = self.trans_train(im_lb)
img, label = im_lb['im'], im_lb['lb']
img = self.to_tensor(img)
label = np.array(label).astype(np.int64)[np.newaxis, :]
else:
im_lb = dict(im=img, lb=label)
im_lb = self.trans_val(im_lb)
img, label = im_lb['im'], im_lb['lb']
img = self.to_tensor(img)
label = np.array(label).astype(np.int64)[np.newaxis, :]
return img, label
def __len__(self):
return self.len
if __name__ == "__main__":
from tqdm import tqdm
ds = CamVid('./data/', mode='val')
uni = []
for im, lb in tqdm(ds):
lb_uni = np.unique(lb).tolist()
uni.extend(lb_uni)
print(uni)
print(set(uni))
| [
"tqdm.tqdm",
"PIL.Image.open",
"numpy.array",
"torchvision.transforms.Normalize",
"numpy.unique",
"torchvision.transforms.ToTensor"
] | [((3417, 3425), 'tqdm.tqdm', 'tqdm', (['ds'], {}), '(ds)\n', (3421, 3425), False, 'from tqdm import tqdm\n'), ((2559, 2576), 'PIL.Image.open', 'Image.open', (['impth'], {}), '(impth)\n', (2569, 2576), False, 'from PIL import Image\n'), ((2593, 2610), 'PIL.Image.open', 'Image.open', (['lbpth'], {}), '(lbpth)\n', (2603, 2610), False, 'from PIL import Image\n'), ((2008, 2029), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2027, 2029), True, 'import torchvision.transforms as transforms\n'), ((2031, 2097), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (2051, 2097), True, 'import torchvision.transforms as transforms\n'), ((3444, 3457), 'numpy.unique', 'np.unique', (['lb'], {}), '(lb)\n', (3453, 3457), True, 'import numpy as np\n'), ((2914, 2929), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2922, 2929), True, 'import numpy as np\n'), ((3169, 3184), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (3177, 3184), True, 'import numpy as np\n')] |
"""
Kriging geographical data
-------------------------
In this example we are going to interpolate actual temperature data from
the German weather service `DWD <https://www.dwd.de/EN>`_.
"""
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import gstools as gs
# German border polygon (used further below only for plotting).
border = np.loadtxt(os.path.join("..", "data", "de_borders.txt"))
# Observation table columns: station id, latitude, longitude, temperature.
ids, lat, lon, temp = np.loadtxt(os.path.join("..", "data", "temp_obs.txt")).T
###############################################################################
# First we will estimate the variogram of our temperature data.
# As the maximal bin distance we choose 8 degrees, which corresponds to a
# chordal length of about 900 km.
# max_dist is given in radians, hence the deg2rad conversion of 8 degrees.
bins = gs.standard_bins((lat, lon), max_dist=np.deg2rad(8), latlon=True)
bin_c, vario = gs.vario_estimate((lat, lon), temp, bin_edges=bins, latlon=True)
###############################################################################
# Now we can use this estimated variogram to fit a model to it.
# Here we will use a :any:`Spherical` model. We select the ``latlon`` option
# to use the `Yadrenko` variant of the model to gain a valid model for lat-lon
# coordinates and we rescale it to the earth-radius. Otherwise the length
# scale would be given in radians representing the great-circle distance.
#
# We deselect the nugget from fitting and plot the result afterwards.
#
# .. note::
#
#    You need to plot the Yadrenko variogram, since the standard variogram
#    still holds the ordinary routine that is not respecting the great-circle
#    distance.
model = gs.Spherical(latlon=True, rescale=gs.EARTH_RADIUS)
model.fit_variogram(bin_c, vario, nugget=False)
ax = model.plot("vario_yadrenko", x_max=bin_c[-1])
ax.scatter(bin_c, vario)
ax.set_xlabel("great circle distance / radians")
ax.set_ylabel("semi-variogram")
fig = ax.get_figure()
fig.savefig(os.path.join("..", "results", "variogram.pdf"), dpi=300)
print(model)
###############################################################################
# As we see, we have a rather large correlation length of ca. 600 km.
#
# Now we want to interpolate the data using Universal and Regression kriging
# in order to compare them.
# We will use a north-south drift by assuming a linear correlation
# of temperature with latitude.
def north_south_drift(lat, lon):
    """Linear north-south drift term: simply returns the latitude itself."""
    return lat
uk = gs.krige.Universal(
    model=model,
    cond_pos=(lat, lon),
    cond_val=temp,
    drift_functions=north_south_drift,
)
# fit linear regression model for temperature depending on latitude
regress = stats.linregress(lat, temp)
# trend receives (x, y) = (lat, lon); only latitude enters the regression.
trend = lambda x, y: regress.intercept + regress.slope * x
dk = gs.krige.Detrended(
    model=model,
    cond_pos=(lat, lon),
    cond_val=temp,
    trend=trend,
)
###############################################################################
# Now we generate the kriging field, by defining a lat-lon grid that covers
# the whole of Germany. The :any:`Krige` class provides the option to only
# krige the mean field, so one can have a glimpse at the estimated drift.
g_lat = np.arange(47, 56.1, 0.1)
g_lon = np.arange(5, 16.1, 0.1)
fld_uk = uk((g_lat, g_lon), mesh_type="structured", return_var=False)
mean = uk((g_lat, g_lon), mesh_type="structured", only_mean=True)
fld_dk = dk((g_lat, g_lon), mesh_type="structured", return_var=False)
###############################################################################
# And that's it. Now let's have a look at the generated field and the input
# data along with the estimated mean:
levels = np.linspace(5, 23, 64)
fig, ax = plt.subplots(1, 3, figsize=[10, 5], sharey=True)
sca = ax[0].scatter(lon, lat, c=temp, vmin=5, vmax=23, cmap="coolwarm")
co1 = ax[1].contourf(g_lon, g_lat, fld_uk, levels, cmap="coolwarm")
co2 = ax[2].contourf(g_lon, g_lat, fld_dk, levels, cmap="coolwarm")
# pdf anti-alias
ax[1].contour(g_lon, g_lat, fld_uk, levels, cmap="coolwarm", zorder=-10)
ax[2].contour(g_lon, g_lat, fld_dk, levels, cmap="coolwarm", zorder=-10)
[ax[i].plot(border[:, 0], border[:, 1], color="k") for i in range(3)]
[ax[i].set_xlim([5, 16]) for i in range(3)]
[ax[i].set_xlabel("Longitude / °") for i in range(3)]
ax[0].set_ylabel("Latitude / °")
ax[0].set_title("Temperature observations at 2m\nfrom DWD (2020-06-09 12:00)")
ax[1].set_title("Universal Kriging\nwith North-South drift")
ax[2].set_title("Regression Kriging\nwith North-South trend")
fmt = dict(orientation="horizontal", shrink=0.5, fraction=0.1, pad=0.2)
fig.colorbar(co2, ax=ax, **fmt).set_label("T / °C")
fig.savefig(os.path.join("..", "results", "kriging.pdf"), dpi=300)
###############################################################################
# To get a better impression of the estimated north-south drift and trend,
# we'll take a look at a cross-section at a longitude of 10 degree:
fig, ax = plt.subplots()
label = "latitude-temperature scatter"
reg_trend = trend(g_lat, g_lon)
ax.scatter(lat, temp, c="silver", alpha=1.0, edgecolors="none", label=label)
# column index 50 corresponds to 10° longitude (g_lon = 5 + 50 * 0.1).
ax.plot(g_lat, fld_uk[:, 50], label="Universal Kriging: temperature (10° lon)")
ax.plot(g_lat, mean[:, 50], label="North-South drift: Universal Kriging")
ax.plot(g_lat, reg_trend, label="North-South trend: Regression Kriging")
# single argument sets only the lower y-limit; the upper stays automatic.
ax.set_ylim(7)
ax.set_xlabel("Latitude / °")
ax.set_ylabel("T / °C")
ax.set_title("North-South cross-section")
ax.legend()
fig.savefig(os.path.join("..", "results", "trend.pdf"), dpi=300)
| [
"gstools.krige.Universal",
"os.path.join",
"numpy.deg2rad",
"gstools.Spherical",
"numpy.arange",
"scipy.stats.linregress",
"numpy.linspace",
"gstools.vario_estimate",
"matplotlib.pyplot.subplots",
"gstools.krige.Detrended"
] | [((788, 852), 'gstools.vario_estimate', 'gs.vario_estimate', (['(lat, lon)', 'temp'], {'bin_edges': 'bins', 'latlon': '(True)'}), '((lat, lon), temp, bin_edges=bins, latlon=True)\n', (805, 852), True, 'import gstools as gs\n'), ((1567, 1617), 'gstools.Spherical', 'gs.Spherical', ([], {'latlon': '(True)', 'rescale': 'gs.EARTH_RADIUS'}), '(latlon=True, rescale=gs.EARTH_RADIUS)\n', (1579, 1617), True, 'import gstools as gs\n'), ((2401, 2507), 'gstools.krige.Universal', 'gs.krige.Universal', ([], {'model': 'model', 'cond_pos': '(lat, lon)', 'cond_val': 'temp', 'drift_functions': 'north_south_drift'}), '(model=model, cond_pos=(lat, lon), cond_val=temp,\n drift_functions=north_south_drift)\n', (2419, 2507), True, 'import gstools as gs\n'), ((2602, 2629), 'scipy.stats.linregress', 'stats.linregress', (['lat', 'temp'], {}), '(lat, temp)\n', (2618, 2629), False, 'from scipy import stats\n'), ((2695, 2780), 'gstools.krige.Detrended', 'gs.krige.Detrended', ([], {'model': 'model', 'cond_pos': '(lat, lon)', 'cond_val': 'temp', 'trend': 'trend'}), '(model=model, cond_pos=(lat, lon), cond_val=temp, trend=trend\n )\n', (2713, 2780), True, 'import gstools as gs\n'), ((3110, 3134), 'numpy.arange', 'np.arange', (['(47)', '(56.1)', '(0.1)'], {}), '(47, 56.1, 0.1)\n', (3119, 3134), True, 'import numpy as np\n'), ((3143, 3166), 'numpy.arange', 'np.arange', (['(5)', '(16.1)', '(0.1)'], {}), '(5, 16.1, 0.1)\n', (3152, 3166), True, 'import numpy as np\n'), ((3579, 3601), 'numpy.linspace', 'np.linspace', (['(5)', '(23)', '(64)'], {}), '(5, 23, 64)\n', (3590, 3601), True, 'import numpy as np\n'), ((3612, 3660), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '[10, 5]', 'sharey': '(True)'}), '(1, 3, figsize=[10, 5], sharey=True)\n', (3624, 3660), True, 'import matplotlib.pyplot as plt\n'), ((4864, 4878), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4876, 4878), True, 'import matplotlib.pyplot as plt\n'), ((321, 365), 'os.path.join', 
'os.path.join', (['""".."""', '"""data"""', '"""de_borders.txt"""'], {}), "('..', 'data', 'de_borders.txt')\n", (333, 365), False, 'import os\n'), ((1857, 1903), 'os.path.join', 'os.path.join', (['""".."""', '"""results"""', '"""variogram.pdf"""'], {}), "('..', 'results', 'variogram.pdf')\n", (1869, 1903), False, 'import os\n'), ((4574, 4618), 'os.path.join', 'os.path.join', (['""".."""', '"""results"""', '"""kriging.pdf"""'], {}), "('..', 'results', 'kriging.pdf')\n", (4586, 4618), False, 'import os\n'), ((5389, 5431), 'os.path.join', 'os.path.join', (['""".."""', '"""results"""', '"""trend.pdf"""'], {}), "('..', 'results', 'trend.pdf')\n", (5401, 5431), False, 'import os\n'), ((400, 442), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""temp_obs.txt"""'], {}), "('..', 'data', 'temp_obs.txt')\n", (412, 442), False, 'import os\n'), ((745, 758), 'numpy.deg2rad', 'np.deg2rad', (['(8)'], {}), '(8)\n', (755, 758), True, 'import numpy as np\n')] |
from random import shuffle
import sys
import anndata
import numpy as np
from scipy import sparse
from sklearn import preprocessing
def print_progress(epoch, logs, n_epochs=10000):
    """Render the selected losses from `logs` as a progress-bar line.

    Parameters
    ----------
    epoch: Integer
        Current epoch iteration (zero-based; displayed as epoch + 1).
    logs: dict
        Mapping from metric name to a history list of values; only keys
        containing "loss" and "epoch_"/"val_" (but not "unweighted")
        are shown, using the most recent value.
    n_epochs: Integer
        Maximum value of epochs.

    Returns
    -------
    """
    shown_keys = [
        key for key in logs
        if "loss" in key
        and ("epoch_" in key or "val_" in key)
        and "unweighted" not in key
    ]
    message = "".join(f" - {key:s}: {logs[key][-1]:7.10f}" for key in shown_keys)
    _print_progress_bar(epoch + 1, n_epochs, prefix='', suffix=message,
                        decimals=1, length=20)
def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""Prints out message with a progress bar.
Parameters
----------
iteration: Integer
Current epoch.
total: Integer
Maximum value of epochs.
prefix: String
String before the progress bar.
suffix: String
String after the progress bar.
decimals: Integer
Digits after comma for all the losses.
length: Integer
Length of the progress bar.
fill: String
Symbol for filling the bar.
Returns
-------
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filled_len = int(length * iteration // total)
bar = fill * filled_len + '-' * (length - filled_len)
sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def extractor(data, cell_type, conditions, cell_type_key="cell_type", condition_key="condition"):
    """
    Split `data` into condition-specific subsets for one cell type.

    Parameters
    ----------
    data: `~anndata.AnnData`
        Annotated data matrix
    cell_type: basestring
        specific cell type to be extracted from `data`.
    conditions: dict
        mapping with keys "ctrl" and "stim" naming the two condition labels.

    Returns
    -------
    list of `data` subsets:
        [training data (everything except the stimulated cells of
        `cell_type`), control subset, stimulated subset, all cells of
        `cell_type`].
    """
    type_mask = data.obs[cell_type_key] == cell_type
    ctrl_mask = data.obs[condition_key] == conditions["ctrl"]
    stim_mask = data.obs[condition_key] == conditions["stim"]
    cell_with_both_condition = data[type_mask]
    condition_1 = data[type_mask & ctrl_mask]
    condition_2 = data[type_mask & stim_mask]
    training = data[~(type_mask & stim_mask)]
    return [training, condition_1, condition_2, cell_with_both_condition]
def balancer(adata, cell_type_key="cell_type", condition_key="condition"):
    """
    Makes cell type population equal by up-sampling (with replacement)
    every cell type to the size of the largest one.

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix.
    cell_type_key: basestring
        Column in `adata.obs` holding the cell-type annotation.
    condition_key: basestring
        Column in `adata.obs` holding the condition annotation.

    Returns
    -------
    balanced_data: `~anndata.AnnData`
        Equal cell type population Annotated data matrix.
    """
    class_names = np.unique(adata.obs[cell_type_key])
    class_pop = {}
    for cls in class_names:
        class_pop[cls] = adata.copy()[adata.obs[cell_type_key] == cls].shape[0]
    max_number = np.max(list(class_pop.values()))
    all_data_x = []
    all_data_label = []
    all_data_condition = []
    for cls in class_names:
        temp = adata.copy()[adata.obs[cell_type_key] == cls]
        # Sample with replacement up to the largest class size.
        index = np.random.choice(range(len(temp)), max_number)
        if sparse.issparse(temp.X):
            temp_x = temp.X.A[index]
        else:
            temp_x = temp.X[index]
        all_data_x.append(temp_x)
        temp_ct = np.repeat(cls, max_number)
        all_data_label.append(temp_ct)
        # NOTE(review): this assumes each cell type occurs under exactly one
        # condition; with several conditions np.unique returns more than one
        # value and the repeated array length would not match max_number.
        temp_cc = np.repeat(np.unique(temp.obs[condition_key]), max_number)
        all_data_condition.append(temp_cc)
    balanced_data = anndata.AnnData(np.concatenate(all_data_x))
    balanced_data.obs[cell_type_key] = np.concatenate(all_data_label)
    # Bug fix: the condition column was previously filled with the cell-type
    # labels (`all_data_label`); use the collected condition values instead.
    balanced_data.obs[condition_key] = np.concatenate(all_data_condition)
    return balanced_data
def data_remover(adata, remain_list, remove_list, cell_type_key, condition_key):
    """
    Removes specific cell type in stimulated condition form `adata`.

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix
    remain_list: list
        list of cell types which are going to be remained in `adata`.
    remove_list: list
        list of cell types which are going to be removed from `adata`.

    Returns
    -------
    merged_data: list
        returns array of specified cell types in stimulated condition
    """
    conditions = {"ctrl": "control", "stim": "stimulated"}
    # index 3 of extractor(): all cells (both conditions) of the cell type.
    source_data = [
        extractor(adata, cell_type, conditions=conditions,
                  cell_type_key=cell_type_key, condition_key=condition_key)[3]
        for cell_type in remain_list
    ]
    # index 1 of extractor(): control cells only, i.e. stimulated removed.
    target_data = [
        extractor(adata, cell_type, conditions=conditions,
                  cell_type_key=cell_type_key, condition_key=condition_key)[1]
        for cell_type in remove_list
    ]
    merged_data = training_data_provider(source_data, target_data)
    merged_data.var_names = adata.var_names
    return merged_data
def training_data_provider(train_s, train_t):
    """
    Concatenates two lists containing adata files

    Parameters
    ----------
    train_s: list of `~anndata.AnnData`
        Annotated data matrices (source).
    train_t: list of `~anndata.AnnData`
        Annotated data matrices (target).

    Returns
    -------
    Concatenated Annotated data matrix.
    """
    def _flatten(adatas):
        # Collect dense expression matrices plus flattened obs columns.
        xs, diets, groups = [], [], []
        for ad in adatas:
            xs.append(ad.X.A)
            diets.extend(ad.obs["condition"].tolist())
            groups.extend(ad.obs["cell_type"].tolist())
        return np.concatenate(xs), diets, groups

    train_s_X, train_s_diet, train_s_groups = _flatten(train_s)
    train_t_X, train_t_diet, train_t_groups = _flatten(train_t)
    # concat all
    train_real = anndata.AnnData(np.concatenate([train_s_X, train_t_X]))
    train_real.obs["condition"] = train_s_diet + train_t_diet
    train_real.obs["cell_type"] = train_s_groups + train_t_groups
    return train_real
def shuffle_adata(adata):
    """
    Shuffles the rows of `adata` (densifying a sparse X first).

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix.

    Returns
    -------
    adata: `~anndata.AnnData`
        Annotated data matrix with rows in shuffled order.
    """
    if sparse.issparse(adata.X):
        adata.X = adata.X.A
    row_order = list(range(adata.shape[0]))
    shuffle(row_order)
    return adata[row_order, :]
def label_encoder(adata):
    """
    Encode labels of Annotated `adata` matrix using
    sklearn.preprocessing.LabelEncoder class.

    Parameters
    ----------
    adata: `~anndata.AnnData`
        Annotated data matrix.

    Returns
    -------
    labels: numpy nd-array
        Column vector (n, 1) of encoded condition labels.
    encoder: `sklearn.preprocessing.LabelEncoder`
        The fitted encoder (allows inverse transformation later).
    """
    encoder = preprocessing.LabelEncoder()
    encoded = encoder.fit_transform(adata.obs["condition"].tolist())
    return encoded.reshape(-1, 1), encoder
| [
"sys.stdout.write",
"scipy.sparse.issparse",
"random.shuffle",
"numpy.unique",
"sklearn.preprocessing.LabelEncoder",
"sys.stdout.flush",
"anndata.AnnData",
"numpy.concatenate",
"numpy.repeat"
] | [((1784, 1802), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1800, 1802), False, 'import sys\n'), ((3182, 3217), 'numpy.unique', 'np.unique', (['adata.obs[cell_type_key]'], {}), '(adata.obs[cell_type_key])\n', (3191, 3217), True, 'import numpy as np\n'), ((4081, 4111), 'numpy.concatenate', 'np.concatenate', (['all_data_label'], {}), '(all_data_label)\n', (4095, 4111), True, 'import numpy as np\n'), ((4151, 4181), 'numpy.concatenate', 'np.concatenate', (['all_data_label'], {}), '(all_data_label)\n', (4165, 4181), True, 'import numpy as np\n'), ((4200, 4243), 'numpy.unique', 'np.unique', (['balanced_data.obs[cell_type_key]'], {}), '(balanced_data.obs[cell_type_key])\n', (4209, 4243), True, 'import numpy as np\n'), ((6178, 6203), 'numpy.concatenate', 'np.concatenate', (['train_s_X'], {}), '(train_s_X)\n', (6192, 6203), True, 'import numpy as np\n'), ((6819, 6844), 'numpy.concatenate', 'np.concatenate', (['train_t_X'], {}), '(train_t_X)\n', (6833, 6844), True, 'import numpy as np\n'), ((6862, 6900), 'numpy.concatenate', 'np.concatenate', (['[train_s_X, train_t_X]'], {}), '([train_s_X, train_t_X])\n', (6876, 6900), True, 'import numpy as np\n'), ((6932, 6959), 'anndata.AnnData', 'anndata.AnnData', (['train_real'], {}), '(train_real)\n', (6947, 6959), False, 'import anndata\n'), ((7517, 7541), 'scipy.sparse.issparse', 'sparse.issparse', (['adata.X'], {}), '(adata.X)\n', (7532, 7541), False, 'from scipy import sparse\n'), ((7626, 7643), 'random.shuffle', 'shuffle', (['ind_list'], {}), '(ind_list)\n', (7633, 7643), False, 'from random import shuffle\n'), ((8022, 8050), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (8048, 8050), False, 'from sklearn import preprocessing\n'), ((1645, 1720), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))"], {}), "('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix))\n", (1661, 1720), False, 'import sys\n'), ((1757, 1779), 
'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (1773, 1779), False, 'import sys\n'), ((3630, 3653), 'scipy.sparse.issparse', 'sparse.issparse', (['temp.X'], {}), '(temp.X)\n', (3645, 3653), False, 'from scipy import sparse\n'), ((3793, 3819), 'numpy.repeat', 'np.repeat', (['cls', 'max_number'], {}), '(cls, max_number)\n', (3802, 3819), True, 'import numpy as np\n'), ((4014, 4040), 'numpy.concatenate', 'np.concatenate', (['all_data_x'], {}), '(all_data_x)\n', (4028, 4040), True, 'import numpy as np\n'), ((3887, 3921), 'numpy.unique', 'np.unique', (['temp.obs[condition_key]'], {}), '(temp.obs[condition_key])\n', (3896, 3921), True, 'import numpy as np\n')] |
# ----------------------------------------------------------------------------
# - TanksAndTemples Website Toolbox -
# - http://www.tanksandtemples.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2017
# <NAME> <<EMAIL> >
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ----------------------------------------------------------------------------
#
# This python script is for downloading dataset from www.tanksandtemples.org
# The dataset has a different license, please refer to
# https://tanksandtemples.org/license/
import json
import copy
import os
import numpy as np
from setup import *
def read_alignment_transformation(filename):
    """Load a 4x4 alignment transformation stored as JSON.

    The file must contain a ``transformation`` entry with 16 numbers; they
    are reshaped to 4x4 and transposed (i.e. the file stores the matrix in
    column-major order).
    """
    with open(filename) as data_file:
        payload = json.load(data_file)
    matrix = np.asarray(payload['transformation']).reshape((4, 4))
    return matrix.transpose()
def EvaluateHisto(source, target, trans, crop_volume, voxel_size, threshold,
        filename_mvs, plot_stretch, scene_name, verbose = True):
    """Align, crop and down-sample two point clouds, then compute
    precision/recall/F-score between them, writing all intermediate
    artifacts (distance .bin files, colorized .ply files, cumulative
    histograms) into `filename_mvs`.

    Relies on helpers not defined in this file (`voxel_down_sample`,
    `estimate_normals`, `write_point_cloud`, ...) — presumably provided by
    `from setup import *` (Open3D-style API); confirm against setup.py.
    """
    print("[EvaluateHisto]")
    set_verbosity_level(VerbosityLevel.Debug)
    # Prepare the source cloud: align, crop to the evaluation volume,
    # down-sample and estimate normals.
    s = copy.deepcopy(source)
    s.transform(trans)
    s = crop_volume.crop_point_cloud(s)
    s = voxel_down_sample(s, voxel_size)
    estimate_normals(s, search_param = KDTreeSearchParamKNN(knn = 20))
    print(filename_mvs+"/" + scene_name + ".precision.ply")
    # Same preparation for the target cloud (no alignment transform here).
    t = copy.deepcopy(target)
    t = crop_volume.crop_point_cloud(t)
    t = voxel_down_sample(t, voxel_size)
    estimate_normals(t, search_param = KDTreeSearchParamKNN(knn = 20))
    # Nearest-neighbor distances in both directions (precision and recall).
    print("[compute_point_cloud_to_point_cloud_distance]")
    distance1 = compute_point_cloud_to_point_cloud_distance(s, t)
    print("[compute_point_cloud_to_point_cloud_distance]")
    distance2 = compute_point_cloud_to_point_cloud_distance(t, s)
    # write the distances to bin files
    np.array(distance1).astype('float64').tofile(
        filename_mvs + "/" + scene_name + ".precision.bin")
    np.array(distance2).astype('float64').tofile(
        filename_mvs + "/" + scene_name + ".recall.bin")
    # Colorize the point cloud files with the precision and recall values
    write_point_cloud(filename_mvs+"/" + scene_name + ".precision.ply", s)
    write_point_cloud(filename_mvs+"/" + scene_name + ".precision.ncb.ply", s)
    write_point_cloud(filename_mvs+"/" + scene_name + ".recall.ply", t)
    source_n_fn = filename_mvs + "/" + scene_name + ".precision.ply"
    target_n_fn = filename_mvs + "/" + scene_name + ".recall.ply"
    # Shell out to the experimental ViewDistances tool to write back a
    # color coding of the per-point errors (capped at 3 * threshold).
    print('[ViewDistances] Add color coding to visualize error')
    eval_str_viewDT = OPEN3D_EXPERIMENTAL_BIN_PATH + \
            "ViewDistances " + source_n_fn + " --max_distance " + \
            str(threshold*3) + " --write_color_back --without_gui"
    os.system(eval_str_viewDT)
    print('[ViewDistances] Add color coding to visualize error')
    eval_str_viewDT = OPEN3D_EXPERIMENTAL_BIN_PATH + \
            "ViewDistances " + target_n_fn + " --max_distance " + \
            str(threshold*3) + " --write_color_back --without_gui"
    os.system(eval_str_viewDT)
    # get histogram and f-score
    [precision, recall, fscore, edges_source, cum_source,
     edges_target, cum_target] = get_f1_score_histo2(
            threshold, filename_mvs, plot_stretch, distance1, distance2)
    np.savetxt(filename_mvs+"/" + scene_name + ".recall.txt", cum_target)
    np.savetxt(filename_mvs+"/" + scene_name + ".precision.txt", cum_source)
    np.savetxt(filename_mvs+"/" + scene_name + ".prf_tau_plotstr.txt",
            np.array([precision, recall, fscore, threshold, plot_stretch]))
    return [precision, recall, fscore, edges_source,
            cum_source, edges_target, cum_target]
def get_f1_score_histo2(threshold, filename_mvs,
        plot_stretch, distance1, distance2, verbose = True):
    """Compute precision/recall/F-score and cumulative error histograms.

    Parameters
    ----------
    threshold : float
        Distance below which a point counts as a match.
    filename_mvs : str
        Unused here; kept for interface compatibility.
    plot_stretch : float
        Histograms cover distances up to ``threshold * plot_stretch``.
    distance1 : sequence of float
        Source-to-target distances (drives precision).
    distance2 : sequence of float
        Target-to-source distances (drives recall).
    verbose : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    list
        [precision, recall, fscore, edges_source, cum_source,
         edges_target, cum_target]
    """
    print("[get_f1_score_histo2]")
    dist_threshold = threshold
    if len(distance1) and len(distance2):
        recall = float(sum(d < threshold for d in distance2)) / \
                float(len(distance2))
        precision = float(sum(d < threshold for d in distance1)) / \
                float(len(distance1))
        # Bug fix: guard against ZeroDivisionError when no point falls
        # below the threshold in either direction (precision == recall == 0).
        if precision + recall > 0:
            fscore = 2 * recall * precision / (recall + precision)
        else:
            fscore = 0
        num = len(distance1)
        bins = np.arange(0, dist_threshold * plot_stretch, dist_threshold / 100)
        hist, edges_source = np.histogram(distance1, bins)
        cum_source = np.cumsum(hist).astype(float) / num
        num = len(distance2)
        bins = np.arange(0, dist_threshold * plot_stretch, dist_threshold / 100)
        hist, edges_target = np.histogram(distance2, bins)
        cum_target = np.cumsum(hist).astype(float) / num
    else:
        # Degenerate case: one of the distance sets is empty.
        precision = 0
        recall = 0
        fscore = 0
        edges_source = np.array([0])
        cum_source = np.array([0])
        edges_target = np.array([0])
        cum_target = np.array([0])
    return [precision, recall, fscore, edges_source, cum_source,
            edges_target, cum_target]
| [
"copy.deepcopy",
"json.load",
"numpy.asarray",
"numpy.savetxt",
"os.system",
"numpy.cumsum",
"numpy.histogram",
"numpy.array",
"numpy.arange"
] | [((2217, 2238), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (2230, 2238), False, 'import copy\n'), ((2465, 2486), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (2478, 2486), False, 'import copy\n'), ((3757, 3783), 'os.system', 'os.system', (['eval_str_viewDT'], {}), '(eval_str_viewDT)\n', (3766, 3783), False, 'import os\n'), ((4017, 4043), 'os.system', 'os.system', (['eval_str_viewDT'], {}), '(eval_str_viewDT)\n', (4026, 4043), False, 'import os\n'), ((4246, 4317), 'numpy.savetxt', 'np.savetxt', (["(filename_mvs + '/' + scene_name + '.recall.txt')", 'cum_target'], {}), "(filename_mvs + '/' + scene_name + '.recall.txt', cum_target)\n", (4256, 4317), True, 'import numpy as np\n'), ((4317, 4391), 'numpy.savetxt', 'np.savetxt', (["(filename_mvs + '/' + scene_name + '.precision.txt')", 'cum_source'], {}), "(filename_mvs + '/' + scene_name + '.precision.txt', cum_source)\n", (4327, 4391), True, 'import numpy as np\n'), ((1913, 1933), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1922, 1933), False, 'import json\n'), ((4461, 4523), 'numpy.array', 'np.array', (['[precision, recall, fscore, threshold, plot_stretch]'], {}), '([precision, recall, fscore, threshold, plot_stretch])\n', (4469, 4523), True, 'import numpy as np\n'), ((5088, 5153), 'numpy.arange', 'np.arange', (['(0)', '(dist_threshold * plot_stretch)', '(dist_threshold / 100)'], {}), '(0, dist_threshold * plot_stretch, dist_threshold / 100)\n', (5097, 5153), True, 'import numpy as np\n'), ((5176, 5205), 'numpy.histogram', 'np.histogram', (['distance1', 'bins'], {}), '(distance1, bins)\n', (5188, 5205), True, 'import numpy as np\n'), ((5290, 5355), 'numpy.arange', 'np.arange', (['(0)', '(dist_threshold * plot_stretch)', '(dist_threshold / 100)'], {}), '(0, dist_threshold * plot_stretch, dist_threshold / 100)\n', (5299, 5355), True, 'import numpy as np\n'), ((5378, 5407), 'numpy.histogram', 'np.histogram', (['distance2', 'bins'], {}), '(distance2, 
bins)\n', (5390, 5407), True, 'import numpy as np\n'), ((5526, 5539), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5534, 5539), True, 'import numpy as np\n'), ((5555, 5568), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5563, 5568), True, 'import numpy as np\n'), ((5586, 5599), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5594, 5599), True, 'import numpy as np\n'), ((5615, 5628), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (5623, 5628), True, 'import numpy as np\n'), ((1942, 1976), 'numpy.asarray', 'np.asarray', (["data['transformation']"], {}), "(data['transformation'])\n", (1952, 1976), True, 'import numpy as np\n'), ((2906, 2925), 'numpy.array', 'np.array', (['distance1'], {}), '(distance1)\n', (2914, 2925), True, 'import numpy as np\n'), ((3008, 3027), 'numpy.array', 'np.array', (['distance2'], {}), '(distance2)\n', (3016, 3027), True, 'import numpy as np\n'), ((5221, 5236), 'numpy.cumsum', 'np.cumsum', (['hist'], {}), '(hist)\n', (5230, 5236), True, 'import numpy as np\n'), ((5423, 5438), 'numpy.cumsum', 'np.cumsum', (['hist'], {}), '(hist)\n', (5432, 5438), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bag of local features network.
Model similar to that from https://openreview.net/pdf?id=SkfMWhAqYQ.
The model has an optional bottleneck before the linear logits layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow.compat.v1 as tf
# Description of a single convolutional layer: spatial kernel size, stride,
# and number of output filters.
ConvLayer = collections.namedtuple(
    "ConvLayer",
    ["kernel_size", "stride", "filters"])
def receptive_field_size(conv_ops_list, input_size=None):
  """Computes receptive field and output size for valid convolutions.

  Args:
    conv_ops_list: List of named tuples in the order of convolutions
      operation with the format (kernel_size, stride, filters).
    input_size: (Integer) If not None the function also computes output size.

  Returns:
    output_rf: (Integer) Size of receptive field.
    output_size: (Integer) Spatial size of network tensor at the output
      (None when input_size is not given).
  """
  output_rf = 1
  output_size = input_size
  stride_jumps = 1
  for kernel_size, stride, _ in conv_ops_list:
    # Each layer widens the receptive field by (kernel - 1) input pixels,
    # scaled by the accumulated stride of the preceding layers.
    output_rf += stride_jumps * (kernel_size - 1)
    stride_jumps *= stride
    if input_size:
      # Valid convolution: no padding, so the spatial size shrinks.
      output_size = int(np.ceil((output_size - kernel_size + 1) / stride))
  return output_rf, output_size
def bottleneck(x, filters, kernel_size, stride, activation, expansion,
               batch_norm_config, is_training):
  """Creates a bottleneck layer.
  conv(kernel:1, stride:1) -> conv(kernel, stride) -> conv(kernel:1, stride:1)
  Args:
    x: input tensor.
    filters: (Integer) Number of filters for first two convolutions.
    kernel_size: (Integer) Bottleneck kernel size.
    stride: (Integer) Bottleneck stride size.
    activation: Tensorflow activation function.
    expansion: (Integer) Expansion of feature channels at the last convolution.
    batch_norm_config: (Configuration object) with batch normalization params.
    is_training: (Boolean) Whether training on inference mode.
  Returns:
    Returns bottleneck output tensor.
  """
  residual = x
  # "a": 1x1 reduction convolution.
  with tf.variable_scope("a"):
    net = tf.layers.conv2d(
        x,
        filters=filters,
        kernel_size=1,
        strides=(1, 1),
        use_bias=not batch_norm_config.enable,
        padding="valid")
    tf.logging.info("Constructing layer: %s", net)
    if batch_norm_config.enable:
      net = tf.layers.batch_normalization(
          net, training=is_training,
          momentum=batch_norm_config.momentum,
          epsilon=batch_norm_config.epsilon)
      tf.logging.info("Constructing layer: %s", net)
    net = activation(net)
  # "b": the spatial convolution carrying the kernel/stride of the block.
  with tf.variable_scope("b"):
    net = tf.layers.conv2d(
        net,
        filters=filters,
        kernel_size=kernel_size,
        strides=(stride, stride),
        use_bias=not batch_norm_config.enable,
        padding="valid")
    tf.logging.info("Constructing layer: %s", net)
    if batch_norm_config.enable:
      net = tf.layers.batch_normalization(
          net, training=is_training,
          momentum=batch_norm_config.momentum,
          epsilon=batch_norm_config.epsilon)
      tf.logging.info("Constructing layer: %s", net)
    net = activation(net)
  # "c": 1x1 expansion convolution; BN gamma starts at zero so the block
  # initially behaves like the identity (residual only).
  with tf.variable_scope("c"):
    net = tf.layers.conv2d(
        net,
        filters=filters * expansion,
        kernel_size=1,
        strides=(1, 1),
        use_bias=not batch_norm_config.enable,
        padding="valid")
    if batch_norm_config.enable:
      net = tf.layers.batch_normalization(
          net, training=is_training,
          momentum=batch_norm_config.momentum,
          epsilon=batch_norm_config.epsilon,
          gamma_initializer=tf.zeros_initializer())
      tf.logging.info("Constructing layer: %s", net)
  # A valid 3x3 convolution shrinks each spatial dim by 2; crop the residual
  # so shapes still match for the addition below.
  if kernel_size == 3:
    residual = residual[:, 1:-1, 1:-1, :]
  with tf.variable_scope("downsample"):
    # Project the residual when stride or channel count changes.
    if stride != 1 or residual.shape.as_list()[-1] != filters * expansion:
      residual = tf.layers.conv2d(
          residual,
          filters=filters * expansion,
          kernel_size=1,
          strides=(stride, stride),
          use_bias=not batch_norm_config.enable,
          padding="valid")
      if batch_norm_config.enable:
        # NOTE(review): this normalizes `net` (the main branch, already
        # normalized in scope "c") rather than the just-created `residual`
        # projection — looks like a bug; confirm against the reference
        # BagNet implementation before changing behavior.
        net = tf.layers.batch_normalization(
            net, training=is_training,
            momentum=batch_norm_config.momentum,
            epsilon=batch_norm_config.epsilon)
  with tf.variable_scope("add_residual"):
    net += residual
  out = activation(net)
  return out
class BagNet(object):
  """Bag of local features network (BagNet).
  Attributes:
    infilters: (Integer) Base filters dimensionality.
    expansion: (Integer) Filter expansion multiplier.
    config: (Configuration object) With parameters:
      blocks: (List of integers) Number of bottleneck blocks per group.
      strides: (List of integers) Stride size per group.
      num_classes: (Integer) Number of output classes.
      kernel3: (List of integers) Number 3x3 kernels per group.
      activation: Tensorflow activation function.
      num_classes: (Integer) Number of output classes.
      planes: (List of integer) Base filters size per group.
      final_bottleneck: (Boolean) Use a final features bottleneck.
      batch_norm: (Configuration object) Batch normalization parameters.
    conv_ops_list: (List of named tuples) Settings of the convolutions.
    receptive_field: (Tuple of integers) Receptive field shape.
    variable_scope: (string) Name of variable scope.
    var_list: List of network variables.
    init_op: Initialization operations for model variables.
  """
  def __init__(self, config, variable_scope="bagnet"):
    # Filter count tracked across groups; updated by _make_group.
    self.infilters = 64
    self.config = config
    self.variable_scope = variable_scope
    # Populated while the graph is built (see _make_group).
    self.receptive_field = None
    self.conv_ops_list = []
    # Call once to create network variables. Then reuse variables later.
    self.var_list = []
    self.init_op = []
def _collect_variables(self, vs):
"""Collects model variables.
Populates self.var_list with model variables and self.init_op with
variables' initializer. This function is only called once with __call__.
Args:
vs: Variables list to be added to self.var_list.
"""
self.var_list.extend(vs)
self.init_op = tf.variables_initializer(var_list=self.var_list)
def _make_group(self,
x,
filters,
blocks,
is_training,
activation,
expansion,
stride=1,
kernel3=0,
prefix=""):
"""Makes network group of layers.
Args:
x: Input tensor.
filters: (Integer) Number of filters for first two convolutions.
blocks: (Integer) Number of bottleneck blocks.
is_training: (Boolean) Whether training on inference mode.
activation: Tensorflow activation function.
expansion: (Integer) Expansion of feature multiplier.
stride: (Integer) Group stride size.
kernel3: (Integer) Number of 3x3 convolutional layers.
prefix: (String) Prefix of variable scope.
Returns:
Group output tensor.
"""
net = x
with tf.variable_scope(prefix):
for i in range(blocks):
with tf.variable_scope("block%d" % i):
kernel_size = 3 if i < kernel3 else 1
stride_size = stride if i == 0 else 1
net = bottleneck(
net,
filters,
kernel_size=kernel_size,
stride=stride_size,
activation=activation,
expansion=expansion,
batch_norm_config=self.config.batch_norm,
is_training=is_training)
self.conv_ops_list.extend(
[
ConvLayer(1, 1, filters),
ConvLayer(kernel_size, stride_size, filters),
ConvLayer(1, 1, filters * expansion)
]
)
self.infilters = filters * expansion
return net
def _build_model_graph(self, x, is_training):
"""Builds model graph."""
endpoints = {}
with tf.variable_scope("pre_groups"):
net = tf.layers.conv2d(
x,
filters=self.config.init_conv_channels,
kernel_size=3,
strides=(1, 1),
use_bias=not self.config.batch_norm.enable,
padding="valid")
self.conv_ops_list.append(ConvLayer(3, 1, self.config.init_conv_channels))
if self.config.batch_norm.enable:
net = tf.layers.batch_normalization(
net, training=is_training,
momentum=self.config.batch_norm.momentum,
epsilon=self.config.batch_norm.epsilon)
net = self.config.activation(net)
number_groups = len(self.config.blocks)
for i in range(number_groups):
net = self._make_group(
net,
filters=self.config.planes[i],
blocks=self.config.blocks[i],
is_training=is_training,
activation=self.config.activation,
expansion=self.config.expansion,
stride=self.config.strides[i],
kernel3=self.config.kernel3[i],
prefix="group%d" % i)
tf.logging.info("Constructing layer: %s", net)
endpoints["features2d"] = net
if self.config.final_bottleneck:
with tf.variable_scope("final_bottleneck"):
channels = net.shape.as_list()[-1]
net = tf.layers.conv2d(
net,
filters=channels // 4,
kernel_size=1,
strides=(1, 1),
use_bias=not self.config.batch_norm.enable,
padding="valid")
self.conv_ops_list.append(ConvLayer(1, 1, channels // 4))
if self.config.batch_norm.enable:
net = tf.layers.batch_normalization(
net, training=is_training,
momentum=self.config.batch_norm.momentum,
epsilon=self.config.batch_norm.epsilon)
net = self.config.activation(net)
endpoints["features2d_lowd"] = net
with tf.variable_scope("logits2d"):
net = tf.layers.conv2d(
net,
filters=self.config.num_classes,
kernel_size=1,
strides=(1, 1),
use_bias=True,
padding="valid")
self.conv_ops_list.append(ConvLayer(1, 1, self.config.num_classes))
tf.logging.info("Constructing layer: %s", net)
endpoints["logits2d"] = net
logits = tf.reduce_mean(net, axis=[1, 2])
tf.logging.info("Constructing layer: %s", logits)
return logits, endpoints
def __call__(self, x, is_training):
"""Builds network.
Args:
x: 4-D Tensor of shape [batch, height, width, channels].
is_training: (Boolean) Training or inference mode.
Returns:
logits: Network output.
endpoints: Dictionary with activations at different layers.
"""
variables_before = set(tf.global_variables())
reuse = bool(self.var_list)
tf.logging.info("Build bagnet.")
with tf.variable_scope(self.variable_scope, reuse=reuse):
logits, endpoints = self._build_model_graph(x, is_training)
variables_after = set(tf.global_variables())
if not reuse:
self._collect_variables(list(variables_after - variables_before))
self.receptive_field = tuple([
receptive_field_size(self.conv_ops_list)[0]] * 2)
return logits, endpoints
| [
"tensorflow.compat.v1.variables_initializer",
"numpy.ceil",
"tensorflow.compat.v1.zeros_initializer",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.layers.batch_normalization",
"collections.namedtuple",
"tensorfl... | [((982, 1055), 'collections.namedtuple', 'collections.namedtuple', (['"""ConvLayer"""', "['kernel_size', 'stride', 'filters']"], {}), "('ConvLayer', ['kernel_size', 'stride', 'filters'])\n", (1004, 1055), False, 'import collections\n'), ((2664, 2686), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""a"""'], {}), "('a')\n", (2681, 2686), True, 'import tensorflow.compat.v1 as tf\n'), ((2698, 2825), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['x'], {'filters': 'filters', 'kernel_size': '(1)', 'strides': '(1, 1)', 'use_bias': '(not batch_norm_config.enable)', 'padding': '"""valid"""'}), "(x, filters=filters, kernel_size=1, strides=(1, 1),\n use_bias=not batch_norm_config.enable, padding='valid')\n", (2714, 2825), True, 'import tensorflow.compat.v1 as tf\n'), ((2876, 2922), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'net'], {}), "('Constructing layer: %s', net)\n", (2891, 2922), True, 'import tensorflow.compat.v1 as tf\n'), ((3133, 3179), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'net'], {}), "('Constructing layer: %s', net)\n", (3148, 3179), True, 'import tensorflow.compat.v1 as tf\n'), ((3214, 3236), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""b"""'], {}), "('b')\n", (3231, 3236), True, 'import tensorflow.compat.v1 as tf\n'), ((3248, 3398), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['net'], {'filters': 'filters', 'kernel_size': 'kernel_size', 'strides': '(stride, stride)', 'use_bias': '(not batch_norm_config.enable)', 'padding': '"""valid"""'}), "(net, filters=filters, kernel_size=kernel_size, strides=(\n stride, stride), use_bias=not batch_norm_config.enable, padding='valid')\n", (3264, 3398), True, 'import tensorflow.compat.v1 as tf\n'), ((3448, 3494), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'net'], {}), "('Constructing layer: %s', 
net)\n", (3463, 3494), True, 'import tensorflow.compat.v1 as tf\n'), ((3705, 3751), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'net'], {}), "('Constructing layer: %s', net)\n", (3720, 3751), True, 'import tensorflow.compat.v1 as tf\n'), ((3787, 3809), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""c"""'], {}), "('c')\n", (3804, 3809), True, 'import tensorflow.compat.v1 as tf\n'), ((3821, 3963), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['net'], {'filters': '(filters * expansion)', 'kernel_size': '(1)', 'strides': '(1, 1)', 'use_bias': '(not batch_norm_config.enable)', 'padding': '"""valid"""'}), "(net, filters=filters * expansion, kernel_size=1, strides=(\n 1, 1), use_bias=not batch_norm_config.enable, padding='valid')\n", (3837, 3963), True, 'import tensorflow.compat.v1 as tf\n'), ((4270, 4316), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'net'], {}), "('Constructing layer: %s', net)\n", (4285, 4316), True, 'import tensorflow.compat.v1 as tf\n'), ((4391, 4422), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""downsample"""'], {}), "('downsample')\n", (4408, 4422), True, 'import tensorflow.compat.v1 as tf\n'), ((4952, 4985), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""add_residual"""'], {}), "('add_residual')\n", (4969, 4985), True, 'import tensorflow.compat.v1 as tf\n'), ((6804, 6852), 'tensorflow.compat.v1.variables_initializer', 'tf.variables_initializer', ([], {'var_list': 'self.var_list'}), '(var_list=self.var_list)\n', (6828, 6852), True, 'import tensorflow.compat.v1 as tf\n'), ((9703, 9749), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'net'], {}), "('Constructing layer: %s', net)\n", (9718, 9749), True, 'import tensorflow.compat.v1 as tf\n'), ((10838, 10884), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 
'net'], {}), "('Constructing layer: %s', net)\n", (10853, 10884), True, 'import tensorflow.compat.v1 as tf\n'), ((10930, 10962), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['net'], {'axis': '[1, 2]'}), '(net, axis=[1, 2])\n', (10944, 10962), True, 'import tensorflow.compat.v1 as tf\n'), ((10967, 11016), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Constructing layer: %s"""', 'logits'], {}), "('Constructing layer: %s', logits)\n", (10982, 11016), True, 'import tensorflow.compat.v1 as tf\n'), ((11444, 11476), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Build bagnet."""'], {}), "('Build bagnet.')\n", (11459, 11476), True, 'import tensorflow.compat.v1 as tf\n'), ((2968, 3101), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (['net'], {'training': 'is_training', 'momentum': 'batch_norm_config.momentum', 'epsilon': 'batch_norm_config.epsilon'}), '(net, training=is_training, momentum=\n batch_norm_config.momentum, epsilon=batch_norm_config.epsilon)\n', (2997, 3101), True, 'import tensorflow.compat.v1 as tf\n'), ((3540, 3673), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (['net'], {'training': 'is_training', 'momentum': 'batch_norm_config.momentum', 'epsilon': 'batch_norm_config.epsilon'}), '(net, training=is_training, momentum=\n batch_norm_config.momentum, epsilon=batch_norm_config.epsilon)\n', (3569, 3673), True, 'import tensorflow.compat.v1 as tf\n'), ((4516, 4676), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['residual'], {'filters': '(filters * expansion)', 'kernel_size': '(1)', 'strides': '(stride, stride)', 'use_bias': '(not batch_norm_config.enable)', 'padding': '"""valid"""'}), "(residual, filters=filters * expansion, kernel_size=1,\n strides=(stride, stride), use_bias=not batch_norm_config.enable,\n padding='valid')\n", (4532, 4676), True, 'import tensorflow.compat.v1 as tf\n'), ((7723, 7748), 
'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['prefix'], {}), '(prefix)\n', (7740, 7748), True, 'import tensorflow.compat.v1 as tf\n'), ((8645, 8676), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""pre_groups"""'], {}), "('pre_groups')\n", (8662, 8676), True, 'import tensorflow.compat.v1 as tf\n'), ((8690, 8850), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['x'], {'filters': 'self.config.init_conv_channels', 'kernel_size': '(3)', 'strides': '(1, 1)', 'use_bias': '(not self.config.batch_norm.enable)', 'padding': '"""valid"""'}), "(x, filters=self.config.init_conv_channels, kernel_size=3,\n strides=(1, 1), use_bias=not self.config.batch_norm.enable, padding='valid'\n )\n", (8706, 8850), True, 'import tensorflow.compat.v1 as tf\n'), ((10537, 10566), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""logits2d"""'], {}), "('logits2d')\n", (10554, 10566), True, 'import tensorflow.compat.v1 as tf\n'), ((10580, 10701), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['net'], {'filters': 'self.config.num_classes', 'kernel_size': '(1)', 'strides': '(1, 1)', 'use_bias': '(True)', 'padding': '"""valid"""'}), "(net, filters=self.config.num_classes, kernel_size=1,\n strides=(1, 1), use_bias=True, padding='valid')\n", (10596, 10701), True, 'import tensorflow.compat.v1 as tf\n'), ((11385, 11406), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (11404, 11406), True, 'import tensorflow.compat.v1 as tf\n'), ((11486, 11537), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['self.variable_scope'], {'reuse': 'reuse'}), '(self.variable_scope, reuse=reuse)\n', (11503, 11537), True, 'import tensorflow.compat.v1 as tf\n'), ((11631, 11652), 'tensorflow.compat.v1.global_variables', 'tf.global_variables', ([], {}), '()\n', (11650, 11652), True, 'import tensorflow.compat.v1 as tf\n'), ((1803, 1852), 'numpy.ceil', 'np.ceil', (['((output_size - kernel_size + 1) / stride)'], 
{}), '((output_size - kernel_size + 1) / stride)\n', (1810, 1852), True, 'import numpy as np\n'), ((4779, 4912), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (['net'], {'training': 'is_training', 'momentum': 'batch_norm_config.momentum', 'epsilon': 'batch_norm_config.epsilon'}), '(net, training=is_training, momentum=\n batch_norm_config.momentum, epsilon=batch_norm_config.epsilon)\n', (4808, 4912), True, 'import tensorflow.compat.v1 as tf\n'), ((9038, 9181), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (['net'], {'training': 'is_training', 'momentum': 'self.config.batch_norm.momentum', 'epsilon': 'self.config.batch_norm.epsilon'}), '(net, training=is_training, momentum=self.\n config.batch_norm.momentum, epsilon=self.config.batch_norm.epsilon)\n', (9067, 9181), True, 'import tensorflow.compat.v1 as tf\n'), ((9832, 9869), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""final_bottleneck"""'], {}), "('final_bottleneck')\n", (9849, 9869), True, 'import tensorflow.compat.v1 as tf\n'), ((9928, 10068), 'tensorflow.compat.v1.layers.conv2d', 'tf.layers.conv2d', (['net'], {'filters': '(channels // 4)', 'kernel_size': '(1)', 'strides': '(1, 1)', 'use_bias': '(not self.config.batch_norm.enable)', 'padding': '"""valid"""'}), "(net, filters=channels // 4, kernel_size=1, strides=(1, 1),\n use_bias=not self.config.batch_norm.enable, padding='valid')\n", (9944, 10068), True, 'import tensorflow.compat.v1 as tf\n'), ((4241, 4263), 'tensorflow.compat.v1.zeros_initializer', 'tf.zeros_initializer', ([], {}), '()\n', (4261, 4263), True, 'import tensorflow.compat.v1 as tf\n'), ((7793, 7825), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (["('block%d' % i)"], {}), "('block%d' % i)\n", (7810, 7825), True, 'import tensorflow.compat.v1 as tf\n'), ((10262, 10405), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (['net'], {'training': 'is_training', 
'momentum': 'self.config.batch_norm.momentum', 'epsilon': 'self.config.batch_norm.epsilon'}), '(net, training=is_training, momentum=self.\n config.batch_norm.momentum, epsilon=self.config.batch_norm.epsilon)\n', (10291, 10405), True, 'import tensorflow.compat.v1 as tf\n')] |
from src.metrics.significance import _significance, _mask_with_significance
from src.metrics.consolidated_dprimes import single_cell_dprimes, probewise_LDA_dprimes, probewise_dPCA_dprimes, full_dPCA_dprimes
from src.metrics.dprime import flip_dprimes
from src.data.load import get_site_ids
from src.metrics.consolidated_metrics import metrics_to_DF
from src.data.load import set_name
from src.data.region_map import region_map
import itertools as itt
import numpy as np
import pandas as pd
from configparser import ConfigParser
import pathlib as pl
from joblib import dump
"""
NOTE: after some discussion with charlie about the order of shuffling, reducing dimension and paired shuffles, I made
some strong modifications on 'consolidated_dprimes.py', see the copy 'consolidated_dprimes.py'. This pipeline is similar
as its more recent namesake, the only difference being that it calls the new dprimes/shuffle from the aforementioned
module. For the sake of comparison, it saves the results in different caches.
This is a rework of how metrics like center of mass, integral etc. were being calculated for different values of dprime.
These came from different sites and combinations of context_pairs, probes, or their mean. All this data is stored in a
dataframe, so the effect of the different parameters on the metrics can be explored. This removes old things and adds
new ones:
    1. removes old metrics like the old exponential decay fits, and the non significant calculations of center
    of mass and integral, among others.
    2. Adds different corrections for multiple comparisons
    3. Adds different ways of considering significance for the means of probes and context pairs, which I have
    speculated in the past, might lead to over estimation of significance in late time bins.
    4. Parses the variance captured by the full dPCA, so we can potentially see differences in the
    marginalizations between regions.
It is recommended that, once some of the questions about multiple comparisons correction and significance of the
mean are cleared, I cut down on the unused alternatives for a cleaner and smaller DF.
"""

# ---- analysis configuration ------------------------------------------------
config = ConfigParser()
# Close the settings file deterministically instead of leaking the handle.
with open(pl.Path(__file__).parents[2] / 'config' / 'settings.ini') as settings_file:
    config.read_file(settings_file)

rec_recache = False
dprime_recache = False
signif_tails = 'both'
alpha = 0.01

meta = {'reliability': 0.1,  # r value
        'smoothing_window': 0,  # ms
        'raster_fs': 30,
        'montecarlo': 1000,
        'zscore': True,
        'dprime_absolute': None}

summary_DF_file = pl.Path(config['paths']['analysis_cache']) / 'consolidated_summary_DF_v2' / set_name(meta)
variance_DF_file = pl.Path(config['paths']['analysis_cache']) / 'variance_explained_DF' / set_name(meta)

analysis_functions = {'SC': single_cell_dprimes, 'LDA': probewise_LDA_dprimes,
                      'pdPCA': probewise_dPCA_dprimes, 'fdPCA': full_dPCA_dprimes}

permutations = {'contexts': [0, 1, 2, 3, 4],
                'probes': [1, 2, 3, 4],
                'stim_type': 'permutations'}

triplets = {'contexts': ['silence', 'continuous', 'similar', 'sharp'],
            'probes': [2, 3, 5, 6],
            'stim_type': 'triplets'}

experiments = [permutations, triplets]

# Dimension indices over which each multiple-comparisons correction is
# applied (presumably 1=context_pair, 2=probe, 3=time — confirm against
# _significance); None disables correction.
multiple_corrections = {'none': None,
                        'full': [1, 2, 3],
                        'time': [3],
                        'probe': [2, 3],
                        'context_pair': [1, 3]}

mean_types = ['zeros', 'mean']
metrics = ['significant_abs_mass_center', 'significant_abs_sum']

sites = set(get_site_ids(316).keys())
badsites = {'AMT031a', 'DRX008b', 'DRX021a', 'DRX023a', 'ley074a'}  # empirically decided
sites = sites.difference(badsites)
# sites = ['CRD004a']

# Collect per-combination frames and concatenate once at the end; repeated
# DataFrame.append is quadratic and was removed in pandas >= 2.0.
site_dfs = list()
bads = list()
for site, expt, (fname, func) in itt.product(sites, experiments, analysis_functions.items()):
    # skips full_dPCA for the triplets experiment
    if expt['stim_type'] == 'triplets' and fname == 'fdPCA':
        continue
    print(site, expt['stim_type'], fname)

    # parses the stim_type from the experiment into the meta parameters
    expt = expt.copy()
    meta['stim_type'] = expt.pop('stim_type')

    # runs the dprime function
    try:
        dprime, shuffled_dprime, goodcells, var_capt = func(site, **expt, meta=meta)
    except Exception:
        # Record the failed combination and skip it. Without this `continue`
        # the loop previously fell through and reused stale results from the
        # prior iteration (or hit a NameError on the very first failure).
        print('failed')
        bads.append((site, expt['stim_type'], fname))
        continue

    # for analysis with dimensionality reduction, changes the cellname to nan
    # for proper dimension labeling.
    if fname != 'SC':
        chan_name = [np.nan]
    else:
        chan_name = goodcells

    # creates label dictionary
    dim_lab_dict = {'cellid': chan_name,
                    'context_pair': [f'{c1}_{c2}' for c1, c2 in itt.combinations(expt['contexts'], 2)],
                    'probe': expt['probes'],
                    'time': np.linspace(0, dprime.shape[-1] / meta['raster_fs'], dprime.shape[-1],
                                        endpoint=False) * 1000}

    # calculates different significances/corrections:
    # significant time bins, both raw and corrected for multiple comparisons
    for corr_name, corr in multiple_corrections.items():
        print(f' comp_corr: {corr_name}')
        significance, confidence_interval = _significance(dprime, shuffled_dprime, corr,
                                                          alpha=alpha)
        fliped, _ = flip_dprimes(dprime, None, flip='sum')
        for mean_type in mean_types:
            print(f' mean_signif: {mean_type}')
            # masks dprime with different significances, uses different
            # approaches to define significance of the mean.
            masked, masked_lab_dict = _mask_with_significance(fliped, significance, dim_lab_dict,
                                                             mean_type=mean_type)

            # calculate different metrics and organize into a dataframe
            df = metrics_to_DF(masked, masked_lab_dict, metrics=metrics)
            df['mult_comp_corr'] = corr_name
            df['mean_signif_type'] = mean_type
            df['stim_type'] = meta['stim_type']
            df['analysis'] = fname
            df['siteid'] = site
            df['region'] = region_map[site]
            site_dfs.append(df)

DF = pd.concat(site_dfs) if site_dfs else pd.DataFrame()
print('failed sites: ', bads)

DF.drop_duplicates(inplace=True)

if summary_DF_file.parent.exists() is False:
    summary_DF_file.parent.mkdir()
dump(DF, summary_DF_file)

# calculates variance captured by the full dPCA and organizes it in a DF
variance_DF = list()
for site in sites:
    expt = permutations.copy()
    meta['stim_type'] = expt.pop('stim_type')
    _, _, _, var_capt = full_dPCA_dprimes(site, **expt, meta=meta)
    cum_var, dpc_var, marg_var, total_marginalized_var, comp_id = var_capt
    total_marginalized_var['siteid'] = site
    variance_DF.append(total_marginalized_var)

variance_DF = pd.DataFrame(variance_DF)

if variance_DF_file.parent.exists() is False:
    variance_DF_file.parent.mkdir()
dump(variance_DF, variance_DF_file)
| [
"pandas.DataFrame",
"src.metrics.consolidated_metrics.metrics_to_DF",
"src.metrics.dprime.flip_dprimes",
"src.metrics.significance._mask_with_significance",
"src.metrics.consolidated_dprimes.full_dPCA_dprimes",
"joblib.dump",
"src.data.load.set_name",
"src.metrics.significance._significance",
"pathl... | [((2125, 2139), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (2137, 2139), False, 'from configparser import ConfigParser\n'), ((3672, 3686), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3684, 3686), True, 'import pandas as pd\n'), ((6296, 6321), 'joblib.dump', 'dump', (['DF', 'summary_DF_file'], {}), '(DF, summary_DF_file)\n', (6300, 6321), False, 'from joblib import dump\n'), ((6762, 6787), 'pandas.DataFrame', 'pd.DataFrame', (['variance_DF'], {}), '(variance_DF)\n', (6774, 6787), True, 'import pandas as pd\n'), ((6872, 6907), 'joblib.dump', 'dump', (['variance_DF', 'variance_DF_file'], {}), '(variance_DF, variance_DF_file)\n', (6876, 6907), False, 'from joblib import dump\n'), ((2581, 2595), 'src.data.load.set_name', 'set_name', (['meta'], {}), '(meta)\n', (2589, 2595), False, 'from src.data.load import set_name\n'), ((2686, 2700), 'src.data.load.set_name', 'set_name', (['meta'], {}), '(meta)\n', (2694, 2700), False, 'from src.data.load import set_name\n'), ((6536, 6578), 'src.metrics.consolidated_dprimes.full_dPCA_dprimes', 'full_dPCA_dprimes', (['site'], {'meta': 'meta'}), '(site, **expt, meta=meta)\n', (6553, 6578), False, 'from src.metrics.consolidated_dprimes import single_cell_dprimes, probewise_LDA_dprimes, probewise_dPCA_dprimes, full_dPCA_dprimes\n'), ((2505, 2547), 'pathlib.Path', 'pl.Path', (["config['paths']['analysis_cache']"], {}), "(config['paths']['analysis_cache'])\n", (2512, 2547), True, 'import pathlib as pl\n'), ((2615, 2657), 'pathlib.Path', 'pl.Path', (["config['paths']['analysis_cache']"], {}), "(config['paths']['analysis_cache'])\n", (2622, 2657), True, 'import pathlib as pl\n'), ((5198, 5255), 'src.metrics.significance._significance', '_significance', (['dprime', 'shuffled_dprime', 'corr'], {'alpha': 'alpha'}), '(dprime, shuffled_dprime, corr, alpha=alpha)\n', (5211, 5255), False, 'from src.metrics.significance import _significance, _mask_with_significance\n'), ((5352, 5390), 
'src.metrics.dprime.flip_dprimes', 'flip_dprimes', (['dprime', 'None'], {'flip': '"""sum"""'}), "(dprime, None, flip='sum')\n", (5364, 5390), False, 'from src.metrics.dprime import flip_dprimes\n'), ((3493, 3510), 'src.data.load.get_site_ids', 'get_site_ids', (['(316)'], {}), '(316)\n', (3505, 3510), False, 'from src.data.load import get_site_ids\n'), ((4777, 4867), 'numpy.linspace', 'np.linspace', (['(0)', "(dprime.shape[-1] / meta['raster_fs'])", 'dprime.shape[-1]'], {'endpoint': '(False)'}), "(0, dprime.shape[-1] / meta['raster_fs'], dprime.shape[-1],\n endpoint=False)\n", (4788, 4867), True, 'import numpy as np\n'), ((5641, 5726), 'src.metrics.significance._mask_with_significance', '_mask_with_significance', (['fliped', 'significance', 'dim_lab_dict'], {'mean_type': 'mean_type'}), '(fliped, significance, dim_lab_dict, mean_type=mean_type\n )\n', (5664, 5726), False, 'from src.metrics.significance import _significance, _mask_with_significance\n'), ((5812, 5867), 'src.metrics.consolidated_metrics.metrics_to_DF', 'metrics_to_DF', (['masked', 'masked_lab_dict'], {'metrics': 'metrics'}), '(masked, masked_lab_dict, metrics=metrics)\n', (5825, 5867), False, 'from src.metrics.consolidated_metrics import metrics_to_DF\n'), ((4664, 4701), 'itertools.combinations', 'itt.combinations', (["expt['contexts']", '(2)'], {}), "(expt['contexts'], 2)\n", (4680, 4701), True, 'import itertools as itt\n'), ((2162, 2179), 'pathlib.Path', 'pl.Path', (['__file__'], {}), '(__file__)\n', (2169, 2179), True, 'import pathlib as pl\n')] |
#!/usr/bin/python
# Copyright 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
import numpy as np
import test_util as tu
import requests
class HttpTest(tu.TestResultCollector):
    """Tests Triton's HTTP "raw binary" request handling.

    A raw binary request carries no JSON inference header (the
    'Inference-Header-Content-Length' request header is '0'), so the server
    must deduce the input tensor purely from the model configuration. The
    identity ("zero") models echo the input back, which lets the tests
    verify both the accepted and the rejected request shapes.
    """

    def _get_infer_url(self, model_name):
        """Returns the KServe v2 inference endpoint URL for `model_name`."""
        return "http://localhost:8000/v2/models/{}/infer".format(model_name)

    def _raw_request(self, model, data):
        """POSTs `data` as a raw binary request (zero-length inference header)."""
        headers = {'Inference-Header-Content-Length': '0'}
        return requests.post(self._get_infer_url(model),
                             data=data,
                             headers=headers)

    def _check_identity_response(self, r, expected):
        """Asserts a successful response whose binary body equals `expected`."""
        r.raise_for_status()
        # Get the inference header size so we can locate the output binary data
        header_size = int(r.headers["Inference-Header-Content-Length"])
        self.assertEqual(
            expected, r.content[header_size:],
            "Expected response body contains correct output binary data: {}; got: {}"
            .format(expected, r.content[header_size:]))

    def _check_error_response(self, r, expected_message):
        """Asserts a 400 response whose body contains `expected_message`."""
        self.assertEqual(
            400, r.status_code,
            "Expected error code {} returned for the request; got: {}".format(
                400, r.status_code))
        self.assertIn(expected_message, r.content.decode())

    def test_raw_binary(self):
        # Select model that satisfies constraints for raw binary request
        input = np.arange(8, dtype=np.float32)
        r = self._raw_request("onnx_zero_1_float32", input.tobytes())
        self._check_identity_response(r, input.tobytes())

    def test_raw_binary_longer(self):
        # Similar to test_raw_binary but test with different data size
        input = np.arange(32, dtype=np.float32)
        r = self._raw_request("onnx_zero_1_float32", input.tobytes())
        self._check_identity_response(r, input.tobytes())

    def test_byte(self):
        # Select model that satisfies constraints for raw binary request
        # i.e. BYTE type the element count must be 1
        input = "427"
        r = self._raw_request("onnx_zero_1_object_1_element", input)
        r.raise_for_status()
        # Get the inference header size so we can locate the output binary data
        header_size = int(r.headers["Inference-Header-Content-Length"])
        # Triton returns BYTES tensor with byte size prepended
        output = r.content[header_size + 4:].decode()
        self.assertEqual(
            input, output,
            "Expected response body contains correct output binary data: {}; got: {}"
            .format(input, output))

    def test_byte_too_many_elements(self):
        # Select model that doesn't satisfy constraints for raw binary request
        # i.e. BYTE type the element count must be 1
        r = self._raw_request("onnx_zero_1_object", "427")
        self._check_error_response(
            r, "For BYTE datatype raw input, the model must have input shape [1]")

    def test_multi_variable_dimensions(self):
        # Select model that doesn't satisfy constraints for raw binary request
        # i.e. this model has multiple variable-sized dimensions
        input = np.ones([2, 2], dtype=np.float16)
        r = self._raw_request("onnx_zero_1_float16", input.tobytes())
        self._check_error_response(
            r,
            "The shape of the raw input 'INPUT0' can not be deduced because there are more than one variable-sized dimension"
        )

    def test_multi_inputs(self):
        # Select model that doesn't satisfy constraints for raw binary request
        # i.e. input count must be 1
        # Use one numpy array, after tobytes() it can be seen as three inputs
        # each with 8 elements (this ambiguity is why this is not allowed)
        input = np.arange(24, dtype=np.float32)
        r = self._raw_request("onnx_zero_3_float32", input.tobytes())
        self._check_error_response(
            r,
            "Raw request must only have 1 input (found 1) to be deduced but got 3 inputs in"
        )
if __name__ == '__main__':
    # Discover and run all HttpTest cases via the standard unittest runner.
    unittest.main()
| [
"sys.path.append",
"unittest.main",
"numpy.arange",
"numpy.ones"
] | [((1572, 1600), 'sys.path.append', 'sys.path.append', (['"""../common"""'], {}), "('../common')\n", (1587, 1600), False, 'import sys\n'), ((6869, 6884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6882, 6884), False, 'import unittest\n'), ((1997, 2027), 'numpy.arange', 'np.arange', (['(8)'], {'dtype': 'np.float32'}), '(8, dtype=np.float32)\n', (2006, 2027), True, 'import numpy as np\n'), ((2807, 2838), 'numpy.arange', 'np.arange', (['(32)'], {'dtype': 'np.float32'}), '(32, dtype=np.float32)\n', (2816, 2838), True, 'import numpy as np\n'), ((5326, 5359), 'numpy.ones', 'np.ones', (['[2, 2]'], {'dtype': 'np.float16'}), '([2, 2], dtype=np.float16)\n', (5333, 5359), True, 'import numpy as np\n'), ((6277, 6308), 'numpy.arange', 'np.arange', (['(24)'], {'dtype': 'np.float32'}), '(24, dtype=np.float32)\n', (6286, 6308), True, 'import numpy as np\n')] |
# Copyright 2019, Ri<NAME>
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import os
from abc import ABCMeta, abstractmethod
from itertools import product
from typing import Callable
import gym
import gym.spaces
import networkx as nx
import numpy as np
from pyquil import get_qc
from pyquil.api import QuantumComputer
from pyquil.api._qac import AbstractCompiler
from pyquil.device import NxDevice
from pyquil.gates import I, RX, RY, RZ, CNOT, MEASURE, RESET
from pyquil.pyqvm import PyQVM
from pyquil.quil import Program, Pragma
from pyquil.quilbase import Gate
from pyquil.unitary_tools import all_bitstrings
# Tunable environment parameters.
NUM_ANGLES = 8 # discrete actions involve rotation by multiples of 2*pi/NUM_ANGLES
NUM_SHOTS = 10 # how many measurement shots to use? for a N qubit problem this produces N*NUM_SHOTS bits of data
MAX_PROGRAM_LENGTH = 25 # limit to the number of actions taken in a given episode
QPU_NAME = "Aspen-4-16Q-A" # lattice name passed to get_qc() when qpu=True
class MinimalPyQVMCompiler(AbstractCompiler):
    """Identity compiler for use with the PyQVM.

    The PyQVM interprets Quil directly, so both compilation stages can
    simply hand the program through unchanged.
    """

    def quil_to_native_quil(self, program):
        """Return *program* untouched; no native gate set is needed."""
        return program

    def native_quil_to_executable(self, nq_program):
        """Return *nq_program* untouched; the PyQVM runs it as-is."""
        return nq_program
def bitstring_index(bitstring):
    """Recover the integer whose big-endian binary digits are *bitstring*."""
    digits = "".join(str(bit) for bit in bitstring)
    return int(digits, 2)
def lift_bitstring_function(n, f):
    """Lift a single-bitstring score into a mean score over bitstring arrays.

    Args:
        n: The number of bits in the bitstring.
        f: The bitstring function, which produces a float value.

    Returns:
        A function which, given a K x n array of 0/1 values, returns the
        mean of the (normalized) values of f across the K rows.
    """
    # Precompute f on every possible n-bit string.
    table = all_bitstrings(n).astype(np.float64)
    scores = np.apply_along_axis(f, 1, table)
    lo, hi = np.min(scores), np.max(scores)
    if np.isclose(lo, hi):
        # Constant scoring function: every bitstring is equally good.
        scores[:] = 0.5
    else:
        # Normalize scores into [0, 1].
        scores -= lo
        scores *= 1 / (hi - lo)

    def averaged(bitstrings):
        # Look up each row's precomputed score by its integer index.
        indices = np.apply_along_axis(bitstring_index, 1, bitstrings)
        return np.mean(scores[indices])

    return averaged
class ProblemSet(metaclass=ABCMeta):
    """Interface for a collection of optimization problems."""

    @property
    @abstractmethod
    def num_problems(self) -> int:
        """Total number of problems available in this set."""
        pass

    @property
    @abstractmethod
    def num_variables(self) -> int:
        """Number of decision variables shared by every problem."""
        pass

    @abstractmethod
    def problem(self, i: int) -> np.ndarray:
        """Return the array representation of the ith problem."""
        pass

    @abstractmethod
    def bitstrings_score(self, i: int) -> Callable[[np.ndarray], float]:
        """Return the scoring function associated with problem i."""
        pass
class AllProblems(ProblemSet):
    """A problem set of combinatorial optimization problems.

    Args:
        weights: A numpy array of weight matrices. weights[k,i,j] is the
            coupling between vertex i and j in the kth problem.
        labels: A list of string labels, either 'maxcut', 'maxqp', or 'qubo'.
    """
    def __init__(self, weights, labels):
        # weights must be a stack of square matrices, one per problem,
        # with one label per matrix.
        assert len(weights.shape) == 3
        assert weights.shape[1] == weights.shape[2]
        assert len(weights) == len(labels)
        self._weights = weights
        self._labels = labels
    def num_problems(self):
        """Return the number of problems (first axis of the weight stack)."""
        return self._weights.shape[0]
    def num_variables(self):
        """Return the number of variables (vertices) in each problem."""
        return self._weights.shape[1]
    def problem(self, i):
        """Return the upper-triangular entries of the ith weight matrix.

        Due to the symmetry of these problems, we only need to observe the
        upper triangular entries.
        """
        upper = np.triu_indices(self._weights.shape[1])
        return self._weights[i, :, :][upper]
    def bitstrings_score(self, i):
        """Return a scoring function over bitstring arrays for problem i.

        Raises:
            ValueError: If the label of problem i is not one of 'maxcut',
                'maxqp', or 'qubo'. (Previously an unknown label silently
                returned None, deferring the failure to the caller.)
        """
        W = self._weights[i, :, :]
        n = W.shape[0]
        if self._labels[i] == "maxcut":
            def cutweight(bitstring):
                # Total weight of edges crossing the cut defined by bitstring.
                return sum(
                    (
                        W[i, j]
                        for i in range(n)
                        for j in range(n)
                        if bitstring[i] != bitstring[j]
                    ),
                    0.0,
                )
            return lift_bitstring_function(n, cutweight)
        elif self._labels[i] == "maxqp":
            def quadratic(x):
                # Map bits {0,1} to spins {-1,+1} and evaluate z^T W z.
                z = 2 * x - 1
                return np.dot(z.T, np.dot(W, z))
            return lift_bitstring_function(n, quadratic)
        elif self._labels[i] == "qubo":
            def quadratic(x):
                # Evaluate -x^T W x directly on the 0/1 vector.
                return np.dot(x.T, np.dot(-W, x))
            return lift_bitstring_function(n, quadratic)
        else:
            raise ValueError(
                "unknown problem label: {}".format(self._labels[i]))
class ForestDiscreteEnv(gym.Env):
    """The Rigetti Forest environment.
    This implements a Gym environment for gate-based quantum computing with
    problem-specific rewards on the Rigetti hardware.
    Attributes:
        observation: A np.array, formed by concatenating observed bitstring values
            with a vector containing the problem weights.
        observation_space: The (continuous) set of possible observations.
        action space: The space of discrete actions.
        instrs: A table mapping action IDs to PyQuil gates.
    Args:
        data: A path to a numpy dataset.
        label: Either a path to a dataset of labels, or a single label value.
        shuffle: A flag indicating whether the data should be randomly shuffled.
        qpu: A flag indicating whether to run on the qpu given by QPU_NAME.
    """
    def __init__(self, data, label, shuffle=False, qpu=False):
        # Load the stack of problem weight matrices (one per graph).
        weights = np.load(data)
        n_graphs = len(weights)
        # read labels from file, or as single label
        if os.path.exists(label):
            labels = np.load(label)
        else:
            labels = [label for _ in range(n_graphs)]
        if shuffle:
            # NOTE(review): if `label` was not a file, `labels` is a plain
            # list here and indexing it with a permutation array will raise
            # TypeError — verify callers pass a label file when shuffling.
            self._shuffled_order = np.random.permutation(n_graphs)
            weights = weights[self._shuffled_order]
            labels = labels[self._shuffled_order]
        self.pset = AllProblems(weights, labels)
        self.num_qubits = self.pset.num_variables()
        qubits = list(range(self.num_qubits))
        # Discretized rotation angles: multiples of 2*pi/NUM_ANGLES.
        angles = np.linspace(0, 2 * np.pi, NUM_ANGLES, endpoint=False)
        # Action table: all ordered CNOT pairs, then every (qubit, rotation
        # gate, angle) combination.
        self.instrs = [CNOT(q0, q1) for q0, q1 in product(qubits, qubits) if q0 != q1]
        self.instrs += [
            op(theta, q) for q, op, theta in product(qubits, [RX, RY, RZ], angles)
        ]
        self.action_space = gym.spaces.Discrete(len(self.instrs))
        # Observation = NUM_SHOTS measured bits per qubit + problem weights.
        obs_len = NUM_SHOTS * self.num_qubits + len(self.pset.problem(0))
        self.observation_space = gym.spaces.Box(
            np.full(obs_len, -1.0), np.full(obs_len, 1.0), dtype=np.float32
        )
        self.reward_threshold = 0.8
        self.qpu = qpu
        if qpu:
            self._qc = get_qc(QPU_NAME)
        else:
            # Local simulator with a fully-connected topology and a
            # pass-through compiler (the PyQVM runs Quil directly).
            self._qc = QuantumComputer(
                name="qvm",
                qam=PyQVM(n_qubits=self.num_qubits),
                device=NxDevice(nx.complete_graph(self.num_qubits)),
                compiler=MinimalPyQVMCompiler(),
            )
        self.reset()
    def reset(self, problem_id=None):
        """Reset the state of the environment.
        This clears out whatever program you may have assembled so far, and
        updates the active problem.
        Args:
            problem_id: The numeric index of the problem (relative to the problem set).
                If None, a random problem will be chosen.
        """
        if problem_id is None:
            problem_id = np.random.randint(0, self.pset.num_problems())
        self.problem_id = problem_id
        self._prob_vec = self.pset.problem(self.problem_id)
        # the scoring function (for reward computation)
        self._prob_score = self.pset.bitstrings_score(self.problem_id)
        # we put some trivial gates on each relevant qubit, so that we can
        # always recover the problem variables from the program itself
        self.program = Program([I(q) for q in range(self.num_qubits)])
        self.current_step = 0
        self.running_episode_reward = 0
        self.bitstrings, info = self._run_program(self.program)
        return self.observation
    @property
    def observation(self):
        """Get the current observed quantum + problem state."""
        # This consists of two things:
        # - the measured bitstrings
        # - the vectorized representation of the optimization problem
        #
        # In particular, the first 10*NUM_SHOTS (i.e. 100) entries are measured
        # qubit values. The remaining entries are the weights of the problem
        # graph.
        return np.concatenate([self.bitstrings.flatten(), self._prob_vec])
    def step(self, action):
        """Advance the environment by performing the specified action."""
        # get the instruction indicated by the action
        instr = self.instrs[action]
        # extend the program
        self.program.inst(instr)
        # run and get some measured bitstrings
        self.bitstrings, info = self._run_program(self.program)
        # compute the avg score of the bitstrings
        reward = self._prob_score(self.bitstrings)
        self.running_episode_reward += reward
        info["instr"] = instr
        info["reward-nb"] = reward
        self.current_step += 1
        # are we done yet?
        done = False
        if self.current_step >= MAX_PROGRAM_LENGTH:
            done = True
        if reward >= self.reward_threshold:
            # Bonus for finishing early: remaining steps are added to reward.
            reward += MAX_PROGRAM_LENGTH - self.current_step
            done = True
        return self.observation, reward, done, info
    def _wrap_program(self, program):
        """Add readout declaration, MEASUREs, and the shot loop to *program*."""
        # the actions select gates. but a pyquil program needs a bit more
        # namely, declaration of classical memory for readout, and suitable
        # measurement instructions
        ro = program.declare("ro", "BIT", self.num_qubits)
        for q in range(self.num_qubits):
            program.inst(MEASURE(q, ro[q]))
        program.wrap_in_numshots_loop(NUM_SHOTS)
        return program
    def _run_program(self, program):
        """Execute *program* and return (measured bitstrings, info dict)."""
        program = program.copy()
        if self.qpu:
            # time to go through the compiler. whee!
            pragma = Program([Pragma("INITIAL_REWIRING", ['"PARTIAL"']), RESET()])
            program = pragma + program
            program = self._wrap_program(program)
            nq_program = self._qc.compiler.quil_to_native_quil(program)
            gate_count = sum(1 for instr in nq_program if isinstance(instr, Gate))
            executable = self._qc.compiler.native_quil_to_executable(nq_program)
            results = self._qc.run(executable=executable)
        else:
            program = self._wrap_program(program)
            gate_count = len(program)
            results = self._qc.run(program)
        info = {"gate_count": gate_count} # compiled length for qpu, uncompiled for qvm
        return results, info
    def render(self, mode="human"):
        """Rendering is unsupported; always raises NotImplementedError."""
        raise NotImplementedError(
            "Rendering of this environment not currently supported."
        )
    def seed(self, seed):
        """Seed the global numpy RNG used for problem selection/shuffling."""
        np.random.seed(seed)
| [
"pyquil.gates.RESET",
"numpy.load",
"numpy.random.seed",
"numpy.isclose",
"numpy.mean",
"pyquil.unitary_tools.all_bitstrings",
"numpy.full",
"os.path.exists",
"numpy.apply_along_axis",
"numpy.max",
"pyquil.quil.Pragma",
"numpy.linspace",
"itertools.product",
"pyquil.gates.CNOT",
"pyquil.... | [((1793, 1823), 'numpy.apply_along_axis', 'np.apply_along_axis', (['f', '(1)', 'bss'], {}), '(f, 1, bss)\n', (1812, 1823), True, 'import numpy as np\n'), ((1907, 1923), 'numpy.isclose', 'np.isclose', (['m', 'M'], {}), '(m, M)\n', (1917, 1923), True, 'import numpy as np\n'), ((1873, 1885), 'numpy.min', 'np.min', (['vals'], {}), '(vals)\n', (1879, 1885), True, 'import numpy as np\n'), ((1887, 1899), 'numpy.max', 'np.max', (['vals'], {}), '(vals)\n', (1893, 1899), True, 'import numpy as np\n'), ((2047, 2098), 'numpy.apply_along_axis', 'np.apply_along_axis', (['bitstring_index', '(1)', 'bitstrings'], {}), '(bitstring_index, 1, bitstrings)\n', (2066, 2098), True, 'import numpy as np\n'), ((2114, 2136), 'numpy.mean', 'np.mean', (['vals[indices]'], {}), '(vals[indices])\n', (2121, 2136), True, 'import numpy as np\n'), ((3666, 3705), 'numpy.triu_indices', 'np.triu_indices', (['self._weights.shape[1]'], {}), '(self._weights.shape[1])\n', (3681, 3705), True, 'import numpy as np\n'), ((5597, 5610), 'numpy.load', 'np.load', (['data'], {}), '(data)\n', (5604, 5610), True, 'import numpy as np\n'), ((5707, 5728), 'os.path.exists', 'os.path.exists', (['label'], {}), '(label)\n', (5721, 5728), False, 'import os\n'), ((6190, 6243), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'NUM_ANGLES'], {'endpoint': '(False)'}), '(0, 2 * np.pi, NUM_ANGLES, endpoint=False)\n', (6201, 6243), True, 'import numpy as np\n'), ((11154, 11174), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11168, 11174), True, 'import numpy as np\n'), ((1745, 1762), 'pyquil.unitary_tools.all_bitstrings', 'all_bitstrings', (['n'], {}), '(n)\n', (1759, 1762), False, 'from pyquil.unitary_tools import all_bitstrings\n'), ((5751, 5765), 'numpy.load', 'np.load', (['label'], {}), '(label)\n', (5758, 5765), True, 'import numpy as np\n'), ((5890, 5921), 'numpy.random.permutation', 'np.random.permutation', (['n_graphs'], {}), '(n_graphs)\n', (5911, 5921), True, 'import numpy as 
np\n'), ((6267, 6279), 'pyquil.gates.CNOT', 'CNOT', (['q0', 'q1'], {}), '(q0, q1)\n', (6271, 6279), False, 'from pyquil.gates import I, RX, RY, RZ, CNOT, MEASURE, RESET\n'), ((6651, 6673), 'numpy.full', 'np.full', (['obs_len', '(-1.0)'], {}), '(obs_len, -1.0)\n', (6658, 6673), True, 'import numpy as np\n'), ((6675, 6696), 'numpy.full', 'np.full', (['obs_len', '(1.0)'], {}), '(obs_len, 1.0)\n', (6682, 6696), True, 'import numpy as np\n'), ((6825, 6841), 'pyquil.get_qc', 'get_qc', (['QPU_NAME'], {}), '(QPU_NAME)\n', (6831, 6841), False, 'from pyquil import get_qc\n'), ((6294, 6317), 'itertools.product', 'product', (['qubits', 'qubits'], {}), '(qubits, qubits)\n', (6301, 6317), False, 'from itertools import product\n'), ((6401, 6438), 'itertools.product', 'product', (['qubits', '[RX, RY, RZ]', 'angles'], {}), '(qubits, [RX, RY, RZ], angles)\n', (6408, 6438), False, 'from itertools import product\n'), ((8018, 8022), 'pyquil.gates.I', 'I', (['q'], {}), '(q)\n', (8019, 8022), False, 'from pyquil.gates import I, RX, RY, RZ, CNOT, MEASURE, RESET\n'), ((10000, 10017), 'pyquil.gates.MEASURE', 'MEASURE', (['q', 'ro[q]'], {}), '(q, ro[q])\n', (10007, 10017), False, 'from pyquil.gates import I, RX, RY, RZ, CNOT, MEASURE, RESET\n'), ((6944, 6975), 'pyquil.pyqvm.PyQVM', 'PyQVM', ([], {'n_qubits': 'self.num_qubits'}), '(n_qubits=self.num_qubits)\n', (6949, 6975), False, 'from pyquil.pyqvm import PyQVM\n'), ((10267, 10308), 'pyquil.quil.Pragma', 'Pragma', (['"""INITIAL_REWIRING"""', '[\'"PARTIAL"\']'], {}), '(\'INITIAL_REWIRING\', [\'"PARTIAL"\'])\n', (10273, 10308), False, 'from pyquil.quil import Program, Pragma\n'), ((10310, 10317), 'pyquil.gates.RESET', 'RESET', ([], {}), '()\n', (10315, 10317), False, 'from pyquil.gates import I, RX, RY, RZ, CNOT, MEASURE, RESET\n'), ((4408, 4420), 'numpy.dot', 'np.dot', (['W', 'z'], {}), '(W, z)\n', (4414, 4420), True, 'import numpy as np\n'), ((7009, 7043), 'networkx.complete_graph', 'nx.complete_graph', (['self.num_qubits'], {}), 
'(self.num_qubits)\n', (7026, 7043), True, 'import networkx as nx\n'), ((4586, 4599), 'numpy.dot', 'np.dot', (['(-W)', 'x'], {}), '(-W, x)\n', (4592, 4599), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Abstract Base Class for Basis Function and some common implementations."""
import abc
import numpy as np
class BasisFunction(object):
    r"""Abstract interface for LSPI basis functions.

    A basis function maps a state vector and an action index to a feature
    vector, written :math:`\phi` in the LSPI paper (pg 9 of the PDF
    referenced in this package's documentation). A Policy dots
    :math:`\phi` with its weight vector to compute the Q-value.

    The :math:`\phi` vector is usually much smaller than an exact
    representation of the state, which keeps policies cheap to compute
    and store, even though it is typically larger than the raw state
    vector itself.
    """

    # NOTE(review): assigning __metaclass__ only takes effect on Python 2;
    # under Python 3 this attribute is ignored and the class is not
    # actually abstract — confirm the targeted interpreter version.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def size(self):
        r"""Return the length of the :math:`\phi` vector.

        Returns
        -------
        int
            Number of features produced by :meth:`evaluate`
            (referred to as k in the paper).
        """
        pass  # pragma: no cover

    @abc.abstractmethod
    def evaluate(self, state, action):
        r"""Compute the :math:`\phi` vector for a state-action pair.

        How this value is calculated depends entirely on the concrete
        implementation of BasisFunction.

        Parameters
        ----------
        state : numpy.array
            The state vector; the s in Q(s, a).
        action : int
            The action index; the a in Q(s, a).

        Returns
        -------
        numpy.array
            The :math:`\phi` vector used by Policy to compute the Q-value.
        """
        pass  # pragma: no cover

    @abc.abstractproperty
    def num_actions(self):
        """Number of possible actions.

        Returns
        -------
        int
            Count of actions supported by this basis.
        """
        pass  # pragma: no cover

    @staticmethod
    def _validate_num_actions(num_actions):
        """Return num_actions if valid, otherwise raise ValueError.

        Returns
        -------
        int
            The validated number of possible actions.

        Raises
        ------
        ValueError
            If num_actions < 1.
        """
        if num_actions < 1:
            raise ValueError('num_actions must be >= 1')
        return num_actions
class FakeBasis(BasisFunction):
    r"""Basis that ignores all input. Useful for random sampling.

    When creating a purely random Policy a basis function is still required.
    This basis function just returns a :math:`\phi` equal to [1.] for all
    inputs. It will however, still throw exceptions for impossible values like
    negative action indexes.

    Parameters
    ----------
    num_actions : int
        Number of possible actions. Must be >= 1.
    """
    def __init__(self, num_actions):
        """Initialize FakeBasis."""
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
    def size(self):
        r"""Return size of 1.

        Returns
        -------
        int
            Size of :math:`phi` which is always 1 for FakeBasis

        Example
        -------
        >>> FakeBasis(2).size()
        1
        """
        # Fixed docstring example: FakeBasis requires the num_actions
        # argument, so the previous `FakeBasis()` call would raise TypeError.
        return 1
    def evaluate(self, state, action):
        r"""Return :math:`\phi` equal to [1.].

        Parameters
        ----------
        state : numpy.array
            The state to get the features for.
            When calculating Q(s, a) this is the s. FakeBasis ignores these
            values.
        action : int
            The action index to get the features for.
            When calculating Q(s, a) this is the a. FakeBasis ignores these
            values.

        Returns
        -------
        numpy.array
            :math:`\phi` vector equal to [1.].

        Raises
        ------
        IndexError
            If action index is < 0 or >= num_actions.

        Example
        -------
        >>> FakeBasis(2).evaluate(np.arange(10), 0)
        array([ 1.])
        """
        if action < 0:
            raise IndexError('action index must be >= 0')
        if action >= self.num_actions:
            raise IndexError('action must be < num_actions')
        return np.array([1.])
    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions
    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
class OneDimensionalPolynomialBasis(BasisFunction):
    """Polynomial features for a state with one dimension.

    Takes the value of the state and constructs a vector proportional
    to the specified degree and number of actions. The polynomial is first
    constructed as [..., 1, value, value^2, ..., value^k, ...]
    where k is the degree. The rest of the vector is 0.

    Parameters
    ----------
    degree : int
        The polynomial degree.
    num_actions: int
        The total number of possible actions

    Raises
    ------
    ValueError
        If degree is less than 0
    ValueError
        If num_actions is less than 1
    """
    def __init__(self, degree, num_actions):
        """Initialize polynomial basis function."""
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
        if degree < 0:
            raise ValueError('Degree must be >= 0')
        self.degree = degree
    def size(self):
        """Calculate the size of the basis function.

        The base size will be degree + 1. This basic matrix is then
        duplicated once for every action. Therefore the size is equal to
        (degree + 1) * number of actions

        Returns
        -------
        int
            The size of the phi matrix that will be returned from evaluate.

        Example
        -------
        >>> basis = OneDimensionalPolynomialBasis(2, 2)
        >>> basis.size()
        6
        """
        return (self.degree + 1) * self.num_actions
    def evaluate(self, state, action):
        r"""Calculate :math:`\phi` matrix for given state action pair.

        The :math:`\phi` matrix is used to calculate the Q function for the
        given policy.

        Parameters
        ----------
        state : numpy.array
            The state to get the features for.
            When calculating Q(s, a) this is the s.
        action : int
            The action index to get the features for.
            When calculating Q(s, a) this is the a.

        Returns
        -------
        numpy.array
            The :math:`\phi` vector. Used by Policy to compute Q-value.

        Raises
        ------
        IndexError
            If :math:`0 \le action < num\_actions` then IndexError is raised.
        ValueError
            If the state vector has any number of dimensions other than 1 a
            ValueError is raised.

        Example
        -------
        >>> basis = OneDimensionalPolynomialBasis(2, 2)
        >>> basis.evaluate(np.array([2]), 0)
        array([ 1.,  2.,  4.,  0.,  0.,  0.])
        """
        if action < 0 or action >= self.num_actions:
            raise IndexError('Action index out of bounds')
        if state.shape != (1, ):
            raise ValueError('This class only supports one dimensional states')
        phi = np.zeros((self.size(), ))
        # Use floor division: under Python 3, "/" yields a float and
        # slicing with a float offset raises TypeError. On Python 2 the
        # result is identical since both operands are ints.
        offset = (self.size() // self.num_actions) * action
        value = state[0]
        # Fill [1, value, value^2, ..., value^degree] into this action's slot.
        phi[offset:offset + self.degree + 1] = \
            np.array([pow(value, i) for i in range(self.degree + 1)])
        return phi
    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions
    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
class RadialBasisFunction(BasisFunction):
    r"""Gaussian Multidimensional Radial Basis Function (RBF).

    Given a set of k means :math:`(\mu_1 , \ldots, \mu_k)` produce a feature
    vector :math:`(1, e^{-\gamma || s - \mu_1 ||^2}, \cdots,
    e^{-\gamma || s - \mu_k ||^2})` where `s` is the state vector and
    :math:`\gamma` is a free parameter. This vector will be padded with
    0's on both sides proportional to the number of possible actions
    specified.

    Parameters
    ----------
    means: list(numpy.array)
        List of numpy arrays representing :math:`(\mu_1, \ldots, \mu_k)`.
        Each :math:`\mu` is a numpy array with dimensions matching the state
        vector this basis function will be used with. If the dimensions of each
        vector are not equal than an exception will be raised. If no means are
        specified then a ValueError will be raised
    gamma: float
        Free parameter which controls the size/spread of the Gaussian "bumps".
        This parameter is best selected via tuning through cross validation.
        gamma must be > 0.
    num_actions: int
        Number of actions. Must be in range [1, :math:`\infty`] otherwise
        an exception will be raised.

    Raises
    ------
    ValueError
        If means list is empty
    ValueError
        If dimensions of each mean vector do not match.
    ValueError
        If gamma is <= 0.
    ValueError
        If num_actions is less than 1.

    Note
    ----
    The numpy arrays specifying the means are not copied.
    """
    def __init__(self, means, gamma, num_actions):
        """Initialize RBF instance."""
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
        if len(means) == 0:
            raise ValueError('You must specify at least one mean')
        # The original implementation used the Python 2 builtin `reduce`,
        # which is a NameError on Python 3. Checking every shape against
        # the first mean's shape is equivalent to the old pairwise fold.
        first_shape = means[0].shape
        if any(mean.shape != first_shape for mean in means):
            raise ValueError('All mean vectors must have the same dimensions')
        self.means = means
        if gamma <= 0:
            raise ValueError('gamma must be > 0')
        self.gamma = gamma
    @staticmethod
    def __check_mean_size(left, right):
        """Return right if left and right have equal shapes, else None.

        Retained for backwards compatibility; no longer used by __init__.
        None values propagate through a reduce-style fold automatically.

        Return
        ------
        None or numpy.array
        """
        if left is None or right is None:
            return None
        else:
            if left.shape != right.shape:
                return None
        return right
    def size(self):
        r"""Calculate size of the :math:`\phi` matrix.

        The size is equal to the number of means + 1 times the number of
        number actions.

        Returns
        -------
        int
            The size of the phi matrix that will be returned from evaluate.
        """
        return (len(self.means) + 1) * self.num_actions
    def evaluate(self, state, action):
        r"""Calculate the :math:`\phi` matrix.

        Matrix will have the following form:

        :math:`[\cdots, 1, e^{-\gamma || s - \mu_1 ||^2}, \cdots,
        e^{-\gamma || s - \mu_k ||^2}, \cdots]`

        where the matrix will be padded with 0's on either side depending
        on the specified action index and the number of possible actions.

        Returns
        -------
        numpy.array
            The :math:`\phi` vector. Used by Policy to compute Q-value.

        Raises
        ------
        IndexError
            If :math:`0 \le action < num\_actions` then IndexError is raised.
        ValueError
            If the state vector has any number of dimensions other than 1 a
            ValueError is raised.
        """
        if action < 0 or action >= self.num_actions:
            raise IndexError('Action index out of bounds')
        if state.shape != self.means[0].shape:
            raise ValueError('Dimensions of state must match '
                             'dimensions of means')
        phi = np.zeros((self.size(), ))
        # Each action owns a contiguous slot of (k + 1) entries:
        # a constant 1 followed by the k Gaussian bumps.
        offset = (len(self.means[0])+1)*action
        rbf = [RadialBasisFunction.__calc_basis_component(state,
                                                          mean,
                                                          self.gamma)
               for mean in self.means]
        phi[offset] = 1.
        phi[offset+1:offset+1+len(rbf)] = rbf
        return phi
    @staticmethod
    def __calc_basis_component(state, mean, gamma):
        """Return exp(-gamma * ||state - mean||^2) for one Gaussian bump."""
        mean_diff = state - mean
        return np.exp(-gamma*np.sum(mean_diff*mean_diff))
    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions
    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            If value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
class ExactBasis(BasisFunction):
    """Basis function with no functional approximation.

    This can only be used in domains with finite, discrete state-spaces. For
    example the Chain domain from the LSPI paper would work with this basis,
    but the inverted pendulum domain would not.

    Parameters
    ----------
    num_states: list
        A list containing integers representing the number of possible values
        for each state variable.
    num_actions: int
        Number of possible actions.
    """
    def __init__(self, num_states, num_actions):
        """Initialize ExactBasis."""
        # Coerce to an array for validation so plain lists work on
        # Python 3 (comparing a list to an int raises TypeError there).
        if np.any(np.asarray(num_states) <= 0):
            raise ValueError('num_states value\'s must be > 0')
        self.__num_actions = BasisFunction._validate_num_actions(num_actions)
        self._num_states = num_states
        # _offsets[i] is the stride of state variable i in the flattened
        # (mixed-radix) state index.
        self._offsets = [1]
        for i in range(1, len(num_states)):
            self._offsets.append(self._offsets[-1]*num_states[i-1])
    def size(self):
        r"""Return the vector size of the basis function.

        Returns
        -------
        int
            The size of the :math:`\phi` vector.
            (Referred to as k in the paper).
        """
        # Explicit product instead of the Python 2 builtin `reduce`,
        # which is a NameError on Python 3.
        num_state_combinations = 1
        for n in self._num_states:
            num_state_combinations *= n
        return num_state_combinations * self.__num_actions
    def get_state_action_index(self, state, action):
        """Return the non-zero index of the basis.

        Parameters
        ----------
        state: numpy.array
            The state to get the index for.
        action: int
            The state to get the index for.

        Returns
        -------
        int
            The non-zero index of the basis

        Raises
        ------
        IndexError
            If action index < 0 or action index > num_actions
        """
        if action < 0:
            raise IndexError('action index must be >= 0')
        if action >= self.num_actions:
            raise IndexError('action must be < num_actions')
        # Floor division keeps the base an exact int; int(a / b) goes
        # through a float and can lose precision for huge state spaces.
        base = action * (self.size() // self.__num_actions)
        offset = 0
        # Mixed-radix encoding of the state vector.
        for i, value in enumerate(state):
            offset += self._offsets[i] * value
        return base + offset
    def evaluate(self, state, action):
        r"""Return a :math:`\phi` vector that has a single non-zero value.

        Parameters
        ----------
        state: numpy.array
            The state to get the features for. When calculating Q(s, a) this is
            the s.
        action: int
            The action index to get the features for.
            When calculating Q(s, a) this is the a.

        Returns
        -------
        numpy.array
            :math:`\phi` vector

        Raises
        ------
        IndexError
            If action index < 0 or action index > num_actions
        ValueError
            If the size of the state does not match the the size of the
            num_states list used during construction.
        ValueError
            If any of the state variables are < 0 or >= the corresponding
            value in the num_states list used during construction.
        """
        if len(state) != len(self._num_states):
            raise ValueError('Number of state variables must match '
                             + 'size of num_states.')
        if np.any(np.asarray(state) < 0):
            raise ValueError('state cannot contain negative values.')
        for state_var, num_state_values in zip(state, self._num_states):
            if state_var >= num_state_values:
                raise ValueError('state values must be <= corresponding '
                                 + 'num_states value.')
        phi = np.zeros(self.size())
        phi[self.get_state_action_index(state, action)] = 1
        return phi
    @property
    def num_actions(self):
        """Return number of possible actions."""
        return self.__num_actions
    @num_actions.setter
    def num_actions(self, value):
        """Set the number of possible actions.

        Parameters
        ----------
        value: int
            Number of possible actions. Must be >= 1.

        Raises
        ------
        ValueError
            if value < 1.
        """
        if value < 1:
            raise ValueError('num_actions must be at least 1.')
        self.__num_actions = value
| [
"numpy.where",
"numpy.array",
"numpy.sum"
] | [((4384, 4399), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (4392, 4399), True, 'import numpy as np\n'), ((13339, 13368), 'numpy.sum', 'np.sum', (['(mean_diff * mean_diff)'], {}), '(mean_diff * mean_diff)\n', (13345, 13368), True, 'import numpy as np\n'), ((14546, 14571), 'numpy.where', 'np.where', (['(num_states <= 0)'], {}), '(num_states <= 0)\n', (14554, 14571), True, 'import numpy as np\n'), ((17198, 17217), 'numpy.where', 'np.where', (['(state < 0)'], {}), '(state < 0)\n', (17206, 17217), True, 'import numpy as np\n')] |
"""
femagtools.dxfsl.converter
~~~~~~~~~~~~~~~~~~~~~~~~~~
read a dxf file and create a plot or fsl file
Authors: <NAME>, <NAME>
"""
import os
from femagtools.dxfsl.geom import Geometry, dxfshapes, femshapes
from femagtools.dxfsl.shape import Shape
from femagtools.dxfsl.fslrenderer import FslRenderer, agndst
from femagtools.dxfsl.plotrenderer import PlotRenderer
import logging
import logging.config
import numpy as np
logger = logging.getLogger(__name__)
def symmetry_search(machine,
                    plt, # plotter
                    kind,
                    symtol=0.0,
                    is_inner=False,
                    is_outer=False,
                    show_plots=True,
                    debug_mode=False,
                    rows=1,
                    cols=1,
                    num=1):
    """Reduce a machine part (*kind*, e.g. stator or rotor) to its base slice.

    Searches for a symmetry axis with tolerance *symtol*; if one is found,
    extracts the smallest symmetric slice and then repeatedly mirrors it
    until no further mirror symmetry exists. The reduced machine object is
    returned with its kind set to *kind*. Plotting parameters (rows, cols,
    num) position optional diagnostic plots on a grid.

    NOTE(review): is_inner/is_outer are currently unused here — the
    complete_hull call that consumed them is commented out below.
    """
    logger.info("symmetry search for %s", kind)
    machine.clear_cut_lines()
    if show_plots and debug_mode:
        plt.render_elements(machine.geom, Shape,
                            neighbors=True, title=kind)
    if not machine.find_symmetry(symtol):
        # No axis found: try to mirror the full geometry directly.
        logger.info(" - {}: no symmetry axis found".format(kind))
        if show_plots:
            plt.add_emptyplot(rows, cols, num, 'no symmetry axis')
        machine_mirror = machine.get_symmetry_mirror()
        machine_slice = machine
    else:
        logger.info(" - {}: symmetry axis found !!".format(kind))
        if show_plots:
            plt.render_elements(machine.geom, Shape,
                                title=kind+' (symmetrylines)',
                                draw_inside=True,
                                rows=rows, cols=cols, num=num, show=False)
        machine_slice = machine.get_symmetry_slice()
        if machine_slice is None:
            # Axis detected but slicing failed: return the original part.
            machine.kind = kind
            logger.info(" - no slice extracted ?!?")
            return machine
        machine_mirror = machine_slice.get_symmetry_mirror()
    if machine_mirror is None:
        logger.info(" - no mirror found")
        # Normalize orientation so the slice starts at angle zero.
        if not machine_slice.is_startangle_zero():
            machine_slice.rotate_to(0.0)
            machine_slice.set_alfa_and_corners()
        machine_ok = machine_slice
    else:
        if show_plots and debug_mode:
            plt.render_elements(machine_mirror.mirror_geom, Shape,
                                title='Mirror of '+kind,
                                rows=rows, cols=cols, num=num, show=True)
        logger.info(" - mirror found")
        # Keep halving via mirror symmetry until none remains.
        machine_next_mirror = machine_mirror.get_symmetry_mirror()
        while machine_next_mirror is not None:
            logger.info(" - another mirror found")
            machine_mirror = machine_next_mirror
            machine_next_mirror = machine_mirror.get_symmetry_mirror()
        machine_ok = machine_mirror
    machine_ok.set_minmax_radius()
    # machine_ok.complete_hull(is_inner, is_outer)
    machine_ok.create_auxiliary_lines()
    machine_ok.set_kind(kind)
    logger.info("*** End of symmetry search for %s ***", kind)
    return machine_ok
def convert(dxfile,
            rtol=1e-03,
            atol=0.005,
            mindist=0.0,
            symtol=0.001,
            split=False,
            inner_name='inner',
            outer_name='outer',
            part=(),
            airgap=0.0,
            airgap2=0.0,
            da=0.0,
            dy=0.0,
            nodedist=1,
            view_only=False,
            view_korr=False,
            show_plots=False,
            show_areas=False,
            write_fsl=True,
            write_png=False,
            debug_mode=False):
    """Convert a DXF (or FEM) drawing into FEMAG model data.

    :param dxfile: path of the input file; a '.fem' suffix selects the
                   FEM reader, everything else is read as DXF
    :param rtol: relative tolerance for the geometry builder
    :param atol: absolute tolerance for the geometry builder
    :param mindist: minimum distance for the DXF shape reader; also
                    used later to drop tiny elements
    :param symtol: tolerance for symmetry and airgap detection
    :param split: split option forwarded to the geometry builder
    :param inner_name: label for the inner part
    :param outer_name: label for the outer part
    :param part: empty for a full machine, otherwise a pair
                 ('rotor'|'stator', 'in'|'out') describing a single part
    :param airgap: airgap position hint
    :param airgap2: second airgap position hint
    :param da: airgap diameter; only honoured when *part* is given
    :param dy: yoke diameter; only honoured when *part* is given
    :param nodedist: node distance factor for the femag parameters
    :param view_only: only render the input geometry, no conversion
    :param view_korr: with *view_only*: apply corrections before viewing
    :param show_plots: render diagnostic plots of each stage
    :param show_areas: render the detected areas
    :param write_fsl: generate fsl sources into the result dict
    :param write_png: write plots to png files instead of showing them
    :param debug_mode: enable additional debug plots
    :return: dict with fsl sources and femag parameters, a dict with an
             'error' entry on failure, or None when no fsl file can be
             created
    """
    layers = ()
    conv = {}
    # model name: input file name without directory and suffix
    basename = os.path.basename(dxfile).split('.')[0]
    logger.info("start processing %s", basename)
    # --- argument validation --------------------------------------
    if part:
        if part[0] not in ('rotor', 'stator'):
            logger.error('FATAL: Parameter rotor or stator expected')
            return dict(error='unknown part {}'.format(part))
        if part[1] not in ('in', 'out'):
            logger.error('"{}" has to be defined in/out'.format(part[0]))
            return dict(error='unknown location {}'.format(part[1]))
    else:
        # da/dy only make sense when converting a single part
        # NOTE(review): logger.warn is a deprecated alias of
        # logger.warning
        if da:
            logger.warn("distance airgap (da) ignored")
            da = 0.0
        if dy:
            logger.warn("distance yoke (dy) ignored")
            dy = 0.0
    # --- read the input geometry ----------------------------------
    try:
        if dxfile.split('.')[-1] == 'fem':
            basegeom = Geometry(femshapes(dxfile),
                                rtol=rtol,
                                atol=atol,
                                split=split)
        else:
            basegeom = Geometry(dxfshapes(dxfile,
                                          mindist=mindist,
                                          layers=layers),
                                rtol=rtol,
                                atol=atol,
                                split=split)
    except FileNotFoundError as ex:
        logger.error(ex)
        return dict()
    logger.info("total elements %s", len(basegeom.g.edges()))
    p = PlotRenderer()
    # --- view-only mode: render the raw geometry and exit ---------
    if view_only:
        logger.info("View only")
        if view_korr:
            logger.info("With Corrections")
            basegeom.search_all_overlapping_elements()
            basegeom.search_all_appendices()
        p.render_elements(basegeom, Shape,
                          neighbors=True,
                          png=write_png,
                          show=True)
        return dict()
    # --- build and sanity-check the machine object ----------------
    basegeom.search_all_overlapping_elements()
    machine_base = basegeom.get_machine()
    if show_plots:
        p.render_elements(basegeom, Shape,
                          title=os.path.basename(dxfile),
                          with_hull=False,
                          rows=3, cols=2, num=1, show=debug_mode)
    basegeom.search_all_appendices()
    if not machine_base.is_a_machine():
        logger.warn("it's Not a Machine!!")
        return dict(error='machine not detected')
    if not (machine_base.part > 0):
        # machine shape is unclear
        machine_base.set_center(0.0, 0.0)
        machine_base.set_radius(9999999)
    machine = machine_base
    if machine.part_of_circle() == 0:
        logger.warn("No arc segment found")
        return dict(error='no arc segment found')
    machine.clear_cut_lines()
    machine.move_to_middle()
    if show_plots and debug_mode:
        p.render_elements(machine.geom, Shape,
                          title='Areas',
                          neighbors=True,
                          with_corners=False, show=True)
    # --- airgap detection (airgap() is truthy on FAILURE) ---------
    if machine.airgap(airgap, airgap2, symtol):
        p.render_elements(machine.geom, Shape,
                          title='Search for airgap failed',
                          with_corners=False, show=True)
        return dict(error='no airgap found')
    if show_plots:
        p.render_elements(basegeom, Shape, neighbors=True,
                          title='Original with nodes',
                          rows=3, cols=2, num=2, show=False)
    machine.repair_hull()
    machine.geom.delete_all_appendices()
    if machine.has_airgap():
        # --- full machine: process inner and outer part -----------
        machine_inner = machine.copy(0.0, 2*np.pi, True, True)
        machine_inner = symmetry_search(machine_inner,
                                        p, # plot
                                        inner_name,
                                        is_inner=True,
                                        symtol=symtol,
                                        show_plots=show_plots,
                                        rows=3, # rows
                                        cols=2, # columns
                                        num=3) # start num
        machine_inner.set_inner()
        machine_outer = machine.copy(0.0, 2*np.pi, True, False)
        machine_outer = symmetry_search(machine_outer,
                                        p, # plot
                                        outer_name,
                                        is_outer=True,
                                        symtol=symtol,
                                        show_plots=show_plots,
                                        rows=3, # rows
                                        cols=2, # columns
                                        num=4) # start num
        machine_inner.sync_with_counterpart(machine_outer)
        machine_inner.search_subregions()
        machine_outer.search_subregions()
        # undo an accidental mirror through the winding area and
        # redo the subregion search on the restored geometry
        if machine_inner.has_mirrored_windings():
            logger.info("undo mirrored windings of %s", inner_name)
            machine_inner = machine_inner.undo_mirror()
            machine_inner.sync_with_counterpart(machine_outer)
            machine_inner.search_subregions()
            machine_inner.create_mirror_lines_outside_windings()
        elif machine_outer.has_mirrored_windings():
            logger.info("undo mirrored windings of %s", outer_name)
            machine_outer = machine_outer.undo_mirror()
            machine_inner.sync_with_counterpart(machine_outer)
            machine_outer.search_subregions()
            machine_outer.create_mirror_lines_outside_windings()
        machine_inner.delete_tiny_elements(mindist)
        machine_outer.delete_tiny_elements(mindist)
        logger.info("END of work: %s", basename)
        if show_plots:
            p.render_elements(machine_inner.geom, Shape,
                              draw_inside=True, title=inner_name,
                              rows=3, cols=2, num=5, show=False,
                              # with_nodes=True,
                              # neighbors=True,
                              # write_id=True,
                              fill_areas=True)
            p.render_elements(machine_outer.geom, Shape,
                              draw_inside=True, title=outer_name,
                              rows=3, cols=2, num=6, show=False,
                              # with_nodes=True,
                              # neighbors=True,
                              # write_id=True,
                              fill_areas=True)
            if write_png:
                p.write_plot(basename)
            else:
                p.show_plot()
        if show_areas:
            p.render_elements(machine_inner.geom, Shape,
                              title=inner_name,
                              show=True,
                              draw_inside=True,
                              neighbors=True,
                              fill_areas=True)
            p.render_areas(machine_inner.geom,
                           title=inner_name,
                           with_nodes=True,
                           single_view=True)
            p.render_elements(machine_outer.geom, Shape,
                              title=outer_name,
                              show=True,
                              draw_inside=True,
                              neighbors=True,
                              fill_areas=True)
            p.render_areas(machine_outer.geom,
                           title=outer_name,
                           with_nodes=True,
                           single_view=True)
        if write_fsl:
            if machine_inner.is_full() or machine_outer.is_full():
                logger.warning("it's not possible to create fsl-file")
                return None
            fslrenderer = FslRenderer(basename)
            inner = fslrenderer.render(machine_inner, inner=True)
            outer = fslrenderer.render(machine_outer, outer=True)
            # NOTE(review): both branches store inner as 'fsl_magnet';
            # looks suspicious (stator fsl possibly intended for one
            # of them) -- verify against the FslRenderer consumers
            if machine_inner.geom.is_rotor():
                conv['fsl_magnet'] = inner
                conv['fsl_stator'] = outer
            else:
                conv['fsl_magnet'] = inner
                conv['fsl_rotor'] = outer
            params = create_femag_parameters(machine_inner,
                                             machine_outer,
                                             nodedist)
            conv.update(params)
            conv['fsl'] = fslrenderer.render_main(
                machine,
                machine_inner, machine_outer,
                inner, outer,
                params)
    else:
        # No airgap found. This must be an inner or outer part
        name = "No_Airgap"
        inner = False
        outer = False
        params = None
        if part:
            if part[1] == 'in':
                name = inner_name
                inner = True
            else:
                name = outer_name
                outer = True
        machine = symmetry_search(machine,
                                  p, # plot
                                  name,
                                  symtol=symtol,
                                  show_plots=show_plots,
                                  rows=3, # rows
                                  cols=2, # cols
                                  num=3) # start num
        # optionally cut the part down to the given da/dy diameters
        if da > 0.0 or dy > 0.0:
            if inner:
                r_out = da / 2.0
                r_in = dy / 2.0
            elif outer:
                r_out = dy / 2.0
                r_in = da / 2.0
            else:
                r_out = 0.0
                r_in = 0.0
            if machine.cut_is_possible(r_in, r_out):
                logger.info("make a cut")
                machine = machine.cut(r_in, r_out)
        if part:
            if part[0] == 'stator':
                machine.geom.set_stator()
                machine.geom.search_stator_subregions(part[1])
                # same mirrored-winding repair as in the full-machine
                # branch above
                if machine.has_mirrored_windings():
                    logger.info("undo mirror of stator")
                    machine = machine.undo_mirror()
                    machine.geom.set_stator()
                    machine.geom.search_stator_subregions(part[1])
                    machine.create_mirror_lines_outside_windings()
                params = create_femag_parameters_stator(machine,
                                                        part[1])
            else:
                machine.geom.set_rotor()
                machine.geom.search_rotor_subregions(part[1])
                params = create_femag_parameters_rotor(machine,
                                                       part[1])
        else:
            machine.geom.search_subregions()
        machine.delete_tiny_elements(mindist)
        logger.info("END of work: %s", basename)
        if show_plots:
            p.render_elements(machine.geom, Shape,
                              draw_inside=True, title=name,
                              rows=3, cols=2, num=5, show=False,
                              fill_areas=True)
            if write_png:
                p.write_plot(basename)
            else:
                p.show_plot()
        if show_areas:
            p.render_elements(machine.geom, Shape,
                              title=name,
                              show=True,
                              draw_inside=True,
                              neighbors=True,
                              fill_areas=True)
            p.render_areas(machine.geom,
                           title=name,
                           with_nodes=True,
                           single_view=True)
        if write_fsl:
            if machine.is_full():
                logger.warning("it's not possible to create fsl-file")
                return None
            fslrenderer = FslRenderer(basename)
            conv['fsl'] = fslrenderer.render(machine, inner, outer)
    if params:
        conv.update(params)
    conv['name'] = basename
    return conv
def create_femag_parameters(m_inner, m_outer, nodedist=1):
    """Derive the FEMAG model parameters from inner and outer part.

    The part with more symmetry copies is assumed to carry the slots,
    the other one the poles.

    :param m_inner: inner machine part (provides geom and symmetry info)
    :param m_outer: outer machine part
    :param nodedist: node distance factor for the airgap mesh
    :return: dict of femag parameters; ``{}`` if a part is missing,
             ``{'error': ...}`` if the model is not consistent
    """
    if not (m_inner and m_outer):
        return {}
    geom_inner = m_inner.geom
    geom_outer = m_outer.geom
    parts_inner = int(m_inner.get_symmetry_part())
    parts_outer = int(m_outer.get_symmetry_part())
    if parts_inner > parts_outer:
        num_slots = parts_inner
        num_poles = parts_outer
        num_sl_gen = int(geom_inner.get_symmetry_copies()+1)
        alfa_slot = geom_inner.get_alfa()
        alfa_pole = geom_outer.get_alfa()
    else:
        num_slots = parts_outer
        num_poles = parts_inner
        num_sl_gen = int(geom_outer.get_symmetry_copies()+1)
        alfa_slot = geom_outer.get_alfa()
        alfa_pole = geom_inner.get_alfa()
    # Validate BEFORE computing agndst: previously agndst() was called
    # with num_slots/num_poles possibly 0, only to discard the result
    # when the zero check below fired.
    if num_slots == 0 or num_poles == 0:
        if num_slots == 0:
            logger.warning("No slots found")
        if num_poles == 0:
            logger.warning("No poles found")
        logger.warning("Model not ready for femag")
        return {'error': "Model not ready for femag"}
    # slot and pole pitches must cover the same total angle
    if not np.isclose(alfa_slot * num_slots,
                      alfa_pole * num_poles):
        logger.warning("slots and poles dont match")
        return {'error': "Model not ready for femag"}
    params = {}
    params['tot_num_slot'] = num_slots
    params['num_sl_gen'] = num_sl_gen
    params['num_poles'] = num_poles
    params['nodedist'] = nodedist
    # diameters: dy = yoke, da = airgap side, 1 = outer, 2 = inner
    params['dy1'] = 2*geom_outer.max_radius
    params['da1'] = 2*geom_outer.min_radius
    params['da2'] = 2*geom_inner.max_radius
    params['dy2'] = 2*geom_inner.min_radius
    params['agndst'] = agndst(params['da1'], params['da2'],
                              num_slots, num_poles,
                              nodedist)
    params['alfa_slot'] = alfa_slot
    params['alfa_pole'] = alfa_pole
    return params
def create_femag_parameters_stator(motor, position):
    """Build the FEMAG parameter dict for a single stator part.

    :param motor: machine object providing ``geom`` and
                  ``get_symmetry_part()``
    :param position: 'in' for an inner stator, anything else is
                     treated as outer
    :return: dict with the slot count and the two diameters
    """
    params = {'tot_num_slot': motor.get_symmetry_part()}
    outer_dia = 2 * motor.geom.max_radius
    inner_dia = 2 * motor.geom.min_radius
    # diameter key names depend on which side of the airgap the
    # stator sits on (index 2 = inner part, index 1 = outer part)
    diameters = ({'da2': outer_dia, 'dy2': inner_dia}
                 if position == 'in'
                 else {'dy1': outer_dia, 'da1': inner_dia})
    params.update(diameters)
    return params
def create_femag_parameters_rotor(motor, position):
    """Build the FEMAG parameter dict for a single rotor part.

    :param motor: machine object providing ``geom`` and
                  ``get_symmetry_part()``
    :param position: 'in' for an inner rotor, anything else is
                     treated as outer
    :return: dict with the pole count and the two diameters
    """
    params = {'num_poles': motor.get_symmetry_part()}
    d_max = 2 * motor.geom.max_radius
    d_min = 2 * motor.geom.min_radius
    # diameter key names depend on which side of the airgap the
    # rotor sits on (index 2 = inner part, index 1 = outer part)
    if position == 'in':
        params['da2'], params['dy2'] = d_max, d_min
    else:
        params['dy1'], params['da1'] = d_max, d_min
    return params
| [
"femagtools.dxfsl.geom.femshapes",
"os.path.basename",
"femagtools.dxfsl.fslrenderer.FslRenderer",
"femagtools.dxfsl.plotrenderer.PlotRenderer",
"numpy.isclose",
"femagtools.dxfsl.geom.dxfshapes",
"logging.getLogger",
"femagtools.dxfsl.fslrenderer.agndst"
] | [((439, 466), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (456, 466), False, 'import logging\n'), ((4986, 5000), 'femagtools.dxfsl.plotrenderer.PlotRenderer', 'PlotRenderer', ([], {}), '()\n', (4998, 5000), False, 'from femagtools.dxfsl.plotrenderer import PlotRenderer\n'), ((16593, 16661), 'femagtools.dxfsl.fslrenderer.agndst', 'agndst', (["params['da1']", "params['da2']", 'num_slots', 'num_poles', 'nodedist'], {}), "(params['da1'], params['da2'], num_slots, num_poles, nodedist)\n", (16599, 16661), False, 'from femagtools.dxfsl.fslrenderer import FslRenderer, agndst\n'), ((17098, 17154), 'numpy.isclose', 'np.isclose', (['(alfa_slot * num_slots)', '(alfa_pole * num_poles)'], {}), '(alfa_slot * num_slots, alfa_pole * num_poles)\n', (17108, 17154), True, 'import numpy as np\n'), ((11294, 11315), 'femagtools.dxfsl.fslrenderer.FslRenderer', 'FslRenderer', (['basename'], {}), '(basename)\n', (11305, 11315), False, 'from femagtools.dxfsl.fslrenderer import FslRenderer, agndst\n'), ((15295, 15316), 'femagtools.dxfsl.fslrenderer.FslRenderer', 'FslRenderer', (['basename'], {}), '(basename)\n', (15306, 15316), False, 'from femagtools.dxfsl.fslrenderer import FslRenderer, agndst\n'), ((3627, 3651), 'os.path.basename', 'os.path.basename', (['dxfile'], {}), '(dxfile)\n', (3643, 3651), False, 'import os\n'), ((4369, 4386), 'femagtools.dxfsl.geom.femshapes', 'femshapes', (['dxfile'], {}), '(dxfile)\n', (4378, 4386), False, 'from femagtools.dxfsl.geom import Geometry, dxfshapes, femshapes\n'), ((4565, 4614), 'femagtools.dxfsl.geom.dxfshapes', 'dxfshapes', (['dxfile'], {'mindist': 'mindist', 'layers': 'layers'}), '(dxfile, mindist=mindist, layers=layers)\n', (4574, 4614), False, 'from femagtools.dxfsl.geom import Geometry, dxfshapes, femshapes\n'), ((5590, 5614), 'os.path.basename', 'os.path.basename', (['dxfile'], {}), '(dxfile)\n', (5606, 5614), False, 'import os\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.